sync_test.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"math/big"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/crypto/sha3"
)
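
// TestHashing verifies that hashing bytecodes via the KeccakState Read
// interface produces the same digests as the legacy Sum-based approach.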
func TestHashing(t *testing.T) {
	t.Parallel()
	var bytecodes = make([][]byte, 10)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var want, got string
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hash := hasher.Sum(nil)
			got = fmt.Sprintf("%v\n%v", got, hash)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
			want = fmt.Sprintf("%v\n%v", want, hash)
		}
	}
	old()
	new()
	if want != got {
		t.Errorf("want\n%v\ngot\n%v\n", want, got)
	}
}
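
// BenchmarkHashing compares the throughput of the Sum-based and the
// Read-based (KeccakState) hashing of a batch of bytecodes.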
func BenchmarkHashing(b *testing.B) {
	var bytecodes = make([][]byte, 10000)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Sum(nil)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
		}
	}
	b.Run("old", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			old()
		}
	})
	b.Run("new", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			new()
		}
	})
}
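
// Handler function types used to customise how a testPeer responds to the
// various snap protocol requests.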
type (
	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
)
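
// testPeer simulates a remote peer serving snap requests from in-memory
// account and storage tries, with pluggable request handlers.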
type testPeer struct {
	id            string
	test          *testing.T
	remote        *Syncer
	logger        log.Logger
	accountTrie   *trie.Trie
	accountValues entrySlice
	storageTries  map[common.Hash]*trie.Trie
	storageValues map[common.Hash]entrySlice

	accountRequestHandler accountHandlerFunc
	storageRequestHandler storageHandlerFunc
	trieRequestHandler    trieHandlerFunc
	codeRequestHandler    codeHandlerFunc

	term func()
}
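
// newTestPeer creates a test peer wired up with the default (well-behaving)
// request handlers and the given termination callback.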
func newTestPeer(id string, t *testing.T, term func()) *testPeer {
	peer := &testPeer{
		id:                    id,
		test:                  t,
		logger:                log.New("id", id),
		accountRequestHandler: defaultAccountRequestHandler,
		trieRequestHandler:    defaultTrieRequestHandler,
		storageRequestHandler: defaultStorageRequestHandler,
		codeRequestHandler:    defaultCodeRequestHandler,
		term:                  term,
	}
	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
	//peer.logger.SetHandler(stderrHandler)
	return peer
}

func (t *testPeer) ID() string      { return t.id }
func (t *testPeer) Log() log.Logger { return t.logger }

func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
	go t.trieRequestHandler(t, id, root, paths, bytes)
	return nil
}

func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	if len(accounts) == 1 && origin != nil {
		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
	} else {
		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
	}
	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
	go t.codeRequestHandler(t, id, hashes, bytes)
	return nil
}

// defaultTrieRequestHandler is a well-behaving handler for trie healing requests
func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	// Pass the response
	var nodes [][]byte
	for _, pathset := range paths {
		switch len(pathset) {
		case 1:
			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
			if err != nil {
				t.logger.Info("Error handling req", "error", err)
				break
			}
			nodes = append(nodes, blob)
		default:
			account := t.storageTries[(common.BytesToHash(pathset[0]))]
			for _, path := range pathset[1:] {
				blob, _, err := account.TryGetNode(path)
				if err != nil {
					t.logger.Info("Error handling req", "error", err)
					break
				}
				nodes = append(nodes, blob)
			}
		}
	}
	t.remote.OnTrieNodes(t, requestId, nodes)
	return nil
}

// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
		return err
	}
	return nil
}

func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
	var size uint64
	if limit == (common.Hash{}) {
		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	}
	for _, entry := range t.accountValues {
		if size > cap {
			break
		}
		if bytes.Compare(origin[:], entry.k) <= 0 {
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
		}
		// If we've exceeded the request threshold, abort
		if bytes.Compare(entry.k, limit[:]) >= 0 {
			break
		}
	}
	// Unless we send the entire trie, we need to supply proofs
	// Actually, we need to supply proofs either way! This seems to be an implementation
	// quirk in go-ethereum
	proof := light.NewNodeSet()
	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
	}
	if len(keys) > 0 {
		lastK := (keys[len(keys)-1])[:]
		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
			t.logger.Error("Could not prove last item", "error", err)
		}
	}
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return keys, vals, proofs
}

// defaultStorageRequestHandler is a well-behaving storage request handler
func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	for _, account := range accounts {
		// The first account might start from a different origin and end sooner
		var originHash common.Hash
		if len(origin) > 0 {
			originHash = common.BytesToHash(origin)
		}
		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		if len(limit) > 0 {
			limitHash = common.BytesToHash(limit)
		}
		var (
			keys  []common.Hash
			vals  [][]byte
			abort bool
		)
		for _, entry := range t.storageValues[account] {
			if size >= max {
				abort = true
				break
			}
			if bytes.Compare(entry.k, originHash[:]) < 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
				break
			}
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)
		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie is included
		// in the response, no proofs are needed.
		if originHash != (common.Hash{}) || abort {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}

// createStorageRequestResponseAlwaysProve tests a corner case where the handler
// always supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	max = max * 3 / 4
	var origin common.Hash
	if len(bOrigin) > 0 {
		origin = common.BytesToHash(bOrigin)
	}
	var exit bool
	for i, account := range accounts {
		var keys []common.Hash
		var vals [][]byte
		for _, entry := range t.storageValues[account] {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				exit = true
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if size > max {
				exit = true
			}
		}
		if i == len(accounts)-1 {
			exit = true
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)
		if exit {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
					"error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}

// emptyRequestAccountRangeFn rejects AccountRangeRequests with an empty response
func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	t.remote.OnAccounts(t, requestId, nil, nil, nil)
	return nil
}

func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return nil
}

func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	t.remote.OnTrieNodes(t, requestId, nil)
	return nil
}

func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	return nil
}

func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	t.remote.OnStorage(t, requestId, nil, nil, nil)
	return nil
}

func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return nil
}

func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
//	var bytecodes [][]byte
//	t.remote.OnByteCodes(t, id, bytecodes)
//	return nil
//}
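
// corruptCodeRequestHandler replies with the requested hashes themselves
// instead of the corresponding bytecodes, so the delivery fails verification.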
func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		// Send back the hashes
		bytecodes = append(bytecodes, h[:])
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
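
// cappedCodeRequestHandler delivers only the first requested bytecode per
// request; the remainder can be re-requested, so no error is expected.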
func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes[:1] {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	// Missing bytecode can be retrieved again, no error expected
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

// starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it caps the returned results to be very small
func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
}

func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
}

//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
//	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
//}

func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

// corruptStorageRequestHandler doesn't provide good proofs
func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
// also ship the entire trie inside the proof. If the attack is successful,
// the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	source := newTestPeer("source", t, term)
	source.accountTrie = sourceAccountTrie
	source.accountValues = elems

	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
		var (
			proofs [][]byte
			keys   []common.Hash
			vals   [][]byte
		)
		// The values
		for _, entry := range t.accountValues {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				continue
			}
			if bytes.Compare(entry.k, limit[:]) > 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
		}
		// The proofs
		proof := light.NewNodeSet()
		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
		}
		// The bloat: add proof of every single element
		for _, entry := range t.accountValues {
			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
				t.logger.Error("Could not prove item", "error", err)
			}
		}
		// And remove one item from the elements
		if len(keys) > 2 {
			keys = append(keys[:1], keys[2:]...)
			vals = append(vals[:1], vals[2:]...)
		}
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
			t.logger.Info("remote error on delivery (as expected)", "error", err)
			t.term()
			// This is actually correct, signal to exit the test successfully
		}
		return nil
	}
	syncer := setupSyncer(source)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
		t.Fatal("No error returned from incomplete/cancelled sync")
	}
}
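
// setupSyncer creates a Syncer backed by an in-memory database and registers
// the given test peers with it.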
func setupSyncer(peers ...*testPeer) *Syncer {
	stateDb := rawdb.NewMemoryDatabase()
	syncer := NewSyncer(stateDb)
	for _, peer := range peers {
		syncer.Register(peer)
		peer.remote = syncer
	}
	return syncer
}

// TestSync tests a basic sync with one peer
func TestSync(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
// panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many that
// don't return anything valuable at all
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
	// We're setting the timeout to very low, to increase the chance of the timeout
	// being triggered. This was previously a cause of panic, when a response
	// arrived simultaneously as a timeout was triggered.
	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
	requestTimeout = time.Millisecond

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
func TestMultiSyncManyUnresponsive(t *testing.T) {
	// We're setting the timeout to very low, to make the test run a bit faster
	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
	requestTimeout = time.Millisecond

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = nonResponsiveStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = nonResponsiveTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
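
// checkStall spawns a watchdog that calls term (cancelling the sync) if the
// test has not finished within a minute; closing the returned channel disarms it.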
func checkStall(t *testing.T, term func()) chan struct{} {
	testDone := make(chan struct{})
	go func() {
		select {
		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
			t.Log("Sync stalled")
			term()
		case <-testDone:
			return
		}
	}()
	return testDone
}

// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
// account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
// consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems

		if slow {
			source.accountRequestHandler = starvingAccountRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", false),
		mkSource("nice-b", false),
		mkSource("nice-c", false),
		mkSource("capped", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
// code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of codes requested are sent only to the
	// non-corrupt peer, which delivers everything in one go, and makes the
	// test moot
	syncer := setupSyncer(
		mkSource("capped", cappedCodeRequestHandler),
		mkSource("corrupt", corruptCodeRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
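
// TestSyncNoStorageAndOneAccountCorruptPeer has one peer which delivers account
// ranges with the first proof node stripped.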
func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.accountRequestHandler = accFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of accounts requested are sent only to the
	// non-corrupt peer, which delivers everything in one go, and makes the
	// test moot
	syncer := setupSyncer(
		mkSource("capped", defaultAccountRequestHandler),
		mkSource("corrupt", corruptAccountRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
// one by one
func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
	// so it shouldn't be more than that
	var counter int
	syncer := setupSyncer(
		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
			counter++
			return cappedCodeRequestHandler(t, id, hashes, max)
		}),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	// There are only 8 unique hashes, and 3K accounts. However, the code
	// deduplication is per request batch. If it were a perfect global dedup,
	// we would expect only 8 requests. If there were no dedup, there would be
	// 3k requests.
	// We expect somewhere below 100 requests for these 8 unique hashes.
	if threshold := 100; counter > threshold {
		t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
// storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
// consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if slow {
			source.storageRequestHandler = starvingStorageRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", false),
		mkSource("slow", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
// sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", corruptStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
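
// TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage,
// where one peer delivers storage ranges without any proofs.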
func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", noProofStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code,
// against a peer who insists on delivering full storage sets _and_ proofs. This triggered
// an error, where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve(t *testing.T) {
	t.Parallel()
	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = proofHappyStorageRequestHandler
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
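
// kv is a raw key-value pair of trie entries, used for building and sorting
// the test tries.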
type kv struct {
	k, v []byte
}

// Some helpers for sorting
type entrySlice []*kv

func (p entrySlice) Len() int           { return len(p) }
func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
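
// key32 returns a 32-byte key whose first eight bytes encode i in
// little-endian order.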
func key32(i uint64) []byte {
	key := make([]byte, 32)
	binary.LittleEndian.PutUint64(key, i)
	return key
}

var (
	codehashes = []common.Hash{
		crypto.Keccak256Hash([]byte{0}),
		crypto.Keccak256Hash([]byte{1}),
		crypto.Keccak256Hash([]byte{2}),
		crypto.Keccak256Hash([]byte{3}),
		crypto.Keccak256Hash([]byte{4}),
		crypto.Keccak256Hash([]byte{5}),
		crypto.Keccak256Hash([]byte{6}),
		crypto.Keccak256Hash([]byte{7}),
	}
)

// getCodeHash returns a pseudo-random code hash
func getCodeHash(i uint64) []byte {
	h := codehashes[int(i)%len(codehashes)]
	return common.CopyBytes(h[:])
}

// getCodeByHash is a convenience function to look up the code from the code hash
func getCodeByHash(hash common.Hash) []byte {
	if hash == emptyCode {
		return nil
	}
	for i, h := range codehashes {
		if h == hash {
			return []byte{byte(i)}
		}
	}
	return nil
}

// makeAccountTrieNoStorage spits out a trie, along with the leaves
func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	accTrie, _ := trie.New(common.Hash{}, db)
	var entries entrySlice
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(i),
		})
		key := key32(i)
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	accTrie.Commit(nil)
	return accTrie, entries
}

// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hashes.
func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
	var (
		entries    entrySlice
		boundaries []common.Hash

		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
		trie, _ = trie.New(common.Hash{}, db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(accountConcurrency),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary accounts
	for i := 0; i < len(boundaries); i++ {
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    uint64(0),
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(uint64(i)),
		})
		elem := &kv{boundaries[i].Bytes(), value}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other accounts if required
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(i),
		})
		elem := &kv{key32(i), value}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	trie.Commit(nil)
	return trie, entries
}

// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each
// account has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
	var (
		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie, _     = trie.New(common.Hash{}, db)
		entries        entrySlice
		storageTries   = make(map[common.Hash]*trie.Trie)
		storageEntries = make(map[common.Hash]entrySlice)
	)
	// Create n accounts in the trie
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		codehash := emptyCode[:]
		if code {
			codehash = getCodeHash(i)
		}
		// Create a storage trie
		stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
		stRoot := stTrie.Hash()
		stTrie.Commit(nil)
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     stRoot,
			CodeHash: codehash,
		})
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)

		storageTries[common.BytesToHash(key)] = stTrie
		storageEntries[common.BytesToHash(key)] = stEntries
	}
	sort.Sort(entries)
	accTrie.Commit(nil)
	return accTrie, entries, storageTries, storageEntries
}

// makeAccountTrieWithStorage spits out a trie, along with the leaves
func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
	var (
		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie, _     = trie.New(common.Hash{}, db)
		entries        entrySlice
		storageTries   = make(map[common.Hash]*trie.Trie)
		storageEntries = make(map[common.Hash]entrySlice)
	)
	// Make a storage trie which we reuse for the whole lot
	var (
		stTrie    *trie.Trie
		stEntries entrySlice
	)
	if boundary {
		stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
	} else {
		stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
	}
	stRoot := stTrie.Hash()

	// Create n accounts in the trie
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		codehash := emptyCode[:]
		if code {
			codehash = getCodeHash(i)
		}
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     stRoot,
			CodeHash: codehash,
		})
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
		// we reuse the same one for all accounts
		storageTries[common.BytesToHash(key)] = stTrie
		storageEntries[common.BytesToHash(key)] = stEntries
	}
	sort.Sort(entries)
	stTrie.Commit(nil)
	accTrie.Commit(nil)
	return accTrie, entries, storageTries, storageEntries
}

// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries. The seed can be used to ensure
// that tries are unique.
func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
	trie, _ := trie.New(common.Hash{}, db)
	var entries entrySlice
	for i := uint64(1); i <= n; i++ {
		// store 'x' at slot 'x'
		slotValue := key32(i + seed)
		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))

		slotKey := key32(i)
		key := crypto.Keccak256Hash(slotKey[:])

		elem := &kv{key[:], rlpSlotValue}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	trie.Commit(nil)
	return trie, entries
}

// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hashes.
func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
	var (
		entries    entrySlice
		boundaries []common.Hash
		trie, _    = trie.New(common.Hash{}, db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(accountConcurrency),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary slots
	for i := 0; i < len(boundaries); i++ {
		key := boundaries[i]
		val := []byte{0xde, 0xad, 0xbe, 0xef}
		elem := &kv{key[:], val}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other slots if required
	for i := uint64(1); i <= uint64(n); i++ {
		slotKey := key32(i)
		key := crypto.Keccak256Hash(slotKey[:])

		slotValue := key32(i)
		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))

		elem := &kv{key[:], rlpSlotValue}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	trie.Commit(nil)
	return trie, entries
}
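
// verifyTrie iterates the synced account trie (and any referenced storage
// tries) in the given database, failing the test on any error and logging the
// number of accounts and slots found.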
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
	t.Helper()
	triedb := trie.NewDatabase(db)
	accTrie, err := trie.New(root, triedb)
	if err != nil {
		t.Fatal(err)
	}
	accounts, slots := 0, 0
	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
	for accIt.Next() {
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		accounts++
		if acc.Root != emptyRoot {
			storeTrie, err := trie.NewSecure(acc.Root, triedb)
			if err != nil {
				t.Fatal(err)
			}
			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
			for storeIt.Next() {
				slots++
			}
			if err := storeIt.Err; err != nil {
				t.Fatal(err)
			}
		}
	}
	if err := accIt.Err; err != nil {
		t.Fatal(err)
	}
	t.Logf("accounts: %d, slots: %d", accounts, slots)
}