// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"math/big"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/crypto/sha3"
)
func TestHashing(t *testing.T) {
	t.Parallel()

	var bytecodes = make([][]byte, 10)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var want, got string
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hash := hasher.Sum(nil)
			got = fmt.Sprintf("%v\n%v", got, hash)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
			want = fmt.Sprintf("%v\n%v", want, hash)
		}
	}
	old()
	new()
	if want != got {
		t.Errorf("want\n%v\ngot\n%v\n", want, got)
	}
}
func BenchmarkHashing(b *testing.B) {
	var bytecodes = make([][]byte, 10000)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Sum(nil)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
		}
	}
	b.Run("old", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			old()
		}
	})
	b.Run("new", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			new()
		}
	})
}
type (
	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
	trieHandlerFunc    func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
	codeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
)

type testPeer struct {
	id                    string
	test                  *testing.T
	remote                *Syncer
	logger                log.Logger
	accountTrie           *trie.Trie
	accountValues         entrySlice
	storageTries          map[common.Hash]*trie.Trie
	storageValues         map[common.Hash]entrySlice
	accountRequestHandler accountHandlerFunc
	storageRequestHandler storageHandlerFunc
	trieRequestHandler    trieHandlerFunc
	codeRequestHandler    codeHandlerFunc
	term                  func()

	// counters
	nAccountRequests  int
	nStorageRequests  int
	nBytecodeRequests int
	nTrienodeRequests int
}

func newTestPeer(id string, t *testing.T, term func()) *testPeer {
	peer := &testPeer{
		id:                    id,
		test:                  t,
		logger:                log.New("id", id),
		accountRequestHandler: defaultAccountRequestHandler,
		trieRequestHandler:    defaultTrieRequestHandler,
		storageRequestHandler: defaultStorageRequestHandler,
		codeRequestHandler:    defaultCodeRequestHandler,
		term:                  term,
	}
	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
	//peer.logger.SetHandler(stderrHandler)
	return peer
}

func (t *testPeer) ID() string      { return t.id }
func (t *testPeer) Log() log.Logger { return t.logger }

func (t *testPeer) Stats() string {
	return fmt.Sprintf(`Account requests: %d
Storage requests: %d
Bytecode requests: %d
Trienode requests: %d
`, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
}
func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
	t.nAccountRequests++
	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
	t.nTrienodeRequests++
	go t.trieRequestHandler(t, id, root, paths, bytes)
	return nil
}

func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	t.nStorageRequests++
	if len(accounts) == 1 && origin != nil {
		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
	} else {
		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
	}
	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
	return nil
}

func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	t.nBytecodeRequests++
	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
	go t.codeRequestHandler(t, id, hashes, bytes)
	return nil
}
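
// Note: together with ID and Log above, the four Request* methods form the
// peer surface the Syncer drives in these tests: setupSyncer further down
// hands a *testPeer straight to Syncer.Register. Each request is dispatched
// to a pluggable handler in its own goroutine, which is what lets individual
// tests swap in the empty, non-responsive, capped or corrupt handler variants
// defined later in this file.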
// defaultTrieRequestHandler is a well-behaving handler for trie healing requests
func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	// Pass the response
	var nodes [][]byte
	for _, pathset := range paths {
		switch len(pathset) {
		case 1:
			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
			if err != nil {
				t.logger.Info("Error handling req", "error", err)
				break
			}
			nodes = append(nodes, blob)
		default:
			account := t.storageTries[(common.BytesToHash(pathset[0]))]
			for _, path := range pathset[1:] {
				blob, _, err := account.TryGetNode(path)
				if err != nil {
					t.logger.Info("Error handling req", "error", err)
					break
				}
				nodes = append(nodes, blob)
			}
		}
	}
	t.remote.OnTrieNodes(t, requestId, nodes)
	return nil
}
// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
		return err
	}
	return nil
}

func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
	var size uint64
	if limit == (common.Hash{}) {
		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	}
	for _, entry := range t.accountValues {
		if size > cap {
			break
		}
		if bytes.Compare(origin[:], entry.k) <= 0 {
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
		}
		// If we've exceeded the request threshold, abort
		if bytes.Compare(entry.k, limit[:]) >= 0 {
			break
		}
	}
	// Unless we send the entire trie, we need to supply proofs
	// Actually, we need to supply proofs either way! This seems to be an implementation
	// quirk in go-ethereum
	proof := light.NewNodeSet()
	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
	}
	if len(keys) > 0 {
		lastK := (keys[len(keys)-1])[:]
		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
			t.logger.Error("Could not prove last item", "error", err)
		}
	}
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return keys, vals, proofs
}
// defaultStorageRequestHandler is a well-behaving storage request handler
func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}

func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	for _, account := range accounts {
		// The first account might start from a different origin and end sooner
		var originHash common.Hash
		if len(origin) > 0 {
			originHash = common.BytesToHash(origin)
		}
		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		if len(limit) > 0 {
			limitHash = common.BytesToHash(limit)
		}
		var (
			keys  []common.Hash
			vals  [][]byte
			abort bool
		)
		for _, entry := range t.storageValues[account] {
			if size >= max {
				abort = true
				break
			}
			if bytes.Compare(entry.k, originHash[:]) < 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
				break
			}
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)
		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie is included
		// in the response, there is no need for any proofs.
		if originHash != (common.Hash{}) || abort {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}
// createStorageRequestResponseAlwaysProve tests a corner case, where the handler
// always supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	max = max * 3 / 4
	var origin common.Hash
	if len(bOrigin) > 0 {
		origin = common.BytesToHash(bOrigin)
	}
	var exit bool
	for i, account := range accounts {
		var keys []common.Hash
		var vals [][]byte
		for _, entry := range t.storageValues[account] {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				exit = true
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if size > max {
				exit = true
			}
		}
		if i == len(accounts)-1 {
			exit = true
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)
		if exit {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
					"error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}
// emptyRequestAccountRangeFn is a handler that replies to AccountRangeRequests
// with an empty response
func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	t.remote.OnAccounts(t, requestId, nil, nil, nil)
	return nil
}

func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return nil
}

func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	t.remote.OnTrieNodes(t, requestId, nil)
	return nil
}

func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	return nil
}

func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	t.remote.OnStorage(t, requestId, nil, nil, nil)
	return nil
}

func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return nil
}

func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
//	var bytecodes [][]byte
//	t.remote.OnByteCodes(t, id, bytecodes)
//	return nil
//}

func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		// Send back the hashes
		bytecodes = append(bytecodes, h[:])
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes[:1] {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	// Missing bytecode can be retrieved again, no error expected
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
// starvingStorageRequestHandler is a somewhat well-behaving storage handler, but it
// caps the returned results to be very small
func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
}

func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
}

//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
//	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
//}

func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

// corruptStorageRequestHandler doesn't provide good proofs
func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}

func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
// also ship the entire trie inside the proof. If the attack is successful,
// the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	source := newTestPeer("source", t, term)
	source.accountTrie = sourceAccountTrie
	source.accountValues = elems

	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
		var (
			proofs [][]byte
			keys   []common.Hash
			vals   [][]byte
		)
		// The values
		for _, entry := range t.accountValues {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				continue
			}
			if bytes.Compare(entry.k, limit[:]) > 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
		}
		// The proofs
		proof := light.NewNodeSet()
		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
		}
		// The bloat: add proof of every single element
		for _, entry := range t.accountValues {
			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
				t.logger.Error("Could not prove item", "error", err)
			}
		}
		// And remove one item from the elements
		if len(keys) > 2 {
			keys = append(keys[:1], keys[2:]...)
			vals = append(vals[:1], vals[2:]...)
		}
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
			t.logger.Info("remote error on delivery (as expected)", "error", err)
			t.term()
			// This is actually correct, signal to exit the test successfully
		}
		return nil
	}
	syncer := setupSyncer(source)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
		t.Fatal("No error returned from incomplete/cancelled sync")
	}
}
func setupSyncer(peers ...*testPeer) *Syncer {
	stateDb := rawdb.NewMemoryDatabase()
	syncer := NewSyncer(stateDb)
	for _, peer := range peers {
		syncer.Register(peer)
		peer.remote = syncer
	}
	return syncer
}
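
// The sync tests below all follow the same basic recipe: build one or more
// source tries with the make* helpers at the bottom of this file, wrap them in
// test peers (optionally swapping in a misbehaving handler), wire the peers up
// with setupSyncer, run syncer.Sync against the source root, and finally check
// the synced database with verifyTrie.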
// TestSync tests a basic sync with one peer
func TestSync(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
// panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUseless contains one good peer, and many that don't return
// anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many that
// don't return anything valuable at all
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
	// We're setting the timeout to very low, to increase the chance of the timeout
	// being triggered. This was previously a cause of panic, when a response
	// arrived simultaneously as a timeout was triggered.
	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
	requestTimeout = time.Millisecond

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
func TestMultiSyncManyUnresponsive(t *testing.T) {
	// We're setting the timeout to very low, to make the test run a bit faster
	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
	requestTimeout = time.Millisecond

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if !noAccount {
			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = nonResponsiveStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = nonResponsiveTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// checkStall aborts the test (via term) if the sync does not complete within a
// minute; the returned channel is closed by the caller once syncing is done.
func checkStall(t *testing.T, term func()) chan struct{} {
	testDone := make(chan struct{})
	go func() {
		select {
		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
			t.Log("Sync stalled")
			term()
		case <-testDone:
			return
		}
	}()
	return testDone
}
// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
// account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
// consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems

		if slow {
			source.accountRequestHandler = starvingAccountRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", false),
		mkSource("nice-b", false),
		mkSource("nice-c", false),
		mkSource("capped", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
// code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of codes requested are sent only to the
	// non-corrupt peer, which delivers everything in one go, and makes the
	// test moot
	syncer := setupSyncer(
		mkSource("capped", cappedCodeRequestHandler),
		mkSource("corrupt", corruptCodeRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.accountRequestHandler = accFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of requests are sent only to the
	// non-corrupt peer, which delivers everything in one go, and makes the
	// test moot
	syncer := setupSyncer(
		mkSource("capped", defaultAccountRequestHandler),
		mkSource("corrupt", corruptAccountRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
// one by one
func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)

	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
	// so it shouldn't be more than that
	var counter int
	syncer := setupSyncer(
		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
			counter++
			return cappedCodeRequestHandler(t, id, hashes, max)
		}),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)

	// There are only 8 unique hashes, and 3K accounts. However, the code
	// deduplication is per request batch. If it were a perfect global dedup,
	// we would expect only 8 requests. If there were no dedup, there would be
	// 3k requests.
	// We expect somewhere below 100 requests for these 8 unique hashes.
	if threshold := 100; counter > threshold {
		t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
// storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
// consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)

	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems

		if slow {
			source.storageRequestHandler = starvingStorageRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", false),
		mkSource("slow", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
// sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", corruptStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}

func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)

	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", noProofStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code,
// against a peer who insists on delivering full storage sets _and_ proofs. This triggered
// an error, where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve(t *testing.T) {
	t.Parallel()

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = proofHappyStorageRequestHandler
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// kv is a key-value tuple used to represent trie leaf entries in the tests.
type kv struct {
	k, v []byte
}

// Some helpers for sorting
type entrySlice []*kv

func (p entrySlice) Len() int           { return len(p) }
func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
func (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func key32(i uint64) []byte {
	key := make([]byte, 32)
	binary.LittleEndian.PutUint64(key, i)
	return key
}
var (
	codehashes = []common.Hash{
		crypto.Keccak256Hash([]byte{0}),
		crypto.Keccak256Hash([]byte{1}),
		crypto.Keccak256Hash([]byte{2}),
		crypto.Keccak256Hash([]byte{3}),
		crypto.Keccak256Hash([]byte{4}),
		crypto.Keccak256Hash([]byte{5}),
		crypto.Keccak256Hash([]byte{6}),
		crypto.Keccak256Hash([]byte{7}),
	}
)

// getCodeHash returns a pseudo-random code hash
func getCodeHash(i uint64) []byte {
	h := codehashes[int(i)%len(codehashes)]
	return common.CopyBytes(h[:])
}

// getCodeByHash is a convenience function to look up the code from the code hash
func getCodeByHash(hash common.Hash) []byte {
	if hash == emptyCode {
		return nil
	}
	for i, h := range codehashes {
		if h == hash {
			return []byte{byte(i)}
		}
	}
	return nil
}
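
// getCodeHash and getCodeByHash are (lossy) inverses of each other: the former
// maps an account index onto one of the eight fixed code hashes above, while
// the latter maps such a hash back to its one-byte preimage, so bytecode served
// via getCodeByHash re-hashes to the hash that was requested.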
// makeAccountTrieNoStorage spits out a trie, along with the leaves
func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	accTrie, _ := trie.New(common.Hash{}, db)
	var entries entrySlice
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(i),
		})
		key := key32(i)
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	accTrie.Commit(nil)
	return accTrie, entries
}
// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hashes.
func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
	var (
		entries    entrySlice
		boundaries []common.Hash
		db         = trie.NewDatabase(rawdb.NewMemoryDatabase())
		trie, _    = trie.New(common.Hash{}, db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary accounts
	for i := 0; i < len(boundaries); i++ {
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    uint64(0),
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(uint64(i)),
		})
		elem := &kv{boundaries[i].Bytes(), value}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other accounts if required
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     emptyRoot,
			CodeHash: getCodeHash(i),
		})
		elem := &kv{key32(i), value}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	trie.Commit(nil)
	return trie, entries
}
// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
// has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
	var (
		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie, _     = trie.New(common.Hash{}, db)
		entries        entrySlice
		storageTries   = make(map[common.Hash]*trie.Trie)
		storageEntries = make(map[common.Hash]entrySlice)
	)
	// Create n accounts in the trie
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		codehash := emptyCode[:]
		if code {
			codehash = getCodeHash(i)
		}
		// Create a storage trie
		stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
		stRoot := stTrie.Hash()
		stTrie.Commit(nil)
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     stRoot,
			CodeHash: codehash,
		})
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
		storageTries[common.BytesToHash(key)] = stTrie
		storageEntries[common.BytesToHash(key)] = stEntries
	}
	sort.Sort(entries)
	accTrie.Commit(nil)
	return accTrie, entries, storageTries, storageEntries
}

// makeAccountTrieWithStorage spits out a trie, along with the leaves
func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
	var (
		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie, _     = trie.New(common.Hash{}, db)
		entries        entrySlice
		storageTries   = make(map[common.Hash]*trie.Trie)
		storageEntries = make(map[common.Hash]entrySlice)
	)
	// Make a storage trie which we reuse for the whole lot
	var (
		stTrie    *trie.Trie
		stEntries entrySlice
	)
	if boundary {
		stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
	} else {
		stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
	}
	stRoot := stTrie.Hash()

	// Create n accounts in the trie
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		codehash := emptyCode[:]
		if code {
			codehash = getCodeHash(i)
		}
		value, _ := rlp.EncodeToBytes(state.Account{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     stRoot,
			CodeHash: codehash,
		})
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
		// we reuse the same one for all accounts
		storageTries[common.BytesToHash(key)] = stTrie
		storageEntries[common.BytesToHash(key)] = stEntries
	}
	sort.Sort(entries)
	stTrie.Commit(nil)
	accTrie.Commit(nil)
	return accTrie, entries, storageTries, storageEntries
}
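
// Unlike makeAccountTrieWithStorageWithUniqueStorage above, every account here
// points at the same storage root, presumably to keep the fixture small. The
// consequence is that all storage subtries served by the test peer are
// identical copies of one another.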

// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries. The seed can be used to ensure
// that tries are unique.
func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
	trie, _ := trie.New(common.Hash{}, db)
	var entries entrySlice
	for i := uint64(1); i <= n; i++ {
		// store 'x' at slot 'x'
		slotValue := key32(i + seed)
		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))

		slotKey := key32(i)
		key := crypto.Keccak256Hash(slotKey[:])

		elem := &kv{key[:], rlpSlotValue}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	trie.Commit(nil)
	return trie, entries
}
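
// As an example of the layout produced above: with seed = 3, the slot keyed by
// key32(1) (stored under its keccak256 hash) holds the RLP encoding of key32(4)
// with leading zeroes trimmed, the slot keyed by key32(2) holds key32(5), and
// so on. Different seeds therefore yield storage tries with distinct roots.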

// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hashes.
func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
	var (
		entries    entrySlice
		boundaries []common.Hash
		trie, _    = trie.New(common.Hash{}, db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary slots
	for i := 0; i < len(boundaries); i++ {
		key := boundaries[i]
		val := []byte{0xde, 0xad, 0xbe, 0xef}

		elem := &kv{key[:], val}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other slots if required
	for i := uint64(1); i <= uint64(n); i++ {
		slotKey := key32(i)
		key := crypto.Keccak256Hash(slotKey[:])

		slotValue := key32(i)
		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))

		elem := &kv{key[:], rlpSlotValue}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	trie.Commit(nil)
	return trie, entries
}

func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
	t.Helper()
	triedb := trie.NewDatabase(db)
	accTrie, err := trie.New(root, triedb)
	if err != nil {
		t.Fatal(err)
	}
	accounts, slots := 0, 0
	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
	for accIt.Next() {
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		accounts++
		if acc.Root != emptyRoot {
			storeTrie, err := trie.NewSecure(acc.Root, triedb)
			if err != nil {
				t.Fatal(err)
			}
			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
			for storeIt.Next() {
				slots++
			}
			if err := storeIt.Err; err != nil {
				t.Fatal(err)
			}
		}
	}
	if err := accIt.Err; err != nil {
		t.Fatal(err)
	}
	t.Logf("accounts: %d, slots: %d", accounts, slots)
}

// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance(t *testing.T) {
	// Set the account concurrency to 1. This _should_ result in the
	// range root becoming correct, and there should be no healing needed
	defer func(old int) { accountConcurrency = old }(accountConcurrency)
	accountConcurrency = 1

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	src := mkSource("source")
	syncer := setupSyncer(src)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
	// The trie root will always be requested, since it is added when the snap
	// sync cycle starts. When popping the queue, we do not look it up again.
	// Doing so would bring this number down to zero in this artificial testcase,
	// but only add extra IO for no reason in practice.
	if have, want := src.nTrienodeRequests, 1; have != want {
		fmt.Print(src.Stats())
		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
	}
}

func TestSlotEstimation(t *testing.T) {
	for i, tc := range []struct {
		last  common.Hash
		count int
		want  uint64
	}{
		{
			// Half the space
			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			100,
			100,
		},
		{
			// 1 / 16th
			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			100,
			1500,
		},
		{
			// Bit more than 1 / 16th
			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
			100,
			1499,
		},
		{
			// Almost everything
			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
			100,
			6,
		},
		{
			// Almost nothing -- should lead to error
			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
			1,
			0,
		},
		{
			// Nothing -- should lead to error
			common.Hash{},
			100,
			0,
		},
	} {
		have, _ := estimateRemainingSlots(tc.count, tc.last)
		if want := tc.want; have != want {
			t.Errorf("test %d: have %d want %d", i, have, want)
		}
	}
}
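
// The expected values above follow from simple proportionality: if `count`
// slots were found in the hash range [0, last], the full 2^256 hash space
// should hold roughly count * 2^256 / last slots in total, of which `count`
// are already downloaded. A minimal sketch of that estimate (ignoring the
// overflow and empty-hash cases that the test comments say should lead to
// errors) could look like:
//
//	func estimateSketch(count int, last common.Hash) uint64 {
//		space := new(big.Int).Sub(new(big.Int).Exp(common.Big2, common.Big256, nil), common.Big1)
//		space.Mul(space, big.NewInt(int64(count)))
//		space.Div(space, last.Big())
//		return space.Uint64() - uint64(count)
//	}
//
// E.g. for the first case: (2^256-1) * 100 / (2^255-1) = 200, minus the 100
// already-known slots, gives the expected 100 remaining.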