sync_test.go

// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"math/big"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/crypto/sha3"
)
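// TestHashing checks that hashing bytecodes by reading the digest out of the
// KeccakState produces the same results as the legacy Sum-based approach.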
func TestHashing(t *testing.T) {
	t.Parallel()
	var bytecodes = make([][]byte, 10)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var want, got string
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hash := hasher.Sum(nil)
			got = fmt.Sprintf("%v\n%v", got, hash)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
			want = fmt.Sprintf("%v\n%v", want, hash)
		}
	}
	old()
	new()
	if want != got {
		t.Errorf("want\n%v\ngot\n%v\n", want, got)
	}
}
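// BenchmarkHashing benchmarks the legacy Sum-based hashing against reading the
// digest straight out of the KeccakState.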
func BenchmarkHashing(b *testing.B) {
	var bytecodes = make([][]byte, 10000)
	for i := 0; i < len(bytecodes); i++ {
		buf := make([]byte, 100)
		rand.Read(buf)
		bytecodes[i] = buf
	}
	var old = func() {
		hasher := sha3.NewLegacyKeccak256()
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Sum(nil)
		}
	}
	var new = func() {
		hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
		var hash = make([]byte, 32)
		for i := 0; i < len(bytecodes); i++ {
			hasher.Reset()
			hasher.Write(bytecodes[i])
			hasher.Read(hash)
		}
	}
	b.Run("old", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			old()
		}
	})
	b.Run("new", func(b *testing.B) {
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			new()
		}
	})
}
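// Handler functions that individual tests can plug into a testPeer to customise
// how it responds to the various snap request types.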
type (
	accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
	storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
	trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
	codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
)
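// testPeer is a mock peer backed by locally generated tries. Incoming requests
// are routed to the configurable handler functions, so individual tests can
// simulate well-behaved, slow, corrupt or unresponsive peers.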
type testPeer struct {
	id string
	test *testing.T
	remote *Syncer
	logger log.Logger
	accountTrie *trie.Trie
	accountValues entrySlice
	storageTries map[common.Hash]*trie.Trie
	storageValues map[common.Hash]entrySlice
	accountRequestHandler accountHandlerFunc
	storageRequestHandler storageHandlerFunc
	trieRequestHandler trieHandlerFunc
	codeRequestHandler codeHandlerFunc
	term func()
	// counters
	nAccountRequests int
	nStorageRequests int
	nBytecodeRequests int
	nTrienodeRequests int
}
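// newTestPeer creates a test peer wired up with the default, well-behaving request handlers.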
func newTestPeer(id string, t *testing.T, term func()) *testPeer {
	peer := &testPeer{
		id: id,
		test: t,
		logger: log.New("id", id),
		accountRequestHandler: defaultAccountRequestHandler,
		trieRequestHandler: defaultTrieRequestHandler,
		storageRequestHandler: defaultStorageRequestHandler,
		codeRequestHandler: defaultCodeRequestHandler,
		term: term,
	}
	//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
	//peer.logger.SetHandler(stderrHandler)
	return peer
}
func (t *testPeer) ID() string { return t.id }
func (t *testPeer) Log() log.Logger { return t.logger }
func (t *testPeer) Stats() string {
	return fmt.Sprintf(`Account requests: %d
Storage requests: %d
Bytecode requests: %d
Trienode requests: %d
`, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
}
func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
	t.nAccountRequests++
	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
	return nil
}
func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
	t.nTrienodeRequests++
	go t.trieRequestHandler(t, id, root, paths, bytes)
	return nil
}
func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
	t.nStorageRequests++
	if len(accounts) == 1 && origin != nil {
		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
	} else {
		t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
	}
	go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
	return nil
}
func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
	t.nBytecodeRequests++
	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
	go t.codeRequestHandler(t, id, hashes, bytes)
	return nil
}
// defaultTrieRequestHandler is a well-behaving handler for trie healing requests
func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	// Pass the response
	var nodes [][]byte
	for _, pathset := range paths {
		switch len(pathset) {
		case 1:
			blob, _, err := t.accountTrie.TryGetNode(pathset[0])
			if err != nil {
				t.logger.Info("Error handling req", "error", err)
				break
			}
			nodes = append(nodes, blob)
		default:
			account := t.storageTries[(common.BytesToHash(pathset[0]))]
			for _, path := range pathset[1:] {
				blob, _, err := account.TryGetNode(path)
				if err != nil {
					t.logger.Info("Error handling req", "error", err)
					break
				}
				nodes = append(nodes, blob)
			}
		}
	}
	t.remote.OnTrieNodes(t, requestId, nodes)
	return nil
}
// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests
func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
		return err
	}
	return nil
}
func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
	var size uint64
	if limit == (common.Hash{}) {
		limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	}
	for _, entry := range t.accountValues {
		if size > cap {
			break
		}
		if bytes.Compare(origin[:], entry.k) <= 0 {
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
		}
		// If we've exceeded the request threshold, abort
		if bytes.Compare(entry.k, limit[:]) >= 0 {
			break
		}
	}
	// Unless we send the entire trie, we need to supply proofs
	// Actually, we need to supply proofs either way! This seems to be an implementation
	// quirk in go-ethereum
	proof := light.NewNodeSet()
	if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
		t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
	}
	if len(keys) > 0 {
		lastK := (keys[len(keys)-1])[:]
		if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
			t.logger.Error("Could not prove last item", "error", err)
		}
	}
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return keys, vals, proofs
}
// defaultStorageRequestHandler is a well-behaving storage request handler
func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	for _, account := range accounts {
		// The first account might start from a different origin and end sooner
		var originHash common.Hash
		if len(origin) > 0 {
			originHash = common.BytesToHash(origin)
		}
		var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		if len(limit) > 0 {
			limitHash = common.BytesToHash(limit)
		}
		var (
			keys []common.Hash
			vals [][]byte
			abort bool
		)
		for _, entry := range t.storageValues[account] {
			if size >= max {
				abort = true
				break
			}
			if bytes.Compare(entry.k, originHash[:]) < 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if bytes.Compare(entry.k, limitHash[:]) >= 0 {
				break
			}
		}
		if len(keys) > 0 {
			hashes = append(hashes, keys)
			slots = append(slots, vals)
		}
		// Generate the Merkle proofs for the first and last storage slot, but
		// only if the response was capped. If the entire storage trie is included
		// in the response, no proofs are needed.
		if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}
// createStorageRequestResponseAlwaysProve tests a corner case, where the response
// always supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
	var size uint64
	max = max * 3 / 4
	var origin common.Hash
	if len(bOrigin) > 0 {
		origin = common.BytesToHash(bOrigin)
	}
	var exit bool
	for i, account := range accounts {
		var keys []common.Hash
		var vals [][]byte
		for _, entry := range t.storageValues[account] {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				exit = true
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
			size += uint64(32 + len(entry.v))
			if size > max {
				exit = true
			}
		}
		if i == len(accounts)-1 {
			exit = true
		}
		hashes = append(hashes, keys)
		slots = append(slots, vals)
		if exit {
			// If we're aborting, we need to prove the first and last item
			// This terminates the response (and thus the loop)
			proof := light.NewNodeSet()
			stTrie := t.storageTries[account]
			// Here's a potential gotcha: when constructing the proof, we cannot
			// use the 'origin' slice directly, but must use the full 32-byte
			// hash form.
			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
				t.logger.Error("Could not prove inexistence of origin", "origin", origin,
					"error", err)
			}
			if len(keys) > 0 {
				lastK := (keys[len(keys)-1])[:]
				if err := stTrie.Prove(lastK, 0, proof); err != nil {
					t.logger.Error("Could not prove last item", "error", err)
				}
			}
			for _, blob := range proof.NodeList() {
				proofs = append(proofs, blob)
			}
			break
		}
	}
	return hashes, slots, proofs
}
// emptyRequestAccountRangeFn rejects AccountRangeRequests by replying with an empty response
func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	t.remote.OnAccounts(t, requestId, nil, nil, nil)
	return nil
}
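// nonResponsiveRequestAccountRangeFn simulates a peer that never responds to account range requests.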
func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return nil
}
func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	t.remote.OnTrieNodes(t, requestId, nil)
	return nil
}
func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
	return nil
}
func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	t.remote.OnStorage(t, requestId, nil, nil, nil)
	return nil
}
func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return nil
}
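// proofHappyStorageRequestHandler always supplies proofs, even if the response is complete.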
func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
//	var bytecodes [][]byte
//	t.remote.OnByteCodes(t, id, bytecodes)
//	return nil
//}
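// corruptCodeRequestHandler replies with the requested hashes themselves instead of the
// corresponding bytecodes, so the delivery fails verification on the remote side.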
func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes {
		// Send back the hashes
		bytecodes = append(bytecodes, h[:])
	}
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
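// cappedCodeRequestHandler only delivers the first requested bytecode, forcing the
// syncer to re-request the remainder.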
func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
	var bytecodes [][]byte
	for _, h := range hashes[:1] {
		bytecodes = append(bytecodes, getCodeByHash(h))
	}
	// Missing bytecode can be retrieved again, no error expected
	if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
		t.test.Errorf("Remote side rejected our delivery: %v", err)
		t.term()
	}
	return nil
}
// starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to a very small size
func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
}
func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
}
//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
//	return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
//}
func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
	hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
// corruptStorageRequestHandler doesn't provide good proofs
func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if len(proofs) > 0 {
		proofs = proofs[1:]
	}
	if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
	hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
	if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
		t.logger.Info("remote error on delivery (as expected)", "error", err)
		// Mimic the real-life handler, which drops a peer on errors
		t.remote.Unregister(t.id)
	}
	return nil
}
// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
// also ship the entire trie inside the proof. If the attack is successful,
// the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	source := newTestPeer("source", t, term)
	source.accountTrie = sourceAccountTrie
	source.accountValues = elems
	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
		var (
			proofs [][]byte
			keys []common.Hash
			vals [][]byte
		)
		// The values
		for _, entry := range t.accountValues {
			if bytes.Compare(entry.k, origin[:]) < 0 {
				continue
			}
			if bytes.Compare(entry.k, limit[:]) > 0 {
				continue
			}
			keys = append(keys, common.BytesToHash(entry.k))
			vals = append(vals, entry.v)
		}
		// The proofs
		proof := light.NewNodeSet()
		if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
			t.logger.Error("Could not prove origin", "origin", origin, "error", err)
		}
		// The bloat: add proof of every single element
		for _, entry := range t.accountValues {
			if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
				t.logger.Error("Could not prove item", "error", err)
			}
		}
		// And remove one item from the elements
		if len(keys) > 2 {
			keys = append(keys[:1], keys[2:]...)
			vals = append(vals[:1], vals[2:]...)
		}
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
			t.logger.Info("remote error on delivery (as expected)", "error", err)
			t.term()
			// This is actually correct, signal to exit the test successfully
		}
		return nil
	}
	syncer := setupSyncer(source)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
		t.Fatal("No error returned from incomplete/cancelled sync")
	}
}
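// setupSyncer creates a snap syncer backed by a fresh in-memory database and
// registers the given peers with it.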
func setupSyncer(peers ...*testPeer) *Syncer {
	stateDb := rawdb.NewMemoryDatabase()
	syncer := NewSyncer(stateDb)
	for _, peer := range peers {
		syncer.Register(peer)
		peer.remote = syncer
	}
	return syncer
}
// TestSync tests a basic sync with one peer
func TestSync(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
// panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("source"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUselessWithLowTimeout contains one good peer and many that don't return anything valuable, using a very aggressive request timeout
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		if !noAccount {
			source.accountRequestHandler = emptyRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = emptyStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = emptyTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	// We set the timeout very low to increase the chance of it being triggered.
	// This previously caused a panic when a response arrived at the same time
	// as a timeout was triggered.
	syncer.rates.OverrideTTLLimit = time.Millisecond
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
func TestMultiSyncManyUnresponsive(t *testing.T) {
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		if !noAccount {
			source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
		}
		if !noStorage {
			source.storageRequestHandler = nonResponsiveStorageRequestHandler
		}
		if !noTrieNode {
			source.trieRequestHandler = nonResponsiveTrieRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("full", true, true, true),
		mkSource("noAccounts", false, true, true),
		mkSource("noStorage", true, false, true),
		mkSource("noTrie", true, true, false),
	)
	// We set the timeout very low, to make the test run a bit faster
	syncer.rates.OverrideTTLLimit = time.Millisecond
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
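// checkStall starts a watchdog that terminates the test (via term) if the sync
// hasn't completed within a minute. The returned channel should be closed once
// the sync finishes.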
func checkStall(t *testing.T, term func()) chan struct{} {
	testDone := make(chan struct{})
	go func() {
		select {
		case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
			t.Log("Sync stalled")
			term()
		case <-testDone:
			return
		}
	}()
	return testDone
}
// TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
// account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	syncer := setupSyncer(
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
// consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		if slow {
			source.accountRequestHandler = starvingAccountRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", false),
		mkSource("nice-b", false),
		mkSource("nice-c", false),
		mkSource("capped", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
// code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of requested codes is sent only to the
	// non-corrupt peer, which delivers everything in one go and makes the
	// test moot.
	syncer := setupSyncer(
		mkSource("capped", cappedCodeRequestHandler),
		mkSource("corrupt", corruptCodeRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
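// TestSyncNoStorageAndOneAccountCorruptPeer has one peer which doesn't deliver
// account range responses properly (its proofs are corrupted).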
func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
	mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.accountRequestHandler = accFn
		return source
	}
	// One is capped, one is corrupt. If we don't use a capped one, there's a 50%
	// chance that the full set of requested accounts is sent only to the
	// non-corrupt peer, which delivers everything in one go and makes the
	// test moot.
	syncer := setupSyncer(
		mkSource("capped", defaultAccountRequestHandler),
		mkSource("corrupt", corruptAccountRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
// one by one
func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
	mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.codeRequestHandler = codeFn
		return source
	}
	// Count how many times it's invoked. Remember, there are only 8 unique hashes,
	// so it shouldn't be more than that
	var counter int
	syncer := setupSyncer(
		mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
			counter++
			return cappedCodeRequestHandler(t, id, hashes, max)
		}),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	// There are only 8 unique hashes, and 3K accounts. However, the code
	// deduplication is per request batch. If it were a perfect global dedup,
	// we would expect only 8 requests. If there were no dedup, there would be
	// 3k requests.
	// We expect somewhere below 100 requests for these 8 unique hashes. But
	// the number can be flaky, so don't limit it so strictly.
	if threshold := 100; counter > threshold {
		t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
// storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		return source
	}
	syncer := setupSyncer(
		mkSource("peer-a"),
		mkSource("peer-b"),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
// consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
	mkSource := func(name string, slow bool) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		if slow {
			source.storageRequestHandler = starvingStorageRequestHandler
		}
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", false),
		mkSource("slow", true),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
// sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", corruptStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
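// TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage, where one
// peer delivers storage slots without any proofs.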
func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = handler
		return source
	}
	syncer := setupSyncer(
		mkSource("nice-a", defaultStorageRequestHandler),
		mkSource("nice-b", defaultStorageRequestHandler),
		mkSource("nice-c", defaultStorageRequestHandler),
		mkSource("corrupt", noProofStorageRequestHandler),
	)
	done := checkStall(t, term)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	close(done)
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
// a peer who insists on delivering full storage sets _and_ proofs. This triggered
// a bug where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve(t *testing.T) {
	t.Parallel()
	var (
		once sync.Once
		cancel = make(chan struct{})
		term = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		source.storageTries = storageTries
		source.storageValues = storageElems
		source.storageRequestHandler = proofHappyStorageRequestHandler
		return source
	}
	syncer := setupSyncer(mkSource("sourceA"))
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
type kv struct {
	k, v []byte
}
// Some helpers for sorting
type entrySlice []*kv
func (p entrySlice) Len() int { return len(p) }
func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
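// key32 converts an integer into a 32-byte key, with the value encoded
// little-endian in the first 8 bytes.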
func key32(i uint64) []byte {
	key := make([]byte, 32)
	binary.LittleEndian.PutUint64(key, i)
	return key
}
var (
	codehashes = []common.Hash{
		crypto.Keccak256Hash([]byte{0}),
		crypto.Keccak256Hash([]byte{1}),
		crypto.Keccak256Hash([]byte{2}),
		crypto.Keccak256Hash([]byte{3}),
		crypto.Keccak256Hash([]byte{4}),
		crypto.Keccak256Hash([]byte{5}),
		crypto.Keccak256Hash([]byte{6}),
		crypto.Keccak256Hash([]byte{7}),
	}
)
// getCodeHash returns a pseudo-random code hash
func getCodeHash(i uint64) []byte {
	h := codehashes[int(i)%len(codehashes)]
	return common.CopyBytes(h[:])
}
// getCodeByHash is a convenience function to look up the code corresponding to a code hash
func getCodeByHash(hash common.Hash) []byte {
	if hash == emptyCode {
		return nil
	}
	for i, h := range codehashes {
		if h == hash {
			return []byte{byte(i)}
		}
	}
	return nil
}
// makeAccountTrieNoStorage spits out a trie, along with the leaves
func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
	var (
		db = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie = trie.NewEmpty(db)
		entries entrySlice
	)
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce: i,
			Balance: big.NewInt(int64(i)),
			Root: emptyRoot,
			CodeHash: getCodeHash(i),
		})
		key := key32(i)
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	// Commit the state changes into db and re-create the trie
	// for accessing later.
	root, nodes, _ := accTrie.Commit(false)
	db.Update(trie.NewWithNodeSet(nodes))
	accTrie, _ = trie.New(common.Hash{}, root, db)
	return accTrie, entries
}
// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hashes.
func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
	var (
		entries entrySlice
		boundaries []common.Hash
		db = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie = trie.NewEmpty(db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary accounts
	for i := 0; i < len(boundaries); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce: uint64(0),
			Balance: big.NewInt(int64(i)),
			Root: emptyRoot,
			CodeHash: getCodeHash(uint64(i)),
		})
		elem := &kv{boundaries[i].Bytes(), value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other accounts if required
	for i := uint64(1); i <= uint64(n); i++ {
		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce: i,
			Balance: big.NewInt(int64(i)),
			Root: emptyRoot,
			CodeHash: getCodeHash(i),
		})
		elem := &kv{key32(i), value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	// Commit the state changes into db and re-create the trie
	// for accessing later.
	root, nodes, _ := accTrie.Commit(false)
	db.Update(trie.NewWithNodeSet(nodes))
	accTrie, _ = trie.New(common.Hash{}, root, db)
	return accTrie, entries
}

// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where
// each account has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
	var (
		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie        = trie.NewEmpty(db)
		entries        entrySlice
		storageRoots   = make(map[common.Hash]common.Hash)
		storageTries   = make(map[common.Hash]*trie.Trie)
		storageEntries = make(map[common.Hash]entrySlice)
		nodes          = trie.NewMergedNodeSet()
	)
	// Create n accounts in the trie
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		codehash := emptyCode[:]
		if code {
			codehash = getCodeHash(i)
		}
		// Create a storage trie
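		// The account index doubles as the storage seed, so every account gets
		// a different set of slot values and hence a unique storage root.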
		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
		nodes.Merge(stNodes)

		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     stRoot,
			CodeHash: codehash,
		})
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)

		storageRoots[common.BytesToHash(key)] = stRoot
		storageEntries[common.BytesToHash(key)] = stEntries
	}
	sort.Sort(entries)

	// Commit account trie
	root, set, _ := accTrie.Commit(true)
	nodes.Merge(set)

	// Commit gathered dirty nodes into database
	db.Update(nodes)

	// Re-create tries with new root
	accTrie, _ = trie.New(common.Hash{}, root, db)
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
		storageTries[common.BytesToHash(key)] = trie
	}
	return accTrie, entries, storageTries, storageEntries
}

// makeAccountTrieWithStorage spits out a trie, along with the sorted leaves
func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
	var (
		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
		accTrie        = trie.NewEmpty(db)
		entries        entrySlice
		storageRoots   = make(map[common.Hash]common.Hash)
		storageTries   = make(map[common.Hash]*trie.Trie)
		storageEntries = make(map[common.Hash]entrySlice)
		nodes          = trie.NewMergedNodeSet()
	)
	// Create n accounts in the trie
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		codehash := emptyCode[:]
		if code {
			codehash = getCodeHash(i)
		}
		// Make a storage trie
		var (
			stRoot    common.Hash
			stNodes   *trie.NodeSet
			stEntries entrySlice
		)
		if boundary {
			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
		} else {
			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
		}
		nodes.Merge(stNodes)

		value, _ := rlp.EncodeToBytes(&types.StateAccount{
			Nonce:    i,
			Balance:  big.NewInt(int64(i)),
			Root:     stRoot,
			CodeHash: codehash,
		})
		elem := &kv{key, value}
		accTrie.Update(elem.k, elem.v)
		entries = append(entries, elem)

		// We reuse the same storage layout for all accounts
		storageRoots[common.BytesToHash(key)] = stRoot
		storageEntries[common.BytesToHash(key)] = stEntries
	}
	sort.Sort(entries)

	// Commit account trie
	root, set, _ := accTrie.Commit(true)
	nodes.Merge(set)

	// Commit gathered dirty nodes into database
	db.Update(nodes)

	// Re-create tries with new root
	accTrie, err := trie.New(common.Hash{}, root, db)
	if err != nil {
		panic(err)
	}
	for i := uint64(1); i <= uint64(accounts); i++ {
		key := key32(i)
		trie, err := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
		if err != nil {
			panic(err)
		}
		storageTries[common.BytesToHash(key)] = trie
	}
	return accTrie, entries, storageTries, storageEntries
}

// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// committed root, the dirty trie nodes and the sorted entries. The seed can be
// used to ensure that the generated tries are unique.
func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
	trie, _ := trie.New(owner, common.Hash{}, db)
	var entries entrySlice
	for i := uint64(1); i <= n; i++ {
		// store 'x + seed' at slot 'x'
		slotValue := key32(i + seed)
		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
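		// Slots are keyed by the keccak hash of the slot key, matching the
		// hashed storage layout that snap sync serves slots from.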
		slotKey := key32(i)
		key := crypto.Keccak256Hash(slotKey[:])

		elem := &kv{key[:], rlpSlotValue}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	root, nodes, _ := trie.Commit(false)
	return root, nodes, entries
}

// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hashes.
func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
	var (
		entries    entrySlice
		boundaries []common.Hash
		trie, _    = trie.New(owner, common.Hash{}, db)
	)
	// Initialize boundaries
	var next common.Hash
	step := new(big.Int).Sub(
		new(big.Int).Div(
			new(big.Int).Exp(common.Big2, common.Big256, nil),
			big.NewInt(int64(accountConcurrency)),
		), common.Big1,
	)
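	// As with the account boundaries above, each boundary is the last hash of
	// an equally sized chunk of the hash space.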
	for i := 0; i < accountConcurrency; i++ {
		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
		if i == accountConcurrency-1 {
			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		}
		boundaries = append(boundaries, last)
		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
	}
	// Fill boundary slots
	for i := 0; i < len(boundaries); i++ {
		key := boundaries[i]
		val := []byte{0xde, 0xad, 0xbe, 0xef}

		elem := &kv{key[:], val}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	// Fill other slots if required
	for i := uint64(1); i <= uint64(n); i++ {
		slotKey := key32(i)
		key := crypto.Keccak256Hash(slotKey[:])

		slotValue := key32(i)
		rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))

		elem := &kv{key[:], rlpSlotValue}
		trie.Update(elem.k, elem.v)
		entries = append(entries, elem)
	}
	sort.Sort(entries)
	root, nodes, _ := trie.Commit(false)
	return root, nodes, entries
}
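
// verifyTrie traverses the account trie stored under the given root, decoding
// every account and walking each non-empty storage trie, failing the test if
// anything cannot be resolved from the database.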
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
	t.Helper()

	triedb := trie.NewDatabase(db)
	accTrie, err := trie.New(common.Hash{}, root, triedb)
	if err != nil {
		t.Fatal(err)
	}
	accounts, slots := 0, 0
	accIt := trie.NewIterator(accTrie.NodeIterator(nil))
	for accIt.Next() {
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
			log.Crit("Invalid account encountered during trie verification", "err", err)
		}
		accounts++
		if acc.Root != emptyRoot {
			storeTrie, err := trie.NewStateTrie(common.BytesToHash(accIt.Key), acc.Root, triedb)
			if err != nil {
				t.Fatal(err)
			}
			storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
			for storeIt.Next() {
				slots++
			}
			if err := storeIt.Err; err != nil {
				t.Fatal(err)
			}
		}
	}
	if err := accIt.Err; err != nil {
		t.Fatal(err)
	}
	t.Logf("accounts: %d, slots: %d", accounts, slots)
}

// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance(t *testing.T) {
	// Set the account concurrency to 1. This _should_ result in the
	// range root becoming correct, and there should be no healing needed
	defer func(old int) { accountConcurrency = old }(accountConcurrency)
	accountConcurrency = 1

	var (
		once   sync.Once
		cancel = make(chan struct{})
		term   = func() {
			once.Do(func() {
				close(cancel)
			})
		}
	)
	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)

	mkSource := func(name string) *testPeer {
		source := newTestPeer(name, t, term)
		source.accountTrie = sourceAccountTrie
		source.accountValues = elems
		return source
	}
	src := mkSource("source")
	syncer := setupSyncer(src)
	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
		t.Fatalf("sync failed: %v", err)
	}
	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)

	// The trie root will always be requested, since it is added when the snap
	// sync cycle starts. When popping the queue, we do not look it up again.
	// Doing so would bring this number down to zero in this artificial test case,
	// but only add extra IO for no reason in practice.
	if have, want := src.nTrienodeRequests, 1; have != want {
		fmt.Print(src.Stats())
		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
	}
}
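
// TestSlotEstimation sanity-checks estimateRemainingSlots. The table below
// encodes the expected behaviour: having seen `count` slots up to hash `last`,
// the remainder is extrapolated proportionally over the rest of the hash space
// (roughly count * (maxHash - last) / last), and a zero or near-zero `last`
// is expected to produce an error instead of an estimate.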
func TestSlotEstimation(t *testing.T) {
	for i, tc := range []struct {
		last  common.Hash
		count int
		want  uint64
	}{
		{
			// Half the space
			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			100,
			100,
		},
		{
			// 1 / 16th
			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			100,
			1500,
		},
		{
			// Bit more than 1 / 16th
			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
			100,
			1499,
		},
		{
			// Almost everything
			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
			100,
			6,
		},
		{
			// Almost nothing -- should lead to error
			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
			1,
			0,
		},
		{
			// Nothing -- should lead to error
			common.Hash{},
			100,
			0,
		},
	} {
		have, _ := estimateRemainingSlots(tc.count, tc.last)
		if want := tc.want; have != want {
			t.Errorf("test %d: have %d want %d", i, have, want)
		}
	}
}