- // Copyright 2021 The go-ethereum Authors
- // This file is part of the go-ethereum library.
- //
- // The go-ethereum library is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Lesser General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // The go-ethereum library is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Lesser General Public License for more details.
- //
- // You should have received a copy of the GNU Lesser General Public License
- // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- package snap
- import (
- "bytes"
- "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/big"
- "sort"
- "sync"
- "testing"
- "time"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/light"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
- "golang.org/x/crypto/sha3"
- )
- func TestHashing(t *testing.T) {
- t.Parallel()
- var bytecodes = make([][]byte, 10)
- for i := 0; i < len(bytecodes); i++ {
- buf := make([]byte, 100)
- rand.Read(buf)
- bytecodes[i] = buf
- }
- var want, got string
- var old = func() {
- hasher := sha3.NewLegacyKeccak256()
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hash := hasher.Sum(nil)
- got = fmt.Sprintf("%v\n%v", got, hash)
- }
- }
- var new = func() {
- hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
- var hash = make([]byte, 32)
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Read(hash)
- want = fmt.Sprintf("%v\n%v", want, hash)
- }
- }
- old()
- new()
- if want != got {
- t.Errorf("want\n%v\ngot\n%v\n", want, got)
- }
- }
- func BenchmarkHashing(b *testing.B) {
- var bytecodes = make([][]byte, 10000)
- for i := 0; i < len(bytecodes); i++ {
- buf := make([]byte, 100)
- rand.Read(buf)
- bytecodes[i] = buf
- }
- var old = func() {
- hasher := sha3.NewLegacyKeccak256()
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Sum(nil)
- }
- }
- var new = func() {
- hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
- var hash = make([]byte, 32)
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Read(hash)
- }
- }
- b.Run("old", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- old()
- }
- })
- b.Run("new", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- new()
- }
- })
- }
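- // Handler signatures for the four snap request types. Individual tests swap
- // these out on a testPeer to simulate well-behaved, misbehaving, slow or
- // unresponsive remotes.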
- type (
- accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
- storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
- trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
- codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
- )
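- // testPeer is an in-memory mock of a snap network peer: it serves account,
- // storage, trie node and bytecode requests from locally constructed tries
- // via the configurable handler functions above.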
- type testPeer struct {
- id string
- test *testing.T
- remote *Syncer
- logger log.Logger
- accountTrie *trie.Trie
- accountValues entrySlice
- storageTries map[common.Hash]*trie.Trie
- storageValues map[common.Hash]entrySlice
- accountRequestHandler accountHandlerFunc
- storageRequestHandler storageHandlerFunc
- trieRequestHandler trieHandlerFunc
- codeRequestHandler codeHandlerFunc
- term func()
- // counters
- nAccountRequests int
- nStorageRequests int
- nBytecodeRequests int
- nTrienodeRequests int
- }
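- // newTestPeer creates a test peer with the default well-behaved request handlers installed.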
- func newTestPeer(id string, t *testing.T, term func()) *testPeer {
- peer := &testPeer{
- id: id,
- test: t,
- logger: log.New("id", id),
- accountRequestHandler: defaultAccountRequestHandler,
- trieRequestHandler: defaultTrieRequestHandler,
- storageRequestHandler: defaultStorageRequestHandler,
- codeRequestHandler: defaultCodeRequestHandler,
- term: term,
- }
- //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
- //peer.logger.SetHandler(stderrHandler)
- return peer
- }
- func (t *testPeer) ID() string { return t.id }
- func (t *testPeer) Log() log.Logger { return t.logger }
- func (t *testPeer) Stats() string {
- return fmt.Sprintf(`Account requests: %d
- Storage requests: %d
- Bytecode requests: %d
- Trienode requests: %d
- `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
- }
- func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
- t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
- t.nAccountRequests++
- go t.accountRequestHandler(t, id, root, origin, limit, bytes)
- return nil
- }
- func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
- t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
- t.nTrienodeRequests++
- go t.trieRequestHandler(t, id, root, paths, bytes)
- return nil
- }
- func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
- t.nStorageRequests++
- if len(accounts) == 1 && origin != nil {
- t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
- } else {
- t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
- }
- go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
- return nil
- }
- func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
- t.nBytecodeRequests++
- t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
- go t.codeRequestHandler(t, id, hashes, bytes)
- return nil
- }
- // defaultTrieRequestHandler is a well-behaved handler for trie healing requests
- func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- // Pass the response
- var nodes [][]byte
- for _, pathset := range paths {
- switch len(pathset) {
- case 1:
- blob, _, err := t.accountTrie.TryGetNode(pathset[0])
- if err != nil {
- t.logger.Info("Error handling req", "error", err)
- break
- }
- nodes = append(nodes, blob)
- default:
- account := t.storageTries[(common.BytesToHash(pathset[0]))]
- for _, path := range pathset[1:] {
- blob, _, err := account.TryGetNode(path)
- if err != nil {
- t.logger.Info("Error handling req", "error", err)
- break
- }
- nodes = append(nodes, blob)
- }
- }
- }
- t.remote.OnTrieNodes(t, requestId, nodes)
- return nil
- }
- // defaultAccountRequestHandler is a well-behaved handler for AccountRangeRequests
- func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
- if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- return err
- }
- return nil
- }
- func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
- var size uint64
- if limit == (common.Hash{}) {
- limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- }
- for _, entry := range t.accountValues {
- if size > cap {
- break
- }
- if bytes.Compare(origin[:], entry.k) <= 0 {
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- size += uint64(32 + len(entry.v))
- }
- // If we've exceeded the request threshold, abort
- if bytes.Compare(entry.k, limit[:]) >= 0 {
- break
- }
- }
- // Unless we send the entire trie, we need to supply proofs
- // Actually, we need to supply proofs either way! This seems to be an implementation
- // quirk in go-ethereum
- proof := light.NewNodeSet()
- if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
- }
- if len(keys) > 0 {
- lastK := (keys[len(keys)-1])[:]
- if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
- t.logger.Error("Could not prove last item", "error", err)
- }
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- return keys, vals, proofs
- }
- // defaultStorageRequestHandler is a well-behaved storage request handler
- func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
- hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
- if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- }
- return nil
- }
- func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes {
- bytecodes = append(bytecodes, getCodeByHash(h))
- }
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- }
- return nil
- }
- func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
- var size uint64
- for _, account := range accounts {
- // The first account might start from a different origin and end sooner
- var originHash common.Hash
- if len(origin) > 0 {
- originHash = common.BytesToHash(origin)
- }
- var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- if len(limit) > 0 {
- limitHash = common.BytesToHash(limit)
- }
- var (
- keys []common.Hash
- vals [][]byte
- abort bool
- )
- for _, entry := range t.storageValues[account] {
- if size >= max {
- abort = true
- break
- }
- if bytes.Compare(entry.k, originHash[:]) < 0 {
- continue
- }
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- size += uint64(32 + len(entry.v))
- if bytes.Compare(entry.k, limitHash[:]) >= 0 {
- break
- }
- }
- if len(keys) > 0 {
- hashes = append(hashes, keys)
- slots = append(slots, vals)
- }
- // Generate the Merkle proofs for the first and last storage slot, but
- // only if the response was capped. If the entire storage trie is included
- // in the response, there is no need for any proofs.
- if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
- // If we're aborting, we need to prove the first and last item
- // This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
- stTrie := t.storageTries[account]
- // Here's a potential gotcha: when constructing the proof, we cannot
- // use the 'origin' slice directly, but must use the full 32-byte
- // hash form.
- if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
- t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
- }
- if len(keys) > 0 {
- lastK := (keys[len(keys)-1])[:]
- if err := stTrie.Prove(lastK, 0, proof); err != nil {
- t.logger.Error("Could not prove last item", "error", err)
- }
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- break
- }
- }
- return hashes, slots, proofs
- }
- // createStorageRequestResponseAlwaysProve tests a corner case, where the response
- // always supplies the proof for the last account, even if it is 'complete'.
- func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
- var size uint64
- max = max * 3 / 4
- var origin common.Hash
- if len(bOrigin) > 0 {
- origin = common.BytesToHash(bOrigin)
- }
- var exit bool
- for i, account := range accounts {
- var keys []common.Hash
- var vals [][]byte
- for _, entry := range t.storageValues[account] {
- if bytes.Compare(entry.k, origin[:]) < 0 {
- exit = true
- }
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- size += uint64(32 + len(entry.v))
- if size > max {
- exit = true
- }
- }
- if i == len(accounts)-1 {
- exit = true
- }
- hashes = append(hashes, keys)
- slots = append(slots, vals)
- if exit {
- // If we're aborting, we need to prove the first and last item
- // This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
- stTrie := t.storageTries[account]
- // Here's a potential gotcha: when constructing the proof, we cannot
- // use the 'origin' slice directly, but must use the full 32-byte
- // hash form.
- if err := stTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove inexistence of origin", "origin", origin,
- "error", err)
- }
- if len(keys) > 0 {
- lastK := (keys[len(keys)-1])[:]
- if err := stTrie.Prove(lastK, 0, proof); err != nil {
- t.logger.Error("Could not prove last item", "error", err)
- }
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- break
- }
- }
- return hashes, slots, proofs
- }
- // emptyRequestAccountRangeFn is a handler that rejects AccountRangeRequests with an empty response
- func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- t.remote.OnAccounts(t, requestId, nil, nil, nil)
- return nil
- }
- func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- return nil
- }
- func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- t.remote.OnTrieNodes(t, requestId, nil)
- return nil
- }
- func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- return nil
- }
- func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- t.remote.OnStorage(t, requestId, nil, nil, nil)
- return nil
- }
- func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- return nil
- }
- func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
- if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- }
- return nil
- }
- //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- // var bytecodes [][]byte
- // t.remote.OnByteCodes(t, id, bytecodes)
- // return nil
- //}
- func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes {
- // Send back the hashes
- bytecodes = append(bytecodes, h[:])
- }
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes[:1] {
- bytecodes = append(bytecodes, getCodeByHash(h))
- }
- // Missing bytecode can be retrieved again, no error expected
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- }
- return nil
- }
- // starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to be very small
- func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
- }
- func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
- }
- //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
- // return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
- //}
- func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
- if len(proofs) > 0 {
- proofs = proofs[1:]
- }
- if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- // corruptStorageRequestHandler doesn't provide good proofs
- func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
- if len(proofs) > 0 {
- proofs = proofs[1:]
- }
- if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
- if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
- // also ship the entire trie inside the proof. If the attack is successful,
- // the remote side does not do any follow-up requests
- func TestSyncBloatedProof(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- source := newTestPeer("source", t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- var (
- proofs [][]byte
- keys []common.Hash
- vals [][]byte
- )
- // The values
- for _, entry := range t.accountValues {
- if bytes.Compare(entry.k, origin[:]) < 0 {
- continue
- }
- if bytes.Compare(entry.k, limit[:]) > 0 {
- continue
- }
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- }
- // The proofs
- proof := light.NewNodeSet()
- if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove origin", "origin", origin, "error", err)
- }
- // The bloat: add proof of every single element
- for _, entry := range t.accountValues {
- if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
- t.logger.Error("Could not prove item", "error", err)
- }
- }
- // And remove one item from the elements
- if len(keys) > 2 {
- keys = append(keys[:1], keys[2:]...)
- vals = append(vals[:1], vals[2:]...)
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- t.term()
- // This is actually correct, signal to exit the test successfully
- }
- return nil
- }
- syncer := setupSyncer(source)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
- t.Fatal("No error returned from incomplete/cancelled sync")
- }
- }
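- // setupSyncer creates a Syncer backed by a fresh in-memory database and
- // registers the given peers with it.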
- func setupSyncer(peers ...*testPeer) *Syncer {
- stateDb := rawdb.NewMemoryDatabase()
- syncer := NewSyncer(stateDb)
- for _, peer := range peers {
- syncer.Register(peer)
- peer.remote = syncer
- }
- return syncer
- }
- // TestSync tests a basic sync with one peer
- func TestSync(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(mkSource("source"))
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
- // panic within the prover
- func TestSyncTinyTriePanic(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(mkSource("source"))
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestMultiSync tests a basic sync with multiple peers
- func TestMultiSync(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncWithStorage tests basic sync using accounts + storage + code
- func TestSyncWithStorage(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"))
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
- func TestMultiSyncManyUseless(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !noAccount {
- source.accountRequestHandler = emptyRequestAccountRangeFn
- }
- if !noStorage {
- source.storageRequestHandler = emptyStorageRequestHandler
- }
- if !noTrieNode {
- source.trieRequestHandler = emptyTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestMultiSyncManyUselessWithLowTimeout is like TestMultiSyncManyUseless, but uses a very low request timeout
- func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !noAccount {
- source.accountRequestHandler = emptyRequestAccountRangeFn
- }
- if !noStorage {
- source.storageRequestHandler = emptyStorageRequestHandler
- }
- if !noTrieNode {
- source.trieRequestHandler = emptyTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- // We're setting the timeout to very low, to increase the chance of the timeout
- // being triggered. This was previously a cause of panic, when a response
- // arrived simultaneously as a timeout was triggered.
- syncer.rates.OverrideTTLLimit = time.Millisecond
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
- func TestMultiSyncManyUnresponsive(t *testing.T) {
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !noAccount {
- source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
- }
- if !noStorage {
- source.storageRequestHandler = nonResponsiveStorageRequestHandler
- }
- if !noTrieNode {
- source.trieRequestHandler = nonResponsiveTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- // We're setting the timeout to very low, to make the test run a bit faster
- syncer.rates.OverrideTTLLimit = time.Millisecond
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
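- // checkStall spawns a watchdog which terminates the sync if it has not
- // completed within a minute; the caller must close the returned channel
- // once the test is done to stop the watchdog.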
- func checkStall(t *testing.T, term func()) chan struct{} {
- testDone := make(chan struct{})
- go func() {
- select {
- case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
- t.Log("Sync stalled")
- term()
- case <-testDone:
- return
- }
- }()
- return testDone
- }
- // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
- // account trie has a few boundary elements.
- func TestSyncBoundaryAccountTrie(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(
- mkSource("peer-a"),
- mkSource("peer-b"),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
- // consistently returning very small results
- func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, slow bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- if slow {
- source.accountRequestHandler = starvingAccountRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", false),
- mkSource("nice-b", false),
- mkSource("nice-c", false),
- mkSource("capped", true),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
- // code requests properly.
- func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.codeRequestHandler = codeFn
- return source
- }
- // One is capped, one is corrupt. If we don't use a capped one, there's a 50%
- // chance that the full set of codes requested is sent only to the
- // non-corrupt peer, which delivers everything in one go, and makes the
- // test moot
- syncer := setupSyncer(
- mkSource("capped", cappedCodeRequestHandler),
- mkSource("corrupt", corruptCodeRequestHandler),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
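- // TestSyncNoStorageAndOneAccountCorruptPeer has one peer which doesn't deliver
- // account requests properly.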
- func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.accountRequestHandler = accFn
- return source
- }
- // One is capped, one is corrupt. If we don't use a capped one, there's a 50%
- // chance that the full set of accounts requested is sent only to the
- // non-corrupt peer, which delivers everything in one go, and makes the
- // test moot
- syncer := setupSyncer(
- mkSource("capped", defaultAccountRequestHandler),
- mkSource("corrupt", corruptAccountRequestHandler),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
- // one by one
- func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.codeRequestHandler = codeFn
- return source
- }
- // Count how many times it's invoked. Remember, there are only 8 unique hashes,
- // so it shouldn't be more than that
- var counter int
- syncer := setupSyncer(
- mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- counter++
- return cappedCodeRequestHandler(t, id, hashes, max)
- }),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- // There are only 8 unique hashes, and 3K accounts. However, the code
- // deduplication is per request batch. If it were a perfect global dedup,
- // we would expect only 8 requests. If there were no dedup, there would be
- // 3k requests.
- // We expect somewhere below 100 requests for these 8 unique hashes. But
- // the number can be flaky, so don't limit it so strictly.
- if threshold := 100; counter > threshold {
- t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
- }
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
- // storage trie has a few boundary elements.
- func TestSyncBoundaryStorageTrie(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- return source
- }
- syncer := setupSyncer(
- mkSource("peer-a"),
- mkSource("peer-b"),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
- // consistently returning very small results
- func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
- mkSource := func(name string, slow bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if slow {
- source.storageRequestHandler = starvingStorageRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", false),
- mkSource("slow", true),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
- // sometimes sending bad proofs
- func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, handler storageHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- source.storageRequestHandler = handler
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", defaultStorageRequestHandler),
- mkSource("nice-b", defaultStorageRequestHandler),
- mkSource("nice-c", defaultStorageRequestHandler),
- mkSource("corrupt", corruptStorageRequestHandler),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
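- // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage,
- // where one peer delivers storage ranges without any Merkle proofs.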
- func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, handler storageHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- source.storageRequestHandler = handler
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", defaultStorageRequestHandler),
- mkSource("nice-b", defaultStorageRequestHandler),
- mkSource("nice-c", defaultStorageRequestHandler),
- mkSource("corrupt", noProofStorageRequestHandler),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
- // a peer who insists on delivering full storage sets _and_ proofs. This triggered
- // an error, where the recipient erroneously clipped the boundary nodes, but
- // did not mark the account for healing.
- func TestSyncWithStorageMisbehavingProve(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- source.storageRequestHandler = proofHappyStorageRequestHandler
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"))
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
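- // kv is a key-value tuple holding a raw trie entry.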
- type kv struct {
- k, v []byte
- }
- // Some helpers for sorting
- type entrySlice []*kv
- func (p entrySlice) Len() int { return len(p) }
- func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
- func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
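- // key32 returns a 32-byte key with i encoded into it in little-endian order.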
- func key32(i uint64) []byte {
- key := make([]byte, 32)
- binary.LittleEndian.PutUint64(key, i)
- return key
- }
- var (
- codehashes = []common.Hash{
- crypto.Keccak256Hash([]byte{0}),
- crypto.Keccak256Hash([]byte{1}),
- crypto.Keccak256Hash([]byte{2}),
- crypto.Keccak256Hash([]byte{3}),
- crypto.Keccak256Hash([]byte{4}),
- crypto.Keccak256Hash([]byte{5}),
- crypto.Keccak256Hash([]byte{6}),
- crypto.Keccak256Hash([]byte{7}),
- }
- )
- // getCodeHash returns a pseudo-random code hash
- func getCodeHash(i uint64) []byte {
- h := codehashes[int(i)%len(codehashes)]
- return common.CopyBytes(h[:])
- }
- // getCodeByHash is a convenience function to look up the code from the code hash
- func getCodeByHash(hash common.Hash) []byte {
- if hash == emptyCode {
- return nil
- }
- for i, h := range codehashes {
- if h == hash {
- return []byte{byte(i)}
- }
- }
- return nil
- }
- // makeAccountTrieNoStorage spits out a trie, along with the leaves
- func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
- var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie = trie.NewEmpty(db)
- entries entrySlice
- )
- for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(&types.StateAccount{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: emptyRoot,
- CodeHash: getCodeHash(i),
- })
- key := key32(i)
- elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- // Commit the state changes into db and re-create the trie
- // for accessing later.
- root, nodes, _ := accTrie.Commit(false)
- db.Update(trie.NewWithNodeSet(nodes))
- accTrie, _ = trie.New(common.Hash{}, root, db)
- return accTrie, entries
- }
- // makeBoundaryAccountTrie constructs an account trie. Instead of filling
- // accounts normally, this function will fill a few accounts which have
- // boundary hashes.
- func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
- var (
- entries entrySlice
- boundaries []common.Hash
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie = trie.NewEmpty(db)
- )
- // Initialize boundaries
- var next common.Hash
- step := new(big.Int).Sub(
- new(big.Int).Div(
- new(big.Int).Exp(common.Big2, common.Big256, nil),
- big.NewInt(int64(accountConcurrency)),
- ), common.Big1,
- )
- for i := 0; i < accountConcurrency; i++ {
- last := common.BigToHash(new(big.Int).Add(next.Big(), step))
- if i == accountConcurrency-1 {
- last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- }
- boundaries = append(boundaries, last)
- next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
- }
- // Fill boundary accounts
- for i := 0; i < len(boundaries); i++ {
- value, _ := rlp.EncodeToBytes(&types.StateAccount{
- Nonce: uint64(0),
- Balance: big.NewInt(int64(i)),
- Root: emptyRoot,
- CodeHash: getCodeHash(uint64(i)),
- })
- elem := &kv{boundaries[i].Bytes(), value}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- // Fill other accounts if required
- for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(&types.StateAccount{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: emptyRoot,
- CodeHash: getCodeHash(i),
- })
- elem := &kv{key32(i), value}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- // Commit the state changes into db and re-create the trie
- // for accessing later.
- root, nodes, _ := accTrie.Commit(false)
- db.Update(trie.NewWithNodeSet(nodes))
- accTrie, _ = trie.New(common.Hash{}, root, db)
- return accTrie, entries
- }
- // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
- // has a unique storage set.
- func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
- var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie = trie.NewEmpty(db)
- entries entrySlice
- storageRoots = make(map[common.Hash]common.Hash)
- storageTries = make(map[common.Hash]*trie.Trie)
- storageEntries = make(map[common.Hash]entrySlice)
- nodes = trie.NewMergedNodeSet()
- )
- // Create n accounts in the trie
- for i := uint64(1); i <= uint64(accounts); i++ {
- key := key32(i)
- codehash := emptyCode[:]
- if code {
- codehash = getCodeHash(i)
- }
- // Create a storage trie
- stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
- nodes.Merge(stNodes)
- value, _ := rlp.EncodeToBytes(&types.StateAccount{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: stRoot,
- CodeHash: codehash,
- })
- elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- storageRoots[common.BytesToHash(key)] = stRoot
- storageEntries[common.BytesToHash(key)] = stEntries
- }
- sort.Sort(entries)
- // Commit account trie
- root, set, _ := accTrie.Commit(true)
- nodes.Merge(set)
- // Commit gathered dirty nodes into database
- db.Update(nodes)
- // Re-create tries with new root
- accTrie, _ = trie.New(common.Hash{}, root, db)
- for i := uint64(1); i <= uint64(accounts); i++ {
- key := key32(i)
- trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
- storageTries[common.BytesToHash(key)] = trie
- }
- return accTrie, entries, storageTries, storageEntries
- }
- // makeAccountTrieWithStorage spits out a trie, along with the leaves
- func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
- var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie = trie.NewEmpty(db)
- entries entrySlice
- storageRoots = make(map[common.Hash]common.Hash)
- storageTries = make(map[common.Hash]*trie.Trie)
- storageEntries = make(map[common.Hash]entrySlice)
- nodes = trie.NewMergedNodeSet()
- )
- // Create n accounts in the trie
- for i := uint64(1); i <= uint64(accounts); i++ {
- key := key32(i)
- codehash := emptyCode[:]
- if code {
- codehash = getCodeHash(i)
- }
- // Make a storage trie
- var (
- stRoot common.Hash
- stNodes *trie.NodeSet
- stEntries entrySlice
- )
- if boundary {
- stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
- } else {
- stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
- }
- nodes.Merge(stNodes)
- value, _ := rlp.EncodeToBytes(&types.StateAccount{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: stRoot,
- CodeHash: codehash,
- })
- elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- // we reuse the same one for all accounts
- storageRoots[common.BytesToHash(key)] = stRoot
- storageEntries[common.BytesToHash(key)] = stEntries
- }
- sort.Sort(entries)
- // Commit account trie
- root, set, _ := accTrie.Commit(true)
- nodes.Merge(set)
- // Commit gathered dirty nodes into database
- db.Update(nodes)
- // Re-create tries with new root
- accTrie, err := trie.New(common.Hash{}, root, db)
- if err != nil {
- panic(err)
- }
- for i := uint64(1); i <= uint64(accounts); i++ {
- key := key32(i)
- trie, err := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
- if err != nil {
- panic(err)
- }
- storageTries[common.BytesToHash(key)] = trie
- }
- return accTrie, entries, storageTries, storageEntries
- }
- // makeStorageTrieWithSeed fills a storage trie with n items, returning the
- // committed root, the dirty node set and the sorted entries. The seed can be
- // used to ensure that tries are unique.
- func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
- trie, _ := trie.New(owner, common.Hash{}, db)
- var entries entrySlice
- for i := uint64(1); i <= n; i++ {
- // store 'x' at slot 'x'
- slotValue := key32(i + seed)
- rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
- slotKey := key32(i)
- key := crypto.Keccak256Hash(slotKey[:])
- elem := &kv{key[:], rlpSlotValue}
- trie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- root, nodes, _ := trie.Commit(false)
- return root, nodes, entries
- }
- // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
- // storage slots normally, this function will fill a few slots which have
- // boundary hashes.
- func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
- var (
- entries entrySlice
- boundaries []common.Hash
- trie, _ = trie.New(owner, common.Hash{}, db)
- )
- // Initialize boundaries
- var next common.Hash
- step := new(big.Int).Sub(
- new(big.Int).Div(
- new(big.Int).Exp(common.Big2, common.Big256, nil),
- big.NewInt(int64(accountConcurrency)),
- ), common.Big1,
- )
- for i := 0; i < accountConcurrency; i++ {
- last := common.BigToHash(new(big.Int).Add(next.Big(), step))
- if i == accountConcurrency-1 {
- last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- }
- boundaries = append(boundaries, last)
- next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
- }
- // Fill boundary slots
- for i := 0; i < len(boundaries); i++ {
- key := boundaries[i]
- val := []byte{0xde, 0xad, 0xbe, 0xef}
- elem := &kv{key[:], val}
- trie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- // Fill other slots if required
- for i := uint64(1); i <= uint64(n); i++ {
- slotKey := key32(i)
- key := crypto.Keccak256Hash(slotKey[:])
- slotValue := key32(i)
- rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
- elem := &kv{key[:], rlpSlotValue}
- trie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- root, nodes, _ := trie.Commit(false)
- return root, nodes, entries
- }
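- // verifyTrie opens the account trie at the given root from the database and
- // iterates every account and storage slot, failing the test on any error.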
- func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
- t.Helper()
- triedb := trie.NewDatabase(db)
- accTrie, err := trie.New(common.Hash{}, root, triedb)
- if err != nil {
- t.Fatal(err)
- }
- accounts, slots := 0, 0
- accIt := trie.NewIterator(accTrie.NodeIterator(nil))
- for accIt.Next() {
- var acc struct {
- Nonce uint64
- Balance *big.Int
- Root common.Hash
- CodeHash []byte
- }
- if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
- log.Crit("Invalid account encountered during snapshot creation", "err", err)
- }
- accounts++
- if acc.Root != emptyRoot {
- storeTrie, err := trie.NewStateTrie(common.BytesToHash(accIt.Key), acc.Root, triedb)
- if err != nil {
- t.Fatal(err)
- }
- storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
- for storeIt.Next() {
- slots++
- }
- if err := storeIt.Err; err != nil {
- t.Fatal(err)
- }
- }
- }
- if err := accIt.Err; err != nil {
- t.Fatal(err)
- }
- t.Logf("accounts: %d, slots: %d", accounts, slots)
- }
- // TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
- // state healing
- func TestSyncAccountPerformance(t *testing.T) {
- // Set the account concurrency to 1. This _should_ result in the
- // range root becoming correct, and there should be no healing needed
- defer func(old int) { accountConcurrency = old }(accountConcurrency)
- accountConcurrency = 1
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- src := mkSource("source")
- syncer := setupSyncer(src)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- // The trie root will always be requested, since it is added when the snap
- // sync cycle starts. When popping the queue, we do not look it up again.
- // Doing so would bring this number down to zero in this artificial testcase,
- // but only add extra IO for no reason in practice.
- if have, want := src.nTrienodeRequests, 1; have != want {
- fmt.Print(src.Stats())
- t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
- }
- }
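- // TestSlotEstimation exercises estimateRemainingSlots against a table of
- // 'last hash seen' / 'slot count' pairs, checking the extrapolated estimate
- // of remaining slots.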
- func TestSlotEstimation(t *testing.T) {
- for i, tc := range []struct {
- last common.Hash
- count int
- want uint64
- }{
- {
- // Half the space
- common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
- 100,
- 100,
- },
- {
- // 1 / 16th
- common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
- 100,
- 1500,
- },
- {
- // Bit more than 1 / 16th
- common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
- 100,
- 1499,
- },
- {
- // Almost everything
- common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
- 100,
- 6,
- },
- {
- // Almost nothing -- should lead to error
- common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
- 1,
- 0,
- },
- {
- // Nothing -- should lead to error
- common.Hash{},
- 100,
- 0,
- },
- } {
- have, _ := estimateRemainingSlots(tc.count, tc.last)
- if want := tc.want; have != want {
- t.Errorf("test %d: have %d want %d", i, have, want)
- }
- }
- }