- // Copyright 2020 The go-ethereum Authors
- // This file is part of the go-ethereum library.
- //
- // The go-ethereum library is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Lesser General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // The go-ethereum library is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Lesser General Public License for more details.
- //
- // You should have received a copy of the GNU Lesser General Public License
- // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- package snap
- import (
- "bytes"
- "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/big"
- "sort"
- "sync"
- "testing"
- "time"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/light"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
- "golang.org/x/crypto/sha3"
- )
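- // TestHashing verifies that hashing bytecodes via Sum and via the KeccakState
- // Read interface produce identical digests.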
- func TestHashing(t *testing.T) {
- t.Parallel()
- var bytecodes = make([][]byte, 10)
- for i := 0; i < len(bytecodes); i++ {
- buf := make([]byte, 100)
- rand.Read(buf)
- bytecodes[i] = buf
- }
- var want, got string
- var old = func() {
- hasher := sha3.NewLegacyKeccak256()
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hash := hasher.Sum(nil)
- got = fmt.Sprintf("%v\n%v", got, hash)
- }
- }
- var new = func() {
- hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
- var hash = make([]byte, 32)
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Read(hash)
- want = fmt.Sprintf("%v\n%v", want, hash)
- }
- }
- old()
- new()
- if want != got {
- t.Errorf("want\n%v\ngot\n%v\n", want, got)
- }
- }
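- // BenchmarkHashing compares the performance and allocation behaviour of
- // Sum-based hashing against the KeccakState Read-based variant.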
- func BenchmarkHashing(b *testing.B) {
- var bytecodes = make([][]byte, 10000)
- for i := 0; i < len(bytecodes); i++ {
- buf := make([]byte, 100)
- rand.Read(buf)
- bytecodes[i] = buf
- }
- var old = func() {
- hasher := sha3.NewLegacyKeccak256()
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Sum(nil)
- }
- }
- var new = func() {
- hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
- var hash = make([]byte, 32)
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Read(hash)
- }
- }
- b.Run("old", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- old()
- }
- })
- b.Run("new", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- new()
- }
- })
- }
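- // Handler function types used by testPeer to answer the various snap sync
- // requests, so individual tests can plug in misbehaving variants.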
- type (
- accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error
- storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
- trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
- codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
- )
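- // testPeer is an in-memory peer backed by pre-built account and storage tries.
- // Every request is dispatched to a configurable handler, allowing tests to
- // simulate well-behaved, slow, corrupt or unresponsive remotes.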
- type testPeer struct {
- id string
- test *testing.T
- remote *Syncer
- logger log.Logger
- accountTrie *trie.Trie
- accountValues entrySlice
- storageTries map[common.Hash]*trie.Trie
- storageValues map[common.Hash]entrySlice
- accountRequestHandler accountHandlerFunc
- storageRequestHandler storageHandlerFunc
- trieRequestHandler trieHandlerFunc
- codeRequestHandler codeHandlerFunc
- term func()
- // counters
- nAccountRequests int
- nStorageRequests int
- nBytecodeRequests int
- nTrienodeRequests int
- }
- func newTestPeer(id string, t *testing.T, term func()) *testPeer {
- peer := &testPeer{
- id: id,
- test: t,
- logger: log.New("id", id),
- accountRequestHandler: defaultAccountRequestHandler,
- trieRequestHandler: defaultTrieRequestHandler,
- storageRequestHandler: defaultStorageRequestHandler,
- codeRequestHandler: defaultCodeRequestHandler,
- term: term,
- }
- //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
- //peer.logger.SetHandler(stderrHandler)
- return peer
- }
- func (t *testPeer) ID() string { return t.id }
- func (t *testPeer) Log() log.Logger { return t.logger }
- func (t *testPeer) Stats() string {
- return fmt.Sprintf(`Account requests: %d
- Storage requests: %d
- Bytecode requests: %d
- Trienode requests: %d
- `, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
- }
- func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
- t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
- t.nAccountRequests++
- go t.accountRequestHandler(t, id, root, origin, limit, bytes)
- return nil
- }
- func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
- t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
- t.nTrienodeRequests++
- go t.trieRequestHandler(t, id, root, paths, bytes)
- return nil
- }
- func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
- t.nStorageRequests++
- if len(accounts) == 1 && origin != nil {
- t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
- } else {
- t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
- }
- go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
- return nil
- }
- func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
- t.nBytecodeRequests++
- t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
- go t.codeRequestHandler(t, id, hashes, bytes)
- return nil
- }
- // defaultTrieRequestHandler is a well-behaved handler for trie healing requests
- func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- // Pass the response
- var nodes [][]byte
- for _, pathset := range paths {
- switch len(pathset) {
- case 1:
- blob, _, err := t.accountTrie.TryGetNode(pathset[0])
- if err != nil {
- t.logger.Info("Error handling req", "error", err)
- break
- }
- nodes = append(nodes, blob)
- default:
- account := t.storageTries[(common.BytesToHash(pathset[0]))]
- for _, path := range pathset[1:] {
- blob, _, err := account.TryGetNode(path)
- if err != nil {
- t.logger.Info("Error handling req", "error", err)
- break
- }
- nodes = append(nodes, blob)
- }
- }
- }
- t.remote.OnTrieNodes(t, requestId, nodes)
- return nil
- }
- // defaultAccountRequestHandler is a well-behaved handler for AccountRangeRequests
- func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- keys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
- if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- return err
- }
- return nil
- }
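- // createAccountRequestResponse assembles a size-capped account range starting
- // at origin, along with Merkle proofs for the range boundaries.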
- func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
- var size uint64
- if limit == (common.Hash{}) {
- limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- }
- for _, entry := range t.accountValues {
- if size > cap {
- break
- }
- if bytes.Compare(origin[:], entry.k) <= 0 {
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- size += uint64(32 + len(entry.v))
- }
- // If we've exceeded the request threshold, abort
- if bytes.Compare(entry.k, limit[:]) >= 0 {
- break
- }
- }
- // Unless we send the entire trie, we need to supply proofs
- // Actually, we need to supply proofs either way! This seems to be an implementation
- // quirk in go-ethereum
- proof := light.NewNodeSet()
- if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err)
- }
- if len(keys) > 0 {
- lastK := (keys[len(keys)-1])[:]
- if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
- t.logger.Error("Could not prove last item", "error", err)
- }
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- return keys, vals, proofs
- }
- // defaultStorageRequestHandler is a well-behaved storage request handler
- func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
- hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
- if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- }
- return nil
- }
- func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes {
- bytecodes = append(bytecodes, getCodeByHash(h))
- }
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- }
- return nil
- }
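- // createStorageRequestResponse assembles size-capped storage ranges for the
- // requested accounts, attaching boundary proofs whenever a range is truncated
- // or starts at a non-zero origin.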
- func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
- var size uint64
- for _, account := range accounts {
- // The first account might start from a different origin and end sooner
- var originHash common.Hash
- if len(origin) > 0 {
- originHash = common.BytesToHash(origin)
- }
- var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- if len(limit) > 0 {
- limitHash = common.BytesToHash(limit)
- }
- var (
- keys []common.Hash
- vals [][]byte
- abort bool
- )
- for _, entry := range t.storageValues[account] {
- if size >= max {
- abort = true
- break
- }
- if bytes.Compare(entry.k, originHash[:]) < 0 {
- continue
- }
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- size += uint64(32 + len(entry.v))
- if bytes.Compare(entry.k, limitHash[:]) >= 0 {
- break
- }
- }
- hashes = append(hashes, keys)
- slots = append(slots, vals)
- // Generate the Merkle proofs for the first and last storage slot, but
- // only if the response was capped. If the entire storage trie is included
- // in the response, no proofs are needed.
- if originHash != (common.Hash{}) || abort {
- // If we're aborting, we need to prove the first and last item
- // This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
- stTrie := t.storageTries[account]
- // Here's a potential gotcha: when constructing the proof, we cannot
- // use the 'origin' slice directly, but must use the full 32-byte
- // hash form.
- if err := stTrie.Prove(originHash[:], 0, proof); err != nil {
- t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err)
- }
- if len(keys) > 0 {
- lastK := (keys[len(keys)-1])[:]
- if err := stTrie.Prove(lastK, 0, proof); err != nil {
- t.logger.Error("Could not prove last item", "error", err)
- }
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- break
- }
- }
- return hashes, slots, proofs
- }
- // createStorageRequestResponseAlwaysProve tests a corner case where the response
- // always supplies the proof for the last account, even if it is 'complete'.
- func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
- var size uint64
- max = max * 3 / 4
- var origin common.Hash
- if len(bOrigin) > 0 {
- origin = common.BytesToHash(bOrigin)
- }
- var exit bool
- for i, account := range accounts {
- var keys []common.Hash
- var vals [][]byte
- for _, entry := range t.storageValues[account] {
- if bytes.Compare(entry.k, origin[:]) < 0 {
- exit = true
- }
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- size += uint64(32 + len(entry.v))
- if size > max {
- exit = true
- }
- }
- if i == len(accounts)-1 {
- exit = true
- }
- hashes = append(hashes, keys)
- slots = append(slots, vals)
- if exit {
- // If we're aborting, we need to prove the first and last item
- // This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
- stTrie := t.storageTries[account]
- // Here's a potential gotcha: when constructing the proof, we cannot
- // use the 'origin' slice directly, but must use the full 32-byte
- // hash form.
- if err := stTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove inexistence of origin", "origin", origin,
- "error", err)
- }
- if len(keys) > 0 {
- lastK := (keys[len(keys)-1])[:]
- if err := stTrie.Prove(lastK, 0, proof); err != nil {
- t.logger.Error("Could not prove last item", "error", err)
- }
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- break
- }
- }
- return hashes, slots, proofs
- }
- // emptyRequestAccountRangeFn answers AccountRangeRequests with an empty response
- func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- t.remote.OnAccounts(t, requestId, nil, nil, nil)
- return nil
- }
- func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- return nil
- }
- func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- t.remote.OnTrieNodes(t, requestId, nil)
- return nil
- }
- func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- return nil
- }
- func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- t.remote.OnStorage(t, requestId, nil, nil, nil)
- return nil
- }
- func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- return nil
- }
- func proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- hashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)
- if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- }
- return nil
- }
- //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- // var bytecodes [][]byte
- // t.remote.OnByteCodes(t, id, bytecodes)
- // return nil
- //}
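- // corruptCodeRequestHandler replies to bytecode requests with the requested
- // hashes themselves instead of the actual bytecodes, which the remote side is
- // expected to reject.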
- func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes {
- // Send back the hashes
- bytecodes = append(bytecodes, h[:])
- }
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
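- // cappedCodeRequestHandler only delivers the first of the requested bytecodes;
- // the remainder is expected to be re-requested later.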
- func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes[:1] {
- bytecodes = append(bytecodes, getCodeByHash(h))
- }
- // Missing bytecode can be retrieved again, no error expected
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.term()
- }
- return nil
- }
- // starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to be very small
- func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
- }
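- // starvingAccountRequestHandler is a well-behaved account handler, but it caps the returned results to be very small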
- func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- return defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)
- }
- //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
- // return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
- //}
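- // corruptAccountRequestHandler drops the first proof node from an otherwise
- // valid account range response, so the delivery fails verification.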
- func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)
- if len(proofs) > 0 {
- proofs = proofs[1:]
- }
- if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- // corruptStorageRequestHandler doesn't provide good proofs
- func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
- if len(proofs) > 0 {
- proofs = proofs[1:]
- }
- if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
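- // noProofStorageRequestHandler delivers storage ranges without any boundary proofs.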
- func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
- if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
- // also ship the entire trie inside the proof. If the attack is successful,
- // the remote side does not do any follow-up requests
- func TestSyncBloatedProof(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- source := newTestPeer("source", t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
- var (
- proofs [][]byte
- keys []common.Hash
- vals [][]byte
- )
- // The values
- for _, entry := range t.accountValues {
- if bytes.Compare(entry.k, origin[:]) < 0 {
- continue
- }
- if bytes.Compare(entry.k, limit[:]) > 0 {
- continue
- }
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- }
- // The proofs
- proof := light.NewNodeSet()
- if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove origin", "origin", origin, "error", err)
- }
- // The bloat: add proof of every single element
- for _, entry := range t.accountValues {
- if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
- t.logger.Error("Could not prove item", "error", err)
- }
- }
- // And remove one item from the elements
- if len(keys) > 2 {
- keys = append(keys[:1], keys[2:]...)
- vals = append(vals[:1], vals[2:]...)
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- t.term()
- // This is actually correct, signal to exit the test successfully
- }
- return nil
- }
- syncer := setupSyncer(source)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
- t.Fatal("No error returned from incomplete/cancelled sync")
- }
- }
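- // setupSyncer creates a Syncer over a fresh in-memory database and registers
- // the given test peers with it.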
- func setupSyncer(peers ...*testPeer) *Syncer {
- stateDb := rawdb.NewMemoryDatabase()
- syncer := NewSyncer(stateDb)
- for _, peer := range peers {
- syncer.Register(peer)
- peer.remote = syncer
- }
- return syncer
- }
- // TestSync tests a basic sync with one peer
- func TestSync(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(mkSource("source"))
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
- // panic within the prover
- func TestSyncTinyTriePanic(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(mkSource("source"))
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestMultiSync tests a basic sync with multiple peers
- func TestMultiSync(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncWithStorage tests basic sync using accounts + storage + code
- func TestSyncWithStorage(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"))
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
- func TestMultiSyncManyUseless(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !noAccount {
- source.accountRequestHandler = emptyRequestAccountRangeFn
- }
- if !noStorage {
- source.storageRequestHandler = emptyStorageRequestHandler
- }
- if !noTrieNode {
- source.trieRequestHandler = emptyTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many that don't
- // return anything valuable at all, while running with a very short request timeout
- func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
- // We're setting the timeout to very low, to increase the chance of the timeout
- // being triggered. This was previously a cause of panic, when a response
- // arrived simultaneously as a timeout was triggered.
- defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
- requestTimeout = time.Millisecond
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !noAccount {
- source.accountRequestHandler = emptyRequestAccountRangeFn
- }
- if !noStorage {
- source.storageRequestHandler = emptyStorageRequestHandler
- }
- if !noTrieNode {
- source.trieRequestHandler = emptyTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
- func TestMultiSyncManyUnresponsive(t *testing.T) {
- // We're setting the timeout to very low, to make the test run a bit faster
- defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
- requestTimeout = time.Millisecond
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !noAccount {
- source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
- }
- if !noStorage {
- source.storageRequestHandler = nonResponsiveStorageRequestHandler
- }
- if !noTrieNode {
- source.trieRequestHandler = nonResponsiveTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
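- // checkStall spawns a watchdog that aborts the test via term() if the sync has
- // not completed within a minute. Closing the returned channel stops the watchdog.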
- func checkStall(t *testing.T, term func()) chan struct{} {
- testDone := make(chan struct{})
- go func() {
- select {
- case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
- t.Log("Sync stalled")
- term()
- case <-testDone:
- return
- }
- }()
- return testDone
- }
- // TestSyncBoundaryAccountTrie tests sync against a few normal peers, but the
- // account trie has a few boundary elements.
- func TestSyncBoundaryAccountTrie(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(
- mkSource("peer-a"),
- mkSource("peer-b"),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
- // consistently returning very small results
- func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, slow bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- if slow {
- source.accountRequestHandler = starvingAccountRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", false),
- mkSource("nice-b", false),
- mkSource("nice-c", false),
- mkSource("capped", true),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
- // code requests properly.
- func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.codeRequestHandler = codeFn
- return source
- }
- // One is capped, one is corrupt. If we don't use a capped one, there's a 50%
- // chance that the full set of codes requested is sent only to the
- // non-corrupt peer, which delivers everything in one go, and makes the
- // test moot
- syncer := setupSyncer(
- mkSource("capped", cappedCodeRequestHandler),
- mkSource("corrupt", corruptCodeRequestHandler),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
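- // TestSyncNoStorageAndOneAccountCorruptPeer has one peer which doesn't deliver
- // account range responses properly (its proofs are corrupted).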
- func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.accountRequestHandler = accFn
- return source
- }
- // One is capped, one is corrupt. If we don't use a capped one, there's a 50%
- // chance that all the requested account data is sent only to the
- // non-corrupt peer, which delivers everything in one go, and makes the
- // test moot
- syncer := setupSyncer(
- mkSource("capped", defaultAccountRequestHandler),
- mkSource("corrupt", corruptAccountRequestHandler),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
- // one by one
- func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.codeRequestHandler = codeFn
- return source
- }
- // Count how many times it's invoked. Remember, there are only 8 unique hashes,
- // so it shouldn't be more than that
- var counter int
- syncer := setupSyncer(
- mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- counter++
- return cappedCodeRequestHandler(t, id, hashes, max)
- }),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- // There are only 8 unique hashes, and 3K accounts. However, the code
- // deduplication is per request batch. If it were a perfect global dedup,
- // we would expect only 8 requests. If there were no dedup, there would be
- // 3k requests.
- // We expect somewhere below 100 requests for these 8 unique hashes.
- if threshold := 100; counter > threshold {
- t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
- }
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
- // storage trie has a few boundary elements.
- func TestSyncBoundaryStorageTrie(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- return source
- }
- syncer := setupSyncer(
- mkSource("peer-a"),
- mkSource("peer-b"),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
- // consistently returning very small results
- func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
- mkSource := func(name string, slow bool) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if slow {
- source.storageRequestHandler = starvingStorageRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", false),
- mkSource("slow", true),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
- // sometimes sending bad proofs
- func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, handler storageHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- source.storageRequestHandler = handler
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", defaultStorageRequestHandler),
- mkSource("nice-b", defaultStorageRequestHandler),
- mkSource("nice-c", defaultStorageRequestHandler),
- mkSource("corrupt", corruptStorageRequestHandler),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
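- // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage,
- // where one peer delivers storage ranges without any proofs.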
- func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
- mkSource := func(name string, handler storageHandlerFunc) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- source.storageRequestHandler = handler
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", defaultStorageRequestHandler),
- mkSource("nice-b", defaultStorageRequestHandler),
- mkSource("nice-c", defaultStorageRequestHandler),
- mkSource("corrupt", noProofStorageRequestHandler),
- )
- done := checkStall(t, term)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
- // TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
- // a peer who insists on delivering full storage sets _and_ proofs. This triggered
- // an error, where the recipient erroneously clipped the boundary nodes, but
- // did not mark the account for healing.
- func TestSyncWithStorageMisbehavingProve(t *testing.T) {
- t.Parallel()
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- source.storageRequestHandler = proofHappyStorageRequestHandler
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"))
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- }
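- // kv is a raw key/value pair stored in a test trie.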
- type kv struct {
- k, v []byte
- }
- // Some helpers for sorting
- type entrySlice []*kv
- func (p entrySlice) Len() int { return len(p) }
- func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
- func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
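- // key32 converts a uint64 into a zero-padded 32-byte key, with the counter
- // occupying the first 8 bytes in little-endian order.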
- func key32(i uint64) []byte {
- key := make([]byte, 32)
- binary.LittleEndian.PutUint64(key, i)
- return key
- }
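- // codehashes is a small fixed set of code hashes shared by the generated accounts.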
- var (
- codehashes = []common.Hash{
- crypto.Keccak256Hash([]byte{0}),
- crypto.Keccak256Hash([]byte{1}),
- crypto.Keccak256Hash([]byte{2}),
- crypto.Keccak256Hash([]byte{3}),
- crypto.Keccak256Hash([]byte{4}),
- crypto.Keccak256Hash([]byte{5}),
- crypto.Keccak256Hash([]byte{6}),
- crypto.Keccak256Hash([]byte{7}),
- }
- )
- // getCodeHash returns a pseudo-random code hash
- func getCodeHash(i uint64) []byte {
- h := codehashes[int(i)%len(codehashes)]
- return common.CopyBytes(h[:])
- }
- // getCodeByHash is a convenience function to look up the code from its code hash
- func getCodeByHash(hash common.Hash) []byte {
- if hash == emptyCode {
- return nil
- }
- for i, h := range codehashes {
- if h == hash {
- return []byte{byte(i)}
- }
- }
- return nil
- }
- // makeAccountTrieNoStorage spits out a trie, along with the leaves
- func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
- db := trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie, _ := trie.New(common.Hash{}, db)
- var entries entrySlice
- for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(state.Account{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: emptyRoot,
- CodeHash: getCodeHash(i),
- })
- key := key32(i)
- elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- accTrie.Commit(nil)
- return accTrie, entries
- }
- // makeBoundaryAccountTrie constructs an account trie. Instead of filling
- // accounts normally, this function will fill a few accounts which have
- // boundary hashes.
- func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
- var (
- entries entrySlice
- boundaries []common.Hash
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- trie, _ = trie.New(common.Hash{}, db)
- )
- // Initialize boundaries
- var next common.Hash
- step := new(big.Int).Sub(
- new(big.Int).Div(
- new(big.Int).Exp(common.Big2, common.Big256, nil),
- big.NewInt(int64(accountConcurrency)),
- ), common.Big1,
- )
- for i := 0; i < accountConcurrency; i++ {
- last := common.BigToHash(new(big.Int).Add(next.Big(), step))
- if i == accountConcurrency-1 {
- last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- }
- boundaries = append(boundaries, last)
- next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
- }
- // Fill boundary accounts
- for i := 0; i < len(boundaries); i++ {
- value, _ := rlp.EncodeToBytes(state.Account{
- Nonce: uint64(0),
- Balance: big.NewInt(int64(i)),
- Root: emptyRoot,
- CodeHash: getCodeHash(uint64(i)),
- })
- elem := &kv{boundaries[i].Bytes(), value}
- trie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- // Fill other accounts if required
- for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(state.Account{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: emptyRoot,
- CodeHash: getCodeHash(i),
- })
- elem := &kv{key32(i), value}
- trie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- trie.Commit(nil)
- return trie, entries
- }
- // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
- // has a unique storage set.
- func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
- var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie, _ = trie.New(common.Hash{}, db)
- entries entrySlice
- storageTries = make(map[common.Hash]*trie.Trie)
- storageEntries = make(map[common.Hash]entrySlice)
- )
- // Create n accounts in the trie
- for i := uint64(1); i <= uint64(accounts); i++ {
- key := key32(i)
- codehash := emptyCode[:]
- if code {
- codehash = getCodeHash(i)
- }
- // Create a storage trie
- stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
- stRoot := stTrie.Hash()
- stTrie.Commit(nil)
- value, _ := rlp.EncodeToBytes(state.Account{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: stRoot,
- CodeHash: codehash,
- })
- elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- storageTries[common.BytesToHash(key)] = stTrie
- storageEntries[common.BytesToHash(key)] = stEntries
- }
- sort.Sort(entries)
- accTrie.Commit(nil)
- return accTrie, entries, storageTries, storageEntries
- }
- // makeAccountTrieWithStorage spits out a trie, along with the leaves
- func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
- var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie, _ = trie.New(common.Hash{}, db)
- entries entrySlice
- storageTries = make(map[common.Hash]*trie.Trie)
- storageEntries = make(map[common.Hash]entrySlice)
- )
- // Make a storage trie which we reuse for the whole lot
- var (
- stTrie *trie.Trie
- stEntries entrySlice
- )
- if boundary {
- stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
- } else {
- stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
- }
- stRoot := stTrie.Hash()
- // Create n accounts in the trie
- for i := uint64(1); i <= uint64(accounts); i++ {
- key := key32(i)
- codehash := emptyCode[:]
- if code {
- codehash = getCodeHash(i)
- }
- value, _ := rlp.EncodeToBytes(state.Account{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: stRoot,
- CodeHash: codehash,
- })
- elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- // we reuse the same one for all accounts
- storageTries[common.BytesToHash(key)] = stTrie
- storageEntries[common.BytesToHash(key)] = stEntries
- }
- sort.Sort(entries)
- stTrie.Commit(nil)
- accTrie.Commit(nil)
- return accTrie, entries, storageTries, storageEntries
- }
- // makeStorageTrieWithSeed fills a storage trie with n items, returning the
- // trie and the sorted entries. Different seeds can be used to ensure that
- // the generated tries are unique.
- func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
- trie, _ := trie.New(common.Hash{}, db)
- var entries entrySlice
- for i := uint64(1); i <= n; i++ {
- // store 'x' at slot 'x'
- slotValue := key32(i + seed)
- rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
- slotKey := key32(i)
- key := crypto.Keccak256Hash(slotKey[:])
- elem := &kv{key[:], rlpSlotValue}
- trie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- trie.Commit(nil)
- return trie, entries
- }
- // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
- // storage slots normally, this function will fill a few slots which have
- // boundary hashes.
- func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
- var (
- entries entrySlice
- boundaries []common.Hash
- trie, _ = trie.New(common.Hash{}, db)
- )
- // Initialize boundaries
- var next common.Hash
- step := new(big.Int).Sub(
- new(big.Int).Div(
- new(big.Int).Exp(common.Big2, common.Big256, nil),
- big.NewInt(int64(accountConcurrency)),
- ), common.Big1,
- )
- for i := 0; i < accountConcurrency; i++ {
- last := common.BigToHash(new(big.Int).Add(next.Big(), step))
- if i == accountConcurrency-1 {
- last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- }
- boundaries = append(boundaries, last)
- next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
- }
- // Fill boundary slots
- for i := 0; i < len(boundaries); i++ {
- key := boundaries[i]
- val := []byte{0xde, 0xad, 0xbe, 0xef}
- elem := &kv{key[:], val}
- trie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- // Fill other slots if required
- for i := uint64(1); i <= uint64(n); i++ {
- slotKey := key32(i)
- key := crypto.Keccak256Hash(slotKey[:])
- slotValue := key32(i)
- rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
- elem := &kv{key[:], rlpSlotValue}
- trie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- trie.Commit(nil)
- return trie, entries
- }
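- // verifyTrie iterates the synced account trie (and any referenced storage
- // tries) under the given root, failing the test on iteration errors and
- // logging the number of accounts and slots found.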
- func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
- t.Helper()
- triedb := trie.NewDatabase(db)
- accTrie, err := trie.New(root, triedb)
- if err != nil {
- t.Fatal(err)
- }
- accounts, slots := 0, 0
- accIt := trie.NewIterator(accTrie.NodeIterator(nil))
- for accIt.Next() {
- var acc struct {
- Nonce uint64
- Balance *big.Int
- Root common.Hash
- CodeHash []byte
- }
- if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
- log.Crit("Invalid account encountered during snapshot creation", "err", err)
- }
- accounts++
- if acc.Root != emptyRoot {
- storeTrie, err := trie.NewSecure(acc.Root, triedb)
- if err != nil {
- t.Fatal(err)
- }
- storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
- for storeIt.Next() {
- slots++
- }
- if err := storeIt.Err; err != nil {
- t.Fatal(err)
- }
- }
- }
- if err := accIt.Err; err != nil {
- t.Fatal(err)
- }
- t.Logf("accounts: %d, slots: %d", accounts, slots)
- }
- // TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
- // state healing
- func TestSyncAccountPerformance(t *testing.T) {
- // Set the account concurrency to 1. This _should_ result in the
- // range root becoming correct, and there should be no healing needed
- defer func(old int) { accountConcurrency = old }(accountConcurrency)
- accountConcurrency = 1
- var (
- once sync.Once
- cancel = make(chan struct{})
- term = func() {
- once.Do(func() {
- close(cancel)
- })
- }
- )
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- src := mkSource("source")
- syncer := setupSyncer(src)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
- // The trie root will always be requested, since it is added when the snap
- // sync cycle starts. When popping the queue, we do not look it up again.
- // Doing so would bring this number down to zero in this artificial testcase,
- // but only add extra IO for no reason in practice.
- if have, want := src.nTrienodeRequests, 1; have != want {
- fmt.Print(src.Stats())
- t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
- }
- }
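- // TestSlotEstimation exercises estimateRemainingSlots with various 'last key'
- // positions, checking the extrapolated number of remaining slots.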
- func TestSlotEstimation(t *testing.T) {
- for i, tc := range []struct {
- last common.Hash
- count int
- want uint64
- }{
- {
- // Half the space
- common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
- 100,
- 100,
- },
- {
- // 1 / 16th
- common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
- 100,
- 1500,
- },
- {
- // Bit more than 1 / 16th
- common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
- 100,
- 1499,
- },
- {
- // Almost everything
- common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
- 100,
- 6,
- },
- {
- // Almost nothing -- should lead to error
- common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
- 1,
- 0,
- },
- {
- // Nothing -- should lead to error
- common.Hash{},
- 100,
- 0,
- },
- } {
- have, _ := estimateRemainingSlots(tc.count, tc.last)
- if want := tc.want; have != want {
- t.Errorf("test %d: have %d want %d", i, have, want)
- }
- }
- }