- // Copyright 2020 The go-ethereum Authors
- // This file is part of the go-ethereum library.
- //
- // The go-ethereum library is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Lesser General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // The go-ethereum library is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Lesser General Public License for more details.
- //
- // You should have received a copy of the GNU Lesser General Public License
- // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- package snap
- import (
- "bytes"
- "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/big"
- "sort"
- "testing"
- "time"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/light"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
- "golang.org/x/crypto/sha3"
- )
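- // TestHashing cross-checks the legacy Sum-based Keccak hashing against the
- // allocation-free KeccakState Read variant, ensuring both produce identical
- // digests.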
- func TestHashing(t *testing.T) {
- t.Parallel()
- var bytecodes = make([][]byte, 10)
- for i := 0; i < len(bytecodes); i++ {
- buf := make([]byte, 100)
- rand.Read(buf)
- bytecodes[i] = buf
- }
- var want, got string
- var old = func() {
- hasher := sha3.NewLegacyKeccak256()
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hash := hasher.Sum(nil)
- want = fmt.Sprintf("%v\n%v", want, hash)
- }
- }
- var new = func() {
- hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
- var hash = make([]byte, 32)
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Read(hash)
- got = fmt.Sprintf("%v\n%v", got, hash)
- }
- }
- old()
- new()
- if want != got {
- t.Errorf("want\n%v\ngot\n%v\n", want, got)
- }
- }
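- // The two closures above differ only in how the digest is extracted:
- // hash.Sum(nil) allocates a fresh slice on every call, whereas a
- // crypto.KeccakState is also an io.Reader, so Read squeezes the sponge
- // directly into a caller-owned buffer. A minimal sketch of the
- // allocation-free pattern (the helper name is ours, not part of this package):
- func hashNoAlloc(hasher crypto.KeccakState, data, out []byte) {
- hasher.Reset()
- hasher.Write(data)
- hasher.Read(out) // fills out in place; no per-call allocation
- }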
- func BenchmarkHashing(b *testing.B) {
- var bytecodes = make([][]byte, 10000)
- for i := 0; i < len(bytecodes); i++ {
- buf := make([]byte, 100)
- rand.Read(buf)
- bytecodes[i] = buf
- }
- var old = func() {
- hasher := sha3.NewLegacyKeccak256()
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Sum(nil)
- }
- }
- var new = func() {
- hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
- var hash = make([]byte, 32)
- for i := 0; i < len(bytecodes); i++ {
- hasher.Reset()
- hasher.Write(bytecodes[i])
- hasher.Read(hash)
- }
- }
- b.Run("old", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- old()
- }
- })
- b.Run("new", func(b *testing.B) {
- b.ReportAllocs()
- for i := 0; i < b.N; i++ {
- new()
- }
- })
- }
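- // Hedged usage note: assuming this file lives under eth/protocols/snap in the
- // go-ethereum tree, the comparison can be run with something like
- // go test -run=NONE -bench=BenchmarkHashing -benchmem ./eth/protocols/snap
- // where -benchmem surfaces the allocation difference between the two variants.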
- type storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error
- type accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error
- type trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error
- type codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error
- type testPeer struct {
- id string
- test *testing.T
- remote *Syncer
- logger log.Logger
- accountTrie *trie.Trie
- accountValues entrySlice
- storageTries map[common.Hash]*trie.Trie
- storageValues map[common.Hash]entrySlice
- accountRequestHandler accountHandlerFunc
- storageRequestHandler storageHandlerFunc
- trieRequestHandler trieHandlerFunc
- codeRequestHandler codeHandlerFunc
- cancelCh chan struct{}
- }
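- // The four request handlers are deliberately stored as swappable function
- // fields: each test below overrides one or more of them to simulate empty,
- // non-responsive, starving, or corrupt peers, while newTestPeer wires in the
- // well-behaved defaults.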
- func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer {
- peer := &testPeer{
- id: id,
- test: t,
- logger: log.New("id", id),
- accountRequestHandler: defaultAccountRequestHandler,
- trieRequestHandler: defaultTrieRequestHandler,
- storageRequestHandler: defaultStorageRequestHandler,
- codeRequestHandler: defaultCodeRequestHandler,
- cancelCh: cancelCh,
- }
- //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
- //peer.logger.SetHandler(stderrHandler)
- return peer
- }
- func (t *testPeer) ID() string { return t.id }
- func (t *testPeer) Log() log.Logger { return t.logger }
- func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
- t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
- go t.accountRequestHandler(t, id, root, origin, bytes)
- return nil
- }
- func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
- t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
- go t.trieRequestHandler(t, id, root, paths, bytes)
- return nil
- }
- func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
- if len(accounts) == 1 && origin != nil {
- t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
- } else {
- t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
- }
- go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)
- return nil
- }
- func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
- t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
- go t.codeRequestHandler(t, id, hashes, bytes)
- return nil
- }
- // defaultTrieRequestHandler is a well-behaved handler for trie healing requests
- func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- // Pass the response
- var nodes [][]byte
- for _, pathset := range paths {
- switch len(pathset) {
- case 1:
- blob, _, err := t.accountTrie.TryGetNode(pathset[0])
- if err != nil {
- t.logger.Info("Error handling req", "error", err)
- break
- }
- nodes = append(nodes, blob)
- default:
- account := t.storageTries[(common.BytesToHash(pathset[0]))]
- for _, path := range pathset[1:] {
- blob, _, err := account.TryGetNode(path)
- if err != nil {
- t.logger.Info("Error handling req", "error", err)
- break
- }
- nodes = append(nodes, blob)
- }
- }
- }
- t.remote.OnTrieNodes(t, requestId, nodes)
- return nil
- }
- // defaultAccountRequestHandler is a well-behaved handler for AccountRangeRequests
- func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, cap uint64) error {
- keys, vals, proofs := createAccountRequestResponse(t, root, origin, cap)
- if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {
- t.logger.Error("remote error on delivery", "error", err)
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- t.remote.Unregister(t.id)
- close(t.cancelCh)
- return err
- }
- return nil
- }
- func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
- var size uint64
- for _, entry := range t.accountValues {
- if size > cap {
- break
- }
- if bytes.Compare(origin[:], entry.k) <= 0 {
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- size += uint64(32 + len(entry.v))
- }
- }
- // Unless we send the entire trie, we need to supply proofs.
- // Actually, we need to supply proofs either way! This seems to be an
- // implementation quirk in go-ethereum.
- proof := light.NewNodeSet()
- if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove inexistence of origin", "origin", origin,
- "error", err)
- }
- if len(keys) > 0 {
- lastK := (keys[len(keys)-1])[:]
- if err := t.accountTrie.Prove(lastK, 0, proof); err != nil {
- t.logger.Error("Could not prove last item",
- "error", err)
- }
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- return keys, vals, proofs
- }
- // defaultStorageRequestHandler is a well-behaved storage request handler
- func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {
- hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)
- if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
- t.logger.Error("remote error on delivery", "error", err)
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- close(t.cancelCh)
- }
- return nil
- }
- func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes {
- bytecodes = append(bytecodes, getCode(h))
- }
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.logger.Error("remote error on delivery", "error", err)
- t.test.Errorf("Remote side rejected our delivery: %v", err)
- close(t.cancelCh)
- }
- return nil
- }
- func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
- var (
- size uint64
- limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
- )
- if len(bLimit) > 0 {
- limit = common.BytesToHash(bLimit)
- }
- var origin common.Hash
- if len(bOrigin) > 0 {
- origin = common.BytesToHash(bOrigin)
- }
- var limitExceeded bool // the byte-size cap or the hash-range limit was hit
- var incomplete bool // the response does not cover the full range; edge proofs are needed
- for _, account := range accounts {
- var keys []common.Hash
- var vals [][]byte
- for _, entry := range t.storageValues[account] {
- if limitExceeded {
- incomplete = true
- break
- }
- if bytes.Compare(entry.k, origin[:]) < 0 {
- incomplete = true
- continue
- }
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- size += uint64(32 + len(entry.v))
- if bytes.Compare(entry.k, limit[:]) >= 0 {
- limitExceeded = true
- }
- if size > max {
- limitExceeded = true
- }
- }
- hashes = append(hashes, keys)
- slots = append(slots, vals)
- if incomplete {
- // If we're aborting, we need to prove the first and last item
- // This terminates the response (and thus the loop)
- proof := light.NewNodeSet()
- stTrie := t.storageTries[account]
- // Here's a potential gotcha: when constructing the proof, we cannot
- // use the 'origin' slice directly, but must use the full 32-byte
- // hash form.
- if err := stTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove inexistence of origin", "origin", origin,
- "error", err)
- }
- if len(keys) > 0 {
- lastK := (keys[len(keys)-1])[:]
- if err := stTrie.Prove(lastK, 0, proof); err != nil {
- t.logger.Error("Could not prove last item", "error", err)
- }
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- break
- }
- }
- return hashes, slots, proofs
- }
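- // Note the response shape: hashes and slots are parallel slices with one
- // inner slice per requested account, and the response is truncated at the
- // first account whose range is incomplete, with edge proofs attached only
- // for that final, partial range.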
- // emptyRequestAccountRangeFn answers AccountRangeRequests with an empty response, effectively rejecting them
- func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
- var proofs [][]byte
- var keys []common.Hash
- var vals [][]byte
- t.remote.OnAccounts(t, requestId, keys, vals, proofs)
- return nil
- }
- func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
- return nil
- }
- func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- var nodes [][]byte
- t.remote.OnTrieNodes(t, requestId, nodes)
- return nil
- }
- func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {
- return nil
- }
- func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- var hashes [][]common.Hash
- var slots [][][]byte
- var proofs [][]byte
- t.remote.OnStorage(t, requestId, hashes, slots, proofs)
- return nil
- }
- func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- return nil
- }
- //func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- // var bytecodes [][]byte
- // t.remote.OnByteCodes(t, id, bytecodes)
- // return nil
- //}
- func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes {
- // Send back the hashes themselves instead of the underlying bytecode
- bytecodes = append(bytecodes, h[:])
- }
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.logger.Error("remote error on delivery", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- var bytecodes [][]byte
- for _, h := range hashes[:1] {
- bytecodes = append(bytecodes, getCode(h))
- }
- if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {
- t.logger.Error("remote error on delivery", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- // starvingStorageRequestHandler is a somewhat well-behaved storage handler, but it caps the returned results to a very small size
- func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)
- }
- func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
- return defaultAccountRequestHandler(t, requestId, root, origin, 500)
- }
- //func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
- // return defaultAccountRequestHandler(t, requestId-1, root, origin, 500)
- //}
- func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
- hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, cap)
- if len(proofs) > 0 {
- proofs = proofs[1:]
- }
- if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- // corruptStorageRequestHandler doesn't provide good proofs
- func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)
- if len(proofs) > 0 {
- proofs = proofs[1:]
- }
- if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
- hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)
- if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {
- t.logger.Info("remote error on delivery (as expected)", "error", err)
- // Mimic the real-life handler, which drops a peer on errors
- t.remote.Unregister(t.id)
- }
- return nil
- }
- // TestSyncBloatedProof tests a scenario where we provide only _one_ value, but
- // also ship the entire trie inside the proof. If the attack is successful,
- // the remote side does not make any follow-up requests
- func TestSyncBloatedProof(t *testing.T) {
- t.Parallel()
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- cancel := make(chan struct{})
- source := newTestPeer("source", t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
- var proofs [][]byte
- var keys []common.Hash
- var vals [][]byte
- // The values
- for _, entry := range t.accountValues {
- if bytes.Compare(origin[:], entry.k) <= 0 {
- keys = append(keys, common.BytesToHash(entry.k))
- vals = append(vals, entry.v)
- }
- }
- // The proofs
- proof := light.NewNodeSet()
- if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {
- t.logger.Error("Could not prove origin", "origin", origin, "error", err)
- }
- // The bloat: add proof of every single element
- for _, entry := range t.accountValues {
- if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {
- t.logger.Error("Could not prove item", "error", err)
- }
- }
- // And remove one item (the second) from the elements, so the data is incomplete
- if len(keys) > 2 {
- keys = append(keys[:1], keys[2:]...)
- vals = append(vals[:1], vals[2:]...)
- }
- for _, blob := range proof.NodeList() {
- proofs = append(proofs, blob)
- }
- if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
- t.logger.Info("remote error on delivery", "error", err)
- // This is actually correct, signal to exit the test successfully
- close(t.cancelCh)
- }
- return nil
- }
- syncer := setupSyncer(source)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
- t.Fatal("No error returned from incomplete/cancelled sync")
- }
- }
- func setupSyncer(peers ...*testPeer) *Syncer {
- stateDb := rawdb.NewMemoryDatabase()
- syncer := NewSyncer(stateDb, trie.NewSyncBloom(1, stateDb))
- for _, peer := range peers {
- syncer.Register(peer)
- peer.remote = syncer
- }
- return syncer
- }
- // TestSync tests a basic sync with one peer
- func TestSync(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"))
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- }
- // TestSyncTinyTriePanic tests a basic sync with one peer and a tiny trie,
- // which previously caused a panic within the prover
- func TestSyncTinyTriePanic(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a"),
- )
- done := checkStall(t, cancel)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- }
- // TestMultiSync tests a basic sync with multiple peers
- func TestMultiSync(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- }
- // TestSyncWithStorage tests basic sync using accounts + storage + code
- func TestSyncWithStorage(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true)
- mkSource := func(name string) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- return source
- }
- syncer := setupSyncer(mkSource("sourceA"))
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- }
- // TestMultiSyncManyUseless contains one good peer, and many that don't return anything valuable at all
- func TestMultiSyncManyUseless(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
- mkSource := func(name string, a, b, c bool) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !a {
- source.accountRequestHandler = emptyRequestAccountRangeFn
- }
- if !b {
- source.storageRequestHandler = emptyStorageRequestHandler
- }
- if !c {
- source.trieRequestHandler = emptyTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- }
- // TestMultiSyncManyUselessWithLowTimeout is the same scenario as above, but
- // with a very aggressive request timeout: one good peer among many that
- // return nothing valuable at all
- func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
- // We're setting the timeout very low to increase the chance of it being
- // triggered. This previously caused a panic, when a response arrived at the
- // same moment as the timeout fired.
- defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
- requestTimeout = time.Millisecond
- cancel := make(chan struct{})
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
- mkSource := func(name string, a, b, c bool) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !a {
- source.accountRequestHandler = emptyRequestAccountRangeFn
- }
- if !b {
- source.storageRequestHandler = emptyStorageRequestHandler
- }
- if !c {
- source.trieRequestHandler = emptyTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- }
- // TestMultiSyncManyUnresponsive contains one good peer, and many that don't respond at all
- func TestMultiSyncManyUnresponsive(t *testing.T) {
- // We're setting the timeout very low to make the test run a bit faster
- defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
- requestTimeout = time.Millisecond
- cancel := make(chan struct{})
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
- mkSource := func(name string, a, b, c bool) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if !a {
- source.accountRequestHandler = nonResponsiveRequestAccountRangeFn
- }
- if !b {
- source.storageRequestHandler = nonResponsiveStorageRequestHandler
- }
- if !c {
- source.trieRequestHandler = nonResponsiveTrieRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("full", true, true, true),
- mkSource("noAccounts", false, true, true),
- mkSource("noStorage", true, false, true),
- mkSource("noTrie", true, true, false),
- )
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- }
- func checkStall(t *testing.T, cancel chan struct{}) chan struct{} {
- testDone := make(chan struct{})
- go func() {
- select {
- case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much
- t.Log("Sync stalled")
- close(cancel)
- case <-testDone:
- return
- }
- }()
- return testDone
- }
- // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
- // consistently returning very small results
- func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, slow bool) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- if slow {
- source.accountRequestHandler = starvingAccountRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", false),
- mkSource("nice-b", false),
- mkSource("nice-c", false),
- mkSource("capped", true),
- )
- done := checkStall(t, cancel)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- }
- // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
- // code requests properly.
- func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.codeRequestHandler = codeFn
- return source
- }
- // One is capped, one is corrupt. If we don't use a capped one, there's a 50%
- // chance that the full set of requested codes is sent only to the
- // non-corrupt peer, which delivers everything in one go and renders the
- // test moot
- syncer := setupSyncer(
- mkSource("capped", cappedCodeRequestHandler),
- mkSource("corrupt", corruptCodeRequestHandler),
- )
- done := checkStall(t, cancel)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- }
- func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.accountRequestHandler = accFn
- return source
- }
- // One is well-behaved, one is corrupt. As in the bytecode test above, the
- // pairing ensures that the corrupt delivery path is actually exercised,
- // rather than the good peer serving everything in one go and making the
- // test moot
- syncer := setupSyncer(
- mkSource("capped", defaultAccountRequestHandler),
- mkSource("corrupt", corruptAccountRequestHandler),
- )
- done := checkStall(t, cancel)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- }
- // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers bytecodes
- // one by one
- func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
- mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.codeRequestHandler = codeFn
- return source
- }
- // Count how many times the capped handler is invoked. There are only 8 unique
- // code hashes, but deduplication is per request batch, so expect more than 8
- // calls (see the threshold check below)
- var counter int
- syncer := setupSyncer(
- mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
- counter++
- return cappedCodeRequestHandler(t, id, hashes, max)
- }),
- )
- done := checkStall(t, cancel)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- // There are only 8 unique hashes, and 3K accounts. However, the code
- // deduplication is per request batch. If it were a perfect global dedup,
- // we would expect only 8 requests. If there were no dedup, there would be
- // 3k requests.
- // We expect somewhere below 100 requests for these 8 unique hashes.
- if threshold := 100; counter > threshold {
- t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
- }
- }
- // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
- // consistently returning very small results
- func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false)
- mkSource := func(name string, slow bool) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- if slow {
- source.storageRequestHandler = starvingStorageRequestHandler
- }
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", false),
- mkSource("slow", true),
- )
- done := checkStall(t, cancel)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- }
- // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
- // sometimes sending bad proofs
- func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
- mkSource := func(name string, handler storageHandlerFunc) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- source.storageRequestHandler = handler
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", defaultStorageRequestHandler),
- mkSource("nice-b", defaultStorageRequestHandler),
- mkSource("nice-c", defaultStorageRequestHandler),
- mkSource("corrupt", corruptStorageRequestHandler),
- )
- done := checkStall(t, cancel)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- }
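- // TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage,
- // where one peer delivers storage ranges without any accompanying proofs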
- func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
- t.Parallel()
- cancel := make(chan struct{})
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
- mkSource := func(name string, handler storageHandlerFunc) *testPeer {
- source := newTestPeer(name, t, cancel)
- source.accountTrie = sourceAccountTrie
- source.accountValues = elems
- source.storageTries = storageTries
- source.storageValues = storageElems
- source.storageRequestHandler = handler
- return source
- }
- syncer := setupSyncer(
- mkSource("nice-a", defaultStorageRequestHandler),
- mkSource("nice-b", defaultStorageRequestHandler),
- mkSource("nice-c", defaultStorageRequestHandler),
- mkSource("corrupt", noProofStorageRequestHandler),
- )
- done := checkStall(t, cancel)
- if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
- t.Fatalf("sync failed: %v", err)
- }
- close(done)
- }
- type kv struct {
- k, v []byte
- t bool
- }
- // Some helpers for sorting
- type entrySlice []*kv
- func (p entrySlice) Len() int { return len(p) }
- func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }
- func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
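- // As a design note: since Go 1.8 the same ordering could be expressed inline
- // with sort.Slice, e.g.
- // sort.Slice(entries, func(i, j int) bool { return bytes.Compare(entries[i].k, entries[j].k) < 0 })
- // but the named entrySlice type is kept here since it doubles as the value
- // type of the storage maps above.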
- func key32(i uint64) []byte {
- key := make([]byte, 32)
- binary.LittleEndian.PutUint64(key, i)
- return key
- }
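- // Example of key32 above: key32(1) yields 0x01 followed by 31 zero bytes,
- // since the counter is written little-endian into the first 8 bytes of the
- // 32-byte key.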
- var (
- codehashes = []common.Hash{
- crypto.Keccak256Hash([]byte{0}),
- crypto.Keccak256Hash([]byte{1}),
- crypto.Keccak256Hash([]byte{2}),
- crypto.Keccak256Hash([]byte{3}),
- crypto.Keccak256Hash([]byte{4}),
- crypto.Keccak256Hash([]byte{5}),
- crypto.Keccak256Hash([]byte{6}),
- crypto.Keccak256Hash([]byte{7}),
- }
- )
- // getACodeHash returns one of the eight precomputed code hashes, picked deterministically by i modulo 8
- func getACodeHash(i uint64) []byte {
- h := codehashes[int(i)%len(codehashes)]
- return common.CopyBytes(h[:])
- }
- // getCode is a convenience function to look up the bytecode corresponding to a code hash
- func getCode(hash common.Hash) []byte {
- if hash == emptyCode {
- return nil
- }
- for i, h := range codehashes {
- if h == hash {
- return []byte{byte(i)}
- }
- }
- return nil
- }
- // makeAccountTrieNoStorage returns an account trie, along with its sorted leaves
- func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
- db := trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie, _ := trie.New(common.Hash{}, db)
- var entries entrySlice
- for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(state.Account{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: emptyRoot,
- CodeHash: getACodeHash(i),
- })
- key := key32(i)
- elem := &kv{key, value, false}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- // Push to disk layer
- accTrie.Commit(nil)
- return accTrie, entries
- }
- // makeAccountTrieWithStorage returns an account trie whose accounts all
- // reference a shared storage trie, along with the sorted leaves of each
- func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice,
- map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
- var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie, _ = trie.New(common.Hash{}, db)
- entries entrySlice
- storageTries = make(map[common.Hash]*trie.Trie)
- storageEntries = make(map[common.Hash]entrySlice)
- )
- // Make a storage trie which we reuse for the whole lot
- stTrie, stEntries := makeStorageTrie(slots, db)
- stRoot := stTrie.Hash()
- // Create n accounts in the trie
- for i := uint64(1); i <= uint64(accounts); i++ {
- key := key32(i)
- codehash := emptyCode[:]
- if code {
- codehash = getACodeHash(i)
- }
- value, _ := rlp.EncodeToBytes(state.Account{
- Nonce: i,
- Balance: big.NewInt(int64(i)),
- Root: stRoot,
- CodeHash: codehash,
- })
- elem := &kv{key, value, false}
- accTrie.Update(elem.k, elem.v)
- entries = append(entries, elem)
- // we reuse the same one for all accounts
- storageTries[common.BytesToHash(key)] = stTrie
- storageEntries[common.BytesToHash(key)] = stEntries
- }
- sort.Sort(entries)
- stTrie.Commit(nil)
- accTrie.Commit(nil)
- return accTrie, entries, storageTries, storageEntries
- }
- // makeStorageTrie fills a storage trie with n items, returning the
- // not-yet-committed trie and the sorted entries
- func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
- tr, _ := trie.New(common.Hash{}, db) // 'tr' avoids shadowing the trie package
- var entries entrySlice
- for i := uint64(1); i <= uint64(n); i++ {
- // store 'i' at slot 'i'; values are RLP-encoded with their leading zero
- // bytes trimmed, mirroring how storage slot values are stored
- slotValue := key32(i)
- rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue))
- slotKey := key32(i)
- key := crypto.Keccak256Hash(slotKey)
- elem := &kv{key[:], rlpSlotValue, false}
- tr.Update(elem.k, elem.v)
- entries = append(entries, elem)
- }
- sort.Sort(entries)
- return tr, entries
- }
|