Browse source

fix conflicts

yutianwu 4 years ago
parent
commit
334b6adf14
100 changed files with 4827 additions and 1932 deletions
  1. .github/CODEOWNERS (+1 -0)
  2. .github/ISSUE_TEMPLATE/bug.md (+2 -0)
  3. Makefile (+5 -6)
  4. README.md (+11 -4)
  5. accounts/accounts.go (+2 -2)
  6. accounts/external/backend.go (+14 -0)
  7. accounts/url.go (+1 -1)
  8. appveyor.yml (+14 -26)
  9. build/checksums.txt (+30 -28)
  10. build/ci.go (+67 -142)
  11. build/nsis.envvarupdate.nsh (+1 -1)
  12. cmd/devp2p/README.md (+25 -2)
  13. cmd/devp2p/dns_route53.go (+44 -11)
  14. cmd/devp2p/dnscmd.go (+30 -5)
  15. cmd/devp2p/internal/ethtest/chain.go (+23 -6)
  16. cmd/devp2p/internal/ethtest/eth66_suite.go (+175 -50)
  17. cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go (+103 -44)
  18. cmd/devp2p/internal/ethtest/large.go (+1 -1)
  19. cmd/devp2p/internal/ethtest/suite.go (+81 -47)
  20. cmd/devp2p/internal/ethtest/suite_test.go (+107 -0)
  21. cmd/devp2p/internal/ethtest/transaction.go (+133 -25)
  22. cmd/devp2p/internal/ethtest/types.go (+22 -6)
  23. cmd/devp2p/nodeset.go (+27 -1)
  24. cmd/devp2p/nodesetcmd.go (+70 -1)
  25. cmd/evm/README.md (+3 -3)
  26. cmd/evm/testdata/8/readme.md (+3 -3)
  27. cmd/faucet/faucet.go (+23 -7)
  28. cmd/geth/config.go (+12 -1)
  29. cmd/geth/dbcmd.go (+58 -0)
  30. cmd/geth/main.go (+1 -0)
  31. cmd/geth/snapshot.go (+19 -19)
  32. cmd/geth/usage.go (+1 -0)
  33. cmd/puppeth/ssh.go (+38 -24)
  34. cmd/utils/flags.go (+15 -11)
  35. common/types.go (+1 -1)
  36. consensus/ethash/consensus.go (+20 -6)
  37. consensus/ethash/ethash.go (+7 -6)
  38. core/blockchain.go (+35 -75)
  39. core/blockchain_snapshot_test.go (+7 -313)
  40. core/blockchain_test.go (+2 -2)
  41. core/chain_indexer.go (+1 -1)
  42. core/genesis_test.go (+35 -0)
  43. core/headerchain.go (+1 -1)
  44. core/rawdb/accessors_chain.go (+1 -1)
  45. core/rawdb/accessors_snapshot.go (+20 -0)
  46. core/rawdb/database.go (+3 -3)
  47. core/rawdb/database_test.go (+17 -0)
  48. core/rawdb/freezer.go (+1 -1)
  49. core/rawdb/freezer_table.go (+67 -44)
  50. core/rawdb/freezer_table_test.go (+57 -5)
  51. core/rawdb/schema.go (+5 -2)
  52. core/state/snapshot/conversion.go (+1 -1)
  53. core/state/snapshot/generate.go (+556 -131)
  54. core/state/snapshot/generate_test.go (+646 -3)
  55. core/state/snapshot/journal.go (+15 -137)
  56. core/state/snapshot/snapshot.go (+63 -42)
  57. core/state/snapshot/wipe.go (+24 -8)
  58. core/state/state_test.go (+6 -8)
  59. core/state/statedb.go (+1 -1)
  60. core/state/statedb_test.go (+3 -3)
  61. core/state/sync.go (+19 -5)
  62. core/state/sync_test.go (+6 -6)
  63. core/types/block.go (+0 -32)
  64. core/types/transaction_marshalling.go (+16 -0)
  65. core/vm/errors.go (+0 -5)
  66. core/vm/evm.go (+8 -9)
  67. core/vm/instructions_test.go (+35 -34)
  68. core/vm/interpreter.go (+2 -2)
  69. core/vm/operations_acl.go (+19 -5)
  70. core/vm/runtime/runtime_test.go (+80 -0)
  71. eth/api.go (+5 -0)
  72. eth/backend.go (+11 -3)
  73. eth/catalyst/api.go (+307 -0)
  74. eth/catalyst/api_test.go (+241 -0)
  75. eth/catalyst/api_types.go (+70 -0)
  76. eth/catalyst/gen_blockparams.go (+46 -0)
  77. eth/catalyst/gen_ed.go (+117 -0)
  78. eth/discovery.go (+0 -11)
  79. eth/downloader/downloader.go (+13 -2)
  80. eth/downloader/downloader_test.go (+154 -179)
  81. eth/downloader/peer.go (+4 -4)
  82. eth/downloader/statesync.go (+1 -1)
  83. eth/fetcher/block_fetcher.go (+6 -2)
  84. eth/filters/api.go (+2 -2)
  85. eth/gasprice/gasprice.go (+3 -0)
  86. eth/handler.go (+1 -5)
  87. eth/handler_eth_test.go (+15 -27)
  88. eth/protocols/eth/handler.go (+17 -36)
  89. eth/protocols/eth/handler_test.go (+91 -24)
  90. eth/protocols/eth/handlers.go (+13 -3)
  91. eth/protocols/eth/handshake_test.go (+2 -2)
  92. eth/protocols/eth/peer.go (+28 -7)
  93. eth/protocols/eth/protocol.go (+2 -3)
  94. eth/protocols/eth/tracker.go (+26 -0)
  95. eth/protocols/snap/handler.go (+15 -1)
  96. eth/protocols/snap/peer.go (+7 -0)
  97. eth/protocols/snap/range.go (+80 -0)
  98. eth/protocols/snap/range_test.go (+143 -0)
  99. eth/protocols/snap/sync.go (+354 -248)
  100. eth/protocols/snap/sync_test.go (+111 -2)

+ 1 - 0
.github/CODEOWNERS

@@ -9,6 +9,7 @@ cmd/puppeth                     @karalabe
 consensus                       @karalabe
 core/                           @karalabe @holiman @rjl493456442
 eth/                            @karalabe @holiman @rjl493456442
+eth/catalyst/                   @gballet
 graphql/                        @gballet
 les/                            @zsfelfoldi @rjl493456442
 light/                          @zsfelfoldi @rjl493456442

+ 2 - 0
.github/ISSUE_TEMPLATE/bug.md

@@ -26,3 +26,5 @@ Commit hash : (if `develop`)
 ````
 [backtrace]
 ````
+
+When submitting logs: please submit them as text and not screenshots.

+ 5 - 6
Makefile

@@ -26,7 +26,7 @@ android:
 	@echo "Import \"$(GOBIN)/geth.aar\" to use the library."
 	@echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs"
 	@echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc"
-	
+
 ios:
 	$(GORUN) build/ci.go xcode --local
 	@echo "Done building."
@@ -46,12 +46,11 @@ clean:
 # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
 
 devtools:
-	env GOBIN= go get -u golang.org/x/tools/cmd/stringer
-	env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata
-	env GOBIN= go get -u github.com/fjl/gencodec
-	env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go
+	env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
+	env GOBIN= go install github.com/kevinburke/go-bindata/go-bindata@latest
+	env GOBIN= go install github.com/fjl/gencodec@latest
+	env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest
 	env GOBIN= go install ./cmd/abigen
-	@type "npm" 2> /dev/null || echo 'Please install node.js and npm'
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'
 

+ 11 - 4
README.md

@@ -63,7 +63,7 @@ Many of the below are the same as or similar to go-ethereum.
 
 For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth).
 
-Building `geth` requires both a Go (version 1.13 or later) and a C compiler. You can install
+Building `geth` requires both a Go (version 1.14 or later) and a C compiler. You can install
 them using your favourite package manager. Once the dependencies are installed, run
 
 ```shell
@@ -83,11 +83,16 @@ directory.
 
 |    Command    | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                          |
 | :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+<<<<<<< HEAD
 |  **`geth`**   | Main Binance Smart Chain client binary. It is the entry point into the BSC network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It has the same and more RPC and other interface as go-ethereum and can be used by other processes as a gateway into the BSC network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options.          |
+=======
+|  **`geth`**   | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options.          |
+|   `clef`    | Stand-alone signing tool, which can be used as a backend signer for `geth`.  |
+|   `devp2p`    | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
+>>>>>>> v1.10.3
 |   `abigen`    | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. |
 |  `bootnode`   | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks.                                                                                                                                                                                                                                                                 |
 |     `evm`     | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`).                                                                                                                                                                                                                                                                     |
-| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://eth.wiki/json-rpc/API) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details.                                                                                                                                                                                                     |
 |   `rlpdump`   | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`).                                                                                                                                                                                                                                 |
 |   `puppeth`   | a CLI wizard that aids in creating a new Ethereum network.                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           |
 
@@ -116,7 +121,8 @@ This command will:
    causing it to download more data in exchange for avoiding processing the entire history
    of the Ethereum network, which is very CPU intensive.
  * Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
-   (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://web3js.readthedocs.io/en/)
+   (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/en/) 
+   (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
    as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
    This tool is optional and if you leave it out you can always attach to an already running
    `geth` instance with `geth attach`.
@@ -262,7 +268,8 @@ aware of and agree upon. This consists of a small JSON file (e.g. call it `genes
     "byzantiumBlock": 0,
     "constantinopleBlock": 0,
     "petersburgBlock": 0,
-    "istanbulBlock": 0
+    "istanbulBlock": 0,
+    "berlinBlock": 0
   },
   "alloc": {},
   "coinbase": "0x0000000000000000000000000000000000000000",

+ 2 - 2
accounts/accounts.go

@@ -114,7 +114,7 @@ type Wallet interface {
 	SignData(account Account, mimeType string, data []byte) ([]byte, error)
 
 	// SignDataWithPassphrase is identical to SignData, but also takes a password
-	// NOTE: there's an chance that an erroneous call might mistake the two strings, and
+	// NOTE: there's a chance that an erroneous call might mistake the two strings, and
 	// supply password in the mimetype field, or vice versa. Thus, an implementation
 	// should never echo the mimetype or return the mimetype in the error-response
 	SignDataWithPassphrase(account Account, passphrase, mimeType string, data []byte) ([]byte, error)
@@ -128,7 +128,7 @@ type Wallet interface {
 	// a password to decrypt the account, or a PIN code o verify the transaction),
 	// an AuthNeededError instance will be returned, containing infos for the user
 	// about which fields or actions are needed. The user may retry by providing
-	// the needed details via SignHashWithPassphrase, or by other means (e.g. unlock
+	// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
 	// the account in a keystore).
 	//
 	// This method should return the signature in 'canonical' format, with v 0 or 1

+ 14 - 0
accounts/external/backend.go

@@ -212,6 +212,20 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
 		To:       to,
 		From:     common.NewMixedcaseAddress(account.Address),
 	}
+	// We should request the default chain id that we're operating with
+	// (the chain we're executing on)
+	if chainID != nil {
+		args.ChainID = (*hexutil.Big)(chainID)
+	}
+	// However, if the user asked for a particular chain id, then we should
+	// use that instead.
+	if tx.Type() != types.LegacyTxType && tx.ChainId() != nil {
+		args.ChainID = (*hexutil.Big)(tx.ChainId())
+	}
+	if tx.Type() == types.AccessListTxType {
+		accessList := tx.AccessList()
+		args.AccessList = &accessList
+	}
 	var res signTransactionResult
 	if err := api.client.Call(&res, "account_signTransaction", args); err != nil {
 		return nil, err
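
For context on the access-list branch above, a minimal sketch of constructing an EIP-2930 transaction whose own chain ID and access list the signer args would pick up (all values are illustrative, not from this commit):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	to := common.HexToAddress("0x0000000000000000000000000000000000000001") // illustrative
	tx := types.NewTx(&types.AccessListTx{
		ChainID:  big.NewInt(56), // non-legacy txs carry their own chain id
		Nonce:    0,
		GasPrice: big.NewInt(1),
		Gas:      21000,
		To:       &to,
		Value:    big.NewInt(0),
		AccessList: types.AccessList{
			{Address: to, StorageKeys: []common.Hash{{}}},
		},
	})
	// A non-legacy type with a non-nil ChainId overrides the default above.
	fmt.Println(tx.Type() == types.AccessListTxType, tx.ChainId())
}
```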

+ 1 - 1
accounts/url.go

@@ -64,7 +64,7 @@ func (u URL) String() string {
 func (u URL) TerminalString() string {
 	url := u.String()
 	if len(url) > 32 {
-		return url[:31] + "…"
+		return url[:31] + ".."
 	}
 	return url
 }

+ 14 - 26
appveyor.yml

@@ -1,41 +1,29 @@
-os: Visual Studio 2015
-
-# Clone directly into GOPATH.
-clone_folder: C:\gopath\src\github.com\ethereum\go-ethereum
+os: Visual Studio 2019
 clone_depth: 5
 version: "{branch}.{build}"
 environment:
-  global:
-    GO111MODULE: on
-    GOPATH: C:\gopath
-    CC: gcc.exe
   matrix:
+    # We use gcc from MSYS2 because it is the most recent compiler version available on
+    # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is
+    # contained in PATH.
     - GETH_ARCH: amd64
-      MSYS2_ARCH: x86_64
-      MSYS2_BITS: 64
-      MSYSTEM: MINGW64
-      PATH: C:\msys64\mingw64\bin\;C:\Program Files (x86)\NSIS\;%PATH%
+      GETH_CC: C:\msys64\mingw64\bin\gcc.exe
+      PATH: C:\msys64\mingw64\bin;C:\Program Files (x86)\NSIS\;%PATH%
     - GETH_ARCH: 386
-      MSYS2_ARCH: i686
-      MSYS2_BITS: 32
-      MSYSTEM: MINGW32
-      PATH: C:\msys64\mingw32\bin\;C:\Program Files (x86)\NSIS\;%PATH%
+      GETH_CC: C:\msys64\mingw32\bin\gcc.exe
+      PATH: C:\msys64\mingw32\bin;C:\Program Files (x86)\NSIS\;%PATH%
 
 install:
-  - git submodule update --init
-  - rmdir C:\go /s /q
-  - appveyor DownloadFile https://dl.google.com/go/go1.16.windows-%GETH_ARCH%.zip
-  - 7z x go1.16.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - git submodule update --init --depth 1
   - go version
-  - gcc --version
+  - "%GETH_CC% --version"
 
 build_script:
-  - go run build\ci.go install -dlgo
+  - go run build\ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
 
 after_build:
-  - go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
-  - go run build\ci.go nsis -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
+  - go run build\ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
+  - go run build\ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
 
 test_script:
-  - set CGO_ENABLED=1
-  - go run build\ci.go test -coverage
+  - go run build\ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage

+ 30 - 28
build/checksums.txt

@@ -1,31 +1,33 @@
 # This file contains sha256 checksums of optional build dependencies.
 
-7688063d55656105898f323d90a79a39c378d86fe89ae192eb3b7fc46347c95a  go1.16.src.tar.gz
-6000a9522975d116bf76044967d7e69e04e982e9625330d9a539a8b45395f9a8  go1.16.darwin-amd64.tar.gz
-ea435a1ac6d497b03e367fdfb74b33e961d813883468080f6e239b3b03bea6aa  go1.16.linux-386.tar.gz
-013a489ebb3e24ef3d915abe5b94c3286c070dfe0818d5bca8108f1d6e8440d2  go1.16.linux-amd64.tar.gz
-3770f7eb22d05e25fbee8fb53c2a4e897da043eb83c69b9a14f8d98562cd8098  go1.16.linux-arm64.tar.gz
-d1d9404b1dbd77afa2bdc70934e10fbfcf7d785c372efc29462bb7d83d0a32fd  go1.16.linux-armv6l.tar.gz
-481492a17d42193d471b93b7a06da3555331bd833b76336afc87be820c48933f  go1.16.windows-386.zip
-5cc88fa506b3d5c453c54c3ea218fc8dd05d7362ae1de15bb67986b72089ce93  go1.16.windows-amd64.zip
-d7d6c70b05a7c2f68b48aab5ab8cb5116b8444c9ddad131673b152e7cff7c726  go1.16.freebsd-386.tar.gz
-40b03216f6945fb6883a50604fc7f409a83f62171607229a9c598e701e684f8a  go1.16.freebsd-amd64.tar.gz
-27a1aaa988e930b7932ce459c8a63ad5b3333b3a06b016d87ff289f2a11aacd6  go1.16.linux-ppc64le.tar.gz
-be4c9e4e2cf058efc4e3eb013a760cb989ddc4362f111950c990d1c63b27ccbe  go1.16.linux-s390x.tar.gz
+b298d29de9236ca47a023e382313bcc2d2eed31dfa706b60a04103ce83a71a25  go1.16.3.src.tar.gz
+6bb1cf421f8abc2a9a4e39140b7397cdae6aca3e8d36dcff39a1a77f4f1170ac  go1.16.3.darwin-amd64.tar.gz
+f4e96bbcd5d2d1942f5b55d9e4ab19564da4fad192012f6d7b0b9b055ba4208f  go1.16.3.darwin-arm64.tar.gz
+48b2d1481db756c88c18b1f064dbfc3e265ce4a775a23177ca17e25d13a24c5d  go1.16.3.linux-386.tar.gz
+951a3c7c6ce4e56ad883f97d9db74d3d6d80d5fec77455c6ada6c1f7ac4776d2  go1.16.3.linux-amd64.tar.gz
+566b1d6f17d2bc4ad5f81486f0df44f3088c3ed47a3bec4099d8ed9939e90d5d  go1.16.3.linux-arm64.tar.gz
+0dae30385e3564a557dac7f12a63eedc73543e6da0f6017990e214ce8cc8797c  go1.16.3.linux-armv6l.tar.gz
+a3c16e1531bf9726f47911c4a9ed7cb665a6207a51c44f10ebad4db63b4bcc5a  go1.16.3.windows-386.zip
+a4400345135b36cb7942e52bbaf978b66814738b855eeff8de879a09fd99de7f  go1.16.3.windows-amd64.zip
+31ecd11d497684fa8b0f01ba784590c4c760943665fdc4fe0adaa1405c71736c  go1.16.3.freebsd-386.tar.gz
+ffbd920b309e62e807457b11d80e8c17fefe3ef6de423aaba4b1e270b2ca4c3d  go1.16.3.freebsd-amd64.tar.gz
+5eb046bbbbc7fe2591846a4303884cb5a01abb903e3e61e33459affe7874e811  go1.16.3.linux-ppc64le.tar.gz
+3e8bd7bde533a73fd6fa75b5288678ef397e76c198cfb26b8ae086035383b1cf  go1.16.3.linux-s390x.tar.gz
 
-d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35  golangci-lint-1.27.0-linux-armv7.tar.gz
-bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4  golangci-lint.exe-1.27.0-windows-amd64.zip
-bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4  golangci-lint-1.27.0-windows-amd64.zip
-0e2a57d6ba709440d3ed018ef1037465fa010ed02595829092860e5cf863042e  golangci-lint-1.27.0-freebsd-386.tar.gz
-90205fc42ab5ed0096413e790d88ac9b4ed60f4c47e576d13dc0660f7ed4b013  golangci-lint-1.27.0-linux-arm64.tar.gz
-8d345e4e88520e21c113d81978e89ad77fc5b13bfdf20e5bca86b83fc4261272  golangci-lint-1.27.0-linux-amd64.tar.gz
-cc619634a77f18dc73df2a0725be13116d64328dc35131ca1737a850d6f76a59  golangci-lint-1.27.0-freebsd-armv7.tar.gz
-fe683583cfc9eeec83e498c0d6159d87b5e1919dbe4b6c3b3913089642906069  golangci-lint-1.27.0-linux-s390x.tar.gz
-058f5579bee75bdaacbaf75b75e1369f7ad877fd8b3b145aed17a17545de913e  golangci-lint-1.27.0-freebsd-armv6.tar.gz
-38e1e3dadbe3f56ab62b4de82ee0b88e8fad966d8dfd740a26ef94c2edef9818  golangci-lint-1.27.0-linux-armv6.tar.gz
-071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095  golangci-lint.exe-1.27.0-windows-386.zip
-071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095  golangci-lint-1.27.0-windows-386.zip
-5f37e2b33914ecddb7cad38186ef4ec61d88172fc04f930fa0267c91151ff306  golangci-lint-1.27.0-linux-386.tar.gz
-4d94cfb51fdebeb205f1d5a349ac2b683c30591c5150708073c1c329e15965f0  golangci-lint-1.27.0-freebsd-amd64.tar.gz
-52572ba8ff07d5169c2365d3de3fec26dc55a97522094d13d1596199580fa281  golangci-lint-1.27.0-linux-ppc64le.tar.gz
-3fb1a1683a29c6c0a8cd76135f62b606fbdd538d5a7aeab94af1af70ffdc2fd4  golangci-lint-1.27.0-darwin-amd64.tar.gz
+7e9a47ab540aa3e8472fbf8120d28bed3b9d9cf625b955818e8bc69628d7187c  golangci-lint-1.39.0-darwin-amd64.tar.gz
+574daa2c9c299b01672a6daeb1873b5f12e413cdb6dc0e30f2ff163956778064  golangci-lint-1.39.0-darwin-arm64.tar.gz
+6225f7014987324ab78e9b511f294e3f25be013728283c33918c67c8576d543e  golangci-lint-1.39.0-freebsd-386.tar.gz
+6b3e76e1e5eaf0159411c8e2727f8d533989d3bb19f10e9caa6e0b9619ee267d  golangci-lint-1.39.0-freebsd-amd64.tar.gz
+a301cacfff87ed9b00313d95278533c25a4527a06b040a17d969b4b7e1b8a90d  golangci-lint-1.39.0-freebsd-armv7.tar.gz
+25bfd96a29c3112f508d5e4fc860dbad7afce657233c343acfa20715717d51e7  golangci-lint-1.39.0-freebsd-armv6.tar.gz
+9687e4ff15545cfc722b0e46107a94195166a505023b48a316579af25ad09505  golangci-lint-1.39.0-linux-armv7.tar.gz
+a7fa7ab2bfc99cbe5e5bcbf5684f5a997f920afbbe2f253d2feb1001d5e3c8b3  golangci-lint-1.39.0-linux-armv6.tar.gz
+c8f9634115beddb4ed9129c1f7ecd4c97c99d07aeef33e3707234097eeb51b7b  golangci-lint-1.39.0-linux-mips64le.tar.gz
+d1234c213b74751f1af413302dde0e9a6d4d29aecef034af7abb07dc1b6e887f  golangci-lint-1.39.0-linux-arm64.tar.gz
+df25d9267168323b163147acb823ab0215a8a3bb6898a4a9320afdfedde66817  golangci-lint-1.39.0-linux-386.tar.gz
+1767e75fba357b7651b1a796d38453558f371c60af805505ec99e166908c04b5  golangci-lint-1.39.0-linux-ppc64le.tar.gz
+25fd75bf3186b3d930ecae10185689968fd18fd8fa6f9f555d6beb04348c20f6  golangci-lint-1.39.0-linux-s390x.tar.gz
+3a73aa7468087caa62673c8adea99b4e4dff846dc72707222db85f8679b40cbf  golangci-lint-1.39.0-linux-amd64.tar.gz
+578caceccf81739bda67dbfec52816709d03608c6878888ecdc0e186a094a41b  golangci-lint-1.39.0-linux-mips64.tar.gz
+494b66ba0e32c8ddf6c4f6b1d05729b110900f6017eda943057e43598c17d7a8  golangci-lint-1.39.0-windows-386.zip
+52ec2e13a3cbb47147244dff8cfc35103563deb76e0459133058086fc35fb2c7  golangci-lint-1.39.0-windows-amd64.zip
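
For context, downloads of these build dependencies are verified against the entries above; a generic SHA-256 check along these lines (a sketch, not the actual `internal/build` implementation) is all that is involved:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"log"
	"os"
)

// verify returns nil if the file's SHA-256 digest matches want (hex-encoded).
func verify(path, want string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(data)
	if got := hex.EncodeToString(sum[:]); got != want {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, want)
	}
	return nil
}

func main() {
	// Illustrative: check a downloaded Go tarball against its checksums.txt entry.
	err := verify("go1.16.3.linux-amd64.tar.gz",
		"951a3c7c6ce4e56ad883f97d9db74d3d6d80d5fec77455c6ada6c1f7ac4776d2")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ok")
}
```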

+ 67 - 142
build/ci.go

@@ -152,7 +152,7 @@ var (
 	// This is the version of go that will be downloaded by
 	//
 	//     go run ci.go install -dlgo
-	dlgoVersion = "1.16"
+	dlgoVersion = "1.16.3"
 )
 
 var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -208,58 +208,25 @@ func doInstall(cmdline []string) {
 		cc   = flag.String("cc", "", "C compiler to cross build with")
 	)
 	flag.CommandLine.Parse(cmdline)
-	env := build.Env()
 
-	// Check local Go version. People regularly open issues about compilation
-	// failure with outdated Go. This should save them the trouble.
-	if !strings.Contains(runtime.Version(), "devel") {
-		// Figure out the minor version number since we can't textually compare (1.10 < 1.9)
-		var minor int
-		fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
-		if minor < 13 {
-			log.Println("You have Go version", runtime.Version())
-			log.Println("go-ethereum requires at least Go version 1.13 and cannot")
-			log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
-			os.Exit(1)
-		}
-	}
-
-	// Choose which go command we're going to use.
-	var gobuild *exec.Cmd
-	if !*dlgo {
-		// Default behavior: use the go version which runs ci.go right now.
-		gobuild = goTool("build")
-	} else {
-		// Download of Go requested. This is for build environments where the
-		// installed version is too old and cannot be upgraded easily.
-		cachedir := filepath.Join("build", "cache")
-		goroot := downloadGo(runtime.GOARCH, runtime.GOOS, cachedir)
-		gobuild = localGoTool(goroot, "build")
+	// Configure the toolchain.
+	tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
+	if *dlgo {
+		csdb := build.MustLoadChecksums("build/checksums.txt")
+		tc.Root = build.DownloadGo(csdb, dlgoVersion)
 	}
 
-	// Configure environment for cross build.
-	if *arch != "" || *arch != runtime.GOARCH {
-		gobuild.Env = append(gobuild.Env, "CGO_ENABLED=1")
-		gobuild.Env = append(gobuild.Env, "GOARCH="+*arch)
-	}
-
-	// Configure C compiler.
-	if *cc != "" {
-		gobuild.Env = append(gobuild.Env, "CC="+*cc)
-	} else if os.Getenv("CC") != "" {
-		gobuild.Env = append(gobuild.Env, "CC="+os.Getenv("CC"))
-	}
+	// Configure the build.
+	env := build.Env()
+	gobuild := tc.Go("build", buildFlags(env)...)
 
 	// arm64 CI builders are memory-constrained and can't handle concurrent builds,
 	// better disable it. This check isn't the best, it should probably
 	// check for something in env instead.
-	if runtime.GOARCH == "arm64" {
+	if env.CI && runtime.GOARCH == "arm64" {
 		gobuild.Args = append(gobuild.Args, "-p", "1")
 	}
 
-	// Put the default settings in.
-	gobuild.Args = append(gobuild.Args, buildFlags(env)...)
-
 	// We use -trimpath to avoid leaking local paths into the built executables.
 	gobuild.Args = append(gobuild.Args, "-trimpath")
 
@@ -301,53 +268,30 @@ func buildFlags(env build.Environment) (flags []string) {
 	return flags
 }
 
-// goTool returns the go tool. This uses the Go version which runs ci.go.
-func goTool(subcmd string, args ...string) *exec.Cmd {
-	cmd := build.GoTool(subcmd, args...)
-	goToolSetEnv(cmd)
-	return cmd
-}
-
-// localGoTool returns the go tool from the given GOROOT.
-func localGoTool(goroot string, subcmd string, args ...string) *exec.Cmd {
-	gotool := filepath.Join(goroot, "bin", "go")
-	cmd := exec.Command(gotool, subcmd)
-	goToolSetEnv(cmd)
-	cmd.Env = append(cmd.Env, "GOROOT="+goroot)
-	cmd.Args = append(cmd.Args, args...)
-	return cmd
-}
-
-// goToolSetEnv forwards the build environment to the go tool.
-func goToolSetEnv(cmd *exec.Cmd) {
-	cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
-	for _, e := range os.Environ() {
-		if strings.HasPrefix(e, "GOBIN=") || strings.HasPrefix(e, "CC=") {
-			continue
-		}
-		cmd.Env = append(cmd.Env, e)
-	}
-}
-
 // Running The Tests
 //
 // "tests" also includes static analysis tools such as vet.
 
 func doTest(cmdline []string) {
-	coverage := flag.Bool("coverage", false, "Whether to record code coverage")
-	verbose := flag.Bool("v", false, "Whether to log verbosely")
+	var (
+		dlgo     = flag.Bool("dlgo", false, "Download Go and build with it")
+		arch     = flag.String("arch", "", "Run tests for given architecture")
+		cc       = flag.String("cc", "", "Sets C compiler binary")
+		coverage = flag.Bool("coverage", false, "Whether to record code coverage")
+		verbose  = flag.Bool("v", false, "Whether to log verbosely")
+	)
 	flag.CommandLine.Parse(cmdline)
-	env := build.Env()
 
-	packages := []string{"./..."}
-	if len(flag.CommandLine.Args()) > 0 {
-		packages = flag.CommandLine.Args()
+	// Configure the toolchain.
+	tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
+	if *dlgo {
+		csdb := build.MustLoadChecksums("build/checksums.txt")
+		tc.Root = build.DownloadGo(csdb, dlgoVersion)
 	}
+	gotest := tc.Go("test")
 
-	// Run the actual tests.
 	// Test a single package at a time. CI builders are slow
 	// and some tests run into timeouts under load.
-	gotest := goTool("test", buildFlags(env)...)
 	gotest.Args = append(gotest.Args, "-p", "1")
 	if *coverage {
 		gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
@@ -356,6 +300,10 @@ func doTest(cmdline []string) {
 		gotest.Args = append(gotest.Args, "-v")
 	}
 
+	packages := []string{"./..."}
+	if len(flag.CommandLine.Args()) > 0 {
+		packages = flag.CommandLine.Args()
+	}
 	gotest.Args = append(gotest.Args, packages...)
 	build.MustRun(gotest)
 }
@@ -379,7 +327,7 @@ func doLint(cmdline []string) {
 
 // downloadLinter downloads and unpacks golangci-lint.
 func downloadLinter(cachedir string) string {
-	const version = "1.27.0"
+	const version = "1.39.0"
 
 	csdb := build.MustLoadChecksums("build/checksums.txt")
 	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
@@ -415,8 +363,7 @@ func doArchive(cmdline []string) {
 	}
 
 	var (
-		env = build.Env()
-
+		env      = build.Env()
 		basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
 		geth     = "geth-" + basegeth + ext
 		alltools = "geth-alltools-" + basegeth + ext
@@ -492,15 +439,15 @@ func archiveUpload(archive string, blobstore string, signer string, signifyVar s
 // skips archiving for some build configurations.
 func maybeSkipArchive(env build.Environment) {
 	if env.IsPullRequest {
-		log.Printf("skipping because this is a PR build")
+		log.Printf("skipping archive creation because this is a PR build")
 		os.Exit(0)
 	}
 	if env.IsCronJob {
-		log.Printf("skipping because this is a cron job")
+		log.Printf("skipping archive creation because this is a cron job")
 		os.Exit(0)
 	}
 	if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") {
-		log.Printf("skipping because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag)
+		log.Printf("skipping archive creation because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag)
 		os.Exit(0)
 	}
 }
@@ -518,6 +465,7 @@ func doDebianSource(cmdline []string) {
 	flag.CommandLine.Parse(cmdline)
 	*workdir = makeWorkdir(*workdir)
 	env := build.Env()
+	tc := new(build.GoToolchain)
 	maybeSkipArchive(env)
 
 	// Import the signing key.
@@ -531,12 +479,12 @@ func doDebianSource(cmdline []string) {
 	gobundle := downloadGoSources(*cachedir)
 
 	// Download all the dependencies needed to build the sources and run the ci script
-	srcdepfetch := goTool("mod", "download")
-	srcdepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
+	srcdepfetch := tc.Go("mod", "download")
+	srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath"))
 	build.MustRun(srcdepfetch)
 
-	cidepfetch := goTool("run", "./build/ci.go")
-	cidepfetch.Env = append(os.Environ(), "GOPATH="+filepath.Join(*workdir, "modgopath"))
+	cidepfetch := tc.Go("run", "./build/ci.go")
+	cidepfetch.Env = append(cidepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath"))
 	cidepfetch.Run() // Command fails, don't care, we only need the deps to start it
 
 	// Create Debian packages and upload them.
@@ -592,41 +540,6 @@ func downloadGoSources(cachedir string) string {
 	return dst
 }
 
-// downloadGo downloads the Go binary distribution and unpacks it into a temporary
-// directory. It returns the GOROOT of the unpacked toolchain.
-func downloadGo(goarch, goos, cachedir string) string {
-	if goarch == "arm" {
-		goarch = "armv6l"
-	}
-
-	csdb := build.MustLoadChecksums("build/checksums.txt")
-	file := fmt.Sprintf("go%s.%s-%s", dlgoVersion, goos, goarch)
-	if goos == "windows" {
-		file += ".zip"
-	} else {
-		file += ".tar.gz"
-	}
-	url := "https://golang.org/dl/" + file
-	dst := filepath.Join(cachedir, file)
-	if err := csdb.DownloadFile(url, dst); err != nil {
-		log.Fatal(err)
-	}
-
-	ucache, err := os.UserCacheDir()
-	if err != nil {
-		log.Fatal(err)
-	}
-	godir := filepath.Join(ucache, fmt.Sprintf("geth-go-%s-%s-%s", dlgoVersion, goos, goarch))
-	if err := build.ExtractArchive(dst, godir); err != nil {
-		log.Fatal(err)
-	}
-	goroot, err := filepath.Abs(filepath.Join(godir, "go"))
-	if err != nil {
-		log.Fatal(err)
-	}
-	return goroot
-}
-
 func ppaUpload(workdir, ppa, sshUser string, files []string) {
 	p := strings.Split(ppa, "/")
 	if len(p) != 2 {
@@ -901,13 +814,23 @@ func doAndroidArchive(cmdline []string) {
 	)
 	flag.CommandLine.Parse(cmdline)
 	env := build.Env()
+	tc := new(build.GoToolchain)
 
 	// Sanity check that the SDK and NDK are installed and set
 	if os.Getenv("ANDROID_HOME") == "" {
 		log.Fatal("Please ensure ANDROID_HOME points to your Android SDK")
 	}
+
+	// Build gomobile.
+	install := tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest")
+	install.Env = append(install.Env)
+	build.MustRun(install)
+
+	// Ensure all dependencies are available. This is required to make
+	// gomobile bind work because it expects go.sum to contain all checksums.
+	build.MustRun(tc.Go("mod", "download"))
+
 	// Build the Android archive and Maven resources
-	build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
 	build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
 
 	if *local {
@@ -1027,10 +950,16 @@ func doXCodeFramework(cmdline []string) {
 	)
 	flag.CommandLine.Parse(cmdline)
 	env := build.Env()
+	tc := new(build.GoToolchain)
+
+	// Build gomobile.
+	build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest"))
+
+	// Ensure all dependencies are available. This is required to make
+	// gomobile bind work because it expects go.sum to contain all checksums.
+	build.MustRun(tc.Go("mod", "download"))
 
 	// Build the iOS XCode framework
-	build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
-	build.MustRun(gomobileTool("init"))
 	bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
 
 	if *local {
@@ -1039,17 +968,17 @@ func doXCodeFramework(cmdline []string) {
 		build.MustRun(bind)
 		return
 	}
+
+	// Create the archive.
+	maybeSkipArchive(env)
 	archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
-	if err := os.Mkdir(archive, os.ModePerm); err != nil {
+	if err := os.MkdirAll(archive, 0755); err != nil {
 		log.Fatal(err)
 	}
 	bind.Dir, _ = filepath.Abs(archive)
 	build.MustRun(bind)
 	build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive)
 
-	// Skip CocoaPods deploy and Azure upload for PR builds
-	maybeSkipArchive(env)
-
 	// Sign and upload the framework to Azure
 	if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil {
 		log.Fatal(err)
@@ -1115,10 +1044,10 @@ func doXgo(cmdline []string) {
 	)
 	flag.CommandLine.Parse(cmdline)
 	env := build.Env()
+	var tc build.GoToolchain
 
 	// Make sure xgo is available for cross compilation
-	gogetxgo := goTool("get", "github.com/karalabe/xgo")
-	build.MustRun(gogetxgo)
+	build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest"))
 
 	// If all tools building is requested, build everything the builder wants
 	args := append(buildFlags(env), flag.Args()...)
@@ -1129,27 +1058,23 @@ func doXgo(cmdline []string) {
 			if strings.HasPrefix(res, GOBIN) {
 				// Binary tool found, cross build it explicitly
 				args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
-				xgo := xgoTool(args)
-				build.MustRun(xgo)
+				build.MustRun(xgoTool(args))
 				args = args[:len(args)-1]
 			}
 		}
 		return
 	}
-	// Otherwise xxecute the explicit cross compilation
+
+	// Otherwise execute the explicit cross compilation
 	path := args[len(args)-1]
 	args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...)
-
-	xgo := xgoTool(args)
-	build.MustRun(xgo)
+	build.MustRun(xgoTool(args))
 }
 
 func xgoTool(args []string) *exec.Cmd {
 	cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
 	cmd.Env = os.Environ()
-	cmd.Env = append(cmd.Env, []string{
-		"GOBIN=" + GOBIN,
-	}...)
+	cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...)
 	return cmd
 }
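
The refactor above replaces the ad-hoc goTool/localGoTool helpers with a `build.GoToolchain` value. As a rough sketch only, inferred from the call sites in this diff (the real helper lives in `internal/build` and may well differ), such a type could look like:

```go
package build

import (
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
)

// GoToolchain selects a go command and its cross-build environment.
// Field names inferred from ci.go call sites: GOARCH/CC come from flags,
// Root is set when -dlgo downloads a pinned toolchain.
type GoToolchain struct {
	Root   string // GOROOT of a downloaded toolchain; empty means the host Go
	GOARCH string // target architecture for cross builds
	CC     string // C compiler to use for cgo
}

// Go returns an exec.Cmd invoking the selected go tool with the given
// subcommand and arguments, with the cross-build environment applied.
func (tc *GoToolchain) Go(subcmd string, args ...string) *exec.Cmd {
	gocmd := "go"
	if tc.Root != "" {
		gocmd = filepath.Join(tc.Root, "bin", "go")
	}
	cmd := exec.Command(gocmd, append([]string{subcmd}, args...)...)
	cmd.Env = os.Environ()
	if tc.Root != "" {
		cmd.Env = append(cmd.Env, "GOROOT="+tc.Root)
	}
	if tc.GOARCH != "" && tc.GOARCH != runtime.GOARCH {
		cmd.Env = append(cmd.Env, "CGO_ENABLED=1", "GOARCH="+tc.GOARCH)
	}
	if tc.CC != "" {
		cmd.Env = append(cmd.Env, "CC="+tc.CC)
	}
	return cmd
}
```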
 

+ 1 - 1
build/nsis.envvarupdate.nsh

@@ -43,7 +43,7 @@
   !ifndef Un${StrFuncName}_INCLUDED
     ${Un${StrFuncName}}
   !endif
-  !define un.${StrFuncName} "${Un${StrFuncName}}"
+  !define un.${StrFuncName} '${Un${StrFuncName}}'
 !macroend
 
 !insertmacro _IncludeStrFunction StrTok

+ 25 - 2
cmd/devp2p/README.md

@@ -30,6 +30,29 @@ Run `devp2p dns to-route53 <directory>` to publish a tree to Amazon Route53.
 
 You can find more information about these commands in the [DNS Discovery Setup Guide][dns-tutorial].
 
+### Node Set Utilities
+
+There are several commands for working with JSON node set files. These files are generated
+by the discovery crawlers and DNS client commands. Node sets are also used as the input of the
+DNS deployer commands.
+
+Run `devp2p nodeset info <nodes.json>` to display statistics of a node set.
+
+Run `devp2p nodeset filter <nodes.json> <filter flags...>` to write a new, filtered node
+set to standard output. The following filters are supported:
+
+- `-limit <N>` limits the output set to N entries, taking the top N nodes by score
+- `-ip <CIDR>` filters nodes by IP subnet
+- `-min-age <duration>` filters nodes by 'first seen' time
+- `-eth-network <mainnet/rinkeby/goerli/ropsten>` filters nodes by "eth" ENR entry
+- `-les-server` filters nodes by LES server support
+- `-snap` filters nodes by snap protocol support
+
+For example, given a node set in `nodes.json`, you could create a filtered set containing
+up to 20 eth mainnet nodes which also support snap sync using this command:
+
+    devp2p nodeset filter nodes.json -eth-network mainnet -snap -limit 20
+
 ### Discovery v4 Utilities
 
 The `devp2p discv4 ...` command family deals with the [Node Discovery v4][discv4]
@@ -94,7 +117,7 @@ To run the eth protocol test suite against your implementation, the node needs t
 geth --datadir <datadir> --nodiscover --nat=none --networkid 19763 --verbosity 5
 ```
 
-Then, run the following command, replacing `<enode>` with the enode of the geth node: 
+Then, run the following command, replacing `<enode>` with the enode of the geth node:
  ```
  devp2p rlpx eth-test <enode> cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json
 ```
@@ -103,7 +126,7 @@ Repeat the above process (re-initialising the node) in order to run the Eth Prot
 
 #### Eth66 Test Suite
 
-The Eth66 test suite is also a conformance test suite for the eth 66 protocol version specifically. 
+The Eth66 test suite is also a conformance test suite for the eth 66 protocol version specifically.
 To run the eth66 protocol test suite, initialize a geth node as described above and run the following command,
 replacing `<enode>` with the enode of the geth node:
 

+ 44 - 11
cmd/devp2p/dns_route53.go

@@ -107,22 +107,48 @@ func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error {
 		return err
 	}
 	log.Info(fmt.Sprintf("Found %d TXT records", len(existing)))
-
 	records := t.ToTXT(name)
 	changes := c.computeChanges(name, records, existing)
+
+	// Submit to API.
+	comment := fmt.Sprintf("enrtree update of %s at seq %d", name, t.Seq())
+	return c.submitChanges(changes, comment)
+}
+
+// deleteDomain removes all TXT records of the given domain.
+func (c *route53Client) deleteDomain(name string) error {
+	if err := c.checkZone(name); err != nil {
+		return err
+	}
+
+	// Compute DNS changes.
+	existing, err := c.collectRecords(name)
+	if err != nil {
+		return err
+	}
+	log.Info(fmt.Sprintf("Found %d TXT records", len(existing)))
+	changes := makeDeletionChanges(existing, nil)
+
+	// Submit to API.
+	comment := "enrtree delete of " + name
+	return c.submitChanges(changes, comment)
+}
+
+// submitChanges submits the given DNS changes to Route53.
+func (c *route53Client) submitChanges(changes []types.Change, comment string) error {
 	if len(changes) == 0 {
 		log.Info("No DNS changes needed")
 		return nil
 	}
 
-	// Submit all change batches.
+	var err error
 	batches := splitChanges(changes, route53ChangeSizeLimit, route53ChangeCountLimit)
 	changesToCheck := make([]*route53.ChangeResourceRecordSetsOutput, len(batches))
 	for i, changes := range batches {
 		log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes)))
 		batch := &types.ChangeBatch{
 			Changes: changes,
-			Comment: aws.String(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq())),
+			Comment: aws.String(fmt.Sprintf("%s (%d/%d)", comment, i+1, len(batches))),
 		}
 		req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch}
 		changesToCheck[i], err = c.api.ChangeResourceRecordSets(context.TODO(), req)
@@ -151,7 +177,6 @@ func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error {
 			time.Sleep(30 * time.Second)
 		}
 	}
-
 	return nil
 }
 
@@ -186,7 +211,8 @@ func (c *route53Client) findZoneID(name string) (string, error) {
 	return "", errors.New("can't find zone ID for " + name)
 }
 
-// computeChanges creates DNS changes for the given record.
+// computeChanges creates DNS changes for the given set of DNS discovery records.
+// The 'existing' arg is the set of records that already exist on Route53.
 func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []types.Change {
 	// Convert all names to lowercase.
 	lrecords := make(map[string]string, len(records))
@@ -223,16 +249,23 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
 	}
 
 	// Iterate over the old records and delete anything stale.
-	for path, set := range existing {
-		if _, ok := records[path]; ok {
+	changes = append(changes, makeDeletionChanges(existing, records)...)
+
+	// Ensure changes are in the correct order.
+	sortChanges(changes)
+	return changes
+}
+
+// makeDeletionChanges creates record changes which delete all records not contained in 'keep'.
+func makeDeletionChanges(records map[string]recordSet, keep map[string]string) []types.Change {
+	var changes []types.Change
+	for path, set := range records {
+		if _, ok := keep[path]; ok {
 			continue
 		}
-		// Stale entry, nuke it.
-		log.Info(fmt.Sprintf("Deleting %s = %q", path, strings.Join(set.values, "")))
+		log.Info(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, "")))
 		changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
 	}
-
-	sortChanges(changes)
 	return changes
 }
 

+ 30 - 5
cmd/devp2p/dnscmd.go

@@ -43,6 +43,7 @@ var (
 			dnsTXTCommand,
 			dnsCloudflareCommand,
 			dnsRoute53Command,
+			dnsRoute53NukeCommand,
 		},
 	}
 	dnsSyncCommand = cli.Command{
@@ -84,6 +85,18 @@ var (
 			route53RegionFlag,
 		},
 	}
+	dnsRoute53NukeCommand = cli.Command{
+		Name:      "nuke-route53",
+		Usage:     "Deletes DNS TXT records of a subdomain on Amazon Route53",
+		ArgsUsage: "<domain>",
+		Action:    dnsNukeRoute53,
+		Flags: []cli.Flag{
+			route53AccessKeyFlag,
+			route53AccessSecretFlag,
+			route53ZoneIDFlag,
+			route53RegionFlag,
+		},
+	}
 )
 
 var (
@@ -174,6 +187,9 @@ func dnsSign(ctx *cli.Context) error {
 	return nil
 }
 
+// directoryName returns the directory name of the given path.
+// For example, when dir is "foo/bar", it returns "bar".
+// When dir is ".", and the working directory is "example/foo", it returns "foo".
 func directoryName(dir string) string {
 	abs, err := filepath.Abs(dir)
 	if err != nil {
@@ -182,7 +198,7 @@ func directoryName(dir string) string {
 	return filepath.Base(abs)
 }
 
-// dnsToTXT peforms dnsTXTCommand.
+// dnsToTXT performs dnsTXTCommand.
 func dnsToTXT(ctx *cli.Context) error {
 	if ctx.NArg() < 1 {
 		return fmt.Errorf("need tree definition directory as argument")
@@ -199,9 +215,9 @@ func dnsToTXT(ctx *cli.Context) error {
 	return nil
 }
 
-// dnsToCloudflare peforms dnsCloudflareCommand.
+// dnsToCloudflare performs dnsCloudflareCommand.
 func dnsToCloudflare(ctx *cli.Context) error {
-	if ctx.NArg() < 1 {
+	if ctx.NArg() != 1 {
 		return fmt.Errorf("need tree definition directory as argument")
 	}
 	domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
@@ -212,9 +228,9 @@ func dnsToCloudflare(ctx *cli.Context) error {
 	return client.deploy(domain, t)
 }
 
-// dnsToRoute53 peforms dnsRoute53Command.
+// dnsToRoute53 performs dnsRoute53Command.
 func dnsToRoute53(ctx *cli.Context) error {
-	if ctx.NArg() < 1 {
+	if ctx.NArg() != 1 {
 		return fmt.Errorf("need tree definition directory as argument")
 	}
 	domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
@@ -225,6 +241,15 @@ func dnsToRoute53(ctx *cli.Context) error {
 	return client.deploy(domain, t)
 }
 
+// dnsNukeRoute53 performs dnsRoute53NukeCommand.
+func dnsNukeRoute53(ctx *cli.Context) error {
+	if ctx.NArg() != 1 {
+		return fmt.Errorf("need domain name as argument")
+	}
+	client := newRoute53Client(ctx)
+	return client.deleteDomain(ctx.Args().First())
+}
+
 // loadSigningKey loads a private key in Ethereum keystore format.
 func loadSigningKey(keyfile string) *ecdsa.PrivateKey {
 	keyjson, err := ioutil.ReadFile(keyfile)

+ 23 - 6
cmd/devp2p/internal/ethtest/chain.go

@@ -34,6 +34,7 @@ import (
 )
 
 type Chain struct {
+	genesis     core.Genesis
 	blocks      []*types.Block
 	chainConfig *params.ChainConfig
 }
@@ -124,16 +125,34 @@ func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) {
 // loadChain takes the given chain.rlp file, and decodes and returns
 // the blocks from the file.
 func loadChain(chainfile string, genesis string) (*Chain, error) {
-	chainConfig, err := ioutil.ReadFile(genesis)
+	gen, err := loadGenesis(genesis)
 	if err != nil {
 		return nil, err
 	}
+	gblock := gen.ToBlock(nil)
+
+	blocks, err := blocksFromFile(chainfile, gblock)
+	if err != nil {
+		return nil, err
+	}
+
+	c := &Chain{genesis: gen, blocks: blocks, chainConfig: gen.Config}
+	return c, nil
+}
+
+func loadGenesis(genesisFile string) (core.Genesis, error) {
+	chainConfig, err := ioutil.ReadFile(genesisFile)
+	if err != nil {
+		return core.Genesis{}, err
+	}
 	var gen core.Genesis
 	if err := json.Unmarshal(chainConfig, &gen); err != nil {
-		return nil, err
+		return core.Genesis{}, err
 	}
-	gblock := gen.ToBlock(nil)
+	return gen, nil
+}
 
+func blocksFromFile(chainfile string, gblock *types.Block) ([]*types.Block, error) {
 	// Load chain.rlp.
 	fh, err := os.Open(chainfile)
 	if err != nil {
@@ -161,7 +180,5 @@ func loadChain(chainfile string, genesis string) (*Chain, error) {
 		}
 		blocks = append(blocks, &b)
 	}
-
-	c := &Chain{blocks: blocks, chainConfig: gen.Config}
-	return c, nil
+	return blocks, nil
 }

+ 175 - 50
cmd/devp2p/internal/ethtest/eth66_suite.go

@@ -19,6 +19,7 @@ package ethtest
 import (
 	"time"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth/protocols/eth"
@@ -41,6 +42,7 @@ func (s *Suite) Is_66(t *utesting.T) {
 // make sure the chain head is correct.
 func (s *Suite) TestStatus_66(t *utesting.T) {
 	conn := s.dial66(t)
+	defer conn.Close()
 	// get protoHandshake
 	conn.handshake(t)
 	// get status
@@ -60,6 +62,7 @@ func (s *Suite) TestStatus_66(t *utesting.T) {
 // an eth66 `GetBlockHeaders` request and that the response is accurate.
 func (s *Suite) TestGetBlockHeaders_66(t *utesting.T) {
 	conn := s.setupConnection66(t)
+	defer conn.Close()
 	// get block headers
 	req := &eth.GetBlockHeadersPacket66{
 		RequestId: 3,
@@ -73,9 +76,14 @@ func (s *Suite) TestGetBlockHeaders_66(t *utesting.T) {
 		},
 	}
 	// write message
-	headers := s.getBlockHeaders66(t, conn, req, req.RequestId)
+	headers, err := s.getBlockHeaders66(conn, req, req.RequestId)
+	if err != nil {
+		t.Fatalf("could not get block headers: %v", err)
+	}
 	// check for correct headers
-	headersMatch(t, s.chain, headers)
+	if !headersMatch(t, s.chain, headers) {
+		t.Fatal("received wrong header(s)")
+	}
 }
 
 // TestSimultaneousRequests_66 sends two simultaneous `GetBlockHeader` requests
@@ -83,7 +91,8 @@ func (s *Suite) TestGetBlockHeaders_66(t *utesting.T) {
 // headers per request.
 func (s *Suite) TestSimultaneousRequests_66(t *utesting.T) {
 	// create two connections
-	conn1, conn2 := s.setupConnection66(t), s.setupConnection66(t)
+	conn := s.setupConnection66(t)
+	defer conn.Close()
 	// create two requests
 	req1 := &eth.GetBlockHeadersPacket66{
 		RequestId: 111,
@@ -107,33 +116,36 @@ func (s *Suite) TestSimultaneousRequests_66(t *utesting.T) {
 			Reverse: false,
 		},
 	}
-	// wait for headers for first request
-	headerChan := make(chan BlockHeaders, 1)
-	go func(headers chan BlockHeaders) {
-		headers <- s.getBlockHeaders66(t, conn1, req1, req1.RequestId)
-	}(headerChan)
-	// check headers of second request
-	headersMatch(t, s.chain, s.getBlockHeaders66(t, conn2, req2, req2.RequestId))
-	// check headers of first request
-	headersMatch(t, s.chain, <-headerChan)
+	// write first request
+	if err := conn.write66(req1, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	// write second request
+	if err := conn.write66(req2, GetBlockHeaders{}.Code()); err != nil {
+		t.Fatalf("failed to write to connection: %v", err)
+	}
+	// wait for responses
+	headers1, err := s.waitForBlockHeadersResponse66(conn, req1.RequestId)
+	if err != nil {
+		t.Fatalf("error while waiting for block headers: %v", err)
+	}
+	headers2, err := s.waitForBlockHeadersResponse66(conn, req2.RequestId)
+	if err != nil {
+		t.Fatalf("error while waiting for block headers: %v", err)
+	}
+	// check headers of both responses
+	if !headersMatch(t, s.chain, headers1) {
+		t.Fatalf("wrong header(s) in response to req1: got %v", headers1)
+	}
+	if !headersMatch(t, s.chain, headers2) {
+		t.Fatalf("wrong header(s) in response to req2: got %v", headers2)
+	}
 }
 
 // TestBroadcast_66 tests whether a block announcement is correctly
 // propagated to the given node's peer(s) on the eth66 protocol.
 func (s *Suite) TestBroadcast_66(t *utesting.T) {
-	sendConn, receiveConn := s.setupConnection66(t), s.setupConnection66(t)
-	nextBlock := len(s.chain.blocks)
-	blockAnnouncement := &NewBlock{
-		Block: s.fullChain.blocks[nextBlock],
-		TD:    s.fullChain.TD(nextBlock + 1),
-	}
-	s.testAnnounce66(t, sendConn, receiveConn, blockAnnouncement)
-	// update test suite chain
-	s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock])
-	// wait for client to update its chain
-	if err := receiveConn.waitForBlock66(s.chain.Head()); err != nil {
-		t.Fatal(err)
-	}
+	s.sendNextBlock66(t)
 }
 
 // TestGetBlockBodies_66 tests whether the given node can respond to
@@ -141,6 +153,7 @@ func (s *Suite) TestBroadcast_66(t *utesting.T) {
 // the eth66 protocol.
 func (s *Suite) TestGetBlockBodies_66(t *utesting.T) {
 	conn := s.setupConnection66(t)
+	defer conn.Close()
 	// create block bodies request
 	id := uint64(55)
 	req := &eth.GetBlockBodiesPacket66{
@@ -195,33 +208,31 @@ func (s *Suite) TestLargeAnnounce_66(t *utesting.T) {
 			t.Fatalf("could not write to connection: %v", err)
 		}
 		// Invalid announcement, check that peer disconnected
-		switch msg := sendConn.ReadAndServe(s.chain, timeout).(type) {
+		switch msg := sendConn.ReadAndServe(s.chain, time.Second*8).(type) {
 		case *Disconnect:
 		case *Error:
 			break
 		default:
 			t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg))
 		}
+		sendConn.Close()
 	}
 	// Test the last block as a valid block
-	sendConn := s.setupConnection66(t)
-	receiveConn := s.setupConnection66(t)
-	s.testAnnounce66(t, sendConn, receiveConn, blocks[3])
-	// update test suite chain
-	s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock])
-	// wait for client to update its chain
-	if err := receiveConn.waitForBlock66(s.fullChain.blocks[nextBlock]); err != nil {
-		t.Fatal(err)
-	}
+	s.sendNextBlock66(t)
 }
 
 func (s *Suite) TestOldAnnounce_66(t *utesting.T) {
-	s.oldAnnounce(t, s.setupConnection66(t), s.setupConnection66(t))
+	sendConn, recvConn := s.setupConnection66(t), s.setupConnection66(t)
+	defer sendConn.Close()
+	defer recvConn.Close()
+
+	s.oldAnnounce(t, sendConn, recvConn)
 }
 
 // TestMaliciousHandshake_66 tries to send malicious data during the handshake.
 func (s *Suite) TestMaliciousHandshake_66(t *utesting.T) {
 	conn := s.dial66(t)
+	defer conn.Close()
 	// write hello to client
 	pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:]
 	handshakes := []*Hello{
@@ -295,6 +306,7 @@ func (s *Suite) TestMaliciousHandshake_66(t *utesting.T) {
 // TestMaliciousStatus_66 sends a status package with a large total difficulty.
 func (s *Suite) TestMaliciousStatus_66(t *utesting.T) {
 	conn := s.dial66(t)
+	defer conn.Close()
 	// get protoHandshake
 	conn.handshake(t)
 	status := &Status{
@@ -334,23 +346,37 @@ func (s *Suite) TestTransaction_66(t *utesting.T) {
 }
 
 func (s *Suite) TestMaliciousTx_66(t *utesting.T) {
-	tests := []*types.Transaction{
+	badTxs := []*types.Transaction{
 		getOldTxFromChain(t, s),
 		invalidNonceTx(t, s),
 		hugeAmount(t, s),
 		hugeGasPrice(t, s),
 		hugeData(t, s),
 	}
-	for i, tx := range tests {
+	sendConn := s.setupConnection66(t)
+	defer sendConn.Close()
+	// set up receiving connection before sending txs to make sure
+	// no announcements are missed
+	recvConn := s.setupConnection66(t)
+	defer recvConn.Close()
+
+	for i, tx := range badTxs {
 		t.Logf("Testing malicious tx propagation: %v\n", i)
-		sendFailingTx66(t, s, tx)
+		if err := sendConn.Write(&Transactions{tx}); err != nil {
+			t.Fatalf("could not write to connection: %v", err)
+		}
+
 	}
+	// check to make sure bad txs aren't propagated
+	waitForTxPropagation(t, s, badTxs, recvConn)
 }
 
 // TestZeroRequestID_66 checks that a request ID of zero is still handled
 // by the node.
 func (s *Suite) TestZeroRequestID_66(t *utesting.T) {
 	conn := s.setupConnection66(t)
+	defer conn.Close()
+
 	req := &eth.GetBlockHeadersPacket66{
 		RequestId: 0,
 		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
@@ -360,25 +386,31 @@ func (s *Suite) TestZeroRequestID_66(t *utesting.T) {
 			Amount: 2,
 		},
 	}
-	headersMatch(t, s.chain, s.getBlockHeaders66(t, conn, req, req.RequestId))
+	headers, err := s.getBlockHeaders66(conn, req, req.RequestId)
+	if err != nil {
+		t.Fatalf("could not get block headers: %v", err)
+	}
+	if !headersMatch(t, s.chain, headers) {
+		t.Fatal("received wrong header(s)")
+	}
 }
 
 // TestSameRequestID_66 sends two requests with the same request ID
 // concurrently to a single node.
 func (s *Suite) TestSameRequestID_66(t *utesting.T) {
 	conn := s.setupConnection66(t)
-	// create two separate requests with same ID
+	// create two requests with the same request ID
 	reqID := uint64(1234)
-	req1 := &eth.GetBlockHeadersPacket66{
+	request1 := &eth.GetBlockHeadersPacket66{
 		RequestId: reqID,
 		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
 			Origin: eth.HashOrNumber{
-				Number: 0,
+				Number: 1,
 			},
 			Amount: 2,
 		},
 	}
-	req2 := &eth.GetBlockHeadersPacket66{
+	request2 := &eth.GetBlockHeadersPacket66{
 		RequestId: reqID,
 		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
 			Origin: eth.HashOrNumber{
@@ -387,10 +419,103 @@ func (s *Suite) TestSameRequestID_66(t *utesting.T) {
 			Amount: 2,
 		},
 	}
-	// send requests concurrently
-	go func() {
-		headersMatch(t, s.chain, s.getBlockHeaders66(t, conn, req2, reqID))
-	}()
-	// check response from first request
-	headersMatch(t, s.chain, s.getBlockHeaders66(t, conn, req1, reqID))
+	// write the first request
+	err := conn.write66(request1, GetBlockHeaders{}.Code())
+	if err != nil {
+		t.Fatalf("could not write to connection: %v", err)
+	}
+	// perform second request
+	headers2, err := s.getBlockHeaders66(conn, request2, reqID)
+	if err != nil {
+		t.Fatalf("could not get block headers: %v", err)
+	}
+	// wait for response to first request
+	headers1, err := s.waitForBlockHeadersResponse66(conn, reqID)
+	if err != nil {
+		t.Fatalf("could not get BlockHeaders response: %v", err)
+	}
+	// check if headers match
+	if !headersMatch(t, s.chain, headers1) || !headersMatch(t, s.chain, headers2) {
+		t.Fatal("received wrong header(s)")
+	}
+}
+
+// TestLargeTxRequest_66 tests whether a node can fulfill a large GetPooledTransactions
+// request.
+func (s *Suite) TestLargeTxRequest_66(t *utesting.T) {
+	// send the next block to ensure the node is no longer syncing and is able to accept
+	// txs
+	s.sendNextBlock66(t)
+	// send 2000 transactions to the node
+	hashMap, txs := generateTxs(t, s, 2000)
+	sendConn := s.setupConnection66(t)
+	defer sendConn.Close()
+
+	sendMultipleSuccessfulTxs(t, s, sendConn, txs)
+	// set up the receiving connection to ensure the node is peered with it
+	// before the tx request is sent
+	recvConn := s.setupConnection66(t)
+	defer recvConn.Close()
+	// create and send pooled tx request
+	hashes := make([]common.Hash, 0)
+	for _, hash := range hashMap {
+		hashes = append(hashes, hash)
+	}
+	getTxReq := &eth.GetPooledTransactionsPacket66{
+		RequestId:                   1234,
+		GetPooledTransactionsPacket: hashes,
+	}
+	if err := recvConn.write66(getTxReq, GetPooledTransactions{}.Code()); err != nil {
+		t.Fatalf("could not write to conn: %v", err)
+	}
+	// check that all received transactions match those that were sent to node
+	switch msg := recvConn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
+	case PooledTransactions:
+		for _, gotTx := range msg {
+			if _, exists := hashMap[gotTx.Hash()]; !exists {
+				t.Fatalf("unexpected tx received: %v", gotTx.Hash())
+			}
+		}
+	default:
+		t.Fatalf("unexpected %s", pretty.Sdump(msg))
+	}
+}
+
+// TestNewPooledTxs_66 tests whether a node will do a GetPooledTransactions
+// request upon receiving a NewPooledTransactionHashes announcement.
+func (s *Suite) TestNewPooledTxs_66(t *utesting.T) {
+	// send the next block to ensure the node is no longer syncing and is able to accept
+	// txs
+	s.sendNextBlock66(t)
+	// generate 50 txs
+	hashMap, _ := generateTxs(t, s, 50)
+	// create new pooled tx hashes announcement
+	hashes := make([]common.Hash, 0)
+	for _, hash := range hashMap {
+		hashes = append(hashes, hash)
+	}
+	announce := NewPooledTransactionHashes(hashes)
+	// send announcement
+	conn := s.setupConnection66(t)
+	defer conn.Close()
+	if err := conn.Write(announce); err != nil {
+		t.Fatalf("could not write to connection: %v", err)
+	}
+	// wait for GetPooledTxs request
+	for {
+		_, msg := conn.readAndServe66(s.chain, timeout)
+		switch msg := msg.(type) {
+		case GetPooledTransactions:
+			if len(msg) != len(hashes) {
+				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg))
+			}
+			return
+		case *NewPooledTransactionHashes, *NewBlock, *NewBlockHashes:
+			// ignore propagated txs and blocks from old tests
+			continue
+		default:
+			t.Fatalf("unexpected %s", pretty.Sdump(msg))
+		}
+	}
 }

+ 103 - 44
cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go

@@ -18,6 +18,7 @@ package ethtest
 
 import (
 	"fmt"
+	"reflect"
 	"time"
 
 	"github.com/ethereum/go-ethereum/core/types"
@@ -111,6 +112,18 @@ func (c *Conn) read66() (uint64, Message) {
 		msg = new(Transactions)
 	case (NewPooledTransactionHashes{}).Code():
 		msg = new(NewPooledTransactionHashes)
+	case (GetPooledTransactions{}.Code()):
+		ethMsg := new(eth.GetPooledTransactionsPacket66)
+		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
+			return 0, errorf("could not rlp decode message: %v", err)
+		}
+		return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket)
+	case (PooledTransactions{}.Code()):
+		ethMsg := new(eth.PooledTransactionsPacket66)
+		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
+			return 0, errorf("could not rlp decode message: %v", err)
+		}
+		return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket)
 	default:
 		msg = errorf("invalid message code: %d", code)
 	}
@@ -124,13 +137,21 @@ func (c *Conn) read66() (uint64, Message) {
 	return 0, errorf("invalid message: %s", string(rawData))
 }
 
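+// waitForResponse reads and serves messages until a response with the given request ID arrives.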
+func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message {
+	for {
+		id, msg := c.readAndServe66(chain, timeout)
+		if id == requestID {
+			return msg
+		}
+	}
+}
+
 // ReadAndServe serves GetBlockHeaders requests while waiting
 // on another message from the node.
 func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) {
 	start := time.Now()
 	for time.Since(start) < timeout {
-		timeout := time.Now().Add(10 * time.Second)
-		c.SetReadDeadline(timeout)
+		c.SetReadDeadline(time.Now().Add(10 * time.Second))
 
 		reqID, msg := c.read66()
 
@@ -173,27 +194,33 @@ func (s *Suite) testAnnounce66(t *utesting.T, sendConn, receiveConn *Conn, block
 }
 
 func (s *Suite) waitAnnounce66(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) {
-	timeout := 20 * time.Second
-	_, msg := conn.readAndServe66(s.chain, timeout)
-	switch msg := msg.(type) {
-	case *NewBlock:
-		t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block))
-		assert.Equal(t,
-			blockAnnouncement.Block.Header(), msg.Block.Header(),
-			"wrong block header in announcement",
-		)
-		assert.Equal(t,
-			blockAnnouncement.TD, msg.TD,
-			"wrong TD in announcement",
-		)
-	case *NewBlockHashes:
-		blockHashes := *msg
-		t.Logf("received NewBlockHashes message: %s", pretty.Sdump(blockHashes))
-		assert.Equal(t, blockAnnouncement.Block.Hash(), blockHashes[0].Hash,
-			"wrong block hash in announcement",
-		)
-	default:
-		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
+	for {
+		_, msg := conn.readAndServe66(s.chain, timeout)
+		switch msg := msg.(type) {
+		case *NewBlock:
+			t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block))
+			assert.Equal(t,
+				blockAnnouncement.Block.Header(), msg.Block.Header(),
+				"wrong block header in announcement",
+			)
+			assert.Equal(t,
+				blockAnnouncement.TD, msg.TD,
+				"wrong TD in announcement",
+			)
+			return
+		case *NewBlockHashes:
+			blockHashes := *msg
+			t.Logf("received NewBlockHashes message: %s", pretty.Sdump(blockHashes))
+			assert.Equal(t, blockAnnouncement.Block.Hash(), blockHashes[0].Hash,
+				"wrong block hash in announcement",
+			)
+			return
+		case *NewPooledTransactionHashes:
+			// ignore old txs being propagated
+			continue
+		default:
+			t.Fatalf("unexpected: %s", pretty.Sdump(msg))
+		}
 	}
 }
 
@@ -202,8 +229,11 @@ func (s *Suite) waitAnnounce66(t *utesting.T, conn *Conn, blockAnnouncement *New
 func (c *Conn) waitForBlock66(block *types.Block) error {
 	defer c.SetReadDeadline(time.Time{})
 
-	timeout := time.Now().Add(20 * time.Second)
-	c.SetReadDeadline(timeout)
+	c.SetReadDeadline(time.Now().Add(20 * time.Second))
+	// note: if the node has not yet imported the block, it will respond
+	// to the GetBlockHeaders request with an empty BlockHeaders response,
+	// so the GetBlockHeaders request must be sent again until the BlockHeaders
+	// response contains the desired header.
 	for {
 		req := eth.GetBlockHeadersPacket66{
 			RequestId: 54,
@@ -226,10 +256,15 @@ func (c *Conn) waitForBlock66(block *types.Block) error {
 			if reqID != req.RequestId {
 				return fmt.Errorf("request ID mismatch: wanted %d, got %d", req.RequestId, reqID)
 			}
-			if len(msg) > 0 {
-				return nil
+			for _, header := range msg {
+				if header.Number.Uint64() == block.NumberU64() {
+					return nil
+				}
 			}
 			time.Sleep(100 * time.Millisecond)
+		case *NewPooledTransactionHashes:
+			// ignore old announcements
+			continue
 		default:
 			return fmt.Errorf("invalid message: %s", pretty.Sdump(msg))
 		}
@@ -238,37 +273,61 @@ func (c *Conn) waitForBlock66(block *types.Block) error {
 
 func sendSuccessfulTx66(t *utesting.T, s *Suite, tx *types.Transaction) {
 	sendConn := s.setupConnection66(t)
+	defer sendConn.Close()
 	sendSuccessfulTxWithConn(t, s, tx, sendConn)
 }
 
-func sendFailingTx66(t *utesting.T, s *Suite, tx *types.Transaction) {
-	sendConn, recvConn := s.setupConnection66(t), s.setupConnection66(t)
-	sendFailingTxWithConns(t, s, tx, sendConn, recvConn)
-}
-
-func (s *Suite) getBlockHeaders66(t *utesting.T, conn *Conn, req eth.Packet, expectedID uint64) BlockHeaders {
-	if err := conn.write66(req, GetBlockHeaders{}.Code()); err != nil {
-		t.Fatalf("could not write to connection: %v", err)
-	}
-	// check block headers response
+// waitForBlockHeadersResponse66 waits for a BlockHeaders message with the given expected request ID
+func (s *Suite) waitForBlockHeadersResponse66(conn *Conn, expectedID uint64) (BlockHeaders, error) {
 	reqID, msg := conn.readAndServe66(s.chain, timeout)
-
 	switch msg := msg.(type) {
 	case BlockHeaders:
 		if reqID != expectedID {
-			t.Fatalf("request ID mismatch: wanted %d, got %d", expectedID, reqID)
+			return nil, fmt.Errorf("request ID mismatch: wanted %d, got %d", expectedID, reqID)
 		}
-		return msg
+		return msg, nil
 	default:
-		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
-		return nil
+		return nil, fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
+	}
+}
+
+func (s *Suite) getBlockHeaders66(conn *Conn, req eth.Packet, expectedID uint64) (BlockHeaders, error) {
+	if err := conn.write66(req, GetBlockHeaders{}.Code()); err != nil {
+		return nil, fmt.Errorf("could not write to connection: %v", err)
 	}
+	return s.waitForBlockHeadersResponse66(conn, expectedID)
 }
 
-func headersMatch(t *utesting.T, chain *Chain, headers BlockHeaders) {
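+// headersMatch compares the received headers against the test chain, reporting whether they all match.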
+func headersMatch(t *utesting.T, chain *Chain, headers BlockHeaders) bool {
+	mismatched := 0
 	for _, header := range headers {
 		num := header.Number.Uint64()
 		t.Logf("received header (%d): %s", num, pretty.Sdump(header.Hash()))
-		assert.Equal(t, chain.blocks[int(num)].Header(), header)
+		if !reflect.DeepEqual(chain.blocks[int(num)].Header(), header) {
+			mismatched += 1
+			t.Logf("received wrong header: %v", pretty.Sdump(header))
+		}
+	}
+	return mismatched == 0
+}
+
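+// sendNextBlock66 announces the next block of the full chain and waits for the node to import it.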
+func (s *Suite) sendNextBlock66(t *utesting.T) {
+	sendConn, receiveConn := s.setupConnection66(t), s.setupConnection66(t)
+	defer sendConn.Close()
+	defer receiveConn.Close()
+
+	// create new block announcement
+	nextBlock := len(s.chain.blocks)
+	blockAnnouncement := &NewBlock{
+		Block: s.fullChain.blocks[nextBlock],
+		TD:    s.fullChain.TD(nextBlock + 1),
+	}
+	// send announcement and wait for node to request the header
+	s.testAnnounce66(t, sendConn, receiveConn, blockAnnouncement)
+	// wait for client to update its chain
+	if err := receiveConn.waitForBlock66(s.fullChain.blocks[nextBlock]); err != nil {
+		t.Fatal(err)
 	}
+	// update test suite chain
+	s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock])
 }

+ 1 - 1
cmd/devp2p/internal/ethtest/large.go

@@ -70,7 +70,7 @@ func largeHeader() *types.Header {
 		GasUsed:     0,
 		Coinbase:    common.Address{},
 		GasLimit:    0,
-		UncleHash:   randHash(),
+		UncleHash:   types.EmptyUncleHash,
 		Time:        1337,
 		ParentHash:  randHash(),
 		Root:        randHash(),

+ 81 - 47
cmd/devp2p/internal/ethtest/suite.go

@@ -69,20 +69,20 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e
 func (s *Suite) AllEthTests() []utesting.Test {
 	return []utesting.Test{
 		// status
-		{Name: "Status", Fn: s.TestStatus},
-		{Name: "Status_66", Fn: s.TestStatus_66},
+		{Name: "TestStatus", Fn: s.TestStatus},
+		{Name: "TestStatus_66", Fn: s.TestStatus_66},
 		// get block headers
-		{Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders},
-		{Name: "GetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66},
+		{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
+		{Name: "TestGetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66},
 		{Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66},
 		{Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66},
 		{Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66},
 		// get block bodies
-		{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
-		{Name: "GetBlockBodies_66", Fn: s.TestGetBlockBodies_66},
+		{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
+		{Name: "TestGetBlockBodies_66", Fn: s.TestGetBlockBodies_66},
 		// broadcast
-		{Name: "Broadcast", Fn: s.TestBroadcast},
-		{Name: "Broadcast_66", Fn: s.TestBroadcast_66},
+		{Name: "TestBroadcast", Fn: s.TestBroadcast},
+		{Name: "TestBroadcast_66", Fn: s.TestBroadcast_66},
 		{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
 		{Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66},
 		{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
@@ -91,44 +91,48 @@ func (s *Suite) AllEthTests() []utesting.Test {
 		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
 		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
 		{Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66},
-		{Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus},
+		{Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus_66},
 		// test transactions
-		{Name: "TestTransactions", Fn: s.TestTransaction},
-		{Name: "TestTransactions_66", Fn: s.TestTransaction_66},
-		{Name: "TestMaliciousTransactions", Fn: s.TestMaliciousTx},
-		{Name: "TestMaliciousTransactions_66", Fn: s.TestMaliciousTx_66},
+		{Name: "TestTransaction", Fn: s.TestTransaction},
+		{Name: "TestTransaction_66", Fn: s.TestTransaction_66},
+		{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
+		{Name: "TestMaliciousTx_66", Fn: s.TestMaliciousTx_66},
+		{Name: "TestLargeTxRequest_66", Fn: s.TestLargeTxRequest_66},
+		{Name: "TestNewPooledTxs_66", Fn: s.TestNewPooledTxs_66},
 	}
 }
 
 func (s *Suite) EthTests() []utesting.Test {
 	return []utesting.Test{
-		{Name: "Status", Fn: s.TestStatus},
-		{Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders},
-		{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
-		{Name: "Broadcast", Fn: s.TestBroadcast},
+		{Name: "TestStatus", Fn: s.TestStatus},
+		{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
+		{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
+		{Name: "TestBroadcast", Fn: s.TestBroadcast},
 		{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
 		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
 		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
-		{Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus},
-		{Name: "TestTransactions", Fn: s.TestTransaction},
-		{Name: "TestMaliciousTransactions", Fn: s.TestMaliciousTx},
+		{Name: "TestTransaction", Fn: s.TestTransaction},
+		{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
 	}
 }
 
 func (s *Suite) Eth66Tests() []utesting.Test {
 	return []utesting.Test{
 		// only proceed with eth66 test suite if node supports eth 66 protocol
-		{Name: "Status_66", Fn: s.TestStatus_66},
-		{Name: "GetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66},
+		{Name: "TestStatus_66", Fn: s.TestStatus_66},
+		{Name: "TestGetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66},
 		{Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66},
 		{Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66},
 		{Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66},
-		{Name: "GetBlockBodies_66", Fn: s.TestGetBlockBodies_66},
-		{Name: "Broadcast_66", Fn: s.TestBroadcast_66},
+		{Name: "TestGetBlockBodies_66", Fn: s.TestGetBlockBodies_66},
+		{Name: "TestBroadcast_66", Fn: s.TestBroadcast_66},
 		{Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66},
 		{Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66},
-		{Name: "TestTransactions_66", Fn: s.TestTransaction_66},
-		{Name: "TestMaliciousTransactions_66", Fn: s.TestMaliciousTx_66},
+		{Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus_66},
+		{Name: "TestTransaction_66", Fn: s.TestTransaction_66},
+		{Name: "TestMaliciousTx_66", Fn: s.TestMaliciousTx_66},
+		{Name: "TestLargeTxRequest_66", Fn: s.TestLargeTxRequest_66},
+		{Name: "TestNewPooledTxs_66", Fn: s.TestNewPooledTxs_66},
 	}
 }
 
@@ -140,6 +144,7 @@ func (s *Suite) TestStatus(t *utesting.T) {
 	if err != nil {
 		t.Fatalf("could not dial: %v", err)
 	}
+	defer conn.Close()
 	// get protoHandshake
 	conn.handshake(t)
 	// get status
@@ -157,6 +162,7 @@ func (s *Suite) TestMaliciousStatus(t *utesting.T) {
 	if err != nil {
 		t.Fatalf("could not dial: %v", err)
 	}
+	defer conn.Close()
 	// get protoHandshake
 	conn.handshake(t)
 	status := &Status{
@@ -191,6 +197,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
 	if err != nil {
 		t.Fatalf("could not dial: %v", err)
 	}
+	defer conn.Close()
 
 	conn.handshake(t)
 	conn.statusExchange(t, s.chain, nil)
@@ -229,6 +236,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 	if err != nil {
 		t.Fatalf("could not dial: %v", err)
 	}
+	defer conn.Close()
 
 	conn.handshake(t)
 	conn.statusExchange(t, s.chain, nil)
@@ -252,19 +260,28 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 // TestBroadcast tests whether a block announcement is correctly
 // propagated to the given node's peer(s).
 func (s *Suite) TestBroadcast(t *utesting.T) {
+	s.sendNextBlock(t)
+}
+
+func (s *Suite) sendNextBlock(t *utesting.T) {
 	sendConn, receiveConn := s.setupConnection(t), s.setupConnection(t)
+	defer sendConn.Close()
+	defer receiveConn.Close()
+
+	// create new block announcement
 	nextBlock := len(s.chain.blocks)
 	blockAnnouncement := &NewBlock{
 		Block: s.fullChain.blocks[nextBlock],
 		TD:    s.fullChain.TD(nextBlock + 1),
 	}
+	// send announcement and wait for node to request the header
 	s.testAnnounce(t, sendConn, receiveConn, blockAnnouncement)
-	// update test suite chain
-	s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock])
 	// wait for client to update its chain
-	if err := receiveConn.waitForBlock(s.chain.Head()); err != nil {
+	if err := receiveConn.waitForBlock(s.fullChain.blocks[nextBlock]); err != nil {
 		t.Fatal(err)
 	}
+	// update test suite chain
+	s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock])
 }
 
 // TestMaliciousHandshake tries to send malicious data during the handshake.
@@ -273,6 +290,7 @@ func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
 	if err != nil {
 		t.Fatalf("could not dial: %v", err)
 	}
+	defer conn.Close()
 	// write hello to client
 	pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:]
 	handshakes := []*Hello{
@@ -372,28 +390,25 @@ func (s *Suite) TestLargeAnnounce(t *utesting.T) {
 			t.Fatalf("could not write to connection: %v", err)
 		}
 		// Invalid announcement, check that peer disconnected
-		switch msg := sendConn.ReadAndServe(s.chain, timeout).(type) {
+		switch msg := sendConn.ReadAndServe(s.chain, time.Second*8).(type) {
 		case *Disconnect:
 		case *Error:
 			break
 		default:
 			t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg))
 		}
+		sendConn.Close()
 	}
 	// Test the last block as a valid block
-	sendConn := s.setupConnection(t)
-	receiveConn := s.setupConnection(t)
-	s.testAnnounce(t, sendConn, receiveConn, blocks[3])
-	// update test suite chain
-	s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock])
-	// wait for client to update its chain
-	if err := receiveConn.waitForBlock(s.fullChain.blocks[nextBlock]); err != nil {
-		t.Fatal(err)
-	}
+	s.sendNextBlock(t)
 }
 
 func (s *Suite) TestOldAnnounce(t *utesting.T) {
-	s.oldAnnounce(t, s.setupConnection(t), s.setupConnection(t))
+	sendConn, recvConn := s.setupConnection(t), s.setupConnection(t)
+	defer sendConn.Close()
+	defer recvConn.Close()
+
+	s.oldAnnounce(t, sendConn, recvConn)
 }
 
 func (s *Suite) oldAnnounce(t *utesting.T, sendConn, receiveConn *Conn) {
@@ -406,11 +421,19 @@ func (s *Suite) oldAnnounce(t *utesting.T, sendConn, receiveConn *Conn) {
 		t.Fatalf("could not write to connection: %v", err)
 	}
 
-	switch msg := receiveConn.ReadAndServe(s.chain, timeout*2).(type) {
+	switch msg := receiveConn.ReadAndServe(s.chain, time.Second*8).(type) {
 	case *NewBlock:
-		t.Fatalf("unexpected: block propagated: %s", pretty.Sdump(msg))
+		block := *msg
+		if block.Block.Hash() == oldBlockAnnounce.Block.Hash() {
+			t.Fatalf("unexpected: block propagated: %s", pretty.Sdump(msg))
+		}
 	case *NewBlockHashes:
-		t.Fatalf("unexpected: block announced: %s", pretty.Sdump(msg))
+		hashes := *msg
+		for _, hash := range hashes {
+			if hash.Hash == oldBlockAnnounce.Block.Hash() {
+				t.Fatalf("unexpected: block announced: %s", pretty.Sdump(msg))
+			}
+		}
 	case *Error:
 		errMsg := *msg
 		// check to make sure error is timeout (propagation didn't come through == test successful)
@@ -431,7 +454,6 @@ func (s *Suite) testAnnounce(t *utesting.T, sendConn, receiveConn *Conn, blockAn
 }
 
 func (s *Suite) waitAnnounce(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) {
-	timeout := 20 * time.Second
 	switch msg := conn.ReadAndServe(s.chain, timeout).(type) {
 	case *NewBlock:
 		t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block))
@@ -502,15 +524,27 @@ func (s *Suite) TestTransaction(t *utesting.T) {
 }
 
 func (s *Suite) TestMaliciousTx(t *utesting.T) {
-	tests := []*types.Transaction{
+	badTxs := []*types.Transaction{
 		getOldTxFromChain(t, s),
 		invalidNonceTx(t, s),
 		hugeAmount(t, s),
 		hugeGasPrice(t, s),
 		hugeData(t, s),
 	}
-	for i, tx := range tests {
+	sendConn := s.setupConnection(t)
+	defer sendConn.Close()
+	// set up receiving connection before sending txs to make sure
+	// no announcements are missed
+	recvConn := s.setupConnection(t)
+	defer recvConn.Close()
+
+	for i, tx := range badTxs {
 		t.Logf("Testing malicious tx propagation: %v\n", i)
-		sendFailingTx(t, s, tx)
+		if err := sendConn.Write(&Transactions{tx}); err != nil {
+			t.Fatalf("could not write to connection: %v", err)
+		}
 	}
+	// check to make sure bad txs aren't propagated
+	waitForTxPropagation(t, s, badTxs, recvConn)
 }

+ 107 - 0
cmd/devp2p/internal/ethtest/suite_test.go

@@ -0,0 +1,107 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethtest
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/ethconfig"
+	"github.com/ethereum/go-ethereum/internal/utesting"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/p2p"
+)
+
+var (
+	genesisFile   = "./testdata/genesis.json"
+	halfchainFile = "./testdata/halfchain.rlp"
+	fullchainFile = "./testdata/chain.rlp"
+)
+
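+// TestEthSuite runs the eth protocol test suite against an in-process geth node.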
+func TestEthSuite(t *testing.T) {
+	geth, err := runGeth()
+	if err != nil {
+		t.Fatalf("could not run geth: %v", err)
+	}
+	defer geth.Close()
+
+	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	if err != nil {
+		t.Fatalf("could not create new test suite: %v", err)
+	}
+	for _, test := range suite.AllEthTests() {
+		t.Run(test.Name, func(t *testing.T) {
+			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			if result[0].Failed {
+				t.Fatal()
+			}
+		})
+	}
+}
+
+// runGeth creates and starts a geth node
+func runGeth() (*node.Node, error) {
+	stack, err := node.New(&node.Config{
+		P2P: p2p.Config{
+			ListenAddr:  "127.0.0.1:0",
+			NoDiscovery: true,
+			MaxPeers:    10, // in case a test requires multiple connections, can be changed in the future
+			NoDial:      true,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = setupGeth(stack)
+	if err != nil {
+		stack.Close()
+		return nil, err
+	}
+	if err = stack.Start(); err != nil {
+		stack.Close()
+		return nil, err
+	}
+	return stack, nil
+}
+
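+// setupGeth registers an eth service on the given node, initialised from the test genesis, and imports the half chain.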
+func setupGeth(stack *node.Node) error {
+	chain, err := loadChain(halfchainFile, genesisFile)
+	if err != nil {
+		return err
+	}
+
+	backend, err := eth.New(stack, &ethconfig.Config{
+		Genesis:                 &chain.genesis,
+		NetworkId:               chain.genesis.Config.ChainID.Uint64(), // 19763
+		DatabaseCache:           10,
+		TrieCleanCache:          10,
+		TrieCleanCacheJournal:   "",
+		TrieCleanCacheRejournal: 60 * time.Minute,
+		TrieDirtyCache:          16,
+		TrieTimeout:             60 * time.Minute,
+		SnapshotCache:           10,
+	})
+	if err != nil {
+		return err
+	}
+
+	_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
+	return err
+}

+ 133 - 25
cmd/devp2p/internal/ethtest/transaction.go

@@ -17,12 +17,15 @@
 package ethtest
 
 import (
+	"math/big"
+	"strings"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/internal/utesting"
+	"github.com/ethereum/go-ethereum/params"
 )
 
 //var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
@@ -30,6 +33,7 @@ var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c666
 
 func sendSuccessfulTx(t *utesting.T, s *Suite, tx *types.Transaction) {
 	sendConn := s.setupConnection(t)
+	defer sendConn.Close()
 	sendSuccessfulTxWithConn(t, s, tx, sendConn)
 }
 
@@ -39,7 +43,9 @@ func sendSuccessfulTxWithConn(t *utesting.T, s *Suite, tx *types.Transaction, se
 	if err := sendConn.Write(&Transactions{tx}); err != nil {
 		t.Fatal(err)
 	}
-	time.Sleep(100 * time.Millisecond)
+	// update last nonce seen
+	nonce = tx.Nonce()
+
 	recvConn := s.setupConnection(t)
 	// Wait for the transaction announcement
 	switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) {
@@ -65,29 +71,84 @@ func sendSuccessfulTxWithConn(t *utesting.T, s *Suite, tx *types.Transaction, se
 	}
 }
 
-func sendFailingTx(t *utesting.T, s *Suite, tx *types.Transaction) {
-	sendConn, recvConn := s.setupConnection(t), s.setupConnection(t)
-	sendFailingTxWithConns(t, s, tx, sendConn, recvConn)
-}
+var nonce = uint64(99)
 
-func sendFailingTxWithConns(t *utesting.T, s *Suite, tx *types.Transaction, sendConn, recvConn *Conn) {
-	// Wait for a transaction announcement
-	switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) {
-	case *NewPooledTransactionHashes:
-		break
-	default:
-		t.Logf("unexpected message, logging: %v", pretty.Sdump(msg))
-	}
-	// Send the transaction
-	if err := sendConn.Write(&Transactions{tx}); err != nil {
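+// sendMultipleSuccessfulTxs sends the given txs in a single message and checks that the node announces all of them to another peer.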
+func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, sendConn *Conn, txs []*types.Transaction) {
+	txMsg := Transactions(txs)
+	t.Logf("sending %d txs\n", len(txs))
+
+	recvConn := s.setupConnection(t)
+	defer recvConn.Close()
+
+	// Send the transactions
+	if err := sendConn.Write(&txMsg); err != nil {
 		t.Fatal(err)
 	}
+	// update nonce
+	nonce = txs[len(txs)-1].Nonce()
+	// Wait for the transaction announcement(s) and make sure all sent txs are being propagated
+	recvHashes := make([]common.Hash, 0)
+	// all txs should be announced within 3 announcements
+	for i := 0; i < 3; i++ {
+		switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) {
+		case *Transactions:
+			for _, tx := range *msg {
+				recvHashes = append(recvHashes, tx.Hash())
+			}
+		case *NewPooledTransactionHashes:
+			recvHashes = append(recvHashes, *msg...)
+		default:
+			if !strings.Contains(pretty.Sdump(msg), "i/o timeout") {
+				t.Fatalf("unexpected message while waiting to receive txs: %s", pretty.Sdump(msg))
+			}
+		}
+		// break once all 2000 txs have been received
+		if len(recvHashes) == 2000 {
+			break
+		}
+		if len(recvHashes) > 0 {
+			_, missingTxs := compareReceivedTxs(recvHashes, txs)
+			if len(missingTxs) > 0 {
+				continue
+			} else {
+				t.Logf("successfully received all %d txs", len(txs))
+				return
+			}
+		}
+	}
+	_, missingTxs := compareReceivedTxs(recvHashes, txs)
+	if len(missingTxs) > 0 {
+		for _, missing := range missingTxs {
+			t.Logf("missing tx: %v", missing.Hash())
+		}
+		t.Fatalf("missing %d txs", len(missingTxs))
+	}
+}
+
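+// waitForTxPropagation fails the test if any of the given txs are announced to the receiving connection.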
+func waitForTxPropagation(t *utesting.T, s *Suite, txs []*types.Transaction, recvConn *Conn) {
 	// Wait for another transaction announcement
-	switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) {
+	switch msg := recvConn.ReadAndServe(s.chain, time.Second*8).(type) {
 	case *Transactions:
-		t.Fatalf("Received unexpected transaction announcement: %v", msg)
+		// check to see if any of the failing txs were in the announcement
+		recvTxs := make([]common.Hash, len(*msg))
+		for i, recvTx := range *msg {
+			recvTxs[i] = recvTx.Hash()
+		}
+		badTxs, _ := compareReceivedTxs(recvTxs, txs)
+		if len(badTxs) > 0 {
+			for _, tx := range badTxs {
+				t.Logf("received bad tx: %v", tx)
+			}
+			t.Fatalf("received %d bad txs", len(badTxs))
+		}
 	case *NewPooledTransactionHashes:
-		t.Fatalf("Received unexpected pooledTx announcement: %v", msg)
+		badTxs, _ := compareReceivedTxs(*msg, txs)
+		if len(badTxs) > 0 {
+			for _, tx := range badTxs {
+				t.Logf("received bad tx: %v", tx)
+			}
+			t.Fatalf("received %d bad txs", len(badTxs))
+		}
 	case *Error:
 		// Transaction should not be announced -> wait for timeout
 		return
@@ -96,6 +157,29 @@ func sendFailingTxWithConns(t *utesting.T, s *Suite, tx *types.Transaction, send
 	}
 }
 
+// compareReceivedTxs compares the received set of txs against the given set of txs,
+// returning both the set of received txs that were present within the given txs, and
+// the set of txs that were missing from the set of received txs.
+func compareReceivedTxs(recvTxs []common.Hash, txs []*types.Transaction) (present []*types.Transaction, missing []*types.Transaction) {
+	// create a map of the hashes received from node
+	recvHashes := make(map[common.Hash]common.Hash)
+	for _, hash := range recvTxs {
+		recvHashes[hash] = hash
+	}
+
+	// collect present txs and missing txs separately
+	present = make([]*types.Transaction, 0)
+	missing = make([]*types.Transaction, 0)
+	for _, tx := range txs {
+		if _, exists := recvHashes[tx.Hash()]; exists {
+			present = append(present, tx)
+		} else {
+			missing = append(missing, tx)
+		}
+	}
+	return present, missing
+}
+
 func unknownTx(t *utesting.T, s *Suite) *types.Transaction {
 	tx := getNextTxFromChain(t, s)
 	var to common.Address
@@ -103,7 +187,7 @@ func unknownTx(t *utesting.T, s *Suite) *types.Transaction {
 		to = *tx.To()
 	}
 	txNew := types.NewTransaction(tx.Nonce()+1, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data())
-	return signWithFaucet(t, txNew)
+	return signWithFaucet(t, s.chain.chainConfig, txNew)
 }
 
 func getNextTxFromChain(t *utesting.T, s *Suite) *types.Transaction {
@@ -122,6 +206,30 @@ func getNextTxFromChain(t *utesting.T, s *Suite) *types.Transaction {
 	return tx
 }
 
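+// generateTxs creates and signs numTxs transactions, returning a lookup map of their hashes alongside the txs themselves.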
+func generateTxs(t *utesting.T, s *Suite, numTxs int) (map[common.Hash]common.Hash, []*types.Transaction) {
+	txHashMap := make(map[common.Hash]common.Hash, numTxs)
+	txs := make([]*types.Transaction, numTxs)
+
+	nextTx := getNextTxFromChain(t, s)
+	gas := nextTx.Gas()
+
+	nonce = nonce + 1
+	// generate txs
+	for i := 0; i < numTxs; i++ {
+		tx := generateTx(t, s.chain.chainConfig, nonce, gas)
+		txHashMap[tx.Hash()] = tx.Hash()
+		txs[i] = tx
+		nonce = nonce + 1
+	}
+	return txHashMap, txs
+}
+
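+// generateTx creates a minimal value transfer with the given nonce and gas, signed by the faucet account.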
+func generateTx(t *utesting.T, chainConfig *params.ChainConfig, nonce uint64, gas uint64) *types.Transaction {
+	var to common.Address
+	tx := types.NewTransaction(nonce, to, big.NewInt(1), gas, big.NewInt(1), []byte{})
+	return signWithFaucet(t, chainConfig, tx)
+}
+
 func getOldTxFromChain(t *utesting.T, s *Suite) *types.Transaction {
 	var tx *types.Transaction
 	for _, blocks := range s.fullChain.blocks[:s.chain.Len()-1] {
@@ -144,7 +252,7 @@ func invalidNonceTx(t *utesting.T, s *Suite) *types.Transaction {
 		to = *tx.To()
 	}
 	txNew := types.NewTransaction(tx.Nonce()-2, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data())
-	return signWithFaucet(t, txNew)
+	return signWithFaucet(t, s.chain.chainConfig, txNew)
 }
 
 func hugeAmount(t *utesting.T, s *Suite) *types.Transaction {
@@ -155,7 +263,7 @@ func hugeAmount(t *utesting.T, s *Suite) *types.Transaction {
 		to = *tx.To()
 	}
 	txNew := types.NewTransaction(tx.Nonce(), to, amount, tx.Gas(), tx.GasPrice(), tx.Data())
-	return signWithFaucet(t, txNew)
+	return signWithFaucet(t, s.chain.chainConfig, txNew)
 }
 
 func hugeGasPrice(t *utesting.T, s *Suite) *types.Transaction {
@@ -166,7 +274,7 @@ func hugeGasPrice(t *utesting.T, s *Suite) *types.Transaction {
 		to = *tx.To()
 	}
 	txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), gasPrice, tx.Data())
-	return signWithFaucet(t, txNew)
+	return signWithFaucet(t, s.chain.chainConfig, txNew)
 }
 
 func hugeData(t *utesting.T, s *Suite) *types.Transaction {
@@ -176,11 +284,11 @@ func hugeData(t *utesting.T, s *Suite) *types.Transaction {
 		to = *tx.To()
 	}
 	txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), tx.GasPrice(), largeBuffer(2))
-	return signWithFaucet(t, txNew)
+	return signWithFaucet(t, s.chain.chainConfig, txNew)
 }
 
-func signWithFaucet(t *utesting.T, tx *types.Transaction) *types.Transaction {
-	signer := types.HomesteadSigner{}
+func signWithFaucet(t *utesting.T, chainConfig *params.ChainConfig, tx *types.Transaction) *types.Transaction {
+	signer := types.LatestSigner(chainConfig)
 	signedTx, err := types.SignTx(tx, signer, faucetKey)
 	if err != nil {
 		t.Fatalf("could not sign tx: %v\n", err)

+ 22 - 6
cmd/devp2p/internal/ethtest/types.go

@@ -120,6 +120,14 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket
 
 func (nb NewPooledTransactionHashes) Code() int { return 24 }
 
+type GetPooledTransactions eth.GetPooledTransactionsPacket
+
+func (gpt GetPooledTransactions) Code() int { return 25 }
+
+type PooledTransactions eth.PooledTransactionsPacket
+
+func (pt PooledTransactions) Code() int { return 26 }
+
 // Conn represents an individual connection with a peer
 type Conn struct {
 	*rlpx.Conn
@@ -163,6 +171,10 @@ func (c *Conn) Read() Message {
 		msg = new(Transactions)
 	case (NewPooledTransactionHashes{}).Code():
 		msg = new(NewPooledTransactionHashes)
+	case (GetPooledTransactions{}.Code()):
+		msg = new(GetPooledTransactions)
+	case (PooledTransactions{}.Code()):
+		msg = new(PooledTransactions)
 	default:
 		return errorf("invalid message code: %d", code)
 	}
@@ -178,8 +190,7 @@ func (c *Conn) Read() Message {
 func (c *Conn) ReadAndServe(chain *Chain, timeout time.Duration) Message {
 	start := time.Now()
 	for time.Since(start) < timeout {
-		timeout := time.Now().Add(10 * time.Second)
-		c.SetReadDeadline(timeout)
+		c.SetReadDeadline(time.Now().Add(5 * time.Second))
 		switch msg := c.Read().(type) {
 		case *Ping:
 			c.Write(&Pong{})
@@ -323,8 +334,11 @@ loop:
 func (c *Conn) waitForBlock(block *types.Block) error {
 	defer c.SetReadDeadline(time.Time{})
 
-	timeout := time.Now().Add(20 * time.Second)
-	c.SetReadDeadline(timeout)
+	c.SetReadDeadline(time.Now().Add(20 * time.Second))
+	// note: if the node has not yet imported the block, it will respond
+	// to the GetBlockHeaders request with an empty BlockHeaders response,
+	// so the GetBlockHeaders request must be sent again until the BlockHeaders
+	// response contains the desired header.
 	for {
 		req := &GetBlockHeaders{Origin: eth.HashOrNumber{Hash: block.Hash()}, Amount: 1}
 		if err := c.Write(req); err != nil {
@@ -332,8 +346,10 @@ func (c *Conn) waitForBlock(block *types.Block) error {
 		}
 		switch msg := c.Read().(type) {
 		case *BlockHeaders:
-			if len(*msg) > 0 {
-				return nil
+			for _, header := range *msg {
+				if header.Number.Uint64() == block.NumberU64() {
+					return nil
+				}
 			}
 			time.Sleep(100 * time.Millisecond)
 		default:

+ 27 - 1
cmd/devp2p/nodeset.go

@@ -71,6 +71,7 @@ func writeNodesJSON(file string, nodes nodeSet) {
 	}
 }
 
+// nodes returns the node records contained in the set.
 func (ns nodeSet) nodes() []*enode.Node {
 	result := make([]*enode.Node, 0, len(ns))
 	for _, n := range ns {
@@ -83,12 +84,37 @@ func (ns nodeSet) nodes() []*enode.Node {
 	return result
 }
 
+// add ensures the given nodes are present in the set.
 func (ns nodeSet) add(nodes ...*enode.Node) {
 	for _, n := range nodes {
-		ns[n.ID()] = nodeJSON{Seq: n.Seq(), N: n}
+		v := ns[n.ID()]
+		v.N = n
+		v.Seq = n.Seq()
+		ns[n.ID()] = v
 	}
 }
 
+// topN returns the top n nodes by score as a new set.
+func (ns nodeSet) topN(n int) nodeSet {
+	if n >= len(ns) {
+		return ns
+	}
+
+	byscore := make([]nodeJSON, 0, len(ns))
+	for _, v := range ns {
+		byscore = append(byscore, v)
+	}
+	sort.Slice(byscore, func(i, j int) bool {
+		return byscore[i].Score > byscore[j].Score
+	})
+	result := make(nodeSet, n)
+	for _, v := range byscore[:n] {
+		result[v.N.ID()] = v
+	}
+	return result
+}
+
+// verify performs integrity checks on the node set.
 func (ns nodeSet) verify() error {
 	for id, n := range ns {
 		if n.N.ID() != id {

+ 70 - 1
cmd/devp2p/nodesetcmd.go

@@ -17,8 +17,12 @@
 package main
 
 import (
+	"errors"
 	"fmt"
 	"net"
+	"sort"
+	"strconv"
+	"strings"
 	"time"
 
 	"github.com/ethereum/go-ethereum/core/forkid"
@@ -60,25 +64,64 @@ func nodesetInfo(ctx *cli.Context) error {
 
 	ns := loadNodesJSON(ctx.Args().First())
 	fmt.Printf("Set contains %d nodes.\n", len(ns))
+	showAttributeCounts(ns)
 	return nil
 }
 
+// showAttributeCounts prints the distribution of ENR attributes in a node set.
+func showAttributeCounts(ns nodeSet) {
+	attrcount := make(map[string]int)
+	var attrlist []interface{}
+	for _, n := range ns {
+		r := n.N.Record()
+		attrlist = r.AppendElements(attrlist[:0])[1:]
+		for i := 0; i < len(attrlist); i += 2 {
+			key := attrlist[i].(string)
+			attrcount[key]++
+		}
+	}
+
+	var keys []string
+	var maxlength int
+	for key := range attrcount {
+		keys = append(keys, key)
+		if len(key) > maxlength {
+			maxlength = len(key)
+		}
+	}
+	sort.Strings(keys)
+	fmt.Println("ENR attribute counts:")
+	for _, key := range keys {
+		fmt.Printf("%s%s: %d\n", strings.Repeat(" ", maxlength-len(key)+1), key, attrcount[key])
+	}
+}
+
 func nodesetFilter(ctx *cli.Context) error {
 	if ctx.NArg() < 1 {
 		return fmt.Errorf("need nodes file as argument")
 	}
-	ns := loadNodesJSON(ctx.Args().First())
+	// Parse -limit.
+	limit, err := parseFilterLimit(ctx.Args().Tail())
+	if err != nil {
+		return err
+	}
+	// Parse the filters.
 	filter, err := andFilter(ctx.Args().Tail())
 	if err != nil {
 		return err
 	}
 
+	// Load nodes and apply filters.
+	ns := loadNodesJSON(ctx.Args().First())
 	result := make(nodeSet)
 	for id, n := range ns {
 		if filter(n) {
 			result[id] = n
 		}
 	}
+	if limit >= 0 {
+		result = result.topN(limit)
+	}
 	writeNodesJSON("-", result)
 	return nil
 }
@@ -91,6 +134,7 @@ type nodeFilterC struct {
 }
 
 var filterFlags = map[string]nodeFilterC{
+	"-limit":       {1, trueFilter}, // needed to skip over -limit
 	"-ip":          {1, ipFilter},
 	"-min-age":     {1, minAgeFilter},
 	"-eth-network": {1, ethFilter},
@@ -98,6 +142,7 @@ var filterFlags = map[string]nodeFilterC{
 	"-snap":        {0, snapFilter},
 }
 
+// parseFilters parses nodeFilters from args.
 func parseFilters(args []string) ([]nodeFilter, error) {
 	var filters []nodeFilter
 	for len(args) > 0 {
@@ -118,6 +163,26 @@ func parseFilters(args []string) ([]nodeFilter, error) {
 	return filters, nil
 }
 
+// parseFilterLimit parses the -limit option in args. It returns -1 if there is no limit.
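+// e.g. "devp2p nodeset filter nodes.json -eth-network mainnet -limit 10" keeps only the 10 best-scored matching nodes.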
+func parseFilterLimit(args []string) (int, error) {
+	limit := -1
+	for i, arg := range args {
+		if arg == "-limit" {
+			if i == len(args)-1 {
+				return -1, errors.New("-limit requires an argument")
+			}
+			n, err := strconv.Atoi(args[i+1])
+			if err != nil {
+				return -1, fmt.Errorf("invalid -limit %q", args[i+1])
+			}
+			limit = n
+		}
+	}
+	return limit, nil
+}
+
+// andFilter parses node filters in args and returns a single filter that requires all
+// of them to match.
 func andFilter(args []string) (nodeFilter, error) {
 	checks, err := parseFilters(args)
 	if err != nil {
@@ -134,6 +199,10 @@ func andFilter(args []string) (nodeFilter, error) {
 	return f, nil
 }
 
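+// trueFilter accepts every node; it exists so the -limit flag can be parsed like a filter.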
+func trueFilter(args []string) (nodeFilter, error) {
+	return func(n nodeJSON) bool { return true }, nil
+}
+
 func ipFilter(args []string) (nodeFilter, error) {
 	_, cidr, err := net.ParseCIDR(args[0])
 	if err != nil {

+ 3 - 3
cmd/evm/README.md

@@ -256,9 +256,9 @@ Error code: 4
 Another thing that can be done, is to chain invocations:
 ```
 ./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json
-INFO [01-21|22:41:22.963] rejected tx                              index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
-INFO [01-21|22:41:22.966] rejected tx                              index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
-INFO [01-21|22:41:22.967] rejected tx                              index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
+INFO [01-21|22:41:22.963] rejected tx                              index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
+INFO [01-21|22:41:22.966] rejected tx                              index=0 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
+INFO [01-21|22:41:22.967] rejected tx                              index=1 hash=0557ba..18d673 from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
 
 ```
 What happened here, is that we first applied two identical transactions, so the second one was rejected. 

+ 3 - 3
cmd/evm/testdata/8/readme.md

@@ -56,8 +56,8 @@ dir=./testdata/8 \
 If we try to execute it on older rules: 
 ```
 dir=./testdata/8 && ./evm t8n --state.fork=Istanbul --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json 
-INFO [01-21|23:21:51.265] rejected tx                              index=0 hash="d2818d…6ab3da" error="tx type not supported"
-INFO [01-21|23:21:51.265] rejected tx                              index=1 hash="26ea00…81c01b" from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0"
-INFO [01-21|23:21:51.265] rejected tx                              index=2 hash="698d01…369cee" error="tx type not supported"
+INFO [01-21|23:21:51.265] rejected tx                              index=0 hash=d2818d..6ab3da error="tx type not supported"
+INFO [01-21|23:21:51.265] rejected tx                              index=1 hash=26ea00..81c01b from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0"
+INFO [01-21|23:21:51.265] rejected tx                              index=2 hash=698d01..369cee error="tx type not supported"
 ```
 Number `1` and `3` are not applicable, and therefore number `2` has wrong nonce. 

+ 23 - 7
cmd/faucet/faucet.go

@@ -90,6 +90,9 @@ var (
 	fixGasPrice        = flag.Int64("faucet.fixedprice", 0, "Will use fixed gas price if specified")
 	twitterTokenFlag   = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API")
 	twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API")
+
+	goerliFlag  = flag.Bool("goerli", false, "Initializes the faucet with Görli network config")
+	rinkebyFlag = flag.Bool("rinkeby", false, "Initializes the faucet with Rinkeby network config")
 )
 
 var (
@@ -167,13 +170,9 @@ func main() {
 		log.Crit("Failed to render the faucet template", "err", err)
 	}
 	// Load and parse the genesis block requested by the user
-	blob, err := ioutil.ReadFile(*genesisFlag)
+	genesis, err := getGenesis(genesisFlag, *goerliFlag, *rinkebyFlag)
 	if err != nil {
-		log.Crit("Failed to read genesis block contents", "genesis", *genesisFlag, "err", err)
-	}
-	genesis := new(core.Genesis)
-	if err = json.Unmarshal(blob, genesis); err != nil {
-		log.Crit("Failed to parse genesis block json", "err", err)
+		log.Crit("Failed to parse genesis config", "err", err)
 	}
 	// Convert the bootnodes to internal enode representations
 	var enodes []*enode.Node
@@ -185,7 +184,8 @@ func main() {
 		}
 	}
 	// Load up the account key and decrypt its password
-	if blob, err = ioutil.ReadFile(*accPassFlag); err != nil {
+	blob, err := ioutil.ReadFile(*accPassFlag)
+	if err != nil {
 		log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
 	}
 	pass := strings.TrimSuffix(string(blob), "\n")
@@ -945,3 +945,19 @@ func authNoAuth(url string) (string, string, common.Address, error) {
 	}
 	return address.Hex() + "@noauth", "", address, nil
 }
+
+// getGenesis returns a genesis based on input args
+func getGenesis(genesisFlag *string, goerliFlag bool, rinkebyFlag bool) (*core.Genesis, error) {
+	switch {
+	case genesisFlag != nil && *genesisFlag != "":
+		var genesis core.Genesis
+		err := common.LoadJSON(*genesisFlag, &genesis)
+		return &genesis, err
+	case goerliFlag:
+		return core.DefaultGoerliGenesisBlock(), nil
+	case rinkebyFlag:
+		return core.DefaultRinkebyGenesisBlock(), nil
+	default:
+		return nil, fmt.Errorf("no genesis flag provided")
+	}
+}

+ 12 - 1
cmd/geth/config.go

@@ -28,6 +28,7 @@ import (
 	"gopkg.in/urfave/cli.v1"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/eth/catalyst"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/metrics"
@@ -143,7 +144,17 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 	if ctx.GlobalIsSet(utils.OverrideBerlinFlag.Name) {
 		cfg.Eth.OverrideBerlin = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideBerlinFlag.Name))
 	}
-	backend := utils.RegisterEthService(stack, &cfg.Eth)
+	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
+
+	// Configure catalyst.
+	if ctx.GlobalBool(utils.CatalystFlag.Name) {
+		if eth == nil {
+			utils.Fatalf("Catalyst does not work in light client mode.")
+		}
+		if err := catalyst.Register(stack, eth); err != nil {
+			utils.Fatalf("%v", err)
+		}
+	}
 
 	// Configure GraphQL if requested
 	if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {

+ 58 - 0
cmd/geth/dbcmd.go

@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"sort"
 	"strconv"
 	"time"
 
@@ -60,6 +61,7 @@ Remove blockchain and state databases`,
 			dbDeleteCmd,
 			dbPutCmd,
 			dbGetSlotsCmd,
+			dbDumpFreezerIndex,
 		},
 	}
 	dbInspectCmd = cli.Command{
@@ -177,6 +179,22 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		},
 		Description: "This command looks up the specified database key from the database.",
 	}
+	dbDumpFreezerIndex = cli.Command{
+		Action:    utils.MigrateFlags(freezerInspect),
+		Name:      "freezer-index",
+		Usage:     "Dump out the index of a given freezer type",
+		ArgsUsage: "<type> <start (int)> <end (int)>",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.SyncModeFlag,
+			utils.MainnetFlag,
+			utils.RopstenFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+			utils.YoloV3Flag,
+		},
+		Description: "This command displays information about the freezer index.",
+	}
 )
 
 func removeDB(ctx *cli.Context) error {
@@ -449,3 +467,43 @@ func dbDumpTrie(ctx *cli.Context) error {
 	}
 	return it.Err
 }
+
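+// freezerInspect dumps the index entries of the given freezer table between start and end.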
+func freezerInspect(ctx *cli.Context) error {
+	var (
+		start, end    int64
+		disableSnappy bool
+		err           error
+	)
+	if ctx.NArg() < 3 {
+		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
+	}
+	kind := ctx.Args().Get(0)
+	if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
+		var options []string
+		for opt := range rawdb.FreezerNoSnappy {
+			options = append(options, opt)
+		}
+		sort.Strings(options)
+		return fmt.Errorf("could not read freezer-type '%v', available options: %v", kind, options)
+	} else {
+		disableSnappy = noSnap
+	}
+	if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
+		log.Info("Could not read start-param", "error", err)
+		return err
+	}
+	if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
+		log.Info("Could not read end-param", "error", err)
+		return err
+	}
+	stack, _ := makeConfigNode(ctx)
+	defer stack.Close()
+	path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
+	log.Info("Opening freezer", "location", path, "name", kind)
+	if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil {
+		return err
+	} else {
+		f.DumpIndex(start, end)
+	}
+	return nil
+}

+ 1 - 0
cmd/geth/main.go

@@ -154,6 +154,7 @@ var (
 		utils.EVMInterpreterFlag,
 		utils.MinerNotifyFullFlag,
 		configFileFlag,
+		utils.CatalystFlag,
 	}
 
 	rpcFlags = []cli.Flag{

+ 19 - 19
cmd/geth/snapshot.go

@@ -155,7 +155,7 @@ func pruneState(ctx *cli.Context) error {
 	chaindb := utils.MakeChainDatabase(ctx, stack, false)
 	pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name))
 	if err != nil {
-		log.Error("Failed to open snapshot tree", "error", err)
+		log.Error("Failed to open snapshot tree", "err", err)
 		return err
 	}
 	if ctx.NArg() > 1 {
@@ -166,12 +166,12 @@ func pruneState(ctx *cli.Context) error {
 	if ctx.NArg() == 1 {
 		targetRoot, err = parseRoot(ctx.Args()[0])
 		if err != nil {
-			log.Error("Failed to resolve state root", "error", err)
+			log.Error("Failed to resolve state root", "err", err)
 			return err
 		}
 	}
 	if err = pruner.Prune(targetRoot); err != nil {
-		log.Error("Failed to prune state", "error", err)
+		log.Error("Failed to prune state", "err", err)
 		return err
 	}
 	return nil
@@ -189,7 +189,7 @@ func verifyState(ctx *cli.Context) error {
 	}
 	snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
 	if err != nil {
-		log.Error("Failed to open snapshot tree", "error", err)
+		log.Error("Failed to open snapshot tree", "err", err)
 		return err
 	}
 	if ctx.NArg() > 1 {
@@ -200,15 +200,15 @@ func verifyState(ctx *cli.Context) error {
 	if ctx.NArg() == 1 {
 		root, err = parseRoot(ctx.Args()[0])
 		if err != nil {
-			log.Error("Failed to resolve state root", "error", err)
+			log.Error("Failed to resolve state root", "err", err)
 			return err
 		}
 	}
 	if err := snaptree.Verify(root); err != nil {
-		log.Error("Failed to verfiy state", "error", err)
+		log.Error("Failed to verify state", "root", root, "err", err)
 		return err
 	}
-	log.Info("Verified the state")
+	log.Info("Verified the state", "root", root)
 	return nil
 }
 
@@ -236,7 +236,7 @@ func traverseState(ctx *cli.Context) error {
 	if ctx.NArg() == 1 {
 		root, err = parseRoot(ctx.Args()[0])
 		if err != nil {
-			log.Error("Failed to resolve state root", "error", err)
+			log.Error("Failed to resolve state root", "err", err)
 			return err
 		}
 		log.Info("Start traversing the state", "root", root)
@@ -247,7 +247,7 @@ func traverseState(ctx *cli.Context) error {
 	triedb := trie.NewDatabase(chaindb)
 	t, err := trie.NewSecure(root, triedb)
 	if err != nil {
-		log.Error("Failed to open trie", "root", root, "error", err)
+		log.Error("Failed to open trie", "root", root, "err", err)
 		return err
 	}
 	var (
@@ -262,13 +262,13 @@ func traverseState(ctx *cli.Context) error {
 		accounts += 1
 		var acc state.Account
 		if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil {
-			log.Error("Invalid account encountered during traversal", "error", err)
+			log.Error("Invalid account encountered during traversal", "err", err)
 			return err
 		}
 		if acc.Root != emptyRoot {
 			storageTrie, err := trie.NewSecure(acc.Root, triedb)
 			if err != nil {
-				log.Error("Failed to open storage trie", "root", acc.Root, "error", err)
+				log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
 				return err
 			}
 			storageIter := trie.NewIterator(storageTrie.NodeIterator(nil))
@@ -276,7 +276,7 @@ func traverseState(ctx *cli.Context) error {
 				slots += 1
 			}
 			if storageIter.Err != nil {
-				log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIter.Err)
+				log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Err)
 				return storageIter.Err
 			}
 		}
@@ -294,7 +294,7 @@ func traverseState(ctx *cli.Context) error {
 		}
 	}
 	if accIter.Err != nil {
-		log.Error("Failed to traverse state trie", "root", root, "error", accIter.Err)
+		log.Error("Failed to traverse state trie", "root", root, "err", accIter.Err)
 		return accIter.Err
 	}
 	log.Info("State is complete", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
@@ -326,7 +326,7 @@ func traverseRawState(ctx *cli.Context) error {
 	if ctx.NArg() == 1 {
 		root, err = parseRoot(ctx.Args()[0])
 		if err != nil {
-			log.Error("Failed to resolve state root", "error", err)
+			log.Error("Failed to resolve state root", "err", err)
 			return err
 		}
 		log.Info("Start traversing the state", "root", root)
@@ -337,7 +337,7 @@ func traverseRawState(ctx *cli.Context) error {
 	triedb := trie.NewDatabase(chaindb)
 	t, err := trie.NewSecure(root, triedb)
 	if err != nil {
-		log.Error("Failed to open trie", "root", root, "error", err)
+		log.Error("Failed to open trie", "root", root, "err", err)
 		return err
 	}
 	var (
@@ -368,13 +368,13 @@ func traverseRawState(ctx *cli.Context) error {
 			accounts += 1
 			var acc state.Account
 			if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
-				log.Error("Invalid account encountered during traversal", "error", err)
+				log.Error("Invalid account encountered during traversal", "err", err)
 				return errors.New("invalid account")
 			}
 			if acc.Root != emptyRoot {
 				storageTrie, err := trie.NewSecure(acc.Root, triedb)
 				if err != nil {
-					log.Error("Failed to open storage trie", "root", acc.Root, "error", err)
+					log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
 					return errors.New("missing storage trie")
 				}
 				storageIter := storageTrie.NodeIterator(nil)
@@ -397,7 +397,7 @@ func traverseRawState(ctx *cli.Context) error {
 					}
 				}
 				if storageIter.Error() != nil {
-					log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIter.Error())
+					log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Error())
 					return storageIter.Error()
 				}
 			}
@@ -416,7 +416,7 @@ func traverseRawState(ctx *cli.Context) error {
 		}
 	}
 	if accIter.Error() != nil {
-		log.Error("Failed to traverse state trie", "root", root, "error", accIter.Error())
+		log.Error("Failed to traverse state trie", "root", root, "err", accIter.Error())
 		return accIter.Error()
 	}
 	log.Info("State is complete", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))

+ 1 - 0
cmd/geth/usage.go

@@ -238,6 +238,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.SnapshotFlag,
 			utils.BloomFilterSizeFlag,
 			cli.HelpFlag,
+			utils.CatalystFlag,
 		},
 	},
 }

+ 38 - 24
cmd/puppeth/ssh.go

@@ -30,6 +30,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/log"
 	"golang.org/x/crypto/ssh"
+	"golang.org/x/crypto/ssh/agent"
 	"golang.org/x/crypto/ssh/terminal"
 )
 
@@ -43,6 +44,8 @@ type sshClient struct {
 	logger  log.Logger
 }
 
+const EnvSSHAuthSock = "SSH_AUTH_SOCK"
+
 // dial establishes an SSH connection to a remote node using the current user and
 // the user's configured private RSA key. If that fails, it falls back to
 // password authentication. server can be a string like user:identity@server:port.
@@ -79,38 +82,49 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
 	if username == "" {
 		username = user.Username
 	}
-	// Configure the supported authentication methods (private key and password)
-	var auths []ssh.AuthMethod
 
-	path := filepath.Join(user.HomeDir, ".ssh", identity)
-	if buf, err := ioutil.ReadFile(path); err != nil {
-		log.Warn("No SSH key, falling back to passwords", "path", path, "err", err)
+	// Configure the supported authentication methods (ssh agent, private key and password)
+	var (
+		auths []ssh.AuthMethod
+		conn  net.Conn
+	)
+	if conn, err = net.Dial("unix", os.Getenv(EnvSSHAuthSock)); err != nil {
+		log.Warn("Unable to dial SSH agent, falling back to private keys", "err", err)
 	} else {
-		key, err := ssh.ParsePrivateKey(buf)
-		if err != nil {
-			fmt.Printf("What's the decryption password for %s? (won't be echoed)\n>", path)
-			blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
-			fmt.Println()
-			if err != nil {
-				log.Warn("Couldn't read password", "err", err)
-			}
-			key, err := ssh.ParsePrivateKeyWithPassphrase(buf, blob)
+		client := agent.NewClient(conn)
+		auths = append(auths, ssh.PublicKeysCallback(client.Signers))
+	}
+	if err != nil {
+		path := filepath.Join(user.HomeDir, ".ssh", identity)
+		if buf, err := ioutil.ReadFile(path); err != nil {
+			log.Warn("No SSH key, falling back to passwords", "path", path, "err", err)
+		} else {
+			key, err := ssh.ParsePrivateKey(buf)
 			if err != nil {
-				log.Warn("Failed to decrypt SSH key, falling back to passwords", "path", path, "err", err)
+				fmt.Printf("What's the decryption password for %s? (won't be echoed)\n>", path)
+				blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
+				fmt.Println()
+				if err != nil {
+					log.Warn("Couldn't read password", "err", err)
+				}
+				key, err := ssh.ParsePrivateKeyWithPassphrase(buf, blob)
+				if err != nil {
+					log.Warn("Failed to decrypt SSH key, falling back to passwords", "path", path, "err", err)
+				} else {
+					auths = append(auths, ssh.PublicKeys(key))
+				}
 			} else {
 				auths = append(auths, ssh.PublicKeys(key))
 			}
-		} else {
-			auths = append(auths, ssh.PublicKeys(key))
 		}
-	}
-	auths = append(auths, ssh.PasswordCallback(func() (string, error) {
-		fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
-		blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
+		auths = append(auths, ssh.PasswordCallback(func() (string, error) {
+			fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", username, server)
+			blob, err := terminal.ReadPassword(int(os.Stdin.Fd()))
 
-		fmt.Println()
-		return string(blob), err
-	}))
+			fmt.Println()
+			return string(blob), err
+		}))
+	}
 	// Resolve the IP address of the remote server
 	addr, err := net.LookupHost(hostname)
 	if err != nil {
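
The agent branch above only activates when SSH_AUTH_SOCK is dialable; otherwise err stays non-nil and the private-key/password path runs. A minimal standalone sketch of the same agent-first flow (the user, host and the permissive host-key callback are placeholder assumptions, not puppeth's behaviour):

package main

import (
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// Dial the unix socket exposed by the user's running SSH agent.
	sock, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal("no agent available, would fall back to keys/passwords: ", err)
	}
	config := &ssh.ClientConfig{
		User: "devop", // placeholder user
		Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(agent.NewClient(sock).Signers)},
		// A real client must verify the host key; this permissive callback
		// is for illustration only.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	conn, err := ssh.Dial("tcp", "example.com:22", config) // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("authenticated via agent")
}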

+ 15 - 11
cmd/utils/flags.go

@@ -794,6 +794,11 @@ var (
 		Usage: "the p2p port of the nodes in the network",
 		Value: 30311,
 	}
+
+	CatalystFlag = cli.BoolFlag{
+		Name:  "catalyst",
+		Usage: "Catalyst mode (eth2 integration testing)",
+	}
 )
 
 // MakeDataDir retrieves the currently requested data directory, terminating
@@ -1225,10 +1230,11 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
 		cfg.NetRestrict = list
 	}
 
-	if ctx.GlobalBool(DeveloperFlag.Name) {
+	if ctx.GlobalBool(DeveloperFlag.Name) || ctx.GlobalBool(CatalystFlag.Name) {
 		// --dev mode can't use p2p networking.
 		cfg.MaxPeers = 0
-		cfg.ListenAddr = ":0"
+		cfg.ListenAddr = ""
+		cfg.NoDial = true
 		cfg.NoDiscovery = true
 		cfg.DiscoveryV5 = false
 	}
@@ -1710,7 +1716,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		if ctx.GlobalIsSet(DataDirFlag.Name) {
 			// Check if we have an already initialized chain and fall back to
 			// that if so. Otherwise we need to generate a new genesis spec.
-			chaindb := MakeChainDatabase(ctx, stack, true)
+			chaindb := MakeChainDatabase(ctx, stack, false) // TODO (MariusVanDerWijden) make this read only
 			if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
 				cfg.Genesis = nil // fallback to db content
 			}
@@ -1738,23 +1744,21 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
 	}
 	if url := params.KnownDNSNetwork(genesis, protocol); url != "" {
 		cfg.EthDiscoveryURLs = []string{url}
-	}
-	if cfg.SyncMode == downloader.SnapSync {
-		if url := params.KnownDNSNetwork(genesis, "snap"); url != "" {
-			cfg.SnapDiscoveryURLs = []string{url}
-		}
+		cfg.SnapDiscoveryURLs = cfg.EthDiscoveryURLs
 	}
 }
 
 // RegisterEthService adds an Ethereum client to the stack.
-func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) ethapi.Backend {
+// The second return value is the full node instance, which may be nil if the
+// node is running as a light client.
+func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
 	if cfg.SyncMode == downloader.LightSync {
 		backend, err := les.New(stack, cfg)
 		if err != nil {
 			Fatalf("Failed to register the Ethereum service: %v", err)
 		}
 		stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
-		return backend.ApiBackend
+		return backend.ApiBackend, nil
 	}
 	backend, err := eth.New(stack, cfg)
 	if err != nil {
@@ -1767,7 +1771,7 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) ethapi.Backend
 		}
 	}
 	stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
-	return backend.APIBackend
+	return backend.APIBackend, backend
 }
 
 // RegisterEthStatsService configures the Ethereum Stats daemon and adds it to
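
With the second return value, callers that need the concrete node handle (notably the new catalyst service) no longer have to dig it back out of the stack. A hedged sketch of the calling convention, assuming a catalyst.Register(stack, ethBackend) entry point in the newly added eth/catalyst package:

package example

import (
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/eth/catalyst"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/node"
)

// startEth wires up the Ethereum service; the *eth.Ethereum handle is nil for
// light clients, so the catalyst hookup must be guarded.
func startEth(stack *node.Node, cfg *ethconfig.Config, catalystMode bool) {
	backend, ethBackend := utils.RegisterEthService(stack, cfg)
	_ = backend // used elsewhere for RPC wiring
	if catalystMode {
		if ethBackend == nil {
			utils.Fatalf("Catalyst does not work in light client mode.")
		}
		if err := catalyst.Register(stack, ethBackend); err != nil {
			utils.Fatalf("Failed to register the catalyst service: %v", err)
		}
	}
}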

+ 1 - 1
common/types.go

@@ -76,7 +76,7 @@ func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
 // TerminalString implements log.TerminalStringer, formatting a string for console
 // output during logging.
 func (h Hash) TerminalString() string {
-	return fmt.Sprintf("%x%x", h[:3], h[29:])
+	return fmt.Sprintf("%x..%x", h[:3], h[29:])
 }
 
 // String implements the stringer interface and is used also by the logger when
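
The inserted ".." makes the elision of the middle 26 bytes explicit instead of letting the six printed bytes read as one contiguous value:

package main

import "fmt"

func main() {
	var h [32]byte
	copy(h[:3], []byte{0xde, 0xad, 0xbe})
	copy(h[29:], []byte{0xca, 0xfe, 0x42})
	fmt.Printf("%x%x\n", h[:3], h[29:])   // old: deadbecafe42
	fmt.Printf("%x..%x\n", h[:3], h[29:]) // new: deadbe..cafe42
}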

+ 20 - 6
consensus/ethash/consensus.go

@@ -203,15 +203,23 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
 
 	number, parent := block.NumberU64()-1, block.ParentHash()
 	for i := 0; i < 7; i++ {
-		ancestor := chain.GetBlock(parent, number)
-		if ancestor == nil {
+		ancestorHeader := chain.GetHeader(parent, number)
+		if ancestorHeader == nil {
 			break
 		}
-		ancestors[ancestor.Hash()] = ancestor.Header()
-		for _, uncle := range ancestor.Uncles() {
-			uncles.Add(uncle.Hash())
+		ancestors[parent] = ancestorHeader
+		// If the ancestor doesn't have any uncles, we don't have to iterate them
+		if ancestorHeader.UncleHash != types.EmptyUncleHash {
+			// Need to add those uncles to the blacklist too
+			ancestor := chain.GetBlock(parent, number)
+			if ancestor == nil {
+				break
+			}
+			for _, uncle := range ancestor.Uncles() {
+				uncles.Add(uncle.Hash())
+			}
 		}
-		parent, number = ancestor.ParentHash(), number-1
+		parent, number = ancestorHeader.ParentHash, number-1
 	}
 	ancestors[block.Hash()] = block.Header()
 	uncles.Add(block.Hash())
@@ -315,6 +323,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin
 func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
 	next := new(big.Int).Add(parent.Number, big1)
 	switch {
+	case config.IsCatalyst(next):
+		return big.NewInt(1)
 	case config.IsMuirGlacier(next):
 		return calcDifficultyEip2384(time, parent)
 	case config.IsConstantinople(next):
@@ -623,6 +633,10 @@ var (
 // reward. The total reward consists of the static block reward and rewards for
 // included uncles. The coinbase of each uncle block is also rewarded.
 func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
+	// Skip block reward in catalyst mode
+	if config.IsCatalyst(header.Number) {
+		return
+	}
 	// Select the correct block reward based on chain progression
 	blockReward := FrontierBlockReward
 	if config.IsByzantium(header.Number) {
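
The header-first rewrite in VerifyUncles above works because types.EmptyUncleHash is the hash of an empty uncle list, so a cheap header-field comparison decides whether fetching the full ancestor body is worthwhile. A toy sketch of that fetch-light-first pattern (the types and fetchers are stand-ins, not the real chain interfaces):

package main

import "fmt"

type header struct{ uncleHash string }
type block struct{ uncles []string }

const emptyUncleHash = "1dcc4de8..." // stand-in for types.EmptyUncleHash

// unclesOf reads the cheap header first and only pays for the full block
// when the header proves there are uncles to collect.
func unclesOf(getHeader func() *header, getBlock func() *block) []string {
	h := getHeader()
	if h == nil || h.uncleHash == emptyUncleHash {
		return nil // empty uncle list, skip the body lookup entirely
	}
	if b := getBlock(); b != nil {
		return b.uncles
	}
	return nil
}

func main() {
	got := unclesOf(
		func() *header { return &header{uncleHash: emptyUncleHash} },
		func() *block { panic("body fetched although header said empty") },
	)
	fmt.Println(len(got)) // 0, and the expensive fetch never ran
}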

+ 7 - 6
consensus/ethash/ethash.go

@@ -112,12 +112,13 @@ func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
 	if err != nil {
 		return nil, nil, err
 	}
-	// Yay, we managed to memory map the file, here be dragons
-	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
-	header.Len /= 4
-	header.Cap /= 4
-
-	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
+	// The file is now memory-mapped. Create a []uint32 view of the file.
+	var view []uint32
+	header := (*reflect.SliceHeader)(unsafe.Pointer(&view))
+	header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&mem)).Data
+	header.Cap = len(mem) / 4
+	header.Len = header.Cap
+	return mem, view, nil
 }
 
 // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
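
The rewrite above drops the copy-and-mutate slice-header trick in favour of the generally recommended reflect.SliceHeader pattern: declare the destination slice first, then patch its header in place so it aliases the mapped bytes. The same reinterpretation in isolation (the printed values are endianness-dependent):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// uint32View reinterprets b as a []uint32 without copying, mirroring the
// memoryMapFile change above.
func uint32View(b []byte) []uint32 {
	var view []uint32
	header := (*reflect.SliceHeader)(unsafe.Pointer(&view))
	header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&b)).Data
	header.Cap = len(b) / 4
	header.Len = len(b) / 4
	return view
}

func main() {
	buf := []byte{1, 0, 0, 0, 2, 0, 0, 0}
	fmt.Println(uint32View(buf)) // [1 2] on a little-endian machine
}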

+ 35 - 75
core/blockchain.go

@@ -208,9 +208,8 @@ type BlockChain struct {
 	processor  Processor // Block transaction processor interface
 	vmConfig   vm.Config
 
-	shouldPreserve     func(*types.Block) bool        // Function used to determine whether should preserve the given block.
-	terminateInsert    func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
-	writeLegacyJournal bool                           // Testing flag used to flush the snapshot journal in legacy format.
+	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
+	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
 }
 
 // NewBlockChain returns a fully initialised block chain using information
@@ -488,7 +487,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
 
 // SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
 // that the rewind must pass the specified state root. This method is meant to be
-// used when rewiding with snapshots enabled to ensure that we go back further than
+// used when rewinding with snapshots enabled to ensure that we go back further than
 // persistent disk layer. Depending on whether the node was fast synced or full, and
 // in which state, the method will try to delete minimal data from disk whilst
 // retaining chain consistency.
@@ -635,7 +634,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
 	// Make sure that both the block as well at its state trie exists
 	block := bc.GetBlockByHash(hash)
 	if block == nil {
-		return fmt.Errorf("non existent block [%x]", hash[:4])
+		return fmt.Errorf("non existent block [%x..]", hash[:4])
 	}
 	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
 		return err
@@ -646,7 +645,8 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
 	headBlockGauge.Update(int64(block.NumberU64()))
 	bc.chainmu.Unlock()
 
-	// Destroy any existing state snapshot and regenerate it in the background
+	// Destroy any existing state snapshot and regenerate it in the background,
+	// also resuming the normal maintenance of any previously paused snapshot.
 	if bc.snaps != nil {
 		bc.snaps.Rebuild(block.Root())
 	}
@@ -1007,14 +1007,8 @@ func (bc *BlockChain) Stop() {
 	var snapBase common.Hash
 	if bc.snaps != nil {
 		var err error
-		if bc.writeLegacyJournal {
-			if snapBase, err = bc.snaps.LegacyJournal(bc.CurrentBlock().Root()); err != nil {
-				log.Error("Failed to journal state snapshot", "err", err)
-			}
-		} else {
-			if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
-				log.Error("Failed to journal state snapshot", "err", err)
-			}
+		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
+			log.Error("Failed to journal state snapshot", "err", err)
 		}
 	}
 	// Ensure the state of a recent block is also stored to disk before exiting.
@@ -1152,7 +1146,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
 				log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
 					"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
-				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
+				return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(),
 					blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
 			}
 		}
@@ -1217,66 +1211,17 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			}
 			// Short circuit if the owner header is unknown
 			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
-				return i, fmt.Errorf("containing header #%d [%x] unknown", block.Number(), block.Hash().Bytes()[:4])
+				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
 			}
-			var (
-				start  = time.Now()
-				logged = time.Now()
-				count  int
-			)
-			// Migrate all ancient blocks. This can happen if someone upgrades from Geth
-			// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
-			// long term.
-			for {
-				// We can ignore the error here since light client won't hit this code path.
-				frozen, _ := bc.db.Ancients()
-				if frozen >= block.NumberU64() {
-					break
-				}
-				h := rawdb.ReadCanonicalHash(bc.db, frozen)
-				b := rawdb.ReadBlock(bc.db, h, frozen)
-				size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
-				count += 1
-
-				// Always keep genesis block in active database.
-				if b.NumberU64() != 0 {
-					deleted = append(deleted, &numberHash{b.NumberU64(), b.Hash()})
-				}
-				if time.Since(logged) > 8*time.Second {
-					log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
-					logged = time.Now()
-				}
-				// Don't collect too much in-memory, write it out every 100K blocks
-				if len(deleted) > 100000 {
-					// Sync the ancient store explicitly to ensure all data has been flushed to disk.
-					if err := bc.db.Sync(); err != nil {
-						return 0, err
-					}
-					// Wipe out canonical block data.
-					for _, nh := range deleted {
-						rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
-						rawdb.DeleteCanonicalHash(batch, nh.number)
-					}
-					if err := batch.Write(); err != nil {
-						return 0, err
-					}
-					batch.Reset()
-					// Wipe out side chain too.
-					for _, nh := range deleted {
-						for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
-							rawdb.DeleteBlock(batch, hash, nh.number)
-						}
-					}
-					if err := batch.Write(); err != nil {
-						return 0, err
-					}
-					batch.Reset()
-					deleted = deleted[0:]
+			if block.NumberU64() == 1 {
+				// Make sure to write the genesis into the freezer
+				if frozen, _ := bc.db.Ancients(); frozen == 0 {
+					h := rawdb.ReadCanonicalHash(bc.db, 0)
+					b := rawdb.ReadBlock(bc.db, h, 0)
+					size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, 0, bc.chainConfig), rawdb.ReadTd(bc.db, h, 0))
+					log.Info("Wrote genesis to ancients")
 				}
 			}
-			if count > 0 {
-				log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
-			}
 			// Flush data into ancient database.
 			size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
 
@@ -1361,7 +1306,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			}
 			// Short circuit if the owner header is unknown
 			if !bc.HasHeader(block.Hash(), block.NumberU64()) {
-				return i, fmt.Errorf("containing header #%d [%x] unknown", block.Number(), block.Hash().Bytes()[:4])
+				return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
 			}
 			if !skipPresenceCheck {
 				// Ignore if the entire data is already known
@@ -1692,7 +1637,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
 				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())
 
-			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
+			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, prev.NumberU64(),
 				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
 		}
 	}
@@ -1706,6 +1651,22 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 	return n, err
 }
 
+// InsertChainWithoutSealVerification works exactly the same as InsertChain,
+// except that seal verification is omitted.
+func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (int, error) {
+	bc.blockProcFeed.Send(true)
+	defer bc.blockProcFeed.Send(false)
+
+	// Pre-checks passed, start the full block imports
+	bc.wg.Add(1)
+	bc.chainmu.Lock()
+	n, err := bc.insertChain(types.Blocks([]*types.Block{block}), false)
+	bc.chainmu.Unlock()
+	bc.wg.Done()
+
+	return n, err
+}
+
 // insertChain is the internal implementation of InsertChain, which assumes that
 // 1) chains are contiguous, and 2) The chain mutex is held.
 //
@@ -2167,7 +2128,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 					l := *log
 					if removed {
 						l.Removed = true
-					} else {
 					}
 					logs = append(logs, &l)
 				}
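
InsertChainWithoutSealVerification above keeps InsertChain's feed notifications and locking but passes verifySeals=false for a single block: under catalyst the block is produced and attested by the eth2 consensus layer, so there is no proof-of-work seal to check. A hedged usage sketch (the caller-side block assembly and validation are elided):

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// importExternalBlock finalizes a block whose seal was never mined, e.g. one
// assembled on request of an external consensus layer.
func importExternalBlock(chain *core.BlockChain, block *types.Block) error {
	if _, err := chain.InsertChainWithoutSealVerification(block); err != nil {
		return fmt.Errorf("failed to insert block into chain: %v", err)
	}
	return nil
}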

+ 7 - 313
core/blockchain_snapshot_test.go

@@ -39,7 +39,6 @@ import (
 
 // snapshotTestBasic wraps the common testing fields in the snapshot tests.
 type snapshotTestBasic struct {
-	legacy        bool   // Wether write the snapshot journal in legacy format
 	chainBlocks   int    // Number of blocks to generate for the canonical chain
 	snapshotBlock uint64 // Block number of the relevant snapshot disk layer
 	commitBlock   uint64 // Block number for which to commit the state to disk
@@ -104,19 +103,13 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
 			chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
 		}
 		if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
-			if basic.legacy {
-				// Here we commit the snapshot disk root to simulate
-				// committing the legacy snapshot.
-				rawdb.WriteSnapshotRoot(db, blocks[point-1].Root())
-			} else {
-				// Flushing the entire snap tree into the disk, the
-				// relavant (a) snapshot root and (b) snapshot generator
-				// will be persisted atomically.
-				chain.snaps.Cap(blocks[point-1].Root(), 0)
-				diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
-				if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
-					t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
-				}
+			// Flush the entire snap tree into the disk; the relevant
+			// (a) snapshot root and (b) snapshot generator will be
+			// persisted atomically.
+			chain.snaps.Cap(blocks[point-1].Root(), 0)
+			diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
+			if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
+				t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
 			}
 		}
 	}
@@ -129,12 +122,6 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
 	basic.db = db
 	basic.gendb = gendb
 	basic.engine = engine
-
-	// Ugly hack, notify the chain to flush the journal in legacy format
-	// if it's requested.
-	if basic.legacy {
-		chain.writeLegacyJournal = true
-	}
 	return chain, blocks
 }
 
@@ -484,46 +471,6 @@ func TestRestartWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : G
 	test := &snapshotTest{
 		snapshotTestBasic{
-			legacy:             false,
-			chainBlocks:        8,
-			snapshotBlock:      0,
-			commitBlock:        0,
-			expCanonicalBlocks: 8,
-			expHeadHeader:      8,
-			expHeadFastBlock:   8,
-			expHeadBlock:       8,
-			expSnapshotBottom:  0, // Initial disk layer built from genesis
-		},
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests a Geth restart with valid but "legacy" snapshot. Before the shutdown,
-// all snapshot journal will be persisted correctly. In this case no snapshot
-// recovery is required.
-func TestRestartWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G
-	//
-	// SetHead(0)
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8
-	//
-	// Expected head header    : C8
-	// Expected head fast block: C8
-	// Expected head block     : C8
-	// Expected snapshot disk  : G
-	t.Skip("Legacy format testing is not supported")
-	test := &snapshotTest{
-		snapshotTestBasic{
-			legacy:             true,
 			chainBlocks:        8,
 			snapshotBlock:      0,
 			commitBlock:        0,
@@ -563,7 +510,6 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : C4
 	test := &crashSnapshotTest{
 		snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      4,
 			commitBlock:        0,
@@ -603,7 +549,6 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : C4
 	test := &crashSnapshotTest{
 		snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      4,
 			commitBlock:        2,
@@ -643,7 +588,6 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : C4
 	test := &crashSnapshotTest{
 		snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      4,
 			commitBlock:        6,
@@ -658,131 +602,6 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
 	test.teardown()
 }
 
-// Tests a Geth was crashed and restarts with a broken and "legacy format"
-// snapshot. In this case the entire legacy snapshot should be discared
-// and rebuild from the new chain head. The new head here refers to the
-// genesis because there is no committed point.
-func TestNoCommitCrashWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G, C4
-	//
-	// CRASH
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8
-	//
-	// Expected head header    : C8
-	// Expected head fast block: C8
-	// Expected head block     : G
-	// Expected snapshot disk  : G
-	t.Skip("Legacy format testing is not supported")
-	test := &crashSnapshotTest{
-		snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      4,
-			commitBlock:        0,
-			expCanonicalBlocks: 8,
-			expHeadHeader:      8,
-			expHeadFastBlock:   8,
-			expHeadBlock:       0,
-			expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
-		},
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests a Geth was crashed and restarts with a broken and "legacy format"
-// snapshot. In this case the entire legacy snapshot should be discared
-// and rebuild from the new chain head. The new head here refers to the
-// block-2 because it's committed into the disk.
-func TestLowCommitCrashWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G, C2
-	// Snapshot: G, C4
-	//
-	// CRASH
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8
-	//
-	// Expected head header    : C8
-	// Expected head fast block: C8
-	// Expected head block     : C2
-	// Expected snapshot disk  : C2
-	t.Skip("Legacy format testing is not supported")
-	test := &crashSnapshotTest{
-		snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      4,
-			commitBlock:        2,
-			expCanonicalBlocks: 8,
-			expHeadHeader:      8,
-			expHeadFastBlock:   8,
-			expHeadBlock:       2,
-			expSnapshotBottom:  2, // Rebuilt snapshot from the latest HEAD
-		},
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests a Geth was crashed and restarts with a broken and "legacy format"
-// snapshot. In this case the entire legacy snapshot should be discared
-// and rebuild from the new chain head.
-//
-// The new head here refers to the the genesis, the reason is:
-//   - the state of block-6 is committed into the disk
-//   - the legacy disk layer of block-4 is committed into the disk
-//   - the head is rewound the genesis in order to find an available
-//     state lower than disk layer
-func TestHighCommitCrashWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G, C6
-	// Snapshot: G, C4
-	//
-	// CRASH
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8
-	//
-	// Expected head header    : C8
-	// Expected head fast block: C8
-	// Expected head block     : G
-	// Expected snapshot disk  : G
-	t.Skip("Legacy format testing is not supported")
-	test := &crashSnapshotTest{
-		snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      4,
-			commitBlock:        6,
-			expCanonicalBlocks: 8,
-			expHeadHeader:      8,
-			expHeadFastBlock:   8,
-			expHeadBlock:       0,
-			expSnapshotBottom:  0, // Rebuilt snapshot from the latest HEAD(genesis)
-		},
-	}
-	test.test(t)
-	test.teardown()
-}
-
 // Tests a Geth node that was running with snapshot enabled, then restarts
 // without enabling snapshot, and after that re-enables the snapshot again.
 // In this case the snapshot should be rebuilt with the latest chain head.
@@ -806,47 +625,6 @@ func TestGappedNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : C10
 	test := &gappedSnapshotTest{
 		snapshotTestBasic: snapshotTestBasic{
-			legacy:             false,
-			chainBlocks:        8,
-			snapshotBlock:      0,
-			commitBlock:        0,
-			expCanonicalBlocks: 10,
-			expHeadHeader:      10,
-			expHeadFastBlock:   10,
-			expHeadBlock:       10,
-			expSnapshotBottom:  10, // Rebuilt snapshot from the latest HEAD
-		},
-		gapped: 2,
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests a Geth was running with leagcy snapshot enabled. Then restarts
-// without enabling snapshot and after that re-enable the snapshot again.
-// In this case the snapshot should be rebuilt with latest chain head.
-func TestGappedLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G
-	//
-	// SetHead(0)
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
-	//
-	// Expected head header    : C10
-	// Expected head fast block: C10
-	// Expected head block     : C10
-	// Expected snapshot disk  : C10
-	t.Skip("Legacy format testing is not supported")
-	test := &gappedSnapshotTest{
-		snapshotTestBasic: snapshotTestBasic{
-			legacy:             true,
 			chainBlocks:        8,
 			snapshotBlock:      0,
 			commitBlock:        0,
@@ -885,7 +663,6 @@ func TestSetHeadWithNewSnapshot(t *testing.T) {
 	// Expected snapshot disk  : G
 	test := &setHeadSnapshotTest{
 		snapshotTestBasic: snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      0,
 			commitBlock:        0,
@@ -901,88 +678,6 @@ func TestSetHeadWithNewSnapshot(t *testing.T) {
 	test.teardown()
 }
 
-// Tests the Geth was running with snapshot(legacy-format) enabled and resetHead
-// is applied. In this case the head is rewound to the target(with state available).
-// After that the chain is restarted and the original disk layer is kept.
-func TestSetHeadWithLegacySnapshot(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G
-	//
-	// SetHead(4)
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4
-	//
-	// Expected head header    : C4
-	// Expected head fast block: C4
-	// Expected head block     : C4
-	// Expected snapshot disk  : G
-	t.Skip("Legacy format testing is not supported")
-	test := &setHeadSnapshotTest{
-		snapshotTestBasic: snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      0,
-			commitBlock:        0,
-			expCanonicalBlocks: 4,
-			expHeadHeader:      4,
-			expHeadFastBlock:   4,
-			expHeadBlock:       4,
-			expSnapshotBottom:  0, // The initial disk layer is built from the genesis
-		},
-		setHead: 4,
-	}
-	test.test(t)
-	test.teardown()
-}
-
-// Tests the Geth was running with snapshot(legacy-format) enabled and upgrades
-// the disk layer journal(journal generator) to latest format. After that the Geth
-// is restarted from a crash. In this case Geth will find the new-format disk layer
-// journal but with legacy-format diff journal(the new-format is never committed),
-// and the invalid diff journal is expected to be dropped.
-func TestRecoverSnapshotFromCrashWithLegacyDiffJournal(t *testing.T) {
-	// Chain:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
-	//
-	// Commit:   G
-	// Snapshot: G
-	//
-	// SetHead(0)
-	//
-	// ------------------------------
-	//
-	// Expected in leveldb:
-	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
-	//
-	// Expected head header    : C10
-	// Expected head fast block: C10
-	// Expected head block     : C8
-	// Expected snapshot disk  : C10
-	t.Skip("Legacy format testing is not supported")
-	test := &restartCrashSnapshotTest{
-		snapshotTestBasic: snapshotTestBasic{
-			legacy:             true,
-			chainBlocks:        8,
-			snapshotBlock:      0,
-			commitBlock:        0,
-			expCanonicalBlocks: 10,
-			expHeadHeader:      10,
-			expHeadFastBlock:   10,
-			expHeadBlock:       8,  // The persisted state in the first running
-			expSnapshotBottom:  10, // The persisted disk layer in the second running
-		},
-		newBlocks: 2,
-	}
-	test.test(t)
-	test.teardown()
-}
-
 // Tests a Geth node that was running with a complete snapshot and then imports
 // a few more new blocks on top without enabling the snapshot. After the
 // restart, a crash happens. Check that everything is ok after the restart.
@@ -1006,7 +701,6 @@ func TestRecoverSnapshotFromWipingCrash(t *testing.T) {
 	// Expected snapshot disk  : C10
 	test := &wipeCrashSnapshotTest{
 		snapshotTestBasic: snapshotTestBasic{
-			legacy:             false,
 			chainBlocks:        8,
 			snapshotBlock:      4,
 			commitBlock:        0,

+ 2 - 2
core/blockchain_test.go

@@ -1462,13 +1462,13 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
 			t.Fatalf("block %d: failed to insert into chain: %v", i, err)
 		}
 		if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
-			t.Errorf("block %d: current block/header mismatch: block #%d [%x…], header #%d [%x…]", i, chain.CurrentBlock().Number(), chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
+			t.Errorf("block %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number(), chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
 		}
 		if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
 			t.Fatalf(" fork %d: failed to insert into chain: %v", i, err)
 		}
 		if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
-			t.Errorf(" fork %d: current block/header mismatch: block #%d [%x…], header #%d [%x…]", i, chain.CurrentBlock().Number(), chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
+			t.Errorf(" fork %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number(), chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
 		}
 	}
 }

+ 1 - 1
core/chain_indexer.go

@@ -401,7 +401,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
 		}
 		header := rawdb.ReadHeader(c.chainDb, hash, number)
 		if header == nil {
-			return common.Hash{}, fmt.Errorf("block #%d [%x] not found", number, hash[:4])
+			return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4])
 		} else if header.ParentHash != lastHead {
 			return common.Hash{}, fmt.Errorf("chain reorged during section processing")
 		}

+ 35 - 0
core/genesis_test.go

@@ -162,3 +162,38 @@ func TestSetupGenesis(t *testing.T) {
 		}
 	}
 }
+
+// TestGenesisHashes verifies that the default genesis blocks hash to their hardcoded values in params.
+func TestGenesisHashes(t *testing.T) {
+	cases := []struct {
+		genesis *Genesis
+		hash    common.Hash
+	}{
+		{
+			genesis: DefaultGenesisBlock(),
+			hash:    params.MainnetGenesisHash,
+		},
+		{
+			genesis: DefaultGoerliGenesisBlock(),
+			hash:    params.GoerliGenesisHash,
+		},
+		{
+			genesis: DefaultRopstenGenesisBlock(),
+			hash:    params.RopstenGenesisHash,
+		},
+		{
+			genesis: DefaultRinkebyGenesisBlock(),
+			hash:    params.RinkebyGenesisHash,
+		},
+		{
+			genesis: DefaultYoloV3GenesisBlock(),
+			hash:    params.YoloV3GenesisHash,
+		},
+	}
+	for i, c := range cases {
+		b := c.genesis.MustCommit(rawdb.NewMemoryDatabase())
+		if got := b.Hash(); got != c.hash {
+			t.Errorf("case: %d, want: %s, got: %s", i, c.hash.Hex(), got.Hex())
+		}
+	}
+}

+ 1 - 1
core/headerchain.go

@@ -306,7 +306,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
 			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
 				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)
 
-			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
+			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
 				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
 		}
 		// If the header is a banned one, straight out abort

+ 1 - 1
core/rawdb/accessors_chain.go

@@ -839,7 +839,7 @@ func ReadHeadHeader(db ethdb.Reader) *types.Header {
 	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
 }
 
-// ReadHeadHeader returns the current canonical head block.
+// ReadHeadBlock returns the current canonical head block.
 func ReadHeadBlock(db ethdb.Reader) *types.Block {
 	headBlockHash := ReadHeadBlockHash(db)
 	if headBlockHash == (common.Hash{}) {

+ 20 - 0
core/rawdb/accessors_snapshot.go

@@ -24,6 +24,26 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 )
 
+// ReadSnapshotDisabled retrieves whether the snapshot maintenance is disabled.
+func ReadSnapshotDisabled(db ethdb.KeyValueReader) bool {
+	disabled, _ := db.Has(snapshotDisabledKey)
+	return disabled
+}
+
+// WriteSnapshotDisabled stores the snapshot pause flag.
+func WriteSnapshotDisabled(db ethdb.KeyValueWriter) {
+	if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil {
+		log.Crit("Failed to store snapshot disabled flag", "err", err)
+	}
+}
+
+// DeleteSnapshotDisabled deletes the flag keeping the snapshot maintenance disabled.
+func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) {
+	if err := db.Delete(snapshotDisabledKey); err != nil {
+		log.Crit("Failed to remove snapshot disabled flag", "err", err)
+	}
+}
+
 // ReadSnapshotRoot retrieves the root of the block whose state is contained in
 // the persisted snapshot.
 func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
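
These helpers give the snapshot layer a crash-safe pause marker: only the key's presence matters (the "42" payload is arbitrary), so there is no partially written value to misread after a crash. A hedged sketch of the intended lifecycle around a bulk state sync, with the sync and rebuild steps left as placeholders:

package example

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// syncLifecycle disables snapshot maintenance before a bulk state download
// and re-enables it once the sync has been committed.
func syncLifecycle(db ethdb.Database) {
	rawdb.WriteSnapshotDisabled(db) // a crash mid-sync keeps us "disabled"
	// ... download and flatten state here ...
	rawdb.DeleteSnapshotDisabled(db)
	// ... trigger a snapshot rebuild from the new head root ...
}

// shouldMaintain is what startup code would consult before wiring up the
// snapshot tree.
func shouldMaintain(db ethdb.Database) bool {
	return !rawdb.ReadSnapshotDisabled(db)
}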

+ 3 - 3
core/rawdb/database.go

@@ -375,9 +375,9 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			var accounted bool
 			for _, meta := range [][]byte{
 				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
-				fastTrieProgressKey, snapshotRootKey, snapshotJournalKey, snapshotGeneratorKey,
-				snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, uncleanShutdownKey,
-				badBlockKey,
+				fastTrieProgressKey, snapshotDisabledKey, snapshotRootKey, snapshotJournalKey,
+				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
+				uncleanShutdownKey, badBlockKey,
 			} {
 				if bytes.Equal(key, meta) {
 					metadata.Add(size)

+ 17 - 0
core/rawdb/database_test.go

@@ -0,0 +1,17 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb

+ 1 - 1
core/rawdb/freezer.go

@@ -118,7 +118,7 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro
 		trigger:      make(chan chan struct{}),
 		quit:         make(chan struct{}),
 	}
-	for name, disableSnappy := range freezerNoSnappy {
+	for name, disableSnappy := range FreezerNoSnappy {
 		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
 		if err != nil {
 			for _, table := range freezer.tables {

+ 67 - 44
core/rawdb/freezer_table.go

@@ -465,35 +465,59 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
 // Note, this method will *not* flush any data to disk so be sure to explicitly
 // fsync before irreversibly deleting data from the database.
 func (t *freezerTable) Append(item uint64, blob []byte) error {
+	// Encode the blob before acquiring the lock
+	if !t.noCompression {
+		blob = snappy.Encode(nil, blob)
+	}
 	// Read lock prevents competition with truncate
-	t.lock.RLock()
+	retry, err := t.append(item, blob, false)
+	if err != nil {
+		return err
+	}
+	if retry {
+		// Read lock was insufficient, retry with a writelock
+		_, err = t.append(item, blob, true)
+	}
+	return err
+}
+
+// append injects a binary blob at the end of the freezer table.
+// Normally, inserts do not require holding the write-lock, so it should be
+// invoked with 'wlock' set to false.
+// However, if the data would grow the current file out of bounds, then this
+// method will return 'true, nil', indicating that the caller should retry, this
+// time with 'wlock' set to true.
+func (t *freezerTable) append(item uint64, encodedBlob []byte, wlock bool) (bool, error) {
+	if wlock {
+		t.lock.Lock()
+		defer t.lock.Unlock()
+	} else {
+		t.lock.RLock()
+		defer t.lock.RUnlock()
+	}
 	// Ensure the table is still accessible
 	if t.index == nil || t.head == nil {
-		t.lock.RUnlock()
-		return errClosed
+		return false, errClosed
 	}
 	// Ensure only the next item can be written, nothing else
 	if atomic.LoadUint64(&t.items) != item {
-		t.lock.RUnlock()
-		return fmt.Errorf("appending unexpected item: want %d, have %d", t.items, item)
-	}
-	// Encode the blob and write it into the data file
-	if !t.noCompression {
-		blob = snappy.Encode(nil, blob)
+		return false, fmt.Errorf("appending unexpected item: want %d, have %d", t.items, item)
 	}
-	bLen := uint32(len(blob))
+	bLen := uint32(len(encodedBlob))
 	if t.headBytes+bLen < bLen ||
 		t.headBytes+bLen > t.maxFileSize {
-		// we need a new file, writing would overflow
-		t.lock.RUnlock()
-		t.lock.Lock()
+		// Writing would overflow, so we need to open a new data file.
+		// If we don't already hold the writelock, abort and let the caller
+		// invoke this method a second time.
+		if !wlock {
+			return true, nil
+		}
 		nextID := atomic.LoadUint32(&t.headId) + 1
 		// We open the next file in truncated mode -- if this file already
 		// exists, we need to start over from scratch on it
 		newHead, err := t.openFile(nextID, openFreezerFileTruncated)
 		if err != nil {
-			t.lock.Unlock()
-			return err
+			return false, err
 		}
 		// Close old file, and reopen in RDONLY mode
 		t.releaseFile(t.headId)
@@ -503,13 +527,9 @@ func (t *freezerTable) Append(item uint64, blob []byte) error {
 		t.head = newHead
 		atomic.StoreUint32(&t.headBytes, 0)
 		atomic.StoreUint32(&t.headId, nextID)
-		t.lock.Unlock()
-		t.lock.RLock()
 	}
-
-	defer t.lock.RUnlock()
-	if _, err := t.head.Write(blob); err != nil {
-		return err
+	if _, err := t.head.Write(encodedBlob); err != nil {
+		return false, err
 	}
 	newOffset := atomic.AddUint32(&t.headBytes, bLen)
 	idx := indexEntry{
@@ -523,7 +543,7 @@ func (t *freezerTable) Append(item uint64, blob []byte) error {
 	t.sizeGauge.Inc(int64(bLen + indexEntrySize))
 
 	atomic.AddUint64(&t.items, 1)
-	return nil
+	return false, nil
 }
 
 // getBounds returns the indexes for the item
@@ -562,44 +582,48 @@ func (t *freezerTable) getBounds(item uint64) (uint32, uint32, uint32, error) {
 // Retrieve looks up the data offset of an item with the given number and retrieves
 // the raw binary blob from the data file.
 func (t *freezerTable) Retrieve(item uint64) ([]byte, error) {
+	blob, err := t.retrieve(item)
+	if err != nil {
+		return nil, err
+	}
+	if t.noCompression {
+		return blob, nil
+	}
+	return snappy.Decode(nil, blob)
+}
+
+// retrieve looks up the data offset of an item with the given number and retrieves
+// the raw binary blob from the data file. Note: this method does not decode
+// compressed data.
+func (t *freezerTable) retrieve(item uint64) ([]byte, error) {
 	t.lock.RLock()
+	defer t.lock.RUnlock()
 	// Ensure the table and the item is accessible
 	if t.index == nil || t.head == nil {
-		t.lock.RUnlock()
 		return nil, errClosed
 	}
 	if atomic.LoadUint64(&t.items) <= item {
-		t.lock.RUnlock()
 		return nil, errOutOfBounds
 	}
 	// Ensure the item was not deleted from the tail either
 	if uint64(t.itemOffset) > item {
-		t.lock.RUnlock()
 		return nil, errOutOfBounds
 	}
 	startOffset, endOffset, filenum, err := t.getBounds(item - uint64(t.itemOffset))
 	if err != nil {
-		t.lock.RUnlock()
 		return nil, err
 	}
 	dataFile, exist := t.files[filenum]
 	if !exist {
-		t.lock.RUnlock()
 		return nil, fmt.Errorf("missing data file %d", filenum)
 	}
 	// Retrieve the data itself, decompress and return
 	blob := make([]byte, endOffset-startOffset)
 	if _, err := dataFile.ReadAt(blob, int64(startOffset)); err != nil {
-		t.lock.RUnlock()
 		return nil, err
 	}
-	t.lock.RUnlock()
 	t.readMeter.Mark(int64(len(blob) + 2*indexEntrySize))
-
-	if t.noCompression {
-		return blob, nil
-	}
-	return snappy.Decode(nil, blob)
+	return blob, nil
 }
 
 // has returns an indicator whether the specified number data
@@ -636,25 +660,24 @@ func (t *freezerTable) Sync() error {
 	return t.head.Sync()
 }
 
-// printIndex is a debug print utility function for testing
-func (t *freezerTable) printIndex() {
+// DumpIndex is a debug print utility function, mainly for testing. It can also
+// be used to analyse a live freezer table index.
+func (t *freezerTable) DumpIndex(start, stop int64) {
 	buf := make([]byte, indexEntrySize)
 
-	fmt.Printf("|-----------------|\n")
-	fmt.Printf("| fileno | offset |\n")
-	fmt.Printf("|--------+--------|\n")
+	fmt.Printf("| number | fileno | offset |\n")
+	fmt.Printf("|--------|--------|--------|\n")
 
-	for i := uint64(0); ; i++ {
+	for i := uint64(start); ; i++ {
 		if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
 			break
 		}
 		var entry indexEntry
 		entry.unmarshalBinary(buf)
-		fmt.Printf("|  %03d   |  %03d   | \n", entry.filenum, entry.offset)
-		if i > 100 {
-			fmt.Printf(" ... \n")
+		fmt.Printf("|  %03d   |  %03d   |  %03d   | \n", i, entry.filenum, entry.offset)
+		if stop > 0 && i >= uint64(stop) {
 			break
 		}
 	}
-	fmt.Printf("|-----------------|\n")
+	fmt.Printf("|--------------------------|\n")
 }

+ 57 - 5
core/rawdb/freezer_table_test.go

@@ -18,10 +18,13 @@ package rawdb
 
 import (
 	"bytes"
+	"encoding/binary"
 	"fmt"
+	"io/ioutil"
 	"math/rand"
 	"os"
 	"path/filepath"
+	"sync"
 	"testing"
 	"time"
 
@@ -525,7 +528,7 @@ func TestOffset(t *testing.T) {
 
 		f.Append(4, getChunk(20, 0xbb))
 		f.Append(5, getChunk(20, 0xaa))
-		f.printIndex()
+		f.DumpIndex(0, 100)
 		f.Close()
 	}
 	// Now crop it.
@@ -572,7 +575,7 @@ func TestOffset(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		f.printIndex()
+		f.DumpIndex(0, 100)
 		// It should allow writing item 6
 		f.Append(numDeleted+2, getChunk(20, 0x99))
 
@@ -637,6 +640,55 @@ func TestOffset(t *testing.T) {
 // 1. have data files d0, d1, d2, d3
 // 2. remove d2,d3
 //
-// However, all 'normal' failure modes arising due to failing to sync() or save a file should be
-// handled already, and the case described above can only (?) happen if an external process/user
-// deletes files from the filesystem.
+// However, all 'normal' failure modes arising due to failing to sync() or save a file
+// should be handled already, and the case described above can only (?) happen if an
+// external process/user deletes files from the filesystem.
+
+// TestAppendTruncateParallel is a test to check if the Append/truncate operations are
+// racy.
+//
+// The reason why it's not a regular fuzzer, within tests/fuzzers, is that it is dependent
+// on timing rather than 'clever' input -- there's no determinism.
+func TestAppendTruncateParallel(t *testing.T) {
+	dir, err := ioutil.TempDir("", "freezer")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	f, err := newCustomTable(dir, "tmp", metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, 8, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	fill := func(mark uint64) []byte {
+		data := make([]byte, 8)
+		binary.LittleEndian.PutUint64(data, mark)
+		return data
+	}
+
+	for i := 0; i < 5000; i++ {
+		f.truncate(0)
+		data0 := fill(0)
+		f.Append(0, data0)
+		data1 := fill(1)
+
+		var wg sync.WaitGroup
+		wg.Add(2)
+		go func() {
+			f.truncate(0)
+			wg.Done()
+		}()
+		go func() {
+			f.Append(1, data1)
+			wg.Done()
+		}()
+		wg.Wait()
+
+		if have, err := f.Retrieve(0); err == nil {
+			if !bytes.Equal(have, data0) {
+				t.Fatalf("have %x want %x", have, data0)
+			}
+		}
+	}
+}
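
The check is timing-dependent by design: the window in the old lock dance only opens when the scheduler interleaves the two goroutines unluckily, so running the test under the race detector (go test -race -run TestAppendTruncateParallel ./core/rawdb) is the most reliable way to reproduce a failure.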

+ 5 - 2
core/rawdb/schema.go

@@ -45,6 +45,9 @@ var (
 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
 	fastTrieProgressKey = []byte("TrieSync")
 
+	// snapshotDisabledKey flags that the snapshot should not be maintained due to initial sync.
+	snapshotDisabledKey = []byte("SnapshotDisabled")
+
 	// snapshotRootKey tracks the hash of the last snapshot.
 	snapshotRootKey = []byte("SnapshotRoot")
 
@@ -114,9 +117,9 @@ const (
 	freezerDifficultyTable = "diffs"
 )
 
-// freezerNoSnappy configures whether compression is disabled for the ancient-tables.
+// FreezerNoSnappy configures whether compression is disabled for the ancient-tables.
 // Hashes and difficulties don't compress well.
-var freezerNoSnappy = map[string]bool{
+var FreezerNoSnappy = map[string]bool{
 	freezerHeaderTable:     false,
 	freezerHashTable:       true,
 	freezerBodiesTable:     false,

+ 1 - 1
core/state/snapshot/conversion.go

@@ -322,7 +322,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash,
 						return
 					}
 					if !bytes.Equal(account.Root, subroot.Bytes()) {
-						results <- fmt.Errorf("invalid subroot(%x), want %x, got %x", it.Hash(), account.Root, subroot)
+						results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot)
 						return
 					}
 					results <- nil

+ 556 - 131
core/state/snapshot/generate.go

@@ -19,17 +19,21 @@ package snapshot
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"math/big"
 	"time"
 
 	"github.com/VictoriaMetrics/fastcache"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/ethdb/memorydb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
@@ -40,17 +44,63 @@ var (
 
 	// emptyCode is the known hash of the empty EVM bytecode.
 	emptyCode = crypto.Keccak256Hash(nil)
+
+	// accountCheckRange is the upper limit of the number of accounts involved in
+	// each range check. This is a value estimated based on experience. If this
+	// value is too large, the failure rate of range proofs will increase; if it
+	// is too small, the efficiency of the state recovery will decrease.
+	accountCheckRange = 128
+
+	// storageCheckRange is the upper limit of the number of storage slots involved
+	// in each range check. This is a value estimated based on experience. If this
+	// value is too large, the failure rate of range proofs will increase; if it
+	// is too small, the efficiency of the state recovery will decrease.
+	storageCheckRange = 1024
+
+	// errMissingTrie is returned if the target trie is missing while the generation
+	// is running. In this case the generation is aborted and waits for a new signal.
+	errMissingTrie = errors.New("missing trie")
+)
+
+// Metrics in generation
+var (
+	snapGeneratedAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/generated", nil)
+	snapRecoveredAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/recovered", nil)
+	snapWipedAccountMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/account/wiped", nil)
+	snapMissallAccountMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/account/missall", nil)
+	snapGeneratedStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/generated", nil)
+	snapRecoveredStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/recovered", nil)
+	snapWipedStorageMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/storage/wiped", nil)
+	snapMissallStorageMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/storage/missall", nil)
+	snapSuccessfulRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/success", nil)
+	snapFailedRangeProofMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/proof/failure", nil)
+
+	// snapAccountProveCounter measures time spent on the account proving
+	snapAccountProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/prove", nil)
+	// snapAccountTrieReadCounter measures time spent on the account trie iteration
+	snapAccountTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/trieread", nil)
+	// snapAccountSnapReadCounter measures time spent on the snapshot account iteration
+	snapAccountSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/snapread", nil)
+	// snapAccountWriteCounter measures time spent on writing/updating/deleting accounts
+	snapAccountWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/write", nil)
+	// snapStorageProveCounter measures time spent on storage proving
+	snapStorageProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/prove", nil)
+	// snapStorageTrieReadCounter measures time spent on the storage trie iteration
+	snapStorageTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/trieread", nil)
+	// snapStorageSnapReadCounter measures time spent on the snapshot storage iteration
+	snapStorageSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/snapread", nil)
+	// snapStorageWriteCounter measures time spent on writing/updating/deleting storages
+	snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil)
 )
 
 // generatorStats is a collection of statistics gathered by the snapshot generator
 // for logging purposes.
 type generatorStats struct {
-	wiping   chan struct{}      // Notification channel if wiping is in progress
 	origin   uint64             // Origin prefix where generation started
 	start    time.Time          // Timestamp when generation started
-	accounts uint64             // Number of accounts indexed
-	slots    uint64             // Number of storage slots indexed
-	storage  common.StorageSize // Account and storage slot size
+	accounts uint64             // Number of accounts indexed (generated or recovered)
+	slots    uint64             // Number of storage slots indexed (generated or recovered)
+	storage  common.StorageSize // Total account and storage slot size (generation or recovery)
 }
 
 // Log creates a contextual log with the given message and the context pulled
@@ -94,22 +144,17 @@ func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) {
 // generateSnapshot regenerates a brand new snapshot based on an existing state
 // database and head block asynchronously. The snapshot is returned immediately
 // and generation is continued in the background until done.
-func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, wiper chan struct{}) *diskLayer {
-	// Wipe any previously existing snapshot from the database if no wiper is
-	// currently in progress.
-	if wiper == nil {
-		wiper = wipeSnapshot(diskdb, true)
-	}
+func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
 	// Create a new disk layer with an initialized state marker at zero
 	var (
-		stats     = &generatorStats{wiping: wiper, start: time.Now()}
+		stats     = &generatorStats{start: time.Now()}
 		batch     = diskdb.NewBatch()
 		genMarker = []byte{} // Initialized but empty!
 	)
 	rawdb.WriteSnapshotRoot(batch, root)
 	journalProgress(batch, genMarker, stats)
 	if err := batch.Write(); err != nil {
-		log.Crit("Failed to write initialized state marker", "error", err)
+		log.Crit("Failed to write initialized state marker", "err", err)
 	}
 	base := &diskLayer{
 		diskdb:     diskdb,
@@ -135,7 +180,6 @@ func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorSta
 		Marker: marker,
 	}
 	if stats != nil {
-		entry.Wiping = (stats.wiping != nil)
 		entry.Accounts = stats.accounts
 		entry.Slots = stats.slots
 		entry.Storage = uint64(stats.storage)
@@ -159,169 +203,538 @@ func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorSta
 	rawdb.WriteSnapshotGenerator(db, blob)
 }
 
-// generate is a background thread that iterates over the state and storage tries,
-// constructing the state snapshot. All the arguments are purely for statistics
-// gathering and logging, since the method surfs the blocks as they arrive, often
-// being restarted.
-func (dl *diskLayer) generate(stats *generatorStats) {
-	// If a database wipe is in operation, wait until it's done
-	if stats.wiping != nil {
-		stats.Log("Wiper running, state snapshotting paused", common.Hash{}, dl.genMarker)
-		select {
-		// If wiper is done, resume normal mode of operation
-		case <-stats.wiping:
-			stats.wiping = nil
-			stats.start = time.Now()
+// proofResult contains the output of range proving, which can be used
+// for further processing regardless of whether proving succeeded.
+type proofResult struct {
+	keys     [][]byte   // The key set of all elements being iterated, even if the proving failed
+	vals     [][]byte   // The val set of all elements being iterated, even if the proving failed
+	diskMore bool       // Set when the database has extra snapshot states since the last iteration
+	trieMore bool       // Set when the trie has extra snapshot states (only meaningful when proving succeeds)
+	proofErr error      // Indicator whether the given state range is valid or not
+	tr       *trie.Trie // The trie, in case the trie was resolved by the prover (may be nil)
+}
 
-		// If generator was aborted during wipe, return
-		case abort := <-dl.genAbort:
-			abort <- stats
-			return
+// valid reports whether the range proof succeeded.
+func (result *proofResult) valid() bool {
+	return result.proofErr == nil
+}
+
+// last returns the key of the last verified element, regardless of whether the
+// range proof succeeded. Nil is returned if nothing was involved in the proving.
+func (result *proofResult) last() []byte {
+	var last []byte
+	if len(result.keys) > 0 {
+		last = result.keys[len(result.keys)-1]
+	}
+	return last
+}
+
+// forEach iterates over all the visited elements and applies the given callback
+// to them. The iteration is aborted if the callback returns a non-nil error.
+func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
+	for i := 0; i < len(result.keys); i++ {
+		key, val := result.keys[i], result.vals[i]
+		if err := callback(key, val); err != nil {
+			return err
+		}
+	}
+	return nil
+}
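
A sketch of how callers are expected to consume a proofResult, mirroring the pattern generateRange uses below (onState here stands in for the real callback and is illustrative only):

result, err := dl.proveRange(stats, root, prefix, kind, origin, max, nil)
if err != nil {
	return err // hard database failure, abort the whole procedure
}
if result.valid() {
	// Flat state matches the trie: replay the verified elements as-is.
	return result.forEach(func(key, val []byte) error {
		return onState(key, val, false, false)
	})
}
// Proof failed: fall back to trie iteration, resuming from result.last().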
+
+// proveRange proves that the snapshot segment with a particular prefix is "valid".
+// The iteration start point is assigned if the iterator is restored from the
+// last interruption, and max is assigned to limit the maximum amount of data
+// involved in each iteration.
+//
+// The proof result is returned if the range proving finishes; otherwise an
+// error is returned to abort the entire procedure.
+func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
+	var (
+		keys     [][]byte
+		vals     [][]byte
+		proof    = rawdb.NewMemoryDatabase()
+		diskMore = false
+	)
+	iter := dl.diskdb.NewIterator(prefix, origin)
+	defer iter.Release()
+
+	var start = time.Now()
+	for iter.Next() {
+		key := iter.Key()
+		if len(key) != len(prefix)+common.HashLength {
+			continue
+		}
+		if len(keys) == max {
+			// Break if we've reached the max size, and signal that we're not
+			// done yet.
+			diskMore = true
+			break
+		}
+		keys = append(keys, common.CopyBytes(key[len(prefix):]))
+
+		if valueConvertFn == nil {
+			vals = append(vals, common.CopyBytes(iter.Value()))
+		} else {
+			val, err := valueConvertFn(iter.Value())
+			if err != nil {
+				// Special case, the state data is corrupted (invalid slim-format account),
+				// don't abort the entire procedure directly. Instead, let the fallback
+				// generation heal the invalid data.
+				//
+				// Here append the original value to ensure that the number of key and
+				// value are the same.
+				vals = append(vals, common.CopyBytes(iter.Value()))
+				log.Error("Failed to convert account state data", "err", err)
+			} else {
+				vals = append(vals, val)
+			}
+		}
+	}
+	// Update metrics for database iteration and merkle proving
+	if kind == "storage" {
+		snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
+	} else {
+		snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
+	}
+	defer func(start time.Time) {
+		if kind == "storage" {
+			snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
+		} else {
+			snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
+		}
+	}(time.Now())
+
+	// The snap state is exhausted, pass the entire key/val set for verification
+	if origin == nil && !diskMore {
+		stackTr := trie.NewStackTrie(nil)
+		for i, key := range keys {
+			stackTr.TryUpdate(key, vals[i])
 		}
+		if gotRoot := stackTr.Hash(); gotRoot != root {
+			return &proofResult{
+				keys:     keys,
+				vals:     vals,
+				proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
+			}, nil
+		}
+		return &proofResult{keys: keys, vals: vals}, nil
 	}
-	// Create an account and state iterator pointing to the current generator marker
-	accTrie, err := trie.NewSecure(dl.root, dl.triedb)
+	// Snap state is chunked, generate edge proofs for verification.
+	tr, err := trie.New(root, dl.triedb)
 	if err != nil {
-		// The account trie is missing (GC), surf the chain until one becomes available
 		stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
+		return nil, errMissingTrie
+	}
+	// First, find out the key of the last iterated element.
+	var last []byte
+	if len(keys) > 0 {
+		last = keys[len(keys)-1]
+	}
+	// Generate the Merkle proofs for the first and last element
+	if origin == nil {
+		origin = common.Hash{}.Bytes()
+	}
+	if err := tr.Prove(origin, 0, proof); err != nil {
+		log.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err)
+		return &proofResult{
+			keys:     keys,
+			vals:     vals,
+			diskMore: diskMore,
+			proofErr: err,
+			tr:       tr,
+		}, nil
+	}
+	if last != nil {
+		if err := tr.Prove(last, 0, proof); err != nil {
+			log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
+			return &proofResult{
+				keys:     keys,
+				vals:     vals,
+				diskMore: diskMore,
+				proofErr: err,
+				tr:       tr,
+			}, nil
+		}
+	}
+	// Verify the snapshot segment with the range prover, ensuring that all flat
+	// states in this range correspond to the merkle trie.
+	cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
+	return &proofResult{
+			keys:     keys,
+			vals:     vals,
+			diskMore: diskMore,
+			trieMore: cont,
+			proofErr: err,
+			tr:       tr},
+		nil
+}
 
-		abort := <-dl.genAbort
-		abort <- stats
-		return
+// onStateCallback is a function that is called by generateRange, when processing a range of
+// accounts or storage slots. For each element, the callback is invoked.
+// If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
+// If 'write' is true, then this element needs to be updated with the 'val'.
+// If 'write' is false, then this element is already correct, and needs no update. However,
+// for accounts, the storage trie of the account needs to be checked.
+// The 'val' is the canonical encoding of the value (not the slim format for accounts)
+type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
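
To make the three possible outcomes concrete, a hypothetical onStateCallback that merely tallies them:

var created, deleted, matched int
tally := func(key []byte, val []byte, write bool, delete bool) error {
	switch {
	case delete:
		deleted++ // stale snapshot entry that must be removed
	case write:
		created++ // entry must be (re)written with val
	default:
		matched++ // entry already correct, nothing to do
	}
	return nil
}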
+
+// generateRange generates the state segment with a particular prefix. Generation
+// can either verify the correctness of the existing state through a range proof
+// and skip generation, or iterate the trie to regenerate the state on demand.
+func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string, origin []byte, max int, stats *generatorStats, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
+	// Use range prover to check the validity of the flat state in the range
+	result, err := dl.proveRange(stats, root, prefix, kind, origin, max, valueConvertFn)
+	if err != nil {
+		return false, nil, err
 	}
-	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)
+	last := result.last()
+
+	// Construct contextual logger
+	logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
+	if len(origin) > 0 {
+		logCtx = append(logCtx, "origin", hexutil.Encode(origin))
+	}
+	logger := log.New(logCtx...)
+
+	// The range prover says the range is correct, skip trie iteration
+	if result.valid() {
+		snapSuccessfulRangeProofMeter.Mark(1)
+		logger.Trace("Proved state range", "last", hexutil.Encode(last))
+
+		// The verification is passed, process each state with the given
+		// callback function. If this state represents a contract, the
+		// corresponding storage check will be performed in the callback
+		if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
+			return false, nil, err
+		}
+		// Only abort the iteration when both database and trie are exhausted
+		return !result.diskMore && !result.trieMore, last, nil
+	}
+	logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
+	snapFailedRangeProofMeter.Mark(1)
+
+	// Special case: the entire trie is missing. In the original trie scheme,
+	// all duplicated subtries are filtered out (only one copy of the data is
+	// stored), while in the snapshot model the storage tries belonging to
+	// different contracts are kept even if they are duplicated. Track this
+	// to remove some of the noise from the statistics.
+	if origin == nil && last == nil {
+		meter := snapMissallAccountMeter
+		if kind == "storage" {
+			meter = snapMissallStorageMeter
+		}
+		meter.Mark(1)
+	}
+
+	// We use the snap data to build up a cache which can be used by the
+	// main account trie as a primary lookup when resolving hashes
+	var snapNodeCache ethdb.KeyValueStore
+	if len(result.keys) > 0 {
+		snapNodeCache = memorydb.New()
+		snapTrieDb := trie.NewDatabase(snapNodeCache)
+		snapTrie, _ := trie.New(common.Hash{}, snapTrieDb)
+		for i, key := range result.keys {
+			snapTrie.Update(key, result.vals[i])
+		}
+		root, _ := snapTrie.Commit(nil)
+		snapTrieDb.Commit(root, false, nil)
+	}
+	tr := result.tr
+	if tr == nil {
+		tr, err = trie.New(root, dl.triedb)
+		if err != nil {
+			stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
+			return false, nil, errMissingTrie
+		}
+	}
+
+	var (
+		trieMore       bool
+		nodeIt         = tr.NodeIterator(origin)
+		iter           = trie.NewIterator(nodeIt)
+		kvkeys, kvvals = result.keys, result.vals
+
+		// counters
+		count     = 0 // number of states delivered by iterator
+		created   = 0 // states created from the trie
+		updated   = 0 // states updated from the trie
+		deleted   = 0 // states not in trie, but were in snapshot
+		untouched = 0 // states already correct
 
-	var accMarker []byte
+		// timers
+		start    = time.Now()
+		internal time.Duration
+	)
+	nodeIt.AddResolver(snapNodeCache)
+	for iter.Next() {
+		if last != nil && bytes.Compare(iter.Key, last) > 0 {
+			trieMore = true
+			break
+		}
+		count++
+		write := true
+		created++
+		for len(kvkeys) > 0 {
+			if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
+				// delete the key
+				istart := time.Now()
+				if err := onState(kvkeys[0], nil, false, true); err != nil {
+					return false, nil, err
+				}
+				kvkeys = kvkeys[1:]
+				kvvals = kvvals[1:]
+				deleted++
+				internal += time.Since(istart)
+				continue
+			} else if cmp == 0 {
+				// the snapshot key can be overwritten
+				created--
+				if write = !bytes.Equal(kvvals[0], iter.Value); write {
+					updated++
+				} else {
+					untouched++
+				}
+				kvkeys = kvkeys[1:]
+				kvvals = kvvals[1:]
+			}
+			break
+		}
+		istart := time.Now()
+		if err := onState(iter.Key, iter.Value, write, false); err != nil {
+			return false, nil, err
+		}
+		internal += time.Since(istart)
+	}
+	if iter.Err != nil {
+		return false, nil, iter.Err
+	}
+	// Delete all stale snapshot states remaining
+	istart := time.Now()
+	for _, key := range kvkeys {
+		if err := onState(key, nil, false, true); err != nil {
+			return false, nil, err
+		}
+		deleted++
+	}
+	internal += time.Since(istart)
+
+	// Update metrics for counting trie iteration
+	if kind == "storage" {
+		snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
+	} else {
+		snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
+	}
+	logger.Debug("Regenerated state range", "root", root, "last", hexutil.Encode(last),
+		"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)
+
+	// If there are either more trie items, or there are more snap items
+	// (in the next segment), then we need to keep working
+	return !trieMore && !result.diskMore, last, nil
+}
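
The (exhausted, last, error) triple returned here is meant to drive a chunked outer loop. A condensed sketch of such a driver, using the increaseKey helper defined at the end of this file (the real account and storage loops follow in generate below):

var origin []byte // nil resumes from the very first key
for {
	exhausted, last, err := dl.generateRange(root, prefix, kind, origin, max, stats, onState, nil)
	if err != nil {
		return err // aborted externally or failed internally
	}
	if exhausted {
		break // both the snapshot and the trie are fully covered
	}
	if origin = increaseKey(last); origin == nil {
		break // last was 0xff...ff, there is nothing beyond it
	}
}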
+
+// generate is a background thread that iterates over the state and storage tries,
+// constructing the state snapshot. All the arguments are purely for statistics
+// gathering and logging, since the method surfs the blocks as they arrive, often
+// being restarted.
+func (dl *diskLayer) generate(stats *generatorStats) {
+	var (
+		accMarker    []byte
+		accountRange = accountCheckRange
+	)
 	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
-		accMarker = dl.genMarker[:common.HashLength]
+		// Always reset the initial account range to 1
+		// whenever recovering from an interruption.
+		accMarker, accountRange = dl.genMarker[:common.HashLength], 1
 	}
-	accIt := trie.NewIterator(accTrie.NodeIterator(accMarker))
-	batch := dl.diskdb.NewBatch()
+	var (
+		batch     = dl.diskdb.NewBatch()
+		logged    = time.Now()
+		accOrigin = common.CopyBytes(accMarker)
+		abort     chan *generatorStats
+	)
+	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)
 
-	// Iterate from the previous marker and continue generating the state snapshot
-	logged := time.Now()
-	for accIt.Next() {
-		// Retrieve the current account and flatten it into the internal format
-		accountHash := common.BytesToHash(accIt.Key)
+	checkAndFlush := func(currentLocation []byte) error {
+		select {
+		case abort = <-dl.genAbort:
+		default:
+		}
+		if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
+			// Flush out the batch anyway, no matter whether it's empty or not.
+			// It's possible that all the states were recovered and the
+			// generation indeed made progress.
+			journalProgress(batch, currentLocation, stats)
+
+			if err := batch.Write(); err != nil {
+				return err
+			}
+			batch.Reset()
+
+			dl.lock.Lock()
+			dl.genMarker = currentLocation
+			dl.lock.Unlock()
+
+			if abort != nil {
+				stats.Log("Aborting state snapshot generation", dl.root, currentLocation)
+				return errors.New("aborted")
+			}
+		}
+		if time.Since(logged) > 8*time.Second {
+			stats.Log("Generating state snapshot", dl.root, currentLocation)
+			logged = time.Now()
+		}
+		return nil
+	}
 
+	onAccount := func(key []byte, val []byte, write bool, delete bool) error {
+		var (
+			start       = time.Now()
+			accountHash = common.BytesToHash(key)
+		)
+		if delete {
+			rawdb.DeleteAccountSnapshot(batch, accountHash)
+			snapWipedAccountMeter.Mark(1)
+
+			// Ensure that any previous snapshot storage values are cleared
+			prefix := append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...)
+			keyLen := len(rawdb.SnapshotStoragePrefix) + 2*common.HashLength
+			if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
+				return err
+			}
+			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
+			return nil
+		}
+		// Retrieve the current account and flatten it into the internal format
 		var acc struct {
 			Nonce    uint64
 			Balance  *big.Int
 			Root     common.Hash
 			CodeHash []byte
 		}
-		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
+		if err := rlp.DecodeBytes(val, &acc); err != nil {
 			log.Crit("Invalid account encountered during snapshot creation", "err", err)
 		}
-		data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
-
 		// If the account is not yet in-progress, write it out
 		if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
-			rawdb.WriteAccountSnapshot(batch, accountHash, data)
-			stats.storage += common.StorageSize(1 + common.HashLength + len(data))
+			dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
+			if !write {
+				if bytes.Equal(acc.CodeHash, emptyCode[:]) {
+					dataLen -= 32
+				}
+				if acc.Root == emptyRoot {
+					dataLen -= 32
+				}
+				snapRecoveredAccountMeter.Mark(1)
+			} else {
+				data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+				dataLen = len(data)
+				rawdb.WriteAccountSnapshot(batch, accountHash, data)
+				snapGeneratedAccountMeter.Mark(1)
+			}
+			stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
 			stats.accounts++
 		}
 		// If we've exceeded our batch allowance or termination was requested, flush to disk
-		var abort chan *generatorStats
-		select {
-		case abort = <-dl.genAbort:
-		default:
-		}
-		if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
-			// Only write and set the marker if we actually did something useful
-			if batch.ValueSize() > 0 {
-				// Ensure the generator entry is in sync with the data
-				marker := accountHash[:]
-				journalProgress(batch, marker, stats)
-
-				batch.Write()
-				batch.Reset()
-
-				dl.lock.Lock()
-				dl.genMarker = marker
-				dl.lock.Unlock()
-			}
-			if abort != nil {
-				stats.Log("Aborting state snapshot generation", dl.root, accountHash[:])
-				abort <- stats
-				return
-			}
+		if err := checkAndFlush(accountHash[:]); err != nil {
+			return err
 		}
-		// If the account is in-progress, continue where we left off (otherwise iterate all)
-		if acc.Root != emptyRoot {
-			storeTrie, err := trie.NewSecure(acc.Root, dl.triedb)
-			if err != nil {
-				log.Error("Generator failed to access storage trie", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err)
-				abort := <-dl.genAbort
-				abort <- stats
-				return
+		// If the iterated account is a contract, run a further loop to
+		// verify or regenerate the contract storage.
+		if acc.Root == emptyRoot {
+			// If the root is empty, we still need to ensure that any previous snapshot
+			// storage values are cleared.
+			// TODO: investigate whether this can be avoided; it will be very costly
+			// since it affects every single EOA account
+			//  - Perhaps we can avoid it where codeHash is emptyCode
+			prefix := append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...)
+			keyLen := len(rawdb.SnapshotStoragePrefix) + 2*common.HashLength
+			if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
+				return err
 			}
+			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
+		} else {
+			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
+
 			var storeMarker []byte
 			if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
 				storeMarker = dl.genMarker[common.HashLength:]
 			}
-			storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker))
-			for storeIt.Next() {
-				rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value)
-				stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value))
+			onStorage := func(key []byte, val []byte, write bool, delete bool) error {
+				defer func(start time.Time) {
+					snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
+				}(time.Now())
+
+				if delete {
+					rawdb.DeleteStorageSnapshot(batch, accountHash, common.BytesToHash(key))
+					snapWipedStorageMeter.Mark(1)
+					return nil
+				}
+				if write {
+					rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(key), val)
+					snapGeneratedStorageMeter.Mark(1)
+				} else {
+					snapRecoveredStorageMeter.Mark(1)
+				}
+				stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
 				stats.slots++
 
 				// If we've exceeded our batch allowance or termination was requested, flush to disk
-				var abort chan *generatorStats
-				select {
-				case abort = <-dl.genAbort:
-				default:
-				}
-				if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
-					// Only write and set the marker if we actually did something useful
-					if batch.ValueSize() > 0 {
-						// Ensure the generator entry is in sync with the data
-						marker := append(accountHash[:], storeIt.Key...)
-						journalProgress(batch, marker, stats)
-
-						batch.Write()
-						batch.Reset()
-
-						dl.lock.Lock()
-						dl.genMarker = marker
-						dl.lock.Unlock()
-					}
-					if abort != nil {
-						stats.Log("Aborting state snapshot generation", dl.root, append(accountHash[:], storeIt.Key...))
-						abort <- stats
-						return
-					}
-					if time.Since(logged) > 8*time.Second {
-						stats.Log("Generating state snapshot", dl.root, append(accountHash[:], storeIt.Key...))
-						logged = time.Now()
-					}
+				if err := checkAndFlush(append(accountHash[:], key...)); err != nil {
+					return err
 				}
+				return nil
 			}
-			if err := storeIt.Err; err != nil {
-				log.Error("Generator failed to iterate storage trie", "accroot", dl.root, "acchash", common.BytesToHash(accIt.Key), "stroot", acc.Root, "err", err)
-				abort := <-dl.genAbort
-				abort <- stats
-				return
+			var storeOrigin = common.CopyBytes(storeMarker)
+			for {
+				exhausted, last, err := dl.generateRange(acc.Root, append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...), "storage", storeOrigin, storageCheckRange, stats, onStorage, nil)
+				if err != nil {
+					return err
+				}
+				if exhausted {
+					break
+				}
+				if storeOrigin = increaseKey(last); storeOrigin == nil {
+					break // special case, the last is 0xffffffff...fff
+				}
 			}
 		}
-		if time.Since(logged) > 8*time.Second {
-			stats.Log("Generating state snapshot", dl.root, accIt.Key)
-			logged = time.Now()
-		}
 		// Some account processed, unmark the marker
 		accMarker = nil
+		return nil
 	}
-	if err := accIt.Err; err != nil {
-		log.Error("Generator failed to iterate account trie", "root", dl.root, "err", err)
-		abort := <-dl.genAbort
-		abort <- stats
-		return
+
+	// Global loop for regenerating the entire state trie + all layered storage tries.
+	for {
+		exhausted, last, err := dl.generateRange(dl.root, rawdb.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, FullAccountRLP)
+		// The procedure is aborted, either by external signal or internal error
+		if err != nil {
+			if abort == nil { // aborted by internal error, wait for the signal
+				abort = <-dl.genAbort
+			}
+			abort <- stats
+			return
+		}
+		// Stop the procedure if the entire snapshot is generated
+		if exhausted {
+			break
+		}
+		if accOrigin = increaseKey(last); accOrigin == nil {
+			break // special case, the last is 0xffffffff...fff
+		}
+		accountRange = accountCheckRange
 	}
 	// Snapshot fully generated, set the marker to nil.
 	// Note even if there is nothing to commit, persist the
 	// generator anyway to mark the snapshot as complete.
 	journalProgress(batch, nil, stats)
-	batch.Write()
+	if err := batch.Write(); err != nil {
+		log.Error("Failed to flush batch", "err", err)
+
+		abort = <-dl.genAbort
+		abort <- stats
+		return
+	}
+	batch.Reset()
 
 	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
 		"storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start)))
@@ -332,6 +745,18 @@ func (dl *diskLayer) generate(stats *generatorStats) {
 	dl.lock.Unlock()
 
 	// Someone will be looking for us, wait it out
-	abort := <-dl.genAbort
+	abort = <-dl.genAbort
 	abort <- nil
 }
+
+// increaseKey increases the input key by one. It returns nil if the entire
+// addition operation overflows (i.e. the key was 0xff...ff).
+func increaseKey(key []byte) []byte {
+	for i := len(key) - 1; i >= 0; i-- {
+		key[i]++
+		if key[i] != 0x0 {
+			return key
+		}
+	}
+	return nil
+}
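
A quick illustration of the carry and overflow behavior as a hypothetical test (assumes the bytes and testing imports):

func TestIncreaseKey(t *testing.T) {
	// 0x01ff + 1 = 0x0200: the increment carries one byte to the left.
	if got := increaseKey([]byte{0x01, 0xff}); !bytes.Equal(got, []byte{0x02, 0x00}) {
		t.Fatalf("carry not propagated: %x", got)
	}
	// 0xffff + 1 overflows the key width entirely, signalled by nil.
	if got := increaseKey([]byte{0xff, 0xff}); got != nil {
		t.Fatalf("expected nil on overflow, got %x", got)
	}
}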

+ 646 - 3
core/state/snapshot/generate_test.go

@@ -17,16 +17,361 @@
 package snapshot
 
 import (
+	"fmt"
 	"math/big"
+	"os"
 	"testing"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
+	"golang.org/x/crypto/sha3"
 )
 
+// Tests snapshot generation from an empty database.
+func TestGeneration(t *testing.T) {
+	// We can't use statedb to make a test trie (circular dependency), so make
+	// a fake one manually. We're going with a small account trie of 3 accounts,
+// two of which also have the same 3-slot storage trie attached.
+	var (
+		diskdb = memorydb.New()
+		triedb = trie.NewDatabase(diskdb)
+	)
+	stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+	stTrie.Update([]byte("key-1"), []byte("val-1")) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
+	stTrie.Update([]byte("key-2"), []byte("val-2")) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
+	stTrie.Update([]byte("key-3"), []byte("val-3")) // 0x51c71a47af0695957647fb68766d0becee77e953df17c29b3c2f25436f055c78
+	stTrie.Commit(nil)                              // Root: 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+
+	accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+	acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+	val, _ := rlp.EncodeToBytes(acc)
+	accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+
+	acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+	val, _ = rlp.EncodeToBytes(acc)
+	accTrie.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+
+	acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+	val, _ = rlp.EncodeToBytes(acc)
+	accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+	root, _ := accTrie.Commit(nil)       // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+	triedb.Commit(root, false, nil)
+
+	if have, want := root, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"); have != want {
+		t.Fatalf("have %#x want %#x", have, want)
+	}
+	snap := generateSnapshot(diskdb, triedb, 16, root)
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+}
+
+func hashData(input []byte) common.Hash {
+	var hasher = sha3.NewLegacyKeccak256()
+	var hash common.Hash
+	hasher.Reset()
+	hasher.Write(input)
+	hasher.Sum(hash[:0])
+	return hash
+}
+
+// Tests snapshot generation with existent flat state.
+func TestGenerateExistentState(t *testing.T) {
+	// We can't use statedb to make a test trie (circular dependency), so make
+	// a fake one manually. We're going with a small account trie of 3 accounts,
+// two of which also have the same 3-slot storage trie attached.
+	var (
+		diskdb = memorydb.New()
+		triedb = trie.NewDatabase(diskdb)
+	)
+	stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+	stTrie.Update([]byte("key-1"), []byte("val-1")) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
+	stTrie.Update([]byte("key-2"), []byte("val-2")) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
+	stTrie.Update([]byte("key-3"), []byte("val-3")) // 0x51c71a47af0695957647fb68766d0becee77e953df17c29b3c2f25436f055c78
+	stTrie.Commit(nil)                              // Root: 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+
+	accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+	acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+	val, _ := rlp.EncodeToBytes(acc)
+	accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+	rawdb.WriteAccountSnapshot(diskdb, hashData([]byte("acc-1")), val)
+	rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-1")), hashData([]byte("key-1")), []byte("val-1"))
+	rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-1")), hashData([]byte("key-2")), []byte("val-2"))
+	rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-1")), hashData([]byte("key-3")), []byte("val-3"))
+
+	acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+	val, _ = rlp.EncodeToBytes(acc)
+	accTrie.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+	diskdb.Put(hashData([]byte("acc-2")).Bytes(), val)
+	rawdb.WriteAccountSnapshot(diskdb, hashData([]byte("acc-2")), val)
+
+	acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+	val, _ = rlp.EncodeToBytes(acc)
+	accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+	rawdb.WriteAccountSnapshot(diskdb, hashData([]byte("acc-3")), val)
+	rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-1")), []byte("val-1"))
+	rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-2")), []byte("val-2"))
+	rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-3")), []byte("val-3"))
+
+	root, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+	triedb.Commit(root, false, nil)
+
+	snap := generateSnapshot(diskdb, triedb, 16, root)
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+}
+
+func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
+	t.Helper()
+	accIt := snap.AccountIterator(common.Hash{})
+	defer accIt.Release()
+	snapRoot, err := generateTrieRoot(nil, accIt, common.Hash{}, stackTrieGenerate,
+		func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+			storageIt, _ := snap.StorageIterator(accountHash, common.Hash{})
+			defer storageIt.Release()
+
+			hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+			if err != nil {
+				return common.Hash{}, err
+			}
+			return hash, nil
+		}, newGenerateStats(), true)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	if snapRoot != trieRoot {
+		t.Fatalf("snaproot: %#x != trieroot #%x", snapRoot, trieRoot)
+	}
+}
+
+type testHelper struct {
+	diskdb  *memorydb.Database
+	triedb  *trie.Database
+	accTrie *trie.SecureTrie
+}
+
+func newHelper() *testHelper {
+	diskdb := memorydb.New()
+	triedb := trie.NewDatabase(diskdb)
+	accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+	return &testHelper{
+		diskdb:  diskdb,
+		triedb:  triedb,
+		accTrie: accTrie,
+	}
+}
+
+func (t *testHelper) addTrieAccount(acckey string, acc *Account) {
+	val, _ := rlp.EncodeToBytes(acc)
+	t.accTrie.Update([]byte(acckey), val)
+}
+
+func (t *testHelper) addSnapAccount(acckey string, acc *Account) {
+	val, _ := rlp.EncodeToBytes(acc)
+	key := hashData([]byte(acckey))
+	rawdb.WriteAccountSnapshot(t.diskdb, key, val)
+}
+
+func (t *testHelper) addAccount(acckey string, acc *Account) {
+	t.addTrieAccount(acckey, acc)
+	t.addSnapAccount(acckey, acc)
+}
+
+func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string) {
+	accHash := hashData([]byte(accKey))
+	for i, key := range keys {
+		rawdb.WriteStorageSnapshot(t.diskdb, accHash, hashData([]byte(key)), []byte(vals[i]))
+	}
+}
+
+func (t *testHelper) makeStorageTrie(keys []string, vals []string) []byte {
+	stTrie, _ := trie.NewSecure(common.Hash{}, t.triedb)
+	for i, k := range keys {
+		stTrie.Update([]byte(k), []byte(vals[i]))
+	}
+	root, _ := stTrie.Commit(nil)
+	return root.Bytes()
+}
+
+func (t *testHelper) Generate() (common.Hash, *diskLayer) {
+	root, _ := t.accTrie.Commit(nil)
+	t.triedb.Commit(root, false, nil)
+	snap := generateSnapshot(t.diskdb, t.triedb, 16, root)
+	return root, snap
+}
+
+// Tests snapshot generation with existent flat state, where the flat state
+// contains some errors:
+// - a contract with an empty storage root but storage entries on disk
+// - a contract with a non-empty storage root but no storage slots on disk
+// - a contract (non-empty storage) missing some storage slots
+//   - missing at the beginning
+//   - missing in the middle
+//   - missing at the end
+// - a contract (non-empty storage) with wrong storage slots
+//   - wrong slots at the beginning
+//   - wrong slots in the middle
+//   - wrong slots at the end
+// - a contract (non-empty storage) with extra storage slots
+//   - extra slots at the beginning
+//   - extra slots in the middle
+//   - extra slots at the end
+func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
+	helper := newHelper()
+	stRoot := helper.makeStorageTrie([]string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+	// Account one, empty root but non-empty database
+	helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+	helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+	// Account two, non empty root but empty database
+	helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+
+	// Miss slots
+	{
+		// Account three, non empty root but misses slots in the beginning
+		helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})
+
+		// Account four, non empty root but misses slots in the middle
+		helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})
+
+		// Account five, non empty root but misses slots in the end
+		helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
+	}
+
+	// Wrong storage slots
+	{
+		// Account six, non empty root but wrong slots in the beginning
+		helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})
+
+		// Account seven, non empty root but wrong slots in the middle
+		helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})
+
+		// Account eight, non empty root but wrong slots in the end
+		helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})
+
+		// Account 9, non empty root but rotated slots
+		helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
+	}
+
+	// Extra storage slots
+	{
+		// Account 10, non empty root but extra slots in the beginning
+		helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})
+
+		// Account 11, non empty root but extra slots in the middle
+		helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})
+
+		// Account 12, non empty root but extra slots in the end
+		helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
+	}
+
+	root, snap := helper.Generate()
+	t.Logf("Root: %#x\n", root) // Root = 0x8746cce9fd9c658b2cfd639878ed6584b7a2b3e73bb40f607fcfa156002429a0
+
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+}
+
+// Tests snapshot generation with existent flat state, where the flat state
+// contains some errors:
+// - missing accounts
+// - wrong accounts
+// - extra accounts
+func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
+	helper := newHelper()
+	stRoot := helper.makeStorageTrie([]string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+	// Trie accounts [acc-1, acc-2, acc-3, acc-4, acc-6]
+	// Extra accounts [acc-0, acc-5, acc-7]
+
+	// Missing accounts, only in the trie
+	{
+		helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Beginning
+		helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Middle
+		helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // End
+	}
+
+	// Wrong accounts
+	{
+		helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
+
+		helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+	}
+
+	// Extra accounts, only in the snap
+	{
+		helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyRoot.Bytes()})                     // before the beginning
+		helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle
+		helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyRoot.Bytes()})          // after the end
+	}
+
+	root, snap := helper.Generate()
+	t.Logf("Root: %#x\n", root) // Root = 0x825891472281463511e7ebcc7f109e4f9200c20fa384754e11fd605cd98464e8
+
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+}
+
 // Tests that snapshot generation errors out correctly in case of a missing trie
 // node in the account trie.
 func TestGenerateCorruptAccountTrie(t *testing.T) {
@@ -55,7 +400,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
 	triedb.Commit(common.HexToHash("0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978"), false, nil)
 	diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
 
-	snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978"), nil)
+	snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978"))
 	select {
 	case <-snap.genPending:
 		// Snapshot generation succeeded
@@ -115,7 +460,7 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
 	// Delete a storage trie root and ensure the generator chokes
 	diskdb.Delete(common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67").Bytes())
 
-	snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"), nil)
+	snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"))
 	select {
 	case <-snap.genPending:
 		// Snapshot generation succeeded
@@ -174,7 +519,7 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
 	// Delete a storage trie leaf and ensure the generator chokes
 	diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
 
-	snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"), nil)
+	snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"))
 	select {
 	case <-snap.genPending:
 		// Snapshot generation succeeded
@@ -188,3 +533,301 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
 	snap.genAbort <- stop
 	<-stop
 }
+
+func getStorageTrie(n int, triedb *trie.Database) *trie.SecureTrie {
+	stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+	for i := 0; i < n; i++ {
+		k := fmt.Sprintf("key-%d", i)
+		v := fmt.Sprintf("val-%d", i)
+		stTrie.Update([]byte(k), []byte(v))
+	}
+	stTrie.Commit(nil)
+	return stTrie
+}
+
+// Tests snapshot generation when an extra account with storage exists in the snap state.
+func TestGenerateWithExtraAccounts(t *testing.T) {
+	var (
+		diskdb = memorydb.New()
+		triedb = trie.NewDatabase(diskdb)
+		stTrie = getStorageTrie(5, triedb)
+	)
+	accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+	{ // Account one in the trie
+		acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+		val, _ := rlp.EncodeToBytes(acc)
+		accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+		// Identical in the snap
+		key := hashData([]byte("acc-1"))
+		rawdb.WriteAccountSnapshot(diskdb, key, val)
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-4")), []byte("val-4"))
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-5")), []byte("val-5"))
+	}
+	{ // Account two exists only in the snapshot
+		acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+		val, _ := rlp.EncodeToBytes(acc)
+		key := hashData([]byte("acc-2"))
+		rawdb.WriteAccountSnapshot(diskdb, key, val)
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-1")), []byte("b-val-1"))
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2"))
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3"))
+	}
+	root, _ := accTrie.Commit(nil)
+	t.Logf("root: %x", root)
+	triedb.Commit(root, false, nil)
+	// To verify the test: If we now inspect the snap db, there should exist extraneous storage items
+	if data := rawdb.ReadStorageSnapshot(diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil {
+		t.Fatalf("expected snap storage to exist")
+	}
+
+	snap := generateSnapshot(diskdb, triedb, 16, root)
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+	// If we now inspect the snap db, there should exist no extraneous storage items
+	if data := rawdb.ReadStorageSnapshot(diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
+		t.Fatalf("expected slot to be removed, got %v", string(data))
+	}
+}
+
+func enableLogging() {
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+}
+
+// Tests snapshot generation when many extra accounts exist in the snap state.
+func TestGenerateWithManyExtraAccounts(t *testing.T) {
+	if false {
+		enableLogging()
+	}
+	var (
+		diskdb = memorydb.New()
+		triedb = trie.NewDatabase(diskdb)
+		stTrie = getStorageTrie(3, triedb)
+	)
+	accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+	{ // Account one in the trie
+		acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+		val, _ := rlp.EncodeToBytes(acc)
+		accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+		// Identical in the snap
+		key := hashData([]byte("acc-1"))
+		rawdb.WriteAccountSnapshot(diskdb, key, val)
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
+		rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
+	}
+	{ // 1000 accounts exist only in the snapshot
+		for i := 0; i < 1000; i++ {
+			//acc := &Account{Balance: big.NewInt(int64(i)), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+			acc := &Account{Balance: big.NewInt(int64(i)), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+			val, _ := rlp.EncodeToBytes(acc)
+			key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
+			rawdb.WriteAccountSnapshot(diskdb, key, val)
+		}
+	}
+	root, _ := accTrie.Commit(nil)
+	t.Logf("root: %x", root)
+	triedb.Commit(root, false, nil)
+
+	snap := generateSnapshot(diskdb, triedb, 16, root)
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+}
+
+// Tests this case
+// maxAccountRange 3
+// snapshot-accounts: 01, 02, 03, 04, 05, 06, 07
+// trie-accounts:             03,             07
+//
+// We iterate three snapshot accounts (max = 3) from the database. They are 0x01, 0x02, 0x03.
+// The trie has a lot of deletions.
+// So in the trie, we iterate 2 entries: 0x03, 0x07. We create 0x07 in the database and abort the procedure, because the trie is exhausted.
+// But in the database, we still have the stale accounts 0x04, 0x05. They have not been iterated yet, but the procedure is finished.
+func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
+	accountCheckRange = 3
+	if false {
+		enableLogging()
+	}
+	var (
+		diskdb = memorydb.New()
+		triedb = trie.NewDatabase(diskdb)
+	)
+	accTrie, _ := trie.New(common.Hash{}, triedb)
+	{
+		acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+		val, _ := rlp.EncodeToBytes(acc)
+		accTrie.Update(common.HexToHash("0x03").Bytes(), val)
+		accTrie.Update(common.HexToHash("0x07").Bytes(), val)
+
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x01"), val)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x02"), val)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x03"), val)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x04"), val)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x05"), val)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x06"), val)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x07"), val)
+	}
+
+	root, _ := accTrie.Commit(nil)
+	t.Logf("root: %x", root)
+	triedb.Commit(root, false, nil)
+
+	snap := generateSnapshot(diskdb, triedb, 16, root)
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+}
+
+// TestGenerateWithMalformedSnapdata tests what happens if we have some junk
+// in the snapshot database which cannot be parsed back to an account
+func TestGenerateWithMalformedSnapdata(t *testing.T) {
+	accountCheckRange = 3
+	if false {
+		enableLogging()
+	}
+	var (
+		diskdb = memorydb.New()
+		triedb = trie.NewDatabase(diskdb)
+	)
+	accTrie, _ := trie.New(common.Hash{}, triedb)
+	{
+		acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+		val, _ := rlp.EncodeToBytes(acc)
+		accTrie.Update(common.HexToHash("0x03").Bytes(), val)
+
+		junk := make([]byte, 100)
+		copy(junk, []byte{0xde, 0xad})
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x02"), junk)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x03"), junk)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x04"), junk)
+		rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x05"), junk)
+	}
+
+	root, _ := accTrie.Commit(nil)
+	t.Logf("root: %x", root)
+	triedb.Commit(root, false, nil)
+
+	snap := generateSnapshot(diskdb, triedb, 16, root)
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+	// If we now inspect the snap db, there should exist no extraneous storage items
+	if data := rawdb.ReadStorageSnapshot(diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
+		t.Fatalf("expected slot to be removed, got %v", string(data))
+	}
+}
+
+func TestGenerateFromEmptySnap(t *testing.T) {
+	//enableLogging()
+	accountCheckRange = 10
+	storageCheckRange = 20
+	helper := newHelper()
+	stRoot := helper.makeStorageTrie([]string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+	// Add 400 accounts to the trie
+	for i := 0; i < 400; i++ {
+		helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
+			&Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+	}
+	root, snap := helper.Generate()
+	t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4
+
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(1 * time.Second):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+}
+
+// Tests snapshot generation with existent flat state, where the flat state
+// storage is correct but incomplete.
+// The incomplete part is in the second range
+// snap: [ 0x01, 0x02, 0x03, 0x04] , [ 0x05, 0x06, 0x07, {missing}] (with storageCheck = 4)
+// trie:  0x01, 0x02, 0x03, 0x04,  0x05, 0x06, 0x07, 0x08
+// This hits a case where the snap verification passes, but there are more elements in the trie
+// which we must also add.
+func TestGenerateWithIncompleteStorage(t *testing.T) {
+	storageCheckRange = 4
+	helper := newHelper()
+	stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"}
+	stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"}
+	stRoot := helper.makeStorageTrie(stKeys, stVals)
+	// We add 8 accounts, each one is missing exactly one of the storage slots. This means
+	// we don't have to order the keys and figure out exactly which hash-key winds up
+	// on the sensitive spots at the boundaries
+	for i := 0; i < 8; i++ {
+		accKey := fmt.Sprintf("acc-%d", i)
+		helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: emptyCode.Bytes()})
+		var moddedKeys []string
+		var moddedVals []string
+		for ii := 0; ii < 8; ii++ {
+			if ii != i {
+				moddedKeys = append(moddedKeys, stKeys[ii])
+				moddedVals = append(moddedVals, stVals[ii])
+			}
+		}
+		helper.addSnapStorage(accKey, moddedKeys, moddedVals)
+	}
+
+	root, snap := helper.Generate()
+	t.Logf("Root: %#x\n", root) // Root: 0xca73f6f05ba4ca3024ef340ef3dfca8fdabc1b677ff13f5a9571fd49c16e67ff
+
+	select {
+	case <-snap.genPending:
+		// Snapshot generation succeeded
+
+	case <-time.After(250 * time.Millisecond):
+		t.Errorf("Snapshot generation failed")
+	}
+	checkSnapRoot(t, snap, root)
+	// Signal abortion to the generator and wait for it to tear down
+	stop := make(chan *generatorStats)
+	snap.genAbort <- stop
+	<-stop
+}

+ 15 - 137
core/state/snapshot/journal.go

@@ -37,7 +37,10 @@ const journalVersion uint64 = 0
 
 // journalGenerator is a disk layer entry containing the generator progress marker.
 type journalGenerator struct {
-	Wiping   bool // Whether the database was in progress of being wiped
+	// Indicator of whether the database was in the process of being wiped.
+	// It's deprecated but kept here for backward compatibility.
+	Wiping bool
+
 	Done     bool // Whether the generator finished creating the snapshot
 	Marker   []byte
 	Accounts uint64
@@ -63,30 +66,6 @@ type journalStorage struct {
 	Vals [][]byte
 }
 
-// loadAndParseLegacyJournal tries to parse the snapshot journal in legacy format.
-func loadAndParseLegacyJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
-	// Retrieve the journal, for legacy journal it must exist since even for
-	// 0 layer it stores whether we've already generated the snapshot or are
-	// in progress only.
-	journal := rawdb.ReadSnapshotJournal(db)
-	if len(journal) == 0 {
-		return nil, journalGenerator{}, errors.New("missing or corrupted snapshot journal")
-	}
-	r := rlp.NewStream(bytes.NewReader(journal), 0)
-
-	// Read the snapshot generation progress for the disk layer
-	var generator journalGenerator
-	if err := r.Decode(&generator); err != nil {
-		return nil, journalGenerator{}, fmt.Errorf("failed to load snapshot progress marker: %v", err)
-	}
-	// Load all the snapshot diffs from the journal
-	snapshot, err := loadDiffLayer(base, r)
-	if err != nil {
-		return nil, generator, err
-	}
-	return snapshot, generator, nil
-}
-
 // loadAndParseJournal tries to parse the snapshot journal in latest format.
 func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
 	// Retrieve the disk layer generator. It must exist, no matter the
@@ -147,12 +126,17 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
 }
 
 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, recovery bool) (snapshot, error) {
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, recovery bool) (snapshot, bool, error) {
+	// If snapshotting is disabled (initial sync in progress), don't do anything;
+	// wait for the chain to permit us to do something meaningful
+	if rawdb.ReadSnapshotDisabled(diskdb) {
+		return nil, true, nil
+	}
 	// Retrieve the block number and hash of the snapshot, failing if no snapshot
 	// is present in the database (or crashed mid-update).
 	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
 	if baseRoot == (common.Hash{}) {
-		return nil, errors.New("missing or corrupted snapshot")
+		return nil, false, errors.New("missing or corrupted snapshot")
 	}
 	base := &diskLayer{
 		diskdb: diskdb,
@@ -160,15 +144,10 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		cache:  fastcache.New(cache * 1024 * 1024),
 		root:   baseRoot,
 	}
-	var legacy bool
 	snapshot, generator, err := loadAndParseJournal(diskdb, base)
 	if err != nil {
 		log.Warn("Failed to load new-format journal", "error", err)
-		snapshot, generator, err = loadAndParseLegacyJournal(diskdb, base)
-		legacy = true
-	}
-	if err != nil {
-		return nil, err
+		return nil, false, err
 	}
 	// Entire snapshot journal loaded, sanity check the head. If the loaded
 	// snapshot is not matched with current state root, print a warning log
@@ -182,8 +161,8 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 		// If it's legacy snapshot, or it's new-format snapshot but
 		// it's not in recovery mode, returns the error here for
 		// rebuilding the entire snapshot forcibly.
-		if legacy || !recovery {
-			return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
+		if !recovery {
+			return nil, false, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
 		}
 		// It's in snapshot recovery, the assumption is held that
 		// the disk layer is always higher than chain head. It can
@@ -193,14 +172,6 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 	}
 	// Everything loaded correctly, resume any suspended operations
 	if !generator.Done {
-		// If the generator was still wiping, restart one from scratch (fine for
-		// now as it's rare and the wiper deletes the stuff it touches anyway, so
-		// restarting won't incur a lot of extra database hops.
-		var wiper chan struct{}
-		if generator.Wiping {
-			log.Info("Resuming previous snapshot wipe")
-			wiper = wipeSnapshot(diskdb, false)
-		}
 		// Whether or not wiping was in progress, load any generator progress too
 		base.genMarker = generator.Marker
 		if base.genMarker == nil {
@@ -214,7 +185,6 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 			origin = binary.BigEndian.Uint64(generator.Marker)
 		}
 		go base.generate(&generatorStats{
-			wiping:   wiper,
 			origin:   origin,
 			start:    time.Now(),
 			accounts: generator.Accounts,
@@ -222,7 +192,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 			storage:  common.StorageSize(generator.Storage),
 		})
 	}
-	return snapshot, nil
+	return snapshot, false, nil
 }
 
 // loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new
@@ -352,95 +322,3 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
 	return base, nil
 }
-
-// LegacyJournal writes the persistent layer generator stats into a buffer
-// to be stored in the database as the snapshot journal.
-//
-// Note it's the legacy version which is only used in testing right now.
-func (dl *diskLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
-	// If the snapshot is currently being generated, abort it
-	var stats *generatorStats
-	if dl.genAbort != nil {
-		abort := make(chan *generatorStats)
-		dl.genAbort <- abort
-
-		if stats = <-abort; stats != nil {
-			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
-		}
-	}
-	// Ensure the layer didn't get stale
-	dl.lock.RLock()
-	defer dl.lock.RUnlock()
-
-	if dl.stale {
-		return common.Hash{}, ErrSnapshotStale
-	}
-	// Write out the generator marker
-	entry := journalGenerator{
-		Done:   dl.genMarker == nil,
-		Marker: dl.genMarker,
-	}
-	if stats != nil {
-		entry.Wiping = (stats.wiping != nil)
-		entry.Accounts = stats.accounts
-		entry.Slots = stats.slots
-		entry.Storage = uint64(stats.storage)
-	}
-	log.Debug("Legacy journalled disk layer", "root", dl.root)
-	if err := rlp.Encode(buffer, entry); err != nil {
-		return common.Hash{}, err
-	}
-	return dl.root, nil
-}
-
-// Journal writes the memory layer contents into a buffer to be stored in the
-// database as the snapshot journal.
-//
-// Note it's the legacy version which is only used in testing right now.
-func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) {
-	// Journal the parent first
-	base, err := dl.parent.LegacyJournal(buffer)
-	if err != nil {
-		return common.Hash{}, err
-	}
-	// Ensure the layer didn't get stale
-	dl.lock.RLock()
-	defer dl.lock.RUnlock()
-
-	if dl.Stale() {
-		return common.Hash{}, ErrSnapshotStale
-	}
-	// Everything below was journalled, persist this layer too
-	if err := rlp.Encode(buffer, dl.root); err != nil {
-		return common.Hash{}, err
-	}
-	destructs := make([]journalDestruct, 0, len(dl.destructSet))
-	for hash := range dl.destructSet {
-		destructs = append(destructs, journalDestruct{Hash: hash})
-	}
-	if err := rlp.Encode(buffer, destructs); err != nil {
-		return common.Hash{}, err
-	}
-	accounts := make([]journalAccount, 0, len(dl.accountData))
-	for hash, blob := range dl.accountData {
-		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
-	}
-	if err := rlp.Encode(buffer, accounts); err != nil {
-		return common.Hash{}, err
-	}
-	storage := make([]journalStorage, 0, len(dl.storageData))
-	for hash, slots := range dl.storageData {
-		keys := make([]common.Hash, 0, len(slots))
-		vals := make([][]byte, 0, len(slots))
-		for key, val := range slots {
-			keys = append(keys, key)
-			vals = append(vals, val)
-		}
-		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
-	}
-	if err := rlp.Encode(buffer, storage); err != nil {
-		return common.Hash{}, err
-	}
-	log.Debug("Legacy journalled diff layer", "root", dl.root, "parent", dl.parent.Root())
-	return base, nil
-}

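Side note on why the deprecated Wiping field is retained at all: go-ethereum's RLP struct encoding is purely positional, so dropping the field would shift every later field and break decoding of journals written by older releases. A minimal, standalone sketch of that round-trip (the struct mirrors journalGenerator; the values are illustrative):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// Mirror of journalGenerator: keeping the deprecated Wiping field in
// place preserves the positional RLP layout of previously written
// journals.
type journalGenerator struct {
	Wiping   bool // deprecated, retained for backward compatibility
	Done     bool
	Marker   []byte
	Accounts uint64
	Slots    uint64
	Storage  uint64
}

func main() {
	blob, _ := rlp.EncodeToBytes(journalGenerator{Wiping: true, Accounts: 42})

	var gen journalGenerator
	if err := rlp.DecodeBytes(blob, &gen); err != nil {
		panic(err)
	}
	fmt.Println(gen.Accounts) // 42: old journals keep decoding cleanly
}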
+ 63 - 42
core/state/snapshot/snapshot.go

@@ -137,10 +137,6 @@ type snapshot interface {
 	// flattening everything down (bad for reorgs).
 	Journal(buffer *bytes.Buffer) (common.Hash, error)
 
-	// LegacyJournal is basically identical to Journal. it's the legacy version for
-	// flushing legacy journal. Now the only purpose of this function is for testing.
-	LegacyJournal(buffer *bytes.Buffer) (common.Hash, error)
-
 	// Stale return whether this layer has become stale (was flattened across) or
 	// if it's still live.
 	Stale() bool
@@ -152,11 +148,11 @@ type snapshot interface {
 	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
 }
 
-// SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
-// base layer backed by a key-value store, on top of which arbitrarily many in-
-// memory diff layers are topped. The memory diffs can form a tree with branching,
-// but the disk layer is singleton and common to all. If a reorg goes deeper than
-// the disk layer, everything needs to be deleted.
+// Tree is an Ethereum state snapshot tree. It consists of one persistent base
+// layer backed by a key-value store, on top of which arbitrarily many in-memory
+// diff layers are topped. The memory diffs can form a tree with branching, but
+// the disk layer is singleton and common to all. If a reorg goes deeper than the
+// disk layer, everything needs to be deleted.
 //
 // The goal of a state snapshot is twofold: to allow direct access to account and
 // storage data to avoid expensive multi-level trie lookups; and to allow sorted,
@@ -190,7 +186,11 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 		defer snap.waitBuild()
 	}
 	// Attempt to load a previously persisted snapshot and rebuild one if failed
-	head, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
+	head, disabled, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
+	if disabled {
+		log.Warn("Snapshot maintenance disabled (syncing)")
+		return snap, nil
+	}
 	if err != nil {
 		if rebuild {
 			log.Warn("Failed to load snapshot, regenerating", "err", err)
@@ -228,6 +228,55 @@ func (t *Tree) waitBuild() {
 	}
 }
 
+// Disable interrupts any pending snapshot generator, deletes all the snapshot
+// layers in memory and marks snapshots disabled globally. In order to resume
+// the snapshot functionality, the caller must invoke Rebuild.
+func (t *Tree) Disable() {
+	// Interrupt any live snapshot layers
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	for _, layer := range t.layers {
+		switch layer := layer.(type) {
+		case *diskLayer:
+			// If the base layer is generating, abort it
+			if layer.genAbort != nil {
+				abort := make(chan *generatorStats)
+				layer.genAbort <- abort
+				<-abort
+			}
+			// Layer should be inactive now, mark it as stale
+			layer.lock.Lock()
+			layer.stale = true
+			layer.lock.Unlock()
+
+		case *diffLayer:
+			// If the layer is a simple diff, simply mark as stale
+			layer.lock.Lock()
+			atomic.StoreUint32(&layer.stale, 1)
+			layer.lock.Unlock()
+
+		default:
+			panic(fmt.Sprintf("unknown layer type: %T", layer))
+		}
+	}
+	t.layers = map[common.Hash]snapshot{}
+
+	// Delete all snapshot liveness information from the database
+	batch := t.diskdb.NewBatch()
+
+	rawdb.WriteSnapshotDisabled(batch)
+	rawdb.DeleteSnapshotRoot(batch)
+	rawdb.DeleteSnapshotJournal(batch)
+	rawdb.DeleteSnapshotGenerator(batch)
+	rawdb.DeleteSnapshotRecoveryNumber(batch)
+	// Note, we don't delete the sync progress
+
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to disable snapshots", "err", err)
+	}
+}
+
 // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
 // snapshot is maintained for that block.
 func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
@@ -622,29 +671,6 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
 	return base, nil
 }
 
-// LegacyJournal is basically identical to Journal. it's the legacy
-// version for flushing legacy journal. Now the only purpose of this
-// function is for testing.
-func (t *Tree) LegacyJournal(root common.Hash) (common.Hash, error) {
-	// Retrieve the head snapshot to journal from var snap snapshot
-	snap := t.Snapshot(root)
-	if snap == nil {
-		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
-	}
-	// Run the journaling
-	t.lock.Lock()
-	defer t.lock.Unlock()
-
-	journal := new(bytes.Buffer)
-	base, err := snap.(snapshot).LegacyJournal(journal)
-	if err != nil {
-		return common.Hash{}, err
-	}
-	// Store the journal into the database and return
-	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
-	return base, nil
-}
-
 // Rebuild wipes all available snapshot data from the persistent database and
 // discard all caches and diff layers. Afterwards, it starts a new snapshot
 // generator with the given root hash.
@@ -653,11 +679,9 @@ func (t *Tree) Rebuild(root common.Hash) {
 	defer t.lock.Unlock()
 
 	// Firstly delete any recovery flag in the database. Because now we are
-	// building a brand new snapshot.
+	// building a brand new snapshot. Also reenable the snapshot feature.
 	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
-
-	// Track whether there's a wipe currently running and keep it alive if so
-	var wiper chan struct{}
+	rawdb.DeleteSnapshotDisabled(t.diskdb)
 
 	// Iterate over and mark all layers stale
 	for _, layer := range t.layers {
@@ -667,10 +691,7 @@ func (t *Tree) Rebuild(root common.Hash) {
 			if layer.genAbort != nil {
 				abort := make(chan *generatorStats)
 				layer.genAbort <- abort
-
-				if stats := <-abort; stats != nil {
-					wiper = stats.wiping
-				}
+				<-abort
 			}
 			// Layer should be inactive now, mark it as stale
 			layer.lock.Lock()
@@ -691,7 +712,7 @@ func (t *Tree) Rebuild(root common.Hash) {
 	// generator will run a wiper first if there's not one running right now.
 	log.Info("Rebuilding state snapshot")
 	t.layers = map[common.Hash]snapshot{
-		root: generateSnapshot(t.diskdb, t.triedb, t.cache, root, wiper),
+		root: generateSnapshot(t.diskdb, t.triedb, t.cache, root),
 	}
 }
 

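Taken together, Disable and Rebuild give callers a suspend/resume pair around bulk state rewrites such as snap sync. A rough usage sketch (the helper and its sync argument are hypothetical; only Disable and Rebuild come from the diff above):

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// pauseSnapshots suspends snapshot maintenance around a bulk state
// rewrite, then resumes generation from the settled root.
func pauseSnapshots(snaps *snapshot.Tree, sync func() common.Hash) {
	// Disable marks every layer stale and persists the disabled flag,
	// so the suspension survives a restart.
	snaps.Disable()

	// Run the bulk operation with snapshots switched off.
	newRoot := sync()

	// Rebuild clears the disabled flag and starts a fresh generator.
	snaps.Rebuild(newRoot)
}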
+ 24 - 8
core/state/snapshot/wipe.go

@@ -24,10 +24,11 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 )
 
 // wipeSnapshot starts a goroutine to iterate over the entire key-value database
-// and delete all the  data associated with the snapshot (accounts, storage,
+// and delete all the data associated with the snapshot (accounts, storage,
 // metadata). After all is done, the snapshot range of the database is compacted
 // to free up unused data blocks.
 func wipeSnapshot(db ethdb.KeyValueStore, full bool) chan struct{} {
@@ -53,10 +54,10 @@ func wipeSnapshot(db ethdb.KeyValueStore, full bool) chan struct{} {
 // removed in sync to avoid data races. After all is done, the snapshot range of
 // the database is compacted to free up unused data blocks.
 func wipeContent(db ethdb.KeyValueStore) error {
-	if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, len(rawdb.SnapshotAccountPrefix)+common.HashLength); err != nil {
+	if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, nil, nil, len(rawdb.SnapshotAccountPrefix)+common.HashLength, snapWipedAccountMeter, true); err != nil {
 		return err
 	}
-	if err := wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, len(rawdb.SnapshotStoragePrefix)+2*common.HashLength); err != nil {
+	if err := wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, nil, nil, len(rawdb.SnapshotStoragePrefix)+2*common.HashLength, snapWipedStorageMeter, true); err != nil {
 		return err
 	}
 	// Compact the snapshot section of the database to get rid of unused space
@@ -82,8 +83,11 @@ func wipeContent(db ethdb.KeyValueStore) error {
 }
 
 // wipeKeyRange deletes a range of keys from the database starting with prefix
-// and having a specific total key length.
-func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, keylen int) error {
+// and having a specific total key length. The origin and limit are optional
+// and restrict the deletion to a particular key range.
+//
+// If specified, the origin is included in the wipe while the limit is excluded.
+func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, origin []byte, limit []byte, keylen int, meter metrics.Meter, report bool) error {
 	// Batch deletions together to avoid holding an iterator for too long
 	var (
 		batch = db.NewBatch()
@@ -92,7 +96,11 @@ func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, keylen int
 	// Iterate over the key-range and delete all of them
 	start, logged := time.Now(), time.Now()
 
-	it := db.NewIterator(prefix, nil)
+	it := db.NewIterator(prefix, origin)
+	var stop []byte
+	if limit != nil {
+		stop = append(prefix, limit...)
+	}
 	for it.Next() {
 		// Skip any keys with the correct prefix but wrong length (trie nodes)
 		key := it.Key()
@@ -102,6 +110,9 @@ func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, keylen int
 		if len(key) != keylen {
 			continue
 		}
+		if stop != nil && bytes.Compare(key, stop) >= 0 {
+			break
+		}
 		// Delete the key and periodically recreate the batch and iterator
 		batch.Delete(key)
 		items++
@@ -116,7 +127,7 @@ func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, keylen int
 			seekPos := key[len(prefix):]
 			it = db.NewIterator(prefix, seekPos)
 
-			if time.Since(logged) > 8*time.Second {
+			if time.Since(logged) > 8*time.Second && report {
 				log.Info("Deleting state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start)))
 				logged = time.Now()
 			}
@@ -126,6 +137,11 @@ func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, keylen int
 	if err := batch.Write(); err != nil {
 		return err
 	}
-	log.Info("Deleted state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start)))
+	if meter != nil {
+		meter.Mark(int64(items))
+	}
+	if report {
+		log.Info("Deleted state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start)))
+	}
 	return nil
 }

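The new origin/limit pair gives wipeKeyRange half-open range semantics over the suffix that follows the prefix. A hypothetical package-internal call, wiping account snapshot entries whose 32-byte hashed key starts at 0x40-repeated and stops before 0x80-repeated (db is assumed to be an ethdb.KeyValueStore):

origin := bytes.Repeat([]byte{0x40}, common.HashLength) // inclusive start of the key suffix
limit := bytes.Repeat([]byte{0x80}, common.HashLength)  // exclusive end of the key suffix

err := wipeKeyRange(db, "accounts",
	rawdb.SnapshotAccountPrefix, origin, limit,
	len(rawdb.SnapshotAccountPrefix)+common.HashLength,
	nil,   // no meter attached
	false, // suppress progress logging
)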
+ 6 - 8
core/state/state_test.go

@@ -27,8 +27,6 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 )
 
-var toAddr = common.BytesToAddress
-
 type stateTest struct {
 	db    ethdb.Database
 	state *StateDB
@@ -46,11 +44,11 @@ func TestDump(t *testing.T) {
 	s := &stateTest{db: db, state: sdb}
 
 	// generate a few entries
-	obj1 := s.state.GetOrNewStateObject(toAddr([]byte{0x01}))
+	obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01}))
 	obj1.AddBalance(big.NewInt(22))
-	obj2 := s.state.GetOrNewStateObject(toAddr([]byte{0x01, 0x02}))
+	obj2 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02}))
 	obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3})
-	obj3 := s.state.GetOrNewStateObject(toAddr([]byte{0x02}))
+	obj3 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x02}))
 	obj3.SetBalance(big.NewInt(44))
 
 	// write some of them to the trie
@@ -108,7 +106,7 @@ func TestNull(t *testing.T) {
 }
 
 func TestSnapshot(t *testing.T) {
-	stateobjaddr := toAddr([]byte("aa"))
+	stateobjaddr := common.BytesToAddress([]byte("aa"))
 	var storageaddr common.Hash
 	data1 := common.BytesToHash([]byte{42})
 	data2 := common.BytesToHash([]byte{43})
@@ -150,8 +148,8 @@ func TestSnapshotEmpty(t *testing.T) {
 func TestSnapshot2(t *testing.T) {
 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
 
-	stateobjaddr0 := toAddr([]byte("so0"))
-	stateobjaddr1 := toAddr([]byte("so1"))
+	stateobjaddr0 := common.BytesToAddress([]byte("so0"))
+	stateobjaddr1 := common.BytesToAddress([]byte("so1"))
 	var storageaddr common.Hash
 
 	data0 := common.BytesToHash([]byte{17})

+ 1 - 1
core/state/statedb.go

@@ -948,7 +948,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	// The onleaf func is called _serially_, so we can reuse the same account
 	// for unmarshalling every time.
 	var account Account
-	root, err := s.trie.Commit(func(path []byte, leaf []byte, parent common.Hash) error {
+	root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
 		if err := rlp.DecodeBytes(leaf, &account); err != nil {
 			return nil
 		}

+ 3 - 3
core/state/statedb_test.go

@@ -672,7 +672,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 	// Create an initial state with a single contract
 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
 
-	addr := toAddr([]byte("so"))
+	addr := common.BytesToAddress([]byte("so"))
 	state.SetBalance(addr, big.NewInt(1))
 
 	root, _ := state.Commit(false)
@@ -705,11 +705,11 @@ func TestMissingTrieNodes(t *testing.T) {
 	db := NewDatabase(memDb)
 	var root common.Hash
 	state, _ := New(common.Hash{}, db, nil)
-	addr := toAddr([]byte("so"))
+	addr := common.BytesToAddress([]byte("so"))
 	{
 		state.SetBalance(addr, big.NewInt(1))
 		state.SetCode(addr, []byte{1, 2, 3})
-		a2 := toAddr([]byte("another"))
+		a2 := common.BytesToAddress([]byte("another"))
 		state.SetBalance(a2, big.NewInt(100))
 		state.SetCode(a2, []byte{1, 2, 4})
 		root, _ = state.Commit(false)

+ 19 - 5
core/state/sync.go

@@ -26,17 +26,31 @@ import (
 )
 
 // NewStateSync create a new state trie download scheduler.
-func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom) *trie.Sync {
+func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync {
+	// Register the storage slot callback if the external callback is specified.
+	var onSlot func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error
+	if onLeaf != nil {
+		onSlot = func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
+			return onLeaf(paths, leaf)
+		}
+	}
+	// Register the account callback to connect the state trie with the storage
+	// tries belonging to contract accounts.
 	var syncer *trie.Sync
-	callback := func(path []byte, leaf []byte, parent common.Hash) error {
+	onAccount := func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
+		if onLeaf != nil {
+			if err := onLeaf(paths, leaf); err != nil {
+				return err
+			}
+		}
 		var obj Account
 		if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
 			return err
 		}
-		syncer.AddSubTrie(obj.Root, path, parent, nil)
-		syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent)
+		syncer.AddSubTrie(obj.Root, hexpath, parent, onSlot)
+		syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), hexpath, parent)
 		return nil
 	}
-	syncer = trie.NewSync(root, database, callback, bloom)
+	syncer = trie.NewSync(root, database, onAccount, bloom)
 	return syncer
 }

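The added onLeaf hook fires once per trie leaf, for accounts directly and for storage slots via the registered onSlot adapter. A minimal sketch of wiring in a counting callback (db stands in for both the destination database and the bloom's backing store):

var leaves uint64
sched := state.NewStateSync(root, db, trie.NewSyncBloom(1, db),
	func(paths [][]byte, leaf []byte) error {
		leaves++ // invoked for every account and storage leaf delivered
		return nil
	})
nodes, paths, codes := sched.Missing(16) // fetch the first batch of wanted hashes
_, _, _ = nodes, paths, codes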
+ 6 - 6
core/state/sync_test.go

@@ -133,7 +133,7 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error {
 // Tests that an empty state is not scheduled for syncing.
 func TestEmptyStateSync(t *testing.T) {
 	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
-	sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()))
+	sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil)
 	if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
 		t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes)
 	}
@@ -170,7 +170,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
 
 	// Create a destination state and sync with the scheduler
 	dstDb := rawdb.NewMemoryDatabase()
-	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))
+	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
 
 	nodes, paths, codes := sched.Missing(count)
 	var (
@@ -249,7 +249,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
 
 	// Create a destination state and sync with the scheduler
 	dstDb := rawdb.NewMemoryDatabase()
-	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))
+	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
 
 	nodes, _, codes := sched.Missing(0)
 	queue := append(append([]common.Hash{}, nodes...), codes...)
@@ -297,7 +297,7 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
 
 	// Create a destination state and sync with the scheduler
 	dstDb := rawdb.NewMemoryDatabase()
-	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))
+	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
 
 	queue := make(map[common.Hash]struct{})
 	nodes, _, codes := sched.Missing(count)
@@ -347,7 +347,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
 
 	// Create a destination state and sync with the scheduler
 	dstDb := rawdb.NewMemoryDatabase()
-	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))
+	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
 
 	queue := make(map[common.Hash]struct{})
 	nodes, _, codes := sched.Missing(0)
@@ -414,7 +414,7 @@ func TestIncompleteStateSync(t *testing.T) {
 
 	// Create a destination state and sync with the scheduler
 	dstDb := rawdb.NewMemoryDatabase()
-	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb))
+	sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
 
 	var added []common.Hash
 

+ 0 - 32
core/types/block.go

@@ -166,19 +166,6 @@ type Block struct {
 	ReceivedFrom interface{}
 }
 
-// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
-// code solely to facilitate upgrading the database from the old format to the
-// new, after which it should be deleted. Do not use!
-func (b *Block) DeprecatedTd() *big.Int {
-	return b.td
-}
-
-// [deprecated by eth/63]
-// StorageBlock defines the RLP encoding of a Block stored in the
-// state database. The StorageBlock encoding contains fields that
-// would otherwise need to be recomputed.
-type StorageBlock Block
-
 // "external" block encoding. used for eth protocol, etc.
 type extblock struct {
 	Header *Header
@@ -186,15 +173,6 @@ type extblock struct {
 	Uncles []*Header
 }
 
-// [deprecated by eth/63]
-// "storage" block encoding. used for database.
-type storageblock struct {
-	Header *Header
-	Txs    []*Transaction
-	Uncles []*Header
-	TD     *big.Int
-}
-
 // NewBlock creates a new block. The input data is copied,
 // changes to header and to the field values will not affect the
 // block.
@@ -279,16 +257,6 @@ func (b *Block) EncodeRLP(w io.Writer) error {
 	})
 }
 
-// [deprecated by eth/63]
-func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
-	var sb storageblock
-	if err := s.Decode(&sb); err != nil {
-		return err
-	}
-	b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD
-	return nil
-}
-
 // TODO: copies
 
 func (b *Block) Uncles() []*Header          { return b.uncles }

+ 16 - 0
core/types/transaction_marshalling.go

@@ -1,3 +1,19 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 package types
 
 import (

+ 0 - 5
core/vm/errors.go

@@ -23,9 +23,6 @@ import (
 
 // List evm execution errors
 var (
-	// ErrInvalidSubroutineEntry means that a BEGINSUB was reached via iteration,
-	// as opposed to from a JUMPSUB instruction
-	ErrInvalidSubroutineEntry   = errors.New("invalid subroutine entry")
 	ErrOutOfGas                 = errors.New("out of gas")
 	ErrCodeStoreOutOfGas        = errors.New("contract creation code storage out of gas")
 	ErrDepth                    = errors.New("max call depth exceeded")
@@ -37,8 +34,6 @@ var (
 	ErrWriteProtection          = errors.New("write protection")
 	ErrReturnDataOutOfBounds    = errors.New("return data out of bounds")
 	ErrGasUintOverflow          = errors.New("gas uint64 overflow")
-	ErrInvalidRetsub            = errors.New("invalid retsub")
-	ErrReturnStackExceeded      = errors.New("return stack limit reached")
 )
 
 // ErrStackUnderflow wraps an evm error when the items on the stack less

+ 8 - 9
core/vm/evm.go

@@ -463,13 +463,16 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 
 	ret, err := run(evm, contract, nil, false)
 
-	// check whether the max code size has been exceeded
-	maxCodeSizeExceeded := evm.chainRules.IsEIP158 && len(ret) > params.MaxCodeSize
+	// Check whether the max code size has been exceeded, and assign the error if so.
+	if err == nil && evm.chainRules.IsEIP158 && len(ret) > params.MaxCodeSize {
+		err = ErrMaxCodeSizeExceeded
+	}
+
 	// if the contract creation ran successfully and no errors were returned
 	// calculate the gas required to store the code. If the code could not
 	// be stored due to not enough gas set an error and let it be handled
 	// by the error checking condition below.
-	if err == nil && !maxCodeSizeExceeded {
+	if err == nil {
 		createDataGas := uint64(len(ret)) * params.CreateDataGas
 		if contract.UseGas(createDataGas) {
 			evm.StateDB.SetCode(address, ret)
@@ -481,21 +484,17 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 	// When an error was returned by the EVM or when setting the creation code
 	// above we revert to the snapshot and consume any gas remaining. Additionally
 	// when we're in homestead this also counts for code storage gas errors.
-	if maxCodeSizeExceeded || (err != nil && (evm.chainRules.IsHomestead || err != ErrCodeStoreOutOfGas)) {
+	if err != nil && (evm.chainRules.IsHomestead || err != ErrCodeStoreOutOfGas) {
 		evm.StateDB.RevertToSnapshot(snapshot)
 		if err != ErrExecutionReverted {
 			contract.UseGas(contract.Gas)
 		}
 	}
-	// Assign err if contract code size exceeds the max while the err is still empty.
-	if maxCodeSizeExceeded && err == nil {
-		err = ErrMaxCodeSizeExceeded
-	}
+
 	if evm.vmConfig.Debug && evm.depth == 0 {
 		evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
 	}
 	return ret, address, contract.Gas, err
-
 }
 
 // Create creates a new contract using code as deployment code.

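For scale, the numbers behind the deposit step above (both constants are from params):

const (
	maxCodeSize   = 24576 // params.MaxCodeSize, the EIP-170 cap checked above
	createDataGas = 200   // params.CreateDataGas, charged per byte of deployed code
)

// Storing a maximum-size contract costs 24576 * 200 = 4,915,200 gas on
// top of execution; if contract.UseGas cannot cover it, err becomes
// ErrCodeStoreOutOfGas and, post-Homestead, the creation is reverted.
const maxDepositCost = maxCodeSize * createDataGas // 4915200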
+ 35 - 34
core/vm/instructions_test.go

@@ -40,6 +40,7 @@ type twoOperandParams struct {
 	y string
 }
 
+var alphabetSoup = "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
 var commonParams []*twoOperandParams
 var twoOpMethods map[string]executionFunc
 
@@ -347,8 +348,8 @@ func BenchmarkOpSub256(b *testing.B) {
 }
 
 func BenchmarkOpMul(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opMul, x, y)
 }
@@ -379,64 +380,64 @@ func BenchmarkOpSdiv(b *testing.B) {
 }
 
 func BenchmarkOpMod(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opMod, x, y)
 }
 
 func BenchmarkOpSmod(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opSmod, x, y)
 }
 
 func BenchmarkOpExp(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opExp, x, y)
 }
 
 func BenchmarkOpSignExtend(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opSignExtend, x, y)
 }
 
 func BenchmarkOpLt(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opLt, x, y)
 }
 
 func BenchmarkOpGt(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opGt, x, y)
 }
 
 func BenchmarkOpSlt(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opSlt, x, y)
 }
 
 func BenchmarkOpSgt(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opSgt, x, y)
 }
 
 func BenchmarkOpEq(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opEq, x, y)
 }
@@ -446,45 +447,45 @@ func BenchmarkOpEq2(b *testing.B) {
 	opBenchmark(b, opEq, x, y)
 }
 func BenchmarkOpAnd(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opAnd, x, y)
 }
 
 func BenchmarkOpOr(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opOr, x, y)
 }
 
 func BenchmarkOpXor(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opXor, x, y)
 }
 
 func BenchmarkOpByte(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
 
 	opBenchmark(b, opByte, x, y)
 }
 
 func BenchmarkOpAddmod(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	z := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
+	z := alphabetSoup
 
 	opBenchmark(b, opAddmod, x, y, z)
 }
 
 func BenchmarkOpMulmod(b *testing.B) {
-	x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
-	z := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+	x := alphabetSoup
+	y := alphabetSoup
+	z := alphabetSoup
 
 	opBenchmark(b, opMulmod, x, y, z)
 }

+ 2 - 2
core/vm/interpreter.go

@@ -144,7 +144,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 	defer func() { in.evm.depth-- }()
 
 	// Make sure the readOnly is only set if we aren't in readOnly yet.
-	// This makes also sure that the readOnly flag isn't removed for child calls.
+	// This also makes sure that the readOnly flag isn't removed for child calls.
 	if readOnly && !in.readOnly {
 		in.readOnly = true
 		defer func() { in.readOnly = false }()
@@ -227,7 +227,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 		} else if sLen > operation.maxStack {
 			return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack}
 		}
-		// If the operation is valid, enforce and write restrictions
+		// If the operation is valid, enforce write restrictions
 		if in.readOnly && in.evm.chainRules.IsByzantium {
 			// If the interpreter is operating in readonly mode, make sure no
 			// state-modifying operation is performed. The 3rd stack item

+ 19 - 5
core/vm/operations_acl.go

@@ -30,7 +30,7 @@ const (
 	WarmStorageReadCostEIP2929   = uint64(100)  // WARM_STORAGE_READ_COST
 )
 
-// gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929"
+// gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929
 //
 // When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys.
 // If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys.
@@ -177,10 +177,15 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {
 	return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
 		addr := common.Address(stack.Back(1).Bytes20())
 		// Check slot presence in the access list
-		if !evm.StateDB.AddressInAccessList(addr) {
+		warmAccess := evm.StateDB.AddressInAccessList(addr)
+		// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so
+		// the cost to charge for cold access, if any, is Cold - Warm
+		coldCost := ColdAccountAccessCostEIP2929 - WarmStorageReadCostEIP2929
+		if !warmAccess {
 			evm.StateDB.AddAddressToAccessList(addr)
-			// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost
-			if !contract.UseGas(ColdAccountAccessCostEIP2929 - WarmStorageReadCostEIP2929) {
+			// Charge the remaining difference here already, to correctly calculate available
+			// gas for call
+			if !contract.UseGas(coldCost) {
 				return 0, ErrOutOfGas
 			}
 		}
@@ -189,7 +194,16 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {
 		// - transfer value
 		// - memory expansion
 		// - 63/64ths rule
-		return oldCalculator(evm, contract, stack, mem, memorySize)
+		gas, err := oldCalculator(evm, contract, stack, mem, memorySize)
+		if warmAccess || err != nil {
+			return gas, err
+		}
+		// In case of a cold access, we temporarily add the cold charge back, and also
+		// add it to the returned gas. By adding it to the return, it will be charged
+		// outside of this function, as part of the dynamic gas, which also makes
+		// it correctly reported to tracers.
+		contract.Gas += coldCost
+		return gas + coldCost, nil
 	}
 }
 

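The arithmetic behind this refactor, spelled out with the EIP-2929 constants (WarmStorageReadCostEIP2929 = 100 as declared above; ColdAccountAccessCostEIP2929 = 2600 per the EIP):

const (
	coldAccountAccess = 2600 // COLD_ACCOUNT_ACCESS_COST
	warmStorageRead   = 100  // WARM_STORAGE_READ_COST
)

// The warm cost is already charged as the opcode's constant gas, so a
// cold access only owes the difference as a dynamic surcharge:
const coldSurcharge = coldAccountAccess - warmStorageRead // 2500

// Deducting the surcharge before oldCalculator keeps the 63/64ths rule
// honest, and adding it back onto the returned dynamic gas makes a cold
// BALANCE/EXTCODEHASH trace as 100 + 2500 = 2600, matching the
// `want: 2600` cases in the test below.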
+ 80 - 0
core/vm/runtime/runtime_test.go

@@ -608,3 +608,83 @@ func TestEip2929Cases(t *testing.T) {
 			"account (cheap)", code)
 	}
 }
+
+// TestColdAccountAccessCost tests that the cold account access cost is reported
+// correctly
+// see: https://github.com/ethereum/go-ethereum/issues/22649
+func TestColdAccountAccessCost(t *testing.T) {
+	for i, tc := range []struct {
+		code []byte
+		step int
+		want uint64
+	}{
+		{ // EXTCODEHASH(0xff)
+			code: []byte{byte(vm.PUSH1), 0xFF, byte(vm.EXTCODEHASH), byte(vm.POP)},
+			step: 1,
+			want: 2600,
+		},
+		{ // BALANCE(0xff)
+			code: []byte{byte(vm.PUSH1), 0xFF, byte(vm.BALANCE), byte(vm.POP)},
+			step: 1,
+			want: 2600,
+		},
+		{ // CALL(0xff)
+			code: []byte{
+				byte(vm.PUSH1), 0x0,
+				byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
+				byte(vm.PUSH1), 0xff, byte(vm.DUP1), byte(vm.CALL), byte(vm.POP),
+			},
+			step: 7,
+			want: 2855,
+		},
+		{ // CALLCODE(0xff)
+			code: []byte{
+				byte(vm.PUSH1), 0x0,
+				byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
+				byte(vm.PUSH1), 0xff, byte(vm.DUP1), byte(vm.CALLCODE), byte(vm.POP),
+			},
+			step: 7,
+			want: 2855,
+		},
+		{ // DELEGATECALL(0xff)
+			code: []byte{
+				byte(vm.PUSH1), 0x0,
+				byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
+				byte(vm.PUSH1), 0xff, byte(vm.DUP1), byte(vm.DELEGATECALL), byte(vm.POP),
+			},
+			step: 6,
+			want: 2855,
+		},
+		{ // STATICCALL(0xff)
+			code: []byte{
+				byte(vm.PUSH1), 0x0,
+				byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1),
+				byte(vm.PUSH1), 0xff, byte(vm.DUP1), byte(vm.STATICCALL), byte(vm.POP),
+			},
+			step: 6,
+			want: 2855,
+		},
+		{ // SELFDESTRUCT(0xff)
+			code: []byte{
+				byte(vm.PUSH1), 0xff, byte(vm.SELFDESTRUCT),
+			},
+			step: 1,
+			want: 7600,
+		},
+	} {
+		tracer := vm.NewStructLogger(nil)
+		Execute(tc.code, nil, &Config{
+			EVMConfig: vm.Config{
+				Debug:  true,
+				Tracer: tracer,
+			},
+		})
+		have := tracer.StructLogs()[tc.step].GasCost
+		if want := tc.want; have != want {
+			for ii, op := range tracer.StructLogs() {
+				t.Logf("%d: %v %d", ii, op.OpName(), op.GasCost)
+			}
+			t.Fatalf("tescase %d, gas report wrong, step %d, have %d want %d", i, tc.step, have, want)
+		}
+	}
+}

+ 5 - 0
eth/api.go

@@ -61,6 +61,11 @@ func (api *PublicEthereumAPI) Coinbase() (common.Address, error) {
 	return api.Etherbase()
 }
 
+// Hashrate returns the PoW hashrate
+func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 {
+	return hexutil.Uint64(api.e.Miner().Hashrate())
+}
+
 // PublicMinerAPI provides an API to control the miner.
 // It offers only methods that operate on data that pose no security risk when it is publicly accessible.
 type PublicMinerAPI struct {

+ 11 - 3
eth/backend.go

@@ -51,6 +51,7 @@ import (
 	"github.com/ethereum/go-ethereum/miner"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -172,7 +173,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		if bcVersion != nil && *bcVersion > core.BlockChainVersion {
 			return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)
 		} else if bcVersion == nil || *bcVersion < core.BlockChainVersion {
-			log.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion)
+			if bcVersion != nil { // only print warning on upgrade, not on init
+				log.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion)
+			}
 			rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
 		}
 	}
@@ -242,14 +245,17 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	}
 	eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)
 
-	eth.ethDialCandidates, err = setupDiscovery(eth.config.EthDiscoveryURLs)
+	// Setup DNS discovery iterators.
+	dnsclient := dnsdisc.NewClient(dnsdisc.Config{})
+	eth.ethDialCandidates, err = dnsclient.NewIterator(eth.config.EthDiscoveryURLs...)
 	if err != nil {
 		return nil, err
 	}
-	eth.snapDialCandidates, err = setupDiscovery(eth.config.SnapDiscoveryURLs)
+	eth.snapDialCandidates, err = dnsclient.NewIterator(eth.config.SnapDiscoveryURLs...)
 	if err != nil {
 		return nil, err
 	}
+
 	// Start the RPC service
 	eth.netRPCService = ethapi.NewPublicNetAPI(eth.p2pServer, config.NetworkId)
 
@@ -559,6 +565,8 @@ func (s *Ethereum) Start() error {
 // Ethereum protocol.
 func (s *Ethereum) Stop() error {
 	// Stop all the peer-related stuff first.
+	s.ethDialCandidates.Close()
+	s.snapDialCandidates.Close()
 	s.handler.Stop()
 
 	// Then stop everything else.

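For context, dnsdisc.NewClient plus NewIterator is the same pattern the deleted setupDiscovery helper wrapped. A standalone sketch, using the public mainnet EIP-1459 tree as the example URL:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
)

func main() {
	// Resolve an EIP-1459 DNS node list and walk it; the iterator
	// blocks in Next until another node is available.
	client := dnsdisc.NewClient(dnsdisc.Config{})
	it, err := client.NewIterator("enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@all.mainnet.ethdisco.net")
	if err != nil {
		log.Fatal(err)
	}
	defer it.Close()

	for it.Next() {
		fmt.Println("discovered:", it.Node().URLv4())
	}
}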
+ 307 - 0
eth/catalyst/api.go

@@ -0,0 +1,307 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package catalyst implements the temporary eth1/eth2 RPC integration.
+package catalyst
+
+import (
+	"errors"
+	"fmt"
+	"math/big"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
+	chainParams "github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/ethereum/go-ethereum/trie"
+)
+
+// Register adds catalyst APIs to the node.
+func Register(stack *node.Node, backend *eth.Ethereum) error {
+	chainconfig := backend.BlockChain().Config()
+	if chainconfig.CatalystBlock == nil {
+		return errors.New("catalystBlock is not set in genesis config")
+	} else if chainconfig.CatalystBlock.Sign() != 0 {
+		return errors.New("catalystBlock of genesis config must be zero")
+	}
+
+	log.Warn("Catalyst mode enabled")
+	stack.RegisterAPIs([]rpc.API{
+		{
+			Namespace: "consensus",
+			Version:   "1.0",
+			Service:   newConsensusAPI(backend),
+			Public:    true,
+		},
+	})
+	return nil
+}
+
+type consensusAPI struct {
+	eth *eth.Ethereum
+}
+
+func newConsensusAPI(eth *eth.Ethereum) *consensusAPI {
+	return &consensusAPI{eth: eth}
+}
+
+// blockExecutionEnv gathers all the data required to execute
+// a block, either when assembling it or when inserting it.
+type blockExecutionEnv struct {
+	chain   *core.BlockChain
+	state   *state.StateDB
+	tcount  int
+	gasPool *core.GasPool
+
+	header   *types.Header
+	txs      []*types.Transaction
+	receipts []*types.Receipt
+}
+
+func (env *blockExecutionEnv) commitTransaction(tx *types.Transaction, coinbase common.Address) error {
+	vmconfig := *env.chain.GetVMConfig()
+	receipt, err := core.ApplyTransaction(env.chain.Config(), env.chain, &coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, vmconfig)
+	if err != nil {
+		return err
+	}
+	env.txs = append(env.txs, tx)
+	env.receipts = append(env.receipts, receipt)
+	return nil
+}
+
+func (api *consensusAPI) makeEnv(parent *types.Block, header *types.Header) (*blockExecutionEnv, error) {
+	state, err := api.eth.BlockChain().StateAt(parent.Root())
+	if err != nil {
+		return nil, err
+	}
+	env := &blockExecutionEnv{
+		chain:   api.eth.BlockChain(),
+		state:   state,
+		header:  header,
+		gasPool: new(core.GasPool).AddGas(header.GasLimit),
+	}
+	return env, nil
+}
+
+// AssembleBlock creates a new block, inserts it into the chain, and returns the "execution
+// data" required for eth2 clients to process the new block.
+func (api *consensusAPI) AssembleBlock(params assembleBlockParams) (*executableData, error) {
+	log.Info("Producing block", "parentHash", params.ParentHash)
+
+	bc := api.eth.BlockChain()
+	parent := bc.GetBlockByHash(params.ParentHash)
+	if parent == nil {
+		log.Warn("Cannot assemble block with parent hash to unknown block", "parentHash", params.ParentHash)
+		return nil, fmt.Errorf("cannot assemble block with unknown parent %s", params.ParentHash)
+	}
+
+	pool := api.eth.TxPool()
+
+	if parent.Time() >= params.Timestamp {
+		return nil, fmt.Errorf("child timestamp lower than parent's: %d >= %d", parent.Time(), params.Timestamp)
+	}
+	if now := uint64(time.Now().Unix()); params.Timestamp > now+1 {
+		wait := time.Duration(params.Timestamp-now) * time.Second
+		log.Info("Producing block too far in the future", "wait", common.PrettyDuration(wait))
+		time.Sleep(wait)
+	}
+
+	pending, err := pool.Pending()
+	if err != nil {
+		return nil, err
+	}
+
+	coinbase, err := api.eth.Etherbase()
+	if err != nil {
+		return nil, err
+	}
+	num := parent.Number()
+	header := &types.Header{
+		ParentHash: parent.Hash(),
+		Number:     num.Add(num, common.Big1),
+		Coinbase:   coinbase,
+		GasLimit:   parent.GasLimit(), // Keep the gas limit constant in this prototype
+		Extra:      []byte{},
+		Time:       params.Timestamp,
+	}
+	err = api.eth.Engine().Prepare(bc, header)
+	if err != nil {
+		return nil, err
+	}
+
+	env, err := api.makeEnv(parent, header)
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		signer       = types.MakeSigner(bc.Config(), header.Number)
+		txHeap       = types.NewTransactionsByPriceAndNonce(signer, pending)
+		transactions []*types.Transaction
+	)
+	for {
+		if env.gasPool.Gas() < chainParams.TxGas {
+			log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", chainParams.TxGas)
+			break
+		}
+		tx := txHeap.Peek()
+		if tx == nil {
+			break
+		}
+
+		// The sender is only for logging purposes, and it doesn't really matter if it's correct.
+		from, _ := types.Sender(signer, tx)
+
+		// Execute the transaction
+		env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount)
+		err = env.commitTransaction(tx, coinbase)
+		switch err {
+		case core.ErrGasLimitReached:
+			// Pop the current out-of-gas transaction without shifting in the next from the account
+			log.Trace("Gas limit exceeded for current block", "sender", from)
+			txHeap.Pop()
+
+		case core.ErrNonceTooLow:
+			// New head notification data race between the transaction pool and miner, shift
+			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+			txHeap.Shift()
+
+		case core.ErrNonceTooHigh:
+			// Reorg notification data race between the transaction pool and miner, skip account
+			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
+			txHeap.Pop()
+
+		case nil:
+			// Everything ok, collect the logs and shift in the next transaction from the same account
+			env.tcount++
+			txHeap.Shift()
+			transactions = append(transactions, tx)
+
+		default:
+			// Strange error, discard the transaction and get the next in line (note, the
+			// nonce-too-high clause will prevent us from executing in vain).
+			log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+			txHeap.Shift()
+		}
+	}
+
+	// Create the block.
+	block, _, err := api.eth.Engine().FinalizeAndAssemble(bc, header, env.state, transactions, nil /* uncles */, env.receipts)
+	if err != nil {
+		return nil, err
+	}
+	return &executableData{
+		BlockHash:    block.Hash(),
+		ParentHash:   block.ParentHash(),
+		Miner:        block.Coinbase(),
+		StateRoot:    block.Root(),
+		Number:       block.NumberU64(),
+		GasLimit:     block.GasLimit(),
+		GasUsed:      block.GasUsed(),
+		Timestamp:    block.Time(),
+		ReceiptRoot:  block.ReceiptHash(),
+		LogsBloom:    block.Bloom().Bytes(),
+		Transactions: encodeTransactions(block.Transactions()),
+	}, nil
+}
+
+func encodeTransactions(txs []*types.Transaction) [][]byte {
+	var enc = make([][]byte, len(txs))
+	for i, tx := range txs {
+		enc[i], _ = tx.MarshalBinary()
+	}
+	return enc
+}
+
+func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
+	var txs = make([]*types.Transaction, len(enc))
+	for i, encTx := range enc {
+		var tx types.Transaction
+		if err := tx.UnmarshalBinary(encTx); err != nil {
+			return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
+		}
+		txs[i] = &tx
+	}
+	return txs, nil
+}
+
+func insertBlockParamsToBlock(params executableData) (*types.Block, error) {
+	txs, err := decodeTransactions(params.Transactions)
+	if err != nil {
+		return nil, err
+	}
+
+	number := big.NewInt(0)
+	number.SetUint64(params.Number)
+	header := &types.Header{
+		ParentHash:  params.ParentHash,
+		UncleHash:   types.EmptyUncleHash,
+		Coinbase:    params.Miner,
+		Root:        params.StateRoot,
+		TxHash:      types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
+		ReceiptHash: params.ReceiptRoot,
+		Bloom:       types.BytesToBloom(params.LogsBloom),
+		Difficulty:  big.NewInt(1),
+		Number:      number,
+		GasLimit:    params.GasLimit,
+		GasUsed:     params.GasUsed,
+		Time:        params.Timestamp,
+	}
+	block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
+	return block, nil
+}
+
+// NewBlock creates an Eth1 block, inserts it in the chain, and either returns true,
+// or false plus an error. This is a bit redundant for Go, but simplifies things on the
+// eth2 side.
+func (api *consensusAPI) NewBlock(params executableData) (*newBlockResponse, error) {
+	parent := api.eth.BlockChain().GetBlockByHash(params.ParentHash)
+	if parent == nil {
+		return &newBlockResponse{false}, fmt.Errorf("could not find parent %x", params.ParentHash)
+	}
+	block, err := insertBlockParamsToBlock(params)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = api.eth.BlockChain().InsertChainWithoutSealVerification(block)
+	return &newBlockResponse{err == nil}, err
+}
+
+// Used in tests to add the list of transactions from a block to the tx pool.
+func (api *consensusAPI) addBlockTxs(block *types.Block) error {
+	for _, tx := range block.Transactions() {
+		api.eth.TxPool().AddLocal(tx)
+	}
+	return nil
+}
+
+// FinalizeBlock is called to mark a block as synchronized, so
+// that data that is no longer needed can be removed.
+func (api *consensusAPI) FinalizeBlock(blockHash common.Hash) (*genericResponse, error) {
+	return &genericResponse{true}, nil
+}
+
+// SetHead is called to perform a fork choice.
+func (api *consensusAPI) SetHead(newHead common.Hash) (*genericResponse, error) {
+	return &genericResponse{true}, nil
+}

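Since the service registers under the consensus namespace, geth's RPC naming convention exposes AssembleBlock as consensus_assembleBlock. A client-side sketch; the camelCase JSON field names are an assumption based on the gencodec output in gen_blockparams.go below:

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

// assembleBlock asks a catalyst-enabled node to produce a block on top
// of the given parent and returns the raw executable data.
func assembleBlock(client *rpc.Client, parent common.Hash, timestamp uint64) (map[string]interface{}, error) {
	var exec map[string]interface{}
	err := client.CallContext(context.Background(), &exec,
		"consensus_assembleBlock", map[string]interface{}{
			"parentHash": parent,                    // assumed field name
			"timestamp":  hexutil.Uint64(timestamp), // assumed field name
		})
	return exec, err
}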
+ 241 - 0
eth/catalyst/api_test.go

@@ -0,0 +1,241 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package catalyst
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/ethconfig"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+var (
+	// testKey is a private key to use for funding a tester account.
+	testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+
+	// testAddr is the Ethereum address of the tester account.
+	testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
+
+	testBalance = big.NewInt(2e10)
+)
+
+func generateTestChain() (*core.Genesis, []*types.Block) {
+	db := rawdb.NewMemoryDatabase()
+	config := params.AllEthashProtocolChanges
+	genesis := &core.Genesis{
+		Config:    config,
+		Alloc:     core.GenesisAlloc{testAddr: {Balance: testBalance}},
+		ExtraData: []byte("test genesis"),
+		Timestamp: 9000,
+	}
+	generate := func(i int, g *core.BlockGen) {
+		g.OffsetTime(5)
+		g.SetExtra([]byte("test"))
+	}
+	gblock := genesis.ToBlock(db)
+	engine := ethash.NewFaker()
+	blocks, _ := core.GenerateChain(config, gblock, engine, db, 10, generate)
+	blocks = append([]*types.Block{gblock}, blocks...)
+	return genesis, blocks
+}
+
+func generateTestChainWithFork(n int, fork int) (*core.Genesis, []*types.Block, []*types.Block) {
+	if fork >= n {
+		fork = n - 1
+	}
+	db := rawdb.NewMemoryDatabase()
+	config := &params.ChainConfig{
+		ChainID:             big.NewInt(1337),
+		HomesteadBlock:      big.NewInt(0),
+		EIP150Block:         big.NewInt(0),
+		EIP155Block:         big.NewInt(0),
+		EIP158Block:         big.NewInt(0),
+		ByzantiumBlock:      big.NewInt(0),
+		ConstantinopleBlock: big.NewInt(0),
+		PetersburgBlock:     big.NewInt(0),
+		IstanbulBlock:       big.NewInt(0),
+		MuirGlacierBlock:    big.NewInt(0),
+		BerlinBlock:         big.NewInt(0),
+		CatalystBlock:       big.NewInt(0),
+		Ethash:              new(params.EthashConfig),
+	}
+	genesis := &core.Genesis{
+		Config:    config,
+		Alloc:     core.GenesisAlloc{testAddr: {Balance: testBalance}},
+		ExtraData: []byte("test genesis"),
+		Timestamp: 9000,
+	}
+	generate := func(i int, g *core.BlockGen) {
+		g.OffsetTime(5)
+		g.SetExtra([]byte("test"))
+	}
+	generateFork := func(i int, g *core.BlockGen) {
+		g.OffsetTime(5)
+		g.SetExtra([]byte("testF"))
+	}
+	gblock := genesis.ToBlock(db)
+	engine := ethash.NewFaker()
+	blocks, _ := core.GenerateChain(config, gblock, engine, db, n, generate)
+	blocks = append([]*types.Block{gblock}, blocks...)
+	forkedBlocks, _ := core.GenerateChain(config, blocks[fork], engine, db, n-fork, generateFork)
+	return genesis, blocks, forkedBlocks
+}
+
+func TestEth2AssembleBlock(t *testing.T) {
+	genesis, blocks := generateTestChain()
+	n, ethservice := startEthService(t, genesis, blocks[1:9])
+	defer n.Close()
+
+	api := newConsensusAPI(ethservice)
+	signer := types.NewEIP155Signer(ethservice.BlockChain().Config().ChainID)
+	tx, err := types.SignTx(types.NewTransaction(0, blocks[8].Coinbase(), big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
+	if err != nil {
+		t.Fatalf("error signing transaction, err=%v", err)
+	}
+	ethservice.TxPool().AddLocal(tx)
+	blockParams := assembleBlockParams{
+		ParentHash: blocks[8].ParentHash(),
+		Timestamp:  blocks[8].Time(),
+	}
+	execData, err := api.AssembleBlock(blockParams)
+
+	if err != nil {
+		t.Fatalf("error producing block, err=%v", err)
+	}
+
+	if len(execData.Transactions) != 1 {
+		t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions))
+	}
+}
+
+func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) {
+	genesis, blocks := generateTestChain()
+	n, ethservice := startEthService(t, genesis, blocks[1:9])
+	defer n.Close()
+
+	api := newConsensusAPI(ethservice)
+
+	// Put the 10th block's tx in the pool and produce a new block
+	api.addBlockTxs(blocks[9])
+	blockParams := assembleBlockParams{
+		ParentHash: blocks[9].ParentHash(),
+		Timestamp:  blocks[9].Time(),
+	}
+	execData, err := api.AssembleBlock(blockParams)
+	if err != nil {
+		t.Fatalf("error producing block, err=%v", err)
+	}
+
+	if len(execData.Transactions) != blocks[9].Transactions().Len() {
+		t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions))
+	}
+}
+
+func TestEth2NewBlock(t *testing.T) {
+	genesis, blocks, forkedBlocks := generateTestChainWithFork(10, 4)
+	n, ethservice := startEthService(t, genesis, blocks[1:5])
+	defer n.Close()
+
+	api := newConsensusAPI(ethservice)
+	for i := 5; i < 10; i++ {
+		p := executableData{
+			ParentHash:   ethservice.BlockChain().CurrentBlock().Hash(),
+			Miner:        blocks[i].Coinbase(),
+			StateRoot:    blocks[i].Root(),
+			GasLimit:     blocks[i].GasLimit(),
+			GasUsed:      blocks[i].GasUsed(),
+			Transactions: encodeTransactions(blocks[i].Transactions()),
+			ReceiptRoot:  blocks[i].ReceiptHash(),
+			LogsBloom:    blocks[i].Bloom().Bytes(),
+			BlockHash:    blocks[i].Hash(),
+			Timestamp:    blocks[i].Time(),
+			Number:       uint64(i),
+		}
+		success, err := api.NewBlock(p)
+		if err != nil || !success.Valid {
+			t.Fatalf("Failed to insert block: %v", err)
+		}
+	}
+
+	exp := ethservice.BlockChain().CurrentBlock().Hash()
+
+	// Introduce the fork point.
+	lastBlockNum := blocks[4].Number()
+	lastBlock := blocks[4]
+	for i := 0; i < 4; i++ {
+		lastBlockNum.Add(lastBlockNum, big.NewInt(1))
+		p := executableData{
+			ParentHash:   lastBlock.Hash(),
+			Miner:        forkedBlocks[i].Coinbase(),
+			StateRoot:    forkedBlocks[i].Root(),
+			Number:       lastBlockNum.Uint64(),
+			GasLimit:     forkedBlocks[i].GasLimit(),
+			GasUsed:      forkedBlocks[i].GasUsed(),
+			Transactions: encodeTransactions(forkedBlocks[i].Transactions()),
+			ReceiptRoot:  forkedBlocks[i].ReceiptHash(),
+			LogsBloom:    forkedBlocks[i].Bloom().Bytes(),
+			BlockHash:    forkedBlocks[i].Hash(),
+			Timestamp:    forkedBlocks[i].Time(),
+		}
+		success, err := api.NewBlock(p)
+		if err != nil || !success.Valid {
+			t.Fatalf("Failed to insert forked block #%d: %v", i, err)
+		}
+		lastBlock, err = insertBlockParamsToBlock(p)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	if ethservice.BlockChain().CurrentBlock().Hash() != exp {
+		t.Fatalf("Wrong head after inserting fork %x != %x", exp, ethservice.BlockChain().CurrentBlock().Hash())
+	}
+}
+
+// startEthService creates a full node instance for testing.
+func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) (*node.Node, *eth.Ethereum) {
+	t.Helper()
+
+	n, err := node.New(&node.Config{})
+	if err != nil {
+		t.Fatal("can't create node:", err)
+	}
+
+	ethcfg := &ethconfig.Config{Genesis: genesis, Ethash: ethash.Config{PowMode: ethash.ModeFake}}
+	ethservice, err := eth.New(n, ethcfg)
+	if err != nil {
+		t.Fatal("can't create eth service:", err)
+	}
+	if err := n.Start(); err != nil {
+		t.Fatal("can't start node:", err)
+	}
+	if _, err := ethservice.BlockChain().InsertChain(blocks); err != nil {
+		n.Close()
+		t.Fatal("can't import test blocks:", err)
+	}
+	ethservice.SetEtherbase(testAddr)
+
+	return n, ethservice
+}
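
The tests above exercise the full produce-then-insert loop. As a reading aid, here is a minimal sketch of that round trip, reusing the unexported names from this file; it assumes AssembleBlock returns *executableData and NewBlock returns *newBlockResponse, as the tests suggest, and is not itself part of the change.

	// Sketch only: mirrors what TestEth2AssembleBlock and TestEth2NewBlock do.
	func assembleAndInsert(api *consensusAPI, parent *types.Block) error {
		// Ask the node to build a block on top of the given parent.
		execData, err := api.AssembleBlock(assembleBlockParams{
			ParentHash: parent.Hash(),
			Timestamp:  parent.Time() + 5, // hypothetical child timestamp
		})
		if err != nil {
			return err
		}
		// Feed the produced payload straight back for insertion.
		res, err := api.NewBlock(*execData)
		if err != nil {
			return err
		}
		if !res.Valid {
			return fmt.Errorf("assembled block rejected on insertion")
		}
		return nil
	}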

+ 70 - 0
eth/catalyst/api_types.go

@@ -0,0 +1,70 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package catalyst
+
+import (
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+//go:generate go run github.com/fjl/gencodec -type assembleBlockParams -field-override assembleBlockParamsMarshaling -out gen_blockparams.go
+
+// Structure described at https://hackmd.io/T9x2mMA4S7us8tJwEB3FDQ
+type assembleBlockParams struct {
+	ParentHash common.Hash `json:"parentHash"    gencodec:"required"`
+	Timestamp  uint64      `json:"timestamp"     gencodec:"required"`
+}
+
+// JSON type overrides for assembleBlockParams.
+type assembleBlockParamsMarshaling struct {
+	Timestamp hexutil.Uint64
+}
+
+//go:generate go run github.com/fjl/gencodec -type executableData -field-override executableDataMarshaling -out gen_ed.go
+
+// Structure described at https://notes.ethereum.org/@n0ble/rayonism-the-merge-spec#Parameters1
+type executableData struct {
+	BlockHash    common.Hash    `json:"blockHash"     gencodec:"required"`
+	ParentHash   common.Hash    `json:"parentHash"    gencodec:"required"`
+	Miner        common.Address `json:"miner"         gencodec:"required"`
+	StateRoot    common.Hash    `json:"stateRoot"     gencodec:"required"`
+	Number       uint64         `json:"number"        gencodec:"required"`
+	GasLimit     uint64         `json:"gasLimit"      gencodec:"required"`
+	GasUsed      uint64         `json:"gasUsed"       gencodec:"required"`
+	Timestamp    uint64         `json:"timestamp"     gencodec:"required"`
+	ReceiptRoot  common.Hash    `json:"receiptsRoot"  gencodec:"required"`
+	LogsBloom    []byte         `json:"logsBloom"     gencodec:"required"`
+	Transactions [][]byte       `json:"transactions"  gencodec:"required"`
+}
+
+// JSON type overrides for executableData.
+type executableDataMarshaling struct {
+	Number       hexutil.Uint64
+	GasLimit     hexutil.Uint64
+	GasUsed      hexutil.Uint64
+	Timestamp    hexutil.Uint64
+	LogsBloom    hexutil.Bytes
+	Transactions []hexutil.Bytes
+}
+
+type newBlockResponse struct {
+	Valid bool `json:"valid"`
+}
+
+type genericResponse struct {
+	Success bool `json:"success"`
+}
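
As a reading aid, a sketch of the wire form these overrides produce: hashes marshal as 0x-prefixed hex strings, while the hexutil.Uint64 override renders the timestamp as a hex quantity rather than a decimal number (values illustrative).

	params := assembleBlockParams{
		ParentHash: common.HexToHash("0xdeadbeef"), // left-padded to 32 bytes
		Timestamp:  9000,
	}
	out, _ := json.Marshal(params)
	// out: {"parentHash":"0x00…00deadbeef","timestamp":"0x2328"}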

+ 46 - 0
eth/catalyst/gen_blockparams.go

@@ -0,0 +1,46 @@
+// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
+
+package catalyst
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+var _ = (*assembleBlockParamsMarshaling)(nil)
+
+// MarshalJSON marshals as JSON.
+func (a assembleBlockParams) MarshalJSON() ([]byte, error) {
+	type assembleBlockParams struct {
+		ParentHash common.Hash    `json:"parentHash"    gencodec:"required"`
+		Timestamp  hexutil.Uint64 `json:"timestamp"     gencodec:"required"`
+	}
+	var enc assembleBlockParams
+	enc.ParentHash = a.ParentHash
+	enc.Timestamp = hexutil.Uint64(a.Timestamp)
+	return json.Marshal(&enc)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (a *assembleBlockParams) UnmarshalJSON(input []byte) error {
+	type assembleBlockParams struct {
+		ParentHash *common.Hash    `json:"parentHash"    gencodec:"required"`
+		Timestamp  *hexutil.Uint64 `json:"timestamp"     gencodec:"required"`
+	}
+	var dec assembleBlockParams
+	if err := json.Unmarshal(input, &dec); err != nil {
+		return err
+	}
+	if dec.ParentHash == nil {
+		return errors.New("missing required field 'parentHash' for assembleBlockParams")
+	}
+	a.ParentHash = *dec.ParentHash
+	if dec.Timestamp == nil {
+		return errors.New("missing required field 'timestamp' for assembleBlockParams")
+	}
+	a.Timestamp = uint64(*dec.Timestamp)
+	return nil
+}
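
A quick sanity sketch of the generated codec round-tripping; the value receiver on MarshalJSON and the pointer receiver on UnmarshalJSON let both directions work through encoding/json.

	in := assembleBlockParams{ParentHash: common.Hash{1}, Timestamp: 42}
	data, _ := json.Marshal(in) // {"parentHash":"0x0100…00","timestamp":"0x2a"}
	var out assembleBlockParams
	_ = json.Unmarshal(data, &out) // out == in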

+ 117 - 0
eth/catalyst/gen_ed.go

@@ -0,0 +1,117 @@
+// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
+
+package catalyst
+
+import (
+	"encoding/json"
+	"errors"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+var _ = (*executableDataMarshaling)(nil)
+
+// MarshalJSON marshals as JSON.
+func (e executableData) MarshalJSON() ([]byte, error) {
+	type executableData struct {
+		BlockHash    common.Hash     `json:"blockHash"     gencodec:"required"`
+		ParentHash   common.Hash     `json:"parentHash"    gencodec:"required"`
+		Miner        common.Address  `json:"miner"         gencodec:"required"`
+		StateRoot    common.Hash     `json:"stateRoot"     gencodec:"required"`
+		Number       hexutil.Uint64  `json:"number"        gencodec:"required"`
+		GasLimit     hexutil.Uint64  `json:"gasLimit"      gencodec:"required"`
+		GasUsed      hexutil.Uint64  `json:"gasUsed"       gencodec:"required"`
+		Timestamp    hexutil.Uint64  `json:"timestamp"     gencodec:"required"`
+		ReceiptRoot  common.Hash     `json:"receiptsRoot"  gencodec:"required"`
+		LogsBloom    hexutil.Bytes   `json:"logsBloom"     gencodec:"required"`
+		Transactions []hexutil.Bytes `json:"transactions"  gencodec:"required"`
+	}
+	var enc executableData
+	enc.BlockHash = e.BlockHash
+	enc.ParentHash = e.ParentHash
+	enc.Miner = e.Miner
+	enc.StateRoot = e.StateRoot
+	enc.Number = hexutil.Uint64(e.Number)
+	enc.GasLimit = hexutil.Uint64(e.GasLimit)
+	enc.GasUsed = hexutil.Uint64(e.GasUsed)
+	enc.Timestamp = hexutil.Uint64(e.Timestamp)
+	enc.ReceiptRoot = e.ReceiptRoot
+	enc.LogsBloom = e.LogsBloom
+	if e.Transactions != nil {
+		enc.Transactions = make([]hexutil.Bytes, len(e.Transactions))
+		for k, v := range e.Transactions {
+			enc.Transactions[k] = v
+		}
+	}
+	return json.Marshal(&enc)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (e *executableData) UnmarshalJSON(input []byte) error {
+	type executableData struct {
+		BlockHash    *common.Hash    `json:"blockHash"     gencodec:"required"`
+		ParentHash   *common.Hash    `json:"parentHash"    gencodec:"required"`
+		Miner        *common.Address `json:"miner"         gencodec:"required"`
+		StateRoot    *common.Hash    `json:"stateRoot"     gencodec:"required"`
+		Number       *hexutil.Uint64 `json:"number"        gencodec:"required"`
+		GasLimit     *hexutil.Uint64 `json:"gasLimit"      gencodec:"required"`
+		GasUsed      *hexutil.Uint64 `json:"gasUsed"       gencodec:"required"`
+		Timestamp    *hexutil.Uint64 `json:"timestamp"     gencodec:"required"`
+		ReceiptRoot  *common.Hash    `json:"receiptsRoot"  gencodec:"required"`
+		LogsBloom    *hexutil.Bytes  `json:"logsBloom"     gencodec:"required"`
+		Transactions []hexutil.Bytes `json:"transactions"  gencodec:"required"`
+	}
+	var dec executableData
+	if err := json.Unmarshal(input, &dec); err != nil {
+		return err
+	}
+	if dec.BlockHash == nil {
+		return errors.New("missing required field 'blockHash' for executableData")
+	}
+	e.BlockHash = *dec.BlockHash
+	if dec.ParentHash == nil {
+		return errors.New("missing required field 'parentHash' for executableData")
+	}
+	e.ParentHash = *dec.ParentHash
+	if dec.Miner == nil {
+		return errors.New("missing required field 'miner' for executableData")
+	}
+	e.Miner = *dec.Miner
+	if dec.StateRoot == nil {
+		return errors.New("missing required field 'stateRoot' for executableData")
+	}
+	e.StateRoot = *dec.StateRoot
+	if dec.Number == nil {
+		return errors.New("missing required field 'number' for executableData")
+	}
+	e.Number = uint64(*dec.Number)
+	if dec.GasLimit == nil {
+		return errors.New("missing required field 'gasLimit' for executableData")
+	}
+	e.GasLimit = uint64(*dec.GasLimit)
+	if dec.GasUsed == nil {
+		return errors.New("missing required field 'gasUsed' for executableData")
+	}
+	e.GasUsed = uint64(*dec.GasUsed)
+	if dec.Timestamp == nil {
+		return errors.New("missing required field 'timestamp' for executableData")
+	}
+	e.Timestamp = uint64(*dec.Timestamp)
+	if dec.ReceiptRoot == nil {
+		return errors.New("missing required field 'receiptsRoot' for executableData")
+	}
+	e.ReceiptRoot = *dec.ReceiptRoot
+	if dec.LogsBloom == nil {
+		return errors.New("missing required field 'logsBloom' for executableData")
+	}
+	e.LogsBloom = *dec.LogsBloom
+	if dec.Transactions == nil {
+		return errors.New("missing required field 'transactions' for executableData")
+	}
+	e.Transactions = make([][]byte, len(dec.Transactions))
+	for k, v := range dec.Transactions {
+		e.Transactions[k] = v
+	}
+	return nil
+}
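
The pointer-typed shadow struct above is what turns absent JSON fields into explicit errors rather than silent zero values. A minimal sketch of the failure mode:

	var ed executableData
	err := ed.UnmarshalJSON([]byte("{}"))
	// err: missing required field 'blockHash' for executableData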

+ 0 - 11
eth/discovery.go

@@ -19,7 +19,6 @@ package eth
 import (
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/forkid"
-	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/rlp"
 )
@@ -62,13 +61,3 @@ func (eth *Ethereum) currentEthEntry() *ethEntry {
 	return &ethEntry{ForkID: forkid.NewID(eth.blockchain.Config(), eth.blockchain.Genesis().Hash(),
 		eth.blockchain.CurrentHeader().Number.Uint64())}
 }
-
-// setupDiscovery creates the node discovery source for the `eth` and `snap`
-// protocols.
-func setupDiscovery(urls []string) (enode.Iterator, error) {
-	if len(urls) == 0 {
-		return nil, nil
-	}
-	client := dnsdisc.NewClient(dnsdisc.Config{})
-	return client.NewIterator(urls...)
-}

+ 13 - 2
eth/downloader/downloader.go

@@ -28,7 +28,9 @@ import (
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth/protocols/eth"
 	"github.com/ethereum/go-ethereum/eth/protocols/snap"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
@@ -213,6 +215,9 @@ type BlockChain interface {
 
 	// InsertReceiptChain inserts a batch of receipts into the local chain.
 	InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
+
+	// Snapshots returns the blockchain snapshot tree to pause it during sync.
+	Snapshots() *snapshot.Tree
 }
 
 // New creates a new downloader to fetch hashes and blocks from remote peers.
@@ -392,6 +397,12 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 	// but until snap becomes prevalent, we should support both. TODO(karalabe).
 	if mode == SnapSync {
 		if !d.snapSync {
+			// Snap sync uses the snapshot namespace to store potentially flaky data until
+			// the sync completely heals and finishes. Pause snapshot maintenance in the
+			// meantime to prevent access.
+			if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
+				snapshots.Disable()
+			}
 			log.Warn("Enabling snapshot sync prototype")
 			d.snapSync = true
 		}
@@ -459,8 +470,8 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 			d.mux.Post(DoneEvent{latest})
 		}
 	}()
-	if p.version < 64 {
-		return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, 64)
+	if p.version < eth.ETH65 {
+		return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, eth.ETH65)
 	}
 	mode := d.getMode()
 

+ 154 - 179
eth/downloader/downloader_test.go

@@ -29,7 +29,9 @@ import (
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth/protocols/eth"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/trie"
@@ -408,6 +410,11 @@ func (dl *downloadTester) dropPeer(id string) {
 	dl.downloader.UnregisterPeer(id)
 }
 
+// Snapshots implements the BlockChain interface for the downloader, but is a noop.
+func (dl *downloadTester) Snapshots() *snapshot.Tree {
+	return nil
+}
+
 type downloadTesterPeer struct {
 	dl            *downloadTester
 	id            string
@@ -515,16 +522,13 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
 	}
 }
 
-func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonSync(t, 64, FullSync) }
-func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonSync(t, 64, FastSync) }
+func TestCanonicalSynchronisation65Full(t *testing.T)  { testCanonSync(t, eth.ETH65, FullSync) }
+func TestCanonicalSynchronisation65Fast(t *testing.T)  { testCanonSync(t, eth.ETH65, FastSync) }
+func TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, eth.ETH65, LightSync) }
 
-func TestCanonicalSynchronisation65Full(t *testing.T)  { testCanonSync(t, 65, FullSync) }
-func TestCanonicalSynchronisation65Fast(t *testing.T)  { testCanonSync(t, 65, FastSync) }
-func TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, 65, LightSync) }
-
-func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, 66, FullSync) }
-func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, 66, FastSync) }
-func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, 66, LightSync) }
+func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
+func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, eth.ETH66, FastSync) }
+func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
 
 func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -545,14 +549,11 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
 
 // Tests that if a large batch of blocks are being downloaded, it is throttled
 // until the cached blocks are retrieved.
-func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
-func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
-
-func TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) }
-func TestThrottling65Fast(t *testing.T) { testThrottling(t, 65, FastSync) }
+func TestThrottling65Full(t *testing.T) { testThrottling(t, eth.ETH65, FullSync) }
+func TestThrottling65Fast(t *testing.T) { testThrottling(t, eth.ETH65, FastSync) }
 
-func TestThrottling66Full(t *testing.T) { testThrottling(t, 66, FullSync) }
-func TestThrottling66Fast(t *testing.T) { testThrottling(t, 66, FastSync) }
+func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
+func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }
 
 func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -633,16 +634,13 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that simple synchronization against a forked chain works correctly. In
 // this test common ancestor lookup should *not* be short circuited, and a full
 // binary search should be executed.
-func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
-func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
+func TestForkedSync65Full(t *testing.T)  { testForkedSync(t, eth.ETH65, FullSync) }
+func TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, eth.ETH65, FastSync) }
+func TestForkedSync65Light(t *testing.T) { testForkedSync(t, eth.ETH65, LightSync) }
 
-func TestForkedSync65Full(t *testing.T)  { testForkedSync(t, 65, FullSync) }
-func TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, 65, FastSync) }
-func TestForkedSync65Light(t *testing.T) { testForkedSync(t, 65, LightSync) }
-
-func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, 66, FullSync) }
-func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, 66, FastSync) }
-func TestForkedSync66Light(t *testing.T) { testForkedSync(t, 66, LightSync) }
+func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
+func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, eth.ETH66, FastSync) }
+func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
 
 func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -669,16 +667,13 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 
 // Tests that synchronising against a much shorter but much heavier fork works
 // correctly and is not dropped.
-func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
-func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
-
-func TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, 65, FullSync) }
-func TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, 65, FastSync) }
-func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, 65, LightSync) }
+func TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FullSync) }
+func TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FastSync) }
+func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH65, LightSync) }
 
-func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, 66, FullSync) }
-func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, 66, FastSync) }
-func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, 66, LightSync) }
+func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
+func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FastSync) }
+func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
 
 func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -707,16 +702,13 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that chain forks are contained within a certain interval of the current
 // chain head, ensuring that malicious peers cannot waste resources by feeding
 // long dead chains.
-func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
-func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
+func TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FullSync) }
+func TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FastSync) }
+func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH65, LightSync) }
 
-func TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, 65, FullSync) }
-func TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, 65, FastSync) }
-func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, 65, LightSync) }
-
-func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, 66, FullSync) }
-func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, 66, FastSync) }
-func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, 66, LightSync) }
+func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
+func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FastSync) }
+func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
 
 func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -744,16 +736,25 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that chain forks are contained within a certain interval of the current
 // chain head for short but heavy forks too. These are a bit special because they
 // take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
-func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
-
-func TestBoundedHeavyForkedSync65Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 65, FullSync) }
-func TestBoundedHeavyForkedSync65Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 65, FastSync) }
-func TestBoundedHeavyForkedSync65Light(t *testing.T) { testBoundedHeavyForkedSync(t, 65, LightSync) }
+func TestBoundedHeavyForkedSync65Full(t *testing.T) {
+	testBoundedHeavyForkedSync(t, eth.ETH65, FullSync)
+}
+func TestBoundedHeavyForkedSync65Fast(t *testing.T) {
+	testBoundedHeavyForkedSync(t, eth.ETH65, FastSync)
+}
+func TestBoundedHeavyForkedSync65Light(t *testing.T) {
+	testBoundedHeavyForkedSync(t, eth.ETH65, LightSync)
+}
 
-func TestBoundedHeavyForkedSync66Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 66, FullSync) }
-func TestBoundedHeavyForkedSync66Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 66, FastSync) }
-func TestBoundedHeavyForkedSync66Light(t *testing.T) { testBoundedHeavyForkedSync(t, 66, LightSync) }
+func TestBoundedHeavyForkedSync66Full(t *testing.T) {
+	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
+}
+func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
+	testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
+}
+func TestBoundedHeavyForkedSync66Light(t *testing.T) {
+	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
+}
 
 func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -799,16 +800,13 @@ func TestInactiveDownloader63(t *testing.T) {
 }
 
 // Tests that a canceled download wipes all previously accumulated state.
-func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }
-func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) }
-
-func TestCancel65Full(t *testing.T)  { testCancel(t, 65, FullSync) }
-func TestCancel65Fast(t *testing.T)  { testCancel(t, 65, FastSync) }
-func TestCancel65Light(t *testing.T) { testCancel(t, 65, LightSync) }
+func TestCancel65Full(t *testing.T)  { testCancel(t, eth.ETH65, FullSync) }
+func TestCancel65Fast(t *testing.T)  { testCancel(t, eth.ETH65, FastSync) }
+func TestCancel65Light(t *testing.T) { testCancel(t, eth.ETH65, LightSync) }
 
-func TestCancel66Full(t *testing.T)  { testCancel(t, 66, FullSync) }
-func TestCancel66Fast(t *testing.T)  { testCancel(t, 66, FastSync) }
-func TestCancel66Light(t *testing.T) { testCancel(t, 66, LightSync) }
+func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
+func TestCancel66Fast(t *testing.T)  { testCancel(t, eth.ETH66, FastSync) }
+func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
 
 func testCancel(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -835,16 +833,13 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) {
 }
 
 // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }
-func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) }
+func TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FullSync) }
+func TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FastSync) }
+func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH65, LightSync) }
 
-func TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, 65, FullSync) }
-func TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, 65, FastSync) }
-func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, 65, LightSync) }
-
-func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, 66, FullSync) }
-func TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, 66, FastSync) }
-func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, 66, LightSync) }
+func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
+func TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FastSync) }
+func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
 
 func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -868,16 +863,13 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
 
 // Tests that synchronisations behave well in multi-version protocol environments
 // and not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }
-func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) }
-
-func TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, 65, FullSync) }
-func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, 65, FastSync) }
-func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, 65, LightSync) }
+func TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FullSync) }
+func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FastSync) }
+func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, eth.ETH65, LightSync) }
 
-func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, 66, FullSync) }
-func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, 66, FastSync) }
-func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, 66, LightSync) }
+func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
+func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FastSync) }
+func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
 
 func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -889,9 +881,8 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 	chain := testChainBase.shorten(blockCacheMaxItems - 15)
 
 	// Create peers of every type
-	tester.newPeer("peer 64", 64, chain)
-	tester.newPeer("peer 65", 65, chain)
-	tester.newPeer("peer 66", 66, chain)
+	tester.newPeer("peer 65", eth.ETH65, chain)
+	tester.newPeer("peer 66", eth.ETH66, chain)
 
 	// Synchronise with the requested peer and make sure all blocks were retrieved
 	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
@@ -900,7 +891,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 	assertOwnChain(t, tester, chain.len())
 
 	// Check that no peers have been dropped off
-	for _, version := range []int{64, 65, 66} {
+	for _, version := range []int{65, 66} {
 		peer := fmt.Sprintf("peer %d", version)
 		if _, ok := tester.peers[peer]; !ok {
 			t.Errorf("%s dropped", peer)
@@ -910,16 +901,13 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 
 // Tests that if a block is empty (e.g. header only), no body request should be
 // made, and instead the header should be assembled into a whole block by itself.
-func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
-func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) }
+func TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FullSync) }
+func TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FastSync) }
+func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH65, LightSync) }
 
-func TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, 65, FullSync) }
-func TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, 65, FastSync) }
-func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, 65, LightSync) }
-
-func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, 66, FullSync) }
-func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, 66, FastSync) }
-func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, 66, LightSync) }
+func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
+func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
+func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
 
 func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -967,16 +955,13 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
 
 // Tests that headers are enqueued continuously, preventing malicious nodes from
 // stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }
-func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) }
-
-func TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, 65, FullSync) }
-func TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, 65, FastSync) }
-func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, 65, LightSync) }
+func TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FullSync) }
+func TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FastSync) }
+func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH65, LightSync) }
 
-func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, 66, FullSync) }
-func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, 66, FastSync) }
-func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, 66, LightSync) }
+func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
+func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
+func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
 
 func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1002,16 +987,13 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
 
 // Tests that if requested headers are shifted (i.e. first is missing), the queue
 // detects the invalid numbering.
-func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }
-func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) }
-
-func TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, 65, FullSync) }
-func TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, 65, FastSync) }
-func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, 65, LightSync) }
+func TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FullSync) }
+func TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FastSync) }
+func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH65, LightSync) }
 
-func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, 66, FullSync) }
-func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, 66, FastSync) }
-func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, 66, LightSync) }
+func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
+func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
+func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
 
 func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1042,9 +1024,8 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that upon detecting an invalid header, the recent ones are rolled back
 // for various failure scenarios. Afterwards a full sync is attempted to make
 // sure no state was corrupted.
-func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
-func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, 65, FastSync) }
-func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, 66, FastSync) }
+func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH65, FastSync) }
+func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }
 
 func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1134,16 +1115,25 @@ func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
 
 // Tests that a peer advertising a high TD doesn't get to stall the downloader
 // afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
-func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) }
-
-func TestHighTDStarvationAttack65Full(t *testing.T)  { testHighTDStarvationAttack(t, 65, FullSync) }
-func TestHighTDStarvationAttack65Fast(t *testing.T)  { testHighTDStarvationAttack(t, 65, FastSync) }
-func TestHighTDStarvationAttack65Light(t *testing.T) { testHighTDStarvationAttack(t, 65, LightSync) }
+func TestHighTDStarvationAttack65Full(t *testing.T) {
+	testHighTDStarvationAttack(t, eth.ETH65, FullSync)
+}
+func TestHighTDStarvationAttack65Fast(t *testing.T) {
+	testHighTDStarvationAttack(t, eth.ETH65, FastSync)
+}
+func TestHighTDStarvationAttack65Light(t *testing.T) {
+	testHighTDStarvationAttack(t, eth.ETH65, LightSync)
+}
 
-func TestHighTDStarvationAttack66Full(t *testing.T)  { testHighTDStarvationAttack(t, 66, FullSync) }
-func TestHighTDStarvationAttack66Fast(t *testing.T)  { testHighTDStarvationAttack(t, 66, FastSync) }
-func TestHighTDStarvationAttack66Light(t *testing.T) { testHighTDStarvationAttack(t, 66, LightSync) }
+func TestHighTDStarvationAttack66Full(t *testing.T) {
+	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
+}
+func TestHighTDStarvationAttack66Fast(t *testing.T) {
+	testHighTDStarvationAttack(t, eth.ETH66, FastSync)
+}
+func TestHighTDStarvationAttack66Light(t *testing.T) {
+	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
+}
 
 func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1159,9 +1149,8 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
 }
 
 // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
-func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) }
-func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, 66) }
+func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH65) }
+func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
 
 func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -1213,16 +1202,13 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
 
 // Tests that synchronisation progress (origin block number, current block number
 // and highest block number) is tracked and updated correctly.
-func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }
-func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) }
-
-func TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, 65, FullSync) }
-func TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, 65, FastSync) }
-func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, 65, LightSync) }
+func TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, eth.ETH65, FullSync) }
+func TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, eth.ETH65, FastSync) }
+func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, eth.ETH65, LightSync) }
 
-func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, 66, FullSync) }
-func TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, 66, FastSync) }
-func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, 66, LightSync) }
+func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
+func TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, eth.ETH66, FastSync) }
+func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
 
 func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1300,16 +1286,13 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
 // Tests that synchronisation progress (origin block number and highest block
 // number) is tracked and updated correctly in case of a fork (or manual head
 // reversal).
-func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }
-func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) }
-
-func TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, 65, FullSync) }
-func TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, 65, FastSync) }
-func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, 65, LightSync) }
+func TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FullSync) }
+func TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FastSync) }
+func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH65, LightSync) }
 
-func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, 66, FullSync) }
-func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, 66, FastSync) }
-func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, 66, LightSync) }
+func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
+func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FastSync) }
+func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
 
 func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1379,16 +1362,13 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 // Tests that if synchronisation is aborted due to some failure, then the progress
 // origin is not updated in the next sync cycle, as it should be considered the
 // continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
-func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) }
+func TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FullSync) }
+func TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FastSync) }
+func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH65, LightSync) }
 
-func TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, 65, FullSync) }
-func TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, 65, FastSync) }
-func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, 65, LightSync) }
-
-func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, 66, FullSync) }
-func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, 66, FastSync) }
-func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, 66, LightSync) }
+func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
+func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FastSync) }
+func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
 
 func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1455,16 +1435,13 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 
 // Tests that if an attacker fakes a chain height, after the attack is detected,
 // the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }
-func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) }
-
-func TestFakedSyncProgress65Full(t *testing.T)  { testFakedSyncProgress(t, 65, FullSync) }
-func TestFakedSyncProgress65Fast(t *testing.T)  { testFakedSyncProgress(t, 65, FastSync) }
-func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, 65, LightSync) }
+func TestFakedSyncProgress65Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH65, FullSync) }
+func TestFakedSyncProgress65Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH65, FastSync) }
+func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, LightSync) }
 
-func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, 66, FullSync) }
-func TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, 66, FastSync) }
-func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, 66, LightSync) }
+func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
+func TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FastSync) }
+func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
 
 func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1535,16 +1512,13 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 
 // This test reproduces an issue where unexpected deliveries would
 // block indefinitely if they arrived at the right time.
-func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }
-func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
+func TestDeliverHeadersHang65Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH65, FullSync) }
+func TestDeliverHeadersHang65Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH65, FastSync) }
+func TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, LightSync) }
 
-func TestDeliverHeadersHang65Full(t *testing.T)  { testDeliverHeadersHang(t, 65, FullSync) }
-func TestDeliverHeadersHang65Fast(t *testing.T)  { testDeliverHeadersHang(t, 65, FastSync) }
-func TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, 65, LightSync) }
-
-func TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, 66, FullSync) }
-func TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, 66, FastSync) }
-func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, 66, LightSync) }
+func TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
+func TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
+func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }
 
 func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()
@@ -1699,16 +1673,17 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
 
 // Tests that peers below a pre-configured checkpoint block are prevented from
 // being fast-synced from, avoiding potential cheap eclipse attacks.
-func TestCheckpointEnforcement64Full(t *testing.T) { testCheckpointEnforcement(t, 64, FullSync) }
-func TestCheckpointEnforcement64Fast(t *testing.T) { testCheckpointEnforcement(t, 64, FastSync) }
-
-func TestCheckpointEnforcement65Full(t *testing.T)  { testCheckpointEnforcement(t, 65, FullSync) }
-func TestCheckpointEnforcement65Fast(t *testing.T)  { testCheckpointEnforcement(t, 65, FastSync) }
-func TestCheckpointEnforcement65Light(t *testing.T) { testCheckpointEnforcement(t, 65, LightSync) }
+func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FullSync) }
+func TestCheckpointEnforcement65Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FastSync) }
+func TestCheckpointEnforcement65Light(t *testing.T) {
+	testCheckpointEnforcement(t, eth.ETH65, LightSync)
+}
 
-func TestCheckpointEnforcement66Full(t *testing.T)  { testCheckpointEnforcement(t, 66, FullSync) }
-func TestCheckpointEnforcement66Fast(t *testing.T)  { testCheckpointEnforcement(t, 66, FastSync) }
-func TestCheckpointEnforcement66Light(t *testing.T) { testCheckpointEnforcement(t, 66, LightSync) }
+func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
+func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
+func TestCheckpointEnforcement66Light(t *testing.T) {
+	testCheckpointEnforcement(t, eth.ETH66, LightSync)
+}
 
 func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
 	t.Parallel()

+ 4 - 4
eth/downloader/peer.go

@@ -458,7 +458,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.headerThroughput
 	}
-	return ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput)
+	return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput)
 }
 
 // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
@@ -472,7 +472,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.blockThroughput
 	}
-	return ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput)
+	return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput)
 }
 
 // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
@@ -486,7 +486,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.receiptThroughput
 	}
-	return ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput)
+	return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput)
 }
 
 // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
@@ -500,7 +500,7 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
 		defer p.lock.RUnlock()
 		return p.stateThroughput
 	}
-	return ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput)
+	return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput)
 }
 
 // idlePeers retrieves a flat list of all currently idle peers satisfying the

+ 1 - 1
eth/downloader/statesync.go

@@ -298,7 +298,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
 	return &stateSync{
 		d:         d,
 		root:      root,
-		sched:     state.NewStateSync(root, d.stateDB, d.stateBloom),
+		sched:     state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
 		keccak:    sha3.NewLegacyKeccak256().(crypto.KeccakState),
 		trieTasks: make(map[common.Hash]*trieTask),
 		codeTasks: make(map[common.Hash]*codeTask),

+ 6 - 2
eth/fetcher/block_fetcher.go

@@ -331,8 +331,12 @@ func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transac
 // events.
 func (f *BlockFetcher) loop() {
 	// Iterate the block fetching until a quit is requested
-	fetchTimer := time.NewTimer(0)
-	completeTimer := time.NewTimer(0)
+	var (
+		fetchTimer    = time.NewTimer(0)
+		completeTimer = time.NewTimer(0)
+	)
+	<-fetchTimer.C // clear out the channel
+	<-completeTimer.C
 	defer fetchTimer.Stop()
 	defer completeTimer.Stop()
 

+ 2 - 2
eth/filters/api.go

@@ -74,8 +74,8 @@ func NewPublicFilterAPI(backend Backend, lightMode bool, timeout time.Duration,
 	return api
 }
 
-// timeoutLoop runs every 5 minutes and deletes filters that have not been recently used.
-// Tt is started when the api is created.
+// timeoutLoop runs at the interval set by 'timeout' and deletes filters
+// that have not been recently used. It is started when the API is created.
 func (api *PublicFilterAPI) timeoutLoop(timeout time.Duration) {
 	var toUninstall []*Subscription
 	ticker := time.NewTicker(timeout)

+ 3 - 0
eth/gasprice/gasprice.go

@@ -213,6 +213,9 @@ func (gpo *Oracle) getBlockPrices(ctx context.Context, signer types.Signer, bloc
 
 	var prices []*big.Int
 	for _, tx := range txs {
+		if tx.GasPriceIntCmp(common.Big1) <= 0 {
+			continue
+		}
 		sender, err := types.Sender(signer, tx)
 		if err == nil && sender != block.Coinbase() {
 			prices = append(prices, tx.GasPrice())

+ 1 - 5
eth/handler.go

@@ -504,11 +504,7 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
 	for peer, hashes := range annos {
 		annoPeers++
 		annoCount += len(hashes)
-		if peer.Version() >= eth.ETH65 {
-			peer.AsyncSendPooledTransactionHashes(hashes)
-		} else {
-			peer.AsyncSendTransactions(hashes)
-		}
+		peer.AsyncSendPooledTransactionHashes(hashes)
 	}
 	log.Debug("Transaction broadcast", "txs", len(txs),
 		"announce packs", annoPeers, "announced hashes", annoCount,

+ 15 - 27
eth/handler_eth_test.go

@@ -80,8 +80,8 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
 
 // Tests that peers are correctly accepted (or rejected) based on the advertised
 // fork IDs in the protocol handshake.
-func TestForkIDSplit64(t *testing.T) { testForkIDSplit(t, 64) }
-func TestForkIDSplit65(t *testing.T) { testForkIDSplit(t, 65) }
+func TestForkIDSplit65(t *testing.T) { testForkIDSplit(t, eth.ETH65) }
+func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }
 
 func testForkIDSplit(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -236,8 +236,8 @@ func testForkIDSplit(t *testing.T, protocol uint) {
 }
 
 // Tests that received transactions are added to the local pool.
-func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }
-func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) }
+func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, eth.ETH65) }
+func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
 
 func testRecvTransactions(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -294,8 +294,8 @@ func testRecvTransactions(t *testing.T, protocol uint) {
 }
 
 // This test checks that pending transactions are sent.
-func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }
-func TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) }
+func TestSendTransactions65(t *testing.T) { testSendTransactions(t, eth.ETH65) }
+func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }
 
 func testSendTransactions(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -354,19 +354,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
 	seen := make(map[common.Hash]struct{})
 	for len(seen) < len(insert) {
 		switch protocol {
-		case 63, 64:
-			select {
-			case <-anns:
-				t.Errorf("tx announce received on pre eth/65")
-			case txs := <-bcasts:
-				for _, tx := range txs {
-					if _, ok := seen[tx.Hash()]; ok {
-						t.Errorf("duplicate transaction announced: %x", tx.Hash())
-					}
-					seen[tx.Hash()] = struct{}{}
-				}
-			}
-		case 65:
+		case 65, 66:
 			select {
 			case hashes := <-anns:
 				for _, hash := range hashes {
@@ -392,8 +380,8 @@ func testSendTransactions(t *testing.T, protocol uint) {
 
 // Tests that transactions get propagated to all attached peers, either via direct
 // broadcasts or via announcements/retrievals.
-func TestTransactionPropagation64(t *testing.T) { testTransactionPropagation(t, 64) }
-func TestTransactionPropagation65(t *testing.T) { testTransactionPropagation(t, 65) }
+func TestTransactionPropagation65(t *testing.T) { testTransactionPropagation(t, eth.ETH65) }
+func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }
 
 func testTransactionPropagation(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -530,8 +518,8 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
 	defer p2pLocal.Close()
 	defer p2pRemote.Close()
 
-	local := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{1}, "", nil), p2pLocal, handler.txpool)
-	remote := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{2}, "", nil), p2pRemote, handler.txpool)
+	local := eth.NewPeer(eth.ETH65, p2p.NewPeer(enode.ID{1}, "", nil), p2pLocal, handler.txpool)
+	remote := eth.NewPeer(eth.ETH65, p2p.NewPeer(enode.ID{2}, "", nil), p2pRemote, handler.txpool)
 	defer local.Close()
 	defer remote.Close()
 
@@ -620,8 +608,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
 		defer sourcePipe.Close()
 		defer sinkPipe.Close()
 
-		sourcePeer := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{byte(i)}, "", nil), sourcePipe, nil)
-		sinkPeer := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, nil)
+		sourcePeer := eth.NewPeer(eth.ETH65, p2p.NewPeer(enode.ID{byte(i)}, "", nil), sourcePipe, nil)
+		sinkPeer := eth.NewPeer(eth.ETH65, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, nil)
 		defer sourcePeer.Close()
 		defer sinkPeer.Close()
 
@@ -672,8 +660,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
 
 // Tests that a propagated malformed block (uncles or transactions don't match
 // the hashes in the header) gets discarded and not broadcast forward.
-func TestBroadcastMalformedBlock64(t *testing.T) { testBroadcastMalformedBlock(t, 64) }
-func TestBroadcastMalformedBlock65(t *testing.T) { testBroadcastMalformedBlock(t, 65) }
+func TestBroadcastMalformedBlock65(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH65) }
+func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }
 
 func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
 	t.Parallel()

+ 17 - 36
eth/protocols/eth/handler.go

@@ -171,44 +171,27 @@ type Decoder interface {
 	Time() time.Time
 }
 
-var eth64 = map[uint64]msgHandler{
-	GetBlockHeadersMsg: handleGetBlockHeaders,
-	BlockHeadersMsg:    handleBlockHeaders,
-	GetBlockBodiesMsg:  handleGetBlockBodies,
-	BlockBodiesMsg:     handleBlockBodies,
-	GetNodeDataMsg:     handleGetNodeData,
-	NodeDataMsg:        handleNodeData,
-	GetReceiptsMsg:     handleGetReceipts,
-	ReceiptsMsg:        handleReceipts,
-	NewBlockHashesMsg:  handleNewBlockhashes,
-	NewBlockMsg:        handleNewBlock,
-	TransactionsMsg:    handleTransactions,
-}
 var eth65 = map[uint64]msgHandler{
-	// old 64 messages
-	GetBlockHeadersMsg: handleGetBlockHeaders,
-	BlockHeadersMsg:    handleBlockHeaders,
-	GetBlockBodiesMsg:  handleGetBlockBodies,
-	BlockBodiesMsg:     handleBlockBodies,
-	GetNodeDataMsg:     handleGetNodeData,
-	NodeDataMsg:        handleNodeData,
-	GetReceiptsMsg:     handleGetReceipts,
-	ReceiptsMsg:        handleReceipts,
-	NewBlockHashesMsg:  handleNewBlockhashes,
-	NewBlockMsg:        handleNewBlock,
-	TransactionsMsg:    handleTransactions,
-	// New eth65 messages
+	GetBlockHeadersMsg:            handleGetBlockHeaders,
+	BlockHeadersMsg:               handleBlockHeaders,
+	GetBlockBodiesMsg:             handleGetBlockBodies,
+	BlockBodiesMsg:                handleBlockBodies,
+	GetNodeDataMsg:                handleGetNodeData,
+	NodeDataMsg:                   handleNodeData,
+	GetReceiptsMsg:                handleGetReceipts,
+	ReceiptsMsg:                   handleReceipts,
+	NewBlockHashesMsg:             handleNewBlockhashes,
+	NewBlockMsg:                   handleNewBlock,
+	TransactionsMsg:               handleTransactions,
 	NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes,
 	GetPooledTransactionsMsg:      handleGetPooledTransactions,
 	PooledTransactionsMsg:         handlePooledTransactions,
 }
 
 var eth66 = map[uint64]msgHandler{
-	// eth64 announcement messages (no id)
-	NewBlockHashesMsg: handleNewBlockhashes,
-	NewBlockMsg:       handleNewBlock,
-	TransactionsMsg:   handleTransactions,
-	// eth65 announcement messages (no id)
+	NewBlockHashesMsg:             handleNewBlockhashes,
+	NewBlockMsg:                   handleNewBlock,
+	TransactionsMsg:               handleTransactions,
 	NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes,
 	// eth66 messages with request-id
 	GetBlockHeadersMsg:       handleGetBlockHeaders66,
@@ -236,13 +219,11 @@ func handleMessage(backend Backend, peer *Peer) error {
 	}
 	defer msg.Discard()
 
-	var handlers = eth64
-	if peer.Version() == ETH65 {
-		handlers = eth65
-	} else if peer.Version() >= ETH66 {
+	var handlers = eth65
+	if peer.Version() >= ETH66 {
 		handlers = eth66
 	}
-	// Track the emount of time it takes to serve the request and run the handler
+	// Track the amount of time it takes to serve the request and run the handler
 	if metrics.Enabled {
 		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
 		defer func(start time.Time) {
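
A side note on the metric added in this hunk: the histogram name is assembled from the p2p handle prefix, the protocol name, the negotiated version and the message code. A standalone sketch of that naming (the "p2p/handle" prefix is an assumption standing in for p2p.HandleHistName):

```go
package main

import "fmt"

func main() {
	// Mirrors the Sprintf in handleMessage: prefix/protocol/version/code.
	// "p2p/handle" is assumed here; the real code uses p2p.HandleHistName.
	name := fmt.Sprintf("%s/%s/%d/%#02x", "p2p/handle", "eth", 66, 0x03)
	fmt.Println(name) // p2p/handle/eth/66/0x3
}
```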

+ 91 - 24
eth/protocols/eth/handler_test.go

@@ -110,8 +110,8 @@ func (b *testBackend) Handle(*Peer, Packet) error {
 }
 
 // Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }
-func TestGetBlockHeaders65(t *testing.T) { testGetBlockHeaders(t, 65) }
+func TestGetBlockHeaders65(t *testing.T) { testGetBlockHeaders(t, ETH65) }
+func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) }
 
 func testGetBlockHeaders(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -254,18 +254,44 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 			headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
 		}
 		// Send the hash request and verify the response
-		p2p.Send(peer.app, GetBlockHeadersMsg, tt.query)
-		if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil {
-			t.Errorf("test %d: headers mismatch: %v", i, err)
+		if protocol <= ETH65 {
+			p2p.Send(peer.app, GetBlockHeadersMsg, tt.query)
+			if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil {
+				t.Errorf("test %d: headers mismatch: %v", i, err)
+			}
+		} else {
+			p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+				RequestId:             123,
+				GetBlockHeadersPacket: tt.query,
+			})
+			if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
+				RequestId:          123,
+				BlockHeadersPacket: headers,
+			}); err != nil {
+				t.Errorf("test %d: headers mismatch: %v", i, err)
+			}
 		}
 		// If the test used number origins, repeat with hashes as the origin too
 		if tt.query.Origin.Hash == (common.Hash{}) {
 			if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
 				tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
 
-				p2p.Send(peer.app, GetBlockHeadersMsg, tt.query)
-				if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil {
-					t.Errorf("test %d: headers mismatch: %v", i, err)
+				if protocol <= ETH65 {
+					p2p.Send(peer.app, GetBlockHeadersMsg, tt.query)
+					if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil {
+						t.Errorf("test %d: headers mismatch: %v", i, err)
+					}
+				} else {
+					p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+						RequestId:             456,
+						GetBlockHeadersPacket: tt.query,
+					})
+					if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
+						RequestId:          456,
+						BlockHeadersPacket: headers,
+					}); err != nil {
+						t.Errorf("test %d: headers mismatch: %v", i, err)
+					}
 				}
 			}
 		}
@@ -273,8 +299,8 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 }
 
 // Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }
-func TestGetBlockBodies65(t *testing.T) { testGetBlockBodies(t, 65) }
+func TestGetBlockBodies65(t *testing.T) { testGetBlockBodies(t, ETH65) }
+func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) }
 
 func testGetBlockBodies(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -343,16 +369,29 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
 			}
 		}
 		// Send the hash request and verify the response
-		p2p.Send(peer.app, GetBlockBodiesMsg, hashes)
-		if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, bodies); err != nil {
-			t.Errorf("test %d: bodies mismatch: %v", i, err)
+		if protocol <= ETH65 {
+			p2p.Send(peer.app, GetBlockBodiesMsg, hashes)
+			if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, bodies); err != nil {
+				t.Errorf("test %d: bodies mismatch: %v", i, err)
+			}
+		} else {
+			p2p.Send(peer.app, GetBlockBodiesMsg, GetBlockBodiesPacket66{
+				RequestId:            123,
+				GetBlockBodiesPacket: hashes,
+			})
+			if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, BlockBodiesPacket66{
+				RequestId:         123,
+				BlockBodiesPacket: bodies,
+			}); err != nil {
+				t.Errorf("test %d: bodies mismatch: %v", i, err)
+			}
 		}
 	}
 }
 
 // Tests that the state trie nodes can be retrieved based on hashes.
-func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }
-func TestGetNodeData65(t *testing.T) { testGetNodeData(t, 65) }
+func TestGetNodeData65(t *testing.T) { testGetNodeData(t, ETH65) }
+func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66) }
 
 func testGetNodeData(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -410,7 +449,14 @@ func testGetNodeData(t *testing.T, protocol uint) {
 	}
 	it.Release()
 
-	p2p.Send(peer.app, GetNodeDataMsg, hashes)
+	if protocol <= ETH65 {
+		p2p.Send(peer.app, GetNodeDataMsg, hashes)
+	} else {
+		p2p.Send(peer.app, GetNodeDataMsg, GetNodeDataPacket66{
+			RequestId:         123,
+			GetNodeDataPacket: hashes,
+		})
+	}
 	msg, err := peer.app.ReadMsg()
 	if err != nil {
 		t.Fatalf("failed to read node data response: %v", err)
@@ -419,8 +465,16 @@ func testGetNodeData(t *testing.T, protocol uint) {
 		t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg)
 	}
 	var data [][]byte
-	if err := msg.Decode(&data); err != nil {
-		t.Fatalf("failed to decode response node data: %v", err)
+	if protocol <= ETH65 {
+		if err := msg.Decode(&data); err != nil {
+			t.Fatalf("failed to decode response node data: %v", err)
+		}
+	} else {
+		var res NodeDataPacket66
+		if err := msg.Decode(&res); err != nil {
+			t.Fatalf("failed to decode response node data: %v", err)
+		}
+		data = res.NodeDataPacket
 	}
 	// Verify that all hashes correspond to the requested data, and reconstruct a state tree
 	for i, want := range hashes {
@@ -452,8 +506,8 @@ func testGetNodeData(t *testing.T, protocol uint) {
 }
 
 // Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetBlockReceipts64(t *testing.T) { testGetBlockReceipts(t, 64) }
-func TestGetBlockReceipts65(t *testing.T) { testGetBlockReceipts(t, 65) }
+func TestGetBlockReceipts65(t *testing.T) { testGetBlockReceipts(t, ETH65) }
+func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) }
 
 func testGetBlockReceipts(t *testing.T, protocol uint) {
 	t.Parallel()
@@ -503,7 +557,7 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
 	// Collect the hashes to request, and the response to expect
 	var (
 		hashes   []common.Hash
-		receipts []types.Receipts
+		receipts [][]*types.Receipt
 	)
 	for i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ {
 		block := backend.chain.GetBlockByNumber(i)
@@ -512,8 +566,21 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
 		receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
 	}
 	// Send the hash request and verify the response
-	p2p.Send(peer.app, GetReceiptsMsg, hashes)
-	if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, receipts); err != nil {
-		t.Errorf("receipts mismatch: %v", err)
+	if protocol <= ETH65 {
+		p2p.Send(peer.app, GetReceiptsMsg, hashes)
+		if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, receipts); err != nil {
+			t.Errorf("receipts mismatch: %v", err)
+		}
+	} else {
+		p2p.Send(peer.app, GetReceiptsMsg, GetReceiptsPacket66{
+			RequestId:         123,
+			GetReceiptsPacket: hashes,
+		})
+		if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, ReceiptsPacket66{
+			RequestId:      123,
+			ReceiptsPacket: receipts,
+		}); err != nil {
+			t.Errorf("receipts mismatch: %v", err)
+		}
 	}
 }
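
Every eth/66 branch above follows the same shape: the old payload is wrapped in an envelope carrying a request id, and the response must echo that id. A self-contained toy (names hypothetical) of why the id matters once requests run concurrently:

```go
package main

import "fmt"

// envelope mimics the eth/66 *Packet66 wrappers: a request id plus payload.
type envelope struct {
	RequestId uint64
	Payload   string
}

func main() {
	// Two requests in flight on one peer, keyed by id (the tests above
	// use 123 and 456 the same way).
	pending := map[uint64]string{123: "headers by number", 456: "headers by hash"}

	// Responses may arrive in any order; the echoed id pairs each one up,
	// which the pre-66 FIFO matching could not do.
	for _, res := range []envelope{{RequestId: 456}, {RequestId: 123}} {
		fmt.Printf("response %d answers the %q request\n", res.RequestId, pending[res.RequestId])
		delete(pending, res.RequestId)
	}
}
```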

+ 13 - 3
eth/protocols/eth/handlers.go

@@ -292,6 +292,9 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {
 	if err := msg.Decode(ann); err != nil {
 		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 	}
+	if err := ann.sanityCheck(); err != nil {
+		return err
+	}
 	if hash := types.CalcUncleHash(ann.Block.Uncles()); hash != ann.Block.UncleHash() {
 		log.Warn("Propagated block has invalid uncles", "have", hash, "exp", ann.Block.UncleHash())
 		return nil // TODO(karalabe): return error eventually, but wait a few releases
@@ -300,9 +303,6 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {
 		log.Warn("Propagated block has invalid body", "have", hash, "exp", ann.Block.TxHash())
 		return nil // TODO(karalabe): return error eventually, but wait a few releases
 	}
-	if err := ann.sanityCheck(); err != nil {
-		return err
-	}
 	ann.Block.ReceivedAt = msg.Time()
 	ann.Block.ReceivedFrom = peer
 
@@ -327,6 +327,8 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
 	if err := msg.Decode(res); err != nil {
 		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 	}
+	requestTracker.Fulfil(peer.id, peer.version, BlockHeadersMsg, res.RequestId)
+
 	return backend.Handle(peer, &res.BlockHeadersPacket)
 }
 
@@ -345,6 +347,8 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
 	if err := msg.Decode(res); err != nil {
 		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 	}
+	requestTracker.Fulfil(peer.id, peer.version, BlockBodiesMsg, res.RequestId)
+
 	return backend.Handle(peer, &res.BlockBodiesPacket)
 }
 
@@ -363,6 +367,8 @@ func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
 	if err := msg.Decode(res); err != nil {
 		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 	}
+	requestTracker.Fulfil(peer.id, peer.version, NodeDataMsg, res.RequestId)
+
 	return backend.Handle(peer, &res.NodeDataPacket)
 }
 
@@ -381,6 +387,8 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
 	if err := msg.Decode(res); err != nil {
 		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 	}
+	requestTracker.Fulfil(peer.id, peer.version, ReceiptsMsg, res.RequestId)
+
 	return backend.Handle(peer, &res.ReceiptsPacket)
 }
 
@@ -506,5 +514,7 @@ func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error
 		}
 		peer.markTransaction(tx.Hash())
 	}
+	requestTracker.Fulfil(peer.id, peer.version, PooledTransactionsMsg, txs.RequestId)
+
 	return backend.Handle(peer, &txs.PooledTransactionsPacket)
 }

+ 2 - 2
eth/protocols/eth/handshake_test.go

@@ -27,8 +27,8 @@ import (
 )
 
 // Tests that handshake failures are detected and reported correctly.
-func TestHandshake64(t *testing.T) { testHandshake(t, 64) }
-func TestHandshake65(t *testing.T) { testHandshake(t, 65) }
+func TestHandshake65(t *testing.T) { testHandshake(t, ETH65) }
+func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) }
 
 func testHandshake(t *testing.T, protocol uint) {
 	t.Parallel()

+ 28 - 7
eth/protocols/eth/peer.go

@@ -413,8 +413,11 @@ func (p *Peer) RequestOneHeader(hash common.Hash) error {
 		Reverse: false,
 	}
 	if p.Version() >= ETH66 {
+		id := rand.Uint64()
+
+		requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
 		return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
-			RequestId:             rand.Uint64(),
+			RequestId:             id,
 			GetBlockHeadersPacket: &query,
 		})
 	}
@@ -432,8 +435,11 @@ func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, re
 		Reverse: reverse,
 	}
 	if p.Version() >= ETH66 {
+		id := rand.Uint64()
+
+		requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
 		return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
-			RequestId:             rand.Uint64(),
+			RequestId:             id,
 			GetBlockHeadersPacket: &query,
 		})
 	}
@@ -451,8 +457,11 @@ func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, rever
 		Reverse: reverse,
 	}
 	if p.Version() >= ETH66 {
+		id := rand.Uint64()
+
+		requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
 		return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
-			RequestId:             rand.Uint64(),
+			RequestId:             id,
 			GetBlockHeadersPacket: &query,
 		})
 	}
@@ -476,8 +485,11 @@ func (p *Peer) ExpectRequestHeadersByNumber(origin uint64, amount int, skip int,
 func (p *Peer) RequestBodies(hashes []common.Hash) error {
 	p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
 	if p.Version() >= ETH66 {
+		id := rand.Uint64()
+
+		requestTracker.Track(p.id, p.version, GetBlockBodiesMsg, BlockBodiesMsg, id)
 		return p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
-			RequestId:            rand.Uint64(),
+			RequestId:            id,
 			GetBlockBodiesPacket: hashes,
 		})
 	}
@@ -489,8 +501,11 @@ func (p *Peer) RequestBodies(hashes []common.Hash) error {
 func (p *Peer) RequestNodeData(hashes []common.Hash) error {
 	p.Log().Debug("Fetching batch of state data", "count", len(hashes))
 	if p.Version() >= ETH66 {
+		id := rand.Uint64()
+
+		requestTracker.Track(p.id, p.version, GetNodeDataMsg, NodeDataMsg, id)
 		return p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{
-			RequestId:         rand.Uint64(),
+			RequestId:         id,
 			GetNodeDataPacket: hashes,
 		})
 	}
@@ -501,8 +516,11 @@ func (p *Peer) RequestNodeData(hashes []common.Hash) error {
 func (p *Peer) RequestReceipts(hashes []common.Hash) error {
 	p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
 	if p.Version() >= ETH66 {
+		id := rand.Uint64()
+
+		requestTracker.Track(p.id, p.version, GetReceiptsMsg, ReceiptsMsg, id)
 		return p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{
-			RequestId:         rand.Uint64(),
+			RequestId:         id,
 			GetReceiptsPacket: hashes,
 		})
 	}
@@ -513,8 +531,11 @@ func (p *Peer) RequestReceipts(hashes []common.Hash) error {
 func (p *Peer) RequestTxs(hashes []common.Hash) error {
 	p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
 	if p.Version() >= ETH66 {
+		id := rand.Uint64()
+
+		requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
 		return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{
-			RequestId:                   rand.Uint64(),
+			RequestId:                   id,
 			GetPooledTransactionsPacket: hashes,
 		})
 	}

+ 2 - 3
eth/protocols/eth/protocol.go

@@ -30,7 +30,6 @@ import (
 
 // Constants to match up protocol versions and messages
 const (
-	ETH64 = 64
 	ETH65 = 65
 	ETH66 = 66
 )
@@ -41,11 +40,11 @@ const ProtocolName = "eth"
 
 // ProtocolVersions are the supported versions of the `eth` protocol (first
 // is primary).
-var ProtocolVersions = []uint{ETH66, ETH65, ETH64}
+var ProtocolVersions = []uint{ETH66, ETH65}
 
 // protocolLengths are the number of implemented message corresponding to
 // different protocol versions.
-var protocolLengths = map[uint]uint64{ETH66: 17, ETH65: 17, ETH64: 17}
+var protocolLengths = map[uint]uint64{ETH66: 17, ETH65: 17}
 
 // maxMessageSize is the maximum cap on the size of a protocol message.
 const maxMessageSize = 10 * 1024 * 1024
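
Dropping ETH64 from ProtocolVersions is what actually retires the protocol on the wire, since the advertised devp2p capabilities are derived from that slice. A trimmed, runnable sketch of the derivation (the real construction is MakeProtocols; Run and the info callbacks are omitted):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p"
)

func main() {
	// Mirrors ProtocolVersions and protocolLengths after this change.
	versions := []uint{66, 65}
	lengths := map[uint]uint64{66: 17, 65: 17}

	protos := make([]p2p.Protocol, 0, len(versions))
	for _, version := range versions {
		protos = append(protos, p2p.Protocol{
			Name:    "eth",
			Version: version,
			Length:  lengths[version],
		})
	}
	for _, proto := range protos {
		fmt.Printf("advertising %s/%d (%d message codes)\n", proto.Name, proto.Version, proto.Length)
	}
}
```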

+ 26 - 0
eth/protocols/eth/tracker.go

@@ -0,0 +1,26 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+	"time"
+
+	"github.com/ethereum/go-ethereum/p2p/tracker"
+)
+
+// requestTracker is a singleton tracker for eth/66 and newer request times.
+var requestTracker = tracker.New(ProtocolName, 5*time.Minute)
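
This singleton is the counterpart of the Track/Fulfil calls threaded through peer.go and handlers.go above. A minimal sketch of the round-trip pattern, with stand-in message codes instead of the real eth constants (the calls are cheap no-ops unless the node runs with metrics enabled):

```go
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/p2p/tracker"
)

const (
	getHeadersMsg uint64 = 0x03 // stand-in for GetBlockHeadersMsg
	headersMsg    uint64 = 0x04 // stand-in for BlockHeadersMsg
)

func main() {
	// One tracker per protocol; entries older than the timeout are
	// reported as timed-out requests.
	t := tracker.New("eth", 5*time.Minute)

	// Request path (cf. Peer.RequestHeadersByNumber): pick an id,
	// register the pending request, then send.
	id := uint64(123) // rand.Uint64() in the real code
	t.Track("peer-1", 66, getHeadersMsg, headersMsg, id)

	// Response path (cf. handleBlockHeaders66): mark the request
	// fulfilled so its round-trip time is recorded.
	t.Fulfil("peer-1", 66, headersMsg, id)
}
```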

+ 15 - 1
eth/protocols/snap/handler.go

@@ -84,6 +84,12 @@ type Backend interface {
 
 // MakeProtocols constructs the P2P protocol definitions for `snap`.
 func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
+	// Filter the discovery iterator for nodes advertising snap support.
+	dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
+		var snap enrEntry
+		return n.Load(&snap) == nil
+	})
+
 	protocols := make([]p2p.Protocol, len(ProtocolVersions))
 	for i, version := range ProtocolVersions {
 		version := version // Closure
@@ -227,6 +233,8 @@ func handleMessage(backend Backend, peer *Peer) error {
 				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
 			}
 		}
+		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)
+
 		return backend.Handle(peer, res)
 
 	case msg.Code == GetStorageRangesMsg:
@@ -352,7 +360,7 @@ func handleMessage(backend Backend, peer *Peer) error {
 		if err := msg.Decode(res); err != nil {
 			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 		}
-		// Ensure the ranges ae monotonically increasing
+		// Ensure the ranges are monotonically increasing
 		for i, slots := range res.Slots {
 			for j := 1; j < len(slots); j++ {
 				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
@@ -360,6 +368,8 @@ func handleMessage(backend Backend, peer *Peer) error {
 				}
 			}
 		}
+		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)
+
 		return backend.Handle(peer, res)
 
 	case msg.Code == GetByteCodesMsg:
@@ -404,6 +414,8 @@ func handleMessage(backend Backend, peer *Peer) error {
 		if err := msg.Decode(res); err != nil {
 			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 		}
+		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)
+
 		return backend.Handle(peer, res)
 
 	case msg.Code == GetTrieNodesMsg:
@@ -497,6 +509,8 @@ func handleMessage(backend Backend, peer *Peer) error {
 		if err := msg.Decode(res); err != nil {
 			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 		}
+		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)
+
 		return backend.Handle(peer, res)
 
 	default:

+ 7 - 0
eth/protocols/snap/peer.go

@@ -65,6 +65,8 @@ func (p *Peer) Log() log.Logger {
 // trie, starting with the origin.
 func (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit common.Hash, bytes uint64) error {
 	p.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
+
+	requestTracker.Track(p.id, p.version, GetAccountRangeMsg, AccountRangeMsg, id)
 	return p2p.Send(p.rw, GetAccountRangeMsg, &GetAccountRangePacket{
 		ID:     id,
 		Root:   root,
@@ -83,6 +85,7 @@ func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []comm
 	} else {
 		p.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes))
 	}
+	requestTracker.Track(p.id, p.version, GetStorageRangesMsg, StorageRangesMsg, id)
 	return p2p.Send(p.rw, GetStorageRangesMsg, &GetStorageRangesPacket{
 		ID:       id,
 		Root:     root,
@@ -96,6 +99,8 @@ func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []comm
 // RequestByteCodes fetches a batch of bytecodes by hash.
 func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
 	p.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
+
+	requestTracker.Track(p.id, p.version, GetByteCodesMsg, ByteCodesMsg, id)
 	return p2p.Send(p.rw, GetByteCodesMsg, &GetByteCodesPacket{
 		ID:     id,
 		Hashes: hashes,
@@ -107,6 +112,8 @@ func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) e
 // a specific state trie.
 func (p *Peer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
 	p.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
+
+	requestTracker.Track(p.id, p.version, GetTrieNodesMsg, TrieNodesMsg, id)
 	return p2p.Send(p.rw, GetTrieNodesMsg, &GetTrieNodesPacket{
 		ID:    id,
 		Root:  root,

+ 80 - 0
eth/protocols/snap/range.go

@@ -0,0 +1,80 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/holiman/uint256"
+)
+
+// hashRange is a utility to handle ranges of hashes: split up the
+// hash-space into sections and 'walk' over the sections.
+type hashRange struct {
+	current *uint256.Int
+	step    *uint256.Int
+}
+
+// newHashRange creates a new hashRange, initialized at the start position,
+// and with the step set to fill the desired 'num' chunks.
+func newHashRange(start common.Hash, num uint64) *hashRange {
+	left := new(big.Int).Sub(hashSpace, start.Big())
+	step := new(big.Int).Div(
+		new(big.Int).Add(left, new(big.Int).SetUint64(num-1)),
+		new(big.Int).SetUint64(num),
+	)
+	step256 := new(uint256.Int)
+	step256.SetFromBig(step)
+
+	return &hashRange{
+		current: uint256.NewInt().SetBytes32(start[:]),
+		step:    step256,
+	}
+}
+
+// Next pushes the hash range to the next interval.
+func (r *hashRange) Next() bool {
+	next := new(uint256.Int)
+	if overflow := next.AddOverflow(r.current, r.step); overflow {
+		return false
+	}
+	r.current = next
+	return true
+}
+
+// Start returns the first hash in the current interval.
+func (r *hashRange) Start() common.Hash {
+	return r.current.Bytes32()
+}
+
+// End returns the last hash in the current interval.
+func (r *hashRange) End() common.Hash {
+	// If the end overflows (non divisible range), return a shorter interval
+	next := new(uint256.Int)
+	if overflow := next.AddOverflow(r.current, r.step); overflow {
+		return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+	}
+	return new(uint256.Int).Sub(next, uint256.NewInt().SetOne()).Bytes32()
+}
+
+// incHash returns the next hash, in lexicographical order (a.k.a. plus one).
+func incHash(h common.Hash) common.Hash {
+	a := uint256.NewInt().SetBytes32(h[:])
+	a.Add(a, uint256.NewInt().SetOne())
+	return common.Hash(a.Bytes32())
+}
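
Since hashRange is unexported, here is the same ceil-division walk as a standalone math/big program, capping the overflowing last interval exactly as End() does. Splitting the full space into 3 chunks reproduces the 0x55.../0xaa... boundaries asserted in range_test.go below:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	var (
		space = new(big.Int).Lsh(big.NewInt(1), 256) // 2^256 possible hashes
		start = new(big.Int)                         // walk the whole space
		num   = int64(3)                             // desired chunk count
	)
	// step = ceil((space - start) / num): rounding up means the last
	// chunk can only be shorter than the rest, never a tiny remainder.
	left := new(big.Int).Sub(space, start)
	step := new(big.Int).Div(
		new(big.Int).Add(left, big.NewInt(num-1)),
		big.NewInt(num),
	)
	cur := new(big.Int).Set(start)
	for i := int64(0); i < num; i++ {
		end := new(big.Int).Add(cur, step)
		if end.Cmp(space) > 0 {
			end.Set(space) // cap the overflowing last interval, like End()
		}
		last := new(big.Int).Sub(end, big.NewInt(1))
		fmt.Printf("chunk %d: [%064x, %064x]\n", i, cur, last)
		cur = end
	}
}
```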

+ 143 - 0
eth/protocols/snap/range_test.go

@@ -0,0 +1,143 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// Tests that given a starting hash and a density, the hash ranger can correctly
+// split up the remaining hash space into a fixed number of chunks.
+func TestHashRanges(t *testing.T) {
+	tests := []struct {
+		head   common.Hash
+		chunks uint64
+		starts []common.Hash
+		ends   []common.Hash
+	}{
+		// Simple test case to split the entire hash range into 4 chunks
+		{
+			head:   common.Hash{},
+			chunks: 4,
+			starts: []common.Hash{
+				{},
+				common.HexToHash("0x4000000000000000000000000000000000000000000000000000000000000000"),
+				common.HexToHash("0x8000000000000000000000000000000000000000000000000000000000000000"),
+				common.HexToHash("0xc000000000000000000000000000000000000000000000000000000000000000"),
+			},
+			ends: []common.Hash{
+				common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.HexToHash("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+			},
+		},
+		// Split a divisible part of the hash range up into 2 chunks
+		{
+			head:   common.HexToHash("0x2000000000000000000000000000000000000000000000000000000000000000"),
+			chunks: 2,
+			starts: []common.Hash{
+				{},
+				common.HexToHash("0x9000000000000000000000000000000000000000000000000000000000000000"),
+			},
+			ends: []common.Hash{
+				common.HexToHash("0x8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+			},
+		},
+		// Split the entire hash range into 3 non-divisible chunks
+		{
+			head:   common.Hash{},
+			chunks: 3,
+			starts: []common.Hash{
+				{},
+				common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555556"),
+				common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac"),
+			},
+			ends: []common.Hash{
+				common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
+				common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
+				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+			},
+		},
+		// Split a part of the hash range into 3 non-divisible chunks
+		{
+			head:   common.HexToHash("0x2000000000000000000000000000000000000000000000000000000000000000"),
+			chunks: 3,
+			starts: []common.Hash{
+				{},
+				common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
+				common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555556"),
+			},
+			ends: []common.Hash{
+				common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
+				common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555555"),
+				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+			},
+		},
+		// Split a part of the hash range into 3 non-divisible chunks, but with a
+		// meaningful space size for manual verification.
+		//   - The head being 0xff...f0, we have 16 hashes left in the space
+		//   - Chunking 16 into 3 pieces is 5.(3), so we take the ceil of 6 to avoid a micro last chunk
+		//   - Since the range is not divisible, the last interval will be shorter, capped at 0xff...f
+		//   - The chunk ranges thus need to be [..0, ..5], [..6, ..b], [..c, ..f]
+		{
+			head:   common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0"),
+			chunks: 3,
+			starts: []common.Hash{
+				{},
+				common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6"),
+				common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc"),
+			},
+			ends: []common.Hash{
+				common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5"),
+				common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb"),
+				common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+			},
+		},
+	}
+	for i, tt := range tests {
+		r := newHashRange(tt.head, tt.chunks)
+
+		var (
+			starts = []common.Hash{{}}
+			ends   = []common.Hash{r.End()}
+		)
+		for r.Next() {
+			starts = append(starts, r.Start())
+			ends = append(ends, r.End())
+		}
+		if len(starts) != len(tt.starts) {
+			t.Errorf("test %d: starts count mismatch: have %d, want %d", i, len(starts), len(tt.starts))
+		}
+		for j := 0; j < len(starts) && j < len(tt.starts); j++ {
+			if starts[j] != tt.starts[j] {
+				t.Errorf("test %d, start %d: hash mismatch: have %x, want %x", i, j, starts[j], tt.starts[j])
+			}
+		}
+		if len(ends) != len(tt.ends) {
+			t.Errorf("test %d: ends count mismatch: have %d, want %d", i, len(ends), len(tt.ends))
+		}
+		for j := 0; j < len(ends) && j < len(tt.ends); j++ {
+			if ends[j] != tt.ends[j] {
+				t.Errorf("test %d, end %d: hash mismatch: have %x, want %x", i, j, ends[j], tt.ends[j])
+			}
+		}
+	}
+}

The file diff has been suppressed because it is too large
+ 354 - 248
eth/protocols/snap/sync.go


+ 111 - 2
eth/protocols/snap/sync_test.go

@@ -135,6 +135,12 @@ type testPeer struct {
 	trieRequestHandler    trieHandlerFunc
 	codeRequestHandler    codeHandlerFunc
 	term                  func()
+
+	// counters
+	nAccountRequests  int
+	nStorageRequests  int
+	nBytecodeRequests int
+	nTrienodeRequests int
 }
 
 func newTestPeer(id string, t *testing.T, term func()) *testPeer {
@@ -156,19 +162,30 @@ func newTestPeer(id string, t *testing.T, term func()) *testPeer {
 func (t *testPeer) ID() string      { return t.id }
 func (t *testPeer) Log() log.Logger { return t.logger }
 
+func (t *testPeer) Stats() string {
+	return fmt.Sprintf(`Account requests: %d
+Storage requests: %d
+Bytecode requests: %d
+Trienode requests: %d
+`, t.nAccountRequests, t.nStorageRequests, t.nBytecodeRequests, t.nTrienodeRequests)
+}
+
 func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {
 	t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
+	t.nAccountRequests++
 	go t.accountRequestHandler(t, id, root, origin, limit, bytes)
 	return nil
 }
 
 func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {
 	t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes))
+	t.nTrienodeRequests++
 	go t.trieRequestHandler(t, id, root, paths, bytes)
 	return nil
 }
 
 func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
+	t.nStorageRequests++
 	if len(accounts) == 1 && origin != nil {
 		t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes))
 	} else {
@@ -179,6 +196,7 @@ func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []
 }
 
 func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {
+	t.nBytecodeRequests++
 	t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes))
 	go t.codeRequestHandler(t, id, hashes, bytes)
 	return nil
@@ -1365,7 +1383,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
 	step := new(big.Int).Sub(
 		new(big.Int).Div(
 			new(big.Int).Exp(common.Big2, common.Big256, nil),
-			big.NewInt(accountConcurrency),
+			big.NewInt(int64(accountConcurrency)),
 		), common.Big1,
 	)
 	for i := 0; i < accountConcurrency; i++ {
@@ -1529,7 +1547,7 @@ func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice)
 	step := new(big.Int).Sub(
 		new(big.Int).Div(
 			new(big.Int).Exp(common.Big2, common.Big256, nil),
-			big.NewInt(accountConcurrency),
+			big.NewInt(int64(accountConcurrency)),
 		), common.Big1,
 	)
 	for i := 0; i < accountConcurrency; i++ {
@@ -1605,3 +1623,94 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
 	}
 	t.Logf("accounts: %d, slots: %d", accounts, slots)
 }
+
+// TestSyncAccountPerformance tests how efficient the snap algorithm is at
+// minimizing state healing.
+func TestSyncAccountPerformance(t *testing.T) {
+	// Set the account concurrency to 1. This _should_ result in the
+	// range root to become correct, and there should be no healing needed
+	defer func(old int) { accountConcurrency = old }(accountConcurrency)
+	accountConcurrency = 1
+
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, term)
+		source.accountTrie = sourceAccountTrie
+		source.accountValues = elems
+		return source
+	}
+	src := mkSource("source")
+	syncer := setupSyncer(src)
+	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+	// The trie root will always be requested, since it is added when the snap
+	// sync cycle starts. When popping the queue, we do not look it up again.
+	// Doing so would bring this number down to zero in this artificial testcase,
+	// but only add extra IO for no reason in practice.
+	if have, want := src.nTrienodeRequests, 1; have != want {
+		fmt.Print(src.Stats())
+		t.Errorf("trie node heal requests wrong, want %d, have %d", want, have)
+	}
+}
+
+func TestSlotEstimation(t *testing.T) {
+	for i, tc := range []struct {
+		last  common.Hash
+		count int
+		want  uint64
+	}{
+		{
+			// Half the space
+			common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+			100,
+			100,
+		},
+		{
+			// 1 / 16th
+			common.HexToHash("0x0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+			100,
+			1500,
+		},
+		{
+			// Bit more than 1 / 16th
+			common.HexToHash("0x1000000000000000000000000000000000000000000000000000000000000000"),
+			100,
+			1499,
+		},
+		{
+			// Almost everything
+			common.HexToHash("0xF000000000000000000000000000000000000000000000000000000000000000"),
+			100,
+			6,
+		},
+		{
+			// Almost nothing -- should lead to error
+			common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
+			1,
+			0,
+		},
+		{
+			// Nothing -- should lead to error
+			common.Hash{},
+			100,
+			0,
+		},
+	} {
+		have, _ := estimateRemainingSlots(tc.count, tc.last)
+		if want := tc.want; have != want {
+			t.Errorf("test %d: have %d want %d", i, have, want)
+		}
+	}
+}
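
The sync.go hunk that contains estimateRemainingSlots was suppressed above as too large, but the vectors in TestSlotEstimation pin its behaviour down. A reconstruction consistent with those cases (a sketch, not necessarily the literal upstream body):

```go
package main

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
)

// estimateRemainingSlots extrapolates how many storage slots remain, given
// that `hashes` slots were consumed up to position `last`: the consumed
// slice covers last/2^256 of the keyspace, so the total count is scaled up
// proportionally and the already-consumed part subtracted.
func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
	if last == (common.Hash{}) {
		return 0, errors.New("last hash empty")
	}
	space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
	space.Div(space, last.Big())
	if !space.IsUint64() {
		return 0, errors.New("too many slots")
	}
	return space.Uint64() - uint64(hashes), nil
}

func main() {
	// 100 slots consumed up to the middle of the space: ~100 more expected.
	n, err := estimateRemainingSlots(100, common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
	fmt.Println(n, err) // 100 <nil>
}
```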

Some files were not shown because too many files have changed