
Merge pull request #743 from binance-chain/develop

[R4R] Release v1.1.8
zjubfd, 3 years ago
commit 859186f299
100 changed files with 3620 additions and 843 deletions
  1. .github/generate_change_log.sh (+8 -0)
  2. .github/release.env (+2 -2)
  3. .github/workflows/pre-release.yml (+105 -0)
  4. .github/workflows/release.yml (+105 -0)
  5. .gitignore (+2 -0)
  6. CHANGELOG.md (+15 -0)
  7. PULL_REQUEST_TEMPLATE (+0 -14)
  8. README.md (+2 -2)
  9. cmd/evm/disasm.go (+1 -1)
  10. cmd/evm/internal/t8ntool/execution.go (+7 -3)
  11. cmd/evm/internal/t8ntool/flags.go (+2 -2)
  12. cmd/evm/internal/t8ntool/transition.go (+8 -8)
  13. cmd/evm/main.go (+3 -3)
  14. cmd/evm/runner.go (+9 -7)
  15. cmd/evm/staterunner.go (+7 -6)
  16. cmd/geth/chaincmd.go (+3 -3)
  17. cmd/geth/dbcmd.go (+27 -7)
  18. cmd/geth/main.go (+8 -0)
  19. cmd/geth/pruneblock_test.go (+242 -0)
  20. cmd/geth/snapshot.go (+189 -5)
  21. cmd/geth/usage.go (+2 -0)
  22. cmd/utils/flags.go (+25 -7)
  23. consensus/clique/clique.go (+4 -0)
  24. core/block_validator.go (+22 -7)
  25. core/blockchain.go (+177 -77)
  26. core/blockchain_diff_test.go (+5 -5)
  27. core/blockchain_repair_test.go (+2 -2)
  28. core/blockchain_sethead_test.go (+1 -1)
  29. core/blockchain_snapshot_test.go (+2 -2)
  30. core/blockchain_test.go (+197 -89)
  31. core/chain_makers.go (+3 -3)
  32. core/error.go (+3 -0)
  33. core/genesis.go (+1 -1)
  34. core/rawdb/accessors_chain_test.go (+1 -1)
  35. core/rawdb/chain_iterator.go (+3 -2)
  36. core/rawdb/database.go (+106 -7)
  37. core/rawdb/freezer.go (+20 -8)
  38. core/rawdb/schema.go (+6 -0)
  39. core/rawdb/table.go (+10 -0)
  40. core/state/database.go (+3 -0)
  41. core/state/pruner/pruner.go (+211 -0)
  42. core/state/snapshot/difflayer.go (+33 -3)
  43. core/state/snapshot/difflayer_test.go (+18 -18)
  44. core/state/snapshot/disklayer.go (+12 -2)
  45. core/state/snapshot/disklayer_test.go (+4 -4)
  46. core/state/snapshot/iterator_test.go (+45 -45)
  47. core/state/snapshot/journal.go (+1 -1)
  48. core/state/snapshot/snapshot.go (+14 -5)
  49. core/state/snapshot/snapshot_test.go (+12 -12)
  50. core/state/state_test.go (+9 -3)
  51. core/state/statedb.go (+194 -62)
  52. core/state/statedb_test.go (+26 -9)
  53. core/state/sync_test.go (+3 -1)
  54. core/state/trie_prefetcher.go (+15 -7)
  55. core/state_prefetcher.go (+46 -32)
  56. core/state_processor.go (+13 -5)
  57. core/types.go (+1 -1)
  58. core/vm/access_list_tracer.go (+7 -2)
  59. core/vm/evm.go (+61 -15)
  60. core/vm/instructions.go (+4 -0)
  61. core/vm/interpreter.go (+10 -10)
  62. core/vm/logger.go (+39 -23)
  63. core/vm/logger_json.go (+13 -6)
  64. core/vm/logger_test.go (+2 -1)
  65. core/vm/runtime/runtime_test.go (+262 -10)
  66. core/vm/stack.go (+1 -1)
  67. eth/api_backend.go (+2 -2)
  68. eth/api_test.go (+4 -2)
  69. eth/backend.go (+3 -0)
  70. eth/downloader/downloader.go (+2 -2)
  71. eth/ethconfig/config.go (+1 -0)
  72. eth/handler.go (+6 -13)
  73. eth/handler_eth_test.go (+70 -0)
  74. eth/peerset.go (+32 -4)
  75. eth/protocols/diff/handshake.go (+1 -1)
  76. eth/state_accessor.go (+26 -4)
  77. eth/tracers/api.go (+154 -64)
  78. eth/tracers/api_test.go (+139 -198)
  79. eth/tracers/internal/tracers/call_tracer_js.js (+112 -0)
  80. eth/tracers/internal/tracetest/calltrace_test.go (+394 -0)
  81. eth/tracers/internal/tracetest/testdata/call_tracer/create.json (+0 -0)
  82. eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json (+0 -0)
  83. eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json (+0 -0)
  84. eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json (+12 -0)
  85. eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json (+63 -0)
  86. eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json (+0 -0)
  87. eth/tracers/internal/tracetest/testdata/call_tracer/oog.json (+0 -0)
  88. eth/tracers/internal/tracetest/testdata/call_tracer/revert.json (+0 -0)
  89. eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json (+0 -0)
  90. eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json (+75 -0)
  91. eth/tracers/internal/tracetest/testdata/call_tracer/simple.json (+18 -0)
  92. eth/tracers/internal/tracetest/testdata/call_tracer/throw.json (+0 -0)
  93. eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json (+46 -0)
  94. eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json (+12 -0)
  95. eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json (+12 -0)
  96. eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json (+0 -0)
  97. eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json (+0 -0)
  98. eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json (+12 -0)
  99. eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json (+12 -0)
  100. eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json (+18 -0)

+ 8 - 0
.github/generate_change_log.sh

@@ -24,6 +24,10 @@ TESTNET_ZIP_SUM="$(checksum ./testnet.zip)"
 LINUX_BIN_SUM="$(checksum ./linux/geth)"
 MAC_BIN_SUM="$(checksum ./macos/geth)"
 WINDOWS_BIN_SUM="$(checksum ./windows/geth.exe)"
+ARM5_BIN_SUM="$(checksum ./arm5/geth-linux-arm-5)"
+ARM6_BIN_SUM="$(checksum ./arm6/geth-linux-arm-6)"
+ARM7_BIN_SUM="$(checksum ./arm7/geth-linux-arm-7)"
+ARM64_BIN_SUM="$(checksum ./arm64/geth-linux-arm64)"
 OUTPUT=$(cat <<-END
 ## Changelog\n
 ${CHANGE_LOG}\n
@@ -35,6 +39,10 @@ ${CHANGE_LOG}\n
 | geth_linux | ${LINUX_BIN_SUM} |\n
 | geth_mac  | ${MAC_BIN_SUM} |\n
 | geth_windows  | ${WINDOWS_BIN_SUM} |\n
+| geth_linux_arm5  | ${ARM5_BIN_SUM} |\n
+| geth_linux_arm6  | ${ARM6_BIN_SUM} |\n
+| geth_linux_arm7  | ${ARM7_BIN_SUM} |\n
+| geth_linux_arm64  | ${ARM64_BIN_SUM} |\n
 END
 )
 

+ 2 - 2
.github/release.env

@@ -1,2 +1,2 @@
-MAINNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.6/mainnet.zip"
-TESTNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.6/testnet.zip"
+MAINNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.7/mainnet.zip"
+TESTNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.7/testnet.zip"

+ 105 - 0
.github/workflows/pre-release.yml

@@ -45,6 +45,19 @@ jobs:
       - name: Build Binary for ${{matrix.os}}
         run: make geth
 
+      # ==============================
+      #       Cross Compile for ARM
+      # ==============================
+
+      - name: Build Binary for ARM
+        if: matrix.os == 'ubuntu-18.04'
+        env:
+          GOPATH: /home/runner/work/woodpecker/go
+        run: |
+          mkdir -p $GOPATH/src/github.com/binance-chain/bsc/
+          cp -r ./* $GOPATH/src/github.com/binance-chain/bsc/
+          cd $GOPATH/src/github.com/binance-chain/bsc/ && make geth-linux-arm
+
       # ==============================
       #       Upload artifacts
       # ==============================
@@ -70,6 +83,34 @@ jobs:
           name: windows
           path: ./build/bin/geth.exe
 
+      - name: Upload ARM-5 Build
+        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-18.04'
+        with:
+          name: arm5
+          path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-5
+      
+      - name: Upload ARM-6 Build
+        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-18.04'
+        with:
+          name: arm6
+          path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-6
+
+      - name: Upload ARM-7 Build
+        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-18.04'
+        with:
+          name: arm7
+          path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-7
+
+      - name: Upload ARM-64 Build
+        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-18.04'
+        with:
+          name: arm64
+          path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm64
+
   release:
     name: Release
     needs: build
@@ -102,6 +143,30 @@ jobs:
         with:
           name: windows
           path: ./windows
+
+      - name: Download Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: arm5
+          path: ./arm5
+      
+      - name: Download Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: arm6
+          path: ./arm6
+      
+      - name: Download Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: arm7
+          path: ./arm7
+      
+      - name: Download Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: arm64
+          path: ./arm64
       
       - name: Download Config File
         run: |
@@ -161,6 +226,46 @@ jobs:
           asset_path: ./windows/geth.exe
           asset_name: geth_windows.exe
           asset_content_type: application/octet-stream
+
+      - name: Upload Release Asset - Linux ARM 5
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./arm5/geth-linux-arm-5
+          asset_name: geth-linux-arm-5
+          asset_content_type: application/octet-stream
+      
+      - name: Upload Release Asset - Linux ARM 6
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./arm6/geth-linux-arm-6
+          asset_name: geth-linux-arm-6
+          asset_content_type: application/octet-stream
+
+      - name: Upload Release Asset - Linux ARM 7
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./arm7/geth-linux-arm-7
+          asset_name: geth-linux-arm-7
+          asset_content_type: application/octet-stream
+
+      - name: Upload Release Asset - Linux ARM 64
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./arm64/geth-linux-arm64
+          asset_name: geth-linux-arm64
+          asset_content_type: application/octet-stream
       
       - name: Upload Release Asset - MAINNET.ZIP
         uses: actions/upload-release-asset@v1

+ 105 - 0
.github/workflows/release.yml

@@ -46,6 +46,19 @@ jobs:
       - name: Build Binary for ${{matrix.os}}
         run: make geth
 
+      # ==============================
+      #       Cross Compile for ARM
+      # ==============================
+
+      - name: Build Binary for ARM
+        if: matrix.os == 'ubuntu-18.04'
+        env:
+          GOPATH: /home/runner/work/woodpecker/go
+        run: |
+          mkdir -p $GOPATH/src/github.com/binance-chain/bsc/
+          cp -r ./* $GOPATH/src/github.com/binance-chain/bsc/
+          cd $GOPATH/src/github.com/binance-chain/bsc/ && make geth-linux-arm
+
       # ==============================
       #       Upload artifacts
       # ==============================
@@ -71,6 +84,34 @@ jobs:
           name: windows
           path: ./build/bin/geth.exe
 
+      - name: Upload ARM-5 Build
+        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-18.04'
+        with:
+          name: arm5
+          path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-5
+      
+      - name: Upload ARM-6 Build
+        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-18.04'
+        with:
+          name: arm6
+          path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-6
+
+      - name: Upload ARM-7 Build
+        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-18.04'
+        with:
+          name: arm7
+          path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm-7
+
+      - name: Upload ARM-64 Build
+        uses: actions/upload-artifact@v2
+        if: matrix.os == 'ubuntu-18.04'
+        with:
+          name: arm64
+          path: /home/runner/work/woodpecker/go/src/github.com/binance-chain/bsc/build/bin/geth-linux-arm64
+
   release:
     name: Release
     needs: build
@@ -104,6 +145,30 @@ jobs:
           name: windows
           path: ./windows
       
+      - name: Download Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: arm5
+          path: ./arm5
+      
+      - name: Download Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: arm6
+          path: ./arm6
+      
+      - name: Download Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: arm7
+          path: ./arm7
+      
+      - name: Download Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: arm64
+          path: ./arm64
+      
       - name: Download Config File
         run: |
           . ./.github/release.env
@@ -171,6 +236,46 @@ jobs:
           asset_name: geth_windows.exe
           asset_content_type: application/octet-stream
       
+      - name: Upload Release Asset - Linux ARM 5
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./arm5/geth-linux-arm-5
+          asset_name: geth-linux-arm-5
+          asset_content_type: application/octet-stream
+      
+      - name: Upload Release Asset - Linux ARM 6
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./arm6/geth-linux-arm-6
+          asset_name: geth-linux-arm-6
+          asset_content_type: application/octet-stream
+
+      - name: Upload Release Asset - Linux ARM 7
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./arm7/geth-linux-arm-7
+          asset_name: geth-linux-arm-7
+          asset_content_type: application/octet-stream
+
+      - name: Upload Release Asset - Linux ARM 64
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing its ID to get its outputs object, which includes an `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
+          asset_path: ./arm64/geth-linux-arm64
+          asset_name: geth-linux-arm64
+          asset_content_type: application/octet-stream
+      
       - name: Upload Release Asset - MAINNET.ZIP
         uses: actions/upload-release-asset@v1
         env:

+ 2 - 0
.gitignore

@@ -48,3 +48,5 @@ profile.cov
 /dashboard/assets/package-lock.json
 
 **/yarn-error.log
+cmd/geth/node/
+cmd/geth/__debug_bin

+ 15 - 0
CHANGELOG.md

@@ -1,5 +1,20 @@
 # Changelog
 
+## v1.1.8
+FEATURES
+* [\#668](https://github.com/binance-chain/bsc/pull/668) implement State Verification && Snapshot Commit pipeline
+* [\#581](https://github.com/binance-chain/bsc/pull/581) implement geth native trace 
+* [\#543](https://github.com/binance-chain/bsc/pull/543) implement offline block prune tools
+
+IMPROVEMENT
+* [\#704](https://github.com/binance-chain/bsc/pull/704) prefetch state by applying the transactions within one block 
+* [\#713](https://github.com/binance-chain/bsc/pull/713) add ARM binaries for release pipeline
+
+BUGFIX
+* [\#667](https://github.com/binance-chain/bsc/pull/667) trie: reject deletions when verifying range proofs
+* [\#643](https://github.com/binance-chain/bsc/pull/643) add timeout for stopping p2p server to fix can not gracefully shutdown issue
+* [\#740](https://github.com/binance-chain/bsc/pull/740) update discord link which won't expire 
+
 ## v1.1.7
 
 BUGFIX

+ 0 - 14
PULL_REQUEST_TEMPLATE

@@ -15,17 +15,3 @@ add an example CLI or API response...
 Notable changes: 
 * add each change in a bullet point here
 * ...
-
-### Preflight checks
-
-- [ ] build passed (`make build`)
-- [ ] tests passed (`make test`)
-- [ ] manual transaction test passed
-
-### Already reviewed by
-
-...
-
-### Related issues
-
-... reference related issue #'s here ...

+ 2 - 2
README.md

@@ -7,7 +7,7 @@ Binance Smart Chain starts its development based on go-ethereum fork. So you may
 [![API Reference](
 https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
 )](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
-[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/5Z3C3SdxDw)
+[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/z2VpC455eU)
 
 But from that baseline of EVM compatible, Binance Smart Chain introduces  a system of 21 validators with Proof of Staked Authority (PoSA) consensus that can support short block time and lower fees. The most bonded validator candidates of staking will become validators and produce blocks. The double-sign detection and other slashing logic guarantee security, stability, and chain finality.
 
@@ -203,7 +203,7 @@ from anyone on the internet, and are grateful for even the smallest of fixes!
 
 If you'd like to contribute to bsc, please fork, fix, commit and send a pull request
 for the maintainers to review and merge into the main code base. If you wish to submit
-more complex changes though, please check up with the core devs first on [our discord channel](https://discord.gg/5Z3C3SdxDw)
+more complex changes though, please check up with the core devs first on [our discord channel](https://discord.gg/z2VpC455eU)
 to ensure those changes are in line with the general philosophy of the project and/or get
 some early feedback which can make both your efforts much lighter as well as our review
 and merge procedures quick and simple.

+ 1 - 1
cmd/evm/disasm.go

@@ -46,7 +46,7 @@ func disasmCmd(ctx *cli.Context) error {
 	case ctx.GlobalIsSet(InputFlag.Name):
 		in = ctx.GlobalString(InputFlag.Name)
 	default:
-		return errors.New("Missing filename or --input value")
+		return errors.New("missing filename or --input value")
 	}
 
 	code := strings.TrimSpace(in)

+ 7 - 3
cmd/evm/internal/t8ntool/execution.go

@@ -82,7 +82,7 @@ type stEnvMarshaling struct {
 // Apply applies a set of transactions to a pre-state
 func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 	txs types.Transactions, miningReward int64,
-	getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) {
+	getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) {
 
 	// Capture errors for BLOCKHASH operation, if we haven't been supplied the
 	// required blockhashes
@@ -223,7 +223,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		statedb.AddBalance(pre.Env.Coinbase, minerReward)
 	}
 	// Commit block
-	root, _, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber))
+	statedb.Finalise(chainConfig.IsEIP158(vmContext.BlockNumber))
+	statedb.AccountsIntermediateRoot()
+	root, _, err := statedb.Commit(nil)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Could not commit state: %v", err)
 		return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
@@ -252,7 +254,9 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB
 		}
 	}
 	// Commit and re-open to start with a clean state.
-	root, _, _ := statedb.Commit(false)
+	statedb.Finalise(false)
+	statedb.AccountsIntermediateRoot()
+	root, _, _ := statedb.Commit(nil)
 	statedb, _ = state.New(root, sdb, nil)
 	return statedb
 }
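
This three-step sequence recurs throughout the PR wherever state used to be committed in one call: Finalise folds the dirty objects into the pending tries, AccountsIntermediateRoot pre-computes the account roots, and Commit no longer takes the old deleteEmptyObjects bool (nil is passed here exactly as in the diff). A minimal sketch of a wrapper around the new calls, assuming only the StateDB methods visible in this diff; the helper package and function are hypothetical:

package statehelper // hypothetical wrapper, not part of the PR

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
)

// commitState mirrors the new commit pipeline introduced by this PR.
func commitState(statedb *state.StateDB, deleteEmptyObjects bool) (common.Hash, error) {
	statedb.Finalise(deleteEmptyObjects) // fold dirty objects into the pending tries
	statedb.AccountsIntermediateRoot()   // pre-compute the account intermediate roots
	root, _, err := statedb.Commit(nil)  // nil argument, as in the diff above
	return root, err
}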

+ 2 - 2
cmd/evm/internal/t8ntool/flags.go

@@ -30,7 +30,7 @@ var (
 		Name:  "trace",
 		Usage: "Output full trace logs to files <txhash>.jsonl",
 	}
-	TraceDisableMemoryFlag = cli.BoolFlag{
+	TraceDisableMemoryFlag = cli.BoolTFlag{
 		Name:  "trace.nomemory",
 		Usage: "Disable full memory dump in traces",
 	}
@@ -38,7 +38,7 @@ var (
 		Name:  "trace.nostack",
 		Usage: "Disable stack output in traces",
 	}
-	TraceDisableReturnDataFlag = cli.BoolFlag{
+	TraceDisableReturnDataFlag = cli.BoolTFlag{
 		Name:  "trace.noreturndata",
 		Usage: "Disable return data output in traces",
 	}
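
The flag-type change above is behavioral, not cosmetic: in urfave/cli v1 a BoolTFlag defaults to true, so trace.nomemory and trace.noreturndata are now on unless explicitly disabled, and the transition.go diff below negates them into EnableMemory/EnableReturnData. A standalone sketch of the default-true behavior (the flag name is taken from the diff; the program itself is illustrative):

package main

import (
	"fmt"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// BoolTFlag reports true unless set to false on the command line.
		cli.BoolTFlag{Name: "trace.nomemory", Usage: "Disable full memory dump in traces"},
	}
	app.Action = func(ctx *cli.Context) error {
		// With no arguments this prints true, so EnableMemory = !true = false.
		fmt.Println("trace.nomemory:", ctx.Bool("trace.nomemory"))
		return nil
	}
	_ = app.Run(os.Args)
}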

+ 8 - 8
cmd/evm/internal/t8ntool/transition.go

@@ -81,10 +81,10 @@ func Main(ctx *cli.Context) error {
 
 	var (
 		err     error
-		tracer  vm.Tracer
+		tracer  vm.EVMLogger
 		baseDir = ""
 	)
-	var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error)
+	var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)
 
 	// If user specified a basedir, make sure it exists
 	if ctx.IsSet(OutputBasedir.Name) {
@@ -99,10 +99,10 @@ func Main(ctx *cli.Context) error {
 	if ctx.Bool(TraceFlag.Name) {
 		// Configure the EVM logger
 		logConfig := &vm.LogConfig{
-			DisableStack:      ctx.Bool(TraceDisableStackFlag.Name),
-			DisableMemory:     ctx.Bool(TraceDisableMemoryFlag.Name),
-			DisableReturnData: ctx.Bool(TraceDisableReturnDataFlag.Name),
-			Debug:             true,
+			DisableStack:     ctx.Bool(TraceDisableStackFlag.Name),
+			EnableMemory:     !ctx.Bool(TraceDisableMemoryFlag.Name),
+			EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name),
+			Debug:            true,
 		}
 		var prevFile *os.File
 		// This one closes the last file
@@ -111,7 +111,7 @@ func Main(ctx *cli.Context) error {
 				prevFile.Close()
 			}
 		}()
-		getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) {
+		getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
 			if prevFile != nil {
 				prevFile.Close()
 			}
@@ -123,7 +123,7 @@ func Main(ctx *cli.Context) error {
 			return vm.NewJSONLogger(logConfig, traceFile), nil
 		}
 	} else {
-		getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) {
+		getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) {
 			return nil, nil
 		}
 	}

+ 3 - 3
cmd/evm/main.go

@@ -113,7 +113,7 @@ var (
 		Name:  "receiver",
 		Usage: "The transaction receiver (execution context)",
 	}
-	DisableMemoryFlag = cli.BoolFlag{
+	DisableMemoryFlag = cli.BoolTFlag{
 		Name:  "nomemory",
 		Usage: "disable memory output",
 	}
@@ -125,9 +125,9 @@ var (
 		Name:  "nostorage",
 		Usage: "disable storage output",
 	}
-	DisableReturnDataFlag = cli.BoolFlag{
+	DisableReturnDataFlag = cli.BoolTFlag{
 		Name:  "noreturndata",
-		Usage: "disable return data output",
+		Usage: "enable return data output",
 	}
 	EVMInterpreterFlag = cli.StringFlag{
 		Name:  "vm.evm",

+ 9 - 7
cmd/evm/runner.go

@@ -108,15 +108,15 @@ func runCmd(ctx *cli.Context) error {
 	glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
 	log.Root().SetHandler(glogger)
 	logconfig := &vm.LogConfig{
-		DisableMemory:     ctx.GlobalBool(DisableMemoryFlag.Name),
-		DisableStack:      ctx.GlobalBool(DisableStackFlag.Name),
-		DisableStorage:    ctx.GlobalBool(DisableStorageFlag.Name),
-		DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
-		Debug:             ctx.GlobalBool(DebugFlag.Name),
+		EnableMemory:     !ctx.GlobalBool(DisableMemoryFlag.Name),
+		DisableStack:     ctx.GlobalBool(DisableStackFlag.Name),
+		DisableStorage:   ctx.GlobalBool(DisableStorageFlag.Name),
+		EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
+		Debug:            ctx.GlobalBool(DebugFlag.Name),
 	}
 
 	var (
-		tracer        vm.Tracer
+		tracer        vm.EVMLogger
 		debugLogger   *vm.StructLogger
 		statedb       *state.StateDB
 		chainConfig   *params.ChainConfig
@@ -268,7 +268,9 @@ func runCmd(ctx *cli.Context) error {
 	output, leftOverGas, stats, err := timedExec(bench, execFunc)
 
 	if ctx.GlobalBool(DumpFlag.Name) {
-		statedb.Commit(true)
+		statedb.Finalise(true)
+		statedb.AccountsIntermediateRoot()
+		statedb.Commit(nil)
 		statedb.IntermediateRoot(true)
 		fmt.Println(string(statedb.Dump(false, false, true)))
 	}

+ 7 - 6
cmd/evm/staterunner.go

@@ -59,13 +59,13 @@ func stateTestCmd(ctx *cli.Context) error {
 
 	// Configure the EVM logger
 	config := &vm.LogConfig{
-		DisableMemory:     ctx.GlobalBool(DisableMemoryFlag.Name),
-		DisableStack:      ctx.GlobalBool(DisableStackFlag.Name),
-		DisableStorage:    ctx.GlobalBool(DisableStorageFlag.Name),
-		DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
+		EnableMemory:     !ctx.GlobalBool(DisableMemoryFlag.Name),
+		DisableStack:     ctx.GlobalBool(DisableStackFlag.Name),
+		DisableStorage:   ctx.GlobalBool(DisableStorageFlag.Name),
+		EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
 	}
 	var (
-		tracer   vm.Tracer
+		tracer   vm.EVMLogger
 		debugger *vm.StructLogger
 	)
 	switch {
@@ -101,7 +101,8 @@ func stateTestCmd(ctx *cli.Context) error {
 			_, state, err := test.Run(st, cfg, false)
 			// print state root for evmlab tracing
 			if ctx.GlobalBool(MachineFlag.Name) && state != nil {
-				fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
+				root := state.IntermediateRoot(false)
+				fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", root)
 			}
 			if err != nil {
 				// Test failed, mark as so and dump any state to aid debugging

+ 3 - 3
cmd/geth/chaincmd.go

@@ -458,7 +458,7 @@ func importPreimages(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, false)
+	db := utils.MakeChainDatabase(ctx, stack, false, false)
 	start := time.Now()
 
 	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
@@ -477,7 +477,7 @@ func exportPreimages(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, true)
+	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	start := time.Now()
 
 	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
@@ -491,7 +491,7 @@ func dump(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, true)
+	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	for _, arg := range ctx.Args() {
 		var header *types.Header
 		if hashish(arg) {

+ 27 - 7
cmd/geth/dbcmd.go

@@ -62,6 +62,7 @@ Remove blockchain and state databases`,
 			dbPutCmd,
 			dbGetSlotsCmd,
 			dbDumpFreezerIndex,
+			ancientInspectCmd,
 		},
 	}
 	dbInspectCmd = cli.Command{
@@ -195,6 +196,16 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		},
 		Description: "This command displays information about the freezer index.",
 	}
+	ancientInspectCmd = cli.Command{
+		Action: utils.MigrateFlags(ancientInspect),
+		Name:   "inspect-reserved-oldest-blocks",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+		},
+		Usage: "Inspect the ancientStore information",
+		Description: `This command reads the current offset from the kvdb, which is the current offset and starting block number
+of the ancientStore, and also displays the number of blocks reserved in the ancientStore.`,
+	}
 )
 
 func removeDB(ctx *cli.Context) error {
@@ -282,12 +293,21 @@ func inspect(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, true)
+	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	defer db.Close()
 
 	return rawdb.InspectDatabase(db, prefix, start)
 }
 
+func ancientInspect(ctx *cli.Context) error {
+	stack, _ := makeConfigNode(ctx)
+	defer stack.Close()
+
+	db := utils.MakeChainDatabase(ctx, stack, true, true)
+	defer db.Close()
+	return rawdb.AncientInspect(db)
+}
+
 func showLeveldbStats(db ethdb.Stater) {
 	if stats, err := db.Stat("leveldb.stats"); err != nil {
 		log.Warn("Failed to read database stats", "error", err)
@@ -305,7 +325,7 @@ func dbStats(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, true)
+	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	defer db.Close()
 
 	showLeveldbStats(db)
@@ -316,7 +336,7 @@ func dbCompact(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, false)
+	db := utils.MakeChainDatabase(ctx, stack, false, false)
 	defer db.Close()
 
 	log.Info("Stats before compaction")
@@ -340,7 +360,7 @@ func dbGet(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, true)
+	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	defer db.Close()
 
 	key, err := hexutil.Decode(ctx.Args().Get(0))
@@ -365,7 +385,7 @@ func dbDelete(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, false)
+	db := utils.MakeChainDatabase(ctx, stack, false, false)
 	defer db.Close()
 
 	key, err := hexutil.Decode(ctx.Args().Get(0))
@@ -392,7 +412,7 @@ func dbPut(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, false)
+	db := utils.MakeChainDatabase(ctx, stack, false, false)
 	defer db.Close()
 
 	var (
@@ -426,7 +446,7 @@ func dbDumpTrie(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	db := utils.MakeChainDatabase(ctx, stack, true)
+	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	defer db.Close()
 	var (
 		root  []byte
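
With the subcommand registered above, the offset can be checked offline with an invocation along the lines of geth db inspect-reserved-oldest-blocks --datadir <datadir> (shape reconstructed from the command and flag definitions in this diff); it reports the starting block number of the ancientStore and how many blocks it still holds.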

+ 8 - 0
cmd/geth/main.go

@@ -39,6 +39,11 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/node"
+
+	// Force-load the tracer engines to trigger registration
+	_ "github.com/ethereum/go-ethereum/eth/tracers/js"
+	_ "github.com/ethereum/go-ethereum/eth/tracers/native"
+
 	"gopkg.in/urfave/cli.v1"
 )
 
@@ -67,6 +72,7 @@ var (
 		utils.DirectBroadcastFlag,
 		utils.DisableSnapProtocolFlag,
 		utils.DiffSyncFlag,
+		utils.PipeCommitFlag,
 		utils.RangeLimitFlag,
 		utils.USBFlag,
 		utils.SmartCardDaemonPathFlag,
@@ -160,6 +166,8 @@ var (
 		utils.MinerNotifyFullFlag,
 		configFileFlag,
 		utils.CatalystFlag,
+		utils.BlockAmountReserved,
+		utils.CheckSnapshotWithMPT,
 	}
 
 	rpcFlags = []cli.Flag{
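
The two blank imports work through Go's init-time registration idiom: each tracer package registers its constructors in an init function, so importing it for side effects alone makes the tracers available. A self-contained analogue of the pattern (the registry and names are illustrative, not the actual tracers API):

package main

import "fmt"

// tracers plays the role of the registry the real tracer packages write into.
var tracers = map[string]func() string{}

func register(name string, ctor func() string) { tracers[name] = ctor }

// init runs as soon as the package is linked into the binary, which is why a
// blank import ("_") of a tracer package suffices to register its tracers.
func init() { register("callTracer", func() string { return "call trace" }) }

func main() {
	fmt.Printf("%d tracer(s) registered\n", len(tracers))
}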

+ 242 - 0
cmd/geth/pruneblock_test.go

@@ -0,0 +1,242 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
+	"github.com/ethereum/go-ethereum/consensus/ethash"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/state/pruner"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+	canonicalSeed               = 1
+	blockPruneBackUpBlockNumber = 128
+	key, _                      = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+	address                     = crypto.PubkeyToAddress(key.PublicKey)
+	balance                     = big.NewInt(10000000)
+	gspec                       = &core.Genesis{Config: params.TestChainConfig, Alloc: core.GenesisAlloc{address: {Balance: balance}}}
+	signer                      = types.LatestSigner(gspec.Config)
+	config                      = &core.CacheConfig{
+		TrieCleanLimit: 256,
+		TrieDirtyLimit: 256,
+		TrieTimeLimit:  5 * time.Minute,
+		SnapshotLimit:  0, // Disable snapshot
+		TriesInMemory:  128,
+	}
+	engine = ethash.NewFullFaker()
+)
+
+func TestOfflineBlockPrune(t *testing.T) {
+	// Corner case: 0 blocks remain in the ancientStore.
+	testOfflineBlockPruneWithAmountReserved(t, 0)
+	//General case.
+	testOfflineBlockPruneWithAmountReserved(t, 100)
+}
+
+func testOfflineBlockPruneWithAmountReserved(t *testing.T, amountReserved uint64) {
+	datadir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("Failed to create temporary datadir: %v", err)
+	}
+	os.RemoveAll(datadir)
+
+	chaindbPath := filepath.Join(datadir, "chaindata")
+	oldAncientPath := filepath.Join(chaindbPath, "ancient")
+	newAncientPath := filepath.Join(chaindbPath, "ancient_back")
+
+	db, blocks, blockList, receiptsList, externTdList, startBlockNumber, _ := BlockchainCreator(t, chaindbPath, oldAncientPath, amountReserved)
+	node, _ := startEthService(t, gspec, blocks, chaindbPath)
+	defer node.Close()
+
+	// Initialize a block pruner; only amountReserved blocks backward are kept.
+	testBlockPruner := pruner.NewBlockPruner(db, node, oldAncientPath, newAncientPath, amountReserved)
+	if err != nil {
+		t.Fatalf("failed to make new blockpruner: %v", err)
+	}
+	if err := testBlockPruner.BlockPruneBackUp(chaindbPath, 512, utils.MakeDatabaseHandles(), "", false, false); err != nil {
+		t.Fatalf("Failed to back up block: %v", err)
+	}
+
+	dbBack, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, newAncientPath, "", false, true, false)
+	if err != nil {
+		t.Fatalf("failed to create database with ancient backend")
+	}
+	defer dbBack.Close()
+
+	// Check whether the backed-up data matches the original.
+	for blockNumber := startBlockNumber; blockNumber < startBlockNumber+amountReserved; blockNumber++ {
+		blockHash := rawdb.ReadCanonicalHash(dbBack, blockNumber)
+		block := rawdb.ReadBlock(dbBack, blockHash, blockNumber)
+
+		if block.Hash() != blockHash {
+			t.Fatalf("block data did not match between oldDb and backupDb")
+		}
+		if blockList[blockNumber-startBlockNumber].Hash() != blockHash {
+			t.Fatalf("block data did not match between oldDb and backupDb")
+		}
+
+		receipts := rawdb.ReadRawReceipts(dbBack, blockHash, blockNumber)
+		if err := checkReceiptsRLP(receipts, receiptsList[blockNumber-startBlockNumber]); err != nil {
+			t.Fatalf("receipts did not match between oldDb and backupDb")
+		}
+		// Calculate the total difficulty of the block
+		td := rawdb.ReadTd(dbBack, blockHash, blockNumber)
+		if td == nil {
+			t.Fatalf("Failed to ReadTd: %v", consensus.ErrUnknownAncestor)
+		}
+		if td.Cmp(externTdList[blockNumber-startBlockNumber]) != 0 {
+			t.Fatalf("externTd did not match between oldDb and backupDb")
+		}
+	}
+
+	// Check whether the ancientDb freezer was replaced successfully.
+	testBlockPruner.AncientDbReplacer()
+	if _, err := os.Stat(newAncientPath); err != nil {
+		if !os.IsNotExist(err) {
+			t.Fatalf("ancientDb replaced unsuccessfully")
+		}
+	}
+	if _, err := os.Stat(oldAncientPath); err != nil {
+		t.Fatalf("ancientDb replaced unsuccessfully")
+	}
+}
+
+func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemain uint64) (ethdb.Database, []*types.Block, []*types.Block, []types.Receipts, []*big.Int, uint64, *core.BlockChain) {
+	// Create a database with an ancient freezer.
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, AncientPath, "", false, false, false)
+	if err != nil {
+		t.Fatalf("failed to create database with ancient backend")
+	}
+	defer db.Close()
+	genesis := gspec.MustCommit(db)
+	// Initialize a fresh chain with only a genesis block
+	blockchain, err := core.NewBlockChain(db, config, gspec.Config, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to create chain: %v", err)
+	}
+
+	// Make chain starting from genesis
+	blocks, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 500, func(i int, block *core.BlockGen) {
+		block.SetCoinbase(common.Address{0: byte(canonicalSeed), 19: byte(i)})
+		tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil), signer, key)
+		if err != nil {
+			panic(err)
+		}
+		block.AddTx(tx)
+		block.SetDifficulty(big.NewInt(1000000))
+	})
+	if _, err := blockchain.InsertChain(blocks); err != nil {
+		t.Fatalf("Failed to import canonical chain start: %v", err)
+	}
+
+	// Force run a freeze cycle
+	type freezer interface {
+		Freeze(threshold uint64) error
+		Ancients() (uint64, error)
+	}
+	db.(freezer).Freeze(10)
+
+	frozen, err := db.Ancients()
+	// Make sure there are frozen items.
+	if err != nil || frozen == 0 {
+		t.Fatalf("Failed to import canonical chain start: %v", err)
+	}
+	if frozen < blockRemain {
+		t.Fatalf("block amount is not enough for pruning: %v", err)
+	}
+
+	oldOffSet := rawdb.ReadOffSetOfCurrentAncientFreezer(db)
+	// Get the actual start block number.
+	startBlockNumber := frozen - blockRemain + oldOffSet
+	// Initialize the slice to buffer the block data left.
+	blockList := make([]*types.Block, 0, blockPruneBackUpBlockNumber)
+	receiptsList := make([]types.Receipts, 0, blockPruneBackUpBlockNumber)
+	externTdList := make([]*big.Int, 0, blockPruneBackUpBlockNumber)
+	// All ancient data within the most recent 128 blocks write into memory buffer for future new ancient_back directory usage.
+	for blockNumber := startBlockNumber; blockNumber < frozen+oldOffSet; blockNumber++ {
+		blockHash := rawdb.ReadCanonicalHash(db, blockNumber)
+		block := rawdb.ReadBlock(db, blockHash, blockNumber)
+		blockList = append(blockList, block)
+		receipts := rawdb.ReadRawReceipts(db, blockHash, blockNumber)
+		receiptsList = append(receiptsList, receipts)
+		// Calculate the total difficulty of the block
+		td := rawdb.ReadTd(db, blockHash, blockNumber)
+		if td == nil {
+			t.Fatalf("Failed to ReadTd: %v", consensus.ErrUnknownAncestor)
+		}
+		externTdList = append(externTdList, td)
+	}
+
+	return db, blocks, blockList, receiptsList, externTdList, startBlockNumber, blockchain
+}
+
+func checkReceiptsRLP(have, want types.Receipts) error {
+	if len(have) != len(want) {
+		return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want))
+	}
+	for i := 0; i < len(want); i++ {
+		rlpHave, err := rlp.EncodeToBytes(have[i])
+		if err != nil {
+			return err
+		}
+		rlpWant, err := rlp.EncodeToBytes(want[i])
+		if err != nil {
+			return err
+		}
+		if !bytes.Equal(rlpHave, rlpWant) {
+			return fmt.Errorf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
+		}
+	}
+	return nil
+}
+
+// startEthService creates a full node instance for testing.
+func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block, chaindbPath string) (*node.Node, *eth.Ethereum) {
+	t.Helper()
+	n, err := node.New(&node.Config{DataDir: chaindbPath})
+	if err != nil {
+		t.Fatal("can't create node:", err)
+	}
+
+	if err := n.Start(); err != nil {
+		t.Fatal("can't start node:", err)
+	}
+
+	return n, nil
+}

+ 189 - 5
cmd/geth/snapshot.go

@@ -19,8 +19,14 @@ package main
 import (
 	"bytes"
 	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
 	"time"
 
+	"github.com/prometheus/tsdb/fileutil"
+	cli "gopkg.in/urfave/cli.v1"
+
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -28,10 +34,11 @@ import (
 	"github.com/ethereum/go-ethereum/core/state/pruner"
 	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
-	cli "gopkg.in/urfave/cli.v1"
 )
 
 var (
@@ -78,6 +85,30 @@ WARNING: It's necessary to delete the trie clean cache after the pruning.
 If you specify another directory for the trie clean cache via "--cache.trie.journal"
 during the use of Geth, please also specify it here for correct deletion. Otherwise
 the trie clean cache with default directory will be deleted.
+`,
+			},
+			{
+				Name:     "prune-block",
+				Usage:    "Prune block data offline",
+				Action:   utils.MigrateFlags(pruneBlock),
+				Category: "MISCELLANEOUS COMMANDS",
+				Flags: []cli.Flag{
+					utils.DataDirFlag,
+					utils.AncientFlag,
+					utils.BlockAmountReserved,
+					utils.TriesInMemoryFlag,
+					utils.CheckSnapshotWithMPT,
+				},
+				Description: `
+geth offline prune-block prunes the block data in the ancientdb.
+The number of blocks expected to remain after the prune can be specified via block-amount-reserved;
+the command prunes the ancientdb and keeps only that amount of old block data.
+The brief workflow is: back up the specified number of most recent blocks from the original ancientdb
+into a new ancient_backup, then delete the original ancientdb directory and rename ancient_backup into its place,
+and finally assemble the statedb and the new ancientdb together.
+This matters because block data is moved into the ancient store once it becomes old enough
+(exceeding the 90000-block threshold), so over time disk usage grows large and is occupied mainly by the ancientdb;
+pruning the block data keeps this in check.
 `,
 			},
 			{
@@ -149,11 +180,164 @@ It's also usable without snapshot enabled.
 	}
 )
 
+func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) {
+	// The number of trie layers kept in memory.
+	TriesInMemory := int(ctx.GlobalUint64(utils.TriesInMemoryFlag.Name))
+	chaindb := utils.MakeChainDatabase(ctx, stack, false, true)
+	defer chaindb.Close()
+
+	if !ctx.GlobalBool(utils.CheckSnapshotWithMPT.Name) {
+		return chaindb, nil
+	}
+	headBlock := rawdb.ReadHeadBlock(chaindb)
+	if headBlock == nil {
+		return nil, errors.New("failed to load head block")
+	}
+	headHeader := headBlock.Header()
+	// Make sure the MPT and snapshot match before pruning; otherwise the node cannot start.
+	snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, TriesInMemory, headBlock.Root(), false, false, false)
+	if err != nil {
+		log.Error("snaptree error", "err", err)
+		return nil, err // The relevant snapshot(s) might not exist
+	}
+
+	// Use the HEAD-(n-1) as the target root. The reason for picking it is:
+	// - in most of the normal cases, the related state is available
+	// - the probability of this layer being reorged is very low
+
+	// Retrieve all snapshot layers from the current HEAD.
+	// In theory there are n difflayers + 1 disk layer present,
+	// so n diff layers are expected to be returned.
+	layers := snaptree.Snapshots(headHeader.Root, TriesInMemory, true)
+	if len(layers) != TriesInMemory {
+		// Reject if the accumulated diff layers are less than n. It
+		// means in most of normal cases, there is no associated state
+		// with bottom-most diff layer.
+		log.Error("snapshot layers != TriesInMemory", "err", err)
+		return nil, fmt.Errorf("snapshot not old enough yet: need %d more blocks", TriesInMemory-len(layers))
+	}
+	// Use the bottom-most diff layer as the target
+	targetRoot := layers[len(layers)-1].Root()
+
+	// Ensure the root is really present. The weak assumption
+	// is the presence of root can indicate the presence of the
+	// entire trie.
+	if blob := rawdb.ReadTrieNode(chaindb, targetRoot); len(blob) == 0 {
+		// The special case is for clique based networks(rinkeby, goerli
+		// and some other private networks), it's possible that two
+		// consecutive blocks will have same root. In this case snapshot
+		// difflayer won't be created. So HEAD-(n-1) may not paired with
+		// head-(n-1) layer. Instead the paired layer is higher than the
+		// bottom-most diff layer. Try to find the bottom-most snapshot
+		// layer with state available.
+		//
+		// Note HEAD is ignored. Usually there is the associated
+		// state available, but we don't want to use the topmost state
+		// as the pruning target.
+		var found bool
+		for i := len(layers) - 2; i >= 1; i-- {
+			if blob := rawdb.ReadTrieNode(chaindb, layers[i].Root()); len(blob) != 0 {
+				targetRoot = layers[i].Root()
+				found = true
+				log.Info("Selecting middle-layer as the pruning target", "root", targetRoot, "depth", i)
+				break
+			}
+		}
+		if !found {
+			if blob := rawdb.ReadTrieNode(chaindb, snaptree.DiskRoot()); len(blob) != 0 {
+				targetRoot = snaptree.DiskRoot()
+				found = true
+				log.Info("Selecting disk-layer as the pruning target", "root", targetRoot)
+			}
+		}
+		if !found {
+			if len(layers) > 0 {
+				log.Error("no snapshot paired state")
+				return nil, errors.New("no snapshot paired state")
+			}
+			return nil, fmt.Errorf("associated state[%x] is not present", targetRoot)
+		}
+	} else {
+		if len(layers) > 0 {
+			log.Info("Selecting bottom-most difflayer as the pruning target", "root", targetRoot, "height", headHeader.Number.Uint64()-uint64(len(layers)-1))
+		} else {
+			log.Info("Selecting user-specified state as the pruning target", "root", targetRoot)
+		}
+	}
+	return chaindb, nil
+}
+
+func pruneBlock(ctx *cli.Context) error {
+	stack, config := makeConfigNode(ctx)
+	defer stack.Close()
+	blockAmountReserved := ctx.GlobalUint64(utils.BlockAmountReserved.Name)
+	chaindb, err := accessDb(ctx, stack)
+	if err != nil {
+		return err
+	}
+	var newAncientPath string
+	oldAncientPath := ctx.GlobalString(utils.AncientFlag.Name)
+	if !filepath.IsAbs(oldAncientPath) {
+		oldAncientPath = stack.ResolvePath(oldAncientPath)
+	}
+
+	path, _ := filepath.Split(oldAncientPath)
+	if path == "" {
+		return errors.New("prune failed, did not specify the AncientPath")
+	}
+	newAncientPath = filepath.Join(path, "ancient_back")
+
+	blockpruner := pruner.NewBlockPruner(chaindb, stack, oldAncientPath, newAncientPath, blockAmountReserved)
+
+	lock, exist, err := fileutil.Flock(filepath.Join(oldAncientPath, "PRUNEFLOCK"))
+	if err != nil {
+		log.Error("file lock error", "err", err)
+		return err
+	}
+	if exist {
+		defer lock.Release()
+		log.Info("file lock existed, waiting for prune recovery and continue", "err", err)
+		if err := blockpruner.RecoverInterruption("chaindata", config.Eth.DatabaseCache, utils.MakeDatabaseHandles(), "", false); err != nil {
+			log.Error("Pruning failed", "err", err)
+			return err
+		}
+		log.Info("Block prune successfully")
+		return nil
+	}
+
+	if _, err := os.Stat(newAncientPath); err == nil {
+		// No file lock found for the old ancientDB but a new ancientDB exists, indicating geth was interrupted
+		// after the old ancientDB was removed, i.e. after a successful backup, so just rename the new ancientDB.
+		if err := blockpruner.AncientDbReplacer(); err != nil {
+			log.Error("Failed to rename new ancient directory")
+			return err
+		}
+		log.Info("Block prune successfully")
+		return nil
+	}
+	name := "chaindata"
+	if err := blockpruner.BlockPruneBackUp(name, config.Eth.DatabaseCache, utils.MakeDatabaseHandles(), "", false, false); err != nil {
+		log.Error("Failed to back up block", "err", err)
+		return err
+	}
+
+	log.Info("backup block successfully")
+
+	// After a successful backup, rename the new ancientdb to the original name and delete the old ancientdb.
+	if err := blockpruner.AncientDbReplacer(); err != nil {
+		return err
+	}
+
+	lock.Release()
+	log.Info("Block prune successfully")
+	return nil
+}
+
 func pruneState(ctx *cli.Context) error {
 	stack, config := makeConfigNode(ctx)
 	defer stack.Close()
 
-	chaindb := utils.MakeChainDatabase(ctx, stack, false)
+	chaindb := utils.MakeChainDatabase(ctx, stack, false, false)
 	pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name), ctx.GlobalUint64(utils.TriesInMemoryFlag.Name))
 	if err != nil {
 		log.Error("Failed to open snapshot tree", "err", err)
@@ -182,7 +366,7 @@ func verifyState(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	chaindb := utils.MakeChainDatabase(ctx, stack, true)
+	chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
 	headBlock := rawdb.ReadHeadBlock(chaindb)
 	if headBlock == nil {
 		log.Error("Failed to load head block")
@@ -220,7 +404,7 @@ func traverseState(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	chaindb := utils.MakeChainDatabase(ctx, stack, true)
+	chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
 	headBlock := rawdb.ReadHeadBlock(chaindb)
 	if headBlock == nil {
 		log.Error("Failed to load head block")
@@ -310,7 +494,7 @@ func traverseRawState(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	defer stack.Close()
 
-	chaindb := utils.MakeChainDatabase(ctx, stack, true)
+	chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
 	headBlock := rawdb.ReadHeadBlock(chaindb)
 	if headBlock == nil {
 		log.Error("Failed to load head block")

+ 2 - 0
cmd/geth/usage.go

@@ -58,6 +58,8 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.LightKDFFlag,
 			utils.WhitelistFlag,
 			utils.TriesInMemoryFlag,
+			utils.BlockAmountReserved,
+			utils.CheckSnapshotWithMPT,
 		},
 	},
 	{

+ 25 - 7
cmd/utils/flags.go

@@ -33,6 +33,10 @@ import (
 	"text/template"
 	"time"
 
+	pcsclite "github.com/gballet/go-libpcsclite"
+	gopsutil "github.com/shirou/gopsutil/mem"
+	"gopkg.in/urfave/cli.v1"
+
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/common"
@@ -66,9 +70,6 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/nat"
 	"github.com/ethereum/go-ethereum/p2p/netutil"
 	"github.com/ethereum/go-ethereum/params"
-	pcsclite "github.com/gballet/go-libpcsclite"
-	gopsutil "github.com/shirou/gopsutil/mem"
-	"gopkg.in/urfave/cli.v1"
 )
 
 func init() {
@@ -126,6 +127,10 @@ var (
 		Usage: "Enable diffy sync, Please note that enable diffsync will improve the syncing speed, " +
 			"but will degrade the security to light client level",
 	}
+	PipeCommitFlag = cli.BoolFlag{
+		Name:  "pipecommit",
+		Usage: "Enable MPT pipeline commit, it will improve syncing performance. It is an experimental feature(default is false)",
+	}
 	RangeLimitFlag = cli.BoolFlag{
 		Name:  "rangelimit",
 		Usage: "Enable 5000 blocks limit for range query",
@@ -827,6 +832,16 @@ var (
 		Name:  "catalyst",
 		Usage: "Catalyst mode (eth2 integration testing)",
 	}
+
+	BlockAmountReserved = cli.Uint64Flag{
+		Name:  "block-amount-reserved",
+		Usage: "Sets the expected remained amount of blocks for offline block prune",
+	}
+
+	CheckSnapshotWithMPT = cli.BoolFlag{
+		Name:  "check-snapshot-with-mpt",
+		Usage: "Enable checking between snapshot and MPT ",
+	}
 )
 
 // MakeDataDir retrieves the currently requested data directory, terminating
@@ -1621,6 +1636,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	if ctx.GlobalIsSet(DiffSyncFlag.Name) {
 		cfg.DiffSync = ctx.GlobalBool(DiffSyncFlag.Name)
 	}
+	if ctx.GlobalIsSet(PipeCommitFlag.Name) {
+		cfg.PipeCommit = ctx.GlobalBool(PipeCommitFlag.Name)
+	}
 	if ctx.GlobalIsSet(RangeLimitFlag.Name) {
 		cfg.RangeLimit = ctx.GlobalBool(RangeLimitFlag.Name)
 	}
@@ -1764,7 +1782,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		if ctx.GlobalIsSet(DataDirFlag.Name) {
 			// Check if we have an already initialized chain and fall back to
 			// that if so. Otherwise we need to generate a new genesis spec.
-			chaindb := MakeChainDatabase(ctx, stack, false) // TODO (MariusVanDerWijden) make this read only
+			chaindb := MakeChainDatabase(ctx, stack, false, false) // TODO (MariusVanDerWijden) make this read only
 			if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
 				cfg.Genesis = nil // fallback to db content
 			}
@@ -1883,7 +1901,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
 }
 
 // MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
-func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
+func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFreeze bool) ethdb.Database {
 	var (
 		cache   = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
 		handles = MakeDatabaseHandles()
@@ -1896,7 +1914,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.
 		chainDb, err = stack.OpenDatabase(name, cache, handles, "", readonly)
 	} else {
 		name := "chaindata"
-		chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "", readonly)
+		chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "", readonly, disableFreeze, false)
 	}
 	if err != nil {
 		Fatalf("Could not open database: %v", err)
@@ -1926,7 +1944,7 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
 // MakeChain creates a chain manager from set command line flags.
 func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
 	var err error
-	chainDb = MakeChainDatabase(ctx, stack, false) // TODO(rjl493456442) support read-only database
+	chainDb = MakeChainDatabase(ctx, stack, false, false) // TODO(rjl493456442) support read-only database
 	config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
 	if err != nil {
 		Fatalf("%v", err)

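For context, a minimal runnable sketch of how a boolean flag like --pipecommit is declared with gopkg.in/urfave/cli.v1 and copied into a config struct; the cfg type and app name are hypothetical stand-ins, and only the GlobalIsSet/GlobalBool idiom mirrors the SetEthConfig pattern above:

    package main

    import (
    	"fmt"

    	cli "gopkg.in/urfave/cli.v1"
    )

    // cfg is a hypothetical stand-in for ethconfig.Config.
    type cfg struct{ PipeCommit bool }

    var pipeCommitFlag = cli.BoolFlag{
    	Name:  "pipecommit",
    	Usage: "Enable MPT pipeline commit (experimental)",
    }

    func main() {
    	app := cli.NewApp()
    	app.Flags = []cli.Flag{pipeCommitFlag}
    	app.Action = func(ctx *cli.Context) error {
    		var c cfg
    		// Only override the default when the user set the flag explicitly.
    		if ctx.GlobalIsSet(pipeCommitFlag.Name) {
    			c.PipeCommit = ctx.GlobalBool(pipeCommitFlag.Name)
    		}
    		fmt.Println("pipecommit enabled:", c.PipeCommit)
    		return nil
    	}
    	if err := app.Run([]string{"app", "--pipecommit"}); err != nil {
    		fmt.Println(err)
    	}
    }
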
+ 4 - 0
consensus/clique/clique.go

@@ -560,7 +560,11 @@ func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
 func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB,
 	txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, []*types.Receipt, error) {
 	// No block rewards in PoA, so the state remains as is and uncles are dropped
+	var err error
 	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+	if err != nil {
+		return nil, nil, err
+	}
 	header.UncleHash = types.CalcUncleHash(nil)
 
 	// Assemble and return the final block for sealing

+ 22 - 7
core/block_validator.go

@@ -18,6 +18,7 @@ package core
 
 import (
 	"fmt"
+	"time"
 
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/state"
@@ -26,6 +27,8 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )
 
+const badBlockCacheExpire = 30 * time.Second
+
 // BlockValidator is responsible for validating block headers, uncles and
 // processed state.
 //
@@ -54,6 +57,9 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
 	if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
 		return ErrKnownBlock
 	}
+	if v.bc.isCachedBadBlock(block) {
+		return ErrKnownBadBlock
+	}
 	// Header validity is known at this point, check the uncles and transactions
 	header := block.Header()
 	if err := v.engine.VerifyUncles(v.bc, block); err != nil {
@@ -106,7 +112,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
 // transition, such as the amount of used gas, the receipt roots and the state
 // root itself. ValidateState returns nil if the validation succeeds, otherwise
 // an error is returned.
-func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
+func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64, skipHeavyVerify bool) error {
 	header := block.Header()
 	if block.GasUsed() != usedGas {
 		return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)
@@ -125,17 +131,26 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
 			receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil))
 			if receiptSha != header.ReceiptHash {
 				return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
-			} else {
-				return nil
 			}
+			return nil
 		},
-		func() error {
+	}
+	if skipHeavyVerify {
+		validateFuns = append(validateFuns, func() error {
+			if err := statedb.WaitPipeVerification(); err != nil {
+				return err
+			}
+			statedb.Finalise(v.config.IsEIP158(header.Number))
+			statedb.AccountsIntermediateRoot()
+			return nil
+		})
+	} else {
+		validateFuns = append(validateFuns, func() error {
 			if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
 				return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
-			} else {
-				return nil
 			}
-		},
+			return nil
+		})
 	}
 	validateRes := make(chan error, len(validateFuns))
 	for _, f := range validateFuns {

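The validateFuns/validateRes pattern above fans independent checks out to goroutines and collects their results over a buffered channel. A minimal sketch of that pattern, not the actual ValidateState implementation:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // runValidators executes independent checks concurrently and returns the
    // first error encountered, mirroring the validateFuns/validateRes idiom.
    func runValidators(checks []func() error) error {
    	res := make(chan error, len(checks))
    	for _, f := range checks {
    		f := f // capture the loop variable for the goroutine
    		go func() { res <- f() }()
    	}
    	var firstErr error
    	for range checks {
    		if err := <-res; err != nil && firstErr == nil {
    			firstErr = err
    		}
    	}
    	return firstErr
    }

    func main() {
    	err := runValidators([]func() error{
    		func() error { return nil },
    		func() error { return errors.New("invalid receipt root hash") },
    	})
    	fmt.Println(err) // invalid receipt root hash
    }
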
+ 177 - 77
core/blockchain.go

@@ -77,7 +77,8 @@ var (
 	blockReorgDropMeter     = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
 	blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil)
 
-	errInsertionInterrupted = errors.New("insertion is interrupted")
+	errInsertionInterrupted        = errors.New("insertion is interrupted")
+	errStateRootVerificationFailed = errors.New("state root verification failed")
 )
 
 const (
@@ -87,9 +88,11 @@ const (
 	diffLayerRLPCacheLimit = 256
 	receiptsCacheLimit     = 10000
 	txLookupCacheLimit     = 1024
+	maxBadBlockLimit       = 16
 	maxFutureBlocks        = 256
 	maxTimeFutureBlocks    = 30
 	maxBeyondBlocks        = 2048
+	prefetchTxNumber       = 100
 
 	diffLayerFreezerRecheckInterval = 3 * time.Second
 	diffLayerPruneRecheckInterval   = 1 * time.Second // The interval to prune unverified diff layers
@@ -98,6 +101,8 @@ const (
 	maxDiffForkDist                 = 11              // Maximum allowed backward distance from the chain head
 	maxDiffLimitForBroadcast        = 128             // Maximum number of unique diff layers a peer may have broadcasted
 
+	rewindBadBlockInterval = 1 * time.Second
+
 	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
 	//
 	// Changelog:
@@ -176,10 +181,11 @@ type BlockChain struct {
 	chainConfig *params.ChainConfig // Chain & network configuration
 	cacheConfig *CacheConfig        // Cache configuration for pruning
 
-	db     ethdb.Database // Low level persistent database to store final content in
-	snaps  *snapshot.Tree // Snapshot tree for fast trie leaf access
-	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
-	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
+	db         ethdb.Database // Low level persistent database to store final content in
+	snaps      *snapshot.Tree // Snapshot tree for fast trie leaf access
+	triegc     *prque.Prque   // Priority queue mapping block numbers to tries to gc
+	gcproc     time.Duration  // Accumulates canonical block processing for trie dumping
+	commitLock sync.Mutex     // commitLock protects the fields above from concurrent modification
 
 	// txLookupLimit is the maximum number of blocks from head whose tx indices
 	// are reserved:
@@ -212,6 +218,7 @@ type BlockChain struct {
 	blockCache    *lru.Cache     // Cache for the most recent entire blocks
 	txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
 	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing
+	badBlockCache *lru.Cache     // Cache for the blocks that failed to pass MPT root verification
 
 	// trusted diff layers
 	diffLayerCache             *lru.Cache   // Cache for the diffLayers
@@ -233,10 +240,12 @@ type BlockChain struct {
 	running       int32          // 0 if chain is running, 1 when stopped
 	procInterrupt int32          // interrupt signaler for block processing
 
-	engine    consensus.Engine
-	validator Validator // Block and state validator interface
-	processor Processor // Block transaction processor interface
-	vmConfig  vm.Config
+	engine     consensus.Engine
+	prefetcher Prefetcher
+	validator  Validator // Block and state validator interface
+	processor  Processor // Block transaction processor interface
+	vmConfig   vm.Config
+	pipeCommit bool
 
 	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
 	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
@@ -260,6 +269,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	receiptsCache, _ := lru.New(receiptsCacheLimit)
 	blockCache, _ := lru.New(blockCacheLimit)
 	txLookupCache, _ := lru.New(txLookupCacheLimit)
+	badBlockCache, _ := lru.New(maxBadBlockLimit)
+
 	futureBlocks, _ := lru.New(maxFutureBlocks)
 	diffLayerCache, _ := lru.New(diffLayerCacheLimit)
 	diffLayerRLPCache, _ := lru.New(diffLayerRLPCacheLimit)
@@ -281,6 +292,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		bodyRLPCache:          bodyRLPCache,
 		receiptsCache:         receiptsCache,
 		blockCache:            blockCache,
+		badBlockCache:         badBlockCache,
 		diffLayerCache:        diffLayerCache,
 		diffLayerRLPCache:     diffLayerRLPCache,
 		txLookupCache:         txLookupCache,
@@ -295,6 +307,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		diffNumToBlockHashes:  make(map[uint64]map[common.Hash]struct{}),
 		diffPeersToDiffHashes: make(map[string]map[common.Hash]struct{}),
 	}
+	bc.prefetcher = NewStatePrefetcher(chainConfig, bc, engine)
 	bc.validator = NewBlockValidator(chainConfig, bc, engine)
 	bc.processor = NewStateProcessor(chainConfig, bc, engine)
 
@@ -322,9 +335,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		rawdb.InitDatabaseFromFreezer(bc.db)
 		// If ancient database is not empty, reconstruct all missing
 		// indices in the background.
-		frozen, _ := bc.db.Ancients()
+		frozen, _ := bc.db.ItemAmountInAncient()
 		if frozen > 0 {
-			txIndexBlock = frozen
+			txIndexBlock, _ = bc.db.Ancients()
 		}
 	}
 	if err := bc.loadLastState(); err != nil {
@@ -359,7 +372,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		}
 	}
 	// Ensure that a previous crash in SetHead doesn't leave extra ancients
-	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
+	if frozen, err := bc.db.ItemAmountInAncient(); err == nil && frozen > 0 {
+		frozen, err = bc.db.Ancients()
+		if err != nil {
+			return nil, err
+		}
 		var (
 			needRewind bool
 			low        uint64
@@ -454,7 +471,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		go bc.trustedDiffLayerLoop()
 	}
 	go bc.untrustedDiffLayerPruneLoop()
-
+	if bc.pipeCommit {
+		// check current block and rewind invalid one
+		go bc.rewindInvalidHeaderBlockLoop()
+	}
 	return bc, nil
 }
 
@@ -570,6 +590,25 @@ func (bc *BlockChain) SetHead(head uint64) error {
 	return err
 }
 
+func (bc *BlockChain) tryRewindBadBlocks() {
+	bc.chainmu.Lock()
+	defer bc.chainmu.Unlock()
+	block := bc.CurrentBlock()
+	snaps := bc.snaps
+	// The snapshot exists, has been verified, and the verification result is false
+	if snaps != nil && snaps.Snapshot(block.Root()) != nil &&
+		snaps.Snapshot(block.Root()).Verified() && !snaps.Snapshot(block.Root()).WaitAndGetVerifyRes() {
+		// Rewind by one block
+		log.Warn("current block verification failed, rewinding to its parent", "height", block.NumberU64(), "hash", block.Hash())
+		bc.futureBlocks.Remove(block.Hash())
+		bc.badBlockCache.Add(block.Hash(), time.Now())
+		bc.diffLayerCache.Remove(block.Hash())
+		bc.diffLayerRLPCache.Remove(block.Hash())
+		bc.reportBlock(block, nil, errStateRootVerificationFailed)
+		bc.setHeadBeyondRoot(block.NumberU64()-1, common.Hash{})
+	}
+}
+
 // SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
 // that the rewind must pass the specified state root. This method is meant to be
 // used when rewinding with snapshots enabled to ensure that we go back further than
@@ -581,7 +620,10 @@ func (bc *BlockChain) SetHead(head uint64) error {
 func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
 	bc.chainmu.Lock()
 	defer bc.chainmu.Unlock()
+	return bc.setHeadBeyondRoot(head, root)
+}
 
+func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
 	// Track the block number of the requested root hash
 	var rootNumber uint64 // (no root == always 0)
 
@@ -1049,6 +1091,12 @@ func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
 
 // HasState checks if state trie is fully present in the database or not.
 func (bc *BlockChain) HasState(hash common.Hash) bool {
+	if bc.pipeCommit && bc.snaps != nil {
+		// If the parent snapshot is pending verification, treat the state as present
+		if s := bc.snaps.Snapshot(hash); s != nil && !s.Verified() {
+			return true
+		}
+	}
 	_, err := bc.stateCache.OpenTrie(hash)
 	return err == nil
 }
@@ -1660,8 +1708,78 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 		}
 		wg.Done()
 	}()
+
+	tryCommitTrieDB := func() error {
+		bc.commitLock.Lock()
+		defer bc.commitLock.Unlock()
+
+		triedb := bc.stateCache.TrieDB()
+		// If we're running an archive node, always flush
+		if bc.cacheConfig.TrieDirtyDisabled {
+			err := triedb.Commit(block.Root(), false, nil)
+			if err != nil {
+				return err
+			}
+		} else {
+			// Full but not archive node, do proper garbage collection
+			triedb.Reference(block.Root(), common.Hash{}) // metadata reference to keep trie alive
+			bc.triegc.Push(block.Root(), -int64(block.NumberU64()))
+
+			if current := block.NumberU64(); current > bc.triesInMemory {
+				// If we exceeded our memory allowance, flush matured singleton nodes to disk
+				var (
+					nodes, imgs = triedb.Size()
+					limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
+				)
+				if nodes > limit || imgs > 4*1024*1024 {
+					triedb.Cap(limit - ethdb.IdealBatchSize)
+				}
+				// Find the next state trie we need to commit
+				chosen := current - bc.triesInMemory
+
+				// If we exceeded our time allowance, flush an entire trie to disk
+				if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
+					canWrite := true
+					if posa, ok := bc.engine.(consensus.PoSA); ok {
+						if !posa.EnoughDistance(bc, block.Header()) {
+							canWrite = false
+						}
+					}
+					if canWrite {
+						// If the header is missing (canonical chain behind), we're reorging a low
+						// diff sidechain. Suspend committing until this operation is completed.
+						header := bc.GetHeaderByNumber(chosen)
+						if header == nil {
+							log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
+						} else {
+							// If we're exceeding limits but haven't reached a large enough memory gap,
+							// warn the user that the system is becoming unstable.
+							if chosen < lastWrite+bc.triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
+								log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/float64(bc.triesInMemory))
+							}
+							// Flush an entire trie and restart the counters
+							triedb.Commit(header.Root, true, nil)
+							lastWrite = chosen
+							bc.gcproc = 0
+						}
+					}
+				}
+				// Garbage collect anything below our required write retention
+				for !bc.triegc.Empty() {
+					root, number := bc.triegc.Pop()
+					if uint64(-number) > chosen {
+						bc.triegc.Push(root, number)
+						break
+					}
+					go triedb.Dereference(root.(common.Hash))
+				}
+			}
+		}
+		return nil
+	}
+
 	// Commit all cached state changes into underlying memory database.
-	root, diffLayer, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
+	_, diffLayer, err := state.Commit(bc.tryRewindBadBlocks, tryCommitTrieDB)
 	if err != nil {
 		return NonStatTy, err
 	}
@@ -1674,69 +1792,9 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 		diffLayer.Number = block.NumberU64()
 		bc.cacheDiffLayer(diffLayer)
 	}
-	triedb := bc.stateCache.TrieDB()
 
-	// If we're running an archive node, always flush
-	if bc.cacheConfig.TrieDirtyDisabled {
-		if err := triedb.Commit(root, false, nil); err != nil {
-			return NonStatTy, err
-		}
-	} else {
-		// Full but not archive node, do proper garbage collection
-		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
-		bc.triegc.Push(root, -int64(block.NumberU64()))
-
-		if current := block.NumberU64(); current > bc.triesInMemory {
-			// If we exceeded our memory allowance, flush matured singleton nodes to disk
-			var (
-				nodes, imgs = triedb.Size()
-				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
-			)
-			if nodes > limit || imgs > 4*1024*1024 {
-				triedb.Cap(limit - ethdb.IdealBatchSize)
-			}
-			// Find the next state trie we need to commit
-			chosen := current - bc.triesInMemory
-
-			// If we exceeded out time allowance, flush an entire trie to disk
-			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
-				canWrite := true
-				if posa, ok := bc.engine.(consensus.PoSA); ok {
-					if !posa.EnoughDistance(bc, block.Header()) {
-						canWrite = false
-					}
-				}
-				if canWrite {
-					// If the header is missing (canonical chain behind), we're reorging a low
-					// diff sidechain. Suspend committing until this operation is completed.
-					header := bc.GetHeaderByNumber(chosen)
-					if header == nil {
-						log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
-					} else {
-						// If we're exceeding limits but haven't reached a large enough memory gap,
-						// warn the user that the system is becoming unstable.
-						if chosen < lastWrite+bc.triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
-							log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/float64(bc.triesInMemory))
-						}
-						// Flush an entire trie and restart the counters
-						triedb.Commit(header.Root, true, nil)
-						lastWrite = chosen
-						bc.gcproc = 0
-					}
-				}
-			}
-			// Garbage collect anything below our required write retention
-			for !bc.triegc.Empty() {
-				root, number := bc.triegc.Pop()
-				if uint64(-number) > chosen {
-					bc.triegc.Push(root, number)
-					break
-				}
-				go triedb.Dereference(root.(common.Hash))
-			}
-		}
-	}
 	wg.Wait()
+
 	// If the total difficulty is higher than our known, add it to the canonical chain
 	// Second clause in the if statement reduces the vulnerability to selfish mining.
 	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -2051,10 +2109,22 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
 
 		// Enable prefetching to pull in trie node paths while processing transactions
 		statedb.StartPrefetcher("chain")
-
+		var followupInterrupt uint32
+		// For diff sync, it may fall back to full sync, so we still prefetch
+		if len(block.Transactions()) >= prefetchTxNumber {
+			throwaway := statedb.Copy()
+			go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
+				bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
+			}(time.Now(), block, throwaway, &followupInterrupt)
+		}
 		// Process block using the parent state as reference point
 		substart := time.Now()
+		if bc.pipeCommit {
+			statedb.EnablePipeCommit()
+		}
+		statedb.SetExpectedStateRoot(block.Root())
 		statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
+		atomic.StoreUint32(&followupInterrupt, 1)
 		activeState = statedb
 		if err != nil {
 			bc.reportBlock(block, receipts, err)
@@ -2073,7 +2143,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
 		// Validate the state using the default validator
 		substart = time.Now()
 		if !statedb.IsLightProcessed() {
-			if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
+			if err := bc.validator.ValidateState(block, statedb, receipts, usedGas, bc.pipeCommit); err != nil {
 				log.Error("validate state failed", "error", err)
 				bc.reportBlock(block, receipts, err)
 				return it.index, err
@@ -2488,6 +2558,19 @@ func (bc *BlockChain) update() {
 	}
 }
 
+func (bc *BlockChain) rewindInvalidHeaderBlockLoop() {
+	recheck := time.NewTicker(rewindBadBlockInterval)
+	defer recheck.Stop()
+	for {
+		select {
+		case <-recheck.C:
+			bc.tryRewindBadBlocks()
+		case <-bc.quit:
+			return
+		}
+	}
+}
+
 func (bc *BlockChain) trustedDiffLayerLoop() {
 	recheck := time.NewTicker(diffLayerFreezerRecheckInterval)
 	bc.wg.Add(1)
@@ -2824,6 +2907,18 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) {
 	}
 }
 
+func (bc *BlockChain) isCachedBadBlock(block *types.Block) bool {
+	if timeAt, exist := bc.badBlockCache.Get(block.Hash()); exist {
+		putAt := timeAt.(time.Time)
+		if time.Since(putAt) >= badBlockCacheExpire {
+			bc.badBlockCache.Remove(block.Hash())
+			return false
+		}
+		return true
+	}
+	return false
+}
+
 // reportBlock logs a bad block error.
 func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
 	rawdb.WriteBadBlock(bc.db, block)
@@ -2994,6 +3089,11 @@ func EnableLightProcessor(bc *BlockChain) *BlockChain {
 	return bc
 }
 
+func EnablePipelineCommit(bc *BlockChain) *BlockChain {
+	bc.pipeCommit = true
+	return bc
+}
+
 func EnablePersistDiff(limit uint64) BlockChainOption {
 	return func(chain *BlockChain) *BlockChain {
 		chain.diffLayerFreezerBlockLimit = limit

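The badBlockCache introduced above maps a block hash to the time it was cached, and isCachedBadBlock treats entries older than badBlockCacheExpire as stale. A sketch of that expiry logic, assuming the same github.com/hashicorp/golang-lru package; string keys stand in for common.Hash:

    package main

    import (
    	"fmt"
    	"time"

    	lru "github.com/hashicorp/golang-lru"
    )

    const badBlockCacheExpire = 30 * time.Second

    // isCachedBad reports whether hash is still cached as bad, evicting
    // entries whose timestamp is older than badBlockCacheExpire.
    func isCachedBad(cache *lru.Cache, hash string) bool {
    	if timeAt, exist := cache.Get(hash); exist {
    		if time.Since(timeAt.(time.Time)) >= badBlockCacheExpire {
    			cache.Remove(hash)
    			return false
    		}
    		return true
    	}
    	return false
    }

    func main() {
    	cache, _ := lru.New(16)
    	cache.Add("0xbad", time.Now())
    	fmt.Println(isCachedBad(cache, "0xbad")) // true until the entry expires
    }
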
+ 5 - 5
core/blockchain_diff_test.go

@@ -317,6 +317,9 @@ func TestProcessDiffLayer(t *testing.T) {
 			lightBackend.Chain().HandleDiffLayer(diff, "testpid", true)
 		}
 		_, err := lightBackend.chain.insertChain([]*types.Block{block}, true)
+		if err != nil {
+			t.Errorf("failed to insert block %v", err)
+		}
 		if checks, exist := checkBlocks[i]; exist {
 			for _, check := range checks.txs {
 				s, _ := lightBackend.Chain().Snapshots().Snapshot(block.Root()).Storage(crypto.Keccak256Hash((*check.to)[:]), check.slot)
@@ -325,9 +328,6 @@ func TestProcessDiffLayer(t *testing.T) {
 				}
 			}
 		}
-		if err != nil {
-			t.Errorf("failed to insert block %v", err)
-		}
 	}
 	currentBlock := lightBackend.chain.CurrentBlock()
 	nextBlock := fullBackend.chain.GetBlockByNumber(currentBlock.NumberU64() + 1)
@@ -368,11 +368,11 @@ func TestFreezeDiffLayer(t *testing.T) {
 		// Wait for the buffer to be zero.
 	}
 	// Minus one empty block.
-	if fullBackend.chain.diffQueue.Size() != blockNum-1 {
+	if fullBackend.chain.diffQueue.Size() > blockNum-1 || fullBackend.chain.diffQueue.Size() < blockNum-2 {
 		t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum-1, fullBackend.chain.diffQueue.Size())
 	}
 
-	time.Sleep(diffLayerFreezerRecheckInterval + 1*time.Second)
+	time.Sleep(diffLayerFreezerRecheckInterval + 2*time.Second)
 	if fullBackend.chain.diffQueue.Size() != int(fullBackend.chain.triesInMemory) {
 		t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum, fullBackend.chain.diffQueue.Size())
 	}

+ 2 - 2
core/blockchain_repair_test.go

@@ -1762,7 +1762,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
 	}
 	os.RemoveAll(datadir)
 
-	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("Failed to create persistent database: %v", err)
 	}
@@ -1832,7 +1832,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
 	db.Close()
 
 	// Start a new blockchain back up and see where the repair leads us
-	db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
+	db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("Failed to reopen persistent database: %v", err)
 	}

+ 1 - 1
core/blockchain_sethead_test.go

@@ -1961,7 +1961,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
 	}
 	os.RemoveAll(datadir)
 
-	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("Failed to create persistent database: %v", err)
 	}

+ 2 - 2
core/blockchain_snapshot_test.go

@@ -64,7 +64,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
 	}
 	os.RemoveAll(datadir)
 
-	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("Failed to create persistent database: %v", err)
 	}
@@ -248,7 +248,7 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
 	db.Close()
 
 	// Start a new blockchain back up and see where the repair leads us
-	newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false)
+	newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("Failed to reopen persistent database: %v", err)
 	}

+ 197 - 89
core/blockchain_test.go

@@ -43,7 +43,8 @@ import (
 // So we can deterministically seed different blockchains
 var (
 	canonicalSeed = 1
-	forkSeed      = 2
+	forkSeed1     = 2
+	forkSeed2     = 3
 
 	TestTriesInMemory = 128
 )
@@ -51,14 +52,18 @@ var (
 // newCanonical creates a chain database, and injects a deterministic canonical
 // chain. Depending on the full flag, it creates either a full block chain or a
 // header only chain.
-func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) {
+func newCanonical(engine consensus.Engine, n int, full, pipeline bool) (ethdb.Database, *BlockChain, error) {
 	var (
 		db      = rawdb.NewMemoryDatabase()
 		genesis = new(Genesis).MustCommit(db)
 	)
 
 	// Initialize a fresh chain with only a genesis block
-	blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	var ops []BlockChainOption
+	if pipeline {
+		ops = append(ops, EnablePipelineCommit)
+	}
+	blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil, ops...)
 	// Create and inject the requested chain
 	if n == 0 {
 		return db, blockchain, nil
@@ -76,9 +81,53 @@ func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *B
 }
 
 // Test fork of length N starting from block i
-func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
+func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n int, pipeline bool) {
 	// Copy old chain up to #i into a new db
-	db, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
+	db, blockchain2, err := newCanonical(ethash.NewFaker(), i, true, pipeline)
+	if err != nil {
+		t.Fatal("could not make new canonical in testInvalidStateRootBlockImport", err)
+	}
+	defer blockchain2.Stop()
+
+	// Assert the chains have the same header/block at #i
+	hash1 := blockchain.GetBlockByNumber(uint64(i)).Hash()
+	hash2 := blockchain2.GetBlockByNumber(uint64(i)).Hash()
+	if hash1 != hash2 {
+		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
+	}
+	// Extend the newly created chain
+	blockChainB := makeBlockChain(blockchain2.CurrentBlock(), n, ethash.NewFaker(), db, forkSeed1)
+	for idx, block := range blockChainB {
+		block.SetRoot(common.Hash{0: byte(forkSeed1), 19: byte(idx)})
+	}
+	previousBlock := blockchain.CurrentBlock()
+	// Sanity check that the chain with invalid state roots cannot be imported into the original
+	if _, err := blockchain.InsertChain(blockChainB); err == nil {
+		t.Fatalf("failed to report insert error")
+	}
+
+	time.Sleep(2 * rewindBadBlockInterval)
+	latestBlock := blockchain.CurrentBlock()
+	if latestBlock.Hash() != previousBlock.Hash() || latestBlock.NumberU64() != previousBlock.NumberU64() {
+		t.Fatalf("rewind did not take effect")
+	}
+	db, blockchain3, err := newCanonical(ethash.NewFaker(), i, true, pipeline)
+	if err != nil {
+		t.Fatal("could not make new canonical in testInvalidStateRootBlockImport", err)
+	}
+	defer blockchain3.Stop()
+
+	blockChainC := makeBlockChain(blockchain3.CurrentBlock(), n, ethash.NewFaker(), db, forkSeed2)
+
+	if _, err := blockchain.InsertChain(blockChainC); err != nil {
+		t.Fatalf("failed to insert forking chain: %v", err)
+	}
+}
+
+// Test fork of length N starting from block i
+func testFork(t *testing.T, blockchain *BlockChain, i, n int, full, pipeline bool, comparator func(td1, td2 *big.Int)) {
+	// Copy old chain up to #i into a new db
+	db, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, pipeline)
 	if err != nil {
 		t.Fatal("could not make new canonical in testFork", err)
 	}
@@ -102,12 +151,12 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
 		headerChainB []*types.Header
 	)
 	if full {
-		blockChainB = makeBlockChain(blockchain2.CurrentBlock(), n, ethash.NewFaker(), db, forkSeed)
+		blockChainB = makeBlockChain(blockchain2.CurrentBlock(), n, ethash.NewFaker(), db, forkSeed1)
 		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}
 	} else {
-		headerChainB = makeHeaderChain(blockchain2.CurrentHeader(), n, ethash.NewFaker(), db, forkSeed)
+		headerChainB = makeHeaderChain(blockchain2.CurrentHeader(), n, ethash.NewFaker(), db, forkSeed1)
 		if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
 			t.Fatalf("failed to insert forking chain: %v", err)
 		}
@@ -117,7 +166,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
 
 	if full {
 		tdPre = blockchain.GetTdByHash(blockchain.CurrentBlock().Hash())
-		if err := testBlockChainImport(blockChainB, blockchain); err != nil {
+		if err := testBlockChainImport(blockChainB, pipeline, blockchain); err != nil {
 			t.Fatalf("failed to import forked block chain: %v", err)
 		}
 		tdPost = blockchain.GetTdByHash(blockChainB[len(blockChainB)-1].Hash())
@@ -134,7 +183,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
 
 // testBlockChainImport tries to process a chain of blocks, writing them into
 // the database if successful.
-func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
+func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *BlockChain) error {
 	for _, block := range chain {
 		// Try and process the block
 		err := blockchain.engine.VerifyHeader(blockchain, block.Header(), true)
@@ -151,12 +200,16 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
 		if err != nil {
 			return err
 		}
+		statedb.SetExpectedStateRoot(block.Root())
+		if pipelineCommit {
+			statedb.EnablePipeCommit()
+		}
 		statedb, receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
 		if err != nil {
 			blockchain.reportBlock(block, receipts, err)
 			return err
 		}
-		err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas)
+		err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas, pipelineCommit)
 		if err != nil {
 			blockchain.reportBlock(block, receipts, err)
 			return err
@@ -164,7 +217,9 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
 		blockchain.chainmu.Lock()
 		rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash())))
 		rawdb.WriteBlock(blockchain.db, block)
-		statedb.Commit(false)
+		statedb.Finalise(false)
+		statedb.AccountsIntermediateRoot()
+		statedb.Commit(nil)
 		blockchain.chainmu.Unlock()
 	}
 	return nil
@@ -187,8 +242,22 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
 	return nil
 }
 
+func TestBlockImportVerification(t *testing.T) {
+	length := 5
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(ethash.NewFaker(), length, true, true)
+	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
+	}
+	defer processor.Stop()
+	// Start fork from current height
+	processor = EnablePipelineCommit(processor)
+	testInvalidStateRootBlockImport(t, processor, length, 10, true)
+}
+
 func TestLastBlock(t *testing.T) {
-	_, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
+	_, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, false)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -205,14 +274,20 @@ func TestLastBlock(t *testing.T) {
 
 // Tests that given a starting canonical chain of a given size, it can be extended
 // with various length chains.
-func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
-func TestExtendCanonicalBlocks(t *testing.T)  { testExtendCanonical(t, true) }
+func TestExtendCanonicalHeaders(t *testing.T) {
+	testExtendCanonical(t, false, false)
 
-func testExtendCanonical(t *testing.T, full bool) {
+}
+func TestExtendCanonicalBlocks(t *testing.T) {
+	testExtendCanonical(t, true, false)
+	testExtendCanonical(t, true, true)
+}
+
+func testExtendCanonical(t *testing.T, full, pipeline bool) {
 	length := 5
 
 	// Make first chain starting from genesis
-	_, processor, err := newCanonical(ethash.NewFaker(), length, full)
+	_, processor, err := newCanonical(ethash.NewFaker(), length, full, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -225,22 +300,25 @@ func testExtendCanonical(t *testing.T, full bool) {
 		}
 	}
 	// Start fork from current height
-	testFork(t, processor, length, 1, full, better)
-	testFork(t, processor, length, 2, full, better)
-	testFork(t, processor, length, 5, full, better)
-	testFork(t, processor, length, 10, full, better)
+	testFork(t, processor, length, 1, full, pipeline, better)
+	testFork(t, processor, length, 2, full, pipeline, better)
+	testFork(t, processor, length, 5, full, pipeline, better)
+	testFork(t, processor, length, 10, full, pipeline, better)
 }
 
 // Tests that given a starting canonical chain of a given size, creating shorter
 // forks do not take canonical ownership.
-func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
-func TestShorterForkBlocks(t *testing.T)  { testShorterFork(t, true) }
+func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false, false) }
+func TestShorterForkBlocks(t *testing.T) {
+	testShorterFork(t, true, false)
+	testShorterFork(t, true, true)
+}
 
-func testShorterFork(t *testing.T, full bool) {
+func testShorterFork(t *testing.T, full, pipeline bool) {
 	length := 10
 
 	// Make first chain starting from genesis
-	_, processor, err := newCanonical(ethash.NewFaker(), length, full)
+	_, processor, err := newCanonical(ethash.NewFaker(), length, full, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -253,24 +331,30 @@ func testShorterFork(t *testing.T, full bool) {
 		}
 	}
 	// Sum of numbers must be less than `length` for this to be a shorter fork
-	testFork(t, processor, 0, 3, full, worse)
-	testFork(t, processor, 0, 7, full, worse)
-	testFork(t, processor, 1, 1, full, worse)
-	testFork(t, processor, 1, 7, full, worse)
-	testFork(t, processor, 5, 3, full, worse)
-	testFork(t, processor, 5, 4, full, worse)
+	testFork(t, processor, 0, 3, full, pipeline, worse)
+	testFork(t, processor, 0, 7, full, pipeline, worse)
+	testFork(t, processor, 1, 1, full, pipeline, worse)
+	testFork(t, processor, 1, 7, full, pipeline, worse)
+	testFork(t, processor, 5, 3, full, pipeline, worse)
+	testFork(t, processor, 5, 4, full, pipeline, worse)
 }
 
 // Tests that given a starting canonical chain of a given size, creating longer
 // forks do take canonical ownership.
-func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
-func TestLongerForkBlocks(t *testing.T)  { testLongerFork(t, true) }
+func TestLongerForkHeaders(t *testing.T) {
+	testLongerFork(t, false, false)
+}
+func TestLongerForkBlocks(t *testing.T) {
+	testLongerFork(t, true, false)
+	testLongerFork(t, true, true)
+
+}
 
-func testLongerFork(t *testing.T, full bool) {
+func testLongerFork(t *testing.T, full, pipeline bool) {
 	length := 10
 
 	// Make first chain starting from genesis
-	_, processor, err := newCanonical(ethash.NewFaker(), length, full)
+	_, processor, err := newCanonical(ethash.NewFaker(), length, full, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -283,24 +367,28 @@ func testLongerFork(t *testing.T, full bool) {
 		}
 	}
 	// Sum of numbers must be greater than `length` for this to be a longer fork
-	testFork(t, processor, 0, 11, full, better)
-	testFork(t, processor, 0, 15, full, better)
-	testFork(t, processor, 1, 10, full, better)
-	testFork(t, processor, 1, 12, full, better)
-	testFork(t, processor, 5, 6, full, better)
-	testFork(t, processor, 5, 8, full, better)
+	testFork(t, processor, 0, 11, full, pipeline, better)
+	testFork(t, processor, 0, 15, full, pipeline, better)
+	testFork(t, processor, 1, 10, full, pipeline, better)
+	testFork(t, processor, 1, 12, full, pipeline, better)
+	testFork(t, processor, 5, 6, full, pipeline, better)
+	testFork(t, processor, 5, 8, full, pipeline, better)
 }
 
 // Tests that given a starting canonical chain of a given size, creating equal
 // forks do take canonical ownership.
-func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
-func TestEqualForkBlocks(t *testing.T)  { testEqualFork(t, true) }
+func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false, false) }
+func TestEqualForkBlocks(t *testing.T) {
+	testEqualFork(t, true, true)
+	testEqualFork(t, true, false)
 
-func testEqualFork(t *testing.T, full bool) {
+}
+
+func testEqualFork(t *testing.T, full, pipeline bool) {
 	length := 10
 
 	// Make first chain starting from genesis
-	_, processor, err := newCanonical(ethash.NewFaker(), length, full)
+	_, processor, err := newCanonical(ethash.NewFaker(), length, full, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -313,21 +401,24 @@ func testEqualFork(t *testing.T, full bool) {
 		}
 	}
 	// Sum of numbers must be equal to `length` for this to be an equal fork
-	testFork(t, processor, 0, 10, full, equal)
-	testFork(t, processor, 1, 9, full, equal)
-	testFork(t, processor, 2, 8, full, equal)
-	testFork(t, processor, 5, 5, full, equal)
-	testFork(t, processor, 6, 4, full, equal)
-	testFork(t, processor, 9, 1, full, equal)
+	testFork(t, processor, 0, 10, full, pipeline, equal)
+	testFork(t, processor, 1, 9, full, pipeline, equal)
+	testFork(t, processor, 2, 8, full, pipeline, equal)
+	testFork(t, processor, 5, 5, full, pipeline, equal)
+	testFork(t, processor, 6, 4, full, pipeline, equal)
+	testFork(t, processor, 9, 1, full, pipeline, equal)
 }
 
 // Tests that chains missing links do not get accepted by the processor.
-func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
-func TestBrokenBlockChain(t *testing.T)  { testBrokenChain(t, true) }
+func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false, false) }
+func TestBrokenBlockChain(t *testing.T) {
+	testBrokenChain(t, true, false)
+	testBrokenChain(t, true, true)
+}
 
-func testBrokenChain(t *testing.T, full bool) {
+func testBrokenChain(t *testing.T, full, pipeline bool) {
 	// Make chain starting from genesis
-	db, blockchain, err := newCanonical(ethash.NewFaker(), 10, full)
+	db, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, pipeline)
 	if err != nil {
 		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
@@ -335,12 +426,12 @@ func testBrokenChain(t *testing.T, full bool) {
 
 	// Create a forked chain, and try to insert with a missing link
 	if full {
-		chain := makeBlockChain(blockchain.CurrentBlock(), 5, ethash.NewFaker(), db, forkSeed)[1:]
-		if err := testBlockChainImport(chain, blockchain); err == nil {
+		chain := makeBlockChain(blockchain.CurrentBlock(), 5, ethash.NewFaker(), db, forkSeed1)[1:]
+		if err := testBlockChainImport(chain, pipeline, blockchain); err == nil {
 			t.Errorf("broken block chain not reported")
 		}
 	} else {
-		chain := makeHeaderChain(blockchain.CurrentHeader(), 5, ethash.NewFaker(), db, forkSeed)[1:]
+		chain := makeHeaderChain(blockchain.CurrentHeader(), 5, ethash.NewFaker(), db, forkSeed1)[1:]
 		if err := testHeaderChainImport(chain, blockchain); err == nil {
 			t.Errorf("broken header chain not reported")
 		}
@@ -349,19 +440,25 @@ func testBrokenChain(t *testing.T, full bool) {
 
 // Tests that reorganising a long difficult chain after a short easy one
 // overwrites the canonical numbers and links in the database.
-func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
-func TestReorgLongBlocks(t *testing.T)  { testReorgLong(t, true) }
+func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false, false) }
+func TestReorgLongBlocks(t *testing.T) {
+	testReorgLong(t, true, false)
+	testReorgLong(t, true, true)
+}
 
-func testReorgLong(t *testing.T, full bool) {
-	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280, full)
+func testReorgLong(t *testing.T, full, pipeline bool) {
+	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280, full, pipeline)
 }
 
 // Tests that reorganising a short difficult chain after a long easy one
 // overwrites the canonical numbers and links in the database.
-func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
-func TestReorgShortBlocks(t *testing.T)  { testReorgShort(t, true) }
+func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false, false) }
+func TestReorgShortBlocks(t *testing.T) {
+	testReorgShort(t, true, false)
+	testReorgShort(t, true, true)
+}
 
-func testReorgShort(t *testing.T, full bool) {
+func testReorgShort(t *testing.T, full, pipeline bool) {
 	// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
 	// we need a fairly long chain of blocks with different difficulties for a short
 	// one to become heavier than a long one. The 96 is an empirical value.
@@ -373,12 +470,12 @@ func testReorgShort(t *testing.T, full bool) {
 	for i := 0; i < len(diff); i++ {
 		diff[i] = -9
 	}
-	testReorg(t, easy, diff, 12615120, full)
+	testReorg(t, easy, diff, 12615120, full, pipeline)
 }
 
-func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
+func testReorg(t *testing.T, first, second []int64, td int64, full, pipeline bool) {
 	// Create a pristine chain and database
-	db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
+	db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, pipeline)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -444,12 +541,16 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
 }
 
 // Tests that the insertion functions detect banned hashes.
-func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
-func TestBadBlockHashes(t *testing.T)  { testBadHashes(t, true) }
+func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false, false) }
+func TestBadBlockHashes(t *testing.T) {
+	testBadHashes(t, true, true)
+	testBadHashes(t, true, false)
+
+}
 
-func testBadHashes(t *testing.T, full bool) {
+func testBadHashes(t *testing.T, full, pipeline bool) {
 	// Create a pristine chain and database
-	db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
+	db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, pipeline)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -478,12 +579,16 @@ func testBadHashes(t *testing.T, full bool) {
 
 // Tests that bad hashes are detected on boot, and the chain rolled back to a
 // good state prior to the bad hash.
-func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
-func TestReorgBadBlockHashes(t *testing.T)  { testReorgBadHashes(t, true) }
+func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false, false) }
+func TestReorgBadBlockHashes(t *testing.T) {
+	testReorgBadHashes(t, true, false)
+	testReorgBadHashes(t, true, true)
 
-func testReorgBadHashes(t *testing.T, full bool) {
+}
+
+func testReorgBadHashes(t *testing.T, full, pipeline bool) {
 	// Create a pristine chain and database
-	db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
+	db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, pipeline)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -533,13 +638,16 @@ func testReorgBadHashes(t *testing.T, full bool) {
 }
 
 // Tests chain insertions in the face of one entity containing an invalid nonce.
-func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
-func TestBlocksInsertNonceError(t *testing.T)  { testInsertNonceError(t, true) }
+func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false, false) }
+func TestBlocksInsertNonceError(t *testing.T) {
+	testInsertNonceError(t, true, false)
+	testInsertNonceError(t, true, true)
+}
 
-func testInsertNonceError(t *testing.T, full bool) {
+func testInsertNonceError(t *testing.T, full, pipeline bool) {
 	for i := 1; i < 25 && !t.Failed(); i++ {
 		// Create a pristine chain and database
-		db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
+		db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, pipeline)
 		if err != nil {
 			t.Fatalf("failed to create pristine chain: %v", err)
 		}
@@ -653,7 +761,7 @@ func TestFastVsFullChains(t *testing.T) {
 		t.Fatalf("failed to create temp freezer dir: %v", err)
 	}
 	defer os.Remove(frdir)
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -727,7 +835,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 			t.Fatalf("failed to create temp freezer dir: %v", err)
 		}
 		defer os.Remove(dir)
-		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false)
+		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false, false, false)
 		if err != nil {
 			t.Fatalf("failed to create temp freezer db: %v", err)
 		}
@@ -1212,7 +1320,7 @@ done:
 
 // Tests if the canonical block can be fetched from the database during chain insertion.
 func TestCanonicalBlockRetrieval(t *testing.T) {
-	_, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
+	_, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, false)
 	if err != nil {
 		t.Fatalf("failed to create pristine chain: %v", err)
 	}
@@ -1594,7 +1702,7 @@ func TestBlockchainRecovery(t *testing.T) {
 	}
 	defer os.Remove(frdir)
 
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -1651,7 +1759,7 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) {
 		t.Fatalf("failed to create temp freezer dir: %v", err)
 	}
 	defer os.Remove(frdir)
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -1850,7 +1958,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 		t.Fatalf("failed to create temp freezer dir: %v", err)
 	}
 	defer os.Remove(dir)
-	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false)
+	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -2130,7 +2238,7 @@ func TestTransactionIndices(t *testing.T) {
 		t.Fatalf("failed to create temp freezer dir: %v", err)
 	}
 	defer os.Remove(frdir)
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -2158,7 +2266,7 @@ func TestTransactionIndices(t *testing.T) {
 	// Init block chain with external ancients, check all needed indices have been indexed.
 	limit := []uint64{0, 32, 64, 128}
 	for _, l := range limit {
-		ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
+		ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false)
 		if err != nil {
 			t.Fatalf("failed to create temp freezer db: %v", err)
 		}
@@ -2178,7 +2286,7 @@ func TestTransactionIndices(t *testing.T) {
 	}
 
 	// Reconstruct a block chain which only reserves HEAD-64 tx indices
-	ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
+	ancientDb, err = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
@@ -2257,7 +2365,7 @@ func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
 		t.Fatalf("failed to create temp freezer dir: %v", err)
 	}
 	defer os.Remove(frdir)
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
+	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}

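newCanonical above enables pipeline commit by appending EnablePipelineCommit to a slice of BlockChainOption values and applying them in NewBlockChain. A stripped-down sketch of that functional-option pattern; the chain and option types here are illustrative, not the real ones:

    package main

    import "fmt"

    type chain struct{ pipeCommit bool }

    // option mirrors the BlockChainOption shape: func(*BlockChain) *BlockChain.
    type option func(*chain) *chain

    func enablePipelineCommit(c *chain) *chain {
    	c.pipeCommit = true
    	return c
    }

    func newChain(ops ...option) *chain {
    	c := &chain{}
    	for _, op := range ops {
    		c = op(c)
    	}
    	return c
    }

    func main() {
    	var ops []option
    	pipeline := true
    	if pipeline {
    		ops = append(ops, enablePipelineCommit)
    	}
    	fmt.Println(newChain(ops...).pipeCommit) // true
    }
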
+ 3 - 3
core/chain_makers.go

@@ -223,7 +223,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
 			block, _, _ := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts)
 
 			// Write state changes to db
-			root, _, err := statedb.Commit(config.IsEIP158(b.header.Number))
+			root, _, err := statedb.Commit(nil)
 			if err != nil {
 				panic(fmt.Sprintf("state write error: %v", err))
 			}
@@ -254,9 +254,9 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S
 	} else {
 		time = parent.Time() + 10 // block time is fixed at 10 seconds
 	}
-
+	root := state.IntermediateRoot(chain.Config().IsEIP158(parent.Number()))
 	return &types.Header{
-		Root:       state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())),
+		Root:       root,
 		ParentHash: parent.Hash(),
 		Coinbase:   parent.Coinbase(),
 		Difficulty: engine.CalcDifficulty(chain, time, &types.Header{

+ 3 - 0
core/error.go

@@ -34,6 +34,9 @@ var (
 
 	// ErrDiffLayerNotFound is returned when diff layer not found.
 	ErrDiffLayerNotFound = errors.New("diff layer not found")
+
+	// ErrKnownBadBlock is returned when the block is a known bad block
+	ErrKnownBadBlock = errors.New("already known bad block")
 )
 
 // List of evm-call-message pre-checking errors. All state transition messages will

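ErrKnownBadBlock is a sentinel error, so callers can match it directly or with errors.Is. A small illustrative sketch; insertBlock is a hypothetical stand-in for the real insertion path:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errKnownBadBlock = errors.New("already known bad block")

    // insertBlock is a hypothetical insertion path that rejects a cached bad block.
    func insertBlock() error { return errKnownBadBlock }

    func main() {
    	if err := insertBlock(); errors.Is(err, errKnownBadBlock) {
    		fmt.Println("skipping block:", err)
    	}
    }
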
+ 1 - 1
core/genesis.go

@@ -298,7 +298,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
 	if g.Difficulty == nil {
 		head.Difficulty = params.GenesisDifficulty
 	}
-	statedb.Commit(false)
+	statedb.Commit(nil)
 	statedb.Database().TrieDB().Commit(root, true, nil)
 
 	return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))

+ 1 - 1
core/rawdb/accessors_chain_test.go

@@ -440,7 +440,7 @@ func TestAncientStorage(t *testing.T) {
 	}
 	defer os.Remove(frdir)
 
-	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
+	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false)
 	if err != nil {
 		t.Fatalf("failed to create database with ancient backend")
 	}

+ 3 - 2
core/rawdb/chain_iterator.go

@@ -35,7 +35,7 @@ import (
 // injects into the database the block hash->number mappings.
 func InitDatabaseFromFreezer(db ethdb.Database) {
 	// If we can't access the freezer or it's empty, abort
-	frozen, err := db.Ancients()
+	frozen, err := db.ItemAmountInAncient()
 	if err != nil || frozen == 0 {
 		return
 	}
@@ -44,8 +44,9 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
 		start  = time.Now()
 		logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
 		hash   common.Hash
+		offset = db.AncientOffSet()
 	)
-	for i := uint64(0); i < frozen; i++ {
+	for i := uint64(0) + offset; i < frozen+offset; i++ {
 		// Since the freezer has all data in sequential order on a file,
 		// it would be 'neat' to read more data in one go, and let the
 		// freezerdb return N items (e.g up to 1000 items per go)

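With an offset-aware ancient store, item i in the freezer corresponds to block number i+offset, so the rewritten loop above spans [offset, frozen+offset). A tiny sketch with made-up numbers:

    package main

    import "fmt"

    func main() {
    	var (
    		frozen uint64 = 5  // items currently stored in the freezer
    		offset uint64 = 90 // first block number kept in this freezer
    	)
    	for i := uint64(0) + offset; i < frozen+offset; i++ {
    		fmt.Println("block number:", i) // 90 through 94
    	}
    }
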
+ 106 - 7
core/rawdb/database.go

@@ -20,16 +20,18 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"math/big"
 	"os"
 	"sync/atomic"
 	"time"
 
+	"github.com/olekukonko/tablewriter"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/leveldb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/olekukonko/tablewriter"
 )
 
 // freezerdb is a database wrapper that enabled freezer data retrievals.
@@ -112,6 +114,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
 	return 0, errNotSupported
 }
 
+// ItemAmountInAncient returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) ItemAmountInAncient() (uint64, error) {
+	return 0, errNotSupported
+}
+
 // AncientSize returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
 	return 0, errNotSupported
@@ -140,6 +147,10 @@ func (db *nofreezedb) SetDiffStore(diff ethdb.KeyValueStore) {
 	db.diffStore = diff
 }
 
+func (db *nofreezedb) AncientOffSet() uint64 {
+	return 0
+}
+
 // NewDatabase creates a high level database on top of a given key-value data
 // store without a freezer moving immutable chain segments into cold storage.
 func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@@ -148,15 +159,69 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
 	}
 }
 
+func ReadOffSetOfCurrentAncientFreezer(db ethdb.KeyValueReader) uint64 {
+	offset, _ := db.Get(offSetOfCurrentAncientFreezer)
+	if offset == nil {
+		return 0
+	}
+	return new(big.Int).SetBytes(offset).Uint64()
+}
+
+func ReadOffSetOfLastAncientFreezer(db ethdb.KeyValueReader) uint64 {
+	offset, _ := db.Get(offSetOfLastAncientFreezer)
+	if offset == nil {
+		return 0
+	}
+	return new(big.Int).SetBytes(offset).Uint64()
+}
+
+func WriteOffSetOfCurrentAncientFreezer(db ethdb.KeyValueWriter, offset uint64) {
+	if err := db.Put(offSetOfCurrentAncientFreezer, new(big.Int).SetUint64(offset).Bytes()); err != nil {
+		log.Crit("Failed to store offSetOfAncientFreezer", "err", err)
+	}
+}
+func WriteOffSetOfLastAncientFreezer(db ethdb.KeyValueWriter, offset uint64) {
+	if err := db.Put(offSetOfLastAncientFreezer, new(big.Int).SetUint64(offset).Bytes()); err != nil {
+		log.Crit("Failed to store offSetOfAncientFreezer", "err", err)
+	}
+}
+
+// NewFreezerDb only creates a freezer without a statedb.
+func NewFreezerDb(db ethdb.KeyValueStore, frz, namespace string, readonly bool, newOffSet uint64) (*freezer, error) {
+	// Create the idle freezer instance; this operation should be atomic to avoid a mismatch between the offset and the ancientDB.
+	frdb, err := newFreezer(frz, namespace, readonly)
+	if err != nil {
+		return nil, err
+	}
+	frdb.offset = newOffSet
+	frdb.frozen += newOffSet
+	return frdb, nil
+}
+
 // NewDatabaseWithFreezer creates a high level database on top of a given key-
 // value data store with a freezer moving immutable chain segments into cold
 // storage.
-func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
+func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly, disableFreeze, isLastOffset bool) (ethdb.Database, error) {
 	// Create the idle freezer instance
 	frdb, err := newFreezer(freezer, namespace, readonly)
 	if err != nil {
 		return nil, err
 	}
+
+	var offset uint64
+	// The offset of ancientDB should be handled differently in different scenarios.
+	if isLastOffset {
+		offset = ReadOffSetOfLastAncientFreezer(db)
+	} else {
+		offset = ReadOffSetOfCurrentAncientFreezer(db)
+	}
+
+	frdb.offset = offset
+
+	// Some blocks in the ancientDB may already have been frozen and pruned, so add the offset to
+	// represent the absolute number of blocks already frozen.
+	frdb.frozen += offset
+
 	// Since the freezer can be stored separately from the user's key-value database,
 	// there's a fairly high probability that the user requests invalid combinations
 	// of the freezer and database. Ensure that we don't shoot ourselves in the foot
@@ -179,7 +244,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
 	// If the genesis hash is empty, we have a new key-value store, so nothing to
 	// validate in this method. If, however, the genesis hash is not nil, compare
 	// it to the freezer content.
-	if kvgenesis, _ := db.Get(headerHashKey(0)); len(kvgenesis) > 0 {
+	// Only perform the following checks when the offset equals 0; otherwise the block
+	// numbers in the ancientDB do not start at 0 and there is no genesis block in the ancientDB either.
+
+	if kvgenesis, _ := db.Get(headerHashKey(0)); offset == 0 && len(kvgenesis) > 0 {
 		if frozen, _ := frdb.Ancients(); frozen > 0 {
 			// If the freezer already contains something, ensure that the genesis blocks
 			// match, otherwise we might mix up freezers across chains and destroy both
@@ -221,8 +289,9 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
 			// freezer.
 		}
 	}
+
 	// Freezer is consistent with the key-value database, permit combining the two
-	if !frdb.readonly {
+	if !disableFreeze && !frdb.readonly {
 		go frdb.freeze(db)
 	}
 	return &freezerdb{
@@ -256,12 +325,12 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r
 
 // NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a
 // freezer moving immutable chain segments into cold storage.
-func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
+func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly, disableFreeze, isLastOffset bool) (ethdb.Database, error) {
 	kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
 	if err != nil {
 		return nil, err
 	}
-	frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly)
+	frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly, disableFreeze, isLastOffset)
 	if err != nil {
 		kvdb.Close()
 		return nil, err
@@ -298,6 +367,35 @@ func (s *stat) Size() string {
 func (s *stat) Count() string {
 	return s.count.String()
 }
+func AncientInspect(db ethdb.Database) error {
+	offset := counter(ReadOffSetOfCurrentAncientFreezer(db))
+	// Get number of ancient rows inside the freezer.
+	count, err := db.ItemAmountInAncient()
+	if err != nil {
+		log.Error("Failed to get the item amount in ancientDB", "err", err)
+		return err
+	}
+	ancients := counter(count)
+	var endNumber counter
+	if offset+ancients == 0 { // both counters are unsigned, so the sum is zero only when both are zero
+		endNumber = 0
+	} else {
+		endNumber = offset + ancients - 1
+	}
+	stats := [][]string{
+		{"Offset/StartBlockNumber", "Offset/StartBlockNumber of ancientDB", offset.String()},
+		{"Amount of remained items in AncientStore", "Remaining items of ancientDB", ancients.String()},
+		{"The last BlockNumber within ancientDB", "The last BlockNumber", endNumber.String()},
+	}
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetHeader([]string{"Database", "Category", "Items"})
+	table.SetFooter([]string{"", "AncientStore information", ""})
+	table.AppendBulk(stats)
+	table.Render()
+
+	return nil
+}
 
 // InspectDatabase traverses the entire database and checks the size
 // of all different categories of data.
@@ -431,9 +529,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 	}
 	// Get number of ancient rows inside the freezer
 	ancients := counter(0)
-	if count, err := db.Ancients(); err == nil {
+	if count, err := db.ItemAmountInAncient(); err == nil {
 		ancients = counter(count)
 	}
+
 	// Display the database statistic.
 	stats := [][]string{
 		{"Key-Value store", "Headers", headers.Size(), headers.Count()},

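To make the offset arithmetic above concrete, here is a minimal Go sketch (names such as prunedFreezer are illustrative, not part of this patch): the freezer indexes its tables from 0, while callers address blocks by their absolute chain number, so every read subtracts the offset and the frozen counter carries the offset on top of the local item count.

package main

import "fmt"

// prunedFreezer is a hypothetical stand-in for the patched freezer:
// it only holds items from block `offset` upwards.
type prunedFreezer struct {
	offset uint64   // absolute number of the first block kept
	items  [][]byte // items[i] holds block offset+i
}

// Retrieve maps an absolute block number to a local table index.
func (f *prunedFreezer) Retrieve(number uint64) ([]byte, error) {
	if number < f.offset || number >= f.offset+uint64(len(f.items)) {
		return nil, fmt.Errorf("block %d not in freezer", number)
	}
	return f.items[number-f.offset], nil
}

// Frozen mirrors freezer.frozen: local item count plus the offset.
func (f *prunedFreezer) Frozen() uint64 {
	return f.offset + uint64(len(f.items))
}

func main() {
	f := &prunedFreezer{offset: 100, items: [][]byte{[]byte("b100"), []byte("b101")}}
	blob, _ := f.Retrieve(101)
	fmt.Printf("block 101 -> %s, frozen counter = %d\n", blob, f.Frozen())
}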
+ 20 - 8
core/rawdb/freezer.go

@@ -85,6 +85,8 @@ type freezer struct {
 
 	quit      chan struct{}
 	closeOnce sync.Once
+
+	offset uint64 // Starting block number of the current freezer
 }
 
 // newFreezer creates a chain freezer that moves ancient chain data into
@@ -164,7 +166,7 @@ func (f *freezer) Close() error {
 // in the freezer.
 func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
 	if table := f.tables[kind]; table != nil {
-		return table.has(number), nil
+		return table.has(number - f.offset), nil
 	}
 	return false, nil
 }
@@ -172,7 +174,7 @@ func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
 // Ancient retrieves an ancient binary blob from the append-only immutable files.
 func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
 	if table := f.tables[kind]; table != nil {
-		return table.Retrieve(number)
+		return table.Retrieve(number - f.offset)
 	}
 	return nil, errUnknownTable
 }
@@ -182,6 +184,16 @@ func (f *freezer) Ancients() (uint64, error) {
 	return atomic.LoadUint64(&f.frozen), nil
 }
 
+// ItemAmountInAncient returns the actual number of items in the current ancientDB.
+func (f *freezer) ItemAmountInAncient() (uint64, error) {
+	return atomic.LoadUint64(&f.frozen) - atomic.LoadUint64(&f.offset), nil
+}
+
+// AncientOffSet returns the offset of the current ancientDB.
+func (f *freezer) AncientOffSet() uint64 {
+	return atomic.LoadUint64(&f.offset)
+}
+
 // AncientSize returns the ancient size of the specified category.
 func (f *freezer) AncientSize(kind string) (uint64, error) {
 	if table := f.tables[kind]; table != nil {
@@ -216,23 +228,23 @@ func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td
 		}
 	}()
 	// Inject all the components into the relevant data tables
-	if err := f.tables[freezerHashTable].Append(f.frozen, hash[:]); err != nil {
+	if err := f.tables[freezerHashTable].Append(f.frozen-f.offset, hash[:]); err != nil {
 		log.Error("Failed to append ancient hash", "number", f.frozen, "hash", hash, "err", err)
 		return err
 	}
-	if err := f.tables[freezerHeaderTable].Append(f.frozen, header); err != nil {
+	if err := f.tables[freezerHeaderTable].Append(f.frozen-f.offset, header); err != nil {
 		log.Error("Failed to append ancient header", "number", f.frozen, "hash", hash, "err", err)
 		return err
 	}
-	if err := f.tables[freezerBodiesTable].Append(f.frozen, body); err != nil {
+	if err := f.tables[freezerBodiesTable].Append(f.frozen-f.offset, body); err != nil {
 		log.Error("Failed to append ancient body", "number", f.frozen, "hash", hash, "err", err)
 		return err
 	}
-	if err := f.tables[freezerReceiptTable].Append(f.frozen, receipts); err != nil {
+	if err := f.tables[freezerReceiptTable].Append(f.frozen-f.offset, receipts); err != nil {
 		log.Error("Failed to append ancient receipts", "number", f.frozen, "hash", hash, "err", err)
 		return err
 	}
-	if err := f.tables[freezerDifficultyTable].Append(f.frozen, td); err != nil {
+	if err := f.tables[freezerDifficultyTable].Append(f.frozen-f.offset, td); err != nil {
 		log.Error("Failed to append ancient difficulty", "number", f.frozen, "hash", hash, "err", err)
 		return err
 	}
@@ -249,7 +261,7 @@ func (f *freezer) TruncateAncients(items uint64) error {
 		return nil
 	}
 	for _, table := range f.tables {
-		if err := table.truncate(items); err != nil {
+		if err := table.truncate(items - f.offset); err != nil {
 			return err
 		}
 	}

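With the offset in place, the patched freezer maintains the invariant Ancients() == ItemAmountInAncient() + AncientOffSet(). A hedged sketch of the two counters (a stand-in struct, not the real freezer):

package main

import (
	"fmt"
	"sync/atomic"
)

type counters struct {
	frozen uint64 // absolute: number of the next block to freeze
	offset uint64 // absolute number of the first block kept on disk
}

// Ancients mirrors freezer.Ancients: the absolute frozen counter.
func (c *counters) Ancients() uint64 { return atomic.LoadUint64(&c.frozen) }

// ItemAmountInAncient mirrors the new accessor: items actually on disk.
func (c *counters) ItemAmountInAncient() uint64 {
	return atomic.LoadUint64(&c.frozen) - atomic.LoadUint64(&c.offset)
}

func main() {
	c := &counters{frozen: 1200, offset: 1000}
	fmt.Println(c.Ancients(), c.ItemAmountInAncient()) // 1200 200
}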
+ 6 - 0
core/rawdb/schema.go

@@ -69,6 +69,12 @@ var (
 	// fastTxLookupLimitKey tracks the transaction lookup limit during fast sync.
 	fastTxLookupLimitKey = []byte("FastTransactionLookupLimit")
 
+	// offSetOfCurrentAncientFreezer tracks the offset of the current, updated ancientDB.
+	offSetOfCurrentAncientFreezer = []byte("offSetOfCurrentAncientFreezer")
+
+	// offSetOfLastAncientFreezer tracks the offset of the ancientDB before the last update.
+	offSetOfLastAncientFreezer = []byte("offSetOfLastAncientFreezer")
+
 	// badBlockKey tracks the list of bad blocks seen by local
 	badBlockKey = []byte("InvalidBlock")
 

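The two offset keys above are persisted as big-endian big.Int bytes. A small sketch of the encode/decode round-trip the Read/Write helpers rely on (a missing key decodes to offset 0; helper names here are illustrative):

package main

import (
	"fmt"
	"math/big"
)

func encodeOffset(offset uint64) []byte {
	return new(big.Int).SetUint64(offset).Bytes()
}

func decodeOffset(blob []byte) uint64 {
	if blob == nil {
		return 0 // missing key: the freezer starts at block 0
	}
	return new(big.Int).SetBytes(blob).Uint64()
}

func main() {
	for _, off := range []uint64{0, 1, 9_000_000} {
		fmt.Println(off, decodeOffset(encodeOffset(off)))
	}
}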
+ 10 - 0
core/rawdb/table.go

@@ -68,6 +68,16 @@ func (t *table) Ancients() (uint64, error) {
 	return t.db.Ancients()
 }
 
+// ItemAmountInAncient returns the actual number of items in the current ancientDB.
+func (t *table) ItemAmountInAncient() (uint64, error) {
+	return t.db.ItemAmountInAncient()
+}
+
+// AncientOffSet returns the offset of the current ancientDB.
+func (t *table) AncientOffSet() uint64 {
+	return t.db.AncientOffSet()
+}
+
 // AncientSize is a noop passthrough that just forwards the request to the underlying
 // database.
 func (t *table) AncientSize(kind string) (uint64, error) {

+ 3 - 0
core/state/database.go

@@ -257,6 +257,9 @@ func (db *cachingDB) Purge() {
 
 // CopyTrie returns an independent copy of the given trie.
 func (db *cachingDB) CopyTrie(t Trie) Trie {
+	if t == nil {
+		return nil
+	}
 	switch t := t.(type) {
 	case *trie.SecureTrie:
 		return t.Copy()

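The nil guard matters because, with the lazily opened tries introduced later in this patch, a StateDB can briefly hold a nil trie. A tiny illustration of the nil-safe copy pattern (a stub Trie interface, not the real one):

package main

import "fmt"

type Trie interface{ Copy() Trie }

// copyTrie mirrors the guard added above: copying a nil trie yields nil
// instead of a panic, which matters once tries are opened lazily.
func copyTrie(t Trie) Trie {
	if t == nil {
		return nil
	}
	return t.Copy()
}

func main() {
	fmt.Println(copyTrie(nil) == nil) // true
}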
+ 211 - 0
core/state/pruner/pruner.go

@@ -27,7 +27,10 @@ import (
 	"strings"
 	"time"
 
+	"github.com/prometheus/tsdb/fileutil"
+
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/state/snapshot"
@@ -35,6 +38,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )
@@ -85,6 +89,14 @@ type Pruner struct {
 	triesInMemory uint64
 }
 
+type BlockPruner struct {
+	db                  ethdb.Database
+	oldAncientPath      string
+	newAncientPath      string
+	node                *node.Node
+	BlockAmountReserved uint64
+}
+
 // NewPruner creates the pruner instance.
 func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize, triesInMemory uint64) (*Pruner, error) {
 	headBlock := rawdb.ReadHeadBlock(db)
@@ -101,6 +113,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize, trie
 		bloomSize = 256
 	}
 	stateBloom, err := newStateBloomWithSize(bloomSize)
 	if err != nil {
 		return nil, err
 	}
@@ -115,6 +128,16 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize, trie
 	}, nil
 }
 
+func NewBlockPruner(db ethdb.Database, n *node.Node, oldAncientPath, newAncientPath string, BlockAmountReserved uint64) *BlockPruner {
+	return &BlockPruner{
+		db:                  db,
+		oldAncientPath:      oldAncientPath,
+		newAncientPath:      newAncientPath,
+		node:                n,
+		BlockAmountReserved: BlockAmountReserved,
+	}
+}
+
 func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, stateBloom *stateBloom, bloomPath string, middleStateRoots map[common.Hash]struct{}, start time.Time) error {
 	// Delete all stale trie nodes in the disk. With the help of state bloom
 	// the trie nodes(and codes) belong to the active state will be filtered
@@ -233,6 +256,194 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
 	return nil
 }
 
+func (p *BlockPruner) backUpOldDb(name string, cache, handles int, namespace string, readonly, interrupt bool) error {
+	// Open old db wrapper.
+	chainDb, err := p.node.OpenDatabaseWithFreezer(name, cache, handles, p.oldAncientPath, namespace, readonly, true, interrupt)
+	if err != nil {
+		log.Error("Failed to open ancient database", "err=", err)
+		return err
+	}
+	defer chainDb.Close()
+	log.Info("chainDB opened successfully")
+
+	// Get the number of items in old ancient db.
+	itemsOfAncient, err := chainDb.ItemAmountInAncient()
+	log.Info("the number of items in ancientDB is ", "itemsOfAncient", itemsOfAncient)
+
+	// If we can't access the freezer or it's empty, abort.
+	if err != nil || itemsOfAncient == 0 {
+		log.Error("can't access the freezer or it's empty, abort")
+		return errors.New("can't access the freezer or it's empty, abort")
+	}
+
+	// If the freezer holds fewer items than the number of blocks we want to reserve, stop.
+	if itemsOfAncient < p.BlockAmountReserved {
+		log.Error("the number of old blocks is not enough to reserve,", "ancient items", itemsOfAncient, "the amount specified", p.BlockAmountReserved)
+		return errors.New("the number of old blocks is not enough to reserve")
+	}
+
+	var oldOffSet uint64
+	if interrupt {
+		// The interrupt scenario within this function is specific to the case where the old and new
+		// ancientDBs exist concurrently: the last version of the offset must be used for the old
+		// ancientDB, because the current offset actually belongs to the new ancientDB_Backup, while
+		// what we want is the offset of the ancientDB being backed up.
+		oldOffSet = rawdb.ReadOffSetOfLastAncientFreezer(chainDb)
+	} else {
+		// Use the current version of the offset for oldOffSet, because the db being backed up is the current version.
+		oldOffSet = rawdb.ReadOffSetOfCurrentAncientFreezer(chainDb)
+	}
+	log.Info("the oldOffSet is ", "oldOffSet", oldOffSet)
+
+	// Get the start BlockNumber for pruning.
+	startBlockNumber := oldOffSet + itemsOfAncient - p.BlockAmountReserved
+	log.Info("new offset/new startBlockNumber is ", "new offset", startBlockNumber)
+
+	// Create the new ancientDB backup and record both the new and the last version of the offset in the kvDB.
+	// For every round, the new offset equals the startBlockNumber of the ancient backup db.
+	frdbBack, err := rawdb.NewFreezerDb(chainDb, p.newAncientPath, namespace, readonly, startBlockNumber)
+	if err != nil {
+		log.Error("Failed to create ancient freezer backup", "err=", err)
+		return err
+	}
+	defer frdbBack.Close()
+
+	offsetBatch := chainDb.NewBatch()
+	rawdb.WriteOffSetOfCurrentAncientFreezer(offsetBatch, startBlockNumber)
+	rawdb.WriteOffSetOfLastAncientFreezer(offsetBatch, oldOffSet)
+	if err := offsetBatch.Write(); err != nil {
+		log.Crit("Failed to write offset into disk", "err", err)
+	}
+
+	// If this file lock exists, it is guaranteed that the old/new offsets have been updated and the new ancientDB has been created.
+	lock, _, err := fileutil.Flock(filepath.Join(p.newAncientPath, "PRUNEFLOCKBACK"))
+	if err != nil {
+		log.Error("file lock error", "err", err)
+		return err
+	}
+
+	log.Info("prune info", "old offset", oldOffSet, "number of items in ancientDB", itemsOfAncient, "amount to reserve", p.BlockAmountReserved)
+	log.Info("new offset/new startBlockNumber recorded successfully ", "new offset", startBlockNumber)
+
+	start := time.Now()
+	// All ancient data from startBlockNumber onwards should be written into the new ancientDB, ancient_back.
+	for blockNumber := startBlockNumber; blockNumber < itemsOfAncient+oldOffSet; blockNumber++ {
+		blockHash := rawdb.ReadCanonicalHash(chainDb, blockNumber)
+		block := rawdb.ReadBlock(chainDb, blockHash, blockNumber)
+		receipts := rawdb.ReadRawReceipts(chainDb, blockHash, blockNumber)
+		// Read the total difficulty of the block
+		td := rawdb.ReadTd(chainDb, blockHash, blockNumber)
+		if td == nil {
+			return consensus.ErrUnknownAncestor
+		}
+		// Write into new ancient_back db.
+		rawdb.WriteAncientBlock(frdbBack, block, receipts, td)
+		// Print a log every 5s for better tracing.
+		if common.PrettyDuration(time.Since(start)) > common.PrettyDuration(5*time.Second) {
+			log.Info("block backup process running successfully", "current blockNumber for backup", blockNumber)
+			start = time.Now()
+		}
+	}
+	lock.Release()
+	log.Info("block back up done", "current start blockNumber in ancientDB", startBlockNumber)
+	return nil
+}
+
+// BlockPruneBackUp backs up the most recent p.BlockAmountReserved blocks of ancient data from the old ancient db.
+func (p *BlockPruner) BlockPruneBackUp(name string, cache, handles int, namespace string, readonly, interrupt bool) error {
+	start := time.Now()
+	if err := p.backUpOldDb(name, cache, handles, namespace, readonly, interrupt); err != nil {
+		return err
+	}
+
+	log.Info("Block pruning BackUp successfully", "time duration since start is", common.PrettyDuration(time.Since(start)))
+	return nil
+}
+
+func (p *BlockPruner) RecoverInterruption(name string, cache, handles int, namespace string, readonly bool) error {
+	log.Info("RecoverInterruption for block prune")
+	newExist, err := CheckFileExist(p.newAncientPath)
+	if err != nil {
+		log.Error("newAncientDb path error")
+		return err
+	}
+
+	if newExist {
+		log.Info("New ancientDB_backup existed in interruption scenario")
+		flockOfAncientBack, err := CheckFileExist(filepath.Join(p.newAncientPath, "PRUNEFLOCKBACK"))
+		if err != nil {
+			log.Error("Failed to check flock of ancientDB_Back %v", err)
+			return err
+		}
+
+		// Both the old and the new ancientDB exist concurrently.
+		// Delete the new ancientdb directly so pruning restarts from scratch, e.g. path ../chaindb/ancient_backup
+		if err := os.RemoveAll(p.newAncientPath); err != nil {
+			log.Error("Failed to remove old ancient directory %v", err)
+			return err
+		}
+		if flockOfAncientBack {
+			// The old/new offsets have already been updated.
+			if err := p.BlockPruneBackUp(name, cache, handles, namespace, readonly, true); err != nil {
+				log.Error("Failed to prune")
+				return err
+			}
+		} else {
+			// The flock did not exist and the new offset was not updated, so handle this case as usual.
+			if err := p.BlockPruneBackUp(name, cache, handles, namespace, readonly, false); err != nil {
+				log.Error("Failed to prune")
+				return err
+			}
+		}
+
+		if err := p.AncientDbReplacer(); err != nil {
+			log.Error("Failed to replace ancientDB")
+			return err
+		}
+	} else {
+		log.Info("New ancientDB_backup did not exist in interruption scenario")
+		// The new ancientDB was not even created; just back up from startBlockNumber as usual.
+		// In this case the new offset has not been written into the kvDB yet.
+		if err := p.BlockPruneBackUp(name, cache, handles, namespace, readonly, false); err != nil {
+			log.Error("Failed to prune")
+			return err
+		}
+		if err := p.AncientDbReplacer(); err != nil {
+			log.Error("Failed to replace ancientDB")
+			return err
+		}
+	}
+
+	return nil
+}
+
+func CheckFileExist(path string) (bool, error) {
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			// Indicating the file didn't exist.
+			return false, nil
+		}
+		return true, err
+	}
+	return true, nil
+}
+
+func (p *BlockPruner) AncientDbReplacer() error {
+	// Delete the old ancientdb directly, e.g. path ../chaindb/ancient
+	if err := os.RemoveAll(p.oldAncientPath); err != nil {
+		log.Error("Failed to remove old ancient directory %v", err)
+		return err
+	}
+
+	// Rename the new ancientdb path to the old one
+	if err := os.Rename(p.newAncientPath, p.oldAncientPath); err != nil {
+		log.Error("Failed to rename new ancient directory")
+		return err
+	}
+	return nil
+}
+
 // Prune deletes all historical state nodes except the nodes belong to the
 // specified state version. If user doesn't specify the state version, use
 // the bottom-most snapshot diff layer as the target.

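The recovery flow above boils down to a three-way decision on whether the backup directory and its PRUNEFLOCKBACK lock exist. A hedged restatement with stubbed I/O (the returned strings name the methods above; no real filesystem work is done here):

package main

import "fmt"

// recoverPrune restates the RecoverInterruption decision tree.
// backupExists means the ancient_backup directory is present;
// flockExists means PRUNEFLOCKBACK was written, i.e. the offsets in the
// kvDB were already updated before the interruption.
func recoverPrune(backupExists, flockExists bool) string {
	if !backupExists {
		// Interrupted before the backup db was created: the new offset was
		// never written, so back up using the current offset.
		return "BlockPruneBackUp(interrupt=false) + AncientDbReplacer"
	}
	// A half-written backup exists either way; remove it and start over.
	if flockExists {
		// Offsets were already rewritten: read the last (pre-update)
		// offset of the old ancientDB instead of the current one.
		return "remove backup + BlockPruneBackUp(interrupt=true) + AncientDbReplacer"
	}
	return "remove backup + BlockPruneBackUp(interrupt=false) + AncientDbReplacer"
}

func main() {
	fmt.Println(recoverPrune(false, false))
	fmt.Println(recoverPrune(true, true))
	fmt.Println(recoverPrune(true, false))
}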
+ 33 - 3
core/state/snapshot/difflayer.go

@@ -118,6 +118,9 @@ type diffLayer struct {
 	storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
 	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
 
+	verifiedCh chan struct{} // the difflayer is verified when verifiedCh is nil or closed
+	valid      bool          // marks whether the difflayer is valid
+
 	diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer
 
 	lock sync.RWMutex
@@ -168,7 +171,7 @@ func (h storageBloomHasher) Sum64() uint64 {
 
 // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
 // level persistent database or a hierarchical diff already.
-func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
 	// Create the new layer with some pre-allocated data segments
 	dl := &diffLayer{
 		parent:      parent,
@@ -177,6 +180,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
 		accountData: accounts,
 		storageData: storage,
 		storageList: make(map[common.Hash][]common.Hash),
+		verifiedCh:  verified,
 	}
 	switch parent := parent.(type) {
 	case *diskLayer:
@@ -256,6 +260,32 @@ func (dl *diffLayer) Root() common.Hash {
 	return dl.root
 }
 
+// WaitAndGetVerifyRes waits until the diff layer has been verified and returns the verification result
+func (dl *diffLayer) WaitAndGetVerifyRes() bool {
+	if dl.verifiedCh == nil {
+		return true
+	}
+	<-dl.verifiedCh
+	return dl.valid
+}
+
+func (dl *diffLayer) MarkValid() {
+	dl.valid = true
+}
+
+// Verified returns whether the difflayer has been verified; it does not indicate whether the difflayer is valid or invalid
+func (dl *diffLayer) Verified() bool {
+	if dl.verifiedCh == nil {
+		return true
+	}
+	select {
+	case <-dl.verifiedCh:
+		return true
+	default:
+		return false
+	}
+}
+
 // Parent returns the subsequent layer of a diff layer.
 func (dl *diffLayer) Parent() snapshot {
 	return dl.parent
@@ -423,8 +453,8 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items.
-func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
-	return newDiffLayer(dl, blockRoot, destructs, accounts, storage)
+func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
+	return newDiffLayer(dl, blockRoot, destructs, accounts, storage, verified)
 }
 
 // flatten pushes all data from this point downwards, flattening everything into

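verifiedCh follows the usual closed-channel idiom: a nil channel means no verification is pending, a closed channel means verification has finished, and valid must be set before the close. A minimal sketch of the idiom (an illustrative type, outside this patch):

package main

import "fmt"

type layer struct {
	verifiedCh chan struct{} // nil or closed => verification finished
	valid      bool
}

// verified reports completion without blocking, mirroring Verified().
func (l *layer) verified() bool {
	if l.verifiedCh == nil {
		return true
	}
	select {
	case <-l.verifiedCh:
		return true
	default:
		return false
	}
}

func main() {
	done := make(chan struct{})
	l := &layer{verifiedCh: done}
	fmt.Println(l.verified()) // false: still pending
	l.valid = true            // MarkValid must happen before the close
	close(done)
	fmt.Println(l.verified(), l.valid) // true true
}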
+ 18 - 18
core/state/snapshot/difflayer_test.go

@@ -79,11 +79,11 @@ func TestMergeBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
-	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
-	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
+	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
+	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
 
@@ -151,13 +151,13 @@ func TestMergeDelete(t *testing.T) {
 		}
 	}
 	// Add some flipAccs-flopping layers on top
-	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
-	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
-	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
-	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
-	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
+	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage, nil)
+	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
+	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
+	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage, nil)
+	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage, nil)
 
 	if data, _ := child.Account(h1); data == nil {
 		t.Errorf("last diff layer: expected %x account to be non-nil", h1)
@@ -209,7 +209,7 @@ func TestInsertAndMerge(t *testing.T) {
 			accounts  = make(map[common.Hash][]byte)
 			storage   = make(map[common.Hash]map[common.Hash][]byte)
 		)
-		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
+		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage, nil)
 	}
 	{
 		var (
@@ -220,7 +220,7 @@ func TestInsertAndMerge(t *testing.T) {
 		accounts[acc] = randomAccount()
 		storage[acc] = make(map[common.Hash][]byte)
 		storage[acc][slot] = []byte{0x01}
-		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
 	}
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
@@ -256,7 +256,7 @@ func BenchmarkSearch(b *testing.B) {
 		for i := 0; i < 10000; i++ {
 			accounts[randomHash()] = randomAccount()
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
 	}
 	var layer snapshot
 	layer = emptyLayer()
@@ -298,7 +298,7 @@ func BenchmarkSearchSlot(b *testing.B) {
 			accStorage[randomHash()] = value
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
 	}
 	var layer snapshot
 	layer = emptyLayer()
@@ -336,7 +336,7 @@ func BenchmarkFlatten(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -386,7 +386,7 @@ func BenchmarkJournal(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage, nil)
 	}
 	layer := snapshot(new(diskLayer))
 	for i := 1; i < 128; i++ {

+ 12 - 2
core/state/snapshot/disklayer.go

@@ -49,6 +49,16 @@ func (dl *diskLayer) Root() common.Hash {
 	return dl.root
 }
 
+func (dl *diskLayer) WaitAndGetVerifyRes() bool {
+	return true
+}
+
+func (dl *diskLayer) MarkValid() {}
+
+func (dl *diskLayer) Verified() bool {
+	return true
+}
+
 // Parent always returns nil as there's no layer below the disk.
 func (dl *diskLayer) Parent() snapshot {
 	return nil
@@ -161,6 +171,6 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items. Note, the maps are retained by the method to avoid
 // copying everything.
-func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
-	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
+func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
+	return newDiffLayer(dl, blockHash, destructs, accounts, storage, verified)
 }

+ 4 - 4
core/state/snapshot/disklayer_test.go

@@ -134,7 +134,7 @@ func TestDiskMerge(t *testing.T) {
 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
 		conDelNoCache: {conDelNoCacheSlot: nil},
 		conDelCache:   {conDelCacheSlot: nil},
-	}); err != nil {
+	}, nil); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	if err := snaps.Cap(diffRoot, 0); err != nil {
@@ -357,7 +357,7 @@ func TestDiskPartialMerge(t *testing.T) {
 			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
 			conDelNoCache: {conDelNoCacheSlot: nil},
 			conDelCache:   {conDelCacheSlot: nil},
-		}); err != nil {
+		}, nil); err != nil {
 			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
 		}
 		if err := snaps.Cap(diffRoot, 0); err != nil {
@@ -468,7 +468,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
 	// Modify or delete some accounts, flatten everything onto disk
 	if err := snaps.update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
 		accTwo: accTwo[:],
-	}, nil); err != nil {
+	}, nil, nil); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	if err := snaps.Cap(diffRoot, 0); err != nil {
@@ -488,7 +488,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
 		accThree: accThree.Bytes(),
 	}, map[common.Hash]map[common.Hash][]byte{
 		accThree: {accThreeSlot: accThreeSlot.Bytes()},
-	}); err != nil {
+	}, nil); err != nil {
 		t.Fatalf("failed to update snapshot tree: %v", err)
 	}
 	diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer)

+ 45 - 45
core/state/snapshot/iterator_test.go

@@ -53,7 +53,7 @@ func TestAccountIteratorBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
+	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage), nil)
 	it := diffLayer.AccountIterator(common.Hash{})
 	verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
 
@@ -91,7 +91,7 @@ func TestStorageIteratorBasics(t *testing.T) {
 		nilStorage[h] = nilstorage
 	}
 	// Add some (identical) layers on top
-	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage))
+	diffLayer := newDiffLayer(emptyLayer(), common.Hash{}, nil, copyAccounts(accounts), copyStorage(storage), nil)
 	for account := range accounts {
 		it, _ := diffLayer.StorageIterator(account, common.Hash{})
 		verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@@ -222,13 +222,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
 
 	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
+		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)
 
 	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
+		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)
 
 	// Verify the single and multi-layer iterators
 	head := snaps.Snapshot(common.HexToHash("0x04"))
@@ -269,13 +269,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil)
 
 	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil)
 
 	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil)
 
 	// Verify the single and multi-layer iterators
 	head := snaps.Snapshot(common.HexToHash("0x04"))
@@ -353,14 +353,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil)
-	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil)
-	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil)
-	snaps.update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil)
-	snaps.update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil)
-	snaps.update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil)
-	snaps.update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil)
-	snaps.update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil)
+	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil, nil)
+	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil, nil)
+	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil, nil)
+	snaps.update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil, nil)
+	snaps.update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil, nil)
+	snaps.update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil, nil)
+	snaps.update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil, nil)
+	snaps.update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil, nil)
 
 	it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{})
 	head := snaps.Snapshot(common.HexToHash("0x09"))
@@ -452,14 +452,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
 		}
 	}
 	// Assemble a stack of snapshots from the account layers
-	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a))
-	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b))
-	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c))
-	snaps.update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d))
-	snaps.update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e))
-	snaps.update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e))
-	snaps.update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g))
-	snaps.update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h))
+	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, randomAccountSet("0xaa"), wrapStorage(a), nil)
+	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, randomAccountSet("0xaa"), wrapStorage(b), nil)
+	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, randomAccountSet("0xaa"), wrapStorage(c), nil)
+	snaps.update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, randomAccountSet("0xaa"), wrapStorage(d), nil)
+	snaps.update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil)
+	snaps.update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, randomAccountSet("0xaa"), wrapStorage(e), nil)
+	snaps.update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, randomAccountSet("0xaa"), wrapStorage(g), nil)
+	snaps.update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, randomAccountSet("0xaa"), wrapStorage(h), nil)
 
 	it, _ := snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
 	head := snaps.Snapshot(common.HexToHash("0x09"))
@@ -522,7 +522,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
 		},
 	}
 	for i := 1; i < 128; i++ {
-		snaps.update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
+		snaps.update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil)
 	}
 	// Iterate the entire stack and ensure everything is hit only once
 	head := snaps.Snapshot(common.HexToHash("0x80"))
@@ -567,13 +567,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
 	}
 	// Create a stack of diffs on top
 	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
 
 	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
+		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)
 
 	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
+		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)
 
 	// Create an iterator and flatten the data from underneath it
 	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
@@ -598,13 +598,13 @@ func TestAccountIteratorSeek(t *testing.T) {
 		},
 	}
 	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil)
+		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil)
 
 	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xbb", "0xdd", "0xf0"), nil)
+		randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil)
 
 	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xcc", "0xf0", "0xff"), nil)
+		randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil)
 
 	// Account set is now
 	// 02: aa, ee, f0, ff
@@ -662,13 +662,13 @@ func TestStorageIteratorSeek(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil)
 
 	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil)
 
 	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil)
 
 	// Account set is now
 	// 02: 01, 03, 05
@@ -725,17 +725,17 @@ func TestAccountIteratorDeletions(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"),
-		nil, randomAccountSet("0x11", "0x22", "0x33"), nil)
+		nil, randomAccountSet("0x11", "0x22", "0x33"), nil, nil)
 
 	deleted := common.HexToHash("0x22")
 	destructed := map[common.Hash]struct{}{
 		deleted: {},
 	}
 	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"),
-		destructed, randomAccountSet("0x11", "0x33"), nil)
+		destructed, randomAccountSet("0x11", "0x33"), nil, nil)
 
 	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"),
-		nil, randomAccountSet("0x33", "0x44", "0x55"), nil)
+		nil, randomAccountSet("0x33", "0x44", "0x55"), nil, nil)
 
 	// The output should be 11,33,44,55
 	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{})
@@ -771,10 +771,10 @@ func TestStorageIteratorDeletions(t *testing.T) {
 	}
 	// Stack three diff layers on top with various overlaps
 	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil)
 
 	snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil)
 
 	// The output should be 02,04,05,06
 	it, _ := snaps.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
@@ -790,7 +790,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
 	destructed := map[common.Hash]struct{}{
 		common.HexToHash("0xaa"): {},
 	}
-	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil)
+	snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), destructed, nil, nil, nil)
 
 	it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
 	verifyIterator(t, 0, it, verifyStorage)
@@ -798,7 +798,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
 
 	// Re-insert the slots of the same account
 	snaps.update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil,
-		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil))
+		randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil)
 
 	// The output should be 07,08,09
 	it, _ = snaps.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
@@ -806,7 +806,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
 	it.Release()
 
 	// Destruct the whole storage but re-create the account in the same layer
-	snaps.update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil))
+	snaps.update(common.HexToHash("0x06"), common.HexToHash("0x05"), destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil), nil)
 	it, _ = snaps.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
 	verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
 	it.Release()
@@ -848,7 +848,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
 		},
 	}
 	for i := 1; i <= 100; i++ {
-		snaps.update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil)
+		snaps.update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil, nil)
 	}
 	// We call this once before the benchmark, so the creation of
 	// sorted accountlists are not included in the results.
@@ -943,9 +943,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
 			base.root: base,
 		},
 	}
-	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil)
+	snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil, nil)
 	for i := 2; i <= 100; i++ {
-		snaps.update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil)
+		snaps.update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil, nil)
 	}
 	// We call this once before the benchmark, so the creation of
 	// sorted accountlists are not included in the results.

+ 1 - 1
core/state/snapshot/journal.go

@@ -243,7 +243,7 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
 		}
 		storageData[entry.Hash] = slots
 	}
-	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r)
+	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData, nil), r)
 }
 
 // Journal terminates any in-progress snapshot generation, also implicitly pushing

+ 14 - 5
core/state/snapshot/snapshot.go

@@ -101,6 +101,15 @@ type Snapshot interface {
 	// Root returns the root hash for which this snapshot was made.
 	Root() common.Hash
 
+	// WaitAndGetVerifyRes waits until the snapshot has been verified and returns the verification result
+	WaitAndGetVerifyRes() bool
+
+	// Verified returns whether the snapshot is verified
+	Verified() bool
+
+	// MarkValid stores the verification result
+	MarkValid()
+
 	// Account directly retrieves the account associated with a particular hash in
 	// the snapshot slim data format.
 	Account(hash common.Hash) (*Account, error)
@@ -130,7 +139,7 @@ type snapshot interface {
 	// the specified data items.
 	//
 	// Note, the maps are retained by the method to avoid copying everything.
-	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
+	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer
 
 	// Journal commits an entire diff hierarchy to disk into a single journal entry.
 	// This is meant to be used during shutdown to persist the snapshot without
@@ -322,14 +331,14 @@ func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
 	return ret
 }
 
-func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Address]struct{}, accounts map[common.Address][]byte, storage map[common.Address]map[string][]byte) error {
+func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Address]struct{}, accounts map[common.Address][]byte, storage map[common.Address]map[string][]byte, verified chan struct{}) error {
 	hashDestructs, hashAccounts, hashStorage := transformSnapData(destructs, accounts, storage)
-	return t.update(blockRoot, parentRoot, hashDestructs, hashAccounts, hashStorage)
+	return t.update(blockRoot, parentRoot, hashDestructs, hashAccounts, hashStorage, verified)
 }
 
 // Update adds a new snapshot into the tree, if that can be linked to an existing
 // old parent. It is disallowed to insert a disk layer (the origin of all).
-func (t *Tree) update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+func (t *Tree) update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) error {
 	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
 	// special case that can only happen for Clique networks where empty blocks
 	// don't modify the state (0 block subsidy).
@@ -344,7 +353,7 @@ func (t *Tree) update(blockRoot common.Hash, parentRoot common.Hash, destructs m
 	if parent == nil {
 		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
 	}
-	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
+	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage, verified)
 
 	// Save the new snapshot for later
 	t.lock.Lock()

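A hedged sketch of how a caller can drive the new verified parameter (hypothetical wiring; the real producer is the block processor): insert the layer with a pending channel, verify asynchronously, then mark it valid and close the channel.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Pending verification channel handed to Tree.Update / newDiffLayer.
	verified := make(chan struct{})
	valid := false // set by MarkValid before the channel is closed

	// t.Update(blockRoot, parentRoot, destructs, accounts, storage, verified)
	// would be called here with the still-open channel.

	go func() { // asynchronous verification of the block's state root
		time.Sleep(10 * time.Millisecond)
		valid = true    // diffLayer.MarkValid: record the result first...
		close(verified) // ...then wake everyone blocked on the channel
	}()

	<-verified // WaitAndGetVerifyRes blocks here until the close
	fmt.Println("verified:", valid)
}

The close-after-MarkValid ordering is what lets WaitAndGetVerifyRes read valid without further synchronization: the channel close establishes the necessary happens-before edge.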
+ 12 - 12
core/state/snapshot/snapshot_test.go

@@ -105,7 +105,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+	if err := snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 2 {
@@ -149,10 +149,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+	if err := snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+	if err := snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 3 {
@@ -197,13 +197,13 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	accounts := map[common.Hash][]byte{
 		common.HexToHash("0xa1"): randomAccount(),
 	}
-	if err := snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil {
+	if err := snaps.update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil {
+	if err := snaps.update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
-	if err := snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil {
+	if err := snaps.update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil, nil); err != nil {
 		t.Fatalf("failed to create a diff layer: %v", err)
 	}
 	if n := len(snaps.layers); n != 4 {
@@ -257,12 +257,12 @@ func TestPostCapBasicDataAccess(t *testing.T) {
 		},
 	}
 	// The lowest difflayer
-	snaps.update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil)
-	snaps.update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil)
-	snaps.update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil)
+	snaps.update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil, nil)
+	snaps.update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil, nil)
+	snaps.update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil, nil)
 
-	snaps.update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
-	snaps.update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
+	snaps.update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil, nil)
+	snaps.update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil, nil)
 
 	// checkExist verifies if an account exists in a snapshot
 	checkExist := func(layer *diffLayer, key string) error {
@@ -357,7 +357,7 @@ func TestSnaphots(t *testing.T) {
 	)
 	for i := 0; i < 129; i++ {
 		head = makeRoot(uint64(i + 2))
-		snaps.update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil)
+		snaps.update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil, nil)
 		last = head
 		snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk)
 	}

+ 9 - 3
core/state/state_test.go

@@ -54,7 +54,9 @@ func TestDump(t *testing.T) {
 	// write some of them to the trie
 	s.state.updateStateObject(obj1)
 	s.state.updateStateObject(obj2)
-	s.state.Commit(false)
+	s.state.Finalise(false)
+	s.state.AccountsIntermediateRoot()
+	s.state.Commit(nil)
 
 	// check that DumpToCollector contains the state objects that are in trie
 	got := string(s.state.Dump(false, false, true))
@@ -95,7 +97,9 @@ func TestNull(t *testing.T) {
 	var value common.Hash
 
 	s.state.SetState(address, common.Hash{}, value)
-	s.state.Commit(false)
+	s.state.Finalise(false)
+	s.state.AccountsIntermediateRoot()
+	s.state.Commit(nil)
 
 	if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
 		t.Errorf("expected empty current value, got %x", value)
@@ -167,7 +171,9 @@ func TestSnapshot2(t *testing.T) {
 	so0.deleted = false
 	state.SetStateObject(so0)
 
-	root, _, _ := state.Commit(false)
+	state.Finalise(false)
+	state.AccountsIntermediateRoot()
+	root, _, _ := state.Commit(nil)
 	state, _ = New(root, state.db, state.snaps)
 
 	// and one with deleted == true

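The test changes above reflect the commit path being split into three explicit phases. A minimal sketch of the new ordering (fakeState is a stand-in; judging by the tests, the real Commit takes a callback and returns the root, a diff layer and an error, all simplified here):

package main

import "fmt"

// fakeState stands in for state.StateDB; only the call order matters:
// Commit(false) becomes Finalise + AccountsIntermediateRoot + Commit(nil).
type fakeState struct{ log []string }

func (s *fakeState) Finalise(deleteEmpty bool) { s.log = append(s.log, "Finalise") }
func (s *fakeState) AccountsIntermediateRoot() { s.log = append(s.log, "AccountsIntermediateRoot") }
func (s *fakeState) Commit(cb func()) [32]byte { s.log = append(s.log, "Commit"); return [32]byte{} }

func main() {
	s := &fakeState{}
	s.Finalise(false)            // apply the journal, mark dirty objects
	s.AccountsIntermediateRoot() // hash the account tries (parallelizable)
	s.Commit(nil)                // flush tries and build the diff layer
	fmt.Println(s.log)           // [Finalise AccountsIntermediateRoot Commit]
}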
+ 194 - 62
core/state/statedb.go

@@ -74,14 +74,20 @@ func (n *proofList) Delete(key []byte) error {
 // * Accounts
 type StateDB struct {
 	db             Database
+	prefetcherLock sync.Mutex
 	prefetcher     *triePrefetcher
 	originalRoot   common.Hash // The pre-state root, before any changes were made
+	expectedRoot   common.Hash // The state root in the block header
+	stateRoot      common.Hash // The state root computed by IntermediateRoot
+
 	trie           Trie
 	hasher         crypto.KeccakState
 	diffLayer      *types.DiffLayer
 	diffTries      map[common.Address]Trie
 	diffCode       map[common.Hash][]byte
 	lightProcessed bool
+	fullProcessed  bool
+	pipeCommit     bool
 
 	snapMux       sync.Mutex
 	snaps         *snapshot.Tree
@@ -154,11 +160,6 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB,
 		journal:             newJournal(),
 		hasher:              crypto.NewKeccakState(),
 	}
-	tr, err := db.OpenTrie(root)
-	if err != nil {
-		return nil, err
-	}
-	sdb.trie = tr
 	if sdb.snaps != nil {
 		if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
 			sdb.snapDestructs = make(map[common.Address]struct{})
@@ -166,6 +167,14 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB,
 			sdb.snapStorage = make(map[common.Address]map[string][]byte)
 		}
 	}
+
+	snapVerified := sdb.snap != nil && sdb.snap.Verified()
+	tr, err := db.OpenTrie(root)
+	// Return the error only when (1) opening the trie failed, and (2) the snap is nil, or the snap is non-nil and has finished verification
+	if err != nil && (sdb.snap == nil || snapVerified) {
+		return nil, err
+	}
+	sdb.trie = tr
 	return sdb, nil
 }
 
@@ -173,6 +182,8 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB,
 // state trie concurrently while the state is mutated so that when we reach the
 // commit phase, most of the needed data is already hot.
 func (s *StateDB) StartPrefetcher(namespace string) {
+	s.prefetcherLock.Lock()
+	defer s.prefetcherLock.Unlock()
 	if s.prefetcher != nil {
 		s.prefetcher.close()
 		s.prefetcher = nil
@@ -185,17 +196,36 @@ func (s *StateDB) StartPrefetcher(namespace string) {
 // StopPrefetcher terminates a running prefetcher and reports any leftover stats
 // from the gathered metrics.
 func (s *StateDB) StopPrefetcher() {
+	s.prefetcherLock.Lock()
+	defer s.prefetcherLock.Unlock()
 	if s.prefetcher != nil {
 		s.prefetcher.close()
 		s.prefetcher = nil
 	}
 }
 
+// SetExpectedStateRoot records the state root from the block header
+func (s *StateDB) SetExpectedStateRoot(root common.Hash) {
+	s.expectedRoot = root
+}
+
 // Mark that the block is processed by diff layer
 func (s *StateDB) MarkLightProcessed() {
 	s.lightProcessed = true
 }
 
+// EnablePipeCommit enables the pipelined commit of the statedb
+func (s *StateDB) EnablePipeCommit() {
+	if s.snap != nil {
+		s.pipeCommit = true
+	}
+}
+
+// MarkFullProcessed marks that the block is fully processed
+func (s *StateDB) MarkFullProcessed() {
+	s.fullProcessed = true
+}
+
 func (s *StateDB) IsLightProcessed() bool {
 	return s.lightProcessed
 }
@@ -211,8 +241,20 @@ func (s *StateDB) Error() error {
 	return s.dbErr
 }
 
-func (s *StateDB) Trie() Trie {
-	return s.trie
+// Trie lazily opens the state trie, waiting for pipeline verification first. Not thread safe
+func (s *StateDB) Trie() (Trie, error) {
+	if s.trie == nil {
+		err := s.WaitPipeVerification()
+		if err != nil {
+			return nil, err
+		}
+		tr, err := s.db.OpenTrie(s.originalRoot)
+		if err != nil {
+			return nil, err
+		}
+		s.trie = tr
+	}
+	return s.trie, nil
 }
 
 func (s *StateDB) SetDiff(diffLayer *types.DiffLayer, diffTries map[common.Address]Trie, diffCode map[common.Hash][]byte) {
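Trie() now opens the trie lazily, and only after pipeline verification of the parent snapshot succeeds. A hedged sketch of that gate (stub types; the real calls are s.WaitPipeVerification and db.OpenTrie):

package main

import (
	"errors"
	"fmt"
)

type lazyState struct {
	trie     *string // nil until first use
	root     string
	verifyOK bool // stand-in for the WaitPipeVerification result
}

// trieLazy mirrors the new Trie(): block on verification, then open once.
func (s *lazyState) trieLazy() (*string, error) {
	if s.trie == nil {
		if !s.verifyOK { // WaitPipeVerification failed
			return nil, errors.New("verification on parent snap failed")
		}
		t := "trie@" + s.root // stand-in for db.OpenTrie(s.originalRoot)
		s.trie = &t
	}
	return s.trie, nil
}

func main() {
	s := &lazyState{root: "0xabc", verifyOK: true}
	t, err := s.trieLazy()
	fmt.Println(*t, err)
}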
@@ -360,6 +402,9 @@ func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) {
 // GetProofByHash returns the Merkle proof for a given account.
 func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) {
 	var proof proofList
+	if _, err := s.Trie(); err != nil {
+		return nil, err
+	}
 	err := s.trie.Prove(addrHash[:], 0, &proof)
 	return proof, err
 }
@@ -904,6 +949,17 @@ func (s *StateDB) GetRefund() uint64 {
 	return s.refund
 }
 
+// WaitPipeVerification waits until the parent snapshot has been verified
+func (s *StateDB) WaitPipeVerification() error {
+	// We need to wait until the parent snapshot has been verified before committing on top of it
+	if s.snap != nil {
+		if valid := s.snap.WaitAndGetVerifyRes(); !valid {
+			return fmt.Errorf("verification on parent snap failed")
+		}
+	}
+	return nil
+}
+
 // Finalise finalises the state by removing the s destructed objects and clears
 // the journal as well as the refunds. Finalise, however, will not push any updates
 // into the tries just yet. Only IntermediateRoot or Commit will do that.
@@ -963,22 +1019,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	}
 	// Finalise all the dirty storage states and write them into the tries
 	s.Finalise(deleteEmptyObjects)
+	s.AccountsIntermediateRoot()
+	return s.StateIntermediateRoot()
+}
 
-	// If there was a trie prefetcher operating, it gets aborted and irrevocably
-	// modified after we start retrieving tries. Remove it from the statedb after
-	// this round of use.
-	//
-	// This is weird pre-byzantium since the first tx runs with a prefetcher and
-	// the remainder without, but pre-byzantium even the initial prefetcher is
-	// useless, so no sleep lost.
-	prefetcher := s.prefetcher
-	if s.prefetcher != nil {
-		defer func() {
-			s.prefetcher.close()
-			s.prefetcher = nil
-		}()
-	}
-
+func (s *StateDB) AccountsIntermediateRoot() {
 	tasks := make(chan func())
 	finishCh := make(chan struct{})
 	defer close(finishCh)
@@ -995,6 +1040,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 			}
 		}()
 	}
+
 	// Although naively it makes sense to retrieve the account trie and then do
 	// the contract storage and account updates sequentially, that short circuits
 	// the account prefetcher. Instead, let's process all the storage updates
@@ -1026,6 +1072,27 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 		}
 	}
 	wg.Wait()
+}
+
+func (s *StateDB) StateIntermediateRoot() common.Hash {
+	// If there was a trie prefetcher operating, it gets aborted and irrevocably
+	// modified after we start retrieving tries. Remove it from the statedb after
+	// this round of use.
+	//
+	// This is weird pre-byzantium since the first tx runs with a prefetcher and
+	// the remainder without, but pre-byzantium even the initial prefetcher is
+	// useless, so no sleep lost.
+	prefetcher := s.prefetcher
+	defer func() {
+		s.prefetcherLock.Lock()
+		if s.prefetcher != nil {
+			s.prefetcher.close()
+			s.prefetcher = nil
+		}
+		// unlock explicitly rather than nesting a defer inside this defer
+		s.prefetcherLock.Unlock()
+	}()
+
 	// Now we're about to start to write changes to the trie. The trie is so far
 	// _untouched_. We can check with the prefetcher, if it can give us a trie
 	// which has the same root, but also has some content loaded into it.
@@ -1037,7 +1104,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	if s.trie == nil {
 		tr, err := s.db.OpenTrie(s.originalRoot)
 		if err != nil {
-			panic("Failed to open trie tree")
+			panic(fmt.Sprintf("Failed to open trie tree %s", s.originalRoot))
 		}
 		s.trie = tr
 	}
@@ -1081,9 +1148,12 @@ func (s *StateDB) clearJournalAndRefund() {
 	s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
 }
 
-func (s *StateDB) LightCommit(root common.Hash) (common.Hash, *types.DiffLayer, error) {
+func (s *StateDB) LightCommit() (common.Hash, *types.DiffLayer, error) {
 	codeWriter := s.db.TrieDB().DiskDB().NewBatch()
 
+	// The light process has already verified this root, so expectedRoot is trustworthy.
+	root := s.expectedRoot
+
 	commitFuncs := []func() error{
 		func() error {
 			for codeHash, code := range s.diffCode {
@@ -1171,7 +1241,8 @@ func (s *StateDB) LightCommit(root common.Hash) (common.Hash, *types.DiffLayer,
 				}
 				// Only update if there's a state transition (skip empty Clique blocks)
 				if parent := s.snap.Root(); parent != root {
-					if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
+					// For light commit, always update the snapshot synchronously
+					if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage, nil); err != nil {
 						log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
 					}
 					// Keep n diff layers in the memory
@@ -1205,23 +1276,42 @@ func (s *StateDB) LightCommit(root common.Hash) (common.Hash, *types.DiffLayer,
 }
 
 // Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, *types.DiffLayer, error) {
+func (s *StateDB) Commit(failPostCommitFunc func(), postCommitFuncs ...func() error) (common.Hash, *types.DiffLayer, error) {
 	if s.dbErr != nil {
 		return common.Hash{}, nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
 	}
 	// Finalize any pending changes and merge everything into the tries
-	root := s.IntermediateRoot(deleteEmptyObjects)
 	if s.lightProcessed {
-		return s.LightCommit(root)
+		root, diff, err := s.LightCommit()
+		if err != nil {
+			return root, diff, err
+		}
+		for _, postFunc := range postCommitFuncs {
+			err = postFunc()
+			if err != nil {
+				return root, diff, err
+			}
+		}
+		return root, diff, nil
 	}
 	var diffLayer *types.DiffLayer
+	var verified chan struct{}
+	var snapUpdated chan struct{}
 	if s.snap != nil {
 		diffLayer = &types.DiffLayer{}
 	}
-	commitFuncs := []func() error{
-		func() error {
-			// Commit objects to the trie, measuring the elapsed time
-			tasks := make(chan func(batch ethdb.KeyValueWriter))
+	if s.pipeCommit {
+		// The MPT will be committed asynchronously; set up the signalling channels
+		verified = make(chan struct{})
+		snapUpdated = make(chan struct{})
+	}
+
+	commitTrie := func() error {
+		commitErr := func() error {
+			if s.stateRoot = s.StateIntermediateRoot(); s.fullProcessed && s.expectedRoot != s.stateRoot {
+				return fmt.Errorf("invalid merkle root (remote: %x local: %x)", s.expectedRoot, s.stateRoot)
+			}
+			tasks := make(chan func())
 			taskResults := make(chan error, len(s.stateObjectsDirty))
 			tasksNum := 0
 			finishCh := make(chan struct{})
@@ -1232,17 +1322,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, *types.DiffLayer
 				wg.Add(1)
 				go func() {
 					defer wg.Done()
-					codeWriter := s.db.TrieDB().DiskDB().NewBatch()
 					for {
 						select {
 						case task := <-tasks:
-							task(codeWriter)
+							task()
 						case <-finishCh:
-							if codeWriter.ValueSize() > 0 {
-								if err := codeWriter.Write(); err != nil {
-									log.Crit("Failed to commit dirty codes", "error", err)
-								}
-							}
 							return
 						}
 					}
@@ -1265,11 +1349,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, *types.DiffLayer
 			for addr := range s.stateObjectsDirty {
 				if obj := s.stateObjects[addr]; !obj.deleted {
 					// Write any contract code associated with the state object
-					tasks <- func(codeWriter ethdb.KeyValueWriter) {
-						if obj.code != nil && obj.dirtyCode {
-							rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
-							obj.dirtyCode = false
-						}
+					tasks <- func() {
 						// Write any storage changes in the state object to its storage trie
 						if err := obj.CommitTrie(s.db); err != nil {
 							taskResults <- err
@@ -1289,14 +1369,6 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, *types.DiffLayer
 			}
 			close(finishCh)
 
-			if len(s.stateObjectsDirty) > 0 {
-				s.stateObjectsDirty = make(map[common.Address]struct{}, len(s.stateObjectsDirty)/2)
-			}
-			// Write the account trie changes, measuing the amount of wasted time
-			var start time.Time
-			if metrics.EnabledExpensive {
-				start = time.Now()
-			}
 			// The onleaf func is called _serially_, so we can reuse the same account
 			// for unmarshalling every time.
 			var account Account
@@ -1312,14 +1384,60 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, *types.DiffLayer
 			if err != nil {
 				return err
 			}
-			if metrics.EnabledExpensive {
-				s.AccountCommits += time.Since(start)
-			}
 			if root != emptyRoot {
 				s.db.CacheAccount(root, s.trie)
 			}
+			for _, postFunc := range postCommitFuncs {
+				err = postFunc()
+				if err != nil {
+					return err
+				}
+			}
 			wg.Wait()
 			return nil
+		}()
+
+		if s.pipeCommit {
+			if commitErr == nil {
+				<-snapUpdated
+				s.snaps.Snapshot(s.stateRoot).MarkValid()
+			} else {
+				// The blockchain will do a further rewind if the block write has not finished yet
+				if failPostCommitFunc != nil {
+					<-snapUpdated
+					failPostCommitFunc()
+				}
+				log.Error("state verification failed", "err", commitErr)
+			}
+			close(verified)
+		}
+		return commitErr
+	}
+
+	commitFuncs := []func() error{
+		func() error {
+			codeWriter := s.db.TrieDB().DiskDB().NewBatch()
+			for addr := range s.stateObjectsDirty {
+				if obj := s.stateObjects[addr]; !obj.deleted {
+					if obj.code != nil && obj.dirtyCode {
+						rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+						obj.dirtyCode = false
+						if codeWriter.ValueSize() > ethdb.IdealBatchSize {
+							if err := codeWriter.Write(); err != nil {
+								return err
+							}
+							codeWriter.Reset()
+						}
+					}
+				}
+			}
+			if codeWriter.ValueSize() > 0 {
+				if err := codeWriter.Write(); err != nil {
+					log.Crit("Failed to commit dirty codes", "error", err)
+					return err
+				}
+			}
+			return nil
 		},
 		func() error {
 			// If snapshotting is enabled, update the snapshot tree with this new version
@@ -1327,18 +1445,23 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, *types.DiffLayer
 				if metrics.EnabledExpensive {
 					defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
 				}
+				if s.pipeCommit {
+					defer close(snapUpdated)
+				}
 				// Only update if there's a state transition (skip empty Clique blocks)
-				if parent := s.snap.Root(); parent != root {
-					if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
-						log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
+				if parent := s.snap.Root(); parent != s.expectedRoot {
+					if err := s.snaps.Update(s.expectedRoot, parent, s.snapDestructs, s.snapAccounts, s.snapStorage, verified); err != nil {
+						log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err)
 					}
 					// Keep n diff layers in the memory
 					// - head layer is paired with HEAD state
 					// - head-1 layer is paired with HEAD-1 state
 					// - head-(n-1) layer(bottom-most diff layer) is paired with HEAD-(n-1)state
-					if err := s.snaps.Cap(root, s.snaps.CapLimit()); err != nil {
-						log.Warn("Failed to cap snapshot tree", "root", root, "layers", s.snaps.CapLimit(), "err", err)
-					}
+					go func() {
+						if err := s.snaps.Cap(s.expectedRoot, s.snaps.CapLimit()); err != nil {
+							log.Warn("Failed to cap snapshot tree", "root", s.expectedRoot, "layers", s.snaps.CapLimit(), "err", err)
+						}
+					}()
 				}
 			}
 			return nil
@@ -1350,6 +1473,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, *types.DiffLayer
 			return nil
 		},
 	}
+	if s.pipeCommit {
+		go commitTrie()
+	} else {
+		commitFuncs = append(commitFuncs, commitTrie)
+	}
 	commitRes := make(chan error, len(commitFuncs))
 	for _, f := range commitFuncs {
 		tmpFunc := f
@@ -1363,7 +1491,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, *types.DiffLayer
 			return common.Hash{}, nil, r
 		}
 	}
-	s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
+	root := s.stateRoot
+	if s.pipeCommit {
+		root = s.expectedRoot
+	}
+
 	return root, diffLayer, nil
 }
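
Taken together, these statedb.go hunks split the old IntermediateRoot flow into Finalise, AccountsIntermediateRoot and StateIntermediateRoot, and change Commit to accept callbacks instead of the deleteEmptyObjects flag. A minimal caller-side sketch of the resulting sequence (the helper name and the pipeCommit argument are illustrative, not part of this diff):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// commitBlockState sketches the commit sequence implied by the hunks above;
// error handling around Finalise is unchanged and omitted here.
func commitBlockState(statedb *state.StateDB, block *types.Block, pipeCommit bool) (common.Hash, *types.DiffLayer, error) {
	// The expected root must be set before Commit so the snapshot update and
	// the pipelined verification have a target to compare against.
	statedb.SetExpectedStateRoot(block.Root())
	if pipeCommit {
		statedb.EnablePipeCommit() // silently ignored when no snapshot exists
	}
	// What used to be IntermediateRoot(deleteEmptyObjects) is now explicit stages.
	statedb.Finalise(true)
	statedb.AccountsIntermediateRoot()
	// Commit takes a failure callback (run when async verification fails) plus
	// optional post-commit hooks, instead of the old bool parameter.
	return statedb.Commit(func() {
		// failPostCommitFunc: the blockchain rewinds the half-written block here.
	})
}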
 

+ 26 - 9
core/state/statedb_test.go

@@ -102,7 +102,9 @@ func TestIntermediateLeaks(t *testing.T) {
 	}
 
 	// Commit and cross check the databases.
-	transRoot, _, err := transState.Commit(false)
+	transState.Finalise(false)
+	transState.AccountsIntermediateRoot()
+	transRoot, _, err := transState.Commit(nil)
 	if err != nil {
 		t.Fatalf("failed to commit transition state: %v", err)
 	}
@@ -110,7 +112,9 @@ func TestIntermediateLeaks(t *testing.T) {
 		t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
 	}
 
-	finalRoot, _, err := finalState.Commit(false)
+	finalState.Finalise(false)
+	finalState.AccountsIntermediateRoot()
+	finalRoot, _, err := finalState.Commit(nil)
 	if err != nil {
 		t.Fatalf("failed to commit final state: %v", err)
 	}
@@ -473,7 +477,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
 func TestTouchDelete(t *testing.T) {
 	s := newStateTest()
 	s.state.GetOrNewStateObject(common.Address{})
-	root, _, _ := s.state.Commit(false)
+	root, _, _ := s.state.Commit(nil)
 	s.state, _ = New(root, s.state.db, s.state.snaps)
 
 	snapshot := s.state.Snapshot()
@@ -546,7 +550,9 @@ func TestCopyCommitCopy(t *testing.T) {
 		t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
 	}
 
-	copyOne.Commit(false)
+	copyOne.Finalise(false)
+	copyOne.AccountsIntermediateRoot()
+	copyOne.Commit(nil)
 	if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
 		t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42)
 	}
@@ -631,7 +637,10 @@ func TestCopyCopyCommitCopy(t *testing.T) {
 	if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
 		t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
 	}
-	copyTwo.Commit(false)
+
+	copyTwo.Finalise(false)
+	copyTwo.AccountsIntermediateRoot()
+	copyTwo.Commit(nil)
 	if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
 		t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42)
 	}
@@ -675,7 +684,9 @@ func TestDeleteCreateRevert(t *testing.T) {
 	addr := common.BytesToAddress([]byte("so"))
 	state.SetBalance(addr, big.NewInt(1))
 
-	root, _, _ := state.Commit(false)
+	state.Finalise(false)
+	state.AccountsIntermediateRoot()
+	root, _, _ := state.Commit(nil)
 	state, _ = New(root, state.db, state.snaps)
 
 	// Simulate self-destructing in one transaction, then create-reverting in another
@@ -686,8 +697,10 @@ func TestDeleteCreateRevert(t *testing.T) {
 	state.SetBalance(addr, big.NewInt(2))
 	state.RevertToSnapshot(id)
 
+	state.Finalise(true)
+	state.AccountsIntermediateRoot()
 	// Commit the entire state and make sure we don't crash and have the correct state
-	root, _, _ = state.Commit(true)
+	root, _, _ = state.Commit(nil)
 	state, _ = New(root, state.db, state.snaps)
 
 	if state.getStateObject(addr) != nil {
@@ -712,7 +725,9 @@ func TestMissingTrieNodes(t *testing.T) {
 		a2 := common.BytesToAddress([]byte("another"))
 		state.SetBalance(a2, big.NewInt(100))
 		state.SetCode(a2, []byte{1, 2, 4})
-		root, _, _ = state.Commit(false)
+		state.Finalise(false)
+		state.AccountsIntermediateRoot()
+		root, _, _ = state.Commit(nil)
 		t.Logf("root: %x", root)
 		// force-flush
 		state.Database().TrieDB().Cap(0)
@@ -736,7 +751,9 @@ func TestMissingTrieNodes(t *testing.T) {
 	}
 	// Modify the state
 	state.SetBalance(addr, big.NewInt(2))
-	root, _, err := state.Commit(false)
+	state.Finalise(false)
+	state.AccountsIntermediateRoot()
+	root, _, err := state.Commit(nil)
 	if err == nil {
 		t.Fatalf("expected error, got root :%x", root)
 	}

+ 3 - 1
core/state/sync_test.go

@@ -69,7 +69,9 @@ func makeTestState() (Database, common.Hash, []*testAccount) {
 		state.updateStateObject(obj)
 		accounts = append(accounts, acc)
 	}
-	root, _, _ := state.Commit(false)
+	state.Finalise(false)
+	state.AccountsIntermediateRoot()
+	root, _, _ := state.Commit(nil)
 
 	// Return the generated state
 	return db, root, accounts

+ 15 - 7
core/state/trie_prefetcher.go

@@ -20,7 +20,6 @@ import (
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/gopool"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 )
@@ -106,7 +105,7 @@ func (p *triePrefetcher) close() {
 	for _, fetcher := range p.fetchers {
 		p.abortChan <- fetcher // safe to do multiple times
 		<-fetcher.term
-		if metrics.Enabled {
+		if metrics.EnabledExpensive {
 			if fetcher.root == p.root {
 				p.accountLoadMeter.Mark(int64(len(fetcher.seen)))
 				p.accountDupMeter.Mark(int64(fetcher.dups))
@@ -257,9 +256,7 @@ func newSubfetcher(db Database, root common.Hash, accountHash common.Hash) *subf
 		seen:        make(map[string]struct{}),
 		accountHash: accountHash,
 	}
-	gopool.Submit(func() {
-		sf.loop()
-	})
+	go sf.loop()
 	return sf
 }
 
@@ -322,8 +319,7 @@ func (sf *subfetcher) loop() {
 		trie, err = sf.db.OpenStorageTrie(sf.accountHash, sf.root)
 	}
 	if err != nil {
-		log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
-		return
+		log.Debug("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
 	}
 	sf.trie = trie
 
@@ -332,6 +328,18 @@ func (sf *subfetcher) loop() {
 		select {
 		case <-sf.wake:
 			// Subfetcher was woken up, retrieve any tasks to avoid spinning the lock
+			if sf.trie == nil {
+				if sf.accountHash == emptyAddr {
+					sf.trie, err = sf.db.OpenTrie(sf.root)
+				} else {
+					// only the account hash is needed to open the storage trie
+					sf.trie, err = sf.db.OpenStorageTrie(sf.accountHash, sf.root)
+				}
+				if err != nil {
+					continue
+				}
+			}
+
 			sf.lock.Lock()
 			tasks := sf.tasks
 			sf.tasks = nil

+ 46 - 32
core/state_prefetcher.go

@@ -26,6 +26,8 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 )
 
+const prefetchThread = 2
+
 // statePrefetcher is a basic Prefetcher, which blindly executes a block on top
 // of an arbitrary state with the goal of prefetching potentially useful state
 // data from disk before the main block processor start executing.
@@ -35,51 +37,63 @@ type statePrefetcher struct {
 	engine consensus.Engine    // Consensus engine used for block rewards
 }
 
+// NewStatePrefetcher initialises a new statePrefetcher.
+func NewStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *statePrefetcher {
+	return &statePrefetcher{
+		config: config,
+		bc:     bc,
+		engine: engine,
+	}
+}
+
 // Prefetch processes the state changes according to the Ethereum rules by running
 // the transaction messages using the statedb, but any changes are discarded. The
-// only goal is to pre-cache transaction signatures and state trie nodes.
+// only goal is to pre-cache transaction signatures and warm the snapshot clean-state cache.
 func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) {
 	var (
-		header       = block.Header()
-		gaspool      = new(GasPool).AddGas(block.GasLimit())
-		blockContext = NewEVMBlockContext(header, p.bc, nil)
-		evm          = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg)
-		signer       = types.MakeSigner(p.config, header.Number)
+		header = block.Header()
+		signer = types.MakeSigner(p.config, header.Number)
 	)
-	// Iterate over and process the individual transactions
-	byzantium := p.config.IsByzantium(block.Number())
-	for i, tx := range block.Transactions() {
-		// If block precaching was interrupted, abort
-		if interrupt != nil && atomic.LoadUint32(interrupt) == 1 {
-			return
-		}
-		// Convert the transaction into an executable message and pre-cache its sender
-		msg, err := tx.AsMessage(signer)
-		if err != nil {
-			return // Also invalid block, bail out
-		}
-		statedb.Prepare(tx.Hash(), block.Hash(), i)
-		if err := precacheTransaction(msg, p.config, gaspool, statedb, header, evm); err != nil {
-			return // Ugh, something went horribly wrong, bail out
-		}
-		// If we're pre-byzantium, pre-load trie nodes for the intermediate root
-		if !byzantium {
-			statedb.IntermediateRoot(true)
-		}
+	transactions := block.Transactions()
+	sortTransactions := make([][]*types.Transaction, prefetchThread)
+	for i := 0; i < prefetchThread; i++ {
+		sortTransactions[i] = make([]*types.Transaction, 0, len(transactions)/prefetchThread)
+	}
+	for idx := range transactions {
+		threadIdx := idx % prefetchThread
+		sortTransactions[threadIdx] = append(sortTransactions[threadIdx], transactions[idx])
 	}
-	// If were post-byzantium, pre-load trie nodes for the final root hash
-	if byzantium {
-		statedb.IntermediateRoot(true)
+	// No need to execute the first batch, since the main processor will do it.
+	for i := 0; i < prefetchThread; i++ {
+		go func(idx int) {
+			newStatedb := statedb.Copy()
+			gaspool := new(GasPool).AddGas(block.GasLimit())
+			blockContext := NewEVMBlockContext(header, p.bc, nil)
+			evm := vm.NewEVM(blockContext, vm.TxContext{}, newStatedb, p.config, cfg)
+			// Iterate over and process the individual transactions
+			for i, tx := range sortTransactions[idx] {
+				// If block precaching was interrupted, abort
+				if interrupt != nil && atomic.LoadUint32(interrupt) == 1 {
+					return
+				}
+				// Convert the transaction into an executable message and pre-cache its sender
+				msg, err := tx.AsMessage(signer)
+				if err != nil {
+					return // Also invalid block, bail out
+				}
+				newStatedb.Prepare(tx.Hash(), header.Hash(), i)
+				precacheTransaction(msg, p.config, gaspool, newStatedb, header, evm)
+			}
+		}(i)
 	}
 }
 
 // precacheTransaction attempts to apply a transaction to the given state database
 // and uses the input parameters for its environment. The goal is not to execute
 // the transaction successfully, rather to warm up touched data slots.
-func precacheTransaction(msg types.Message, config *params.ChainConfig, gaspool *GasPool, statedb *state.StateDB, header *types.Header, evm *vm.EVM) error {
+func precacheTransaction(msg types.Message, config *params.ChainConfig, gaspool *GasPool, statedb *state.StateDB, header *types.Header, evm *vm.EVM) {
 	// Update the evm with the new transaction context.
 	evm.Reset(NewEVMTxContext(msg), statedb)
 	// Add addresses to access list if applicable
-	_, err := ApplyMessage(evm, msg, gaspool)
-	return err
+	ApplyMessage(evm, msg, gaspool)
 }

+ 13 - 5
core/state_processor.go

@@ -123,6 +123,10 @@ func (p *LightStateProcessor) Process(block *types.Block, statedb *state.StateDB
 			statedb.StopPrefetcher()
 			parent := p.bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
 			statedb, err = state.New(parent.Root, p.bc.stateCache, p.bc.snaps)
+			statedb.SetExpectedStateRoot(block.Root())
+			if p.bc.pipeCommit {
+				statedb.EnablePipeCommit()
+			}
 			if err != nil {
 				return statedb, nil, nil, 0, err
 			}
@@ -148,9 +152,12 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty
 	for _, c := range diffLayer.Codes {
 		fullDiffCode[c.Hash] = c.Code
 	}
-
+	stateTrie, err := statedb.Trie()
+	if err != nil {
+		return nil, nil, 0, err
+	}
 	for des := range snapDestructs {
-		statedb.Trie().TryDelete(des[:])
+		stateTrie.TryDelete(des[:])
 	}
 	threads := gopool.Threads(len(snapAccounts))
 
@@ -191,7 +198,7 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty
 				// fetch previous state
 				var previousAccount state.Account
 				stateMux.Lock()
-				enc, err := statedb.Trie().TryGet(diffAccount[:])
+				enc, err := stateTrie.TryGet(diffAccount[:])
 				stateMux.Unlock()
 				if err != nil {
 					errChan <- err
@@ -303,7 +310,7 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty
 					return
 				}
 				stateMux.Lock()
-				err = statedb.Trie().TryUpdate(diffAccount[:], bz)
+				err = stateTrie.TryUpdate(diffAccount[:], bz)
 				stateMux.Unlock()
 				if err != nil {
 					errChan <- err
@@ -330,7 +337,7 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty
 	}
 
 	// Validate in advance so that we can fall back to the full process
-	if err := p.bc.validator.ValidateState(block, statedb, diffLayer.Receipts, gasUsed); err != nil {
+	if err := p.bc.validator.ValidateState(block, statedb, diffLayer.Receipts, gasUsed, false); err != nil {
 		log.Error("validate state failed during diff sync", "error", err)
 		return nil, nil, 0, err
 	}
@@ -397,6 +404,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 
 	// initialise bloom processors
 	bloomProcessors := NewAsyncReceiptBloomGenerator(txNum)
+	statedb.MarkFullProcessed()
 
 	// usually do have two tx, one for validator set contract, another for system reward contract.
 	systemTxs := make([]*types.Transaction, 0, 2)

+ 1 - 1
core/types.go

@@ -31,7 +31,7 @@ type Validator interface {
 
 	// ValidateState validates the given statedb and optionally the receipts and
 	// gas used.
-	ValidateState(block *types.Block, state *state.StateDB, receipts types.Receipts, usedGas uint64) error
+	ValidateState(block *types.Block, state *state.StateDB, receipts types.Receipts, usedGas uint64, skipHeavyVerify bool) error
 }
 
 // Prefetcher is an interface for pre-caching transaction signatures and state.

+ 7 - 2
core/vm/access_list_tracer.go

@@ -141,7 +141,7 @@ func (a *AccessListTracer) CaptureStart(env *EVM, from common.Address, to common
 }
 
 // CaptureState captures all opcodes that touch storage or addresses and adds them to the accesslist.
-func (a *AccessListTracer) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
+func (a *AccessListTracer) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
 	stack := scope.Stack
 	if (op == SLOAD || op == SSTORE) && stack.len() >= 1 {
 		slot := common.Hash(stack.data[stack.len()-1].Bytes32())
@@ -161,11 +161,16 @@ func (a *AccessListTracer) CaptureState(env *EVM, pc uint64, op OpCode, gas, cos
 	}
 }
 
-func (*AccessListTracer) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
+func (*AccessListTracer) CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
 }
 
 func (*AccessListTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}
 
+func (*AccessListTracer) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (*AccessListTracer) CaptureExit(output []byte, gasUsed uint64, err error) {}
+
 // AccessList returns the current accesslist maintained by the tracer.
 func (a *AccessListTracer) AccessList() types.AccessList {
 	return a.list.accessList()

+ 61 - 15
core/vm/evm.go

@@ -232,9 +232,14 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
 	if !evm.StateDB.Exist(addr) {
 		if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 {
 			// Calling a non existing account, don't do anything, but ping the tracer
-			if evm.vmConfig.Debug && evm.depth == 0 {
-				evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
-				evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil)
+			if evm.vmConfig.Debug {
+				if evm.depth == 0 {
+					evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
+					evm.vmConfig.Tracer.CaptureEnd(ret, 0, 0, nil)
+				} else {
+					evm.vmConfig.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value)
+					evm.vmConfig.Tracer.CaptureExit(ret, 0, nil)
+				}
 			}
 			return nil, gas, nil
 		}
@@ -243,11 +248,19 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
 	evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value)
 
 	// Capture the tracer start/end events in debug mode
-	if evm.vmConfig.Debug && evm.depth == 0 {
-		evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
-		defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters
-			evm.vmConfig.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err)
-		}(gas, time.Now())
+	if evm.vmConfig.Debug {
+		if evm.depth == 0 {
+			evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
+			defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters
+				evm.vmConfig.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err)
+			}(gas, time.Now())
+		} else {
+			// Handle tracer events for entering and exiting a call frame
+			evm.vmConfig.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value)
+			defer func(startGas uint64) {
+				evm.vmConfig.Tracer.CaptureExit(ret, startGas-gas, err)
+			}(gas)
+		}
 	}
 
 	if isPrecompile {
@@ -307,6 +320,14 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
 	}
 	var snapshot = evm.StateDB.Snapshot()
 
+	// Invoke tracer hooks that signal entering/exiting a call frame
+	if evm.vmConfig.Debug {
+		evm.vmConfig.Tracer.CaptureEnter(CALLCODE, caller.Address(), addr, input, gas, value)
+		defer func(startGas uint64) {
+			evm.vmConfig.Tracer.CaptureExit(ret, startGas-gas, err)
+		}(gas)
+	}
+
 	// It is allowed to call precompiles, even via delegatecall
 	if p, isPrecompile := evm.precompile(addr); isPrecompile {
 		ret, gas, err = RunPrecompiledContract(p, input, gas)
@@ -343,6 +364,14 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
 	}
 	var snapshot = evm.StateDB.Snapshot()
 
+	// Invoke tracer hooks that signal entering/exiting a call frame
+	if evm.vmConfig.Debug {
+		evm.vmConfig.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, nil)
+		defer func(startGas uint64) {
+			evm.vmConfig.Tracer.CaptureExit(ret, startGas-gas, err)
+		}(gas)
+	}
+
 	// It is allowed to call precompiles, even via delegatecall
 	if p, isPrecompile := evm.precompile(addr); isPrecompile {
 		ret, gas, err = RunPrecompiledContract(p, input, gas)
@@ -388,6 +417,14 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
 	// future scenarios
 	evm.StateDB.AddBalance(addr, big0)
 
+	// Invoke tracer hooks that signal entering/exiting a call frame
+	if evm.vmConfig.Debug {
+		evm.vmConfig.Tracer.CaptureEnter(STATICCALL, caller.Address(), addr, input, gas, nil)
+		defer func(startGas uint64) {
+			evm.vmConfig.Tracer.CaptureExit(ret, startGas-gas, err)
+		}(gas)
+	}
+
 	if p, isPrecompile := evm.precompile(addr); isPrecompile {
 		ret, gas, err = RunPrecompiledContract(p, input, gas)
 	} else {
@@ -427,7 +464,7 @@ func (c *codeAndHash) Hash() common.Hash {
 }
 
 // create creates a new contract using code as deployment code.
-func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) {
+func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address, typ OpCode) ([]byte, common.Address, uint64, error) {
 	// Depth check execution. Fail if we're trying to execute above the
 	// limit.
 	if evm.depth > int(params.CallCreateDepth) {
@@ -465,9 +502,14 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 		return nil, address, gas, nil
 	}
 
-	if evm.vmConfig.Debug && evm.depth == 0 {
-		evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
+	if evm.vmConfig.Debug {
+		if evm.depth == 0 {
+			evm.vmConfig.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
+		} else {
+			evm.vmConfig.Tracer.CaptureEnter(typ, caller.Address(), address, codeAndHash.code, gas, value)
+		}
 	}
+
 	start := time.Now()
 
 	ret, err := run(evm, contract, nil, false)
@@ -500,8 +542,12 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 		}
 	}
 
-	if evm.vmConfig.Debug && evm.depth == 0 {
-		evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
+	if evm.vmConfig.Debug {
+		if evm.depth == 0 {
+			evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
+		} else {
+			evm.vmConfig.Tracer.CaptureExit(ret, gas-contract.Gas, err)
+		}
 	}
 	return ret, address, contract.Gas, err
 }
@@ -509,7 +555,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 // Create creates a new contract using code as deployment code.
 func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
 	contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address()))
-	return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr)
+	return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr, CREATE)
 }
 
 // Create2 creates a new contract using code as deployment code.
@@ -519,7 +565,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
 func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
 	codeAndHash := &codeAndHash{code: code}
 	contractAddr = crypto.CreateAddress2(caller.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes())
-	return evm.create(caller, codeAndHash, gas, endowment, contractAddr)
+	return evm.create(caller, codeAndHash, gas, endowment, contractAddr, CREATE2)
 }
 
 // ChainConfig returns the environment's chain configuration
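
With these hooks, nested frames are now reported to the tracer instead of being invisible below depth zero. A hypothetical event order for a transaction whose outer frame performs a single CALL, assuming Debug is enabled:

// CaptureStart(evm, caller, to, false, input, gas, value)  // depth 0 opens
//   CaptureState(...)                                      // outer-frame opcodes
//   CaptureEnter(CALL, caller, callee, input, gas, value)  // depth 1 opens
//     CaptureState(...)                                    // inner-frame opcodes
//   CaptureExit(ret, gasUsed, err)                         // depth 1 closes (deferred)
//   CaptureState(...)                                      // remaining outer opcodes
// CaptureEnd(ret, gasUsed, elapsed, err)                   // depth 0 closes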

+ 4 - 0
core/vm/instructions.go

@@ -791,6 +791,10 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
 	balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
 	interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
 	interpreter.evm.StateDB.Suicide(scope.Contract.Address())
+	if interpreter.cfg.Debug {
+		interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
+		interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil)
+	}
 	return nil, nil
 }
 

+ 10 - 10
core/vm/interpreter.go

@@ -34,10 +34,10 @@ var EVMInterpreterPool = sync.Pool{
 
 // Config are the configuration options for the Interpreter
 type Config struct {
-	Debug                   bool   // Enables debugging
-	Tracer                  Tracer // Opcode logger
-	NoRecursion             bool   // Disables call, callcode, delegate call and create
-	EnablePreimageRecording bool   // Enables recording of SHA3/keccak preimages
+	Debug                   bool      // Enables debugging
+	Tracer                  EVMLogger // Opcode logger
+	NoRecursion             bool      // Disables call, callcode, delegate call and create
+	EnablePreimageRecording bool      // Enables recording of SHA3/keccak preimages
 
 	JumpTable [256]*operation // EVM instruction table, automatically populated if unset
 
@@ -183,9 +183,9 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 		pc   = uint64(0) // program counter
 		cost uint64
 		// copies used by tracer
-		pcCopy  uint64 // needed for the deferred Tracer
-		gasCopy uint64 // for Tracer to log gas remaining before execution
-		logged  bool   // deferred Tracer should ignore already logged steps
+		pcCopy  uint64 // needed for the deferred EVMLogger
+		gasCopy uint64 // for EVMLogger to log gas remaining before execution
+		logged  bool   // deferred EVMLogger should ignore already logged steps
 		res     []byte // result of the opcode execution function
 	)
 	// Don't move this deferred function, it's placed before the capturestate-deferred method,
@@ -200,9 +200,9 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 		defer func() {
 			if err != nil {
 				if !logged {
-					in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
+					in.cfg.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
 				} else {
-					in.cfg.Tracer.CaptureFault(in.evm, pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err)
+					in.cfg.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err)
 				}
 			}
 		}()
@@ -284,7 +284,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 		}
 
 		if in.cfg.Debug {
-			in.cfg.Tracer.CaptureState(in.evm, pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
+			in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
 			logged = true
 		}
 

+ 39 - 23
core/vm/logger.go

@@ -47,12 +47,12 @@ func (s Storage) Copy() Storage {
 
 // LogConfig are the configuration options for structured logger the EVM
 type LogConfig struct {
-	DisableMemory     bool // disable memory capture
-	DisableStack      bool // disable stack capture
-	DisableStorage    bool // disable storage capture
-	DisableReturnData bool // disable return data capture
-	Debug             bool // print output during capture end
-	Limit             int  // maximum length of output, but zero means unlimited
+	EnableMemory     bool // enable memory capture
+	DisableStack     bool // disable stack capture
+	DisableStorage   bool // disable storage capture
+	EnableReturnData bool // enable return data capture
+	Debug            bool // print output during capture end
+	Limit            int  // maximum length of output, but zero means unlimited
 	// Chain overrides, can be used to execute a trace using future fork rules
 	Overrides *params.ChainConfig `json:"overrides,omitempty"`
 }
@@ -99,25 +99,28 @@ func (s *StructLog) ErrorString() string {
 	return ""
 }
 
-// Tracer is used to collect execution traces from an EVM transaction
+// EVMLogger is used to collect execution traces from an EVM transaction
 // execution. CaptureState is called for each step of the VM with the
 // current VM state.
 // Note that reference types are actual VM data structures; make copies
 // if you need to retain them beyond the current call.
-type Tracer interface {
+type EVMLogger interface {
 	CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int)
-	CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error)
-	CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error)
+	CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error)
+	CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int)
+	CaptureExit(output []byte, gasUsed uint64, err error)
+	CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error)
 	CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error)
 }
 
-// StructLogger is an EVM state logger and implements Tracer.
+// StructLogger is an EVM state logger and implements EVMLogger.
 //
 // StructLogger can capture state based on the given Log configuration and also keeps
 // a track record of modified storage which is used in reporting snapshots of the
 // contract their storage.
 type StructLogger struct {
 	cfg LogConfig
+	env *EVM
 
 	storage map[common.Address]Storage
 	logs    []StructLog
@@ -144,14 +147,15 @@ func (l *StructLogger) Reset() {
 	l.err = nil
 }
 
-// CaptureStart implements the Tracer interface to initialize the tracing operation.
+// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
 func (l *StructLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+	l.env = env
 }
 
 // CaptureState logs a new structured log message and pushes it out to the environment
 //
 // CaptureState also tracks SLOAD/SSTORE ops to track storage change.
-func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
+func (l *StructLogger) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
 	memory := scope.Memory
 	stack := scope.Stack
 	contract := scope.Contract
@@ -161,7 +165,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
 	}
 	// Copy a snapshot of the current memory state to a new buffer
 	var mem []byte
-	if !l.cfg.DisableMemory {
+	if l.cfg.EnableMemory {
 		mem = make([]byte, len(memory.Data()))
 		copy(mem, memory.Data())
 	}
@@ -185,7 +189,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
 		if op == SLOAD && stack.len() >= 1 {
 			var (
 				address = common.Hash(stack.data[stack.len()-1].Bytes32())
-				value   = env.StateDB.GetState(contract.Address(), address)
+				value   = l.env.StateDB.GetState(contract.Address(), address)
 			)
 			l.storage[contract.Address()][address] = value
 			storage = l.storage[contract.Address()].Copy()
@@ -200,18 +204,18 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
 		}
 	}
 	var rdata []byte
-	if !l.cfg.DisableReturnData {
+	if l.cfg.EnableReturnData {
 		rdata = make([]byte, len(rData))
 		copy(rdata, rData)
 	}
 	// create a new snapshot of the EVM.
-	log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rdata, storage, depth, env.StateDB.GetRefund(), err}
+	log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rdata, storage, depth, l.env.StateDB.GetRefund(), err}
 	l.logs = append(l.logs, log)
 }
 
-// CaptureFault implements the Tracer interface to trace an execution fault
+// CaptureFault implements the EVMLogger interface to trace an execution fault
 // while running an opcode.
-func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
+func (l *StructLogger) CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
 }
 
 // CaptureEnd is called after the call finishes to finalize the tracing.
@@ -226,6 +230,11 @@ func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration
 	}
 }
 
+func (l *StructLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (l *StructLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
+
 // StructLogs returns the captured log entries.
 func (l *StructLogger) StructLogs() []StructLog { return l.logs }
 
@@ -285,12 +294,13 @@ func WriteLogs(writer io.Writer, logs []*types.Log) {
 type mdLogger struct {
 	out io.Writer
 	cfg *LogConfig
+	env *EVM
 }
 
 // NewMarkdownLogger creates a logger which outputs information in a format adapted
 // for human readability, and is also a valid markdown table
 func NewMarkdownLogger(cfg *LogConfig, writer io.Writer) *mdLogger {
-	l := &mdLogger{writer, cfg}
+	l := &mdLogger{out: writer, cfg: cfg}
 	if l.cfg == nil {
 		l.cfg = &LogConfig{}
 	}
@@ -298,6 +308,7 @@ func NewMarkdownLogger(cfg *LogConfig, writer io.Writer) *mdLogger {
 }
 
 func (t *mdLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+	t.env = env
 	if !create {
 		fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n",
 			from.String(), to.String(),
@@ -315,7 +326,7 @@ func (t *mdLogger) CaptureStart(env *EVM, from common.Address, to common.Address
 }
 
 // CaptureState also tracks SLOAD/SSTORE ops to track storage change.
-func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
+func (t *mdLogger) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
 	stack := scope.Stack
 	fmt.Fprintf(t.out, "| %4d  | %10v  |  %3d |", pc, op, cost)
 
@@ -328,14 +339,14 @@ func (t *mdLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64
 		b := fmt.Sprintf("[%v]", strings.Join(a, ","))
 		fmt.Fprintf(t.out, "%10v |", b)
 	}
-	fmt.Fprintf(t.out, "%10v |", env.StateDB.GetRefund())
+	fmt.Fprintf(t.out, "%10v |", t.env.StateDB.GetRefund())
 	fmt.Fprintln(t.out, "")
 	if err != nil {
 		fmt.Fprintf(t.out, "Error: %v\n", err)
 	}
 }
 
-func (t *mdLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
+func (t *mdLogger) CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
 	fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err)
 }
 
@@ -343,3 +354,8 @@ func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, e
 	fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n",
 		output, gasUsed, err)
 }
+
+func (t *mdLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (t *mdLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
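
For external tracers, the rename from Tracer to EVMLogger and the removal of the env parameter from CaptureState/CaptureFault mean implementations must cache the EVM handed to CaptureStart, as StructLogger does above. A minimal no-op implementation satisfying the new interface (a sketch, not part of this diff):

package example

import (
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
)

// nopLogger satisfies vm.EVMLogger while recording nothing.
type nopLogger struct {
	env *vm.EVM // cached, since CaptureState no longer receives it
}

func (l *nopLogger) CaptureStart(env *vm.EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
	l.env = env
}
func (l *nopLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
}
func (l *nopLogger) CaptureEnter(typ vm.OpCode, from, to common.Address, input []byte, gas uint64, value *big.Int) {
}
func (l *nopLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
func (l *nopLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
}
func (l *nopLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}

// Compile-time check that the interface is fully implemented.
var _ vm.EVMLogger = (*nopLogger)(nil)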

+ 13 - 6
core/vm/logger_json.go

@@ -29,12 +29,13 @@ import (
 type JSONLogger struct {
 	encoder *json.Encoder
 	cfg     *LogConfig
+	env     *EVM
 }
 
 // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
 // into the provided stream.
 func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
-	l := &JSONLogger{json.NewEncoder(writer), cfg}
+	l := &JSONLogger{encoder: json.NewEncoder(writer), cfg: cfg}
 	if l.cfg == nil {
 		l.cfg = &LogConfig{}
 	}
@@ -42,12 +43,13 @@ func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
 }
 
 func (l *JSONLogger) CaptureStart(env *EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+	l.env = env
 }
 
-func (l *JSONLogger) CaptureFault(*EVM, uint64, OpCode, uint64, uint64, *ScopeContext, int, error) {}
+func (l *JSONLogger) CaptureFault(uint64, OpCode, uint64, uint64, *ScopeContext, int, error) {}
 
 // CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
+func (l *JSONLogger) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
 	memory := scope.Memory
 	stack := scope.Stack
 
@@ -58,16 +60,16 @@ func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint
 		GasCost:       cost,
 		MemorySize:    memory.Len(),
 		Depth:         depth,
-		RefundCounter: env.StateDB.GetRefund(),
+		RefundCounter: l.env.StateDB.GetRefund(),
 		Err:           err,
 	}
-	if !l.cfg.DisableMemory {
+	if l.cfg.EnableMemory {
 		log.Memory = memory.Data()
 	}
 	if !l.cfg.DisableStack {
 		log.Stack = stack.data
 	}
-	if !l.cfg.DisableReturnData {
+	if l.cfg.EnableReturnData {
 		log.ReturnData = rData
 	}
 	l.encoder.Encode(log)
@@ -86,3 +88,8 @@ func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration,
 	}
 	l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, ""})
 }
+
+func (l *JSONLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (l *JSONLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}

+ 2 - 1
core/vm/logger_test.go

@@ -63,7 +63,8 @@ func TestStoreCapture(t *testing.T) {
 	scope.Stack.push(uint256.NewInt().SetUint64(1))
 	scope.Stack.push(uint256.NewInt())
 	var index common.Hash
-	logger.CaptureState(env, 0, SSTORE, 0, 0, scope, nil, 0, nil)
+	logger.CaptureStart(env, common.Address{}, contract.Address(), false, nil, 0, nil)
+	logger.CaptureState(0, SSTORE, 0, 0, scope, nil, 0, nil)
 	if len(logger.storage[contract.Address()]) == 0 {
 		t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(),
 			len(logger.storage[contract.Address()]))

+ 262 - 10
core/vm/runtime/runtime_test.go

@@ -33,7 +33,11 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/eth/tracers"
 	"github.com/ethereum/go-ethereum/params"
+
+	// force-load js tracers to trigger registration
+	_ "github.com/ethereum/go-ethereum/eth/tracers/js"
 )
 
 func TestDefaults(t *testing.T) {
@@ -329,12 +333,12 @@ type stepCounter struct {
 func (s *stepCounter) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
 }
 
-func (s *stepCounter) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+func (s *stepCounter) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
 }
 
 func (s *stepCounter) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}
 
-func (s *stepCounter) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+func (s *stepCounter) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
 	s.steps++
 	// Enable this for more output
 	//s.inner.CaptureState(env, pc, op, gas, cost, memory, stack, rStack, contract, depth, err)
@@ -342,11 +346,21 @@ func (s *stepCounter) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, co
 
 // benchmarkNonModifyingCode benchmarks code, but if the code modifies the
 // state, this should not be used, since it does not reset the state between runs.
-func benchmarkNonModifyingCode(gas uint64, code []byte, name string, b *testing.B) {
+func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode string, b *testing.B) {
 	cfg := new(Config)
 	setDefaults(cfg)
 	cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 	cfg.GasLimit = gas
+	if len(tracerCode) > 0 {
+		tracer, err := tracers.New(tracerCode, new(tracers.Context))
+		if err != nil {
+			b.Fatal(err)
+		}
+		cfg.EVMConfig = vm.Config{
+			Debug:  true,
+			Tracer: tracer,
+		}
+	}
 	var (
 		destination = common.BytesToAddress([]byte("contract"))
 		vmenv       = NewEnv(cfg)
@@ -486,12 +500,12 @@ func BenchmarkSimpleLoop(b *testing.B) {
 	//		Tracer: tracer,
 	//	}})
 	// 100M gas
-	benchmarkNonModifyingCode(100000000, staticCallIdentity, "staticcall-identity-100M", b)
-	benchmarkNonModifyingCode(100000000, callIdentity, "call-identity-100M", b)
-	benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", b)
-	benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", b)
-	benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", b)
-	benchmarkNonModifyingCode(100000000, calllRevertingContractWithInput, "call-reverting-100M", b)
+	benchmarkNonModifyingCode(100000000, staticCallIdentity, "staticcall-identity-100M", "", b)
+	benchmarkNonModifyingCode(100000000, callIdentity, "call-identity-100M", "", b)
+	benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", "", b)
+	benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", "", b)
+	benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", "", b)
+	benchmarkNonModifyingCode(100000000, calllRevertingContractWithInput, "call-reverting-100M", "", b)
 
 	//benchmarkNonModifyingCode(10000000, staticCallIdentity, "staticcall-identity-10M", b)
 	//benchmarkNonModifyingCode(10000000, loopingCode, "loop-10M", b)
@@ -500,7 +514,7 @@ func BenchmarkSimpleLoop(b *testing.B) {
 // TestEip2929Cases contains various testcases that are used for
 // EIP-2929 about gas repricings
 func TestEip2929Cases(t *testing.T) {
-
+	t.Skip("Test only useful for generating documentation")
 	id := 1
 	prettyPrint := func(comment string, code []byte) {
 
@@ -688,3 +702,241 @@ func TestColdAccountAccessCost(t *testing.T) {
 		}
 	}
 }
+
+func TestRuntimeJSTracer(t *testing.T) {
+	jsTracers := []string{
+		`{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0,
+	step: function() { this.steps++}, 
+	fault: function() {}, 
+	result: function() { 
+		return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") 
+	}, 
+	enter: function(frame) { 
+		this.enters++; 
+		this.enterGas = frame.getGas();
+	}, 
+	exit: function(res) { 
+		this.exits++; 
+		this.gasUsed = res.getGasUsed();
+	}}`,
+		`{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0,
+	fault: function() {}, 
+	result: function() { 
+		return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") 
+	}, 
+	enter: function(frame) { 
+		this.enters++; 
+		this.enterGas = frame.getGas();
+	}, 
+	exit: function(res) { 
+		this.exits++; 
+		this.gasUsed = res.getGasUsed();
+	}}`}
+	tests := []struct {
+		code []byte
+		// One result per tracer
+		results []string
+	}{
+		{
+			// CREATE
+			code: []byte{
+				// Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes)
+				byte(vm.PUSH5),
+				// Init code: PUSH1 0, PUSH1 0, RETURN (3 steps)
+				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN),
+				byte(vm.PUSH1), 0,
+				byte(vm.MSTORE),
+				// length, offset, value
+				byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0,
+				byte(vm.CREATE),
+				byte(vm.POP),
+			},
+			results: []string{`"1,1,4294935775,6,12"`, `"1,1,4294935775,6,0"`},
+		},
+		{
+			// CREATE2
+			code: []byte{
+				// Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes)
+				byte(vm.PUSH5),
+				// Init code: PUSH1 0, PUSH1 0, RETURN (3 steps)
+				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN),
+				byte(vm.PUSH1), 0,
+				byte(vm.MSTORE),
+				// salt, length, offset, value
+				byte(vm.PUSH1), 1, byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0,
+				byte(vm.CREATE2),
+				byte(vm.POP),
+			},
+			results: []string{`"1,1,4294935766,6,13"`, `"1,1,4294935766,6,0"`},
+		},
+		{
+			// CALL
+			code: []byte{
+				// outsize, outoffset, insize, inoffset
+				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+				byte(vm.PUSH1), 0, // value
+				byte(vm.PUSH1), 0xbb, //address
+				byte(vm.GAS), // gas
+				byte(vm.CALL),
+				byte(vm.POP),
+			},
+			results: []string{`"1,1,4294964716,6,13"`, `"1,1,4294964716,6,0"`},
+		},
+		{
+			// CALLCODE
+			code: []byte{
+				// outsize, outoffset, insize, inoffset
+				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+				byte(vm.PUSH1), 0, // value
+				byte(vm.PUSH1), 0xcc, //address
+				byte(vm.GAS), // gas
+				byte(vm.CALLCODE),
+				byte(vm.POP),
+			},
+			results: []string{`"1,1,4294964716,6,13"`, `"1,1,4294964716,6,0"`},
+		},
+		{
+			// STATICCALL
+			code: []byte{
+				// outsize, outoffset, insize, inoffset
+				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+				byte(vm.PUSH1), 0xdd, //address
+				byte(vm.GAS), // gas
+				byte(vm.STATICCALL),
+				byte(vm.POP),
+			},
+			results: []string{`"1,1,4294964719,6,12"`, `"1,1,4294964719,6,0"`},
+		},
+		{
+			// DELEGATECALL
+			code: []byte{
+				// outsize, outoffset, insize, inoffset
+				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+				byte(vm.PUSH1), 0xee, //address
+				byte(vm.GAS), // gas
+				byte(vm.DELEGATECALL),
+				byte(vm.POP),
+			},
+			results: []string{`"1,1,4294964719,6,12"`, `"1,1,4294964719,6,0"`},
+		},
+		{
+			// CALL self-destructing contract
+			code: []byte{
+				// outsize, outoffset, insize, inoffset
+				byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+				byte(vm.PUSH1), 0, // value
+				byte(vm.PUSH1), 0xff, //address
+				byte(vm.GAS), // gas
+				byte(vm.CALL),
+				byte(vm.POP),
+			},
+			results: []string{`"2,2,0,5003,12"`, `"2,2,0,5003,0"`},
+		},
+	}
+	calleeCode := []byte{
+		byte(vm.PUSH1), 0,
+		byte(vm.PUSH1), 0,
+		byte(vm.RETURN),
+	}
+	depressedCode := []byte{
+		byte(vm.PUSH1), 0xaa,
+		byte(vm.SELFDESTRUCT),
+	}
+	main := common.HexToAddress("0xaa")
+	for i, jsTracer := range jsTracers {
+		for j, tc := range tests {
+			statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+			statedb.SetCode(main, tc.code)
+			statedb.SetCode(common.HexToAddress("0xbb"), calleeCode)
+			statedb.SetCode(common.HexToAddress("0xcc"), calleeCode)
+			statedb.SetCode(common.HexToAddress("0xdd"), calleeCode)
+			statedb.SetCode(common.HexToAddress("0xee"), calleeCode)
+			statedb.SetCode(common.HexToAddress("0xff"), depressedCode)
+
+			tracer, err := tracers.New(jsTracer, new(tracers.Context))
+			if err != nil {
+				t.Fatal(err)
+			}
+			_, _, err = Call(main, nil, &Config{
+				State: statedb,
+				EVMConfig: vm.Config{
+					Debug:  true,
+					Tracer: tracer,
+				}})
+			if err != nil {
+				t.Fatal("didn't expect error", err)
+			}
+			res, err := tracer.GetResult()
+			if err != nil {
+				t.Fatal(err)
+			}
+			if have, want := string(res), tc.results[i]; have != want {
+				t.Errorf("wrong result for tracer %d testcase %d, have \n%v\nwant\n%v\n", i, j, have, want)
+			}
+		}
+	}
+}
+
+func TestJSTracerCreateTx(t *testing.T) {
+	jsTracer := `
+	{enters: 0, exits: 0,
+	step: function() {},
+	fault: function() {},
+	result: function() { return [this.enters, this.exits].join(",") },
+	enter: function(frame) { this.enters++ },
+	exit: function(res) { this.exits++ }}`
+	code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)}
+
+	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+	tracer, err := tracers.New(jsTracer, new(tracers.Context))
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, _, _, err = Create(code, &Config{
+		State: statedb,
+		EVMConfig: vm.Config{
+			Debug:  true,
+			Tracer: tracer,
+		}})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := tracer.GetResult()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if have, want := string(res), `"0,0"`; have != want {
+		t.Errorf("wrong result for tracer, have \n%v\nwant\n%v\n", have, want)
+	}
+}
+
+func BenchmarkTracerStepVsCallFrame(b *testing.B) {
+	// Simply pushes and pops some values in a loop
+	code := []byte{
+		byte(vm.JUMPDEST),
+		byte(vm.PUSH1), 0,
+		byte(vm.PUSH1), 0,
+		byte(vm.POP),
+		byte(vm.POP),
+		byte(vm.PUSH1), 0, // jump destination
+		byte(vm.JUMP),
+	}
+
+	stepTracer := `
+	{
+	step: function() {},
+	fault: function() {},
+	result: function() {},
+	}`
+	callFrameTracer := `
+	{
+	enter: function() {},
+	exit: function() {},
+	fault: function() {},
+	result: function() {},
+	}`
+
+	benchmarkNonModifyingCode(10000000, code, "tracer-step-10M", stepTracer, b)
+	benchmarkNonModifyingCode(10000000, code, "tracer-call-frame-10M", callFrameTracer, b)
+}

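For reference, the enter/exit callbacks these cases exercise follow the same object shape as the inline tracers in this file; the source string is handed verbatim to tracers.New. A minimal sketch, assuming only the callbacks used in the tests above (this tracer is hypothetical, not part of the change):

package example

// depthTracer tracks the maximum call-frame depth reached during execution.
const depthTracer = `
{
	depth: 0, maxDepth: 0,
	step: function() {},
	fault: function() {},
	enter: function(frame) { this.depth++; if (this.depth > this.maxDepth) this.maxDepth = this.depth },
	exit: function(res) { this.depth-- },
	result: function() { return this.maxDepth.toString() }
}`
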
+ 1 - 1
core/vm/stack.go

@@ -91,7 +91,7 @@ func (st *Stack) Print() {
 	fmt.Println("### stack ###")
 	if len(st.data) > 0 {
 		for i, val := range st.data {
-			fmt.Printf("%-3d  %v\n", i, val)
+			fmt.Printf("%-3d  %s\n", i, val.String())
 		}
 	} else {
 		fmt.Println("-- empty --")

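The one-line change above matters because the stack stores uint256.Int values: %v renders the raw limb array, while String() renders the number itself. A runnable sketch, assuming the holiman/uint256 element type this interpreter uses:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

func main() {
	val := *new(uint256.Int).SetUint64(1024) // the stack holds values, not pointers
	fmt.Printf("%v\n", val)                  // raw limbs, e.g. [1024 0 0 0]
	fmt.Printf("%s\n", val.String())         // the integer's canonical textual form
}
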
+ 2 - 2
eth/api_backend.go

@@ -338,8 +338,8 @@ func (b *EthAPIBackend) StartMining(threads int) error {
 	return b.eth.StartMining(threads)
 }
 
-func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) {
-	return b.eth.stateAtBlock(block, reexec, base, checkLive)
+func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error) {
+	return b.eth.stateAtBlock(block, reexec, base, checkLive, preferDisk)
 }
 
 func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) {

+ 4 - 2
eth/api_test.go

@@ -77,7 +77,9 @@ func TestAccountRange(t *testing.T) {
 			m[addr] = true
 		}
 	}
-	state.Commit(true)
+	state.Finalise(true)
+	state.AccountsIntermediateRoot()
+	state.Commit(nil)
 	root := state.IntermediateRoot(true)
 
 	trie, err := statedb.OpenTrie(root)
@@ -134,7 +136,7 @@ func TestEmptyAccountRange(t *testing.T) {
 		statedb  = state.NewDatabase(rawdb.NewMemoryDatabase())
 		state, _ = state.New(common.Hash{}, statedb, nil)
 	)
-	state.Commit(true)
+	state.Commit(nil)
 	state.IntermediateRoot(true)
 	results := state.IteratorDump(true, true, true, (common.Hash{}).Bytes(), AccountRangeMaxResults)
 	if bytes.Equal(results.Next, (common.Hash{}).Bytes()) {

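The Commit(true) call sites above are rewritten into the three-step sequence this fork uses throughout. A compilable sketch of that sequence; the Commit signature (nil post-commit argument, three return values) is assumed from the call sites in this diff:

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
)

// commitState mirrors the pattern above: finalise dirty objects, hash the
// account tries, then flush everything with no post-commit callback.
func commitState(statedb *state.StateDB, deleteEmptyObjects bool) (common.Hash, error) {
	statedb.Finalise(deleteEmptyObjects)
	statedb.AccountsIntermediateRoot()
	root, _, err := statedb.Commit(nil)
	return root, err
}
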
+ 3 - 0
eth/backend.go

@@ -203,6 +203,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	if config.DiffSync {
 		bcOps = append(bcOps, core.EnableLightProcessor)
 	}
+	if config.PipeCommit {
+		bcOps = append(bcOps, core.EnablePipelineCommit)
+	}
 	if config.PersistDiff {
 		bcOps = append(bcOps, core.EnablePersistDiff(config.DiffBlock))
 	}

+ 2 - 2
eth/downloader/downloader.go

@@ -599,10 +599,10 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 			d.ancientLimit = 0
 		}
 		frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
-
+		itemAmountInAncient, _ := d.stateDB.ItemAmountInAncient()
 		// If a part of blockchain data has already been written into active store,
 		// disable the ancient style insertion explicitly.
-		if origin >= frozen && frozen != 0 {
+		if origin >= frozen && itemAmountInAncient != 0 {
 			d.ancientLimit = 0
 			log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
 		} else if d.ancientLimit > 0 {

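The condition change above replaces the frozen != 0 check with one on the freezer's item count: after an offline block prune the freezer head stays non-zero even though it holds no items. A small sketch of the new gate:

package main

import "fmt"

// shouldDisableDirectAncient mirrors the condition above: the item count,
// not the head number, decides whether direct-ancient insertion is disabled.
func shouldDisableDirectAncient(origin, frozen, itemsInAncient uint64) bool {
	return origin >= frozen && itemsInAncient != 0
}

func main() {
	// A pruned freezer: head is 90 but zero items remain, so ancient-style
	// insertion stays enabled.
	fmt.Println(shouldDisableDirectAncient(100, 90, 0)) // false
}
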
+ 1 - 0
eth/ethconfig/config.go

@@ -137,6 +137,7 @@ type Config struct {
 	DirectBroadcast     bool
 	DisableSnapProtocol bool //Whether disable snap protocol
 	DiffSync            bool // Whether support diff sync
+	PipeCommit          bool
 	RangeLimit          bool
 
 	TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.

+ 6 - 13
eth/handler.go

@@ -565,26 +565,19 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
 // ReannounceTransactions will announce a batch of local pending transactions
 // to a square root of all peers.
 func (h *handler) ReannounceTransactions(txs types.Transactions) {
-	var (
-		annoCount int                                // Count of announcements made
-		annos     = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
-	)
+	hashes := make([]common.Hash, 0, txs.Len())
+	for _, tx := range txs {
+		hashes = append(hashes, tx.Hash())
+	}
 
 	// Announce transactions hash to a batch of peers
 	peersCount := uint(math.Sqrt(float64(h.peers.len())))
 	peers := h.peers.headPeers(peersCount)
-	for _, tx := range txs {
-		for _, peer := range peers {
-			annos[peer] = append(annos[peer], tx.Hash())
-		}
-	}
-
-	for peer, hashes := range annos {
-		annoCount += len(hashes)
+	for _, peer := range peers {
 		peer.AsyncSendPooledTransactionHashes(hashes)
 	}
 	log.Debug("Transaction reannounce", "txs", len(txs),
-		"announce packs", peersCount, "announced hashes", annoCount)
+		"announce packs", peersCount, "announced hashes", peersCount*uint(len(hashes)))
 }
 
 // minedBroadcastLoop sends mined blocks to connected peers.

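The rewrite above builds the hash list once and hands the same slice to √N peers, so the announced-hash count is simply peers × len(hashes). A self-contained sketch of the fan-out rule (names here are illustrative):

package main

import (
	"fmt"
	"math"
)

// reannounce fans one shared payload out to floor(sqrt(total)) receivers.
func reannounce(total int, payload []string, send func(i int, hashes []string)) {
	peers := int(math.Sqrt(float64(total)))
	for i := 0; i < peers; i++ {
		send(i, payload) // shared slice: receivers must treat it as read-only
	}
	fmt.Printf("reannounced %d hashes to %d peers\n", peers*len(payload), peers)
}

func main() {
	reannounce(100, []string{"0xaa", "0xbb"}, func(int, []string) {})
}
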
+ 70 - 0
eth/handler_eth_test.go

@@ -239,6 +239,76 @@ func testForkIDSplit(t *testing.T, protocol uint) {
 func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, eth.ETH65) }
 func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
 
+func TestWaitDiffExtensionTimout(t *testing.T) {
+	t.Parallel()
+
+	// Create a message handler to run the peer's extension handshake against
+	handler := newTestHandler()
+	defer handler.close()
+
+	// Create a source peer to send messages through and a sink handler to receive them
+	_, p2pSink := p2p.MsgPipe()
+	defer p2pSink.Close()
+
+	protos := []p2p.Protocol{
+		{
+			Name:    "diff",
+			Version: 1,
+		},
+	}
+
+	sink := eth.NewPeer(eth.ETH67, p2p.NewPeerWithProtocols(enode.ID{2}, protos, "", []p2p.Cap{
+		{
+			Name:    "diff",
+			Version: 1,
+		},
+	}), p2pSink, nil)
+	defer sink.Close()
+
+	err := handler.handler.runEthPeer(sink, func(peer *eth.Peer) error {
+		return eth.Handle((*ethHandler)(handler.handler), peer)
+	})
+
+	if err == nil || err.Error() != "peer wait timeout" {
+		t.Fatalf("error should be `peer wait timeout`")
+	}
+}
+
+func TestWaitSnapExtensionTimout(t *testing.T) {
+	t.Parallel()
+
+	// Create a message handler to run the peer's extension handshake against
+	handler := newTestHandler()
+	defer handler.close()
+
+	// Create a source peer to send messages through and a sink handler to receive them
+	_, p2pSink := p2p.MsgPipe()
+	defer p2pSink.Close()
+
+	protos := []p2p.Protocol{
+		{
+			Name:    "snap",
+			Version: 1,
+		},
+	}
+
+	sink := eth.NewPeer(eth.ETH67, p2p.NewPeerWithProtocols(enode.ID{2}, protos, "", []p2p.Cap{
+		{
+			Name:    "snap",
+			Version: 1,
+		},
+	}), p2pSink, nil)
+	defer sink.Close()
+
+	err := handler.handler.runEthPeer(sink, func(peer *eth.Peer) error {
+		return eth.Handle((*ethHandler)(handler.handler), peer)
+	})
+
+	if err == nil || err.Error() != "peer wait timeout" {
+		t.Fatalf("error should be `peer wait timeout`")
+	}
+}
+
 func testRecvTransactions(t *testing.T, protocol uint) {
 	t.Parallel()
 

+ 32 - 4
eth/peerset.go

@@ -20,6 +20,7 @@ import (
 	"errors"
 	"math/big"
 	"sync"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/eth/downloader"
@@ -38,19 +39,28 @@ var (
 	// to the peer set, but one with the same id already exists.
 	errPeerAlreadyRegistered = errors.New("peer already registered")
 
+	// errPeerWaitTimeout is returned if a peer waits too long for its satellite-protocol extension
+	errPeerWaitTimeout = errors.New("peer wait timeout")
+
 	// errPeerNotRegistered is returned if a peer is attempted to be removed from
 	// a peer set, but no peer with the given id exists.
 	errPeerNotRegistered = errors.New("peer not registered")
 
 	// errSnapWithoutEth is returned if a peer attempts to connect only on the
-	// snap protocol without advertizing the eth main protocol.
+	// snap protocol without advertising the eth main protocol.
 	errSnapWithoutEth = errors.New("peer connected on snap without compatible eth support")
 
 	// errDiffWithoutEth is returned if a peer attempts to connect only on the
-	// diff protocol without advertizing the eth main protocol.
+	// diff protocol without advertising the eth main protocol.
 	errDiffWithoutEth = errors.New("peer connected on diff without compatible eth support")
 )
 
+const (
+	// extensionWaitTimeout is the maximum allowed time for the extension wait to
+	// complete before dropping the connection as malicious.
+	extensionWaitTimeout = 10 * time.Second
+)
+
 // peerSet represents the collection of active peers currently participating in
 // the `eth` protocol, with or without the `snap` extension.
 type peerSet struct {
@@ -169,7 +179,16 @@ func (ps *peerSet) waitSnapExtension(peer *eth.Peer) (*snap.Peer, error) {
 	ps.snapWait[id] = wait
 	ps.lock.Unlock()
 
-	return <-wait, nil
+	select {
+	case peer := <-wait:
+		return peer, nil
+
+	case <-time.After(extensionWaitTimeout):
+		ps.lock.Lock()
+		delete(ps.snapWait, id)
+		ps.lock.Unlock()
+		return nil, errPeerWaitTimeout
+	}
 }
 
 // waitDiffExtension blocks until all satellite protocols are connected and tracked
@@ -203,7 +222,16 @@ func (ps *peerSet) waitDiffExtension(peer *eth.Peer) (*diff.Peer, error) {
 	ps.diffWait[id] = wait
 	ps.lock.Unlock()
 
-	return <-wait, nil
+	select {
+	case peer := <-wait:
+		return peer, nil
+
+	case <-time.After(extensionWaitTimeout):
+		ps.lock.Lock()
+		delete(ps.diffWait, id)
+		ps.lock.Unlock()
+		return nil, errPeerWaitTimeout
+	}
 }
 
 func (ps *peerSet) GetDiffPeer(pid string) downloader.IDiffPeer {

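Both wait sites now follow the same register-then-race pattern: park a channel in the wait map, select against a timer, and deregister on timeout so the entry cannot leak. A standalone sketch of the pattern (all names here are hypothetical):

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errWaitTimeout = errors.New("peer wait timeout")

type waiter struct {
	mu   sync.Mutex
	wait map[string]chan int
}

func (w *waiter) waitFor(id string, timeout time.Duration) (int, error) {
	ch := make(chan int, 1)
	w.mu.Lock()
	w.wait[id] = ch
	w.mu.Unlock()

	select {
	case v := <-ch:
		return v, nil
	case <-time.After(timeout):
		w.mu.Lock()
		delete(w.wait, id) // clean up so the abort is visible to a late sender
		w.mu.Unlock()
		return 0, errWaitTimeout
	}
}

func main() {
	w := &waiter{wait: make(map[string]chan int)}
	_, err := w.waitFor("peer-1", 50*time.Millisecond) // nobody delivers
	fmt.Println(err)                                   // peer wait timeout
}
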
+ 1 - 1
eth/protocols/diff/handshake.go

@@ -26,7 +26,7 @@ import (
 
 const (
 	// handshakeTimeout is the maximum allowed time for the `diff` handshake to
-	// complete before dropping the connection.= as malicious.
+	// complete before dropping the connection as malicious.
 	handshakeTimeout = 5 * time.Second
 )
 

+ 26 - 4
eth/state_accessor.go

@@ -37,7 +37,17 @@ import (
 // are attempted to be reexecuted to generate the desired state. The optional
 // base layer statedb can be passed then it's regarded as the statedb of the
 // parent block.
-func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) {
+// Parameters:
+// - block: The block for which we want the state (== state at the stateRoot of the parent)
+// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state
+// - base: If the caller is tracing multiple blocks, the caller can provide the parent state
+//         continuously from the callsite.
+// - checkLive: if true, then the live 'blockchain' state database is used. If the caller wants to
+//        perform Commit or other 'save-to-disk' changes, this should be set to false to avoid
+//        storing trash persistently
+// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided,
+//        it would be preferable to start from a fresh state, if we have it on disk.
+func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
 	var (
 		current  *types.Block
 		database state.Database
@@ -52,6 +62,15 @@ func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state
 		}
 	}
 	if base != nil {
+		if preferDisk {
+			// Create an ephemeral trie.Database, isolated from the live one. Otherwise
+			// the internal junk created by tracing would be persisted to disk.
+			database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16})
+			if statedb, err = state.New(block.Root(), database, nil); err == nil {
+				log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number())
+				return statedb, nil
+			}
+		}
 		// The optional base statedb is given, mark the start point as parent block
 		statedb, database, report = base, base.Database(), false
 		current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
@@ -119,9 +138,12 @@ func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state
 			return nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
 		}
 		// Finalize the state so any modifications are written to the trie
-		root, _, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number()))
+		statedb.Finalise(eth.blockchain.Config().IsEIP158(current.Number()))
+		statedb.AccountsIntermediateRoot()
+		root, _, err := statedb.Commit(nil)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
+				current.NumberU64(), current.Root().Hex(), err)
 		}
 		statedb, err = state.New(root, database, nil)
 		if err != nil {
@@ -153,7 +175,7 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec
 	}
 	// Lookup the statedb of parent block from the live database,
 	// otherwise regenerate it on the flight.
-	statedb, err := eth.stateAtBlock(parent, reexec, nil, true)
+	statedb, err := eth.stateAtBlock(parent, reexec, nil, true, false)
 	if err != nil {
 		return nil, vm.BlockContext{}, nil, err
 	}

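The preferDisk branch documented above tries a fresh disk-backed state at the block root before falling back to the in-memory base. A compilable sketch using the same calls as the hunk (the wrapper function itself is hypothetical):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// pickTracingState prefers a disk-backed state at root, isolated behind an
// ephemeral trie.Database so tracing junk never reaches the live one.
func pickTracingState(db ethdb.Database, root common.Hash, base *state.StateDB, preferDisk bool) *state.StateDB {
	if base != nil && preferDisk {
		database := state.NewDatabaseWithConfig(db, &trie.Config{Cache: 16})
		if statedb, err := state.New(root, database, nil); err == nil {
			return statedb // found on disk: skip re-execution entirely
		}
	}
	return base // fall back to the caller-provided parent state
}
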
+ 154 - 64
eth/tracers/api.go

@@ -55,6 +55,13 @@ const (
 	// and reexecute to produce missing historical state necessary to run a specific
 	// trace.
 	defaultTraceReexec = uint64(128)
+
+	// defaultTracechainMemLimit is the size of the triedb, at which traceChain
+	// switches over and tries to use a disk-backed database instead of building
+	// on top of memory.
+	// For non-archive nodes, this limit _will_ be overblown, as disk-backed tries
+	// will only be found every ~15K blocks or so.
+	defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024)
 )
 
 // Backend interface provides the common API services (that are provided by
@@ -69,7 +76,10 @@ type Backend interface {
 	ChainConfig() *params.ChainConfig
 	Engine() consensus.Engine
 	ChainDb() ethdb.Database
-	StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error)
+	// StateAtBlock returns the state corresponding to the stateroot of the block.
+	// N.B: For executing transactions on block N, the required stateRoot is that of block N-1,
+	// so this method should be called with the parent.
+	StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error)
 	StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error)
 }
 
@@ -180,13 +190,6 @@ type StdTraceConfig struct {
 	TxHash common.Hash
 }
 
-// txTraceContext is the contextual infos about a transaction before it gets run.
-type txTraceContext struct {
-	index int         // Index of the transaction within the block
-	hash  common.Hash // Hash of the transaction
-	block common.Hash // Hash of the block containing the transaction
-}
-
 // txTraceResult is the result of a single transaction trace.
 type txTraceResult struct {
 	Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer
@@ -274,10 +277,10 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 				// Trace all the transactions contained within
 				for i, tx := range task.block.Transactions() {
 					msg, _ := tx.AsMessage(signer)
-					txctx := &txTraceContext{
-						index: i,
-						hash:  tx.Hash(),
-						block: task.block.Hash(),
+					txctx := &Context{
+						BlockHash: task.block.Hash(),
+						TxIndex:   i,
+						TxHash:    tx.Hash(),
 					}
 					res, err := api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, config)
 					if err != nil {
@@ -299,7 +302,11 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 		})
 	}
 	// Start a goroutine to feed all the blocks into the tracers
-	begin := time.Now()
+	var (
+		begin     = time.Now()
+		derefTodo []common.Hash // list of hashes to dereference from the db
+		derefsMu  sync.Mutex    // mutex for the derefs
+	)
 
 	gopool.Submit(func() {
 		var (
@@ -325,6 +332,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 			}
 			close(results)
 		}()
+		var preferDisk bool
 		// Feed all the blocks both into the tracer, as well as fast process concurrently
 		for number = start.NumberU64(); number < end.NumberU64(); number++ {
 			// Stop tracing if interruption was requested
@@ -333,6 +341,14 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 				return
 			default:
 			}
+			// Clean out any pending derefs
+			derefsMu.Lock()
+			for _, h := range derefTodo {
+				statedb.Database().TrieDB().Dereference(h)
+			}
+			derefTodo = derefTodo[:0]
+			derefsMu.Unlock()
+
 			// Print progress logs if long enough time elapsed
 			if time.Since(logged) > 8*time.Second {
 				logged = time.Now()
@@ -346,18 +362,24 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 			}
 			// Prepare the statedb for tracing. Don't use the live database for
 			// tracing to avoid persisting state junks into the database.
-			statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false)
+			statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false, preferDisk)
 			if err != nil {
 				failed = err
 				break
 			}
-			if statedb.Database().TrieDB() != nil {
+			if trieDb := statedb.Database().TrieDB(); trieDb != nil {
 				// Hold the reference for tracer, will be released at the final stage
-				statedb.Database().TrieDB().Reference(block.Root(), common.Hash{})
+				trieDb.Reference(block.Root(), common.Hash{})
 
 				// Release the parent state because it's already held by the tracer
 				if parent != (common.Hash{}) {
-					statedb.Database().TrieDB().Dereference(parent)
+					trieDb.Dereference(parent)
+				}
+				// Prefer disk if the trie db memory grows too much
+				s1, s2 := trieDb.Size()
+				if !preferDisk && (s1+s2) > defaultTracechainMemLimit {
+					log.Info("Switching to prefer-disk mode for tracing", "size", s1+s2)
+					preferDisk = true
 				}
 			}
 			parent = block.Root()
@@ -391,12 +413,11 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
 				Hash:   res.block.Hash(),
 				Traces: res.results,
 			}
+			// Schedule any parent tries held in memory by this task for dereferencing
 			done[uint64(result.Block)] = result
-
-			// Dereference any parent tries held in memory by this task
-			if res.statedb.Database().TrieDB() != nil {
-				res.statedb.Database().TrieDB().Dereference(res.rootref)
-			}
+			derefsMu.Lock()
+			derefTodo = append(derefTodo, res.rootref)
+			derefsMu.Unlock()
 			// Stream completed traces to the user, aborting on the first error
 			for result, ok := done[next]; ok; result, ok = done[next] {
 				if len(result.Traces) > 0 || next == end.NumberU64() {
@@ -454,12 +475,11 @@ func (api *API) TraceBlockFromFile(ctx context.Context, file string, config *Tra
 // EVM against a block pulled from the pool of bad ones and returns them as a JSON
 // object.
 func (api *API) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
-	for _, block := range rawdb.ReadAllBadBlocks(api.backend.ChainDb()) {
-		if block.Hash() == hash {
-			return api.traceBlock(ctx, block, config)
-		}
+	block := rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
+	if block == nil {
+		return nil, fmt.Errorf("bad block %#x not found", hash)
 	}
-	return nil, fmt.Errorf("bad block %#x not found", hash)
+	return api.traceBlock(ctx, block, config)
 }
 
 // StandardTraceBlockToFile dumps the structured logs created during the
@@ -473,16 +493,85 @@ func (api *API) StandardTraceBlockToFile(ctx context.Context, hash common.Hash,
 	return api.standardTraceBlockToFile(ctx, block, config)
 }
 
+// IntermediateRoots executes a block (bad, canonical or side chain) and returns a list
+// of intermediate roots: the stateroot after each transaction.
+func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config *TraceConfig) ([]common.Hash, error) {
+	block, _ := api.blockByHash(ctx, hash)
+	if block == nil {
+		// Check in the bad blocks
+		block = rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
+	}
+	if block == nil {
+		return nil, fmt.Errorf("block %#x not found", hash)
+	}
+	if block.NumberU64() == 0 {
+		return nil, errors.New("genesis is not traceable")
+	}
+	parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
+	if err != nil {
+		return nil, err
+	}
+	reexec := defaultTraceReexec
+	if config != nil && config.Reexec != nil {
+		reexec = *config.Reexec
+	}
+	statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
+	if err != nil {
+		return nil, err
+	}
+	var (
+		roots              []common.Hash
+		signer             = types.MakeSigner(api.backend.ChainConfig(), block.Number())
+		chainConfig        = api.backend.ChainConfig()
+		vmctx              = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
+		deleteEmptyObjects = chainConfig.IsEIP158(block.Number())
+	)
+	for i, tx := range block.Transactions() {
+		var (
+			msg, _    = tx.AsMessage(signer)
+			txContext = core.NewEVMTxContext(msg)
+			vmenv     = vm.NewEVM(vmctx, txContext, statedb, chainConfig, vm.Config{})
+		)
+
+		if posa, ok := api.backend.Engine().(consensus.PoSA); ok {
+			if isSystem, _ := posa.IsSystemTransaction(tx, block.Header()); isSystem {
+				balance := statedb.GetBalance(consensus.SystemAddress)
+				if balance.Cmp(common.Big0) > 0 {
+					statedb.SetBalance(consensus.SystemAddress, big.NewInt(0))
+					statedb.AddBalance(vmctx.Coinbase, balance)
+				}
+			}
+		}
+
+		statedb.Prepare(tx.Hash(), block.Hash(), i)
+		if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
+			log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
+			// We intentionally don't return the error here: if we do, then the RPC server will not
+			// return the roots. Most likely, the caller already knows that a certain transaction fails to
+			// be included, but still wants the intermediate roots that led to that point.
+			// It may happen that tx_N causes an erroneous state, which in turn causes tx_N+M to not be
+			// executable.
+			// N.B: This should never happen while tracing canon blocks, only when tracing bad blocks.
+			return roots, nil
+		}
+		// calling IntermediateRoot will internally call Finalise on the state
+		// so any modifications are written to the trie
+		root := statedb.IntermediateRoot(deleteEmptyObjects)
+
+		roots = append(roots, root)
+	}
+	return roots, nil
+}
+
 // StandardTraceBadBlockToFile dumps the structured logs created during the
 // execution of EVM against a block pulled from the pool of bad ones to the
 // local file system and returns a list of files to the caller.
 func (api *API) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
-	for _, block := range rawdb.ReadAllBadBlocks(api.backend.ChainDb()) {
-		if block.Hash() == hash {
-			return api.standardTraceBlockToFile(ctx, block, config)
-		}
+	block := rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
+	if block == nil {
+		return nil, fmt.Errorf("bad block %#x not found", hash)
 	}
-	return nil, fmt.Errorf("bad block %#x not found", hash)
+	return api.standardTraceBlockToFile(ctx, block, config)
 }
 
 // traceBlock configures a new tracer according to the provided configuration, and
@@ -500,7 +589,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
 	if config != nil && config.Reexec != nil {
 		reexec = *config.Reexec
 	}
-	statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true)
+	statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
 	if err != nil {
 		return nil, err
 	}
@@ -520,16 +609,18 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
 	blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
 	blockHash := block.Hash()
 	for th := 0; th < threads; th++ {
+		blockCtx := blockCtx // shadow the outer blockCtx: it is reassigned below while the workers run
+
 		pend.Add(1)
 		gopool.Submit(func() {
 			defer pend.Done()
 			// Fetch and execute the next transaction trace tasks
 			for task := range jobs {
 				msg, _ := txs[task.index].AsMessage(signer)
-				txctx := &txTraceContext{
-					index: task.index,
-					hash:  txs[task.index].Hash(),
-					block: blockHash,
+				txctx := &Context{
+					BlockHash: blockHash,
+					TxIndex:   task.index,
+					TxHash:    txs[task.index].Hash(),
 				}
 				res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)
 				if err != nil {
@@ -542,6 +633,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
 	}
 	// Feed the transactions into the tracers and return
 	var failed error
+	blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
 	for i, tx := range txs {
 		// Send the trace task over for execution
 		jobs <- &txTraceTask{statedb: statedb.Copy(), index: i}
@@ -600,7 +692,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
 	if config != nil && config.Reexec != nil {
 		reexec = *config.Reexec
 	}
-	statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true)
+	statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
 	if err != nil {
 		return nil, err
 	}
@@ -740,10 +832,10 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *
 	if err != nil {
 		return nil, err
 	}
-	txctx := &txTraceContext{
-		index: int(index),
-		hash:  hash,
-		block: blockHash,
+	txctx := &Context{
+		BlockHash: blockHash,
+		TxIndex:   int(index),
+		TxHash:    hash,
 	}
 	return api.traceTx(ctx, msg, txctx, vmctx, statedb, config)
 }
@@ -773,7 +865,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHa
 	if config != nil && config.Reexec != nil {
 		reexec = *config.Reexec
 	}
-	statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true)
+	statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false)
 	if err != nil {
 		return nil, err
 	}
@@ -796,21 +888,23 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHa
 			Reexec:    config.Reexec,
 		}
 	}
-	return api.traceTx(ctx, msg, new(txTraceContext), vmctx, statedb, traceConfig)
+	return api.traceTx(ctx, msg, new(Context), vmctx, statedb, traceConfig)
 }
 
 // traceTx configures a new tracer according to the provided configuration, and
 // executes the given message in the provided environment. The return value will
 // be tracer dependent.
-func (api *API) traceTx(ctx context.Context, message core.Message, txctx *txTraceContext, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) {
+func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) {
 	// Assemble the structured logger or the JavaScript tracer
 	var (
-		tracer    vm.Tracer
+		tracer    vm.EVMLogger
 		err       error
 		txContext = core.NewEVMTxContext(message)
 	)
 	switch {
-	case config != nil && config.Tracer != nil:
+	case config == nil:
+		tracer = vm.NewStructLogger(nil)
+	case config.Tracer != nil:
 		// Define a meaningful timeout of a single transaction trace
 		timeout := defaultTraceTimeout
 		if config.Timeout != nil {
@@ -818,23 +912,19 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *txTrac
 				return nil, err
 			}
 		}
-		// Constuct the JavaScript tracer to execute with
-		if tracer, err = New(*config.Tracer, txContext); err != nil {
+		if t, err := New(*config.Tracer, txctx); err != nil {
 			return nil, err
+		} else {
+			deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
+			go func() {
+				<-deadlineCtx.Done()
+				if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
+					t.Stop(errors.New("execution timeout"))
+				}
+			}()
+			defer cancel()
+			tracer = t
 		}
-		// Handle timeouts and RPC cancellations
-		deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
-		gopool.Submit(func() {
-			<-deadlineCtx.Done()
-			if deadlineCtx.Err() == context.DeadlineExceeded {
-				tracer.(*Tracer).Stop(errors.New("execution timeout"))
-			}
-		})
-		defer cancel()
-
-	case config == nil:
-		tracer = vm.NewStructLogger(nil)
-
 	default:
 		tracer = vm.NewStructLogger(config.LogConfig)
 	}
@@ -851,7 +941,7 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *txTrac
 	}
 
 	// Call Prepare to clear out the statedb access list
-	statedb.Prepare(txctx.hash, txctx.block, txctx.index)
+	statedb.Prepare(txctx.TxHash, txctx.BlockHash, txctx.TxIndex)
 
 	result, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
 	if err != nil {
@@ -873,7 +963,7 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *txTrac
 			StructLogs:  ethapi.FormatLogs(tracer.StructLogs()),
 		}, nil
 
-	case *Tracer:
+	case Tracer:
 		return tracer.GetResult()
 
 	default:

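The timeout wiring in traceTx above is a reusable pattern: a context deadline drives Stop on the tracer so a runaway tracer aborts instead of hanging the RPC call, while cancel() silences the watchdog on normal completion. A standalone sketch (names are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type stoppable interface{ Stop(err error) }

// withTraceTimeout runs fn, stopping t only if the deadline (not a normal
// return) fired, mirroring the deadlineCtx logic above.
func withTraceTimeout(ctx context.Context, timeout time.Duration, t stoppable, fn func()) {
	deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	go func() {
		<-deadlineCtx.Done()
		if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
			t.Stop(errors.New("execution timeout"))
		}
	}()
	fn()
}

type noopTracer struct{}

func (noopTracer) Stop(err error) { fmt.Println("stopped:", err) }

func main() {
	withTraceTimeout(context.Background(), 10*time.Millisecond, noopTracer{}, func() {
		time.Sleep(50 * time.Millisecond) // simulate a slow trace
	})
	time.Sleep(20 * time.Millisecond) // give the watchdog goroutine time to print
}
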
+ 139 - 198
eth/tracers/api_test.go

@@ -139,7 +139,7 @@ func (b *testBackend) ChainDb() ethdb.Database {
 	return b.chaindb
 }
 
-func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) {
+func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) {
 	statedb, err := b.chain.StateAt(block.Root())
 	if err != nil {
 		return nil, errStateNotFound
@@ -307,147 +307,6 @@ func TestTraceCall(t *testing.T) {
 	}
 }
 
-func TestOverridenTraceCall(t *testing.T) {
-	t.Parallel()
-
-	// Initialize test accounts
-	accounts := newAccounts(3)
-	genesis := &core.Genesis{Alloc: core.GenesisAlloc{
-		accounts[0].addr: {Balance: big.NewInt(params.Ether)},
-		accounts[1].addr: {Balance: big.NewInt(params.Ether)},
-		accounts[2].addr: {Balance: big.NewInt(params.Ether)},
-	}}
-	genBlocks := 10
-	signer := types.HomesteadSigner{}
-	api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
-		// Transfer from account[0] to account[1]
-		//    value: 1000 wei
-		//    fee:   0 wei
-		tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, big.NewInt(0), nil), signer, accounts[0].key)
-		b.AddTx(tx)
-	}))
-	randomAccounts, tracer := newAccounts(3), "callTracer"
-
-	var testSuite = []struct {
-		blockNumber rpc.BlockNumber
-		call        ethapi.CallArgs
-		config      *TraceCallConfig
-		expectErr   error
-		expect      *callTrace
-	}{
-		// Succcessful call with state overriding
-		{
-			blockNumber: rpc.PendingBlockNumber,
-			call: ethapi.CallArgs{
-				From:  &randomAccounts[0].addr,
-				To:    &randomAccounts[1].addr,
-				Value: (*hexutil.Big)(big.NewInt(1000)),
-			},
-			config: &TraceCallConfig{
-				Tracer: &tracer,
-				StateOverrides: &ethapi.StateOverride{
-					randomAccounts[0].addr: ethapi.OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))},
-				},
-			},
-			expectErr: nil,
-			expect: &callTrace{
-				Type:    "CALL",
-				From:    randomAccounts[0].addr,
-				To:      randomAccounts[1].addr,
-				Gas:     newRPCUint64(24979000),
-				GasUsed: newRPCUint64(0),
-				Value:   (*hexutil.Big)(big.NewInt(1000)),
-			},
-		},
-		// Invalid call without state overriding
-		{
-			blockNumber: rpc.PendingBlockNumber,
-			call: ethapi.CallArgs{
-				From:  &randomAccounts[0].addr,
-				To:    &randomAccounts[1].addr,
-				Value: (*hexutil.Big)(big.NewInt(1000)),
-			},
-			config: &TraceCallConfig{
-				Tracer: &tracer,
-			},
-			expectErr: core.ErrInsufficientFundsForTransfer,
-			expect:    nil,
-		},
-		// Successful simple contract call
-		//
-		// // SPDX-License-Identifier: GPL-3.0
-		//
-		//  pragma solidity >=0.7.0 <0.8.0;
-		//
-		//  /**
-		//   * @title Storage
-		//   * @dev Store & retrieve value in a variable
-		//   */
-		//  contract Storage {
-		//      uint256 public number;
-		//      constructor() {
-		//          number = block.number;
-		//      }
-		//  }
-		{
-			blockNumber: rpc.PendingBlockNumber,
-			call: ethapi.CallArgs{
-				From: &randomAccounts[0].addr,
-				To:   &randomAccounts[2].addr,
-				Data: newRPCBytes(common.Hex2Bytes("8381f58a")), // call number()
-			},
-			config: &TraceCallConfig{
-				Tracer: &tracer,
-				StateOverrides: &ethapi.StateOverride{
-					randomAccounts[2].addr: ethapi.OverrideAccount{
-						Code:      newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060285760003560e01c80638381f58a14602d575b600080fd5b60336049565b6040518082815260200191505060405180910390f35b6000548156fea2646970667358221220eab35ffa6ab2adfe380772a48b8ba78e82a1b820a18fcb6f59aa4efb20a5f60064736f6c63430007040033")),
-						StateDiff: newStates([]common.Hash{{}}, []common.Hash{common.BigToHash(big.NewInt(123))}),
-					},
-				},
-			},
-			expectErr: nil,
-			expect: &callTrace{
-				Type:    "CALL",
-				From:    randomAccounts[0].addr,
-				To:      randomAccounts[2].addr,
-				Input:   hexutil.Bytes(common.Hex2Bytes("8381f58a")),
-				Output:  hexutil.Bytes(common.BigToHash(big.NewInt(123)).Bytes()),
-				Gas:     newRPCUint64(24978936),
-				GasUsed: newRPCUint64(2283),
-				Value:   (*hexutil.Big)(big.NewInt(0)),
-			},
-		},
-	}
-	for _, testspec := range testSuite {
-		result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config)
-		if testspec.expectErr != nil {
-			if err == nil {
-				t.Errorf("Expect error %v, get nothing", testspec.expectErr)
-				continue
-			}
-			if !errors.Is(err, testspec.expectErr) {
-				t.Errorf("Error mismatch, want %v, get %v", testspec.expectErr, err)
-			}
-		} else {
-			if err != nil {
-				t.Errorf("Expect no error, get %v", err)
-				continue
-			}
-			ret := new(callTrace)
-			if err := json.Unmarshal(result.(json.RawMessage), ret); err != nil {
-				t.Fatalf("failed to unmarshal trace result: %v", err)
-			}
-			if !jsonEqual(ret, testspec.expect) {
-				// uncomment this for easier debugging
-				//have, _ := json.MarshalIndent(ret, "", " ")
-				//want, _ := json.MarshalIndent(testspec.expect, "", " ")
-				//t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want))
-				t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, testspec.expect)
-			}
-		}
-	}
-}
-
 func TestTraceTransaction(t *testing.T) {
 	t.Parallel()
 
@@ -504,90 +363,177 @@ func TestTraceBlock(t *testing.T) {
 	var testSuite = []struct {
 		blockNumber rpc.BlockNumber
 		config      *TraceConfig
-		expect      interface{}
+		want        string
 		expectErr   error
 	}{
 		// Trace genesis block, expect error
 		{
 			blockNumber: rpc.BlockNumber(0),
-			config:      nil,
-			expect:      nil,
 			expectErr:   errors.New("genesis is not traceable"),
 		},
 		// Trace head block
 		{
 			blockNumber: rpc.BlockNumber(genBlocks),
-			config:      nil,
-			expectErr:   nil,
-			expect: []*txTraceResult{
-				{
-					Result: &ethapi.ExecutionResult{
-						Gas:         params.TxGas,
-						Failed:      false,
-						ReturnValue: "",
-						StructLogs:  []ethapi.StructLogRes{},
-					},
-				},
-			},
+			want:        `[{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`,
 		},
 		// Trace non-existent block
 		{
 			blockNumber: rpc.BlockNumber(genBlocks + 1),
-			config:      nil,
 			expectErr:   fmt.Errorf("block #%d not found", genBlocks+1),
-			expect:      nil,
 		},
 		// Trace latest block
 		{
 			blockNumber: rpc.LatestBlockNumber,
-			config:      nil,
-			expectErr:   nil,
-			expect: []*txTraceResult{
-				{
-					Result: &ethapi.ExecutionResult{
-						Gas:         params.TxGas,
-						Failed:      false,
-						ReturnValue: "",
-						StructLogs:  []ethapi.StructLogRes{},
-					},
+			want:        `[{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`,
+		},
+		// Trace pending block
+		{
+			blockNumber: rpc.PendingBlockNumber,
+			want:        `[{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`,
+		},
+	}
+	for i, tc := range testSuite {
+		result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config)
+		if tc.expectErr != nil {
+			if err == nil {
+				t.Errorf("test %d, want error %v", i, tc.expectErr)
+				continue
+			}
+			if !reflect.DeepEqual(err, tc.expectErr) {
+				t.Errorf("test %d: error mismatch, want %v, get %v", i, tc.expectErr, err)
+			}
+			continue
+		}
+		if err != nil {
+			t.Errorf("test %d, want no error, have %v", i, err)
+			continue
+		}
+		have, _ := json.Marshal(result)
+		want := tc.want
+		if string(have) != want {
+			t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(have), want)
+		}
+	}
+}
+
+func TestTracingWithOverrides(t *testing.T) {
+	t.Parallel()
+	// Initialize test accounts
+	accounts := newAccounts(3)
+	genesis := &core.Genesis{Alloc: core.GenesisAlloc{
+		accounts[0].addr: {Balance: big.NewInt(params.Ether)},
+		accounts[1].addr: {Balance: big.NewInt(params.Ether)},
+		accounts[2].addr: {Balance: big.NewInt(params.Ether)},
+	}}
+	genBlocks := 10
+	signer := types.HomesteadSigner{}
+	api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+		// Transfer from account[0] to account[1]
+		//    value: 1000 wei
+		//    fee:   0 wei
+		tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, big.NewInt(0), nil), signer, accounts[0].key)
+		b.AddTx(tx)
+	}))
+	randomAccounts := newAccounts(3)
+	type res struct {
+		Gas         int
+		Failed      bool
+		ReturnValue string
+	}
+	var testSuite = []struct {
+		blockNumber rpc.BlockNumber
+		call        ethapi.CallArgs
+		config      *TraceCallConfig
+		expectErr   error
+		want        string
+	}{
+		// Call which can only succeed if the state is overridden
+		{
+			blockNumber: rpc.PendingBlockNumber,
+			call: ethapi.CallArgs{
+				From:  &randomAccounts[0].addr,
+				To:    &randomAccounts[1].addr,
+				Value: (*hexutil.Big)(big.NewInt(1000)),
+			},
+			config: &TraceCallConfig{
+				StateOverrides: &ethapi.StateOverride{
+					randomAccounts[0].addr: ethapi.OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))},
 				},
 			},
+			want: `{"gas":21000,"failed":false,"returnValue":""}`,
 		},
-		// Trace pending block
+		// Invalid call without state overriding
+		{
+			blockNumber: rpc.PendingBlockNumber,
+			call: ethapi.CallArgs{
+				From:  &randomAccounts[0].addr,
+				To:    &randomAccounts[1].addr,
+				Value: (*hexutil.Big)(big.NewInt(1000)),
+			},
+			config:    &TraceCallConfig{},
+			expectErr: core.ErrInsufficientFundsForTransfer,
+		},
+		// Successful simple contract call
+		//
+		// // SPDX-License-Identifier: GPL-3.0
+		//
+		//  pragma solidity >=0.7.0 <0.8.0;
+		//
+		//  /**
+		//   * @title Storage
+		//   * @dev Store & retrieve value in a variable
+		//   */
+		//  contract Storage {
+		//      uint256 public number;
+		//      constructor() {
+		//          number = block.number;
+		//      }
+		//  }
 		{
 			blockNumber: rpc.PendingBlockNumber,
-			config:      nil,
-			expectErr:   nil,
-			expect: []*txTraceResult{
-				{
-					Result: &ethapi.ExecutionResult{
-						Gas:         params.TxGas,
-						Failed:      false,
-						ReturnValue: "",
-						StructLogs:  []ethapi.StructLogRes{},
+			call: ethapi.CallArgs{
+				From: &randomAccounts[0].addr,
+				To:   &randomAccounts[2].addr,
+				Data: newRPCBytes(common.Hex2Bytes("8381f58a")), // call number()
+			},
+			config: &TraceCallConfig{
+				//Tracer: &tracer,
+				StateOverrides: &ethapi.StateOverride{
+					randomAccounts[2].addr: ethapi.OverrideAccount{
+						Code:      newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060285760003560e01c80638381f58a14602d575b600080fd5b60336049565b6040518082815260200191505060405180910390f35b6000548156fea2646970667358221220eab35ffa6ab2adfe380772a48b8ba78e82a1b820a18fcb6f59aa4efb20a5f60064736f6c63430007040033")),
+						StateDiff: newStates([]common.Hash{{}}, []common.Hash{common.BigToHash(big.NewInt(123))}),
 					},
 				},
 			},
+			want: `{"gas":23347,"failed":false,"returnValue":"000000000000000000000000000000000000000000000000000000000000007b"}`,
 		},
 	}
-	for _, testspec := range testSuite {
-		result, err := api.TraceBlockByNumber(context.Background(), testspec.blockNumber, testspec.config)
-		if testspec.expectErr != nil {
+	for i, tc := range testSuite {
+		result, err := api.TraceCall(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, tc.config)
+		if tc.expectErr != nil {
 			if err == nil {
-				t.Errorf("Expect error %v, get nothing", testspec.expectErr)
+				t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr)
 				continue
 			}
-			if !reflect.DeepEqual(err, testspec.expectErr) {
-				t.Errorf("Error mismatch, want %v, get %v", testspec.expectErr, err)
-			}
-		} else {
-			if err != nil {
-				t.Errorf("Expect no error, get %v", err)
-				continue
-			}
-			if !reflect.DeepEqual(result, testspec.expect) {
-				t.Errorf("Result mismatch, want %v, get %v", testspec.expect, result)
+			if !errors.Is(err, tc.expectErr) {
+				t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err)
 			}
+			continue
+		}
+		if err != nil {
+			t.Errorf("test %d: want no error, have %v", i, err)
+			continue
+		}
+		// Turn result into res-struct
+		var (
+			have res
+			want res
+		)
+		resBytes, _ := json.Marshal(result)
+		json.Unmarshal(resBytes, &have)
+		json.Unmarshal([]byte(tc.want), &want)
+		if !reflect.DeepEqual(have, want) {
+			t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(resBytes), want)
 		}
 	}
 }
@@ -618,11 +564,6 @@ func newRPCBalance(balance *big.Int) **hexutil.Big {
 	return &rpcBalance
 }
 
-func newRPCUint64(number uint64) *hexutil.Uint64 {
-	rpcUint64 := hexutil.Uint64(number)
-	return &rpcUint64
-}
-
 func newRPCBytes(bytes []byte) *hexutil.Bytes {
 	rpcBytes := hexutil.Bytes(bytes)
 	return &rpcBytes

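TestTracingWithOverrides compares results by bouncing both sides through JSON into the same struct, so field order and numeric typing in the want string don't matter. The trick in isolation:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type res struct {
	Gas    int
	Failed bool
}

// jsonSame normalises got and want through the res struct before comparing.
func jsonSame(got interface{}, want string) bool {
	var have, expect res
	b, err := json.Marshal(got)
	if err != nil {
		return false
	}
	if json.Unmarshal(b, &have) != nil || json.Unmarshal([]byte(want), &expect) != nil {
		return false
	}
	return reflect.DeepEqual(have, expect)
}

func main() {
	fmt.Println(jsonSame(map[string]interface{}{"gas": 21000, "failed": false}, `{"gas":21000,"failed":false}`))
}
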
+ 112 - 0
eth/tracers/internal/tracers/call_tracer_js.js

@@ -0,0 +1,112 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+
+// callFrameTracer uses the new call frame tracing methods to report useful information
+// about internal messages of a transaction.
+{
+	callstack: [{}],
+	fault: function(log, db) {},
+	result: function(ctx, db) {
+		// Prepare outer message info
+		var result = {
+			type:    ctx.type,
+			from:    toHex(ctx.from),
+			to:      toHex(ctx.to),
+			value:   '0x' + ctx.value.toString(16),
+			gas:     '0x' + bigInt(ctx.gas).toString(16),
+			gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),
+			input:   toHex(ctx.input),
+			output:  toHex(ctx.output),
+		}
+		if (this.callstack[0].calls !== undefined) {
+			result.calls = this.callstack[0].calls
+		}
+		if (this.callstack[0].error !== undefined) {
+			result.error = this.callstack[0].error
+		} else if (ctx.error !== undefined) {
+			result.error = ctx.error
+		}
+		if (result.error !== undefined && (result.error !== "execution reverted" || result.output === "0x")) {
+			delete result.output
+		}
+
+		return this.finalize(result)
+	},
+	enter: function(frame) {
+		var call = {
+			type: frame.getType(),
+			from: toHex(frame.getFrom()),
+			to: toHex(frame.getTo()),
+			input: toHex(frame.getInput()),
+			gas: '0x' + bigInt(frame.getGas()).toString('16'),
+		}
+		if (frame.getValue() !== undefined) {
+			call.value = '0x' + bigInt(frame.getValue()).toString(16)
+		}
+		this.callstack.push(call)
+	},
+	exit: function(frameResult) {
+		var len = this.callstack.length
+		if (len > 1) {
+			var call = this.callstack.pop()
+			call.gasUsed = '0x' + bigInt(frameResult.getGasUsed()).toString('16')
+			var error = frameResult.getError()
+			if (error === undefined) {
+				call.output = toHex(frameResult.getOutput())
+			} else {
+				call.error = error
+				if (call.type === 'CREATE' || call.type === 'CREATE2') {
+					delete call.to
+				}
+			}
+			len -= 1
+			if (this.callstack[len-1].calls === undefined) {
+				this.callstack[len-1].calls = []
+			}
+			this.callstack[len-1].calls.push(call)
+		}
+	},
+	// finalize recreates a call object using the final desired field order for json
+	// serialization. This is purely cosmetic: it passes meaningfully ordered results
+	// to users who display the output rather than parse it.
+	finalize: function(call) {
+		var sorted = {
+			type:    call.type,
+			from:    call.from,
+			to:      call.to,
+			value:   call.value,
+			gas:     call.gas,
+			gasUsed: call.gasUsed,
+			input:   call.input,
+			output:  call.output,
+			error:   call.error,
+			time:    call.time,
+			calls:   call.calls,
+		}
+		for (var key in sorted) {
+			if (sorted[key] === undefined) {
+				delete sorted[key]
+			}
+		}
+		if (sorted.calls !== undefined) {
+			for (var i=0; i<sorted.calls.length; i++) {
+				sorted.calls[i] = this.finalize(sorted.calls[i])
+			}
+		}
+		return sorted
+	}
+}

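The tracer above is normally selected by name through the debug API; programmatically it can be wired into an EVM the same way the tests below do. A hedged sketch (the tracer name and force-load import are taken from this diff; the rest is assumption):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers"

	// Force-load the js tracer package so the name resolves, as in the tests.
	_ "github.com/ethereum/go-ethereum/eth/tracers/js"
)

func main() {
	tracer, err := tracers.New("callTracerJs", new(tracers.Context))
	if err != nil {
		panic(err)
	}
	// Debug must be set or the interpreter never invokes the tracer hooks.
	cfg := vm.Config{Debug: true, Tracer: tracer}
	fmt.Println("tracing enabled:", cfg.Debug)
	// ...run a transaction with cfg, then tracer.GetResult() returns the JSON
	// call tree assembled by the result() callback above.
}
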
+ 394 - 0
eth/tracers/internal/tracetest/calltrace_test.go

@@ -0,0 +1,394 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package tracetest
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"math/big"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"testing"
+	"unicode"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/eth/tracers"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/tests"
+
+	// Force-load native and js packages, to trigger registration
+	_ "github.com/ethereum/go-ethereum/eth/tracers/js"
+	_ "github.com/ethereum/go-ethereum/eth/tracers/native"
+)
+
+// To generate a new callTracer test, copy-paste the makeTest method below into
+// a Geth console and call it with a transaction hash you wish to export.
+
+/*
+// makeTest generates a callTracer test by running a prestate reassembled and a
+// call trace run, assembling all the gathered information into a test case.
+var makeTest = function(tx, rewind) {
+  // Generate the genesis block from the block, transaction and prestate data
+  var block   = eth.getBlock(eth.getTransaction(tx).blockHash);
+  var genesis = eth.getBlock(block.parentHash);
+
+  delete genesis.gasUsed;
+  delete genesis.logsBloom;
+  delete genesis.parentHash;
+  delete genesis.receiptsRoot;
+  delete genesis.sha3Uncles;
+  delete genesis.size;
+  delete genesis.transactions;
+  delete genesis.transactionsRoot;
+  delete genesis.uncles;
+
+  genesis.gasLimit  = genesis.gasLimit.toString();
+  genesis.number    = genesis.number.toString();
+  genesis.timestamp = genesis.timestamp.toString();
+
+  genesis.alloc = debug.traceTransaction(tx, {tracer: "prestateTracer", rewind: rewind});
+  for (var key in genesis.alloc) {
+    genesis.alloc[key].nonce = genesis.alloc[key].nonce.toString();
+  }
+  genesis.config = admin.nodeInfo.protocols.eth.config;
+
+  // Generate the call trace and produce the test input
+  var result = debug.traceTransaction(tx, {tracer: "callTracer", rewind: rewind});
+  delete result.time;
+
+  console.log(JSON.stringify({
+    genesis: genesis,
+    context: {
+      number:     block.number.toString(),
+      difficulty: block.difficulty,
+      timestamp:  block.timestamp.toString(),
+      gasLimit:   block.gasLimit.toString(),
+      miner:      block.miner,
+    },
+    input:  eth.getRawTransaction(tx),
+    result: result,
+  }, null, 2));
+}
+*/
+
+type callContext struct {
+	Number     math.HexOrDecimal64   `json:"number"`
+	Difficulty *math.HexOrDecimal256 `json:"difficulty"`
+	Time       math.HexOrDecimal64   `json:"timestamp"`
+	GasLimit   math.HexOrDecimal64   `json:"gasLimit"`
+	Miner      common.Address        `json:"miner"`
+}
+
+// callTrace is the result of a callTracer run.
+type callTrace struct {
+	Type    string          `json:"type"`
+	From    common.Address  `json:"from"`
+	To      common.Address  `json:"to"`
+	Input   hexutil.Bytes   `json:"input"`
+	Output  hexutil.Bytes   `json:"output"`
+	Gas     *hexutil.Uint64 `json:"gas,omitempty"`
+	GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"`
+	Value   *hexutil.Big    `json:"value,omitempty"`
+	Error   string          `json:"error,omitempty"`
+	Calls   []callTrace     `json:"calls,omitempty"`
+}
+
+// callTracerTest defines a single test to check the call tracer against.
+type callTracerTest struct {
+	Genesis *core.Genesis `json:"genesis"`
+	Context *callContext  `json:"context"`
+	Input   string        `json:"input"`
+	Result  *callTrace    `json:"result"`
+}
+
+// Iterates over all the input-output datasets in the tracer test harness and
+// runs the JavaScript tracers against them.
+func TestCallTracerLegacy(t *testing.T) {
+	testCallTracer("callTracerLegacy", "call_tracer_legacy", t)
+}
+
+func TestCallTracerJs(t *testing.T) {
+	testCallTracer("callTracerJs", "call_tracer", t)
+}
+
+func TestCallTracerNative(t *testing.T) {
+	testCallTracer("callTracer", "call_tracer", t)
+}
+
+func testCallTracer(tracerName string, dirPath string, t *testing.T) {
+	files, err := ioutil.ReadDir(filepath.Join("testdata", dirPath))
+	if err != nil {
+		t.Fatalf("failed to retrieve tracer test suite: %v", err)
+	}
+	for _, file := range files {
+		if !strings.HasSuffix(file.Name(), ".json") {
+			continue
+		}
+		file := file // capture range variable
+		t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
+			t.Parallel()
+
+			var (
+				test = new(callTracerTest)
+				tx   = new(types.Transaction)
+			)
+			// Call tracer test found, read it from disk
+			if blob, err := ioutil.ReadFile(filepath.Join("testdata", dirPath, file.Name())); err != nil {
+				t.Fatalf("failed to read testcase: %v", err)
+			} else if err := json.Unmarshal(blob, test); err != nil {
+				t.Fatalf("failed to parse testcase: %v", err)
+			}
+			if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
+				t.Fatalf("failed to parse testcase input: %v", err)
+			}
+			// Configure a blockchain with the given prestate
+			var (
+				signer    = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
+				origin, _ = signer.Sender(tx)
+				txContext = vm.TxContext{
+					Origin:   origin,
+					GasPrice: tx.GasPrice(),
+				}
+				context = vm.BlockContext{
+					CanTransfer: core.CanTransfer,
+					Transfer:    core.Transfer,
+					Coinbase:    test.Context.Miner,
+					BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
+					Time:        new(big.Int).SetUint64(uint64(test.Context.Time)),
+					Difficulty:  (*big.Int)(test.Context.Difficulty),
+					GasLimit:    uint64(test.Context.GasLimit),
+				}
+				_, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+			)
+			tracer, err := tracers.New(tracerName, new(tracers.Context))
+			if err != nil {
+				t.Fatalf("failed to create call tracer: %v", err)
+			}
+			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+			msg, err := tx.AsMessage(signer)
+			if err != nil {
+				t.Fatalf("failed to prepare transaction for tracing: %v", err)
+			}
+			st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
+			if _, err = st.TransitionDb(); err != nil {
+				t.Fatalf("failed to execute transaction: %v", err)
+			}
+			// Retrieve the trace result and compare against the etalon
+			res, err := tracer.GetResult()
+			if err != nil {
+				t.Fatalf("failed to retrieve trace result: %v", err)
+			}
+			ret := new(callTrace)
+			if err := json.Unmarshal(res, ret); err != nil {
+				t.Fatalf("failed to unmarshal trace result: %v", err)
+			}
+
+			if !jsonEqual(ret, test.Result) {
+				// uncomment this for easier debugging
+				//have, _ := json.MarshalIndent(ret, "", " ")
+				//want, _ := json.MarshalIndent(test.Result, "", " ")
+				//t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want))
+				t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result)
+			}
+		})
+	}
+}
+
+// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
+// comparison
+func jsonEqual(x, y interface{}) bool {
+	xTrace := new(callTrace)
+	yTrace := new(callTrace)
+	if xj, err := json.Marshal(x); err == nil {
+		json.Unmarshal(xj, xTrace)
+	} else {
+		return false
+	}
+	if yj, err := json.Marshal(y); err == nil {
+		json.Unmarshal(yj, yTrace)
+	} else {
+		return false
+	}
+	return reflect.DeepEqual(xTrace, yTrace)
+}
+
+// camel converts a snake cased input string into a camel cased output.
+func camel(str string) string {
+	pieces := strings.Split(str, "_")
+	for i := 1; i < len(pieces); i++ {
+		pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:]
+	}
+	return strings.Join(pieces, "")
+}
+
+func BenchmarkTracers(b *testing.B) {
+	files, err := ioutil.ReadDir(filepath.Join("testdata", "call_tracer"))
+	if err != nil {
+		b.Fatalf("failed to retrieve tracer test suite: %v", err)
+	}
+	for _, file := range files {
+		if !strings.HasSuffix(file.Name(), ".json") {
+			continue
+		}
+		file := file // capture range variable
+		b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) {
+			blob, err := ioutil.ReadFile(filepath.Join("testdata", "call_tracer", file.Name()))
+			if err != nil {
+				b.Fatalf("failed to read testcase: %v", err)
+			}
+			test := new(callTracerTest)
+			if err := json.Unmarshal(blob, test); err != nil {
+				b.Fatalf("failed to parse testcase: %v", err)
+			}
+			benchTracer("callTracerNative", test, b)
+		})
+	}
+}
+
+func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
+	// Configure a blockchain with the given prestate
+	tx := new(types.Transaction)
+	if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
+		b.Fatalf("failed to parse testcase input: %v", err)
+	}
+	signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
+	msg, err := tx.AsMessage(signer)
+	if err != nil {
+		b.Fatalf("failed to prepare transaction for tracing: %v", err)
+	}
+	origin, _ := signer.Sender(tx)
+	txContext := vm.TxContext{
+		Origin:   origin,
+		GasPrice: tx.GasPrice(),
+	}
+	context := vm.BlockContext{
+		CanTransfer: core.CanTransfer,
+		Transfer:    core.Transfer,
+		Coinbase:    test.Context.Miner,
+		BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
+		Time:        new(big.Int).SetUint64(uint64(test.Context.Time)),
+		Difficulty:  (*big.Int)(test.Context.Difficulty),
+		GasLimit:    uint64(test.Context.GasLimit),
+	}
+	_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		tracer, err := tracers.New(tracerName, new(tracers.Context))
+		if err != nil {
+			b.Fatalf("failed to create call tracer: %v", err)
+		}
+		evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
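+		// Snapshot the prestate so RevertToSnapshot below restores it; tracing the
+		// transaction mutates the state and every iteration must replay the same input.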
+		snap := statedb.Snapshot()
+		st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
+		if _, err = st.TransitionDb(); err != nil {
+			b.Fatalf("failed to execute transaction: %v", err)
+		}
+		if _, err = tracer.GetResult(); err != nil {
+			b.Fatal(err)
+		}
+		statedb.RevertToSnapshot(snap)
+	}
+}
+
+// TestZeroValueToNotExitCall tests the call tracer(s) on the following scenario:
+// a transaction to A, where A calls B with zero value and B does not yet exist.
+// Expected: the enter/exit hooks are invoked and the inner call shows up in the result.
+func TestZeroValueToNotExitCall(t *testing.T) {
+	var to = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+	privkey, err := crypto.HexToECDSA("0000000000000000deadbeef00000000000000000000000000000000deadbeef")
+	if err != nil {
+		t.Fatalf("err %v", err)
+	}
+	signer := types.NewEIP155Signer(big.NewInt(1))
+	tx, err := types.SignNewTx(privkey, signer, &types.LegacyTx{
+		GasPrice: big.NewInt(0),
+		Gas:      50000,
+		To:       &to,
+	})
+	if err != nil {
+		t.Fatalf("err %v", err)
+	}
+	origin, _ := signer.Sender(tx)
+	txContext := vm.TxContext{
+		Origin:   origin,
+		GasPrice: big.NewInt(1),
+	}
+	context := vm.BlockContext{
+		CanTransfer: core.CanTransfer,
+		Transfer:    core.Transfer,
+		Coinbase:    common.Address{},
+		BlockNumber: new(big.Int).SetUint64(8000000),
+		Time:        new(big.Int).SetUint64(5),
+		Difficulty:  big.NewInt(0x30000),
+		GasLimit:    uint64(6000000),
+	}
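+	// Contract code for the destination account: a zero-value CALL into the empty
+	// account 0xff, which should still trigger the tracer's enter/exit handling.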
+	var code = []byte{
+		byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero
+		byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS
+		byte(vm.CALL),
+	}
+	var alloc = core.GenesisAlloc{
+		to: core.GenesisAccount{
+			Nonce: 1,
+			Code:  code,
+		},
+		origin: core.GenesisAccount{
+			Nonce:   0,
+			Balance: big.NewInt(500000000000000),
+		},
+	}
+	_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
+	// Create the tracer, the EVM environment and run it
+	tracer, err := tracers.New("callTracer", nil)
+	if err != nil {
+		t.Fatalf("failed to create call tracer: %v", err)
+	}
+	evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
+	msg, err := tx.AsMessage(signer)
+	if err != nil {
+		t.Fatalf("failed to prepare transaction for tracing: %v", err)
+	}
+	st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
+	if _, err = st.TransitionDb(); err != nil {
+		t.Fatalf("failed to execute transaction: %v", err)
+	}
+	// Retrieve the trace result and compare it against the expected trace
+	res, err := tracer.GetResult()
+	if err != nil {
+		t.Fatalf("failed to retrieve trace result: %v", err)
+	}
+	have := new(callTrace)
+	if err := json.Unmarshal(res, have); err != nil {
+		t.Fatalf("failed to unmarshal trace result: %v", err)
+	}
+	wantStr := `{"type":"CALL","from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","to":"0x00000000000000000000000000000000deadbeef","value":"0x0","gas":"0x7148","gasUsed":"0x2d0","input":"0x","output":"0x","calls":[{"type":"CALL","from":"0x00000000000000000000000000000000deadbeef","to":"0x00000000000000000000000000000000000000ff","value":"0x0","gas":"0x6cbf","gasUsed":"0x0","input":"0x","output":"0x"}]}`
+	want := new(callTrace)
+	if err := json.Unmarshal([]byte(wantStr), want); err != nil {
+		t.Fatalf("failed to unmarshal the expected trace: %v", err)
+	}
+	if !jsonEqual(have, want) {
+		t.Errorf("trace mismatch:\nhave %+v\nwant %+v", have, want)
+	}
+}
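
Assuming a standard Go toolchain, the relocated suite can be exercised from the
repository root with, for example, "go test ./eth/tracers/internal/tracetest -run
TestZeroValueToNotExitCall" or "go test ./eth/tracers/internal/tracetest -bench
BenchmarkTracers".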

+ 0 - 0
eth/tracers/testdata/call_tracer_create.json → eth/tracers/internal/tracetest/testdata/call_tracer/create.json


+ 0 - 0
eth/tracers/testdata/call_tracer_deep_calls.json → eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json


+ 0 - 0
eth/tracers/testdata/call_tracer_delegatecall.json → eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json


File diff suppressed because it is too large
+ 12 - 0
eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json


+ 63 - 0
eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json

@@ -0,0 +1,63 @@
+{
+  "genesis": {
+    "difficulty": "117067574",
+    "extraData": "0xd783010502846765746887676f312e372e33856c696e7578",
+    "gasLimit": "4712380",
+    "hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486",
+    "miner": "0x0c062b329265c965deef1eede55183b3acb8f611",
+    "mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d",
+    "nonce": "0x2b469722b8e28c45",
+    "number": "24973",
+    "stateRoot": "0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369",
+    "timestamp": "1479891145",
+    "totalDifficulty": "1892250259406",
+    "alloc": {
+      "0x6c06b16512b332e6cd8293a2974872674716ce18": {
+        "balance": "0x0",
+        "nonce": "1",
+        "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056",
+        "storage": {}
+      },
+      "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31": {
+        "balance": "0x229ebbb36c3e0f20",
+        "nonce": "3",
+        "code": "0x",
+        "storage": {}
+      }
+    },
+    "config": {
+      "chainId": 3,
+      "homesteadBlock": 0,
+      "daoForkSupport": true,
+      "eip150Block": 0,
+      "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
+      "eip155Block": 10,
+      "eip158Block": 10,
+      "byzantiumBlock": 1700000,
+      "constantinopleBlock": 4230000,
+      "petersburgBlock": 4939394,
+      "istanbulBlock": 6485846,
+      "muirGlacierBlock": 7117117,
+      "ethash": {}
+    }
+  },
+  "context": {
+    "number": "24974",
+    "difficulty": "117067574",
+    "timestamp": "1479891162",
+    "gasLimit": "4712388",
+    "miner": "0xc822ef32e6d26e170b70cf761e204c1806265914"
+  },
+  "input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745",
+  "result": {
+    "type": "CALL",
+    "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
+    "to": "0x6c06b16512b332e6cd8293a2974872674716ce18",
+    "value": "0x0",
+    "gas": "0x1a466",
+    "gasUsed": "0x1dc6",
+    "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000",
+    "output": "0x",
+    "calls": []
+  }
+}
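
For orientation, the JSON fixtures above all follow one schema; the sketch below
mirrors the fields referenced by calltrace_test.go (test.Genesis, test.Context,
test.Input, test.Result). The callContext type and the exact field types here are
illustrative assumptions, not the repository's definitions.

// Hypothetical sketch of the testcase container consumed by the tracer tests.
type callTracerTest struct {
	Genesis *core.Genesis `json:"genesis"` // chain config plus prestate alloc
	Context *callContext  `json:"context"` // block number, difficulty, timestamp, gasLimit, miner
	Input   string        `json:"input"`   // hex-encoded RLP of the traced transaction
	Result  *callTrace    `json:"result"`  // expected call trace
}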

+ 0 - 0
eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json → eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json


+ 0 - 0
eth/tracers/testdata/call_tracer_oog.json → eth/tracers/internal/tracetest/testdata/call_tracer/oog.json


+ 0 - 0
eth/tracers/testdata/call_tracer_revert.json → eth/tracers/internal/tracetest/testdata/call_tracer/revert.json


+ 0 - 0
eth/tracers/testdata/call_tracer_revert_reason.json → eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json


+ 75 - 0
eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json

@@ -0,0 +1,75 @@
+{
+  "context": {
+    "difficulty": "3502894804",
+    "gasLimit": "4722976",
+    "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724",
+    "number": "2289806",
+    "timestamp": "1513601314"
+  },
+  "genesis": {
+    "alloc": {
+      "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
+        "balance": "0x0",
+        "code": "0x",
+        "nonce": "22",
+        "storage": {}
+      },
+      "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
+        "balance": "0x4d87094125a369d9bd5",
+        "code": "0x61deadff",
+        "nonce": "1",
+        "storage": {}
+      },
+      "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
+        "balance": "0x1780d77678137ac1b775",
+        "code": "0x",
+        "nonce": "29072",
+        "storage": {}
+      }
+    },
+    "config": {
+      "byzantiumBlock": 1700000,
+      "chainId": 3,
+      "daoForkSupport": true,
+      "eip150Block": 0,
+      "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
+      "eip155Block": 10,
+      "eip158Block": 10,
+      "ethash": {},
+      "homesteadBlock": 0
+    },
+    "difficulty": "3509749784",
+    "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
+    "gasLimit": "4727564",
+    "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440",
+    "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
+    "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada",
+    "nonce": "0x4eb12e19c16d43da",
+    "number": "2289805",
+    "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f",
+    "timestamp": "1513601261",
+    "totalDifficulty": "7143276353481064"
+  },
+  "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4",
+  "result": {
+    "calls": [
+      {
+        "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe",
+        "gas": "0x0",
+        "gasUsed": "0x0",
+        "input": "0x",
+        "to": "0x000000000000000000000000000000000000dEaD",
+        "type": "SELFDESTRUCT",
+        "value": "0x4d87094125a369d9bd5"
+      }
+    ],
+    "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
+    "gas": "0x10738",
+    "gasUsed": "0x7533",
+    "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
+    "output": "0x",
+    "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe",
+    "type": "CALL",
+    "value": "0x0"
+  }
+}

File diff suppressed because it is too large
+ 18 - 0
eth/tracers/internal/tracetest/testdata/call_tracer/simple.json


+ 0 - 0
eth/tracers/testdata/call_tracer_throw.json → eth/tracers/internal/tracetest/testdata/call_tracer/throw.json


File diff suppressed because it is too large
+ 46 - 0
eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json


File diff suppressed because it is too large
+ 12 - 0
eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json


File diff suppressed because it is too large
+ 12 - 0
eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json


+ 0 - 0
eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json → eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json


+ 0 - 0
eth/tracers/testdata/call_tracer_inner_instafail.json → eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json


File diff suppressed because it is too large
+ 12 - 0
eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json


File diff suppressed because it is too large
+ 12 - 0
eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json


File diff suppressed because it is too large
+ 18 - 0
eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json


Some files were not shown because too many files changed in this diff.