Browse source

Merge pull request #1889 from karalabe/fast-sync-rebase

eth/63 fast synchronization algorithm
Jeffrey Wilcke 10 years ago
parent commit
0467a6ceec
53 changed files with 4570 additions and 1457 deletions
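In short: with this change a node started with the new --fast flag syncs by downloading the header chain first, then filling in block bodies and receipts (the eth/63 protocol additions), and finally fetching the state trie directly instead of re-executing every historical transaction; the bulk of the new logic lives in core/blockchain.go, eth/downloader, and the new trie/sync.go and core/state/sync.go below.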
  1. cmd/geth/main.go (+1 -2)
  2. cmd/utils/flags.go (+5 -18)
  3. core/bench_test.go (+1 -1)
  4. core/block_processor.go (+34 -10)
  5. core/block_processor_test.go (+10 -10)
  6. core/blockchain.go (+544 -96)
  7. core/blockchain_test.go (+531 -242)
  8. core/chain_makers.go (+47 -21)
  9. core/chain_makers_test.go (+1 -1)
  10. core/chain_pow_test.go (+3 -3)
  11. core/chain_util.go (+24 -2)
  12. core/chain_util_test.go (+36 -17)
  13. core/error.go (+1 -1)
  14. core/genesis.go (+1 -1)
  15. core/state/sync.go (+70 -0)
  16. core/state/sync_test.go (+238 -0)
  17. core/transaction_util.go (+9 -9)
  18. core/types/block.go (+14 -17)
  19. core/types/bloom9.go (+1 -1)
  20. core/types/common.go (+2 -0)
  21. core/types/receipt.go (+75 -49)
  22. core/vm/log.go (+28 -23)
  23. eth/backend.go (+5 -4)
  24. eth/backend_test.go (+5 -5)
  25. eth/downloader/downloader.go (+342 -244)
  26. eth/downloader/downloader_test.go (+622 -235)
  27. eth/downloader/metrics.go (+10 -0)
  28. eth/downloader/modes.go (+26 -0)
  29. eth/downloader/peer.go (+191 -70)
  30. eth/downloader/queue.go (+636 -198)
  31. eth/downloader/types.go (+140 -0)
  32. eth/fetcher/fetcher.go (+20 -6)
  33. eth/fetcher/fetcher_test.go (+40 -11)
  34. eth/filters/filter.go (+1 -1)
  35. eth/filters/filter_test.go (+16 -17)
  36. eth/handler.go (+99 -71)
  37. eth/handler_test.go (+44 -20)
  38. eth/helper_test.go (+18 -3)
  39. eth/metrics.go (+1 -1)
  40. eth/peer.go (+4 -4)
  41. eth/protocol.go (+2 -8)
  42. eth/protocol_test.go (+3 -6)
  43. eth/sync.go (+19 -2)
  44. eth/sync_test.go (+53 -0)
  45. ethdb/memory_database.go (+44 -13)
  46. miner/worker.go (+1 -1)
  47. rpc/api/debug.go (+1 -7)
  48. rpc/api/eth.go (+1 -3)
  49. rpc/api/eth_args.go (+1 -1)
  50. rpc/api/parsing.go (+2 -2)
  51. trie/sync.go (+285 -0)
  52. trie/sync_test.go (+257 -0)
  53. trie/trie.go (+5 -0)

+ 1 - 2
cmd/geth/main.go

@@ -304,7 +304,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
 		utils.DataDirFlag,
 		utils.BlockchainVersionFlag,
 		utils.OlympicFlag,
-		utils.EthVersionFlag,
+		utils.FastSyncFlag,
 		utils.CacheFlag,
 		utils.JSpathFlag,
 		utils.ListenPortFlag,
@@ -360,7 +360,6 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
 		utils.SetupLogger(ctx)
 		utils.SetupNetwork(ctx)
 		utils.SetupVM(ctx)
-		utils.SetupEth(ctx)
 		if ctx.GlobalBool(utils.PProfEanbledFlag.Name) {
 			utils.StartPProf(ctx)
 		}

+ 5 - 18
cmd/utils/flags.go

@@ -148,10 +148,9 @@ var (
 		Name:  "olympic",
 		Usage: "Use olympic style protocol",
 	}
-	EthVersionFlag = cli.IntFlag{
-		Name:  "eth",
-		Value: 62,
-		Usage: "Highest eth protocol to advertise (temporary, dev option)",
+	FastSyncFlag = cli.BoolFlag{
+		Name:  "fast",
+		Usage: "Enables fast syncing through state downloads",
 	}
 
 	// miner settings
@@ -425,12 +424,13 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
 	if err != nil {
 		glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default")
 	}
-
+	// Assemble the entire eth configuration and return
 	cfg := &eth.Config{
 		Name:                    common.MakeName(clientID, version),
 		DataDir:                 MustDataDir(ctx),
 		GenesisNonce:            ctx.GlobalInt(GenesisNonceFlag.Name),
 		GenesisFile:             ctx.GlobalString(GenesisFileFlag.Name),
+		FastSync:                ctx.GlobalBool(FastSyncFlag.Name),
 		BlockChainVersion:       ctx.GlobalInt(BlockchainVersionFlag.Name),
 		DatabaseCache:           ctx.GlobalInt(CacheFlag.Name),
 		SkipBcVersionCheck:      false,
@@ -499,7 +499,6 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
 
 		glog.V(logger.Info).Infoln("dev mode enabled")
 	}
-
 	return cfg
 }
 
@@ -532,18 +531,6 @@ func SetupVM(ctx *cli.Context) {
 	vm.SetJITCacheSize(ctx.GlobalInt(VMJitCacheFlag.Name))
 }
 
-// SetupEth configures the eth packages global settings
-func SetupEth(ctx *cli.Context) {
-	version := ctx.GlobalInt(EthVersionFlag.Name)
-	for len(eth.ProtocolVersions) > 0 && eth.ProtocolVersions[0] > uint(version) {
-		eth.ProtocolVersions = eth.ProtocolVersions[1:]
-		eth.ProtocolLengths = eth.ProtocolLengths[1:]
-	}
-	if len(eth.ProtocolVersions) == 0 {
-		Fatalf("No valid eth protocols remaining")
-	}
-}
-
 // MakeChain creates a chain manager from set command line flags.
 func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database) {
 	datadir := MustDataDir(ctx)
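Reviewer note: the flag plumbing above is all there is to enabling fast sync from the CLI. A minimal, self-contained sketch of the same BoolFlag pattern, assuming the codegangsta/cli API vendored at the time (the demo app and printout are illustrative, not part of this diff):

	package main

	import (
		"fmt"
		"os"

		"github.com/codegangsta/cli"
	)

	// Mirrors FastSyncFlag above: a boolean switch that takes no value.
	var fastSyncFlag = cli.BoolFlag{
		Name:  "fast",
		Usage: "Enables fast syncing through state downloads",
	}

	func main() {
		app := cli.NewApp()
		app.Flags = []cli.Flag{fastSyncFlag}
		app.Action = func(ctx *cli.Context) {
			// geth itself reads the value via ctx.GlobalBool(FastSyncFlag.Name)
			// when assembling eth.Config; at the top level ctx.Bool is equivalent.
			fmt.Println("fast sync enabled:", ctx.Bool(fastSyncFlag.Name))
		}
		app.Run(os.Args)
	}

Invoked as geth --fast, the value lands in eth.Config.FastSync via MakeEthConfig.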

+ 1 - 1
core/bench_test.go

@@ -163,7 +163,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// Generate a chain of b.N blocks using the supplied block
 	// generator function.
 	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
-	chain := GenerateChain(genesis, db, b.N, gen)
+	chain, _ := GenerateChain(genesis, db, b.N, gen)
 
 	// Time the insertion of the new chain.
 	// State and blocks are stored in the same DB.

+ 34 - 10
core/block_processor.go

@@ -128,7 +128,7 @@ func (self *BlockProcessor) ApplyTransaction(gp *GasPool, statedb *state.StateDB
 	}
 
 	logs := statedb.GetLogs(tx.Hash())
-	receipt.SetLogs(logs)
+	receipt.Logs = logs
 	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
 
 	glog.V(logger.Debug).Infoln(receipt)
@@ -212,14 +212,16 @@ func (sm *BlockProcessor) Process(block *types.Block) (logs vm.Logs, receipts ty
 	defer sm.mutex.Unlock()
 
 	if sm.bc.HasBlock(block.Hash()) {
-		return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
+		if _, err := state.New(block.Root(), sm.chainDb); err == nil {
+			return nil, nil, &KnownBlockError{block.Number(), block.Hash()}
+		}
 	}
-
-	if !sm.bc.HasBlock(block.ParentHash()) {
-		return nil, nil, ParentError(block.ParentHash())
+	if parent := sm.bc.GetBlock(block.ParentHash()); parent != nil {
+		if _, err := state.New(parent.Root(), sm.chainDb); err == nil {
+			return sm.processWithParent(block, parent)
+		}
 	}
-	parent := sm.bc.GetBlock(block.ParentHash())
-	return sm.processWithParent(block, parent)
+	return nil, nil, ParentError(block.ParentHash())
 }
 
 func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs vm.Logs, receipts types.Receipts, err error) {
@@ -381,18 +383,40 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs vm.Logs, err error)
 	receipts := GetBlockReceipts(sm.chainDb, block.Hash())
 	// coalesce logs
 	for _, receipt := range receipts {
-		logs = append(logs, receipt.Logs()...)
+		logs = append(logs, receipt.Logs...)
 	}
 	return logs, nil
 }
 
+// ValidateHeader verifies the validity of a header, relying on the database and
+// POW behind the block processor.
+func (sm *BlockProcessor) ValidateHeader(header *types.Header, checkPow, uncle bool) error {
+	// Short circuit if the header's already known or its parent missing
+	if sm.bc.HasHeader(header.Hash()) {
+		return nil
+	}
+	if parent := sm.bc.GetHeader(header.ParentHash); parent == nil {
+		return ParentError(header.ParentHash)
+	} else {
+		return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
+	}
+}
+
+// ValidateHeaderWithParent verifies the validity of a header, relying on the database and
+// POW behind the block processor.
+func (sm *BlockProcessor) ValidateHeaderWithParent(header, parent *types.Header, checkPow, uncle bool) error {
+	if sm.bc.HasHeader(header.Hash()) {
+		return nil
+	}
+	return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
+}
+
 // See YP section 4.3.4. "Block Header Validity"
 // Validates a header. Returns an error if the header is invalid.
 func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
 	if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
 		return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
 	}
-
 	if uncle {
 		if header.Time.Cmp(common.MaxBig) == 1 {
 			return BlockTSTooBigErr
@@ -429,7 +453,7 @@ func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, che
 	if checkPow {
 		// Verify the nonce of the header. Return an error if it's not valid
 		if !pow.Verify(types.NewBlockWithHeader(header)) {
-			return ValidationError("Header's nonce is invalid (= %x)", header.Nonce)
+			return &BlockNonceErr{Hash: header.Hash(), Number: header.Number, Nonce: header.Nonce.Uint64()}
		}
 	}
 	return nil
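Reviewer note: the Process() rewrite above is what lets fast sync coexist with block processing. A block whose body is on disk but whose state trie is absent (the normal situation for fast-synced history) must not short-circuit as known. A standalone sketch of just that rule, with maps standing in for the HasBlock and state.New lookups (illustrative only, not the diff's types):

	package main

	import "fmt"

	// chainView abstracts the two lookups Process performs.
	type chainView struct {
		hasBlock map[string]bool // header and body present
		hasState map[string]bool // state trie present
	}

	// known mirrors the new rule: a block is only skipped as known when
	// both its contents and its post-state are available locally.
	func (c chainView) known(hash string) bool {
		return c.hasBlock[hash] && c.hasState[hash]
	}

	func main() {
		c := chainView{
			hasBlock: map[string]bool{"fast-synced": true},
			hasState: map[string]bool{},
		}
		// Body present but state missing: re-process rather than skip.
		fmt.Println(c.known("fast-synced")) // false
	}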

+ 10 - 10
core/block_processor_test.go

@@ -70,16 +70,16 @@ func TestPutReceipt(t *testing.T) {
 	hash[0] = 2
 
 	receipt := new(types.Receipt)
-	receipt.SetLogs(vm.Logs{&vm.Log{
-		Address:   addr,
-		Topics:    []common.Hash{hash},
-		Data:      []byte("hi"),
-		Number:    42,
-		TxHash:    hash,
-		TxIndex:   0,
-		BlockHash: hash,
-		Index:     0,
-	}})
+	receipt.Logs = vm.Logs{&vm.Log{
+		Address:     addr,
+		Topics:      []common.Hash{hash},
+		Data:        []byte("hi"),
+		BlockNumber: 42,
+		TxHash:      hash,
+		TxIndex:     0,
+		BlockHash:   hash,
+		Index:       0,
+	}}
 
 	PutReceipts(db, types.Receipts{receipt})
 	receipt = GetReceipt(db, common.Hash{})
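Reviewer note: receipts imported during fast sync carry only their consensus fields, so InsertReceiptChain in core/blockchain.go (further down in this diff) recomputes the derived ones. A standalone sketch of the per-transaction gas recovery from the cumulative counters (values made up for illustration):

	package main

	import (
		"fmt"
		"math/big"
	)

	// deriveGasUsed mirrors the loop in InsertReceiptChain: the first
	// receipt's GasUsed equals its cumulative counter, each later one is
	// the difference from its predecessor.
	func deriveGasUsed(cumulative []*big.Int) []*big.Int {
		used := make([]*big.Int, len(cumulative))
		for j, c := range cumulative {
			if j == 0 {
				used[j] = new(big.Int).Set(c)
			} else {
				used[j] = new(big.Int).Sub(c, cumulative[j-1])
			}
		}
		return used
	}

	func main() {
		cum := []*big.Int{big.NewInt(21000), big.NewInt(63000), big.NewInt(84000)}
		fmt.Println(deriveGasUsed(cum)) // [21000 42000 21000]
	}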

+ 544 - 96
core/blockchain.go

@@ -18,10 +18,14 @@
 package core
 
 import (
+	crand "crypto/rand"
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"math/big"
+	mrand "math/rand"
+	"runtime"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -29,6 +33,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/logger"
@@ -36,6 +41,7 @@ import (
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/pow"
 	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
 	"github.com/hashicorp/golang-lru"
 )
 
@@ -67,9 +73,10 @@ type BlockChain struct {
 	chainmu sync.RWMutex
 	tsmu    sync.RWMutex
 
-	td              *big.Int
-	currentBlock    *types.Block
-	currentGasLimit *big.Int
+	checkpoint       int           // checkpoint counts towards the new checkpoint
+	currentHeader    *types.Header // Current head of the header chain (may be above the block chain!)
+	currentBlock     *types.Block  // Current head of the block chain
+	currentFastBlock *types.Block  // Current head of the fast-sync chain (may be above the block chain!)
 
 	headerCache  *lru.Cache // Cache for the most recent block headers
 	bodyCache    *lru.Cache // Cache for the most recent block bodies
@@ -84,7 +91,8 @@ type BlockChain struct {
 	procInterrupt int32 // interrupt signaler for block processing
 	wg            sync.WaitGroup
 
-	pow pow.PoW
+	pow  pow.PoW
+	rand *mrand.Rand
 }
 
 func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) {
@@ -107,6 +115,12 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
 		futureBlocks: futureBlocks,
 		pow:          pow,
 	}
+	// Seed a fast but crypto originating random generator
+	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+	if err != nil {
+		return nil, err
+	}
+	bc.rand = mrand.New(mrand.NewSource(seed.Int64()))
 
 	bc.genesisBlock = bc.GetBlockByNumber(0)
 	if bc.genesisBlock == nil {
 		}
 		}
 		glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
 	}
-	if err := bc.setLastState(); err != nil {
+	if err := bc.loadLastState(); err != nil {
 		return nil, err
 	}
 	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
 	for hash, _ := range BadHashes {
-		if block := bc.GetBlock(hash); block != nil {
-			glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
-			block = bc.GetBlock(block.ParentHash())
-			if block == nil {
-				glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
-			}
-			bc.SetHead(block)
-
-			glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
+		if header := bc.GetHeader(hash); header != nil {
+			glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
+			bc.SetHead(header.Number.Uint64() - 1)
+			glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
 		}
 	}
 	// Take ownership of this particular state
@@ -141,30 +150,146 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
 	return bc, nil
 }
 
-func (bc *BlockChain) SetHead(head *types.Block) {
+// loadLastState loads the last known chain state from the database. This method
+// assumes that the chain manager mutex is held.
+func (self *BlockChain) loadLastState() error {
+	// Restore the last known head block
+	head := GetHeadBlockHash(self.chainDb)
+	if head == (common.Hash{}) {
+		// Corrupt or empty database, init from scratch
+		self.Reset()
+	} else {
+		if block := self.GetBlock(head); block != nil {
+			// Block found, set as the current head
+			self.currentBlock = block
+		} else {
+			// Corrupt or empty database, init from scratch
+			self.Reset()
+		}
+	}
+	// Restore the last known head header
+	self.currentHeader = self.currentBlock.Header()
+	if head := GetHeadHeaderHash(self.chainDb); head != (common.Hash{}) {
+		if header := self.GetHeader(head); header != nil {
+			self.currentHeader = header
+		}
+	}
+	// Restore the last known head fast block
+	self.currentFastBlock = self.currentBlock
+	if head := GetHeadFastBlockHash(self.chainDb); head != (common.Hash{}) {
+		if block := self.GetBlock(head); block != nil {
+			self.currentFastBlock = block
+		}
+	}
+	// Issue a status log and return
+	headerTd := self.GetTd(self.currentHeader.Hash())
+	blockTd := self.GetTd(self.currentBlock.Hash())
+	fastTd := self.GetTd(self.currentFastBlock.Hash())
+
+	glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.currentHeader.Number, self.currentHeader.Hash().Bytes()[:4], headerTd)
+	glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)
+	glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)
+
+	return nil
+}
+
+// SetHead rewinds the local chain to a new head. In the case of headers, everything
+// above the new head will be deleted and the new one set. In the case of blocks
+// though, the head may be further rewound if block bodies are missing (non-archive
+// nodes after a fast sync).
+func (bc *BlockChain) SetHead(head uint64) {
 	bc.mu.Lock()
 	defer bc.mu.Unlock()
 
-	for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
-		DeleteBlock(bc.chainDb, block.Hash())
+	// Figure out the highest known canonical headers and/or blocks
+	height := uint64(0)
+	if bc.currentHeader != nil {
+		if hh := bc.currentHeader.Number.Uint64(); hh > height {
+			height = hh
+		}
+	}
+	if bc.currentBlock != nil {
+		if bh := bc.currentBlock.NumberU64(); bh > height {
+			height = bh
+		}
+	}
+	if bc.currentFastBlock != nil {
+		if fbh := bc.currentFastBlock.NumberU64(); fbh > height {
+			height = fbh
+		}
+	}
+	// Gather all the hashes that need deletion
+	drop := make(map[common.Hash]struct{})
+
+	for bc.currentHeader != nil && bc.currentHeader.Number.Uint64() > head {
+		drop[bc.currentHeader.Hash()] = struct{}{}
+		bc.currentHeader = bc.GetHeader(bc.currentHeader.ParentHash)
+	}
+	for bc.currentBlock != nil && bc.currentBlock.NumberU64() > head {
+		drop[bc.currentBlock.Hash()] = struct{}{}
+		bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash())
+	}
+	for bc.currentFastBlock != nil && bc.currentFastBlock.NumberU64() > head {
+		drop[bc.currentFastBlock.Hash()] = struct{}{}
+		bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash())
+	}
+	// Roll back the canonical chain numbering
+	for i := height; i > head; i-- {
+		DeleteCanonicalHash(bc.chainDb, i)
 	}
+	// Delete everything found by the above rewind
+	for hash, _ := range drop {
+		DeleteHeader(bc.chainDb, hash)
+		DeleteBody(bc.chainDb, hash)
+		DeleteTd(bc.chainDb, hash)
+	}
+	// Clear out any stale content from the caches
 	bc.headerCache.Purge()
 	bc.bodyCache.Purge()
 	bc.bodyRLPCache.Purge()
 	bc.blockCache.Purge()
 	bc.futureBlocks.Purge()
 
-	bc.currentBlock = head
-	bc.setTotalDifficulty(bc.GetTd(head.Hash()))
-	bc.insert(head)
-	bc.setLastState()
+	// Update all computed fields to the new head
+	if bc.currentBlock == nil {
+		bc.currentBlock = bc.genesisBlock
+	}
+	if bc.currentHeader == nil {
+		bc.currentHeader = bc.genesisBlock.Header()
+	}
+	if bc.currentFastBlock == nil {
+		bc.currentFastBlock = bc.genesisBlock
+	}
+	if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
+		glog.Fatalf("failed to reset head block hash: %v", err)
+	}
+	if err := WriteHeadHeaderHash(bc.chainDb, bc.currentHeader.Hash()); err != nil {
+		glog.Fatalf("failed to reset head header hash: %v", err)
+	}
+	if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
+		glog.Fatalf("failed to reset head fast block hash: %v", err)
+	}
+	bc.loadLastState()
 }
 
-func (self *BlockChain) Td() *big.Int {
-	self.mu.RLock()
-	defer self.mu.RUnlock()
+// FastSyncCommitHead sets the current head block to the one defined by the hash
+// irrelevant what the chain contents were prior.
+func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
+	// Make sure that both the block as well at its state trie exists
+	block := self.GetBlock(hash)
+	if block == nil {
+		return fmt.Errorf("non existent block [%x…]", hash[:4])
+	}
+	if _, err := trie.NewSecure(block.Root(), self.chainDb); err != nil {
+		return err
+	}
+	// If all checks out, manually set the head block
+	self.mu.Lock()
+	self.currentBlock = block
+	self.mu.Unlock()
 
-	return new(big.Int).Set(self.td)
+	glog.V(logger.Info).Infof("committed block #%d [%x…] as new head", block.Number(), hash[:4])
+	return nil
 }
 
 func (self *BlockChain) GasLimit() *big.Int {
@@ -181,6 +306,17 @@ func (self *BlockChain) LastBlockHash() common.Hash {
 	return self.currentBlock.Hash()
 }
 
+// CurrentHeader retrieves the current head header of the canonical chain. The
+// header is retrieved from the blockchain's internal cache.
+func (self *BlockChain) CurrentHeader() *types.Header {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	return self.currentHeader
+}
+
+// CurrentBlock retrieves the current head block of the canonical chain. The
+// block is retrieved from the blockchain's internal cache.
 func (self *BlockChain) CurrentBlock() *types.Block {
 	self.mu.RLock()
 	defer self.mu.RUnlock()
@@ -188,11 +324,20 @@ func (self *BlockChain) CurrentBlock() *types.Block {
 	return self.currentBlock
 }
 
+// CurrentFastBlock retrieves the current fast-sync head block of the canonical
+// chain. The block is retrieved from the blockchain's internal cache.
+func (self *BlockChain) CurrentFastBlock() *types.Block {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	return self.currentFastBlock
+}
+
 func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) {
 	self.mu.RLock()
 	defer self.mu.RUnlock()
 
-	return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash()
+	return self.GetTd(self.currentBlock.Hash()), self.currentBlock.Hash(), self.genesisBlock.Hash()
 }
 
 func (self *BlockChain) SetProcessor(proc types.BlockProcessor) {
@@ -203,26 +348,6 @@ func (self *BlockChain) State() (*state.StateDB, error) {
 	return state.New(self.CurrentBlock().Root(), self.chainDb)
 }
 
-func (bc *BlockChain) setLastState() error {
-	head := GetHeadBlockHash(bc.chainDb)
-	if head != (common.Hash{}) {
-		block := bc.GetBlock(head)
-		if block != nil {
-			bc.currentBlock = block
-		}
-	} else {
-		bc.Reset()
-	}
-	bc.td = bc.GetTd(bc.currentBlock.Hash())
-	bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
-
-	if glog.V(logger.Info) {
-		glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
-	}
-
-	return nil
-}
-
 // Reset purges the entire blockchain, restoring it to its genesis state.
 func (bc *BlockChain) Reset() {
 	bc.ResetWithGenesisBlock(bc.genesisBlock)
@@ -231,20 +356,13 @@ func (bc *BlockChain) Reset() {
 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
 // specified genesis state.
 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
+	// Dump the entire block chain and purge the caches
+	bc.SetHead(0)
+
 	bc.mu.Lock()
 	defer bc.mu.Unlock()
 
-	// Dump the entire block chain and purge the caches
-	for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
-		DeleteBlock(bc.chainDb, block.Hash())
-	}
-	bc.headerCache.Purge()
-	bc.bodyCache.Purge()
-	bc.bodyRLPCache.Purge()
-	bc.blockCache.Purge()
-	bc.futureBlocks.Purge()
-
-	// Prepare the genesis block and reinitialize the chain
+	// Prepare the genesis block and reinitialise the chain
 	if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
 		glog.Fatalf("failed to write genesis block TD: %v", err)
 	}
@@ -254,7 +372,8 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
 	bc.genesisBlock = genesis
 	bc.insert(bc.genesisBlock)
 	bc.currentBlock = bc.genesisBlock
-	bc.setTotalDifficulty(genesis.Difficulty())
+	bc.currentHeader = bc.genesisBlock.Header()
+	bc.currentFastBlock = bc.genesisBlock
 }
 
 // Export writes the active chain to the given writer.
@@ -290,17 +409,30 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 	return nil
 }
 
-// insert injects a block into the current chain block chain. Note, this function
-// assumes that the `mu` mutex is held!
+// insert injects a new head block into the current block chain. This method
+// assumes that the block is indeed a true head. It will also reset the head
+// header and the head fast sync block to this very same block to prevent them
+// from pointing to a possibly old canonical chain (i.e. side chain by now).
+//
+// Note, this function assumes that the `mu` mutex is held!
 func (bc *BlockChain) insert(block *types.Block) {
 	// Add the block to the canonical chain number scheme and mark as the head
 	if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
 		glog.Fatalf("failed to insert block number: %v", err)
 	}
 	if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
-		glog.Fatalf("failed to insert block number: %v", err)
+		glog.Fatalf("failed to insert head block hash: %v", err)
+	}
+	if err := WriteHeadHeaderHash(bc.chainDb, block.Hash()); err != nil {
+		glog.Fatalf("failed to insert head header hash: %v", err)
 	}
+	if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
+		glog.Fatalf("failed to insert head fast block hash: %v", err)
+	}
+	// Update the internal state with the head block
 	bc.currentBlock = block
+	bc.currentHeader = block.Header()
+	bc.currentFastBlock = block
 }
 
 // Accessors
@@ -456,19 +588,15 @@ func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*ty
 	return
 }
 
-func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
+// GetUnclesInChain retrieves all the uncles from a given block backwards until
+// a specific distance is reached.
+func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
+	uncles := []*types.Header{}
 	for i := 0; block != nil && i < length; i++ {
 		uncles = append(uncles, block.Uncles()...)
 		block = self.GetBlock(block.ParentHash())
 	}
-
-	return
-}
-
-// setTotalDifficulty updates the TD of the chain manager. Note, this function
-// assumes that the `mu` mutex is held!
-func (bc *BlockChain) setTotalDifficulty(td *big.Int) {
-	bc.td = new(big.Int).Set(td)
+	return uncles
 }
 
 func (bc *BlockChain) Stop() {
@@ -504,6 +632,337 @@ const (
 	SideStatTy
 )
 
+// writeHeader writes a header into the local chain, given that its parent is
+// already known. If the total difficulty of the newly inserted header becomes
+// greater than the current known TD, the canonical chain is re-routed.
+//
+// Note: This method is not concurrent-safe with inserting blocks simultaneously
+// into the chain, as side effects caused by reorganisations cannot be emulated
+// without the real blocks. Hence, writing headers directly should only be done
+// in two scenarios: pure-header mode of operation (light clients), or properly
+// separated header/block phases (non-archive clients).
+func (self *BlockChain) writeHeader(header *types.Header) error {
+	self.wg.Add(1)
+	defer self.wg.Done()
+
+	// Calculate the total difficulty of the header
+	ptd := self.GetTd(header.ParentHash)
+	if ptd == nil {
+		return ParentError(header.ParentHash)
+	}
+	td := new(big.Int).Add(header.Difficulty, ptd)
+
+	// Make sure no inconsistent state is leaked during insertion
+	self.mu.Lock()
+	defer self.mu.Unlock()
+
+	// If the total difficulty is higher than our known, add it to the canonical chain
+	if td.Cmp(self.GetTd(self.currentHeader.Hash())) > 0 {
+		// Delete any canonical number assignments above the new head
+		for i := header.Number.Uint64() + 1; GetCanonicalHash(self.chainDb, i) != (common.Hash{}); i++ {
+			DeleteCanonicalHash(self.chainDb, i)
+		}
+		// Overwrite any stale canonical number assignments
+		head := self.GetHeader(header.ParentHash)
+		for GetCanonicalHash(self.chainDb, head.Number.Uint64()) != head.Hash() {
+			WriteCanonicalHash(self.chainDb, head.Hash(), head.Number.Uint64())
+			head = self.GetHeader(head.ParentHash)
+		}
+		// Extend the canonical chain with the new header
+		if err := WriteCanonicalHash(self.chainDb, header.Hash(), header.Number.Uint64()); err != nil {
+			glog.Fatalf("failed to insert header number: %v", err)
+		}
+		if err := WriteHeadHeaderHash(self.chainDb, header.Hash()); err != nil {
+			glog.Fatalf("failed to insert head header hash: %v", err)
+		}
+		self.currentHeader = types.CopyHeader(header)
+	}
+	// Irrelevant of the canonical status, write the header itself to the database
+	if err := WriteTd(self.chainDb, header.Hash(), td); err != nil {
+		glog.Fatalf("failed to write header total difficulty: %v", err)
+	}
+	if err := WriteHeader(self.chainDb, header); err != nil {
+		glog.Fatalf("filed to write header contents: %v", err)
+	}
+	return nil
+}
+
+// InsertHeaderChain attempts to insert the given header chain in to the local
+// chain, possibly creating a reorg. If an error is returned, it will return the
+// index number of the failing header as well an error describing what went wrong.
+//
+// The verify parameter can be used to fine tune whether nonce verification
+// should be done or not. The reason behind the optional check is because some
+// of the header retrieval mechanisms already need to verfy nonces, as well as
+// because nonces can be verified sparsely, not needing to check each.
+func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+	self.wg.Add(1)
+	defer self.wg.Done()
+
+	// Make sure only one thread manipulates the chain at once
+	self.chainmu.Lock()
+	defer self.chainmu.Unlock()
+
+	// Collect some import statistics to report on
+	stats := struct{ processed, ignored int }{}
+	start := time.Now()
+
+	// Generate the list of headers that should be POW verified
+	verify := make([]bool, len(chain))
+	for i := 0; i < len(verify)/checkFreq; i++ {
+		index := i*checkFreq + self.rand.Intn(checkFreq)
+		if index >= len(verify) {
+			index = len(verify) - 1
+		}
+		verify[index] = true
+	}
+	verify[len(verify)-1] = true // Last should always be verified to avoid junk
+
+	// Create the header verification task queue and worker functions
+	tasks := make(chan int, len(chain))
+	for i := 0; i < len(chain); i++ {
+		tasks <- i
+	}
+	close(tasks)
+
+	errs, failed := make([]error, len(tasks)), int32(0)
+	process := func(worker int) {
+		for index := range tasks {
+			header, hash := chain[index], chain[index].Hash()
+
+			// Short circuit insertion if shutting down or processing failed
+			if atomic.LoadInt32(&self.procInterrupt) == 1 {
+				return
+			}
+			if atomic.LoadInt32(&failed) > 0 {
+				return
+			}
+			// Short circuit if the header is bad or already known
+			if BadHashes[hash] {
+				errs[index] = BadHashError(hash)
+				atomic.AddInt32(&failed, 1)
+				return
+			}
+			if self.HasHeader(hash) {
+				continue
+			}
+			// Verify that the header honors the chain parameters
+			checkPow := verify[index]
+
+			var err error
+			if index == 0 {
+				err = self.processor.ValidateHeader(header, checkPow, false)
+			} else {
+				err = self.processor.ValidateHeaderWithParent(header, chain[index-1], checkPow, false)
+			}
+			if err != nil {
+				errs[index] = err
+				atomic.AddInt32(&failed, 1)
+				return
+			}
+		}
+	}
+	// Start as many worker threads as goroutines allowed
+	pending := new(sync.WaitGroup)
+	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+		pending.Add(1)
+		go func(id int) {
+			defer pending.Done()
+			process(id)
+		}(i)
+	}
+	pending.Wait()
+
+	// If anything failed, report
+	if failed > 0 {
+		for i, err := range errs {
+			if err != nil {
+				return i, err
+			}
+		}
+	}
+	// All headers passed verification, import them into the database
+	for i, header := range chain {
+		// Short circuit insertion if shutting down
+		if atomic.LoadInt32(&self.procInterrupt) == 1 {
+			glog.V(logger.Debug).Infoln("premature abort during header chain processing")
+			break
+		}
+		hash := header.Hash()
+
+		// If the header's already known, skip it, otherwise store
+		if self.HasHeader(hash) {
+			stats.ignored++
+			continue
+		}
+		if err := self.writeHeader(header); err != nil {
+			return i, err
+		}
+		stats.processed++
+	}
+	// Report some public statistics so the user has a clue what's going on
+	first, last := chain[0], chain[len(chain)-1]
+	glog.V(logger.Info).Infof("imported %d header(s) (%d ignored) in %v. #%v [%x… / %x…]", stats.processed, stats.ignored,
+		time.Since(start), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
+
+	return 0, nil
+}
+
+// Rollback is designed to remove a chain of links from the database that aren't
+// certain enough to be valid.
+func (self *BlockChain) Rollback(chain []common.Hash) {
+	self.mu.Lock()
+	defer self.mu.Unlock()
+
+	for i := len(chain) - 1; i >= 0; i-- {
+		hash := chain[i]
+
+		if self.currentHeader.Hash() == hash {
+			self.currentHeader = self.GetHeader(self.currentHeader.ParentHash)
+			WriteHeadHeaderHash(self.chainDb, self.currentHeader.Hash())
+		}
+		if self.currentFastBlock.Hash() == hash {
+			self.currentFastBlock = self.GetBlock(self.currentFastBlock.ParentHash())
+			WriteHeadFastBlockHash(self.chainDb, self.currentFastBlock.Hash())
+		}
+		if self.currentBlock.Hash() == hash {
+			self.currentBlock = self.GetBlock(self.currentBlock.ParentHash())
+			WriteHeadBlockHash(self.chainDb, self.currentBlock.Hash())
+		}
+	}
+}
+
+// InsertReceiptChain attempts to complete an already existing header chain with
+// transaction and receipt data.
+func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
+	self.wg.Add(1)
+	defer self.wg.Done()
+
+	// Collect some import statistics to report on
+	stats := struct{ processed, ignored int32 }{}
+	start := time.Now()
+
+	// Create the block importing task queue and worker functions
+	tasks := make(chan int, len(blockChain))
+	for i := 0; i < len(blockChain) && i < len(receiptChain); i++ {
+		tasks <- i
+	}
+	close(tasks)
+
+	errs, failed := make([]error, len(tasks)), int32(0)
+	process := func(worker int) {
+		for index := range tasks {
+			block, receipts := blockChain[index], receiptChain[index]
+
+			// Short circuit insertion if shutting down or processing failed
+			if atomic.LoadInt32(&self.procInterrupt) == 1 {
+				return
+			}
+			if atomic.LoadInt32(&failed) > 0 {
+				return
+			}
+			// Short circuit if the owner header is unknown
+			if !self.HasHeader(block.Hash()) {
+				errs[index] = fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
+				atomic.AddInt32(&failed, 1)
+				return
+			}
+			// Skip if the entire data is already known
+			if self.HasBlock(block.Hash()) {
+				atomic.AddInt32(&stats.ignored, 1)
+				continue
+			}
+			// Compute all the non-consensus fields of the receipts
+			transactions, logIndex := block.Transactions(), uint(0)
+			for j := 0; j < len(receipts); j++ {
+				// The transaction hash can be retrieved from the transaction itself
+				receipts[j].TxHash = transactions[j].Hash()
+
+				// The contract address can be derived from the transaction itself
+				if MessageCreatesContract(transactions[j]) {
+					from, _ := transactions[j].From()
+					receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
+				}
+				// The used gas can be calculated based on previous receipts
+				if j == 0 {
+					receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed)
+				} else {
+					receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed)
+				}
+				// The derived log fields can simply be set from the block and transaction
+				for k := 0; k < len(receipts[j].Logs); k++ {
+					receipts[j].Logs[k].BlockNumber = block.NumberU64()
+					receipts[j].Logs[k].BlockHash = block.Hash()
+					receipts[j].Logs[k].TxHash = receipts[j].TxHash
+					receipts[j].Logs[k].TxIndex = uint(j)
+					receipts[j].Logs[k].Index = logIndex
+					logIndex++
+				}
+			}
+			// Write all the data out into the database
+			if err := WriteBody(self.chainDb, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
+				errs[index] = fmt.Errorf("failed to write block body: %v", err)
+				atomic.AddInt32(&failed, 1)
+				glog.Fatal(errs[index])
+				return
+			}
+			if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
+				errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
+				atomic.AddInt32(&failed, 1)
+				glog.Fatal(errs[index])
+				return
+			}
+			if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
+				errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
+				atomic.AddInt32(&failed, 1)
+				glog.Fatal(errs[index])
+				return
+			}
+			atomic.AddInt32(&stats.processed, 1)
+		}
+	}
+	// Start as many worker threads as goroutines allowed
+	pending := new(sync.WaitGroup)
+	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+		pending.Add(1)
+		go func(id int) {
+			defer pending.Done()
+			process(id)
+		}(i)
+	}
+	pending.Wait()
+
+	// If anything failed, report
+	if failed > 0 {
+		for i, err := range errs {
+			if err != nil {
+				return i, err
+			}
+		}
+	}
+	if atomic.LoadInt32(&self.procInterrupt) == 1 {
+		glog.V(logger.Debug).Infoln("premature abort during receipt chain processing")
+		return 0, nil
+	}
+	// Update the head fast sync block if better
+	self.mu.Lock()
+	head := blockChain[len(errs)-1]
+	if self.GetTd(self.currentFastBlock.Hash()).Cmp(self.GetTd(head.Hash())) < 0 {
+		if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
+			glog.Fatalf("failed to update head fast block hash: %v", err)
+		}
+		self.currentFastBlock = head
+	}
+	self.mu.Unlock()
+
+	// Report some public statistics so the user has a clue what's going on
+	first, last := blockChain[0], blockChain[len(blockChain)-1]
+	glog.V(logger.Info).Infof("imported %d receipt(s) (%d ignored) in %v. #%d [%x… / %x…]", stats.processed, stats.ignored,
+		time.Since(start), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
+
+	return 0, nil
+}
+
 // WriteBlock writes the block to the chain.
 func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) {
 	self.wg.Add(1)
@@ -516,38 +975,31 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
 	}
 	td := new(big.Int).Add(block.Difficulty(), ptd)
 
-	self.mu.RLock()
-	cblock := self.currentBlock
-	self.mu.RUnlock()
-
-	// Compare the TD of the last known block in the canonical chain to make sure it's greater.
-	// At this point it's possible that a different chain (fork) becomes the new canonical chain.
-	if td.Cmp(self.Td()) > 0 {
-		// chain fork
-		if block.ParentHash() != cblock.Hash() {
-			// during split we merge two different chains and create the new canonical chain
-			err := self.reorg(cblock, block)
-			if err != nil {
+	// Make sure no inconsistent state is leaked during insertion
+	self.mu.Lock()
+	defer self.mu.Unlock()
+
+	// If the total difficulty is higher than our known, add it to the canonical chain
+	if td.Cmp(self.GetTd(self.currentBlock.Hash())) > 0 {
+		// Reorganize the chain if the parent is not the head block
+		if block.ParentHash() != self.currentBlock.Hash() {
+			if err := self.reorg(self.currentBlock, block); err != nil {
 				return NonStatTy, err
 			}
 		}
-		status = CanonStatTy
-
-		self.mu.Lock()
-		self.setTotalDifficulty(td)
+		// Insert the block as the new head of the chain
 		self.insert(block)
-		self.mu.Unlock()
+		status = CanonStatTy
 	} else {
 		status = SideStatTy
 	}
-
+	// Irrelevant of the canonical status, write the block itself to the database
 	if err := WriteTd(self.chainDb, block.Hash(), td); err != nil {
 		glog.Fatalf("failed to write block total difficulty: %v", err)
 	}
 	if err := WriteBlock(self.chainDb, block); err != nil {
 		glog.Fatalf("filed to write block contents: %v", err)
 	}
-	// Delete from future blocks
 	self.futureBlocks.Remove(block.Hash())
 
 	return
@@ -580,7 +1032,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 	txcount := 0
 	for i, block := range chain {
 		if atomic.LoadInt32(&self.procInterrupt) == 1 {
-			glog.V(logger.Debug).Infoln("Premature abort during chain processing")
+			glog.V(logger.Debug).Infoln("Premature abort during block chain processing")
 			break
 		}
 
@@ -636,7 +1088,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 
 
 			return i, err
 		}
-		if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil {
+		if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
 			glog.V(logger.Warn).Infoln("error writing block receipts:", err)
 		}
 
@@ -691,9 +1143,6 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 // to be part of the new canonical chain and accumulates potential missing transactions and post an
 // event about them
 func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
-	self.mu.Lock()
-	defer self.mu.Unlock()
-
 	var (
 		newChain    types.Blocks
 		commonBlock *types.Block
@@ -788,8 +1237,7 @@ func (self *BlockChain) postChainEvents(events []interface{}) {
 		if event, ok := event.(ChainEvent); ok {
 			// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
 			// and in most cases isn't even necessary.
-			if self.currentBlock.Hash() == event.Hash {
-				self.currentGasLimit = CalcGasLimit(event.Block)
+			if self.LastBlockHash() == event.Hash {
 				self.eventMux.Post(ChainHeadEvent{event.Block})
 			}
 		}
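Reviewer note: the most subtle piece above is InsertHeaderChain's sparse nonce checking: one randomly chosen header per checkFreq-sized window gets a full PoW verification, and the last header of a batch is always checked. A standalone sketch of just that selection (the function name is mine, not the diff's):

	package main

	import (
		"fmt"
		"math/rand"
	)

	// verificationMask reproduces the sampling in InsertHeaderChain: mark one
	// random index per checkFreq-sized window for full PoW verification, and
	// always mark the final header to avoid accepting trailing junk.
	func verificationMask(n, checkFreq int, rnd *rand.Rand) []bool {
		verify := make([]bool, n)
		for i := 0; i < n/checkFreq; i++ {
			index := i*checkFreq + rnd.Intn(checkFreq)
			if index >= n {
				index = n - 1
			}
			verify[index] = true
		}
		verify[n-1] = true
		return verify
	}

	func main() {
		rnd := rand.New(rand.NewSource(42)) // geth seeds this from crypto/rand
		fmt.Println(verificationMask(8, 4, rnd))
	}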

+ 531 - 242
core/blockchain_test.go

@@ -64,44 +64,58 @@ func theBlockChain(db ethdb.Database, t *testing.T) *BlockChain {
 }
 
 // Test fork of length N starting from block i
-func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) {
-	// switch databases to process the new chain
-	db, err := ethdb.NewMemDatabase()
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
-	// copy old chain up to i into new db with deterministic canonical
-	bman2, err := newCanonical(i, db)
+func testFork(t *testing.T, processor *BlockProcessor, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
+	// Copy old chain up to #i into a new db
+	db, processor2, err := newCanonical(i, full)
 	if err != nil {
 		t.Fatal("could not make new canonical in testFork", err)
 	}
-	// assert the bmans have the same block at i
-	bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash()
-	bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash()
-	if bi1 != bi2 {
-		fmt.Printf("%+v\n%+v\n\n", bi1, bi2)
-		t.Fatal("chains do not have the same hash at height", i)
+	// Assert the chains have the same header/block at #i
+	var hash1, hash2 common.Hash
+	if full {
+		hash1 = processor.bc.GetBlockByNumber(uint64(i)).Hash()
+		hash2 = processor2.bc.GetBlockByNumber(uint64(i)).Hash()
+	} else {
+		hash1 = processor.bc.GetHeaderByNumber(uint64(i)).Hash()
+		hash2 = processor2.bc.GetHeaderByNumber(uint64(i)).Hash()
 	}
-	bman2.bc.SetProcessor(bman2)
-
-	// extend the fork
-	parent := bman2.bc.CurrentBlock()
-	chainB := makeChain(parent, N, db, forkSeed)
-	_, err = bman2.bc.InsertChain(chainB)
-	if err != nil {
-		t.Fatal("Insert chain error for fork:", err)
+	if hash1 != hash2 {
+		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
 	}
-
-	tdpre := bman.bc.Td()
-	// Test the fork's blocks on the original chain
-	td, err := testChain(chainB, bman)
-	if err != nil {
-		t.Fatal("expected chainB not to give errors:", err)
+	// Extend the newly created chain
+	var (
+		blockChainB  []*types.Block
+		headerChainB []*types.Header
+	)
+	if full {
+		blockChainB = makeBlockChain(processor2.bc.CurrentBlock(), n, db, forkSeed)
+		if _, err := processor2.bc.InsertChain(blockChainB); err != nil {
+			t.Fatalf("failed to insert forking chain: %v", err)
+		}
+	} else {
+		headerChainB = makeHeaderChain(processor2.bc.CurrentHeader(), n, db, forkSeed)
+		if _, err := processor2.bc.InsertHeaderChain(headerChainB, 1); err != nil {
+			t.Fatalf("failed to insert forking chain: %v", err)
+		}
 	}
-	// Compare difficulties
-	f(tdpre, td)
+	// Sanity check that the forked chain can be imported into the original
+	var tdPre, tdPost *big.Int
 
 
-	// Loop over parents making sure reconstruction is done properly
+	if full {
+		tdPre = processor.bc.GetTd(processor.bc.CurrentBlock().Hash())
+		if err := testBlockChainImport(blockChainB, processor); err != nil {
+			t.Fatalf("failed to import forked block chain: %v", err)
+		}
+		tdPost = processor.bc.GetTd(blockChainB[len(blockChainB)-1].Hash())
+	} else {
+		tdPre = processor.bc.GetTd(processor.bc.CurrentHeader().Hash())
+		if err := testHeaderChainImport(headerChainB, processor); err != nil {
+			t.Fatalf("failed to import forked header chain: %v", err)
+		}
+		tdPost = processor.bc.GetTd(headerChainB[len(headerChainB)-1].Hash())
+	}
+	// Compare the total difficulties of the chains
+	comparator(tdPre, tdPost)
 }
 
 func printChain(bc *BlockChain) {
@@ -111,22 +125,41 @@ func printChain(bc *BlockChain) {
 	}
 }
 
-// process blocks against a chain
-func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
-	for _, block := range chainB {
-		_, _, err := bman.bc.processor.Process(block)
-		if err != nil {
+// testBlockChainImport tries to process a chain of blocks, writing them into
+// the database if successful.
+func testBlockChainImport(chain []*types.Block, processor *BlockProcessor) error {
+	for _, block := range chain {
+		// Try and process the block
+		if _, _, err := processor.Process(block); err != nil {
 			if IsKnownBlockErr(err) {
 				continue
 			}
-			return nil, err
+			return err
 		}
-		bman.bc.mu.Lock()
-		WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash())))
-		WriteBlock(bman.bc.chainDb, block)
-		bman.bc.mu.Unlock()
+		// Manually insert the block into the database, but don't reorganize (allows subsequent testing)
+		processor.bc.mu.Lock()
+		WriteTd(processor.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), processor.bc.GetTd(block.ParentHash())))
+		WriteBlock(processor.chainDb, block)
+		processor.bc.mu.Unlock()
 	}
-	return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil
+	return nil
+}
+
+// testHeaderChainImport tries to process a chain of header, writing them into
+// the database if successful.
+func testHeaderChainImport(chain []*types.Header, processor *BlockProcessor) error {
+	for _, header := range chain {
+		// Try and validate the header
+		if err := processor.ValidateHeader(header, false, false); err != nil {
+			return err
+		}
+		// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
+		processor.bc.mu.Lock()
+		WriteTd(processor.chainDb, header.Hash(), new(big.Int).Add(header.Difficulty, processor.bc.GetTd(header.ParentHash)))
+		WriteHeader(processor.chainDb, header)
+		processor.bc.mu.Unlock()
+	}
+	return nil
 }
 }
 
 func loadChain(fn string, t *testing.T) (types.Blocks, error) {
 }
 }
 
 func TestLastBlock(t *testing.T) {
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
+	db, _ := ethdb.NewMemDatabase()
+
 	bchain := theBlockChain(db, t)
 	bchain := theBlockChain(db, t)
+	block := makeBlockChain(bchain.CurrentBlock(), 1, db, 0)[0]
 	bchain.insert(block)
 	bchain.insert(block)
 	if block.Hash() != GetHeadBlockHash(db) {
 		t.Errorf("Write/Get HeadBlockHash failed")
 	}
 }
 
-	CanonicalLength := 5
-	db, err := ethdb.NewMemDatabase()
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
-	// make first chain starting from genesis
-	bman, err := newCanonical(CanonicalLength, db)
+// Tests that given a starting canonical chain of a given size, it can be extended
+// with various length chains.
+func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
+func TestExtendCanonicalBlocks(t *testing.T)  { testExtendCanonical(t, true) }
+
+func testExtendCanonical(t *testing.T, full bool) {
+	length := 5
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(length, full)
 	if err != nil {
 	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
 	}
+	// Define the difficulty comparator
+	better := func(td1, td2 *big.Int) {
 		if td2.Cmp(td1) <= 0 {
 		if td2.Cmp(td1) <= 0 {
+			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
 		}
 		}
 	}
-	testFork(t, bman, CanonicalLength, 1, f)
-	testFork(t, bman, CanonicalLength, 2, f)
-	testFork(t, bman, CanonicalLength, 5, f)
-	testFork(t, bman, CanonicalLength, 10, f)
+	// Start fork from current height
+	testFork(t, processor, length, 1, full, better)
+	testFork(t, processor, length, 2, full, better)
+	testFork(t, processor, length, 5, full, better)
+	testFork(t, processor, length, 10, full, better)
 }
 }
 
-	db, err := ethdb.NewMemDatabase()
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
-	// make first chain starting from genesis
-	bman, err := newCanonical(10, db)
+// Tests that given a starting canonical chain of a given size, creating shorter
+// forks do not take canonical ownership.
+func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
+func TestShorterForkBlocks(t *testing.T)  { testShorterFork(t, true) }
+
+func testShorterFork(t *testing.T, full bool) {
+	length := 10
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(length, full)
 	if err != nil {
 	if err != nil {
+		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
 	}
+	// Define the difficulty comparator
+	worse := func(td1, td2 *big.Int) {
 		if td2.Cmp(td1) >= 0 {
 		if td2.Cmp(td1) >= 0 {
+			t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
 		}
 		}
 	}
-	// for this to be a shorter fork
-	testFork(t, bman, 0, 3, f)
-	testFork(t, bman, 0, 7, f)
-	testFork(t, bman, 1, 1, f)
-	testFork(t, bman, 1, 7, f)
-	testFork(t, bman, 5, 3, f)
-	testFork(t, bman, 5, 4, f)
+	// Sum of numbers must be less than `length` for this to be a shorter fork
+	testFork(t, processor, 0, 3, full, worse)
+	testFork(t, processor, 0, 7, full, worse)
+	testFork(t, processor, 1, 1, full, worse)
+	testFork(t, processor, 1, 7, full, worse)
+	testFork(t, processor, 5, 3, full, worse)
+	testFork(t, processor, 5, 4, full, worse)
 }
 }
 
 
-func TestLongerFork(t *testing.T) {
-	db, err := ethdb.NewMemDatabase()
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
-	// make first chain starting from genesis
-	bman, err := newCanonical(10, db)
+// Tests that given a starting canonical chain of a given size, creating longer
+// forks does take canonical ownership.
+func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
+func TestLongerForkBlocks(t *testing.T)  { testLongerFork(t, true) }
+
+func testLongerFork(t *testing.T, full bool) {
+	length := 10
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(length, full)
 	if err != nil {
 	if err != nil {
-		t.Fatal("Could not make new canonical chain:", err)
+		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
 	}
-	f := func(td1, td2 *big.Int) {
+	// Define the difficulty comparator
+	better := func(td1, td2 *big.Int) {
 		if td2.Cmp(td1) <= 0 {
 		if td2.Cmp(td1) <= 0 {
-			t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1)
+			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
 		}
 		}
 	}
 	}
-	// Sum of numbers must be greater than 10
-	// for this to be a longer fork
-	testFork(t, bman, 0, 11, f)
-	testFork(t, bman, 0, 15, f)
-	testFork(t, bman, 1, 10, f)
-	testFork(t, bman, 1, 12, f)
-	testFork(t, bman, 5, 6, f)
-	testFork(t, bman, 5, 8, f)
+	// Sum of numbers must be greater than `length` for this to be a longer fork
+	testFork(t, processor, 0, 11, full, better)
+	testFork(t, processor, 0, 15, full, better)
+	testFork(t, processor, 1, 10, full, better)
+	testFork(t, processor, 1, 12, full, better)
+	testFork(t, processor, 5, 6, full, better)
+	testFork(t, processor, 5, 8, full, better)
 }
 }
 
 
-func TestEqualFork(t *testing.T) {
-	db, err := ethdb.NewMemDatabase()
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
-	bman, err := newCanonical(10, db)
+// Tests that given a starting canonical chain of a given size, creating equal
+// forks does take canonical ownership.
+func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
+func TestEqualForkBlocks(t *testing.T)  { testEqualFork(t, true) }
+
+func testEqualFork(t *testing.T, full bool) {
+	length := 10
+
+	// Make first chain starting from genesis
+	_, processor, err := newCanonical(length, full)
 	if err != nil {
 	if err != nil {
-		t.Fatal("Could not make new canonical chain:", err)
+		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
 	}
-	f := func(td1, td2 *big.Int) {
+	// Define the difficulty comparator
+	equal := func(td1, td2 *big.Int) {
 		if td2.Cmp(td1) != 0 {
 		if td2.Cmp(td1) != 0 {
-			t.Error("expected chainB to have equal difficulty. Got", td2, "expected ", td1)
+			t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
 		}
 		}
 	}
 	}
-	// Sum of numbers must be equal to 10
-	// for this to be an equal fork
-	testFork(t, bman, 0, 10, f)
-	testFork(t, bman, 1, 9, f)
-	testFork(t, bman, 2, 8, f)
-	testFork(t, bman, 5, 5, f)
-	testFork(t, bman, 6, 4, f)
-	testFork(t, bman, 9, 1, f)
+	// Sum of numbers must be equal to `length` for this to be an equal fork
+	testFork(t, processor, 0, 10, full, equal)
+	testFork(t, processor, 1, 9, full, equal)
+	testFork(t, processor, 2, 8, full, equal)
+	testFork(t, processor, 5, 5, full, equal)
+	testFork(t, processor, 6, 4, full, equal)
+	testFork(t, processor, 9, 1, full, equal)
 }
 }
 
 
-func TestBrokenChain(t *testing.T) {
-	db, err := ethdb.NewMemDatabase()
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
-	bman, err := newCanonical(10, db)
-	if err != nil {
-		t.Fatal("Could not make new canonical chain:", err)
-	}
-	db2, err := ethdb.NewMemDatabase()
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
-	bman2, err := newCanonical(10, db2)
+// Tests that chains missing links do not get accepted by the processor.
+func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
+func TestBrokenBlockChain(t *testing.T)  { testBrokenChain(t, true) }
+
+func testBrokenChain(t *testing.T, full bool) {
+	// Make chain starting from genesis
+	db, processor, err := newCanonical(10, full)
 	if err != nil {
 	if err != nil {
-		t.Fatal("Could not make new canonical chain:", err)
+		t.Fatalf("failed to make new canonical chain: %v", err)
 	}
 	}
-	bman2.bc.SetProcessor(bman2)
-	parent := bman2.bc.CurrentBlock()
-	chainB := makeChain(parent, 5, db2, forkSeed)
-	chainB = chainB[1:]
-	_, err = testChain(chainB, bman)
-	if err == nil {
-		t.Error("expected broken chain to return error")
+	// Create a forked chain, and try to insert with a missing link
+	if full {
+		chain := makeBlockChain(processor.bc.CurrentBlock(), 5, db, forkSeed)[1:]
+		if err := testBlockChainImport(chain, processor); err == nil {
+			t.Errorf("broken block chain not reported")
+		}
+	} else {
+		chain := makeHeaderChain(processor.bc.CurrentHeader(), 5, db, forkSeed)[1:]
+		if err := testHeaderChainImport(chain, processor); err == nil {
+			t.Errorf("broken header chain not reported")
+		}
 	}
 	}
 }
 }
 
 
@@ -374,15 +415,29 @@ func TestChainMultipleInsertions(t *testing.T) {
 
 
 type bproc struct{}
 type bproc struct{}
 
 
-func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil }
+func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error)                   { return nil, nil, nil }
+func (bproc) ValidateHeader(*types.Header, bool, bool) error                          { return nil }
+func (bproc) ValidateHeaderWithParent(*types.Header, *types.Header, bool, bool) error { return nil }
 
 
-func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
+func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
+	blocks := makeBlockChainWithDiff(genesis, d, seed)
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	return headers
+}
+
+func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
 	var chain []*types.Block
 	var chain []*types.Block
 	for i, difficulty := range d {
 	for i, difficulty := range d {
 		header := &types.Header{
 		header := &types.Header{
-			Coinbase:   common.Address{seed},
-			Number:     big.NewInt(int64(i + 1)),
-			Difficulty: big.NewInt(int64(difficulty)),
+			Coinbase:    common.Address{seed},
+			Number:      big.NewInt(int64(i + 1)),
+			Difficulty:  big.NewInt(int64(difficulty)),
+			UncleHash:   types.EmptyUncleHash,
+			TxHash:      types.EmptyRootHash,
+			ReceiptHash: types.EmptyRootHash,
 		}
 		}
 		if i == 0 {
 		if i == 0 {
 			header.ParentHash = genesis.Hash()
 			header.ParentHash = genesis.Hash()
@@ -397,7 +452,7 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
 
 
 func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
 func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
 	var eventMux event.TypeMux
 	var eventMux event.TypeMux
-	bc := &BlockChain{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
+	bc := &BlockChain{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}, rand: rand.New(rand.NewSource(0))}
 	bc.headerCache, _ = lru.New(100)
 	bc.headerCache, _ = lru.New(100)
 	bc.bodyCache, _ = lru.New(100)
 	bc.bodyCache, _ = lru.New(100)
 	bc.bodyRLPCache, _ = lru.New(100)
 	bc.bodyRLPCache, _ = lru.New(100)
@@ -410,147 +465,381 @@ func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
 	return bc
 	return bc
 }
 }
 
 
-func TestReorgLongest(t *testing.T) {
-	db, _ := ethdb.NewMemDatabase()
+// Tests that reorganizing a long difficult chain after a short easy one
+// overwrites the canonical numbers and links in the database.
+func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
+func TestReorgLongBlocks(t *testing.T)  { testReorgLong(t, true) }
 
 
-	genesis, err := WriteTestNetGenesisBlock(db, 0)
-	if err != nil {
-		t.Error(err)
-		t.FailNow()
-	}
-	bc := chm(genesis, db)
+func testReorgLong(t *testing.T, full bool) {
+	testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10, full)
+}
 
 
-	chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
-	chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
+// Tests that reorganizing a short difficult chain after a long easy one
+// overwrites the canonical numbers and links in the database.
+func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
+func TestReorgShortBlocks(t *testing.T)  { testReorgShort(t, true) }
 
 
-	bc.InsertChain(chain1)
-	bc.InsertChain(chain2)
+func testReorgShort(t *testing.T, full bool) {
+	testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11, full)
+}
 
 
-	prev := bc.CurrentBlock()
-	for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
-		if prev.ParentHash() != block.Hash() {
-			t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash())
+func testReorg(t *testing.T, first, second []int, td int64, full bool) {
+	// Create a pristine block chain
+	db, _ := ethdb.NewMemDatabase()
+	genesis, _ := WriteTestNetGenesisBlock(db, 0)
+	bc := chm(genesis, db)
+
+	// Insert an easy and a difficult chain afterwards
+	if full {
+		bc.InsertChain(makeBlockChainWithDiff(genesis, first, 11))
+		bc.InsertChain(makeBlockChainWithDiff(genesis, second, 22))
+	} else {
+		bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, first, 11), 1)
+		bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, second, 22), 1)
+	}
+	// Check that the chain is valid number- and link-wise
+	if full {
+		prev := bc.CurrentBlock()
+		for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
+			if prev.ParentHash() != block.Hash() {
+				t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash())
+			}
+		}
+	} else {
+		prev := bc.CurrentHeader()
+		for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
+			if prev.ParentHash != header.Hash() {
+				t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
+			}
+		}
+	}
+	// Make sure the chain total difficulty is the correct one
+	want := new(big.Int).Add(genesis.Difficulty(), big.NewInt(td))
+	if full {
+		if have := bc.GetTd(bc.CurrentBlock().Hash()); have.Cmp(want) != 0 {
+			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
+		}
+	} else {
+		if have := bc.GetTd(bc.CurrentHeader().Hash()); have.Cmp(want) != 0 {
+			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
 		}
 		}
 	}
 	}
 }
 }
 
 
-func TestBadHashes(t *testing.T) {
+// Tests that the insertion functions detect banned hashes.
+func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
+func TestBadBlockHashes(t *testing.T)  { testBadHashes(t, true) }
+
+func testBadHashes(t *testing.T, full bool) {
+	// Create a pristine block chain
 	db, _ := ethdb.NewMemDatabase()
 	db, _ := ethdb.NewMemDatabase()
-	genesis, err := WriteTestNetGenesisBlock(db, 0)
-	if err != nil {
-		t.Error(err)
-		t.FailNow()
-	}
+	genesis, _ := WriteTestNetGenesisBlock(db, 0)
 	bc := chm(genesis, db)
 	bc := chm(genesis, db)
 
 
-	chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
-	BadHashes[chain[2].Header().Hash()] = true
-
-	_, err = bc.InsertChain(chain)
+	// Create a chain, ban a hash and try to import
+	var err error
+	if full {
+		blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 4}, 10)
+		BadHashes[blocks[2].Header().Hash()] = true
+		_, err = bc.InsertChain(blocks)
+	} else {
+		headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 4}, 10)
+		BadHashes[headers[2].Hash()] = true
+		_, err = bc.InsertHeaderChain(headers, 1)
+	}
 	if !IsBadHashError(err) {
 	if !IsBadHashError(err) {
 		t.Errorf("error mismatch: want: BadHashError, have: %v", err)
 		t.Errorf("error mismatch: want: BadHashError, have: %v", err)
 	}
 	}
 }
 }
 
 
-func TestReorgBadHashes(t *testing.T) {
+// Tests that bad hashes are detected on boot, and the chain rolled back to a
+// good state prior to the bad hash.
+func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
+func TestReorgBadBlockHashes(t *testing.T)  { testReorgBadHashes(t, true) }
+
+func testReorgBadHashes(t *testing.T, full bool) {
+	// Create a pristine block chain
 	db, _ := ethdb.NewMemDatabase()
 	db, _ := ethdb.NewMemDatabase()
-	genesis, err := WriteTestNetGenesisBlock(db, 0)
-	if err != nil {
-		t.Error(err)
-		t.FailNow()
-	}
+	genesis, _ := WriteTestNetGenesisBlock(db, 0)
 	bc := chm(genesis, db)
 	bc := chm(genesis, db)
 
 
-	chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
-	bc.InsertChain(chain)
-
-	if chain[3].Header().Hash() != bc.LastBlockHash() {
-		t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash())
-	}
-
-	// NewChainManager should check BadHashes when loading it db
-	BadHashes[chain[3].Header().Hash()] = true
+	// Create a chain, import and ban afterwards
+	headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
+	blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
 
 
-	var eventMux event.TypeMux
-	ncm, err := NewBlockChain(db, FakePow{}, &eventMux)
-	if err != nil {
-		t.Errorf("NewChainManager err: %s", err)
-	}
-
-	// check it set head to (valid) parent of bad hash block
-	if chain[2].Header().Hash() != ncm.LastBlockHash() {
-		t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash())
-	}
-
-	if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
-		t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit())
+	if full {
+		if _, err := bc.InsertChain(blocks); err != nil {
+			t.Fatalf("failed to import blocks: %v", err)
+		}
+		if bc.CurrentBlock().Hash() != blocks[3].Hash() {
+			t.Errorf("last block hash mismatch: have: %x, want %x", bc.CurrentBlock().Hash(), blocks[3].Header().Hash())
+		}
+		BadHashes[blocks[3].Header().Hash()] = true
+		defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
+	} else {
+		if _, err := bc.InsertHeaderChain(headers, 1); err != nil {
+			t.Fatalf("failed to import headers: %v", err)
+		}
+		if bc.CurrentHeader().Hash() != headers[3].Hash() {
+			t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash())
+		}
+		BadHashes[headers[3].Hash()] = true
+		defer func() { delete(BadHashes, headers[3].Hash()) }()
 	}
 	}
-}
-
-func TestReorgShortest(t *testing.T) {
-	db, _ := ethdb.NewMemDatabase()
-	genesis, err := WriteTestNetGenesisBlock(db, 0)
+	// Create a new chain manager and check it rolled back the state
+	ncm, err := NewBlockChain(db, FakePow{}, new(event.TypeMux))
 	if err != nil {
 	if err != nil {
-		t.Error(err)
-		t.FailNow()
+		t.Fatalf("failed to create new chain manager: %v", err)
 	}
 	}
-	bc := chm(genesis, db)
-
-	chain1 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
-	chain2 := makeChainWithDiff(genesis, []int{1, 10}, 11)
-
-	bc.InsertChain(chain1)
-	bc.InsertChain(chain2)
-
-	prev := bc.CurrentBlock()
-	for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
-		if prev.ParentHash() != block.Hash() {
-			t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash())
+	if full {
+		if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
+			t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
+		}
+		if blocks[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
+			t.Errorf("last  block gasLimit mismatch: have: %x, want %x", ncm.GasLimit(), blocks[2].Header().GasLimit)
+		}
+	} else {
+		if ncm.CurrentHeader().Hash() != headers[2].Hash() {
+			t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
 		}
 		}
 	}
 	}
 }
 }
 
 
-func TestInsertNonceError(t *testing.T) {
+// Tests chain insertions in the face of one entity containing an invalid nonce.
+func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
+func TestBlocksInsertNonceError(t *testing.T)  { testInsertNonceError(t, true) }
+
+func testInsertNonceError(t *testing.T, full bool) {
 	for i := 1; i < 25 && !t.Failed(); i++ {
 	for i := 1; i < 25 && !t.Failed(); i++ {
-		db, _ := ethdb.NewMemDatabase()
-		genesis, err := WriteTestNetGenesisBlock(db, 0)
+		// Create a pristine chain and database
+		db, processor, err := newCanonical(0, full)
 		if err != nil {
 		if err != nil {
-			t.Error(err)
-			t.FailNow()
+			t.Fatalf("failed to create pristine chain: %v", err)
 		}
 		}
-		bc := chm(genesis, db)
-		bc.processor = NewBlockProcessor(db, bc.pow, bc, bc.eventMux)
-		blocks := makeChain(bc.currentBlock, i, db, 0)
+		bc := processor.bc
+
+		// Create and insert a chain with a failing nonce
+		var (
+			failAt   int
+			failRes  int
+			failNum  uint64
+			failHash common.Hash
+		)
+		if full {
+			blocks := makeBlockChain(processor.bc.CurrentBlock(), i, db, 0)
+
+			failAt = rand.Int() % len(blocks)
+			failNum = blocks[failAt].NumberU64()
+			failHash = blocks[failAt].Hash()
+
+			processor.bc.pow = failPow{failNum}
+			processor.Pow = failPow{failNum}
+
+			failRes, err = processor.bc.InsertChain(blocks)
+		} else {
+			headers := makeHeaderChain(processor.bc.CurrentHeader(), i, db, 0)
 
 
-		fail := rand.Int() % len(blocks)
-		failblock := blocks[fail]
-		bc.pow = failPow{failblock.NumberU64()}
-		n, err := bc.InsertChain(blocks)
+			failAt = rand.Int() % len(headers)
+			failNum = headers[failAt].Number.Uint64()
+			failHash = headers[failAt].Hash()
 
 
+			processor.bc.pow = failPow{failNum}
+			processor.Pow = failPow{failNum}
+
+			failRes, err = processor.bc.InsertHeaderChain(headers, 1)
+		}
 		// Check that the returned error indicates the nonce failure.
 		// Check that the returned error indicates the nonce failure.
-		if n != fail {
-			t.Errorf("(i=%d) wrong failed block index: got %d, want %d", i, n, fail)
+		if failRes != failAt {
+			t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
 		}
 		}
 		if !IsBlockNonceErr(err) {
 		if !IsBlockNonceErr(err) {
-			t.Fatalf("(i=%d) got %q, want a nonce error", i, err)
+			t.Fatalf("test %d: error mismatch: have %v, want nonce error", i, err)
 		}
 		}
 		nerr := err.(*BlockNonceErr)
 		nerr := err.(*BlockNonceErr)
-		if nerr.Number.Cmp(failblock.Number()) != 0 {
-			t.Errorf("(i=%d) wrong block number in error, got %v, want %v", i, nerr.Number, failblock.Number())
+		if nerr.Number.Uint64() != failNum {
+			t.Errorf("test %d: number mismatch: have %v, want %v", i, nerr.Number, failNum)
 		}
 		}
-		if nerr.Hash != failblock.Hash() {
-			t.Errorf("(i=%d) wrong block hash in error, got %v, want %v", i, nerr.Hash, failblock.Hash())
+		if nerr.Hash != failHash {
+			t.Errorf("test %d: hash mismatch: have %x, want %x", i, nerr.Hash[:4], failHash[:4])
 		}
 		}
-
 		// Check that no blocks after the failing block have been inserted.
 		// Check that no blocks after the failing block have been inserted.
-		for _, block := range blocks[fail:] {
-			if bc.HasBlock(block.Hash()) {
-				t.Errorf("(i=%d) invalid block %d present in chain", i, block.NumberU64())
+		for j := 0; j < i-failAt; j++ {
+			if full {
+				if block := bc.GetBlockByNumber(failNum + uint64(j)); block != nil {
+					t.Errorf("test %d: invalid block in chain: %v", i, block)
+				}
+			} else {
+				if header := bc.GetHeaderByNumber(failNum + uint64(j)); header != nil {
+					t.Errorf("test %d: invalid header in chain: %v", i, header)
+				}
 			}
 			}
 		}
 		}
 	}
 	}
 }
 }
 
 
+// Tests that fast importing a block chain produces the same chain data as the
+// classical full block processing.
+func TestFastVsFullChains(t *testing.T) {
+	// Configure and generate a sample block chain
+	var (
+		gendb, _ = ethdb.NewMemDatabase()
+		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address  = crypto.PubkeyToAddress(key.PublicKey)
+		funds    = big.NewInt(1000000000)
+		genesis  = GenesisBlockForTesting(gendb, address, funds)
+	)
+	blocks, receipts := GenerateChain(genesis, gendb, 1024, func(i int, block *BlockGen) {
+		block.SetCoinbase(common.Address{0x00})
+
+		// If the block number is a multiple of 3, send a few bonus transactions to the miner
+		if i%3 == 2 {
+			for j := 0; j < i%4+1; j++ {
+				tx, err := types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key)
+				if err != nil {
+					panic(err)
+				}
+				block.AddTx(tx)
+			}
+		}
+		// If the block number is a multiple of 5, add a bonus uncle to the block
+		if i%5 == 5 {
+			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
+		}
+	})
+	// Import the chain as an archive node for the comparison baseline
+	archiveDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})
+
+	archive, _ := NewBlockChain(archiveDb, FakePow{}, new(event.TypeMux))
+	archive.SetProcessor(NewBlockProcessor(archiveDb, FakePow{}, archive, new(event.TypeMux)))
+
+	if n, err := archive.InsertChain(blocks); err != nil {
+		t.Fatalf("failed to process block %d: %v", n, err)
+	}
+	// Fast import the chain as a non-archive node to test
+	fastDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
+	fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
+	fast.SetProcessor(NewBlockProcessor(fastDb, FakePow{}, fast, new(event.TypeMux)))
+
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
+		t.Fatalf("failed to insert receipt %d: %v", n, err)
+	}
+	// Iterate over all chain data components, and cross reference
+	for i := 0; i < len(blocks); i++ {
+		num, hash := blocks[i].NumberU64(), blocks[i].Hash()
+
+		if ftd, atd := fast.GetTd(hash), archive.GetTd(hash); ftd.Cmp(atd) != 0 {
+			t.Errorf("block #%d [%x]: td mismatch: have %v, want %v", num, hash, ftd, atd)
+		}
+		if fheader, aheader := fast.GetHeader(hash), archive.GetHeader(hash); fheader.Hash() != aheader.Hash() {
+			t.Errorf("block #%d [%x]: header mismatch: have %v, want %v", num, hash, fheader, aheader)
+		}
+		if fblock, ablock := fast.GetBlock(hash), archive.GetBlock(hash); fblock.Hash() != ablock.Hash() {
+			t.Errorf("block #%d [%x]: block mismatch: have %v, want %v", num, hash, fblock, ablock)
+		} else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(ablock.Transactions()) {
+			t.Errorf("block #%d [%x]: transactions mismatch: have %v, want %v", num, hash, fblock.Transactions(), ablock.Transactions())
+		} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(ablock.Uncles()) {
+			t.Errorf("block #%d [%x]: uncles mismatch: have %v, want %v", num, hash, fblock.Uncles(), ablock.Uncles())
+		}
+		if freceipts, areceipts := GetBlockReceipts(fastDb, hash), GetBlockReceipts(archiveDb, hash); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
+			t.Errorf("block #%d [%x]: receipts mismatch: have %v, want %v", num, hash, freceipts, areceipts)
+		}
+	}
+	// Check that the canonical chains are the same between the databases
+	for i := 0; i < len(blocks)+1; i++ {
+		if fhash, ahash := GetCanonicalHash(fastDb, uint64(i)), GetCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
+			t.Errorf("block #%d: canonical hash mismatch: have %v, want %v", i, fhash, ahash)
+		}
+	}
+}
+
+// Tests that various import methods move the chain head pointers to the correct
+// positions.
+func TestLightVsFastVsFullChainHeads(t *testing.T) {
+	// Configure and generate a sample block chain
+	var (
+		gendb, _ = ethdb.NewMemDatabase()
+		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address  = crypto.PubkeyToAddress(key.PublicKey)
+		funds    = big.NewInt(1000000000)
+		genesis  = GenesisBlockForTesting(gendb, address, funds)
+	)
+	height := uint64(1024)
+	blocks, receipts := GenerateChain(genesis, gendb, int(height), nil)
+
+	// Configure a subchain to roll back
+	remove := []common.Hash{}
+	for _, block := range blocks[height/2:] {
+		remove = append(remove, block.Hash())
+	}
+	// Create a small assertion method to check the three heads
+	assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
+		if num := chain.CurrentBlock().NumberU64(); num != block {
+			t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
+		}
+		if num := chain.CurrentFastBlock().NumberU64(); num != fast {
+			t.Errorf("%s head fast-block mismatch: have #%v, want #%v", kind, num, fast)
+		}
+		if num := chain.CurrentHeader().Number.Uint64(); num != header {
+			t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
+		}
+	}
+	// Import the chain as an archive node and ensure all pointers are updated
+	archiveDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})
+
+	archive, _ := NewBlockChain(archiveDb, FakePow{}, new(event.TypeMux))
+	archive.SetProcessor(NewBlockProcessor(archiveDb, FakePow{}, archive, new(event.TypeMux)))
+
+	if n, err := archive.InsertChain(blocks); err != nil {
+		t.Fatalf("failed to process block %d: %v", n, err)
+	}
+	assert(t, "archive", archive, height, height, height)
+	archive.Rollback(remove)
+	assert(t, "archive", archive, height/2, height/2, height/2)
+
+	// Import the chain as a non-archive node and ensure all pointers are updated
+	fastDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
+	fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
+	fast.SetProcessor(NewBlockProcessor(fastDb, FakePow{}, fast, new(event.TypeMux)))
+
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
+		t.Fatalf("failed to insert receipt %d: %v", n, err)
+	}
+	assert(t, "fast", fast, height, height, 0)
+	fast.Rollback(remove)
+	assert(t, "fast", fast, height/2, height/2, 0)
+
+	// Import the chain as a light node and ensure all pointers are updated
+	lightDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(lightDb, GenesisAccount{address, funds})
+	light, _ := NewBlockChain(lightDb, FakePow{}, new(event.TypeMux))
+	light.SetProcessor(NewBlockProcessor(lightDb, FakePow{}, light, new(event.TypeMux)))
+
+	if n, err := light.InsertHeaderChain(headers, 1); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	assert(t, "light", light, height, 0, 0)
+	light.Rollback(remove)
+	assert(t, "light", light, height/2, 0, 0)
+}
+
 // Tests that chain reorganizations handle transaction removals and reinsertions.
 // Tests that chain reorganizations handle transaction removals and reinsertions.
 func TestChainTxReorgs(t *testing.T) {
 func TestChainTxReorgs(t *testing.T) {
 	params.MinGasLimit = big.NewInt(125000)      // Minimum the gas limit may ever be.
 	params.MinGasLimit = big.NewInt(125000)      // Minimum the gas limit may ever be.
@@ -587,7 +876,7 @@ func TestChainTxReorgs(t *testing.T) {
 	//  - futureAdd: transaction added after the reorg has already finished
 	//  - futureAdd: transaction added after the reorg has already finished
 	var pastAdd, freshAdd, futureAdd *types.Transaction
 	var pastAdd, freshAdd, futureAdd *types.Transaction
 
 
-	chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
 		switch i {
 		switch i {
 		case 0:
 		case 0:
 			pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
 			pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
@@ -613,7 +902,7 @@ func TestChainTxReorgs(t *testing.T) {
 	}
 	}
 
 
 	// overwrite the old chain
 	// overwrite the old chain
-	chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
+	chain, _ = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
 		switch i {
 		switch i {
 		case 0:
 		case 0:
 			pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)
 			pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)

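The two tests above capture the crux of fast sync: instead of executing every block, the node imports headers first and then attaches externally supplied receipts. A minimal in-package sketch of that sequence, reusing only constructors and methods shown in this diff (FakePow stands in for a real verifier; the helper name fastImport is illustrative, not part of the change):

package core

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// fastImport replays an already generated chain without executing any of its
// transactions: headers are written and indexed first, then the matching
// bodies and receipts are attached to them.
func fastImport(db ethdb.Database, blocks types.Blocks, receipts []types.Receipts) (*BlockChain, error) {
	chain, err := NewBlockChain(db, FakePow{}, new(event.TypeMux))
	if err != nil {
		return nil, err
	}
	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	// A check frequency of 1 verifies the PoW of every single header
	if _, err := chain.InsertHeaderChain(headers, 1); err != nil {
		return nil, err
	}
	// Attach the bodies and receipts to the now-known headers
	if _, err := chain.InsertReceiptChain(blocks, receipts); err != nil {
		return nil, err
	}
	return chain, nil
}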
+ 47 - 21
core/chain_makers.go

@@ -98,7 +98,7 @@ func (b *BlockGen) AddTx(tx *types.Transaction) {
 	b.header.GasUsed.Add(b.header.GasUsed, gas)
 	b.header.GasUsed.Add(b.header.GasUsed, gas)
 	receipt := types.NewReceipt(root.Bytes(), b.header.GasUsed)
 	receipt := types.NewReceipt(root.Bytes(), b.header.GasUsed)
 	logs := b.statedb.GetLogs(tx.Hash())
 	logs := b.statedb.GetLogs(tx.Hash())
-	receipt.SetLogs(logs)
+	receipt.Logs = logs
 	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
 	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
 	b.txs = append(b.txs, tx)
 	b.txs = append(b.txs, tx)
 	b.receipts = append(b.receipts, receipt)
 	b.receipts = append(b.receipts, receipt)
@@ -163,13 +163,13 @@ func (b *BlockGen) OffsetTime(seconds int64) {
 // Blocks created by GenerateChain do not contain valid proof of work
 // Blocks created by GenerateChain do not contain valid proof of work
 // values. Inserting them into BlockChain requires use of FakePow or
 // values. Inserting them into BlockChain requires use of FakePow or
 // a similar non-validating proof of work implementation.
 // a similar non-validating proof of work implementation.
-func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block {
+func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
 	statedb, err := state.New(parent.Root(), db)
 	statedb, err := state.New(parent.Root(), db)
 	if err != nil {
 	if err != nil {
 		panic(err)
 		panic(err)
 	}
 	}
-	blocks := make(types.Blocks, n)
-	genblock := func(i int, h *types.Header) *types.Block {
+	blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
+	genblock := func(i int, h *types.Header) (*types.Block, types.Receipts) {
 		b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}
 		b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}
 		if gen != nil {
 		if gen != nil {
 			gen(i, b)
 			gen(i, b)
@@ -180,15 +180,16 @@ func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int,
 			panic(fmt.Sprintf("state write error: %v", err))
 			panic(fmt.Sprintf("state write error: %v", err))
 		}
 		}
 		h.Root = root
 		h.Root = root
-		return types.NewBlock(h, b.txs, b.uncles, b.receipts)
+		return types.NewBlock(h, b.txs, b.uncles, b.receipts), b.receipts
 	}
 	}
 	for i := 0; i < n; i++ {
 	for i := 0; i < n; i++ {
 		header := makeHeader(parent, statedb)
 		header := makeHeader(parent, statedb)
-		block := genblock(i, header)
+		block, receipt := genblock(i, header)
 		blocks[i] = block
 		blocks[i] = block
+		receipts[i] = receipt
 		parent = block
 		parent = block
 	}
 	}
-	return blocks
+	return blocks, receipts
 }
 }
 
 
 func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
 func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
@@ -210,26 +211,51 @@ func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
 	}
 	}
 }
 }
 
 
-// newCanonical creates a new deterministic canonical chain by running
-// InsertChain on the result of makeChain.
-func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) {
+// newCanonical creates a chain database and injects a deterministic canonical
+// chain. Depending on the full flag, it creates either a full block chain or a
+// header-only chain.
+func newCanonical(n int, full bool) (ethdb.Database, *BlockProcessor, error) {
+	// Create the new chain database
+	db, _ := ethdb.NewMemDatabase()
 	evmux := &event.TypeMux{}
 	evmux := &event.TypeMux{}
 
 
-	WriteTestNetGenesisBlock(db, 0)
-	chainman, _ := NewBlockChain(db, FakePow{}, evmux)
-	bman := NewBlockProcessor(db, FakePow{}, chainman, evmux)
-	bman.bc.SetProcessor(bman)
-	parent := bman.bc.CurrentBlock()
+	// Initialize a fresh chain with only a genesis block
+	genesis, _ := WriteTestNetGenesisBlock(db, 0)
+
+	blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
+	processor := NewBlockProcessor(db, FakePow{}, blockchain, evmux)
+	processor.bc.SetProcessor(processor)
+
+	// Create and inject the requested chain
 	if n == 0 {
 	if n == 0 {
-		return bman, nil
+		return db, processor, nil
+	}
+	if full {
+		// Full block-chain requested
+		blocks := makeBlockChain(genesis, n, db, canonicalSeed)
+		_, err := blockchain.InsertChain(blocks)
+		return db, processor, err
 	}
 	}
-	lchain := makeChain(parent, n, db, canonicalSeed)
-	_, err := bman.bc.InsertChain(lchain)
-	return bman, err
+	// Header-only chain requested
+	headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
+	_, err := blockchain.InsertHeaderChain(headers, 1)
+	return db, processor, err
 }
 }
 
 
-func makeChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
-	return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
+// makeHeaderChain creates a deterministic chain of headers rooted at parent.
+func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {
+	blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, db, seed)
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	return headers
+}
+
+// makeBlockChain creates a deterministic chain of blocks rooted at parent.
+func makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
+	blocks, _ := GenerateChain(parent, db, n, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
 		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
 	})
 	})
+	return blocks
 }
 }

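GenerateChain now returns the receipts alongside the blocks, paired index by index. Because types.NewBlock seals DeriveSha of the receipts into each header, the pairing can be sanity-checked by re-deriving the receipt roots. A small in-package sketch of that invariant (the helper name is illustrative; genesis is assumed to have been written into db, e.g. via GenesisBlockForTesting, and with a nil callback every root is simply the empty trie root):

package core

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// checkReceiptRoots verifies that receipts[i] are exactly the receipts sealed
// into blocks[i] by re-deriving each block's receipt trie root.
func checkReceiptRoots(db ethdb.Database, genesis *types.Block) error {
	blocks, receipts := GenerateChain(genesis, db, 8, nil)
	for i, block := range blocks {
		if have, want := types.DeriveSha(receipts[i]), block.ReceiptHash(); have != want {
			return fmt.Errorf("block %d: receipt root mismatch: have %x, want %x", i+1, have, want)
		}
	}
	return nil
}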
+ 1 - 1
core/chain_makers_test.go

@@ -47,7 +47,7 @@ func ExampleGenerateChain() {
 	// This call generates a chain of 5 blocks. The function runs for
 	// This call generates a chain of 5 blocks. The function runs for
 	// each block and adds different features to gen based on the
 	// each block and adds different features to gen based on the
 	// block index.
 	// block index.
-	chain := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
 		switch i {
 		switch i {
 		case 0:
 		case 0:
 			// In block 1, addr1 sends addr2 some ether.
 			// In block 1, addr1 sends addr2 some ether.

+ 3 - 3
core/chain_pow_test.go

@@ -60,7 +60,7 @@ func TestPowVerification(t *testing.T) {
 	var (
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 8, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 8, nil)
 	)
 	)
 	headers := make([]*types.Header, len(blocks))
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
 	for i, block := range blocks {
@@ -115,7 +115,7 @@ func testPowConcurrentVerification(t *testing.T, threads int) {
 	var (
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 8, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 8, nil)
 	)
 	)
 	headers := make([]*types.Header, len(blocks))
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
 	for i, block := range blocks {
@@ -186,7 +186,7 @@ func testPowConcurrentAbortion(t *testing.T, threads int) {
 	var (
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 1024, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 1024, nil)
 	)
 	)
 	headers := make([]*types.Header, len(blocks))
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
 	for i, block := range blocks {

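The blocks-to-headers projection above now recurs across this diff (the PoW tests here, the fast-sync tests, makeHeaderChain). A hypothetical helper, not part of the change itself, would name the pattern once:

package core

import "github.com/ethereum/go-ethereum/core/types"

// headersOf projects a slice of blocks onto their headers, preserving order.
func headersOf(blocks []*types.Block) []*types.Header {
	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	return headers
}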
+ 24 - 2
core/chain_util.go

@@ -34,6 +34,7 @@ import (
 var (
 var (
 	headHeaderKey = []byte("LastHeader")
 	headHeaderKey = []byte("LastHeader")
 	headBlockKey  = []byte("LastBlock")
 	headBlockKey  = []byte("LastBlock")
+	headFastKey   = []byte("LastFast")
 
 
 	blockPrefix    = []byte("block-")
 	blockPrefix    = []byte("block-")
 	blockNumPrefix = []byte("block-num-")
 	blockNumPrefix = []byte("block-num-")
@@ -129,7 +130,7 @@ func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
 // header. The difference between this and GetHeadBlockHash is that whereas the
 // header. The difference between this and GetHeadBlockHash is that whereas the
 // last block hash is only updated upon a full block import, the last header
 // last block hash is only updated upon a full block import, the last header
 // hash is updated already at header import, allowing head tracking for the
 // hash is updated already at header import, allowing head tracking for the
-// fast synchronization mechanism.
+// light synchronization mechanism.
 func GetHeadHeaderHash(db ethdb.Database) common.Hash {
 func GetHeadHeaderHash(db ethdb.Database) common.Hash {
 	data, _ := db.Get(headHeaderKey)
 	data, _ := db.Get(headHeaderKey)
 	if len(data) == 0 {
 	if len(data) == 0 {
@@ -147,6 +148,18 @@ func GetHeadBlockHash(db ethdb.Database) common.Hash {
 	return common.BytesToHash(data)
 	return common.BytesToHash(data)
 }
 }
 
 
+// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
+// fast synchronization. The difference between this and GetHeadBlockHash is that
+// whereas the last block hash is only updated upon a full block import, the last
+// fast hash is updated when importing pre-processed blocks.
+func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
+	data, _ := db.Get(headFastKey)
+	if len(data) == 0 {
+		return common.Hash{}
+	}
+	return common.BytesToHash(data)
+}
+
 // GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
 // GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
 // if the header's not found.
 // if the header's not found.
 func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
 func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {
@@ -249,6 +262,15 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
 	return nil
 	return nil
 }
 }
 
 
+// WriteHeadFastBlockHash stores the fast head block's hash.
+func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
+	if err := db.Put(headFastKey, hash.Bytes()); err != nil {
+		glog.Fatalf("failed to store last fast block's hash into database: %v", err)
+		return err
+	}
+	return nil
+}
+
 // WriteHeader serializes a block header into the database.
 // WriteHeader serializes a block header into the database.
 func WriteHeader(db ethdb.Database, header *types.Header) error {
 func WriteHeader(db ethdb.Database, header *types.Header) error {
 	data, err := rlp.EncodeToBytes(header)
 	data, err := rlp.EncodeToBytes(header)
@@ -372,7 +394,7 @@ func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts)
 		bloomDat, _ := db.Get(key)
 		bloomDat, _ := db.Get(key)
 		bloom := types.BytesToBloom(bloomDat)
 		bloom := types.BytesToBloom(bloomDat)
 		for _, receipt := range receipts {
 		for _, receipt := range receipts {
-			for _, log := range receipt.Logs() {
+			for _, log := range receipt.Logs {
 				bloom.Add(log.Address.Big())
 				bloom.Add(log.Address.Big())
 			}
 			}
 		}
 		}

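With the new headFastKey, the database tracks three separate head markers: the header head advances on header import, the fast block head on receipt import, and the block head only on fully processed blocks. A sketch reading all three back with the accessors from this file (the function name is illustrative):

package core

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
)

// reportHeads prints the three head markers maintained by the chain database.
func reportHeads(db ethdb.Database) {
	fmt.Printf("last header:     %x\n", GetHeadHeaderHash(db))
	fmt.Printf("last fast block: %x\n", GetHeadFastBlockHash(db))
	fmt.Printf("last full block: %x\n", GetHeadBlockHash(db))
}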
+ 36 - 17
core/chain_util_test.go

@@ -163,7 +163,12 @@ func TestBlockStorage(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()
 	db, _ := ethdb.NewMemDatabase()
 
 
 	// Create a test block to move around the database and make sure it's really new
 	// Create a test block to move around the database and make sure it's really new
-	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
+	block := types.NewBlockWithHeader(&types.Header{
+		Extra:       []byte("test block"),
+		UncleHash:   types.EmptyUncleHash,
+		TxHash:      types.EmptyRootHash,
+		ReceiptHash: types.EmptyRootHash,
+	})
 	if entry := GetBlock(db, block.Hash()); entry != nil {
 	if entry := GetBlock(db, block.Hash()); entry != nil {
 		t.Fatalf("Non existent block returned: %v", entry)
 		t.Fatalf("Non existent block returned: %v", entry)
 	}
 	}
@@ -208,8 +213,12 @@ func TestBlockStorage(t *testing.T) {
 // Tests that partial block contents don't get reassembled into full blocks.
 // Tests that partial block contents don't get reassembled into full blocks.
 func TestPartialBlockStorage(t *testing.T) {
 func TestPartialBlockStorage(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()
 	db, _ := ethdb.NewMemDatabase()
-	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
-
+	block := types.NewBlockWithHeader(&types.Header{
+		Extra:       []byte("test block"),
+		UncleHash:   types.EmptyUncleHash,
+		TxHash:      types.EmptyRootHash,
+		ReceiptHash: types.EmptyRootHash,
+	})
 	// Store a header and check that it's not recognized as a block
 	// Store a header and check that it's not recognized as a block
 	if err := WriteHeader(db, block.Header()); err != nil {
 	if err := WriteHeader(db, block.Header()); err != nil {
 		t.Fatalf("Failed to write header into database: %v", err)
 		t.Fatalf("Failed to write header into database: %v", err)
@@ -298,6 +307,7 @@ func TestHeadStorage(t *testing.T) {
 
 
 	blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
 	blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
 	blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
 	blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
+	blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})
 
 
 	// Check that no head entries are in a pristine database
 	// Check that no head entries are in a pristine database
 	if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
 	if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
@@ -306,6 +316,9 @@ func TestHeadStorage(t *testing.T) {
 	if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
 	if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
 		t.Fatalf("Non head block entry returned: %v", entry)
 		t.Fatalf("Non head block entry returned: %v", entry)
 	}
 	}
+	if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) {
+		t.Fatalf("Non fast head block entry returned: %v", entry)
+	}
 	// Assign separate entries for the head header and block
 	// Assign separate entries for the head header and block
 	if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
 	if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
 		t.Fatalf("Failed to write head header hash: %v", err)
 		t.Fatalf("Failed to write head header hash: %v", err)
@@ -313,6 +326,9 @@ func TestHeadStorage(t *testing.T) {
 	if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
 	if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
 		t.Fatalf("Failed to write head block hash: %v", err)
 		t.Fatalf("Failed to write head block hash: %v", err)
 	}
 	}
+	if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil {
+		t.Fatalf("Failed to write fast head block hash: %v", err)
+	}
 	// Check that both heads are present, and different (i.e. two heads maintained)
 	// Check that both heads are present, and different (i.e. two heads maintained)
 	if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
 	if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
 		t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
 		t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
@@ -320,21 +336,24 @@ func TestHeadStorage(t *testing.T) {
 	if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
 	if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
 		t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
 		t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
 	}
 	}
+	if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() {
+		t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
+	}
 }
 }
 
 
 func TestMipmapBloom(t *testing.T) {
 func TestMipmapBloom(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()
 	db, _ := ethdb.NewMemDatabase()
 
 
 	receipt1 := new(types.Receipt)
 	receipt1 := new(types.Receipt)
-	receipt1.SetLogs(vm.Logs{
+	receipt1.Logs = vm.Logs{
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("address"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("address"))},
-	})
+	}
 	receipt2 := new(types.Receipt)
 	receipt2 := new(types.Receipt)
-	receipt2.SetLogs(vm.Logs{
+	receipt2.Logs = vm.Logs{
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("address1"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("address1"))},
-	})
+	}
 
 
 	WriteMipmapBloom(db, 1, types.Receipts{receipt1})
 	WriteMipmapBloom(db, 1, types.Receipts{receipt1})
 	WriteMipmapBloom(db, 2, types.Receipts{receipt2})
 	WriteMipmapBloom(db, 2, types.Receipts{receipt2})
@@ -349,15 +368,15 @@ func TestMipmapBloom(t *testing.T) {
 	// reset
 	// reset
 	db, _ = ethdb.NewMemDatabase()
 	db, _ = ethdb.NewMemDatabase()
 	receipt := new(types.Receipt)
 	receipt := new(types.Receipt)
-	receipt.SetLogs(vm.Logs{
+	receipt.Logs = vm.Logs{
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("test"))},
-	})
+	}
 	WriteMipmapBloom(db, 999, types.Receipts{receipt1})
 	WriteMipmapBloom(db, 999, types.Receipts{receipt1})
 
 
 	receipt = new(types.Receipt)
 	receipt = new(types.Receipt)
-	receipt.SetLogs(vm.Logs{
+	receipt.Logs = vm.Logs{
 		&vm.Log{Address: common.BytesToAddress([]byte("test 1"))},
 		&vm.Log{Address: common.BytesToAddress([]byte("test 1"))},
-	})
+	}
 	WriteMipmapBloom(db, 1000, types.Receipts{receipt})
 	WriteMipmapBloom(db, 1000, types.Receipts{receipt})
 
 
 	bloom := GetMipmapBloom(db, 1000, 1000)
 	bloom := GetMipmapBloom(db, 1000, 1000)
@@ -384,22 +403,22 @@ func TestMipmapChain(t *testing.T) {
 	defer db.Close()
 	defer db.Close()
 
 
 	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr, big.NewInt(1000000)})
 	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr, big.NewInt(1000000)})
-	chain := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) {
+	chain, receipts := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) {
 		var receipts types.Receipts
 		var receipts types.Receipts
 		switch i {
 		switch i {
 		case 1:
 		case 1:
 			receipt := types.NewReceipt(nil, new(big.Int))
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 				&vm.Log{
 					Address: addr,
 					Address: addr,
 					Topics:  []common.Hash{hash1},
 					Topics:  []common.Hash{hash1},
 				},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 			receipts = types.Receipts{receipt}
 		case 1000:
 		case 1000:
 			receipt := types.NewReceipt(nil, new(big.Int))
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{&vm.Log{Address: addr2}})
+			receipt.Logs = vm.Logs{&vm.Log{Address: addr2}}
 			gen.AddUncheckedReceipt(receipt)
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 			receipts = types.Receipts{receipt}
 
 
@@ -412,7 +431,7 @@ func TestMipmapChain(t *testing.T) {
 		}
 		}
 		WriteMipmapBloom(db, uint64(i+1), receipts)
 		WriteMipmapBloom(db, uint64(i+1), receipts)
 	})
 	})
-	for _, block := range chain {
+	for i, block := range chain {
 		WriteBlock(db, block)
 		WriteBlock(db, block)
 		if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 		if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
 			t.Fatalf("failed to insert block number: %v", err)
@@ -420,7 +439,7 @@ func TestMipmapChain(t *testing.T) {
 		if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
 		if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
 			t.Fatalf("failed to insert block number: %v", err)
 		}
 		}
-		if err := PutBlockReceipts(db, block, block.Receipts()); err != nil {
+		if err := PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
 			t.Fatal("error writing block receipts:", err)
 			t.Fatal("error writing block receipts:", err)
 		}
 		}
 	}
 	}

+ 1 - 1
core/error.go

@@ -111,7 +111,7 @@ type BlockNonceErr struct {
 }
 }
 
 
 func (err *BlockNonceErr) Error() string {
 func (err *BlockNonceErr) Error() string {
-	return fmt.Sprintf("block %d (%v) nonce is invalid (got %d)", err.Number, err.Hash, err.Nonce)
+	return fmt.Sprintf("nonce for #%d [%x…] is invalid (got %d)", err.Number, err.Hash, err.Nonce)
 }
 }
 
 
 // IsBlockNonceErr returns true for invalid block nonce errors.
 // IsBlockNonceErr returns true for invalid block nonce errors.

+ 1 - 1
core/genesis.go

@@ -103,7 +103,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
 	if err := WriteBlock(chainDb, block); err != nil {
 	if err := WriteBlock(chainDb, block); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	if err := PutBlockReceipts(chainDb, block, nil); err != nil {
+	if err := PutBlockReceipts(chainDb, block.Hash(), nil); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {
 	if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {

+ 70 - 0
core/state/sync.go

@@ -0,0 +1,70 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"bytes"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+)
+
+// StateSync is the main state synchronisation scheduler, which provides the
+// yet-unknown state hashes to retrieve, accepts node data associated with said
+// hashes and reconstructs the state database step by step until all is done.
+type StateSync trie.TrieSync
+
+// NewStateSync creates a new state trie download scheduler.
+func NewStateSync(root common.Hash, database ethdb.Database) *StateSync {
+	var syncer *trie.TrieSync
+
+	callback := func(leaf []byte, parent common.Hash) error {
+		var obj struct {
+			Nonce    uint64
+			Balance  *big.Int
+			Root     common.Hash
+			CodeHash []byte
+		}
+		if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
+			return err
+		}
+		syncer.AddSubTrie(obj.Root, 64, parent, nil)
+		syncer.AddRawEntry(common.BytesToHash(obj.CodeHash), 64, parent)
+
+		return nil
+	}
+	syncer = trie.NewTrieSync(root, database, callback)
+	return (*StateSync)(syncer)
+}
+
+// Missing retrieves the known missing nodes from the state trie for retrieval.
+func (s *StateSync) Missing(max int) []common.Hash {
+	return (*trie.TrieSync)(s).Missing(max)
+}
+
+// Process injects a batch of retrieved trie nodes data.
+func (s *StateSync) Process(list []trie.SyncResult) (int, error) {
+	return (*trie.TrieSync)(s).Process(list)
+}
+
+// Pending returns the number of state entries currently pending for download.
+func (s *StateSync) Pending() int {
+	return (*trie.TrieSync)(s).Pending()
+}

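The scheduler above is driven by a plain retrieve-and-process loop: ask for missing hashes, fetch the corresponding nodes, feed them back until nothing remains. A sketch of such a driver, where fetch is a hypothetical stand-in for the downloader's network retrieval (it must return the RLP-encoded trie nodes for the requested hashes, in order):

package state

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// syncState pulls the state trie rooted at root into db, batch by batch.
func syncState(root common.Hash, db ethdb.Database, fetch func([]common.Hash) ([][]byte, error)) error {
	sched := NewStateSync(root, db)

	for queue := sched.Missing(256); len(queue) > 0; queue = sched.Missing(256) {
		// Retrieve the scheduled nodes (a network round trip in real use)
		data, err := fetch(queue)
		if err != nil {
			return err
		}
		// Feeding the results back unlocks their children for scheduling
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			results[i] = trie.SyncResult{hash, data[i]}
		}
		if _, err := sched.Process(results); err != nil {
			return err
		}
	}
	return nil
}

The iterative tests below exercise exactly this loop against an in-memory source database.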
+ 238 - 0
core/state/sync_test.go

@@ -0,0 +1,238 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"bytes"
+	"math/big"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/trie"
+)
+
+// testAccount is the data associated with an account used by the state tests.
+type testAccount struct {
+	address common.Address
+	balance *big.Int
+	nonce   uint64
+	code    []byte
+}
+
+// makeTestState creates a sample test state to test node-wise reconstruction.
+func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
+	// Create an empty state
+	db, _ := ethdb.NewMemDatabase()
+	state, _ := New(common.Hash{}, db)
+
+	// Fill it with some arbitrary data
+	accounts := []*testAccount{}
+	for i := byte(0); i < 255; i++ {
+		obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
+		acc := &testAccount{address: common.BytesToAddress([]byte{i})}
+
+		obj.AddBalance(big.NewInt(int64(11 * i)))
+		acc.balance = big.NewInt(int64(11 * i))
+
+		obj.SetNonce(uint64(42 * i))
+		acc.nonce = uint64(42 * i)
+
+		if i%3 == 0 {
+			obj.SetCode([]byte{i, i, i, i, i})
+			acc.code = []byte{i, i, i, i, i}
+		}
+		state.UpdateStateObject(obj)
+		accounts = append(accounts, acc)
+	}
+	root, _ := state.Commit()
+
+	// Return the generated state
+	return db, root, accounts
+}
+
+// checkStateAccounts cross references a reconstructed state with an expected
+// account array.
+func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
+	state, _ := New(root, db)
+	for i, acc := range accounts {
+
+		if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
+			t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
+		}
+		if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
+			t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
+		}
+		if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
+			t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
+		}
+	}
+}
+
+// Tests that an empty state is not scheduled for syncing.
+func TestEmptyStateSync(t *testing.T) {
+	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+	db, _ := ethdb.NewMemDatabase()
+	if req := NewStateSync(empty, db).Missing(1); len(req) != 0 {
+		t.Errorf("content requested for empty state: %v", req)
+	}
+}
+
+// Tests that given a root hash, a state can sync iteratively on a single thread,
+// requesting retrieval tasks and returning all of them in one go.
+func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) }
+func TestIterativeStateSyncBatched(t *testing.T)    { testIterativeStateSync(t, 100) }
+
+func testIterativeStateSync(t *testing.T, batch int) {
+	// Create a random state to copy
+	srcDb, srcRoot, srcAccounts := makeTestState()
+
+	// Create a destination state and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewStateSync(srcRoot, dstDb)
+
+	queue := append([]common.Hash{}, sched.Missing(batch)...)
+	for len(queue) > 0 {
+		results := make([]trie.SyncResult, len(queue))
+		for i, hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results[i] = trie.SyncResult{hash, data}
+		}
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = append(queue[:0], sched.Missing(batch)...)
+	}
+	// Cross check that the two states are in sync
+	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
+}
+
+// Tests that the trie scheduler can correctly reconstruct the state even if only
+// partial results are returned, with the rest sent only later.
+func TestIterativeDelayedStateSync(t *testing.T) {
+	// Create a random state to copy
+	srcDb, srcRoot, srcAccounts := makeTestState()
+
+	// Create a destination state and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewStateSync(srcRoot, dstDb)
+
+	queue := append([]common.Hash{}, sched.Missing(0)...)
+	for len(queue) > 0 {
+		// Sync only half of the scheduled nodes
+		results := make([]trie.SyncResult, len(queue)/2+1)
+		for i, hash := range queue[:len(results)] {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results[i] = trie.SyncResult{hash, data}
+		}
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = append(queue[len(results):], sched.Missing(0)...)
+	}
+	// Cross check that the two states are in sync
+	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
+}
+
+// Tests that given a root hash, a state can sync iteratively on a single thread,
+// requesting retrieval tasks and returning all of them in one go, though in a
+// random order.
+func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
+func TestIterativeRandomStateSyncBatched(t *testing.T)    { testIterativeRandomStateSync(t, 100) }
+
+func testIterativeRandomStateSync(t *testing.T, batch int) {
+	// Create a random state to copy
+	srcDb, srcRoot, srcAccounts := makeTestState()
+
+	// Create a destination state and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewStateSync(srcRoot, dstDb)
+
+	queue := make(map[common.Hash]struct{})
+	for _, hash := range sched.Missing(batch) {
+		queue[hash] = struct{}{}
+	}
+	for len(queue) > 0 {
+		// Fetch all the queued nodes in a random order
+		results := make([]trie.SyncResult, 0, len(queue))
+		for hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results = append(results, trie.SyncResult{hash, data})
+		}
+		// Feed the retrieved results back and queue new tasks
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = make(map[common.Hash]struct{})
+		for _, hash := range sched.Missing(batch) {
+			queue[hash] = struct{}{}
+		}
+	}
+	// Cross check that the two states are in sync
+	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
+}
+
+// Tests that the trie scheduler can correctly reconstruct the state even if only
+// partial results are returned (and those in a random order), with the rest sent
+// only later.
+func TestIterativeRandomDelayedStateSync(t *testing.T) {
+	// Create a random state to copy
+	srcDb, srcRoot, srcAccounts := makeTestState()
+
+	// Create a destination state and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewStateSync(srcRoot, dstDb)
+
+	queue := make(map[common.Hash]struct{})
+	for _, hash := range sched.Missing(0) {
+		queue[hash] = struct{}{}
+	}
+	for len(queue) > 0 {
+		// Sync only half of the scheduled nodes, in a random order
+		results := make([]trie.SyncResult, 0, len(queue)/2+1)
+		for hash := range queue {
+			delete(queue, hash)
+
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results = append(results, trie.SyncResult{hash, data})
+
+			if len(results) >= cap(results) {
+				break
+			}
+		}
+		// Feed the retrieved results back and queue new tasks
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		for _, hash := range sched.Missing(0) {
+			queue[hash] = struct{}{}
+		}
+	}
+	// Cross check that the two states are in sync
+	checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
+}

+ 9 - 9
core/transaction_util.go

@@ -140,11 +140,14 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
 	if len(data) == 0 {
 		return nil
 	}
-
-	var receipts types.Receipts
-	err := rlp.DecodeBytes(data, &receipts)
-	if err != nil {
-		glog.V(logger.Core).Infoln("GetReceiptse err", err)
+	rs := []*types.ReceiptForStorage{}
+	if err := rlp.DecodeBytes(data, &rs); err != nil {
+		glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err)
+		return nil
+	}
+	receipts := make(types.Receipts, len(rs))
+	for i, receipt := range rs {
+		receipts[i] = (*types.Receipt)(receipt)
 	}
 	return receipts
 }
@@ -152,7 +155,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
 // PutBlockReceipts stores the block's transactions associated receipts
 // and stores them by block hash in a single slice. This is required for
 // forks and chain reorgs
-func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Receipts) error {
+func PutBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
 	rs := make([]*types.ReceiptForStorage, len(receipts))
 	for i, receipt := range receipts {
 		rs[i] = (*types.ReceiptForStorage)(receipt)
@@ -161,12 +164,9 @@ func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Rece
 	if err != nil {
 		return err
 	}
-
-	hash := block.Hash()
 	err = db.Put(append(blockReceiptsPre, hash[:]...), bytes)
 	if err != nil {
 		return err
 	}
-
 	return nil
 }
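
Passing the hash instead of the whole block lets callers persist receipts for blocks whose bodies are not available locally, which fast sync relies on. A hedged round-trip sketch (db, block and receipts assumed in scope):

	if err := core.PutBlockReceipts(db, block.Hash(), receipts); err != nil {
		return err
	}
	stored := core.GetBlockReceipts(db, block.Hash()) // decoded back through ReceiptForStorage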

+ 14 - 17
core/types/block.go

@@ -128,7 +128,6 @@ type Block struct {
 	header       *Header
 	uncles       []*Header
 	transactions Transactions
-	receipts     Receipts
 
 	// caches
 	hash atomic.Value
@@ -172,8 +171,8 @@ type storageblock struct {
 }
 
 var (
-	emptyRootHash  = DeriveSha(Transactions{})
-	emptyUncleHash = CalcUncleHash(nil)
+	EmptyRootHash  = DeriveSha(Transactions{})
+	EmptyUncleHash = CalcUncleHash(nil)
 )
 
 // NewBlock creates a new block. The input data is copied,
@@ -184,11 +183,11 @@ var (
 // are ignored and set to values derived from the given txs, uncles
 // and receipts.
 func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
-	b := &Block{header: copyHeader(header), td: new(big.Int)}
+	b := &Block{header: CopyHeader(header), td: new(big.Int)}
 
 	// TODO: panic if len(txs) != len(receipts)
 	if len(txs) == 0 {
-		b.header.TxHash = emptyRootHash
+		b.header.TxHash = EmptyRootHash
 	} else {
 		b.header.TxHash = DeriveSha(Transactions(txs))
 		b.transactions = make(Transactions, len(txs))
@@ -196,21 +195,19 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 	}
 
 	if len(receipts) == 0 {
-		b.header.ReceiptHash = emptyRootHash
+		b.header.ReceiptHash = EmptyRootHash
 	} else {
 		b.header.ReceiptHash = DeriveSha(Receipts(receipts))
 		b.header.Bloom = CreateBloom(receipts)
-		b.receipts = make([]*Receipt, len(receipts))
-		copy(b.receipts, receipts)
 	}
 
 	if len(uncles) == 0 {
-		b.header.UncleHash = emptyUncleHash
+		b.header.UncleHash = EmptyUncleHash
 	} else {
 		b.header.UncleHash = CalcUncleHash(uncles)
 		b.uncles = make([]*Header, len(uncles))
 		for i := range uncles {
-			b.uncles[i] = copyHeader(uncles[i])
+			b.uncles[i] = CopyHeader(uncles[i])
 		}
 	}
 
@@ -221,10 +218,12 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 // header data is copied, changes to header and to the field values
 // will not affect the block.
 func NewBlockWithHeader(header *Header) *Block {
-	return &Block{header: copyHeader(header)}
+	return &Block{header: CopyHeader(header)}
 }
 
-func copyHeader(h *Header) *Header {
+// CopyHeader creates a deep copy of a block header to prevent side effects from
+// modifying a header variable.
+func CopyHeader(h *Header) *Header {
 	cpy := *h
 	if cpy.Time = new(big.Int); h.Time != nil {
 		cpy.Time.Set(h.Time)
@@ -297,7 +296,6 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
 // TODO: copies
 func (b *Block) Uncles() []*Header          { return b.uncles }
 func (b *Block) Transactions() Transactions { return b.transactions }
-func (b *Block) Receipts() Receipts         { return b.receipts }
 
 func (b *Block) Transaction(hash common.Hash) *Transaction {
 	for _, transaction := range b.transactions {
@@ -326,7 +324,7 @@ func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
 func (b *Block) UncleHash() common.Hash   { return b.header.UncleHash }
 func (b *Block) Extra() []byte            { return common.CopyBytes(b.header.Extra) }
 
-func (b *Block) Header() *Header { return copyHeader(b.header) }
+func (b *Block) Header() *Header { return CopyHeader(b.header) }
 
 func (b *Block) HashNoNonce() common.Hash {
 	return b.header.HashNoNonce()
@@ -362,7 +360,6 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
 	return &Block{
 		header:       &cpy,
 		transactions: b.transactions,
-		receipts:     b.receipts,
 		uncles:       b.uncles,
 	}
 }
@@ -370,13 +367,13 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
 // WithBody returns a new block with the given transaction and uncle contents.
 func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
 	block := &Block{
-		header:       copyHeader(b.header),
+		header:       CopyHeader(b.header),
 		transactions: make([]*Transaction, len(transactions)),
 		uncles:       make([]*Header, len(uncles)),
 	}
 	copy(block.transactions, transactions)
 	for i := range uncles {
-		block.uncles[i] = copyHeader(uncles[i])
+		block.uncles[i] = CopyHeader(uncles[i])
 	}
 	return block
 }
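
Because Header() now hands out a deep copy via the exported CopyHeader, mutating the returned header cannot corrupt the block it came from; illustrative only, with block assumed in scope:

	header := block.Header()
	header.Extra = []byte("scratch") // block.Extra() is unaffected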

+ 1 - 1
core/types/bloom9.go

@@ -72,7 +72,7 @@ func (b Bloom) TestBytes(test []byte) bool {
 func CreateBloom(receipts Receipts) Bloom {
 	bin := new(big.Int)
 	for _, receipt := range receipts {
-		bin.Or(bin, LogsBloom(receipt.logs))
+		bin.Or(bin, LogsBloom(receipt.Logs))
 	}
 
 	return BytesToBloom(bin.Bytes())

+ 2 - 0
core/types/common.go

@@ -20,4 +20,6 @@ import "github.com/ethereum/go-ethereum/core/vm"
 
 type BlockProcessor interface {
 	Process(*Block) (vm.Logs, Receipts, error)
+	ValidateHeader(*Header, bool, bool) error
+	ValidateHeaderWithParent(*Header, *Header, bool, bool) error
 }

+ 75 - 49
core/types/receipt.go

@@ -17,7 +17,6 @@
 package types
 
 import (
-	"bytes"
 	"fmt"
 	"io"
 	"math/big"
@@ -27,89 +26,116 @@ import (
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/rlp"
 )
 )
 
 
+// Receipt represents the results of a transaction.
 type Receipt struct {
 type Receipt struct {
+	// Consensus fields
 	PostState         []byte
 	PostState         []byte
 	CumulativeGasUsed *big.Int
 	CumulativeGasUsed *big.Int
 	Bloom             Bloom
 	Bloom             Bloom
-	TxHash            common.Hash
-	ContractAddress   common.Address
-	logs              vm.Logs
-	GasUsed           *big.Int
-}
-
-func NewReceipt(root []byte, cumalativeGasUsed *big.Int) *Receipt {
-	return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumalativeGasUsed)}
-}
+	Logs              vm.Logs
 
-func (self *Receipt) SetLogs(logs vm.Logs) {
-	self.logs = logs
+	// Implementation fields
+	TxHash          common.Hash
+	ContractAddress common.Address
+	GasUsed         *big.Int
 }
 
-func (self *Receipt) Logs() vm.Logs {
-	return self.logs
+// NewReceipt creates a barebone transaction receipt, copying the init fields.
+func NewReceipt(root []byte, cumulativeGasUsed *big.Int) *Receipt {
+	return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}
 }
 
-func (self *Receipt) EncodeRLP(w io.Writer) error {
-	return rlp.Encode(w, []interface{}{self.PostState, self.CumulativeGasUsed, self.Bloom, self.logs})
+// EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt
+// into an RLP stream.
+func (r *Receipt) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, []interface{}{r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs})
 }
 
-func (self *Receipt) DecodeRLP(s *rlp.Stream) error {
-	var r struct {
+// DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt
+// from an RLP stream.
+func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
+	var receipt struct {
 		PostState         []byte
 		CumulativeGasUsed *big.Int
 		Bloom             Bloom
-		TxHash            common.Hash
-		ContractAddress   common.Address
 		Logs              vm.Logs
-		GasUsed           *big.Int
 	}
-	if err := s.Decode(&r); err != nil {
+	if err := s.Decode(&receipt); err != nil {
 		return err
 	}
-	self.PostState, self.CumulativeGasUsed, self.Bloom, self.TxHash, self.ContractAddress, self.logs, self.GasUsed = r.PostState, r.CumulativeGasUsed, r.Bloom, r.TxHash, r.ContractAddress, r.Logs, r.GasUsed
-
+	r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs = receipt.PostState, receipt.CumulativeGasUsed, receipt.Bloom, receipt.Logs
 	return nil
 }
 
-type ReceiptForStorage Receipt
-
-func (self *ReceiptForStorage) EncodeRLP(w io.Writer) error {
-	storageLogs := make([]*vm.LogForStorage, len(self.logs))
-	for i, log := range self.logs {
-		storageLogs[i] = (*vm.LogForStorage)(log)
-	}
-	return rlp.Encode(w, []interface{}{self.PostState, self.CumulativeGasUsed, self.Bloom, self.TxHash, self.ContractAddress, storageLogs, self.GasUsed})
-}
-
-func (self *Receipt) RlpEncode() []byte {
-	bytes, err := rlp.EncodeToBytes(self)
+// RlpEncode implements common.RlpEncode required for SHA3 derivation.
+func (r *Receipt) RlpEncode() []byte {
+	bytes, err := rlp.EncodeToBytes(r)
 	if err != nil {
-		fmt.Println("TMP -- RECEIPT ENCODE ERROR", err)
+		panic(err)
 	}
 	return bytes
 }
 
-func (self *Receipt) Cmp(other *Receipt) bool {
-	if bytes.Compare(self.PostState, other.PostState) != 0 {
-		return false
-	}
+// String implements the Stringer interface.
+func (r *Receipt) String() string {
+	return fmt.Sprintf("receipt{med=%x cgas=%v bloom=%x logs=%v}", r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs)
+}
+
+// ReceiptForStorage is a wrapper around a Receipt that flattens and parses the
+// entire content of a receipt, as opposed to only the consensus fields.
+type ReceiptForStorage Receipt
 
-	return true
+// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt
+// into an RLP stream.
+func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error {
+	logs := make([]*vm.LogForStorage, len(r.Logs))
+	for i, log := range r.Logs {
+		logs[i] = (*vm.LogForStorage)(log)
+	}
+	return rlp.Encode(w, []interface{}{r.PostState, r.CumulativeGasUsed, r.Bloom, r.TxHash, r.ContractAddress, logs, r.GasUsed})
 }
 
-func (self *Receipt) String() string {
-	return fmt.Sprintf("receipt{med=%x cgas=%v bloom=%x logs=%v}", self.PostState, self.CumulativeGasUsed, self.Bloom, self.logs)
+// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation
+// fields of a receipt from an RLP stream.
+func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
+	var receipt struct {
+		PostState         []byte
+		CumulativeGasUsed *big.Int
+		Bloom             Bloom
+		TxHash            common.Hash
+		ContractAddress   common.Address
+		Logs              []*vm.LogForStorage
+		GasUsed           *big.Int
+	}
+	if err := s.Decode(&receipt); err != nil {
+		return err
+	}
+	// Assign the consensus fields
+	r.PostState, r.CumulativeGasUsed, r.Bloom = receipt.PostState, receipt.CumulativeGasUsed, receipt.Bloom
+	r.Logs = make(vm.Logs, len(receipt.Logs))
+	for i, log := range receipt.Logs {
+		r.Logs[i] = (*vm.Log)(log)
+	}
+	// Assign the implementation fields
+	r.TxHash, r.ContractAddress, r.GasUsed = receipt.TxHash, receipt.ContractAddress, receipt.GasUsed
+
+	return nil
 }
 }
 
+// Receipts is a wrapper around a Receipt array to implement types.DerivableList.
 type Receipts []*Receipt
 
-	bytes, err := rlp.EncodeToBytes(self)
+// RlpEncode implements common.RlpEncode required for SHA3 derivation.
+func (r Receipts) RlpEncode() []byte {
+	bytes, err := rlp.EncodeToBytes(r)
 	if err != nil {
 	if err != nil {
-		fmt.Println("TMP -- RECEIPTS ENCODE ERROR", err)
+		panic(err)
 	}
 	return bytes
 }
 
-func (self Receipts) GetRlp(i int) []byte { return common.Rlp(self[i]) }
+// Len returns the number of receipts in this list.
+func (r Receipts) Len() int { return len(r) }
+
+// GetRlp returns the RLP encoding of one receipt from the list.
+func (r Receipts) GetRlp(i int) []byte { return common.Rlp(r[i]) }

+ 28 - 23
core/vm/log.go

@@ -25,42 +25,47 @@ import (
 )
 
 type Log struct {
+	// Consensus fields
 	Address common.Address
 	Topics  []common.Hash
 	Data    []byte
-	Number  uint64
 
-	TxHash    common.Hash
-	TxIndex   uint
-	BlockHash common.Hash
-	Index     uint
+	// Derived fields (don't reorder!)
+	BlockNumber uint64
+	TxHash      common.Hash
+	TxIndex     uint
+	BlockHash   common.Hash
+	Index       uint
 }
 
 func NewLog(address common.Address, topics []common.Hash, data []byte, number uint64) *Log {
-	return &Log{Address: address, Topics: topics, Data: data, Number: number}
+	return &Log{Address: address, Topics: topics, Data: data, BlockNumber: number}
 }
 
-func (self *Log) EncodeRLP(w io.Writer) error {
-	return rlp.Encode(w, []interface{}{self.Address, self.Topics, self.Data})
+func (l *Log) EncodeRLP(w io.Writer) error {
+	return rlp.Encode(w, []interface{}{l.Address, l.Topics, l.Data})
 }
 
-func (self *Log) String() string {
-	return fmt.Sprintf(`log: %x %x %x %x %d %x %d`, self.Address, self.Topics, self.Data, self.TxHash, self.TxIndex, self.BlockHash, self.Index)
+func (l *Log) DecodeRLP(s *rlp.Stream) error {
+	var log struct {
+		Address common.Address
+		Topics  []common.Hash
+		Data    []byte
+	}
+	if err := s.Decode(&log); err != nil {
+		return err
+	}
+	l.Address, l.Topics, l.Data = log.Address, log.Topics, log.Data
+	return nil
+}
+
+func (l *Log) String() string {
+	return fmt.Sprintf(`log: %x %x %x %x %d %x %d`, l.Address, l.Topics, l.Data, l.TxHash, l.TxIndex, l.BlockHash, l.Index)
 }
 
 type Logs []*Log
 
+// LogForStorage is a wrapper around a Log that flattens and parses the entire
+// content of a log, as opposed to only the consensus fields (by hiding the
+// rlp interface methods).
 type LogForStorage Log
-
-func (self *LogForStorage) EncodeRLP(w io.Writer) error {
-	return rlp.Encode(w, []interface{}{
-		self.Address,
-		self.Topics,
-		self.Data,
-		self.Number,
-		self.TxHash,
-		self.TxIndex,
-		self.BlockHash,
-		self.Index,
-	})
-}
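
With DecodeRLP added, a Log round-trips only its consensus fields; the derived ones come back zeroed and must be re-filled by the chain processor. A small sketch (addr assumed in scope):

	enc, _ := rlp.EncodeToBytes(&vm.Log{Address: addr, Data: []byte{1}, BlockNumber: 42})
	log := new(vm.Log)
	rlp.DecodeBytes(enc, log) // log.BlockNumber is 0 here, not 42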

+ 5 - 4
eth/backend.go

@@ -88,6 +88,7 @@ type Config struct {
 	GenesisNonce int
 	GenesisFile  string
 	GenesisBlock *types.Block // used by block tests
+	FastSync     bool
 	Olympic      bool
 
 	BlockChainVersion  int
@@ -390,7 +391,6 @@ func New(config *Config) (*Ethereum, error) {
 		if err == core.ErrNoGenesis {
 			return nil, fmt.Errorf(`Genesis block not found. Please supply a genesis block with the "--genesis /path/to/file" argument`)
 		}
-
 		return nil, err
 	}
 	newPool := core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
@@ -398,8 +398,9 @@ func New(config *Config) (*Ethereum, error) {
 
 	eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux())
 	eth.blockchain.SetProcessor(eth.blockProcessor)
-	eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb)
-
+	if eth.protocolManager, err = NewProtocolManager(config.FastSync, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
+		return nil, err
+	}
 	eth.miner = miner.New(eth, eth.EventMux(), eth.pow)
 	eth.miner.SetGasPrice(config.GasPrice)
 	eth.miner.SetExtra(config.ExtraData)
@@ -462,7 +463,7 @@ func (s *Ethereum) NodeInfo() *NodeInfo {
 		DiscPort:   int(node.UDP),
 		TCPPort:    int(node.TCP),
 		ListenAddr: s.net.ListenAddr,
-		Td:         s.BlockChain().Td().String(),
+		Td:         s.BlockChain().GetTd(s.BlockChain().CurrentBlock().Hash()).String(),
 	}
 }
 

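A hedged sketch of wiring the new flag through the backend configuration (other fields elided, values illustrative):

	config := &eth.Config{
		NetworkId: 1,
		FastSync:  true, // forwarded into NewProtocolManager above
	}
	ethereum, err := eth.New(config)
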
+ 5 - 5
eth/backend_test.go

@@ -16,17 +16,17 @@ func TestMipmapUpgrade(t *testing.T) {
 	addr := common.BytesToAddress([]byte("jeff"))
 	genesis := core.WriteGenesisBlockForTesting(db)
 
-	chain := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) {
+	chain, receipts := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) {
 		var receipts types.Receipts
 		switch i {
 		case 1:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}})
+			receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		case 2:
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}})
+			receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 		}
@@ -37,7 +37,7 @@ func TestMipmapUpgrade(t *testing.T) {
 			t.Fatal(err)
 		}
 	})
-	for _, block := range chain {
+	for i, block := range chain {
 		core.WriteBlock(db, block)
 		if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
@@ -45,7 +45,7 @@ func TestMipmapUpgrade(t *testing.T) {
 		if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
 		}
-		if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil {
+		if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
 			t.Fatal("error writing block receipts:", err)
 		}
 	}

File diff view limited because it is too large
+ 342 - 244
eth/downloader/downloader.go


File diff view limited because it is too large
+ 622 - 235
eth/downloader/downloader_test.go


+ 10 - 0
eth/downloader/metrics.go

@@ -42,4 +42,14 @@ var (
 	bodyReqTimer     = metrics.NewTimer("eth/downloader/bodies/req")
 	bodyDropMeter    = metrics.NewMeter("eth/downloader/bodies/drop")
 	bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout")
+
+	receiptInMeter      = metrics.NewMeter("eth/downloader/receipts/in")
+	receiptReqTimer     = metrics.NewTimer("eth/downloader/receipts/req")
+	receiptDropMeter    = metrics.NewMeter("eth/downloader/receipts/drop")
+	receiptTimeoutMeter = metrics.NewMeter("eth/downloader/receipts/timeout")
+
+	stateInMeter      = metrics.NewMeter("eth/downloader/states/in")
+	stateReqTimer     = metrics.NewTimer("eth/downloader/states/req")
+	stateDropMeter    = metrics.NewMeter("eth/downloader/states/drop")
+	stateTimeoutMeter = metrics.NewMeter("eth/downloader/states/timeout")
 )

+ 26 - 0
eth/downloader/modes.go

@@ -0,0 +1,26 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+// SyncMode represents the synchronisation mode of the downloader.
+type SyncMode int
+
+const (
+	FullSync  SyncMode = iota // Synchronise the entire blockchain history from full blocks
+	FastSync                  // Quickly download the headers, full sync only at the chain head
+	LightSync                 // Download only the headers and terminate afterwards
+)

+ 191 - 70
eth/downloader/peer.go

@@ -36,10 +36,12 @@ type relativeHashFetcherFn func(common.Hash) error
 type absoluteHashFetcherFn func(uint64, int) error
 type blockFetcherFn func([]common.Hash) error
 
-// Block header and body fethers belonging to eth/62 and above
+// Block header and body fetchers belonging to eth/62 and above
 type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
 type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
 type blockBodyFetcherFn func([]common.Hash) error
+type receiptFetcherFn func([]common.Hash) error
+type stateFetcherFn func([]common.Hash) error
 
 var (
 	errAlreadyFetching   = errors.New("already fetching blocks from peer")
@@ -52,11 +54,18 @@ type peer struct {
 	id   string      // Unique identifier of the peer
 	head common.Hash // Hash of the peers latest known block
 
-	idle int32 // Current activity state of the peer (idle = 0, active = 1)
-	rep  int32 // Simple peer reputation
+	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
+	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
+	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)
+	rep         int32 // Simple peer reputation
 
-	capacity int32     // Number of blocks allowed to fetch per request
-	started  time.Time // Time instance when the last fetch was started
+	blockCapacity   int32 // Number of blocks (bodies) allowed to fetch per request
+	receiptCapacity int32 // Number of receipts allowed to fetch per request
+	stateCapacity   int32 // Number of node data pieces allowed to fetch per request
+
+	blockStarted   time.Time // Time instance when the last block (body) fetch was started
+	receiptStarted time.Time // Time instance when the last receipt fetch was started
+	stateStarted   time.Time // Time instance when the last node data fetch was started
 
 	ignored *set.Set // Set of hashes not to request (didn't have previously)
 
@@ -68,6 +77,9 @@ type peer struct {
 	getAbsHeaders  absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
 	getBlockBodies blockBodyFetcherFn      // [eth/62] Method to retrieve a batch of block bodies
 
+	getReceipts receiptFetcherFn // [eth/63] Method to retrieve a batch of block transaction receipts
+	getNodeData stateFetcherFn   // [eth/63] Method to retrieve a batch of state trie data
+
 	version int // Eth protocol version number to switch strategies
 }
 
@@ -75,12 +87,15 @@ type peer struct {
 // mechanisms.
 func newPeer(id string, version int, head common.Hash,
 	getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
-	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn) *peer {
+	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
+	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer {
 	return &peer{
-		id:       id,
-		head:     head,
-		capacity: 1,
-		ignored:  set.New(),
+		id:              id,
+		head:            head,
+		blockCapacity:   1,
+		receiptCapacity: 1,
+		stateCapacity:   1,
+		ignored:         set.New(),
 
 		getRelHashes: getRelHashes,
 		getAbsHashes: getAbsHashes,
@@ -90,24 +105,34 @@ func newPeer(id string, version int, head common.Hash,
 		getAbsHeaders:  getAbsHeaders,
 		getBlockBodies: getBlockBodies,
 
+		getReceipts: getReceipts,
+		getNodeData: getNodeData,
+
 		version: version,
 	}
 }
 
 // Reset clears the internal state of a peer entity.
 func (p *peer) Reset() {
-	atomic.StoreInt32(&p.idle, 0)
-	atomic.StoreInt32(&p.capacity, 1)
+	atomic.StoreInt32(&p.blockIdle, 0)
+	atomic.StoreInt32(&p.receiptIdle, 0)
+	atomic.StoreInt32(&p.stateIdle, 0)
+	atomic.StoreInt32(&p.blockCapacity, 1)
+	atomic.StoreInt32(&p.receiptCapacity, 1)
+	atomic.StoreInt32(&p.stateCapacity, 1)
 	p.ignored.Clear()
 }
 
 // Fetch61 sends a block retrieval request to the remote peer.
 func (p *peer) Fetch61(request *fetchRequest) error {
+	// Sanity check the protocol version
+	if p.version != 61 {
+		panic(fmt.Sprintf("block fetch [eth/61] requested on eth/%d", p.version))
+	}
 	// Short circuit if the peer is already fetching
-	if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) {
+	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
 		return errAlreadyFetching
 	}
-	p.started = time.Now()
+	p.blockStarted = time.Now()
 
 	// Convert the hash set to a retrievable slice
 	hashes := make([]common.Hash, 0, len(request.Hashes))
@@ -119,13 +144,17 @@ func (p *peer) Fetch61(request *fetchRequest) error {
 	return nil
 }
 
-// Fetch sends a block body retrieval request to the remote peer.
-func (p *peer) Fetch(request *fetchRequest) error {
+// FetchBodies sends a block body retrieval request to the remote peer.
+func (p *peer) FetchBodies(request *fetchRequest) error {
+	// Sanity check the protocol version
+	if p.version < 62 {
+		panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version))
+	}
 	// Short circuit if the peer is already fetching
-	if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) {
+	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
 		return errAlreadyFetching
 	}
-	p.started = time.Now()
+	p.blockStarted = time.Now()
 
 	// Convert the header set to a retrievable slice
 	hashes := make([]common.Hash, 0, len(request.Headers))
@@ -137,55 +166,97 @@ func (p *peer) Fetch(request *fetchRequest) error {
 	return nil
 }
 
-// SetIdle61 sets the peer to idle, allowing it to execute new retrieval requests.
-// Its block retrieval allowance will also be updated either up- or downwards,
-// depending on whether the previous fetch completed in time or not.
-func (p *peer) SetIdle61() {
-	// Update the peer's download allowance based on previous performance
-	scale := 2.0
-	if time.Since(p.started) > blockSoftTTL {
-		scale = 0.5
-		if time.Since(p.started) > blockHardTTL {
-			scale = 1 / float64(MaxBlockFetch) // reduces capacity to 1
-		}
+// FetchReceipts sends a receipt retrieval request to the remote peer.
+func (p *peer) FetchReceipts(request *fetchRequest) error {
+	// Sanity check the protocol version
+	if p.version < 63 {
+		panic(fmt.Sprintf("receipt fetch [eth/63+] requested on eth/%d", p.version))
 	}
-	for {
-		// Calculate the new download bandwidth allowance
-		prev := atomic.LoadInt32(&p.capacity)
-		next := int32(math.Max(1, math.Min(float64(MaxBlockFetch), float64(prev)*scale)))
+	// Short circuit if the peer is already fetching
+	if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
+		return errAlreadyFetching
+	}
+	p.receiptStarted = time.Now()
 
-		// Try to update the old value
-		if atomic.CompareAndSwapInt32(&p.capacity, prev, next) {
-			// If we're having problems at 1 capacity, try to find better peers
-			if next == 1 {
-				p.Demote()
-			}
-			break
-		}
+	// Convert the header set to a retrievable slice
+	hashes := make([]common.Hash, 0, len(request.Headers))
+	for _, header := range request.Headers {
+		hashes = append(hashes, header.Hash())
 	}
-	// Set the peer to idle to allow further block requests
-	atomic.StoreInt32(&p.idle, 0)
+	go p.getReceipts(hashes)
+
+	return nil
 }
 
-// SetIdle sets the peer to idle, allowing it to execute new retrieval requests.
+// FetchNodeData sends a node state data retrieval request to the remote peer.
+func (p *peer) FetchNodeData(request *fetchRequest) error {
+	// Sanity check the protocol version
+	if p.version < 63 {
+		panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version))
+	}
+	// Short circuit if the peer is already fetching
+	if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
+		return errAlreadyFetching
+	}
+	p.stateStarted = time.Now()
+
+	// Convert the hash set to a retrievable slice
+	hashes := make([]common.Hash, 0, len(request.Hashes))
+	for hash := range request.Hashes {
+		hashes = append(hashes, hash)
+	}
+	go p.getNodeData(hashes)
+
+	return nil
+}
+
+// SetBlocksIdle sets the peer to idle, allowing it to execute new retrieval requests.
+// Its block retrieval allowance will also be updated either up- or downwards,
+// depending on whether the previous fetch completed in time.
+func (p *peer) SetBlocksIdle() {
+	p.setIdle(p.blockStarted, blockSoftTTL, blockHardTTL, MaxBlockFetch, &p.blockCapacity, &p.blockIdle)
+}
+
+// SetBodiesIdle sets the peer to idle, allowing it to execute new retrieval requests.
 // Its block body retrieval allowance will also be updated either up- or downwards,
-// depending on whether the previous fetch completed in time or not.
-func (p *peer) SetIdle() {
+// depending on whether the previous fetch completed in time.
+func (p *peer) SetBodiesIdle() {
+	p.setIdle(p.blockStarted, bodySoftTTL, bodyHardTTL, MaxBodyFetch, &p.blockCapacity, &p.blockIdle)
+}
+
+// SetReceiptsIdle sets the peer to idle, allowing it to execute new retrieval requests.
+// Its receipt retrieval allowance will also be updated either up- or downwards,
+// depending on whether the previous fetch completed in time.
+func (p *peer) SetReceiptsIdle() {
+	p.setIdle(p.receiptStarted, receiptSoftTTL, receiptHardTTL, MaxReceiptFetch, &p.receiptCapacity, &p.receiptIdle)
+}
+
+// SetNodeDataIdle sets the peer to idle, allowing it to execute new retrieval
+// requests. Its node data retrieval allowance will also be updated either up- or
+// downwards, depending on whether the previous fetch completed in time.
+func (p *peer) SetNodeDataIdle() {
+	p.setIdle(p.stateStarted, stateSoftTTL, stateSoftTTL, MaxStateFetch, &p.stateCapacity, &p.stateIdle)
+}
+
+// setIdle sets the peer to idle, allowing it to execute new retrieval requests.
+// Its data retrieval allowance will also be updated either up- or downwards,
+// depending on whether the previous fetch completed in time.
+func (p *peer) setIdle(started time.Time, softTTL, hardTTL time.Duration, maxFetch int, capacity, idle *int32) {
 	// Update the peer's download allowance based on previous performance
 	scale := 2.0
-	if time.Since(p.started) > bodySoftTTL {
+	if time.Since(started) > softTTL {
 		scale = 0.5
-		if time.Since(p.started) > bodyHardTTL {
-			scale = 1 / float64(MaxBodyFetch) // reduces capacity to 1
+		if time.Since(started) > hardTTL {
+			scale = 1 / float64(maxFetch) // reduces capacity to 1
 		}
 	}
 	for {
 		// Calculate the new download bandwidth allowance
-		prev := atomic.LoadInt32(&p.capacity)
-		next := int32(math.Max(1, math.Min(float64(MaxBodyFetch), float64(prev)*scale)))
+		prev := atomic.LoadInt32(capacity)
+		next := int32(math.Max(1, math.Min(float64(maxFetch), float64(prev)*scale)))
 
 		// Try to update the old value
-		if atomic.CompareAndSwapInt32(&p.capacity, prev, next) {
+		if atomic.CompareAndSwapInt32(capacity, prev, next) {
 			// If we're having problems at 1 capacity, try to find better peers
 			if next == 1 {
 				p.Demote()
@@ -193,14 +264,26 @@ func (p *peer) SetIdle() {
 			break
 		}
 	}
-	// Set the peer to idle to allow further block requests
-	atomic.StoreInt32(&p.idle, 0)
+	// Set the peer to idle to allow further fetch requests
+	atomic.StoreInt32(idle, 0)
+}
+
+// BlockCapacity retrieves the peer's block download allowance based on its
+// previously discovered bandwidth capacity.
+func (p *peer) BlockCapacity() int {
+	return int(atomic.LoadInt32(&p.blockCapacity))
+}
+
+// ReceiptCapacity retrieves the peer's receipt download allowance based on its
+// previously discovered bandwidth capacity.
+func (p *peer) ReceiptCapacity() int {
+	return int(atomic.LoadInt32(&p.receiptCapacity))
 }
 
-// Capacity retrieves the peers block download allowance based on its previously
-// discovered bandwidth capacity.
-func (p *peer) Capacity() int {
-	return int(atomic.LoadInt32(&p.capacity))
+// NodeDataCapacity retrieves the peer's node data download allowance based on its
+// previously discovered bandwidth capacity.
+func (p *peer) NodeDataCapacity() int {
+	return int(atomic.LoadInt32(&p.stateCapacity))
 }
 
 // Promote increases the peer's reputation.
@@ -226,7 +309,8 @@ func (p *peer) Demote() {
 func (p *peer) String() string {
 	return fmt.Sprintf("Peer %s [%s]", p.id,
 		fmt.Sprintf("reputation %3d, ", atomic.LoadInt32(&p.rep))+
-			fmt.Sprintf("capacity %3d, ", atomic.LoadInt32(&p.capacity))+
+			fmt.Sprintf("block cap %3d, ", atomic.LoadInt32(&p.blockCapacity))+
+			fmt.Sprintf("receipt cap %3d, ", atomic.LoadInt32(&p.receiptCapacity))+
 			fmt.Sprintf("ignored %4d", p.ignored.Size()),
 	)
 }
@@ -310,26 +394,63 @@ func (ps *peerSet) AllPeers() []*peer {
 	return list
 }
 
-// IdlePeers retrieves a flat list of all the currently idle peers within the
+// BlockIdlePeers retrieves a flat list of all the currently idle peers within the
 // active peer set, ordered by their reputation.
-func (ps *peerSet) IdlePeers(version int) []*peer {
+func (ps *peerSet) BlockIdlePeers() ([]*peer, int) {
+	idle := func(p *peer) bool {
+		return atomic.LoadInt32(&p.blockIdle) == 0
+	}
+	return ps.idlePeers(61, 61, idle)
+}
+
+// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
+// the active peer set, ordered by their reputation.
+func (ps *peerSet) BodyIdlePeers() ([]*peer, int) {
+	idle := func(p *peer) bool {
+		return atomic.LoadInt32(&p.blockIdle) == 0
+	}
+	return ps.idlePeers(62, 64, idle)
+}
+
+// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
+// within the active peer set, ordered by their reputation.
+func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) {
+	idle := func(p *peer) bool {
+		return atomic.LoadInt32(&p.receiptIdle) == 0
+	}
+	return ps.idlePeers(63, 64, idle)
+}
+
+// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
+// peers within the active peer set, ordered by their reputation.
+func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) {
+	idle := func(p *peer) bool {
+		return atomic.LoadInt32(&p.stateIdle) == 0
+	}
+	return ps.idlePeers(63, 64, idle)
+}
+
+// idlePeers retrieves a flat list of all currently idle peers satisfying the
+// protocol version constraints, using the provided function to check idleness.
+func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool) ([]*peer, int) {
 	ps.lock.RLock()
 	defer ps.lock.RUnlock()
 
-	list := make([]*peer, 0, len(ps.peers))
+	idle, total := make([]*peer, 0, len(ps.peers)), 0
 	for _, p := range ps.peers {
-		if (version == eth61 && p.version == eth61) || (version >= eth62 && p.version >= eth62) {
-			if atomic.LoadInt32(&p.idle) == 0 {
-				list = append(list, p)
+		if p.version >= minProtocol && p.version <= maxProtocol {
+			if idleCheck(p) {
+				idle = append(idle, p)
 			}
+			total++
 		}
 	}
-	for i := 0; i < len(list); i++ {
-		for j := i + 1; j < len(list); j++ {
-			if atomic.LoadInt32(&list[i].rep) < atomic.LoadInt32(&list[j].rep) {
-				list[i], list[j] = list[j], list[i]
+	for i := 0; i < len(idle); i++ {
+		for j := i + 1; j < len(idle); j++ {
+			if atomic.LoadInt32(&idle[i].rep) < atomic.LoadInt32(&idle[j].rep) {
+				idle[i], idle[j] = idle[j], idle[i]
 			}
 		}
 	}
-	return list
+	return idle, total
 }
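
For intuition, the allowance update in setIdle above is a multiplicative-increase/multiplicative-decrease rule. Extracted as a pure function for clarity (a hypothetical helper, not part of the change):

	func nextCapacity(started time.Time, softTTL, hardTTL time.Duration, maxFetch int, prev int32) int32 {
		scale := 2.0 // timely delivery: double the allowance
		if time.Since(started) > softTTL {
			scale = 0.5 // slow delivery: halve it
			if time.Since(started) > hardTTL {
				scale = 1 / float64(maxFetch) // timed out: collapse back to a single item
			}
		}
		return int32(math.Max(1, math.Min(float64(maxFetch), float64(prev)*scale)))
	}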

File diff view limited because it is too large
+ 636 - 198
eth/downloader/queue.go


+ 140 - 0
eth/downloader/types.go

@@ -0,0 +1,140 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+// headerCheckFn is a callback type for verifying a header's presence in the local chain.
+type headerCheckFn func(common.Hash) bool
+
+// blockCheckFn is a callback type for verifying a block's presence in the local chain.
+type blockCheckFn func(common.Hash) bool
+
+// headerRetrievalFn is a callback type for retrieving a header from the local chain.
+type headerRetrievalFn func(common.Hash) *types.Header
+
+// blockRetrievalFn is a callback type for retrieving a block from the local chain.
+type blockRetrievalFn func(common.Hash) *types.Block
+
+// headHeaderRetrievalFn is a callback type for retrieving the head header from the local chain.
+type headHeaderRetrievalFn func() *types.Header
+
+// headBlockRetrievalFn is a callback type for retrieving the head block from the local chain.
+type headBlockRetrievalFn func() *types.Block
+
+// headFastBlockRetrievalFn is a callback type for retrieving the head fast block from the local chain.
+type headFastBlockRetrievalFn func() *types.Block
+
+// headBlockCommitterFn is a callback for directly committing the head block to a certain entity.
+type headBlockCommitterFn func(common.Hash) error
+
+// tdRetrievalFn is a callback type for retrieving the total difficulty of a local block.
+type tdRetrievalFn func(common.Hash) *big.Int
+
+// headerChainInsertFn is a callback type to insert a batch of headers into the local chain.
+type headerChainInsertFn func([]*types.Header, int) (int, error)
+
+// blockChainInsertFn is a callback type to insert a batch of blocks into the local chain.
+type blockChainInsertFn func(types.Blocks) (int, error)
+
+// receiptChainInsertFn is a callback type to insert a batch of receipts into the local chain.
+type receiptChainInsertFn func(types.Blocks, []types.Receipts) (int, error)
+
+// chainRollbackFn is a callback type to remove a few recently added elements from the local chain.
+type chainRollbackFn func([]common.Hash)
+
+// peerDropFn is a callback type for dropping a peer detected as malicious.
+type peerDropFn func(id string)
+
+// dataPack is a data message returned by a peer for some query.
+type dataPack interface {
+	PeerId() string
+	Items() int
+	Stats() string
+}
+
+// hashPack is a batch of block hashes returned by a peer (eth/61).
+type hashPack struct {
+	peerId string
+	hashes []common.Hash
+}
+
+func (p *hashPack) PeerId() string { return p.peerId }
+func (p *hashPack) Items() int     { return len(p.hashes) }
+func (p *hashPack) Stats() string  { return fmt.Sprintf("%d", len(p.hashes)) }
+
+// blockPack is a batch of blocks returned by a peer (eth/61).
+type blockPack struct {
+	peerId string
+	blocks []*types.Block
+}
+
+func (p *blockPack) PeerId() string { return p.peerId }
+func (p *blockPack) Items() int     { return len(p.blocks) }
+func (p *blockPack) Stats() string  { return fmt.Sprintf("%d", len(p.blocks)) }
+
+// headerPack is a batch of block headers returned by a peer.
+type headerPack struct {
+	peerId  string
+	headers []*types.Header
+}
+
+func (p *headerPack) PeerId() string { return p.peerId }
+func (p *headerPack) Items() int     { return len(p.headers) }
+func (p *headerPack) Stats() string  { return fmt.Sprintf("%d", len(p.headers)) }
+
+// bodyPack is a batch of block bodies returned by a peer.
+type bodyPack struct {
+	peerId       string
+	transactions [][]*types.Transaction
+	uncles       [][]*types.Header
+}
+
+func (p *bodyPack) PeerId() string { return p.peerId }
+func (p *bodyPack) Items() int {
+	if len(p.transactions) <= len(p.uncles) {
+		return len(p.transactions)
+	}
+	return len(p.uncles)
+}
+func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) }
+
+// receiptPack is a batch of receipts returned by a peer.
+type receiptPack struct {
+	peerId   string
+	receipts [][]*types.Receipt
+}
+
+func (p *receiptPack) PeerId() string { return p.peerId }
+func (p *receiptPack) Items() int     { return len(p.receipts) }
+func (p *receiptPack) Stats() string  { return fmt.Sprintf("%d", len(p.receipts)) }
+
+// statePack is a batch of states returned by a peer.
+type statePack struct {
+	peerId string
+	states [][]byte
+}
+
+func (p *statePack) PeerId() string { return p.peerId }
+func (p *statePack) Items() int     { return len(p.states) }
+func (p *statePack) Stats() string  { return fmt.Sprintf("%d", len(p.states)) }

+ 20 - 6
eth/fetcher/fetcher.go

@@ -142,9 +142,11 @@ type Fetcher struct {
 	dropPeer       peerDropFn         // Drops a peer for misbehaving
 
 	// Testing hooks
-	fetchingHook   func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
-	completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
-	importedHook   func(*types.Block)  // Method to call upon successful block import (both eth/61 and eth/62)
+	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
+	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
+	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
+	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
+	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
 }
 
 // New creates a block fetcher to retrieve blocks based on hash announcements.
@@ -324,11 +326,16 @@ func (f *Fetcher) loop() {
 		height := f.chainHeight()
 		for !f.queue.Empty() {
 			op := f.queue.PopItem().(*inject)
-
+			if f.queueChangeHook != nil {
+				f.queueChangeHook(op.block.Hash(), false)
+			}
 			// If too high up the chain or phase, continue later
 			number := op.block.NumberU64()
 			if number > height+1 {
 				f.queue.Push(op, -float32(op.block.NumberU64()))
+				if f.queueChangeHook != nil {
+					f.queueChangeHook(op.block.Hash(), true)
+				}
 				break
 			}
 			// Otherwise if fresh and still unknown, try and import
@@ -372,6 +379,9 @@ func (f *Fetcher) loop() {
 			}
 			}
 			f.announces[notification.origin] = count
 			f.announces[notification.origin] = count
 			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
 			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
+			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
+				f.announceChangeHook(notification.hash, true)
+			}
 			if len(f.announced) == 1 {
 			if len(f.announced) == 1 {
 				f.rescheduleFetch(fetchTimer)
 				f.rescheduleFetch(fetchTimer)
 			}
 			}
@@ -714,7 +724,9 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 		f.queues[peer] = count
 		f.queues[peer] = count
 		f.queued[hash] = op
 		f.queued[hash] = op
 		f.queue.Push(op, -float32(block.NumberU64()))
 		f.queue.Push(op, -float32(block.NumberU64()))
-
+		if f.queueChangeHook != nil {
+			f.queueChangeHook(op.block.Hash(), true)
+		}
 		if glog.V(logger.Debug) {
 		if glog.V(logger.Debug) {
 			glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
 			glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
 		}
 		}
@@ -781,7 +793,9 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
 		}
 		}
 	}
 	}
 	delete(f.announced, hash)
 	delete(f.announced, hash)
-
+	if f.announceChangeHook != nil {
+		f.announceChangeHook(hash, false)
+	}
 	// Remove any pending fetches and decrement the DOS counters
 	// Remove any pending fetches and decrement the DOS counters
 	if announce := f.fetching[hash]; announce != nil {
 	if announce := f.fetching[hash]; announce != nil {
 		f.announces[announce.origin]--
 		f.announces[announce.origin]--
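
The two hooks added above fire whenever a hash enters or leaves the announce list or the import queue. Their purpose is test instrumentation: reading len(f.announced) or f.queue.Size() from a test races the fetcher's own goroutine, while a hook-fed atomic counter does not. A minimal sketch of the intended wiring (counter name illustrative):

	var queued int32 // observed queue size, maintained only via the hook

	f.queueChangeHook = func(hash common.Hash, added bool) {
		if added {
			atomic.AddInt32(&queued, 1) // block entered the import queue
		} else {
			atomic.AddInt32(&queued, -1) // block was popped or dropped
		}
	}

	// From the test goroutine, read the counter race-free:
	if atomic.LoadInt32(&queued) != 0 {
		// assert on the observed size here
	}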

+ 40 - 11
eth/fetcher/fetcher_test.go

@@ -45,7 +45,7 @@ var (
 // contains a transaction and every 5th an uncle to allow testing correct block
 // contains a transaction and every 5th an uncle to allow testing correct block
 // reassembly.
 // reassembly.
 func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
 func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
-	blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
+	blocks, _ := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
 		block.SetCoinbase(common.Address{seed})
 		block.SetCoinbase(common.Address{seed})
 
 
 		// If the block number is multiple of 3, send a bonus transaction to the miner
 		// If the block number is multiple of 3, send a bonus transaction to the miner
@@ -145,6 +145,9 @@ func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
 // dropPeer is an emulator for the peer removal, simply accumulating the various
 // dropPeer is an emulator for the peer removal, simply accumulating the various
 // peers dropped by the fetcher.
 // peers dropped by the fetcher.
 func (f *fetcherTester) dropPeer(peer string) {
 func (f *fetcherTester) dropPeer(peer string) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
 	f.drops[peer] = true
 	f.drops[peer] = true
 }
 }
 
 
@@ -608,8 +611,11 @@ func TestDistantPropagationDiscarding(t *testing.T) {
 
 
 	// Create a tester and simulate a head block being the middle of the above chain
 	// Create a tester and simulate a head block being the middle of the above chain
 	tester := newTester()
 	tester := newTester()
+
+	tester.lock.Lock()
 	tester.hashes = []common.Hash{head}
 	tester.hashes = []common.Hash{head}
 	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
 	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
+	tester.lock.Unlock()
 
 
 	// Ensure that a block with a lower number than the threshold is discarded
 	// Ensure that a block with a lower number than the threshold is discarded
 	tester.fetcher.Enqueue("lower", blocks[hashes[low]])
 	tester.fetcher.Enqueue("lower", blocks[hashes[low]])
@@ -641,8 +647,11 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
 
 
 	// Create a tester and simulate a head block being the middle of the above chain
 	// Create a tester and simulate a head block being the middle of the above chain
 	tester := newTester()
 	tester := newTester()
+
+	tester.lock.Lock()
 	tester.hashes = []common.Hash{head}
 	tester.hashes = []common.Hash{head}
 	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
 	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
+	tester.lock.Unlock()
 
 
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
@@ -687,14 +696,22 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
 	tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
 	tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
 	verifyImportEvent(t, imported, false)
 	verifyImportEvent(t, imported, false)
 
 
-	if !tester.drops["bad"] {
+	tester.lock.RLock()
+	dropped := tester.drops["bad"]
+	tester.lock.RUnlock()
+
+	if !dropped {
 		t.Fatalf("peer with invalid numbered announcement not dropped")
 		t.Fatalf("peer with invalid numbered announcement not dropped")
 	}
 	}
 	// Make sure a good announcement passes without a drop
 	// Make sure a good announcement passes without a drop
 	tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
 	tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
 	verifyImportEvent(t, imported, true)
 	verifyImportEvent(t, imported, true)
 
 
-	if tester.drops["good"] {
+	tester.lock.RLock()
+	dropped = tester.drops["good"]
+	tester.lock.RUnlock()
+
+	if dropped {
 		t.Fatalf("peer with valid numbered announcement dropped")
 		t.Fatalf("peer with valid numbered announcement dropped")
 	}
 	}
 	verifyImportDone(t, imported)
 	verifyImportDone(t, imported)
@@ -752,9 +769,15 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
 	// Create a tester with instrumented import hooks
 	// Create a tester with instrumented import hooks
 	tester := newTester()
 	tester := newTester()
 
 
-	imported := make(chan *types.Block)
+	imported, announces := make(chan *types.Block), int32(0)
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
-
+	tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) {
+		if added {
+			atomic.AddInt32(&announces, 1)
+		} else {
+			atomic.AddInt32(&announces, -1)
+		}
+	}
 	// Create a valid chain and an infinite junk chain
 	// Create a valid chain and an infinite junk chain
 	targetBlocks := hashLimit + 2*maxQueueDist
 	targetBlocks := hashLimit + 2*maxQueueDist
 	hashes, blocks := makeChain(targetBlocks, 0, genesis)
 	hashes, blocks := makeChain(targetBlocks, 0, genesis)
@@ -782,8 +805,8 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
 			tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), nil, attackerHeaderFetcher, attackerBodyFetcher)
 			tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), nil, attackerHeaderFetcher, attackerBodyFetcher)
 		}
 		}
 	}
 	}
-	if len(tester.fetcher.announced) != hashLimit+maxQueueDist {
-		t.Fatalf("queued announce count mismatch: have %d, want %d", len(tester.fetcher.announced), hashLimit+maxQueueDist)
+	if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist {
+		t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist)
 	}
 	}
 	// Wait for fetches to complete
 	// Wait for fetches to complete
 	verifyImportCount(t, imported, maxQueueDist)
 	verifyImportCount(t, imported, maxQueueDist)
@@ -807,9 +830,15 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
 	// Create a tester with instrumented import hooks
 	// Create a tester with instrumented import hooks
 	tester := newTester()
 	tester := newTester()
 
 
-	imported := make(chan *types.Block)
+	imported, enqueued := make(chan *types.Block), int32(0)
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
-
+	tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) {
+		if added {
+			atomic.AddInt32(&enqueued, 1)
+		} else {
+			atomic.AddInt32(&enqueued, -1)
+		}
+	}
 	// Create a valid chain and a batch of dangling (but in range) blocks
 	// Create a valid chain and a batch of dangling (but in range) blocks
 	targetBlocks := hashLimit + 2*maxQueueDist
 	targetBlocks := hashLimit + 2*maxQueueDist
 	hashes, blocks := makeChain(targetBlocks, 0, genesis)
 	hashes, blocks := makeChain(targetBlocks, 0, genesis)
@@ -825,7 +854,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
 		tester.fetcher.Enqueue("attacker", block)
 		tester.fetcher.Enqueue("attacker", block)
 	}
 	}
 	time.Sleep(200 * time.Millisecond)
 	time.Sleep(200 * time.Millisecond)
-	if queued := tester.fetcher.queue.Size(); queued != blockLimit {
+	if queued := atomic.LoadInt32(&enqueued); queued != blockLimit {
 		t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit)
 		t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit)
 	}
 	}
 	// Queue up a batch of valid blocks, and check that a new peer is allowed to do so
 	// Queue up a batch of valid blocks, and check that a new peer is allowed to do so
@@ -833,7 +862,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
 		tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]])
 		tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]])
 	}
 	}
 	time.Sleep(100 * time.Millisecond)
 	time.Sleep(100 * time.Millisecond)
-	if queued := tester.fetcher.queue.Size(); queued != blockLimit+maxQueueDist-1 {
+	if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 {
 		t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1)
 		t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1)
 	}
 	}
 	// Insert the missing piece (and sanity check the import)
 	// Insert the missing piece (and sanity check the import)

+ 1 - 1
eth/filters/filter.go

@@ -138,7 +138,7 @@ func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) {
 				unfiltered vm.Logs
 				unfiltered vm.Logs
 			)
 			)
 			for _, receipt := range receipts {
 			for _, receipt := range receipts {
-				unfiltered = append(unfiltered, receipt.Logs()...)
+				unfiltered = append(unfiltered, receipt.Logs...)
 			}
 			}
 			logs = append(logs, self.FilterLogs(unfiltered)...)
 			logs = append(logs, self.FilterLogs(unfiltered)...)
 		}
 		}
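
With SetLogs gone, receipt.Logs is a plain exported field, so the filter can range over it directly. A condensed sketch of the consuming pattern (variable names follow the hunk):

	// Flatten the per-receipt logs of one block before applying the filter.
	var unfiltered vm.Logs
	for _, receipt := range receipts {
		unfiltered = append(unfiltered, receipt.Logs...) // direct field access
	}
	logs = append(logs, self.FilterLogs(unfiltered)...)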

+ 16 - 17
eth/filters/filter_test.go

@@ -16,9 +16,9 @@ import (
 
 
 func makeReceipt(addr common.Address) *types.Receipt {
 func makeReceipt(addr common.Address) *types.Receipt {
 	receipt := types.NewReceipt(nil, new(big.Int))
 	receipt := types.NewReceipt(nil, new(big.Int))
-	receipt.SetLogs(vm.Logs{
+	receipt.Logs = vm.Logs{
 		&vm.Log{Address: addr},
 		&vm.Log{Address: addr},
-	})
+	}
 	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
 	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
 	return receipt
 	return receipt
 }
 }
@@ -41,7 +41,7 @@ func BenchmarkMipmaps(b *testing.B) {
 	defer db.Close()
 	defer db.Close()
 
 
 	genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr1, big.NewInt(1000000)})
 	genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr1, big.NewInt(1000000)})
-	chain := core.GenerateChain(genesis, db, 100010, func(i int, gen *core.BlockGen) {
+	chain, receipts := core.GenerateChain(genesis, db, 100010, func(i int, gen *core.BlockGen) {
 		var receipts types.Receipts
 		var receipts types.Receipts
 		switch i {
 		switch i {
 		case 2403:
 		case 2403:
@@ -70,7 +70,7 @@ func BenchmarkMipmaps(b *testing.B) {
 		}
 		}
 		core.WriteMipmapBloom(db, uint64(i+1), receipts)
 		core.WriteMipmapBloom(db, uint64(i+1), receipts)
 	})
 	})
-	for _, block := range chain {
+	for i, block := range chain {
 		core.WriteBlock(db, block)
 		core.WriteBlock(db, block)
 		if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 		if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 			b.Fatalf("failed to insert block number: %v", err)
 			b.Fatalf("failed to insert block number: %v", err)
@@ -78,11 +78,10 @@ func BenchmarkMipmaps(b *testing.B) {
 		if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
 		if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
 			b.Fatalf("failed to insert block number: %v", err)
 			b.Fatalf("failed to insert block number: %v", err)
 		}
 		}
-		if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil {
+		if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
 			b.Fatal("error writing block receipts:", err)
 			b.Fatal("error writing block receipts:", err)
 		}
 		}
 	}
 	}
-
 	b.ResetTimer()
 	b.ResetTimer()
 
 
 	filter := New(db)
 	filter := New(db)
@@ -118,47 +117,47 @@ func TestFilters(t *testing.T) {
 	defer db.Close()
 	defer db.Close()
 
 
 	genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr, big.NewInt(1000000)})
 	genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr, big.NewInt(1000000)})
-	chain := core.GenerateChain(genesis, db, 1000, func(i int, gen *core.BlockGen) {
+	chain, receipts := core.GenerateChain(genesis, db, 1000, func(i int, gen *core.BlockGen) {
 		var receipts types.Receipts
 		var receipts types.Receipts
 		switch i {
 		switch i {
 		case 1:
 		case 1:
 			receipt := types.NewReceipt(nil, new(big.Int))
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 				&vm.Log{
 					Address: addr,
 					Address: addr,
 					Topics:  []common.Hash{hash1},
 					Topics:  []common.Hash{hash1},
 				},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 			receipts = types.Receipts{receipt}
 		case 2:
 		case 2:
 			receipt := types.NewReceipt(nil, new(big.Int))
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 				&vm.Log{
 					Address: addr,
 					Address: addr,
 					Topics:  []common.Hash{hash2},
 					Topics:  []common.Hash{hash2},
 				},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 			receipts = types.Receipts{receipt}
 		case 998:
 		case 998:
 			receipt := types.NewReceipt(nil, new(big.Int))
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 				&vm.Log{
 					Address: addr,
 					Address: addr,
 					Topics:  []common.Hash{hash3},
 					Topics:  []common.Hash{hash3},
 				},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 			receipts = types.Receipts{receipt}
 		case 999:
 		case 999:
 			receipt := types.NewReceipt(nil, new(big.Int))
 			receipt := types.NewReceipt(nil, new(big.Int))
-			receipt.SetLogs(vm.Logs{
+			receipt.Logs = vm.Logs{
 				&vm.Log{
 				&vm.Log{
 					Address: addr,
 					Address: addr,
 					Topics:  []common.Hash{hash4},
 					Topics:  []common.Hash{hash4},
 				},
 				},
-			})
+			}
 			gen.AddUncheckedReceipt(receipt)
 			gen.AddUncheckedReceipt(receipt)
 			receipts = types.Receipts{receipt}
 			receipts = types.Receipts{receipt}
 		}
 		}
@@ -173,7 +172,7 @@ func TestFilters(t *testing.T) {
 		// by one
 		// by one
 		core.WriteMipmapBloom(db, uint64(i+1), receipts)
 		core.WriteMipmapBloom(db, uint64(i+1), receipts)
 	})
 	})
-	for _, block := range chain {
+	for i, block := range chain {
 		core.WriteBlock(db, block)
 		core.WriteBlock(db, block)
 		if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 		if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
 			t.Fatalf("failed to insert block number: %v", err)
@@ -181,7 +180,7 @@ func TestFilters(t *testing.T) {
 		if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
 		if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
 			t.Fatalf("failed to insert block number: %v", err)
 			t.Fatalf("failed to insert block number: %v", err)
 		}
 		}
-		if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil {
+		if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
 			t.Fatal("error writing block receipts:", err)
 			t.Fatal("error writing block receipts:", err)
 		}
 		}
 	}
 	}
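
The filter tests track an API change from this PR: GenerateChain now returns the receipts of every generated block alongside the blocks, and PutBlockReceipts is keyed by block hash rather than taking the whole block. The persistence flow both tests use, distilled (error handling trimmed):

	chain, receipts := core.GenerateChain(genesis, db, n, gen)
	for i, block := range chain {
		core.WriteBlock(db, block)
		// receipts is indexed in lockstep with chain, so receipts[i]
		// always belongs to chain[i]; the hash is now the storage key.
		if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil {
			t.Fatal("error writing block receipts:", err)
		}
	}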

+ 99 - 71
eth/handler.go

@@ -17,6 +17,7 @@
 package eth
 package eth
 
 
 import (
 import (
+	"errors"
 	"fmt"
 	"fmt"
 	"math"
 	"math"
 	"math/big"
 	"math/big"
@@ -42,6 +43,10 @@ const (
 	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
 	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
 )
 )
 
 
+// errIncompatibleConfig is returned if the requested protocols and configs are
+// not compatible (low protocol version restrictions and high requirements).
+var errIncompatibleConfig = errors.New("incompatible configuration")
+
 func errResp(code errCode, format string, v ...interface{}) error {
 func errResp(code errCode, format string, v ...interface{}) error {
 	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
 	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
 }
 }
@@ -49,17 +54,8 @@ func errResp(code errCode, format string, v ...interface{}) error {
 type hashFetcherFn func(common.Hash) error
 type hashFetcherFn func(common.Hash) error
 type blockFetcherFn func([]common.Hash) error
 type blockFetcherFn func([]common.Hash) error
 
 
-// extProt is an interface which is passed around so we can expose GetHashes and GetBlock without exposing it to the rest of the protocol
-// extProt is passed around to peers which require to GetHashes and GetBlocks
-type extProt struct {
-	getHashes hashFetcherFn
-	getBlocks blockFetcherFn
-}
-
-func (ep extProt) GetHashes(hash common.Hash) error    { return ep.getHashes(hash) }
-func (ep extProt) GetBlock(hashes []common.Hash) error { return ep.getBlocks(hashes) }
-
 type ProtocolManager struct {
 type ProtocolManager struct {
+	fastSync   bool
 	txpool     txPool
 	txpool     txPool
 	blockchain *core.BlockChain
 	blockchain *core.BlockChain
 	chaindb    ethdb.Database
 	chaindb    ethdb.Database
@@ -87,9 +83,15 @@ type ProtocolManager struct {
 
 
 // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
 // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
 // of communicating with the ethereum network.
 // of communicating with the ethereum network.
-func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) *ProtocolManager {
+func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
+	// Figure out whether to allow fast sync or not
+	if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
+		glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled")
+		fastSync = false
+	}
 	// Create the protocol manager with the base fields
 	// Create the protocol manager with the base fields
 	manager := &ProtocolManager{
 	manager := &ProtocolManager{
+		fastSync:   fastSync,
 		eventMux:   mux,
 		eventMux:   mux,
 		txpool:     txpool,
 		txpool:     txpool,
 		blockchain: blockchain,
 		blockchain: blockchain,
@@ -100,11 +102,15 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po
 		quitSync:   make(chan struct{}),
 		quitSync:   make(chan struct{}),
 	}
 	}
 	// Initiate a sub-protocol for every implemented version we can handle
 	// Initiate a sub-protocol for every implemented version we can handle
-	manager.SubProtocols = make([]p2p.Protocol, len(ProtocolVersions))
-	for i := 0; i < len(manager.SubProtocols); i++ {
-		version := ProtocolVersions[i]
-
-		manager.SubProtocols[i] = p2p.Protocol{
+	manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
+	for i, version := range ProtocolVersions {
+		// Skip protocol version if incompatible with the mode of operation
+		if fastSync && version < eth63 {
+			continue
+		}
+		// Compatible; initialise the sub-protocol
+		version := version // Closure for the run
+		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
 			Name:    "eth",
 			Name:    "eth",
 			Version: version,
 			Version: version,
 			Length:  ProtocolLengths[i],
 			Length:  ProtocolLengths[i],
@@ -113,20 +119,25 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po
 				manager.newPeerCh <- peer
 				manager.newPeerCh <- peer
 				return manager.handle(peer)
 				return manager.handle(peer)
 			},
 			},
-		}
+		})
+	}
+	if len(manager.SubProtocols) == 0 {
+		return nil, errIncompatibleConfig
 	}
 	}
 	// Construct the different synchronisation mechanisms
 	// Construct the different synchronisation mechanisms
-	manager.downloader = downloader.New(manager.eventMux, manager.blockchain.HasBlock, manager.blockchain.GetBlock, manager.blockchain.CurrentBlock, manager.blockchain.GetTd, manager.blockchain.InsertChain, manager.removePeer)
+	manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock,
+		blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, blockchain.GetTd,
+		blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer)
 
 
 	validator := func(block *types.Block, parent *types.Block) error {
 	validator := func(block *types.Block, parent *types.Block) error {
 		return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
 		return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
 	}
 	}
 	heighter := func() uint64 {
 	heighter := func() uint64 {
-		return manager.blockchain.CurrentBlock().NumberU64()
+		return blockchain.CurrentBlock().NumberU64()
 	}
 	}
-	manager.fetcher = fetcher.New(manager.blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, manager.blockchain.InsertChain, manager.removePeer)
+	manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, blockchain.InsertChain, manager.removePeer)
 
 
-	return manager
+	return manager, nil
 }
 }
 
 
 func (pm *ProtocolManager) removePeer(id string) {
 func (pm *ProtocolManager) removePeer(id string) {
@@ -205,8 +216,8 @@ func (pm *ProtocolManager) handle(p *peer) error {
 
 
 	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
 	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
 	if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(),
 	if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(),
-		p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks,
-		p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies); err != nil {
+		p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks, p.RequestHeadersByHash,
+		p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts, p.RequestNodeData); err != nil {
 		return err
 		return err
 	}
 	}
 	// Propagate existing transactions. new transactions appearing
 	// Propagate existing transactions. new transactions appearing
@@ -292,7 +303,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			break
 			break
 		}
 		}
 		// Deliver them all to the downloader for queuing
 		// Deliver them all to the downloader for queuing
-		err := pm.downloader.DeliverHashes61(p.id, hashes)
+		err := pm.downloader.DeliverHashes(p.id, hashes)
 		if err != nil {
 		if err != nil {
 			glog.V(logger.Debug).Infoln(err)
 			glog.V(logger.Debug).Infoln(err)
 		}
 		}
@@ -338,7 +349,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		}
 		// Filter out any explicitly requested blocks, deliver the rest to the downloader
 		// Filter out any explicitly requested blocks, deliver the rest to the downloader
 		if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
 		if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
-			pm.downloader.DeliverBlocks61(p.id, blocks)
+			pm.downloader.DeliverBlocks(p.id, blocks)
 		}
 		}
 
 
 	// Block header query, collect the requested headers and reply
 	// Block header query, collect the requested headers and reply
@@ -424,28 +435,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 			}
 		}
 		}
 
 
-	case p.version >= eth62 && msg.Code == BlockBodiesMsg:
-		// A batch of block bodies arrived to one of our previous requests
-		var request blockBodiesData
-		if err := msg.Decode(&request); err != nil {
-			return errResp(ErrDecode, "msg %v: %v", msg, err)
-		}
-		// Deliver them all to the downloader for queuing
-		trasactions := make([][]*types.Transaction, len(request))
-		uncles := make([][]*types.Header, len(request))
-
-		for i, body := range request {
-			trasactions[i] = body.Transactions
-			uncles[i] = body.Uncles
-		}
-		// Filter out any explicitly requested bodies, deliver the rest to the downloader
-		if trasactions, uncles := pm.fetcher.FilterBodies(trasactions, uncles, time.Now()); len(trasactions) > 0 || len(uncles) > 0 {
-			err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
-			if err != nil {
-				glog.V(logger.Debug).Infoln(err)
-			}
-		}
-
 	case p.version >= eth62 && msg.Code == GetBlockBodiesMsg:
 	case p.version >= eth62 && msg.Code == GetBlockBodiesMsg:
 		// Decode the retrieval message
 		// Decode the retrieval message
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
@@ -473,6 +462,28 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		}
 		return p.SendBlockBodiesRLP(bodies)
 		return p.SendBlockBodiesRLP(bodies)
 
 
+	case p.version >= eth62 && msg.Code == BlockBodiesMsg:
+		// A batch of block bodies arrived to one of our previous requests
+		var request blockBodiesData
+		if err := msg.Decode(&request); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Deliver them all to the downloader for queuing
+		trasactions := make([][]*types.Transaction, len(request))
+		uncles := make([][]*types.Header, len(request))
+
+		for i, body := range request {
+			trasactions[i] = body.Transactions
+			uncles[i] = body.Uncles
+		}
+		// Filter out any explicitly requested bodies, deliver the rest to the downloader
+		if trasactions, uncles := pm.fetcher.FilterBodies(trasactions, uncles, time.Now()); len(trasactions) > 0 || len(uncles) > 0 {
+			err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
+			if err != nil {
+				glog.V(logger.Debug).Infoln(err)
+			}
+		}
+
 	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
 	case p.version >= eth63 && msg.Code == GetNodeDataMsg:
 		// Decode the retrieval message
 		// Decode the retrieval message
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
@@ -500,6 +511,17 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		}
 		return p.SendNodeData(data)
 		return p.SendNodeData(data)
 
 
+	case p.version >= eth63 && msg.Code == NodeDataMsg:
+		// A batch of node state data arrived to one of our previous requests
+		var data [][]byte
+		if err := msg.Decode(&data); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Deliver all to the downloader
+		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
+			glog.V(logger.Debug).Infof("failed to deliver node state data: %v", err)
+		}
+
 	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
 	case p.version >= eth63 && msg.Code == GetReceiptsMsg:
 		// Decode the retrieval message
 		// Decode the retrieval message
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
@@ -510,22 +532,42 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		var (
 		var (
 			hash     common.Hash
 			hash     common.Hash
 			bytes    int
 			bytes    int
-			receipts []*types.Receipt
+			receipts []rlp.RawValue
 		)
 		)
-		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptsFetch {
-			// Retrieve the hash of the next transaction receipt
+		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
+			// Retrieve the hash of the next block
 			if err := msgStream.Decode(&hash); err == rlp.EOL {
 			if err := msgStream.Decode(&hash); err == rlp.EOL {
 				break
 				break
 			} else if err != nil {
 			} else if err != nil {
 				return errResp(ErrDecode, "msg %v: %v", msg, err)
 				return errResp(ErrDecode, "msg %v: %v", msg, err)
 			}
 			}
-			// Retrieve the requested receipt, stopping if enough was found
-			if receipt := core.GetReceipt(pm.chaindb, hash); receipt != nil {
-				receipts = append(receipts, receipt)
-				bytes += len(receipt.RlpEncode())
+			// Retrieve the requested block's receipts, skipping if unknown to us
+			results := core.GetBlockReceipts(pm.chaindb, hash)
+			if results == nil {
+				if header := pm.blockchain.GetHeader(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
+					continue
+				}
+			}
+			// If known, encode and queue for response packet
+			if encoded, err := rlp.EncodeToBytes(results); err != nil {
+				glog.V(logger.Error).Infof("failed to encode receipt: %v", err)
+			} else {
+				receipts = append(receipts, encoded)
+				bytes += len(encoded)
 			}
 			}
 		}
 		}
-		return p.SendReceipts(receipts)
+		return p.SendReceiptsRLP(receipts)
+
+	case p.version >= eth63 && msg.Code == ReceiptsMsg:
+		// A batch of receipts arrived to one of our previous requests
+		var receipts [][]*types.Receipt
+		if err := msg.Decode(&receipts); err != nil {
+			return errResp(ErrDecode, "msg %v: %v", msg, err)
+		}
+		// Deliver all to the downloader
+		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
+			glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err)
+		}
 
 
 	case msg.Code == NewBlockHashesMsg:
 	case msg.Code == NewBlockHashesMsg:
 		// Retrieve and deserialize the remote new block hashes notification
 		// Retrieve and deserialize the remote new block hashes notification
@@ -585,15 +627,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		}
 		request.Block.ReceivedAt = msg.ReceivedAt
 		request.Block.ReceivedAt = msg.ReceivedAt
 
 
-		// Mark the block's arrival for whatever reason
-		_, chainHead, _ := pm.blockchain.Status()
-		jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{
-			BlockHash:     request.Block.Hash().Hex(),
-			BlockNumber:   request.Block.Number(),
-			ChainHeadHash: chainHead.Hex(),
-			BlockPrevHash: request.Block.ParentHash().Hex(),
-			RemoteId:      p.ID().String(),
-		})
 		// Mark the peer as owning the block and schedule it for import
 		// Mark the peer as owning the block and schedule it for import
 		p.MarkBlock(request.Block.Hash())
 		p.MarkBlock(request.Block.Hash())
 		p.SetHead(request.Block.Hash())
 		p.SetHead(request.Block.Hash())
@@ -603,7 +636,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		// Update the peers total difficulty if needed, schedule a download if gapped
 		// Update the peers total difficulty if needed, schedule a download if gapped
 		if request.TD.Cmp(p.Td()) > 0 {
 		if request.TD.Cmp(p.Td()) > 0 {
 			p.SetTd(request.TD)
 			p.SetTd(request.TD)
-			if request.TD.Cmp(new(big.Int).Add(pm.blockchain.Td(), request.Block.Difficulty())) > 0 {
+			td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
+			if request.TD.Cmp(new(big.Int).Add(td, request.Block.Difficulty())) > 0 {
 				go pm.synchronise(p)
 				go pm.synchronise(p)
 			}
 			}
 		}
 		}
@@ -620,12 +654,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 				return errResp(ErrDecode, "transaction %d is nil", i)
 				return errResp(ErrDecode, "transaction %d is nil", i)
 			}
 			}
 			p.MarkTransaction(tx.Hash())
 			p.MarkTransaction(tx.Hash())
-
-			// Log it's arrival for later analysis
-			jsonlogger.LogJson(&logger.EthTxReceived{
-				TxHash:   tx.Hash().Hex(),
-				RemoteId: p.ID().String(),
-			})
 		}
 		}
 		pm.txpool.AddTransactions(txs)
 		pm.txpool.AddTransactions(txs)
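
The reworked GetReceipts handler serves receipts per block rather than per transaction, and it must distinguish "block unknown" from "block legitimately has no receipts". A hedged restatement of that check as a helper (function name and the Receipts parameter type are assumptions for illustration):

	// canServe reports whether a receipt query for hash is answerable: either
	// receipts exist on disk, or the header explicitly commits to an empty set.
	func canServe(pm *ProtocolManager, hash common.Hash, results types.Receipts) bool {
		if results != nil {
			return true // receipts found in the database
		}
		header := pm.blockchain.GetHeader(hash)
		return header != nil && header.ReceiptHash == types.EmptyRootHash
	}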
 
 

+ 44 - 20
eth/handler_test.go

@@ -17,12 +17,41 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/params"
 )
 )
 
 
+// Tests that protocol versions and modes of operations are matched up properly.
+func TestProtocolCompatibility(t *testing.T) {
+	// Define the compatibility chart
+	tests := []struct {
+		version    uint
+		fastSync   bool
+		compatible bool
+	}{
+		{61, false, true}, {62, false, true}, {63, false, true},
+		{61, true, false}, {62, true, false}, {63, true, true},
+	}
+	// Make sure anything we screw up is restored
+	backup := ProtocolVersions
+	defer func() { ProtocolVersions = backup }()
+
+	// Try all available compatibility configs and check for errors
+	for i, tt := range tests {
+		ProtocolVersions = []uint{tt.version}
+
+		pm, err := newTestProtocolManager(tt.fastSync, 0, nil, nil)
+		if pm != nil {
+			defer pm.Stop()
+		}
+		if (err == nil && !tt.compatible) || (err != nil && tt.compatible) {
+			t.Errorf("test %d: compatibility mismatch: have error %v, want compatibility %v", i, err, tt.compatible)
+		}
+	}
+}
+
 // Tests that hashes can be retrieved from a remote chain by hashes in reverse
 // Tests that hashes can be retrieved from a remote chain by hashes in reverse
 // order.
 // order.
 func TestGetBlockHashes61(t *testing.T) { testGetBlockHashes(t, 61) }
 func TestGetBlockHashes61(t *testing.T) { testGetBlockHashes(t, 61) }
 
 
 func testGetBlockHashes(t *testing.T, protocol int) {
 func testGetBlockHashes(t *testing.T, protocol int) {
-	pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
+	pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	defer peer.close()
 	defer peer.close()
 
 
@@ -65,7 +94,7 @@ func testGetBlockHashes(t *testing.T, protocol int) {
 func TestGetBlockHashesFromNumber61(t *testing.T) { testGetBlockHashesFromNumber(t, 61) }
 func TestGetBlockHashesFromNumber61(t *testing.T) { testGetBlockHashesFromNumber(t, 61) }
 
 
 func testGetBlockHashesFromNumber(t *testing.T, protocol int) {
 func testGetBlockHashesFromNumber(t *testing.T, protocol int) {
-	pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
+	pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	defer peer.close()
 	defer peer.close()
 
 
@@ -105,7 +134,7 @@ func testGetBlockHashesFromNumber(t *testing.T, protocol int) {
 func TestGetBlocks61(t *testing.T) { testGetBlocks(t, 61) }
 func TestGetBlocks61(t *testing.T) { testGetBlocks(t, 61) }
 
 
 func testGetBlocks(t *testing.T, protocol int) {
 func testGetBlocks(t *testing.T, protocol int) {
-	pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
+	pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	defer peer.close()
 	defer peer.close()
 
 
@@ -174,10 +203,9 @@ func testGetBlocks(t *testing.T, protocol int) {
 // Tests that block headers can be retrieved from a remote chain based on user queries.
 // Tests that block headers can be retrieved from a remote chain based on user queries.
 func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) }
 func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) }
 func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
 func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
-func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }
 
 
 func testGetBlockHeaders(t *testing.T, protocol int) {
 func testGetBlockHeaders(t *testing.T, protocol int) {
-	pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil)
+	pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	defer peer.close()
 	defer peer.close()
 
 
@@ -300,10 +328,9 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
 // Tests that block contents can be retrieved from a remote chain based on their hashes.
 // Tests that block contents can be retrieved from a remote chain based on their hashes.
 func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) }
 func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) }
 func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) }
 func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) }
-func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }
 
 
 func testGetBlockBodies(t *testing.T, protocol int) {
 func testGetBlockBodies(t *testing.T, protocol int) {
-	pm := newTestProtocolManager(downloader.MaxBlockFetch+15, nil, nil)
+	pm := newTestProtocolManagerMust(t, false, downloader.MaxBlockFetch+15, nil, nil)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	defer peer.close()
 	defer peer.close()
 
 
@@ -372,7 +399,6 @@ func testGetBlockBodies(t *testing.T, protocol int) {
 
 
 // Tests that the node state database can be retrieved based on hashes.
 // Tests that the node state database can be retrieved based on hashes.
 func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) }
 func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) }
-func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }
 
 
 func testGetNodeData(t *testing.T, protocol int) {
 func testGetNodeData(t *testing.T, protocol int) {
 	// Define three accounts to simulate transactions with
 	// Define three accounts to simulate transactions with
@@ -410,14 +436,16 @@ func testGetNodeData(t *testing.T, protocol int) {
 		}
 		}
 	}
 	}
 	// Assemble the test environment
 	// Assemble the test environment
-	pm := newTestProtocolManager(4, generator, nil)
+	pm := newTestProtocolManagerMust(t, false, 4, generator, nil)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	defer peer.close()
 	defer peer.close()
 
 
 	// Fetch for now the entire chain db
 	// Fetch for now the entire chain db
 	hashes := []common.Hash{}
 	hashes := []common.Hash{}
 	for _, key := range pm.chaindb.(*ethdb.MemDatabase).Keys() {
 	for _, key := range pm.chaindb.(*ethdb.MemDatabase).Keys() {
-		hashes = append(hashes, common.BytesToHash(key))
+		if len(key) == len(common.Hash{}) {
+			hashes = append(hashes, common.BytesToHash(key))
+		}
 	}
 	}
 	p2p.Send(peer.app, 0x0d, hashes)
 	p2p.Send(peer.app, 0x0d, hashes)
 	msg, err := peer.app.ReadMsg()
 	msg, err := peer.app.ReadMsg()
@@ -462,7 +490,6 @@ func testGetNodeData(t *testing.T, protocol int) {
 
 
 // Tests that the transaction receipts can be retrieved based on hashes.
 // Tests that the transaction receipts can be retrieved based on hashes.
 func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) }
 func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) }
-func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) }
 
 
 func testGetReceipt(t *testing.T, protocol int) {
 func testGetReceipt(t *testing.T, protocol int) {
 	// Define three accounts to simulate transactions with
 	// Define three accounts to simulate transactions with
@@ -500,20 +527,17 @@ func testGetReceipt(t *testing.T, protocol int) {
 		}
 		}
 	}
 	}
 	// Assemble the test environment
 	// Assemble the test environment
-	pm := newTestProtocolManager(4, generator, nil)
+	pm := newTestProtocolManagerMust(t, false, 4, generator, nil)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	peer, _ := newTestPeer("peer", protocol, pm, true)
 	defer peer.close()
 	defer peer.close()
 
 
 	// Collect the hashes to request, and the response to expect
 	// Collect the hashes to request, and the response to expect
-	hashes := []common.Hash{}
+	hashes, receipts := []common.Hash{}, []types.Receipts{}
 	for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ {
 	for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ {
-		for _, tx := range pm.blockchain.GetBlockByNumber(i).Transactions() {
-			hashes = append(hashes, tx.Hash())
-		}
-	}
-	receipts := make([]*types.Receipt, len(hashes))
-	for i, hash := range hashes {
-		receipts[i] = core.GetReceipt(pm.chaindb, hash)
+		block := pm.blockchain.GetBlockByNumber(i)
+
+		hashes = append(hashes, block.Hash())
+		receipts = append(receipts, core.GetBlockReceipts(pm.chaindb, block.Hash()))
 	}
 	}
 	// Send the hash request and verify the response
 	// Send the hash request and verify the response
 	p2p.Send(peer.app, 0x0f, hashes)
 	p2p.Send(peer.app, 0x0f, hashes)
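
The key-length filter in testGetNodeData above is needed because fast sync stores chain metadata (canonical mappings, total difficulties, receipts) in the same key-value store as trie nodes. A short sketch of the distinction it draws:

	for _, key := range pm.chaindb.(*ethdb.MemDatabase).Keys() {
		// Trie nodes are keyed by their raw 32-byte hash; longer or shorter
		// keys are chain metadata and must not be requested as node data.
		if len(key) == len(common.Hash{}) {
			hashes = append(hashes, common.BytesToHash(key))
		}
	}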

+ 18 - 3
eth/helper_test.go

@@ -28,7 +28,7 @@ var (
 // newTestProtocolManager creates a new protocol manager for testing purposes,
 // newTestProtocolManager creates a new protocol manager for testing purposes,
 // with the given number of blocks already known, and potential notification
 // with the given number of blocks already known, and potential notification
 // channels for different events.
 // channels for different events.
-func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager {
+func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) {
 	var (
 	var (
 		evmux         = new(event.TypeMux)
 		evmux         = new(event.TypeMux)
 		pow           = new(core.FakePow)
 		pow           = new(core.FakePow)
@@ -38,12 +38,27 @@ func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), new
 		blockproc     = core.NewBlockProcessor(db, pow, blockchain, evmux)
 		blockproc     = core.NewBlockProcessor(db, pow, blockchain, evmux)
 	)
 	)
 	blockchain.SetProcessor(blockproc)
 	blockchain.SetProcessor(blockproc)
-	chain := core.GenerateChain(genesis, db, blocks, generator)
+	chain, _ := core.GenerateChain(genesis, db, blocks, generator)
 	if _, err := blockchain.InsertChain(chain); err != nil {
 	if _, err := blockchain.InsertChain(chain); err != nil {
 		panic(err)
 		panic(err)
 	}
 	}
-	pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db)
+	pm, err := NewProtocolManager(fastSync, NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db)
+	if err != nil {
+		return nil, err
+	}
 	pm.Start()
 	pm.Start()
+	return pm, nil
+}
+
+// newTestProtocolManagerMust creates a new protocol manager for testing purposes,
+// with the given number of blocks already known, and potential notification
+// channels for different events. In case of an error, the constructor force-
+// fails the test.
+func newTestProtocolManagerMust(t *testing.T, fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager {
+	pm, err := newTestProtocolManager(fastSync, blocks, generator, newtx)
+	if err != nil {
+		t.Fatalf("Failed to create protocol manager: %v", err)
+	}
 	return pm
 	return pm
 }
 }
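
Since NewProtocolManager can now fail, the test helpers come in two flavours; a sketch of the two call styles (arguments illustrative):

	// Expected-failure style: keep the error and assert on it.
	pm, err := newTestProtocolManager(true, 0, nil, nil)
	if err != nil {
		// expected for incompatible version/mode combinations
	}
	if pm != nil {
		defer pm.Stop()
	}

	// Must style: any constructor error aborts the test via t.Fatalf.
	pm2 := newTestProtocolManagerMust(t, false, 0, nil, nil)
	defer pm2.Stop()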
 
 

+ 1 - 1
eth/metrics.go

@@ -101,7 +101,7 @@ func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
 		packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter
 		packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter
 
 
 	case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
 	case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
-		packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter
+		packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter
 	case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
 	case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
 		packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter
 		packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter
 
 

+ 4 - 4
eth/peer.go

@@ -191,15 +191,15 @@ func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
 	return p2p.Send(p.rw, BlockBodiesMsg, bodies)
 	return p2p.Send(p.rw, BlockBodiesMsg, bodies)
 }
 }
 
 
-// SendNodeData sends a batch of arbitrary internal data, corresponding to the
+// SendNodeDataRLP sends a batch of arbitrary internal data, corresponding to the
 // hashes requested.
 // hashes requested.
 func (p *peer) SendNodeData(data [][]byte) error {
 func (p *peer) SendNodeData(data [][]byte) error {
 	return p2p.Send(p.rw, NodeDataMsg, data)
 	return p2p.Send(p.rw, NodeDataMsg, data)
 }
 }
 
 
-// SendReceipts sends a batch of transaction receipts, corresponding to the ones
-// requested.
-func (p *peer) SendReceipts(receipts []*types.Receipt) error {
+// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
+// ones requested from an already RLP encoded format.
+func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
 	return p2p.Send(p.rw, ReceiptsMsg, receipts)
 	return p2p.Send(p.rw, ReceiptsMsg, receipts)
 }
 }
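
SendReceiptsRLP pairs with the handler change earlier: the serving side encodes each receipt set once with rlp.EncodeToBytes and ships the raw bytes, instead of handing typed values to p2p.Send for re-encoding. The producing side, sketched:

	var receipts []rlp.RawValue
	if encoded, err := rlp.EncodeToBytes(results); err == nil {
		receipts = append(receipts, encoded) // raw values pass through p2p.Send untouched
	}
	return p.SendReceiptsRLP(receipts)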
 
 

+ 2 - 8
eth/protocol.go

@@ -31,14 +31,13 @@ const (
 	eth61 = 61
 	eth61 = 61
 	eth62 = 62
 	eth62 = 62
 	eth63 = 63
 	eth63 = 63
-	eth64 = 64
 )
 )
 
 
 // Supported versions of the eth protocol (first is primary).
 // Supported versions of the eth protocol (first is primary).
-var ProtocolVersions = []uint{eth64, eth63, eth62, eth61}
+var ProtocolVersions = []uint{eth63, eth62, eth61}
 
 
 // Number of implemented messages corresponding to different protocol versions.
 // Number of implemented messages corresponding to different protocol versions.
-var ProtocolLengths = []uint64{15, 12, 8, 9}
+var ProtocolLengths = []uint64{17, 8, 9}
 
 
 const (
 const (
 	NetworkId          = 1
 	NetworkId          = 1
@@ -73,11 +72,6 @@ const (
 	NodeDataMsg    = 0x0e
 	NodeDataMsg    = 0x0e
 	GetReceiptsMsg = 0x0f
 	GetReceiptsMsg = 0x0f
 	ReceiptsMsg    = 0x10
 	ReceiptsMsg    = 0x10
-
-	// Protocol messages belonging to eth/64
-	GetAcctProofMsg     = 0x11
-	GetStorageDataProof = 0x12
-	Proof               = 0x13
 )
 )
 
 
 type errCode int
 type errCode int
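
ProtocolVersions and ProtocolLengths are parallel slices: dropping eth/64 removes one entry from each, and eth/63 grows to 17 message codes to cover the new node-data and receipt messages. A sketch of how the pairing is consumed (mirrors the protocol construction in eth/handler.go):

	for i, version := range ProtocolVersions {
		// ProtocolLengths[i] is the message-code count for ProtocolVersions[i];
		// letting the slices fall out of step mis-sizes the p2p multiplexer.
		_ = p2p.Protocol{Name: "eth", Version: version, Length: ProtocolLengths[i]}
	}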

+ 3 - 6
eth/protocol_test.go

@@ -41,10 +41,9 @@ var testAccount = crypto.NewKey(rand.Reader)
 func TestStatusMsgErrors61(t *testing.T) { testStatusMsgErrors(t, 61) }
 func TestStatusMsgErrors61(t *testing.T) { testStatusMsgErrors(t, 61) }
 func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) }
 func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) }
 func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) }
 func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) }
-func TestStatusMsgErrors64(t *testing.T) { testStatusMsgErrors(t, 64) }
 
 
 func testStatusMsgErrors(t *testing.T, protocol int) {
 func testStatusMsgErrors(t *testing.T, protocol int) {
-	pm := newTestProtocolManager(0, nil, nil)
+	pm := newTestProtocolManagerMust(t, false, 0, nil, nil)
 	td, currentBlock, genesis := pm.blockchain.Status()
 	td, currentBlock, genesis := pm.blockchain.Status()
 	defer pm.Stop()
 	defer pm.Stop()
 
 
@@ -95,11 +94,10 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
 func TestRecvTransactions61(t *testing.T) { testRecvTransactions(t, 61) }
 func TestRecvTransactions61(t *testing.T) { testRecvTransactions(t, 61) }
 func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) }
 func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) }
 func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
 func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
-func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }
 
 
 func testRecvTransactions(t *testing.T, protocol int) {
 func testRecvTransactions(t *testing.T, protocol int) {
 	txAdded := make(chan []*types.Transaction)
 	txAdded := make(chan []*types.Transaction)
-	pm := newTestProtocolManager(0, nil, txAdded)
+	pm := newTestProtocolManagerMust(t, false, 0, nil, txAdded)
 	p, _ := newTestPeer("peer", protocol, pm, true)
 	p, _ := newTestPeer("peer", protocol, pm, true)
 	defer pm.Stop()
 	defer pm.Stop()
 	defer p.close()
 	defer p.close()
@@ -124,10 +122,9 @@ func testRecvTransactions(t *testing.T, protocol int) {
 func TestSendTransactions61(t *testing.T) { testSendTransactions(t, 61) }
 func TestSendTransactions61(t *testing.T) { testSendTransactions(t, 61) }
 func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) }
 func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) }
 func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
 func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
-func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }
 
 
 func testSendTransactions(t *testing.T, protocol int) {
 func testSendTransactions(t *testing.T, protocol int) {
-	pm := newTestProtocolManager(0, nil, nil)
+	pm := newTestProtocolManagerMust(t, false, 0, nil, nil)
 	defer pm.Stop()
 	defer pm.Stop()
 
 
 	// Fill the pool with big transactions.
 	// Fill the pool with big transactions.

+ 19 - 2
eth/sync.go

@@ -22,6 +22,7 @@ import (
 
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/p2p/discover"
 	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -160,9 +161,25 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 		return
 		return
 	}
 	}
 	// Make sure the peer's TD is higher than our own. If not drop.
 	// Make sure the peer's TD is higher than our own. If not drop.
-	if peer.Td().Cmp(pm.blockchain.Td()) <= 0 {
+	td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
+	if peer.Td().Cmp(td) <= 0 {
 		return
 		return
 	}
 	}
 	// Otherwise try to sync with the downloader
 	// Otherwise try to sync with the downloader
-	pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td())
+	mode := downloader.FullSync
+	if pm.fastSync {
+		mode = downloader.FastSync
+	}
+	pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode)
+
+	// If fast sync was enabled, and we synced up, disable it
+	if pm.fastSync {
+		for pm.downloader.Synchronising() {
+			time.Sleep(100 * time.Millisecond)
+		}
+		if pm.blockchain.CurrentBlock().NumberU64() > 0 {
+			glog.V(logger.Info).Infof("fast sync complete, auto disabling")
+			pm.fastSync = false
+		}
+	}
 }
 }
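
synchronise now translates the manager's fastSync flag into a downloader.SyncMode on every run, and the busy-wait afterwards lets fast sync permanently switch itself off once a real block has been imported. The mode selection, distilled:

	mode := downloader.FullSync
	if pm.fastSync {
		// Fast sync pulls state via node-data requests and fully validates
		// blocks only near the chain head.
		mode = downloader.FastSync
	}
	pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode)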

+ 53 - 0
eth/sync_test.go

@@ -0,0 +1,53 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/p2p/discover"
+)
+
+// Tests that fast sync gets disabled as soon as a real block is successfully
+// imported into the blockchain.
+func TestFastSyncDisabling(t *testing.T) {
+	// Create a pristine protocol manager, check that fast sync is left enabled
+	pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil)
+	if !pmEmpty.fastSync {
+		t.Fatalf("fast sync disabled on pristine blockchain")
+	}
+	// Create a full protocol manager, check that fast sync gets disabled
+	pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil)
+	if pmFull.fastSync {
+		t.Fatalf("fast sync not disabled on non-empty blockchain")
+	}
+	// Sync up the two peers
+	io1, io2 := p2p.MsgPipe()
+
+	go pmFull.handle(pmFull.newPeer(63, NetworkId, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2))
+	go pmEmpty.handle(pmEmpty.newPeer(63, NetworkId, p2p.NewPeer(discover.NodeID{}, "full", nil), io1))
+
+	time.Sleep(250 * time.Millisecond)
+	pmEmpty.synchronise(pmEmpty.peers.BestPeer())
+
+	// Check that fast sync was disabled
+	if pmEmpty.fastSync {
+		t.Fatalf("fast sync not disabled after successful synchronisation")
+	}
+}

+ 44 - 13
ethdb/memory_database.go

@@ -17,7 +17,9 @@
 package ethdb
 package ethdb
 
 
 import (
 import (
+	"errors"
 	"fmt"
 	"fmt"
+	"sync"
 
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common"
 )
 )
@@ -26,29 +28,45 @@ import (
  * This is a test memory database. Do not use it in production; it does not get persisted.
  * This is a test memory database. Do not use it in production; it does not get persisted.
  */
  */
 type MemDatabase struct {
 type MemDatabase struct {
-	db map[string][]byte
+	db   map[string][]byte
+	lock sync.RWMutex
 }
 }
 
 
 func NewMemDatabase() (*MemDatabase, error) {
 func NewMemDatabase() (*MemDatabase, error) {
-	db := &MemDatabase{db: make(map[string][]byte)}
-
-	return db, nil
+	return &MemDatabase{
+		db: make(map[string][]byte),
+	}, nil
 }
 }
 
 
 func (db *MemDatabase) Put(key []byte, value []byte) error {
 func (db *MemDatabase) Put(key []byte, value []byte) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
 	db.db[string(key)] = common.CopyBytes(value)
 	db.db[string(key)] = common.CopyBytes(value)
 	return nil
 	return nil
 }
 }
 
 
 func (db *MemDatabase) Set(key []byte, value []byte) {
 func (db *MemDatabase) Set(key []byte, value []byte) {
+	// Locking is deliberately left to Put below: db.lock is not reentrant,
+	// so taking it here as well would self-deadlock on the nested Lock().
 	db.Put(key, value)
 	db.Put(key, value)
 }
 }
 
 
 func (db *MemDatabase) Get(key []byte) ([]byte, error) {
 func (db *MemDatabase) Get(key []byte) ([]byte, error) {
-	return db.db[string(key)], nil
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
+	if entry, ok := db.db[string(key)]; ok {
+		return entry, nil
+	}
+	return nil, errors.New("not found")
 }
 }
 
 
 func (db *MemDatabase) Keys() [][]byte {
 func (db *MemDatabase) Keys() [][]byte {
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
 	keys := [][]byte{}
 	keys := [][]byte{}
 	for key, _ := range db.db {
 	for key, _ := range db.db {
 		keys = append(keys, []byte(key))
 		keys = append(keys, []byte(key))
@@ -65,12 +83,17 @@ func (db *MemDatabase) GetKeys() []*common.Key {
 */
 */
 
 
 func (db *MemDatabase) Delete(key []byte) error {
 func (db *MemDatabase) Delete(key []byte) error {
-	delete(db.db, string(key))
+	db.lock.Lock()
+	defer db.lock.Unlock()
 
 
+	delete(db.db, string(key))
 	return nil
 	return nil
 }
 }
 
 
 func (db *MemDatabase) Print() {
 func (db *MemDatabase) Print() {
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
 	for key, val := range db.db {
 	for key, val := range db.db {
 		fmt.Printf("%x(%d): ", key, len(key))
 		fmt.Printf("%x(%d): ", key, len(key))
 		node := common.NewValueFromBytes(val)
 		node := common.NewValueFromBytes(val)
@@ -83,11 +106,9 @@ func (db *MemDatabase) Close() {
 
 
 func (db *MemDatabase) LastKnownTD() []byte {
 func (db *MemDatabase) LastKnownTD() []byte {
 	data, _ := db.Get([]byte("LastKnownTotalDifficulty"))
 	data, _ := db.Get([]byte("LastKnownTotalDifficulty"))
-
 	if len(data) == 0 || data == nil {
 	if len(data) == 0 || data == nil {
 		data = []byte{0x0}
 		data = []byte{0x0}
 	}
 	}
-
 	return data
 	return data
 }
 }
 
 
@@ -100,16 +121,26 @@ type kv struct{ k, v []byte }
 type memBatch struct {
 type memBatch struct {
 	db     *MemDatabase
 	db     *MemDatabase
 	writes []kv
 	writes []kv
+	lock   sync.RWMutex
 }
 }
 
 
-func (w *memBatch) Put(key, value []byte) error {
-	w.writes = append(w.writes, kv{key, common.CopyBytes(value)})
+func (b *memBatch) Put(key, value []byte) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.writes = append(b.writes, kv{key, common.CopyBytes(value)})
 	return nil
 	return nil
 }
 }
 
 
-func (w *memBatch) Write() error {
-	for _, kv := range w.writes {
-		w.db.db[string(kv.k)] = kv.v
+func (b *memBatch) Write() error {
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	b.db.lock.Lock()
+	defer b.db.lock.Unlock()
+
+	for _, kv := range b.writes {
+		b.db.db[string(kv.k)] = kv.v
 	}
 	}
 	return nil
 	return nil
 }
 }

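The map is now guarded by an RWMutex, and Get reports missing keys with an explicit error instead of a silent nil. One caveat visible above: Set takes the write lock and then calls Put, which tries to take the same non-reentrant lock, so the usage sketch below sticks to Put and Get:

	package main

	import (
		"fmt"
		"sync"

		"github.com/ethereum/go-ethereum/ethdb"
	)

	func main() {
		db, _ := ethdb.NewMemDatabase()

		// Concurrent writers are now safe behind the RWMutex.
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func(i byte) {
				defer wg.Done()
				db.Put([]byte{i}, []byte{i})
			}(byte(i))
		}
		wg.Wait()

		// Missing keys now surface an explicit error.
		if _, err := db.Get([]byte("no-such-key")); err != nil {
			fmt.Println("lookup failed:", err) // prints: lookup failed: not found
		}
	}
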
+ 1 - 1
miner/worker.go

@@ -312,7 +312,7 @@ func (self *worker) wait() {
 						self.mux.Post(core.ChainHeadEvent{block})
 						self.mux.Post(logs)
 					}
-					if err := core.PutBlockReceipts(self.chainDb, block, receipts); err != nil {
+					if err := core.PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
 						glog.V(logger.Warn).Infoln("error writing block receipts:", err)
 					}
 				}(block, work.state.Logs(), work.receipts)

+ 1 - 7
rpc/api/debug.go

@@ -146,13 +146,7 @@ func (self *debugApi) SetHead(req *shared.Request) (interface{}, error) {
 	if err := self.codec.Decode(req.Params, &args); err != nil {
 		return nil, shared.NewDecodeParamError(err.Error())
 	}
-
-	block := self.xeth.EthBlockByNumber(args.BlockNumber)
-	if block == nil {
-		return nil, fmt.Errorf("block #%d not found", args.BlockNumber)
-	}
-
-	self.ethereum.BlockChain().SetHead(block)
+	self.ethereum.BlockChain().SetHead(uint64(args.BlockNumber))
 
 	return nil, nil
 }

+ 1 - 3
rpc/api/eth.go

@@ -168,9 +168,7 @@ func (self *ethApi) IsMining(req *shared.Request) (interface{}, error) {
 }
 
 func (self *ethApi) IsSyncing(req *shared.Request) (interface{}, error) {
-	current := self.ethereum.BlockChain().CurrentBlock().NumberU64()
-	origin, height := self.ethereum.Downloader().Boundaries()
-
+	origin, current, height := self.ethereum.Downloader().Progress()
 	if current < height {
 		return map[string]interface{}{
 			"startingBlock": newHexNum(big.NewInt(int64(origin)).Bytes()),
 			"startingBlock": newHexNum(big.NewInt(int64(origin)).Bytes()),

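Progress now returns the sync origin, current head, and estimated target height in one call, so the RPC no longer needs to consult the blockchain for the current block. For illustration, a client-side helper (hypothetical, not part of the API) turning the triple into a completion percentage:

	// syncPercent is an illustrative helper over the Progress() triple.
	func syncPercent(origin, current, height uint64) float64 {
		if height <= origin {
			return 100 // already at or past the target, nothing left to sync
		}
		return 100 * float64(current-origin) / float64(height-origin)
	}
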
+ 1 - 1
rpc/api/eth_args.go

@@ -838,7 +838,7 @@ func NewLogRes(log *vm.Log) LogRes {
 	}
 	l.Address = newHexData(log.Address)
 	l.Data = newHexData(log.Data)
-	l.BlockNumber = newHexNum(log.Number)
+	l.BlockNumber = newHexNum(log.BlockNumber)
 	l.LogIndex = newHexNum(log.Index)
 	l.TransactionHash = newHexData(log.TxHash)
 	l.TransactionIndex = newHexNum(log.TxIndex)

+ 2 - 2
rpc/api/parsing.go

@@ -453,8 +453,8 @@ func NewReceiptRes(rec *types.Receipt) *ReceiptRes {
 		v.ContractAddress = newHexData(rec.ContractAddress)
 	}
 
-	logs := make([]interface{}, len(rec.Logs()))
-	for i, log := range rec.Logs() {
+	logs := make([]interface{}, len(rec.Logs))
+	for i, log := range rec.Logs {
 		logs[i] = NewLogRes(log)
 	}
 	v.Logs = &logs

+ 285 - 0
trie/sync.go

@@ -0,0 +1,285 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
+)
+
+// request represents a scheduled or already in-flight state retrieval request.
+type request struct {
+	hash   common.Hash // Hash of the node data content to retrieve
+	data   []byte      // Data content of the node, cached until all subtrees complete
+	object *node       // Target node to populate with retrieved data (hashnode originally)
+
+	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
+	depth   int        // Depth level within the trie the node is located to prioritise DFS
+	deps    int        // Number of dependencies before allowed to commit this node
+
+	callback TrieSyncLeafCallback // Callback to invoke if a leaf node is reached on this branch
+}
+
+// SyncResult is a simple struct to return missing nodes along with their
+// request hashes.
+type SyncResult struct {
+	Hash common.Hash // Hash of the originally unknown trie node
+	Data []byte      // Data content of the retrieved node
+}
+
+// TrieSyncLeafCallback is a callback type invoked when a trie sync reaches a
+// leaf node. It's used by state syncing to check if the leaf node requires some
+// further data syncing.
+type TrieSyncLeafCallback func(leaf []byte, parent common.Hash) error
+
+// TrieSync is the main state trie synchronisation scheduler, which provides yet
+// unknown trie hashes to retrieve, accepts node data associated with said hashes
+// and reconstructs the trie step by step until all is done.
+type TrieSync struct {
+	database ethdb.Database           // State database for storing all the assembled node data
+	requests map[common.Hash]*request // Pending requests pertaining to a key hash
+	queue    *prque.Prque             // Priority queue with the pending requests
+}
+
+// NewTrieSync creates a new trie data download scheduler.
+func NewTrieSync(root common.Hash, database ethdb.Database, callback TrieSyncLeafCallback) *TrieSync {
+	ts := &TrieSync{
+		database: database,
+		requests: make(map[common.Hash]*request),
+		queue:    prque.New(),
+	}
+	ts.AddSubTrie(root, 0, common.Hash{}, callback)
+	return ts
+}
+
+// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
+func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback TrieSyncLeafCallback) {
+	// Short circuit if the trie is empty or already known
+	if root == emptyRoot {
+		return
+	}
+	blob, _ := s.database.Get(root.Bytes())
+	if local, err := decodeNode(blob); local != nil && err == nil {
+		return
+	}
+	// Assemble the new sub-trie sync request
+	node := node(hashNode(root.Bytes()))
+	req := &request{
+		object:   &node,
+		hash:     root,
+		depth:    depth,
+		callback: callback,
+	}
+	// If this sub-trie has a designated parent, link them together
+	if parent != (common.Hash{}) {
+		ancestor := s.requests[parent]
+		if ancestor == nil {
+			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
+		}
+		ancestor.deps++
+		req.parents = append(req.parents, ancestor)
+	}
+	s.schedule(req)
+}
+
+// AddRawEntry schedules the direct retrieval of a state entry that should not be
+// interpreted as a trie node, but rather accepted and stored into the database
+// as is. This method's goal is to support misc state metadata retrievals (e.g.
+// contract code).
+func (s *TrieSync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
+	// Short circuit if the entry is empty or already known
+	if hash == emptyState {
+		return
+	}
+	if blob, _ := s.database.Get(hash.Bytes()); blob != nil {
+		return
+	}
+	// Assemble the new sub-trie sync request
+	req := &request{
+		hash:  hash,
+		depth: depth,
+	}
+	// If this sub-trie has a designated parent, link them together
+	if parent != (common.Hash{}) {
+		ancestor := s.requests[parent]
+		if ancestor == nil {
+			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
+		}
+		ancestor.deps++
+		req.parents = append(req.parents, ancestor)
+	}
+	s.schedule(req)
+}
+
+// Missing returns the known hashes of trie nodes still missing, popping at most max entries from the retrieval queue (0 means no limit).
+func (s *TrieSync) Missing(max int) []common.Hash {
+	requests := []common.Hash{}
+	for !s.queue.Empty() && (max == 0 || len(requests) < max) {
+		requests = append(requests, s.queue.PopItem().(common.Hash))
+	}
+	return requests
+}
+
+// Process injects a batch of retrieved trie node data.
+func (s *TrieSync) Process(results []SyncResult) (int, error) {
+	for i, item := range results {
+		// If the item was not requested, bail out
+		request := s.requests[item.Hash]
+		if request == nil {
+			return i, fmt.Errorf("not requested: %x", item.Hash)
+		}
+		// If the item is a raw entry request, commit directly
+		if request.object == nil {
+			request.data = item.Data
+			s.commit(request, nil)
+			continue
+		}
+		// Decode the node data content and update the request
+		node, err := decodeNode(item.Data)
+		if err != nil {
+			return i, err
+		}
+		*request.object = node
+		request.data = item.Data
+
+		// Create and schedule a request for all the children nodes
+		requests, err := s.children(request)
+		if err != nil {
+			return i, err
+		}
+		if len(requests) == 0 && request.deps == 0 {
+			s.commit(request, nil)
+			continue
+		}
+		request.deps += len(requests)
+		for _, child := range requests {
+			s.schedule(child)
+		}
+	}
+	return 0, nil
+}
+
+// Pending returns the number of state entries currently pending for download.
+func (s *TrieSync) Pending() int {
+	return len(s.requests)
+}
+
+// schedule inserts a new state retrieval request into the fetch queue. If there
+// is already a pending request for this node, the new request will be discarded
+// and only a parent reference added to the old one.
+func (s *TrieSync) schedule(req *request) {
+	// If we're already requesting this node, add a new reference and stop
+	if old, ok := s.requests[req.hash]; ok {
+		old.parents = append(old.parents, req.parents...)
+		return
+	}
+	// Schedule the request for future retrieval
+	s.queue.Push(req.hash, float32(req.depth))
+	s.requests[req.hash] = req
+}
+
+// children retrieves all the missing children of a state trie entry for future
+// retrieval scheduling.
+func (s *TrieSync) children(req *request) ([]*request, error) {
+	// Gather all the children of the node, regardless of whether they are known
+	type child struct {
+		node  *node
+		depth int
+	}
+	children := []child{}
+
+	switch node := (*req.object).(type) {
+	case shortNode:
+		children = []child{{
+			node:  &node.Val,
+			depth: req.depth + len(node.Key),
+		}}
+	case fullNode:
+		for i := 0; i < 17; i++ {
+			if node[i] != nil {
+				children = append(children, child{
+					node:  &node[i],
+					depth: req.depth + 1,
+				})
+			}
+		}
+	default:
+		panic(fmt.Sprintf("unknown node: %+v", node))
+	}
+	// Iterate over the children, and request all unknown ones
+	requests := make([]*request, 0, len(children))
+	for _, child := range children {
+		// Notify any external watcher of a new key/value node
+		if req.callback != nil {
+			if node, ok := (*child.node).(valueNode); ok {
+				if err := req.callback(node, req.hash); err != nil {
+					return nil, err
+				}
+			}
+		}
+		// If the child references another node, resolve or schedule
+		if node, ok := (*child.node).(hashNode); ok {
+			// Try to resolve the node from the local database
+			blob, _ := s.database.Get(node)
+			if local, err := decodeNode(blob); local != nil && err == nil {
+				*child.node = local
+				continue
+			}
+			// Locally unknown node, schedule for retrieval
+			requests = append(requests, &request{
+				object:   child.node,
+				hash:     common.BytesToHash(node),
+				parents:  []*request{req},
+				depth:    child.depth,
+				callback: req.callback,
+			})
+		}
+	}
+	return requests, nil
+}
+
+// commit finalizes a retrieval request and stores it into the database. If any
+// of the referencing parent requests complete due to this commit, they are also
+// committed themselves.
+func (s *TrieSync) commit(req *request, batch ethdb.Batch) (err error) {
+	// Create a new batch if none was specified
+	if batch == nil {
+		batch = s.database.NewBatch()
+		defer func() {
+			err = batch.Write()
+		}()
+	}
+	// Write the node content to disk
+	if err := batch.Put(req.hash[:], req.data); err != nil {
+		return err
+	}
+	delete(s.requests, req.hash)
+
+	// Check all parents for completion
+	for _, parent := range req.parents {
+		parent.deps--
+		if parent.deps == 0 {
+			if err := s.commit(parent, batch); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}

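Everything above boils down to one driving loop: pop a batch of missing hashes, fetch their blobs from whatever source is at hand, and feed them back through Process until nothing is pending. A single-threaded sketch with a caller-supplied fetch function — the tests in the next file follow this same pattern:

	// syncTrie drains a TrieSync scheduler; fetch is assumed to return the
	// raw node blob for a hash (e.g. from a remote peer or another database).
	func syncTrie(root common.Hash, db ethdb.Database, fetch func(common.Hash) ([]byte, error)) error {
		sched := trie.NewTrieSync(root, db, nil)
		for sched.Pending() > 0 {
			results := []trie.SyncResult{}
			for _, hash := range sched.Missing(256) { // up to 256 nodes per round
				data, err := fetch(hash)
				if err != nil {
					return err
				}
				results = append(results, trie.SyncResult{Hash: hash, Data: data})
			}
			if _, err := sched.Process(results); err != nil {
				return err
			}
		}
		return nil
	}
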
+ 257 - 0
trie/sync_test.go

@@ -0,0 +1,257 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethdb"
+)
+
+// makeTestTrie creates a sample test trie to test node-wise reconstruction.
+func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
+	// Create an empty trie
+	db, _ := ethdb.NewMemDatabase()
+	trie, _ := New(common.Hash{}, db)
+
+	// Fill it with some arbitrary data
+	content := make(map[string][]byte)
+	for i := byte(0); i < 255; i++ {
+		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
+		content[string(key)] = val
+		trie.Update(key, val)
+
+		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
+		content[string(key)] = val
+		trie.Update(key, val)
+	}
+	trie.Commit()
+
+	// Return the generated trie
+	return db, trie, content
+}
+
+// checkTrieContents cross references a reconstructed trie with an expected data
+// content map.
+func checkTrieContents(t *testing.T, db Database, root []byte, content map[string][]byte) {
+	trie, err := New(common.BytesToHash(root), db)
+	if err != nil {
+		t.Fatalf("failed to create trie at %x: %v", root, err)
+	}
+	for key, val := range content {
+		if have := trie.Get([]byte(key)); bytes.Compare(have, val) != 0 {
+			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
+		}
+	}
+}
+
+// Tests that an empty trie is not scheduled for syncing.
+func TestEmptyTrieSync(t *testing.T) {
+	emptyA, _ := New(common.Hash{}, nil)
+	emptyB, _ := New(emptyRoot, nil)
+
+	for i, trie := range []*Trie{emptyA, emptyB} {
+		db, _ := ethdb.NewMemDatabase()
+		if req := NewTrieSync(common.BytesToHash(trie.Root()), db, nil).Missing(1); len(req) != 0 {
+			t.Errorf("test %d: content requested for empty trie: %v", i, req)
+		}
+	}
+}
+
+// Tests that given a root hash, a trie can sync iteratively on a single thread,
+// requesting retrieval tasks and returning all of them in one go.
+func TestIterativeTrieSyncIndividual(t *testing.T) { testIterativeTrieSync(t, 1) }
+func TestIterativeTrieSyncBatched(t *testing.T)    { testIterativeTrieSync(t, 100) }
+
+func testIterativeTrieSync(t *testing.T, batch int) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := append([]common.Hash{}, sched.Missing(batch)...)
+	for len(queue) > 0 {
+		results := make([]SyncResult, len(queue))
+		for i, hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results[i] = SyncResult{hash, data}
+		}
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = append(queue[:0], sched.Missing(batch)...)
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
+
+// Tests that the trie scheduler can correctly reconstruct the state even if only
+// partial results are returned, with the others sent only later.
+func TestIterativeDelayedTrieSync(t *testing.T) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := append([]common.Hash{}, sched.Missing(10000)...)
+	for len(queue) > 0 {
+		// Sync only half of the scheduled nodes
+		results := make([]SyncResult, len(queue)/2+1)
+		for i, hash := range queue[:len(results)] {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results[i] = SyncResult{hash, data}
+		}
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = append(queue[len(results):], sched.Missing(10000)...)
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
+
+// Tests that given a root hash, a trie can sync iteratively on a single thread,
+// requesting retrieval tasks and returning all of them in one go, however in a
+// random order.
+func TestIterativeRandomTrieSyncIndividual(t *testing.T) { testIterativeRandomTrieSync(t, 1) }
+func TestIterativeRandomTrieSyncBatched(t *testing.T)    { testIterativeRandomTrieSync(t, 100) }
+
+func testIterativeRandomTrieSync(t *testing.T, batch int) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := make(map[common.Hash]struct{})
+	for _, hash := range sched.Missing(batch) {
+		queue[hash] = struct{}{}
+	}
+	for len(queue) > 0 {
+		// Fetch all the queued nodes in a random order
+		results := make([]SyncResult, 0, len(queue))
+		for hash, _ := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results = append(results, SyncResult{hash, data})
+		}
+		// Feed the retrieved results back and queue new tasks
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = make(map[common.Hash]struct{})
+		for _, hash := range sched.Missing(batch) {
+			queue[hash] = struct{}{}
+		}
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
+
+// Tests that the trie scheduler can correctly reconstruct the state even if only
+// partial results are returned (even those in random order), with others sent only later.
+func TestIterativeRandomDelayedTrieSync(t *testing.T) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := make(map[common.Hash]struct{})
+	for _, hash := range sched.Missing(10000) {
+		queue[hash] = struct{}{}
+	}
+	for len(queue) > 0 {
+		// Sync only half of the scheduled nodes, even those in random order
+		results := make([]SyncResult, 0, len(queue)/2+1)
+		for hash, _ := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results = append(results, SyncResult{hash, data})
+
+			if len(results) >= cap(results) {
+				break
+			}
+		}
+		// Feed the retrieved results back and queue new tasks
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		for _, result := range results {
+			delete(queue, result.Hash)
+		}
+		for _, hash := range sched.Missing(10000) {
+			queue[hash] = struct{}{}
+		}
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
+
+// Tests that a trie sync will not request nodes multiple times, even if they
+// have such references.
+func TestDuplicateAvoidanceTrieSync(t *testing.T) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := append([]common.Hash{}, sched.Missing(0)...)
+	requested := make(map[common.Hash]struct{})
+
+	for len(queue) > 0 {
+		results := make([]SyncResult, len(queue))
+		for i, hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			if _, ok := requested[hash]; ok {
+				t.Errorf("hash %x already requested once", hash)
+			}
+			requested[hash] = struct{}{}
+
+			results[i] = SyncResult{hash, data}
+		}
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = append(queue[:0], sched.Missing(0)...)
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}

+ 5 - 0
trie/trie.go

@@ -24,6 +24,7 @@ import (
 	"hash"
 	"hash"
 
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/logger/glog"
@@ -35,8 +36,12 @@ const defaultCacheCapacity = 800
 var (
 var (
 	// The global cache stores decoded trie nodes by hash as they get loaded.
 	// The global cache stores decoded trie nodes by hash as they get loaded.
 	globalCache = newARC(defaultCacheCapacity)
 	globalCache = newARC(defaultCacheCapacity)
+
 	// This is the known root hash of an empty trie.
 	// This is the known root hash of an empty trie.
 	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
 	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+
+	// This is the known hash of an empty state trie entry.
+	emptyState = crypto.Sha3Hash(nil)
 )
 )
 
 
 var ErrMissingRoot = errors.New("missing root node")
 var ErrMissingRoot = errors.New("missing root node")

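Both magic values can be checked independently: emptyState is the Keccak-256 hash of empty input (what crypto.Sha3Hash(nil) computes), and emptyRoot is the Keccak-256 hash of the RLP encoding of an empty string, i.e. the single byte 0x80. A standalone sanity check, using the x/crypto Keccak implementation rather than the project's own:

	package main

	import (
		"fmt"

		"golang.org/x/crypto/sha3"
	)

	func keccak256(data ...byte) []byte {
		h := sha3.NewLegacyKeccak256() // Ethereum's "sha3" is pre-NIST Keccak
		h.Write(data)
		return h.Sum(nil)
	}

	func main() {
		fmt.Printf("emptyState: %x\n", keccak256())     // ...d85a470
		fmt.Printf("emptyRoot:  %x\n", keccak256(0x80)) // ...e363b421
	}
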
Not all files are shown, because too many files changed in this diff