
validator only write database state when enough distance (#116)

zjubfd committed 4 years ago
commit 7a1262ca14
4 changed files with 45 additions and 14 deletions
  1. consensus/consensus.go (+1 / -0)
  2. consensus/parlia/parlia.go (+8 / -0)
  3. consensus/parlia/snapshot.go (+14 / -0)
  4. core/blockchain.go (+22 / -14)

consensus/consensus.go (+1 / -0)

@@ -137,4 +137,5 @@ type PoSA interface {
 
 	IsSystemTransaction(tx *types.Transaction, header *types.Header) (bool, error)
 	IsSystemContract(to *common.Address) bool
+	EnoughDistance(chain ChainReader, header *types.Header) bool
 }
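
EnoughDistance is added to the PoSA interface so that non-consensus code (core/blockchain.go below) can ask the engine whether the local validator's next in-turn block is still far enough away for an expensive state write to be safe. Engines that are not PoSA are unaffected, since the caller only consults the method after a successful type assertion to consensus.PoSA.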

consensus/parlia/parlia.go (+8 / -0)

@@ -862,6 +862,14 @@ func (p *Parlia) Seal(chain consensus.ChainReader, block *types.Block, results c
 	return nil
 }
 
+func (p *Parlia) EnoughDistance(chain consensus.ChainReader, header *types.Header) bool {
+	snap, err := p.snapshot(chain, header.Number.Uint64()-1, header.ParentHash, nil)
+	if err != nil {
+		return true
+	}
+	return snap.enoughDistance(p.val)
+}
+
 // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
 // that a new block should have based on the previous blocks in the chain and the
 // current signer.
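
Two details worth noting in the Parlia implementation: the check is evaluated against the parent snapshot (header.Number - 1, header.ParentHash), and it fails open, returning true when that snapshot cannot be loaded so the caller still commits the trie rather than postponing the write indefinitely. p.val is presumably the local signer address configured when the node authorizes itself to seal; if that address is not in the validator set, the snapshot-level check below also returns true.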

consensus/parlia/snapshot.go (+14 / -0)

@@ -244,6 +244,20 @@ func (s *Snapshot) inturn(validator common.Address) bool {
 	return validators[offset] == validator
 }
 
+func (s *Snapshot) enoughDistance(validator common.Address) bool {
+	idx := s.indexOfVal(validator)
+	if idx < 0 {
+		return true
+	}
+	validatorNum := int64(len(s.validators()))
+	offset := (int64(s.Number) + 1) % int64(validatorNum)
+	if int64(idx) >= offset {
+		return int64(idx)-offset >= validatorNum/2
+	} else {
+		return validatorNum+int64(idx)-offset >= validatorNum/2
+	}
+}
+
 func (s *Snapshot) indexOfVal(validator common.Address) int {
 	validators := s.validators()
 	for idx, val := range validators {
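
The arithmetic treats the validator set as a rotation ring: offset is the index of the validator expected to seal the next block, and the check passes only when this validator's own in-turn slot is still at least half a rotation away. A minimal, self-contained sketch of the same math (not part of the commit; the 21-validator set and block number 100 are illustrative assumptions):

package main

import "fmt"

// enoughDistance mirrors Snapshot.enoughDistance: offset is the index of the
// validator expected to seal the next block, and the check passes only when
// this validator's own in-turn slot is at least validatorNum/2 blocks away
// on the rotation ring.
func enoughDistance(idx, blockNumber, validatorNum int64) bool {
	if idx < 0 {
		return true // not in the validator set: always safe to write
	}
	offset := (blockNumber + 1) % validatorNum
	if idx >= offset {
		return idx-offset >= validatorNum/2
	}
	return validatorNum+idx-offset >= validatorNum/2
}

func main() {
	// 21 validators, current block 100: the next in-turn index is 101 % 21 = 17.
	fmt.Println(enoughDistance(5, 100, 21))  // false: ring distance 21+5-17 = 9 < 10
	fmt.Println(enoughDistance(6, 100, 21))  // true:  ring distance 21+6-17 = 10 >= 10
	fmt.Println(enoughDistance(17, 100, 21)) // false: this validator seals the very next block
}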

core/blockchain.go (+22 / -14)

@@ -1399,21 +1399,29 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 
 			// If we exceeded out time allowance, flush an entire trie to disk
 			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
-				// If the header is missing (canonical chain behind), we're reorging a low
-				// diff sidechain. Suspend committing until this operation is completed.
-				header := bc.GetHeaderByNumber(chosen)
-				if header == nil {
-					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
-				} else {
-					// If we're exceeding limits but haven't reached a large enough memory gap,
-					// warn the user that the system is becoming unstable.
-					if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
-						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
+				canWrite := true
+				if posa, ok := bc.engine.(consensus.PoSA); ok {
+					if !posa.EnoughDistance(bc, block.Header()) {
+						canWrite = false
+					}
+				}
+				if canWrite {
+					// If the header is missing (canonical chain behind), we're reorging a low
+					// diff sidechain. Suspend committing until this operation is completed.
+					header := bc.GetHeaderByNumber(chosen)
+					if header == nil {
+						log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
+					} else {
+						// If we're exceeding limits but haven't reached a large enough memory gap,
+						// warn the user that the system is becoming unstable.
+						if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
+							log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
+						}
+						// Flush an entire trie and restart the counters
+						triedb.Commit(header.Root, true)
+						lastWrite = chosen
+						bc.gcproc = 0
 					}
-					// Flush an entire trie and restart the counters
-					triedb.Commit(header.Root, true)
-					lastWrite = chosen
-					bc.gcproc = 0
 				}
 			}
 			// Garbage collect anything below our required write retention
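
Net effect: when the trie time allowance is exceeded, a Parlia validator now flushes the in-memory trie to disk only if it is at least half a validator rotation away from sealing its next in-turn block, so the expensive triedb.Commit does not land in or just before its own sealing window. Nodes whose signer address is not in the validator set, and engines that are not PoSA, keep the previous behaviour; a skipped commit is simply retried on a later block once the distance is sufficient.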