|
|
@@ -17,7 +17,9 @@
|
|
|
package les
|
|
|
|
|
|
import (
|
|
|
+ "bytes"
|
|
|
"encoding/binary"
|
|
|
+ "fmt"
|
|
|
"io"
|
|
|
"math"
|
|
|
"sync"
|
|
|
@@ -83,15 +85,16 @@ type clientPool struct {
|
|
|
connectedMap map[enode.ID]*clientInfo
|
|
|
connectedQueue *prque.LazyQueue
|
|
|
|
|
|
- posFactors, negFactors priceFactors
|
|
|
+ defaultPosFactors, defaultNegFactors priceFactors
|
|
|
|
|
|
- connLimit int // The maximum number of connections that clientpool can support
|
|
|
- capLimit uint64 // The maximum cumulative capacity that clientpool can support
|
|
|
- connectedCap uint64 // The sum of the capacity of the current clientpool connected
|
|
|
- freeClientCap uint64 // The capacity value of each free client
|
|
|
- startTime mclock.AbsTime // The timestamp at which the clientpool started running
|
|
|
- cumulativeTime int64 // The cumulative running time of clientpool at the start point.
|
|
|
- disableBias bool // Disable connection bias(used in testing)
|
|
|
+ connLimit int // The maximum number of connections that clientpool can support
|
|
|
+ capLimit uint64 // The maximum cumulative capacity that clientpool can support
|
|
|
+ connectedCap uint64 // The sum of the capacity of the current clientpool connected
|
|
|
+ priorityConnected uint64 // The sum of the capacity of currently connected priority clients
|
|
|
+ freeClientCap uint64 // The capacity value of each free client
|
|
|
+ startTime mclock.AbsTime // The timestamp at which the clientpool started running
|
|
|
+ cumulativeTime int64 // The cumulative running time of clientpool at the start point.
|
|
|
+ disableBias bool // Disable connection bias (used in testing)
|
|
|
}
|
|
|
|
|
|
// clientPeer represents a client in the pool.
|
|
|
@@ -103,18 +106,22 @@ type clientPeer interface {
|
|
|
ID() enode.ID
|
|
|
freeClientId() string
|
|
|
updateCapacity(uint64)
|
|
|
+ freezeClient()
|
|
|
}
|
|
|
|
|
|
// clientInfo represents a connected client
|
|
|
type clientInfo struct {
|
|
|
- address string
|
|
|
- id enode.ID
|
|
|
- capacity uint64
|
|
|
- priority bool
|
|
|
- pool *clientPool
|
|
|
- peer clientPeer
|
|
|
- queueIndex int // position in connectedQueue
|
|
|
- balanceTracker balanceTracker
|
|
|
+ address string
|
|
|
+ id enode.ID
|
|
|
+ connectedAt mclock.AbsTime
|
|
|
+ capacity uint64
|
|
|
+ priority bool
|
|
|
+ pool *clientPool
|
|
|
+ peer clientPeer
|
|
|
+ queueIndex int // position in connectedQueue
|
|
|
+ balanceTracker balanceTracker
|
|
|
+ posFactors, negFactors priceFactors
|
|
|
+ balanceMetaInfo string
|
|
|
}
|
|
|
|
|
|
// connSetIndex callback updates clientInfo item index in connectedQueue
|
|
|
@@ -223,20 +230,26 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool {
|
|
|
)
|
|
|
pb := f.ndb.getOrNewPB(id)
|
|
|
posBalance = pb.value
|
|
|
- e := &clientInfo{pool: f, peer: peer, address: freeID, queueIndex: -1, id: id, priority: posBalance != 0}
|
|
|
|
|
|
nb := f.ndb.getOrNewNB(freeID)
|
|
|
if nb.logValue != 0 {
|
|
|
- negBalance = uint64(math.Exp(float64(nb.logValue-f.logOffset(now)) / fixedPointMultiplier))
|
|
|
- negBalance *= uint64(time.Second)
|
|
|
+ negBalance = uint64(math.Exp(float64(nb.logValue-f.logOffset(now))/fixedPointMultiplier) * float64(time.Second))
|
|
|
+ }
|
|
|
+ e := &clientInfo{
|
|
|
+ pool: f,
|
|
|
+ peer: peer,
|
|
|
+ address: freeID,
|
|
|
+ queueIndex: -1,
|
|
|
+ id: id,
|
|
|
+ connectedAt: now,
|
|
|
+ priority: posBalance != 0,
|
|
|
+ posFactors: f.defaultPosFactors,
|
|
|
+ negFactors: f.defaultNegFactors,
|
|
|
+ balanceMetaInfo: pb.meta,
|
|
|
}
|
|
|
// If the client is a free client, assign with a low free capacity,
|
|
|
// Otherwise assign with the given value(priority client)
|
|
|
- if !e.priority {
|
|
|
- capacity = f.freeClientCap
|
|
|
- }
|
|
|
- // Ensure the capacity will never lower than the free capacity.
|
|
|
- if capacity < f.freeClientCap {
|
|
|
+ if !e.priority || capacity == 0 {
|
|
|
capacity = f.freeClientCap
|
|
|
}
|
|
|
e.capacity = capacity
|
|
|
@@ -244,7 +257,7 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool {
|
|
|
// Starts a balance tracker
|
|
|
e.balanceTracker.init(f.clock, capacity)
|
|
|
e.balanceTracker.setBalance(posBalance, negBalance)
|
|
|
- f.setClientPriceFactors(e)
|
|
|
+ e.updatePriceFactors()
|
|
|
|
|
|
// If the number of clients already connected in the clientpool exceeds its
|
|
|
// capacity, evict some clients with lowest priority.
|
|
|
@@ -283,6 +296,7 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool {
|
|
|
f.dropClient(c, now, true)
|
|
|
}
|
|
|
}
|
|
|
+
|
|
|
// Register new client to connection queue.
|
|
|
f.connectedMap[id] = e
|
|
|
f.connectedQueue.Push(e)
|
|
|
@@ -291,6 +305,7 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool {
|
|
|
// If the current client is a paid client, monitor the status of client,
|
|
|
// downgrade it to normal client if positive balance is used up.
|
|
|
if e.priority {
|
|
|
+ f.priorityConnected += capacity
|
|
|
e.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
|
|
|
}
|
|
|
// If the capacity of client is not the default value(free capacity), notify
|
|
|
@@ -324,6 +339,38 @@ func (f *clientPool) disconnect(p clientPeer) {
|
|
|
f.dropClient(e, f.clock.Now(), false)
|
|
|
}
|
|
|
|
|
|
+// forClients iterates through a list of clients, calling the callback for each one.
|
|
|
+// If a client is not connected then clientInfo is nil. If the specified list is empty
|
|
|
+// then the callback is called for all connected clients.
|
|
|
+func (f *clientPool) forClients(ids []enode.ID, callback func(*clientInfo, enode.ID) error) error {
|
|
|
+ f.lock.Lock()
|
|
|
+ defer f.lock.Unlock()
|
|
|
+
|
|
|
+ if len(ids) > 0 {
|
|
|
+ for _, id := range ids {
|
|
|
+ if err := callback(f.connectedMap[id], id); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ for _, c := range f.connectedMap {
|
|
|
+ if err := callback(c, c.id); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// setDefaultFactors sets the default price factors applied to subsequently connected clients
|
|
|
+func (f *clientPool) setDefaultFactors(posFactors, negFactors priceFactors) {
|
|
|
+ f.lock.Lock()
|
|
|
+ defer f.lock.Unlock()
|
|
|
+
|
|
|
+ f.defaultPosFactors = posFactors
|
|
|
+ f.defaultNegFactors = negFactors
|
|
|
+}
|
|
|
+
|
|
|
// dropClient removes a client from the connected queue and finalizes its balance.
|
|
|
// If kick is true then it also initiates the disconnection.
|
|
|
func (f *clientPool) dropClient(e *clientInfo, now mclock.AbsTime, kick bool) {
|
|
|
@@ -334,6 +381,9 @@ func (f *clientPool) dropClient(e *clientInfo, now mclock.AbsTime, kick bool) {
|
|
|
f.connectedQueue.Remove(e.queueIndex)
|
|
|
delete(f.connectedMap, e.id)
|
|
|
f.connectedCap -= e.capacity
|
|
|
+ if e.priority {
|
|
|
+ f.priorityConnected -= e.capacity
|
|
|
+ }
|
|
|
totalConnectedGauge.Update(int64(f.connectedCap))
|
|
|
if kick {
|
|
|
clientKickedMeter.Mark(1)
|
|
|
@@ -345,6 +395,15 @@ func (f *clientPool) dropClient(e *clientInfo, now mclock.AbsTime, kick bool) {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+// capacityInfo returns the total capacity allowance, the total capacity of connected
|
|
|
+// clients and the total capacity of connected and prioritized clients
|
|
|
+func (f *clientPool) capacityInfo() (uint64, uint64, uint64) {
|
|
|
+ f.lock.Lock()
|
|
|
+ defer f.lock.Unlock()
|
|
|
+
|
|
|
+ return f.capLimit, f.connectedCap, f.priorityConnected
|
|
|
+}
|
|
|
+
|
|
|
// finalizeBalance stops the balance tracker, retrieves the final balances and
|
|
|
// stores them in posBalanceQueue and negBalanceQueue
|
|
|
func (f *clientPool) finalizeBalance(c *clientInfo, now mclock.AbsTime) {
|
|
|
@@ -374,14 +433,20 @@ func (f *clientPool) balanceExhausted(id enode.ID) {
|
|
|
if c == nil || !c.priority {
|
|
|
return
|
|
|
}
|
|
|
+ if c.priority {
|
|
|
+ f.priorityConnected -= c.capacity
|
|
|
+ }
|
|
|
c.priority = false
|
|
|
if c.capacity != f.freeClientCap {
|
|
|
f.connectedCap += f.freeClientCap - c.capacity
|
|
|
totalConnectedGauge.Update(int64(f.connectedCap))
|
|
|
c.capacity = f.freeClientCap
|
|
|
+ c.balanceTracker.setCapacity(c.capacity)
|
|
|
c.peer.updateCapacity(c.capacity)
|
|
|
}
|
|
|
- f.ndb.delPB(id)
|
|
|
+ pb := f.ndb.getOrNewPB(id)
|
|
|
+ pb.value = 0
|
|
|
+ f.ndb.setPB(id, pb)
|
|
|
}
|
|
|
|
|
|
// setConnLimit sets the maximum number and total capacity of connected clients,
|
|
|
@@ -400,6 +465,56 @@ func (f *clientPool) setLimits(totalConn int, totalCap uint64) {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+// setCapacity sets the assigned capacity of a connected client
|
|
|
+func (f *clientPool) setCapacity(c *clientInfo, capacity uint64) error {
|
|
|
+ if f.connectedMap[c.id] != c {
|
|
|
+ return fmt.Errorf("client %064x is not connected", c.id[:])
|
|
|
+ }
|
|
|
+ if c.capacity == capacity {
|
|
|
+ return nil
|
|
|
+ }
|
|
|
+ if !c.priority {
|
|
|
+ return errNoPriority
|
|
|
+ }
|
|
|
+ oldCapacity := c.capacity
|
|
|
+ c.capacity = capacity
|
|
|
+ f.connectedCap += capacity - oldCapacity
|
|
|
+ c.balanceTracker.setCapacity(capacity)
|
|
|
+ f.connectedQueue.Update(c.queueIndex)
|
|
|
+ if f.connectedCap > f.capLimit {
|
|
|
+ var kickList []*clientInfo
|
|
|
+ kick := true
|
|
|
+ f.connectedQueue.MultiPop(func(data interface{}, priority int64) bool {
|
|
|
+ client := data.(*clientInfo)
|
|
|
+ kickList = append(kickList, client)
|
|
|
+ f.connectedCap -= client.capacity
|
|
|
+ if client == c {
|
|
|
+ kick = false
|
|
|
+ }
|
|
|
+ return kick && (f.connectedCap > f.capLimit)
|
|
|
+ })
|
|
|
+ if kick {
|
|
|
+ now := mclock.Now()
|
|
|
+ for _, c := range kickList {
|
|
|
+ f.dropClient(c, now, true)
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ c.capacity = oldCapacity
|
|
|
+ c.balanceTracker.setCapacity(oldCapacity)
|
|
|
+ for _, c := range kickList {
|
|
|
+ f.connectedCap += c.capacity
|
|
|
+ f.connectedQueue.Push(c)
|
|
|
+ }
|
|
|
+ return errNoPriority
|
|
|
+ }
|
|
|
+ }
|
|
|
+ totalConnectedGauge.Update(int64(f.connectedCap))
|
|
|
+ f.priorityConnected += capacity - oldCapacity
|
|
|
+ c.updatePriceFactors()
|
|
|
+ c.peer.updateCapacity(c.capacity)
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
// requestCost feeds request cost after serving a request from the given peer.
|
|
|
func (f *clientPool) requestCost(p *peer, cost uint64) {
|
|
|
f.lock.Lock()
|
|
|
@@ -424,83 +539,86 @@ func (f *clientPool) logOffset(now mclock.AbsTime) int64 {
|
|
|
return f.cumulativeTime + cumulativeTime
|
|
|
}
|
|
|
|
|
|
-// setPriceFactors changes pricing factors for both positive and negative balances.
|
|
|
-// Applies to connected clients and also future connections.
|
|
|
-func (f *clientPool) setPriceFactors(posFactors, negFactors priceFactors) {
|
|
|
+// setClientPriceFactors sets the pricing factors for an individual connected client
|
|
|
+func (c *clientInfo) updatePriceFactors() {
|
|
|
+ c.balanceTracker.setFactors(true, c.negFactors.timeFactor+float64(c.capacity)*c.negFactors.capacityFactor/1000000, c.negFactors.requestFactor)
|
|
|
+ c.balanceTracker.setFactors(false, c.posFactors.timeFactor+float64(c.capacity)*c.posFactors.capacityFactor/1000000, c.posFactors.requestFactor)
|
|
|
+}
|
|
|
+
|
|
|
+// getPosBalance retrieves a single positive balance entry from cache or the database
|
|
|
+func (f *clientPool) getPosBalance(id enode.ID) posBalance {
|
|
|
f.lock.Lock()
|
|
|
defer f.lock.Unlock()
|
|
|
|
|
|
- f.posFactors, f.negFactors = posFactors, negFactors
|
|
|
- for _, c := range f.connectedMap {
|
|
|
- f.setClientPriceFactors(c)
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-// setClientPriceFactors sets the pricing factors for an individual connected client
|
|
|
-func (f *clientPool) setClientPriceFactors(c *clientInfo) {
|
|
|
- c.balanceTracker.setFactors(true, f.negFactors.timeFactor+float64(c.capacity)*f.negFactors.capacityFactor/1000000, f.negFactors.requestFactor)
|
|
|
- c.balanceTracker.setFactors(false, f.posFactors.timeFactor+float64(c.capacity)*f.posFactors.capacityFactor/1000000, f.posFactors.requestFactor)
|
|
|
+ return f.ndb.getOrNewPB(id)
|
|
|
}
|
|
|
|
|
|
-// addBalance updates the positive balance of a client.
|
|
|
-// If setTotal is false then the given amount is added to the balance.
|
|
|
-// If setTotal is true then amount represents the total amount ever added to the
|
|
|
-// given ID and positive balance is increased by (amount-lastTotal) while lastTotal
|
|
|
-// is updated to amount. This method also allows removing positive balance.
|
|
|
-func (f *clientPool) addBalance(id enode.ID, amount uint64, setTotal bool) {
|
|
|
+// updateBalance updates the balance of a client (either overwrites it or adds to it).
|
|
|
+// It also updates the balance meta info string.
|
|
|
+func (f *clientPool) updateBalance(id enode.ID, amount int64, meta string) (uint64, uint64, error) {
|
|
|
f.lock.Lock()
|
|
|
defer f.lock.Unlock()
|
|
|
|
|
|
pb := f.ndb.getOrNewPB(id)
|
|
|
+ var negBalance uint64
|
|
|
c := f.connectedMap[id]
|
|
|
if c != nil {
|
|
|
- posBalance, negBalance := c.balanceTracker.getBalance(f.clock.Now())
|
|
|
- pb.value = posBalance
|
|
|
- defer func() {
|
|
|
- c.balanceTracker.setBalance(pb.value, negBalance)
|
|
|
- if !c.priority && pb.value > 0 {
|
|
|
- // The capacity should be adjusted based on the requirement,
|
|
|
- // but we have no idea about the new capacity, need a second
|
|
|
- // call to udpate it.
|
|
|
- c.priority = true
|
|
|
- c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
|
|
|
- }
|
|
|
- }()
|
|
|
+ pb.value, negBalance = c.balanceTracker.getBalance(f.clock.Now())
|
|
|
}
|
|
|
- if setTotal {
|
|
|
- if pb.value+amount > pb.lastTotal {
|
|
|
- pb.value += amount - pb.lastTotal
|
|
|
- } else {
|
|
|
- pb.value = 0
|
|
|
+ oldBalance := pb.value
|
|
|
+ if amount > 0 {
|
|
|
+ if amount > maxBalance || pb.value > maxBalance-uint64(amount) {
|
|
|
+ return oldBalance, oldBalance, errBalanceOverflow
|
|
|
}
|
|
|
- pb.lastTotal = amount
|
|
|
+ pb.value += uint64(amount)
|
|
|
} else {
|
|
|
- pb.value += amount
|
|
|
- pb.lastTotal += amount
|
|
|
+ if uint64(-amount) > pb.value {
|
|
|
+ pb.value = 0
|
|
|
+ } else {
|
|
|
+ pb.value -= uint64(-amount)
|
|
|
+ }
|
|
|
}
|
|
|
+ pb.meta = meta
|
|
|
f.ndb.setPB(id, pb)
|
|
|
+ if c != nil {
|
|
|
+ c.balanceTracker.setBalance(pb.value, negBalance)
|
|
|
+ if !c.priority && pb.value > 0 {
|
|
|
+ // The capacity should be adjusted based on the requirement,
|
|
|
+ // but we have no idea about the new capacity, need a second
|
|
|
+ // call to update it.
|
|
|
+ c.priority = true
|
|
|
+ f.priorityConnected += c.capacity
|
|
|
+ c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
|
|
|
+ }
|
|
|
+ // if balance is set to zero then reverting to non-priority status
|
|
|
+ // is handled by the balanceExhausted callback
|
|
|
+ c.balanceMetaInfo = meta
|
|
|
+ }
|
|
|
+ return oldBalance, pb.value, nil
|
|
|
}
|
|
|
|
|
|
// posBalance represents a recently accessed positive balance entry
|
|
|
type posBalance struct {
|
|
|
- value, lastTotal uint64
|
|
|
+ value uint64
|
|
|
+ meta string
|
|
|
}
|
|
|
|
|
|
// EncodeRLP implements rlp.Encoder
|
|
|
func (e *posBalance) EncodeRLP(w io.Writer) error {
|
|
|
- return rlp.Encode(w, []interface{}{e.value, e.lastTotal})
|
|
|
+ return rlp.Encode(w, []interface{}{e.value, e.meta})
|
|
|
}
|
|
|
|
|
|
// DecodeRLP implements rlp.Decoder
|
|
|
func (e *posBalance) DecodeRLP(s *rlp.Stream) error {
|
|
|
var entry struct {
|
|
|
- Value, LastTotal uint64
|
|
|
+ Value uint64
|
|
|
+ Meta string
|
|
|
}
|
|
|
if err := s.Decode(&entry); err != nil {
|
|
|
return err
|
|
|
}
|
|
|
e.value = entry.Value
|
|
|
- e.lastTotal = entry.LastTotal
|
|
|
+ e.meta = entry.Meta
|
|
|
return nil
|
|
|
}
|
|
|
|
|
|
@@ -526,7 +644,10 @@ func (e *negBalance) DecodeRLP(s *rlp.Stream) error {
|
|
|
|
|
|
const (
|
|
|
// nodeDBVersion is the version identifier of the node data in db
|
|
|
- nodeDBVersion = 0
|
|
|
+ //
|
|
|
+ // Changelog:
|
|
|
+ // * Replace `lastTotal` with `meta` in positive balance: version 0=>1
|
|
|
+ nodeDBVersion = 1
|
|
|
|
|
|
// dbCleanupCycle is the cycle of db for useless data cleanup
|
|
|
dbCleanupCycle = time.Hour
|
|
|
@@ -614,6 +735,10 @@ func (db *nodeDB) getOrNewPB(id enode.ID) posBalance {
|
|
|
}
|
|
|
|
|
|
func (db *nodeDB) setPB(id enode.ID, b posBalance) {
|
|
|
+ if b.value == 0 && len(b.meta) == 0 {
|
|
|
+ db.delPB(id)
|
|
|
+ return
|
|
|
+ }
|
|
|
key := db.key(id.Bytes(), false)
|
|
|
enc, err := rlp.EncodeToBytes(&(b))
|
|
|
if err != nil {
|
|
|
@@ -630,6 +755,37 @@ func (db *nodeDB) delPB(id enode.ID) {
|
|
|
db.pcache.Remove(string(key))
|
|
|
}
|
|
|
|
|
|
+// getPosBalanceIDs returns a lexicographically ordered list of IDs of accounts
|
|
|
+// with a positive balance
|
|
|
+func (db *nodeDB) getPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
|
|
|
+ if maxCount <= 0 {
|
|
|
+ return
|
|
|
+ }
|
|
|
+ it := db.db.NewIteratorWithStart(db.key(start.Bytes(), false))
|
|
|
+ defer it.Release()
|
|
|
+ for i := len(stop[:]) - 1; i >= 0; i-- {
|
|
|
+ stop[i]--
|
|
|
+ if stop[i] != 255 {
|
|
|
+ break
|
|
|
+ }
|
|
|
+ }
|
|
|
+ stopKey := db.key(stop.Bytes(), false)
|
|
|
+ keyLen := len(stopKey)
|
|
|
+
|
|
|
+ for it.Next() {
|
|
|
+ var id enode.ID
|
|
|
+ if len(it.Key()) != keyLen || bytes.Compare(it.Key(), stopKey) == 1 {
|
|
|
+ return
|
|
|
+ }
|
|
|
+ copy(id[:], it.Key()[keyLen-len(id):])
|
|
|
+ result = append(result, id)
|
|
|
+ if len(result) == maxCount {
|
|
|
+ return
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return
|
|
|
+}
|
|
|
+
|
|
|
func (db *nodeDB) getOrNewNB(id string) negBalance {
|
|
|
key := db.key([]byte(id), true)
|
|
|
item, exist := db.ncache.Get(string(key))
|