clientpool_test.go 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598
  1. // Copyright 2019 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package les
  17. import (
  18. "fmt"
  19. "math/rand"
  20. "testing"
  21. "time"
  22. "github.com/ethereum/go-ethereum/common/mclock"
  23. "github.com/ethereum/go-ethereum/core/rawdb"
  24. lps "github.com/ethereum/go-ethereum/les/lespay/server"
  25. "github.com/ethereum/go-ethereum/p2p/enode"
  26. "github.com/ethereum/go-ethereum/p2p/enr"
  27. "github.com/ethereum/go-ethereum/p2p/nodestate"
  28. )
// The TestClientPool* tests run the randomized pool simulation with various
// combinations of active slot limit (L), total client count (C) and number
// of paid clients (P). "Free" variants have no paid clients and randomly
// disconnect peers; "P" variants only disconnect peers when the pool evicts
// them.
func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}

func TestClientPoolL40C200Free(t *testing.T) {
	testClientPool(t, 40, 200, 0, true)
}

func TestClientPoolL100C300Free(t *testing.T) {
	testClientPool(t, 100, 300, 0, true)
}

func TestClientPoolL10C100P4(t *testing.T) {
	testClientPool(t, 10, 100, 4, false)
}

func TestClientPoolL40C200P30(t *testing.T) {
	testClientPool(t, 40, 200, 30, false)
}

func TestClientPoolL100C300P20(t *testing.T) {
	testClientPool(t, 100, 300, 20, false)
}

// testClientPoolTicks is the number of simulated seconds (ticks) each
// randomized pool simulation runs for.
const testClientPoolTicks = 100000
// poolTestPeer is a minimal peer implementation used by the client pool tests.
type poolTestPeer struct {
	node            *enode.Node // fake signed node record derived from index
	index           int         // test peer identifier; encoded into the low two node ID bytes
	disconnCh       chan int    // receives the peer's index on pool-initiated disconnection (may be nil)
	cap             uint64      // last capacity reported through updateCapacity
	inactiveAllowed bool        // whether the peer accepts being demoted to inactive state
}
// testStateMachine creates a fresh NodeStateMachine using the shared server
// setup; note it uses the real system clock, not the simulated test clock.
func testStateMachine() *nodestate.NodeStateMachine {
	return nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)
}
  58. func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
  59. return &poolTestPeer{
  60. index: i,
  61. disconnCh: disconnCh,
  62. node: enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
  63. }
  64. }
  65. func (i *poolTestPeer) Node() *enode.Node {
  66. return i.node
  67. }
  68. func (i *poolTestPeer) freeClientId() string {
  69. return fmt.Sprintf("addr #%d", i.index)
  70. }
  71. func (i *poolTestPeer) updateCapacity(cap uint64) {
  72. i.cap = cap
  73. }
// freeze is a no-op; the pool tests do not simulate frozen client service.
func (i *poolTestPeer) freeze() {}
  75. func (i *poolTestPeer) allowInactive() bool {
  76. return i.inactiveAllowed
  77. }
  78. func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
  79. temp := pool.ns.GetField(p.node, clientInfoField) == nil
  80. if temp {
  81. pool.ns.SetField(p.node, connAddressField, p.freeClientId())
  82. }
  83. n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*lps.NodeBalance)
  84. pos, neg = n.GetBalance()
  85. if temp {
  86. pool.ns.SetField(p.node, connAddressField, nil)
  87. }
  88. return
  89. }
  90. func addBalance(pool *clientPool, id enode.ID, amount int64) {
  91. pool.forClients([]enode.ID{id}, func(c *clientInfo) {
  92. c.balance.AddBalance(amount)
  93. })
  94. }
  95. func checkDiff(a, b uint64) bool {
  96. maxDiff := (a + b) / 2000
  97. if maxDiff < 1 {
  98. maxDiff = 1
  99. }
  100. return a > b+maxDiff || b > a+maxDiff
  101. }
// testClientPool simulates a pool with activeLimit active slots shared by
// clientCount peers. Peers are randomly connected and (if randomDisconnect
// is set) randomly disconnected over testClientPoolTicks simulated seconds.
// A quarter of the way in, the first paidCount peers receive a positive
// balance large enough to cover the rest of the run. At the end each peer's
// total connected time must fall within 20% of the statistically expected
// value.
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	rand.Seed(time.Now().UnixNano())
	var (
		clock     mclock.Simulated
		db        = rawdb.NewMemoryDatabase()
		connected = make([]bool, clientCount)
		connTicks = make([]int, clientCount) // accumulated connected time per peer, in ticks
		disconnCh = make(chan int, clientCount)
		// disconnFn decodes the peer index back out of the two ID bytes
		// set by newPoolTestPeer and reports pool-initiated kicks.
		disconnFn = func(id enode.ID) {
			disconnCh <- int(id[0]) + int(id[1])<<8
		}
		pool = newClientPool(testStateMachine(), db, 1, 0, &clock, disconnFn)
	)
	pool.ns.Start()
	pool.setLimits(activeLimit, uint64(activeLimit))
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	// pool should accept new peers up to its connected limit
	for i := 0; i < activeLimit; i++ {
		if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
			connected[i] = true
		} else {
			t.Fatalf("Test peer #%d rejected", i)
		}
	}
	// randomly connect and disconnect peers, expect to have a similar total connection time at the end
	for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
		clock.Run(1 * time.Second)
		if tickCounter == testClientPoolTicks/4 {
			// give a positive balance to some of the peers
			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
			for i := 0; i < paidCount; i++ {
				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
			}
		}
		i := rand.Intn(clientCount)
		if connected[i] {
			if randomDisconnect {
				pool.disconnect(newPoolTestPeer(i, disconnCh))
				connected[i] = false
				connTicks[i] += tickCounter // close the open connection interval
			}
		} else {
			if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
				connected[i] = true
				connTicks[i] -= tickCounter // open an interval; closed by a later += tick
			} else {
				pool.disconnect(newPoolTestPeer(i, disconnCh))
			}
		}
		// drain pool-initiated disconnections that happened during this tick
	pollDisconnects:
		for {
			select {
			case i := <-disconnCh:
				pool.disconnect(newPoolTestPeer(i, disconnCh))
				if connected[i] {
					connTicks[i] += tickCounter
					connected[i] = false
				}
			default:
				break pollDisconnects
			}
		}
	}
	// expected connected time: full contention in the first half, then the
	// free peers share whatever the paid peers leave in the second half
	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
	expMin := expTicks - expTicks/5
	expMax := expTicks + expTicks/5
	// paid peers are expected to stay connected for the entire second half
	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
	paidMin := paidTicks - paidTicks/5
	paidMax := paidTicks + paidTicks/5
	// check if the total connected time of peers are all in the expected range
	for i, c := range connected {
		if c {
			connTicks[i] += testClientPoolTicks // close intervals still open at the end
		}
		min, max := expMin, expMax
		if i < paidCount {
			// expect a higher amount for clients with a positive balance
			min, max = paidMin, paidMax
		}
		if connTicks[i] < min || connTicks[i] > max {
			t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
		}
	}
	pool.stop()
}
// testPriorityConnect connects p to the pool and then tries to raise its
// capacity to cap. With expSuccess true both steps must succeed; with
// expSuccess false at least one of them must fail.
func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
	// NOTE: the if-scoped cap below deliberately shadows the cap parameter;
	// the setCapacity call further down uses the parameter value.
	if cap, _ := pool.connect(p); cap == 0 {
		if expSuccess {
			t.Fatalf("Failed to connect paid client")
		} else {
			return
		}
	}
	if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil {
		if expSuccess {
			t.Fatalf("Failed to raise capacity of paid client")
		} else {
			return
		}
	}
	if !expSuccess {
		t.Fatalf("Should reject high capacity paid client")
	}
}
  206. func TestConnectPaidClient(t *testing.T) {
  207. var (
  208. clock mclock.Simulated
  209. db = rawdb.NewMemoryDatabase()
  210. )
  211. pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
  212. pool.ns.Start()
  213. defer pool.stop()
  214. pool.setLimits(10, uint64(10))
  215. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  216. // Add balance for an external client and mark it as paid client
  217. addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
  218. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
  219. }
  220. func TestConnectPaidClientToSmallPool(t *testing.T) {
  221. var (
  222. clock mclock.Simulated
  223. db = rawdb.NewMemoryDatabase()
  224. )
  225. pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
  226. pool.ns.Start()
  227. defer pool.stop()
  228. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  229. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  230. // Add balance for an external client and mark it as paid client
  231. addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
  232. // Connect a fat paid client to pool, should reject it.
  233. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
  234. }
  235. func TestConnectPaidClientToFullPool(t *testing.T) {
  236. var (
  237. clock mclock.Simulated
  238. db = rawdb.NewMemoryDatabase()
  239. )
  240. removeFn := func(enode.ID) {} // Noop
  241. pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
  242. pool.ns.Start()
  243. defer pool.stop()
  244. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  245. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  246. for i := 0; i < 10; i++ {
  247. addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
  248. pool.connect(newPoolTestPeer(i, nil))
  249. }
  250. addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
  251. if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
  252. t.Fatalf("Low balance paid client should be rejected")
  253. }
  254. clock.Run(time.Second)
  255. addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
  256. if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 {
  257. t.Fatalf("High balance paid client should be accepted")
  258. }
  259. }
// TestPaidClientKickedOut checks that once paid clients have exhausted their
// balance, a newly connecting free client evicts the oldest of them.
func TestPaidClientKickedOut(t *testing.T) {
	var (
		clock    mclock.Simulated
		db       = rawdb.NewMemoryDatabase()
		kickedCh = make(chan int, 100)
	)
	removeFn := func(id enode.ID) {
		kickedCh <- int(id[0])
	}
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	// disable balance expiration so only connection time drains the balances
	pool.bt.SetExpirationTCs(0, 0)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
		pool.connect(newPoolTestPeer(i, kickedCh))
		// stagger the connection times so peer #0 is the oldest
		clock.Run(time.Millisecond)
	}
	// run long enough for every allowance to be used up
	clock.Run(defaultConnectedBias + time.Second*11)
	if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 {
		t.Fatalf("Free client should be accepted")
	}
	// the oldest peer (#0) must be the one kicked to make room
	select {
	case id := <-kickedCh:
		if id != 0 {
			t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
		}
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
}
  293. func TestConnectFreeClient(t *testing.T) {
  294. var (
  295. clock mclock.Simulated
  296. db = rawdb.NewMemoryDatabase()
  297. )
  298. pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
  299. pool.ns.Start()
  300. defer pool.stop()
  301. pool.setLimits(10, uint64(10))
  302. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  303. if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 {
  304. t.Fatalf("Failed to connect free client")
  305. }
  306. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
  307. }
  308. func TestConnectFreeClientToFullPool(t *testing.T) {
  309. var (
  310. clock mclock.Simulated
  311. db = rawdb.NewMemoryDatabase()
  312. )
  313. removeFn := func(enode.ID) {} // Noop
  314. pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
  315. pool.ns.Start()
  316. defer pool.stop()
  317. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  318. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  319. for i := 0; i < 10; i++ {
  320. pool.connect(newPoolTestPeer(i, nil))
  321. }
  322. if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
  323. t.Fatalf("New free client should be rejected")
  324. }
  325. clock.Run(time.Minute)
  326. if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 {
  327. t.Fatalf("New free client should be rejected")
  328. }
  329. clock.Run(time.Millisecond)
  330. clock.Run(4 * time.Minute)
  331. if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 {
  332. t.Fatalf("Old client connects more than 5min should be kicked")
  333. }
  334. }
// TestFreeClientKickedOut checks that when new free clients connect to a full
// pool, the previously connected (older) free clients are the ones evicted.
func TestFreeClientKickedOut(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 100)
	)
	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, kicked))
		// stagger the connection times so eviction order is deterministic
		clock.Run(time.Millisecond)
	}
	// a brand new free client cannot displace equally fresh ones
	if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	// wait for the removal callback triggered by the rejected attempt
	// (NOTE(review): presumably for peer #10 itself — the id is not asserted)
	select {
	case <-kicked:
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
	pool.disconnect(newPoolTestPeer(10, kicked))
	clock.Run(5 * time.Minute)
	// after five minutes the original ten peers are old enough to be replaced
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i+10, kicked))
	}
	// exactly the old peers (ids below 10) must be kicked
	for i := 0; i < 10; i++ {
		select {
		case id := <-kicked:
			if id >= 10 {
				t.Fatalf("Old client should be kicked, now got: %d", id)
			}
		case <-time.NewTimer(time.Second).C:
			t.Fatalf("timeout")
		}
	}
}
  375. func TestPositiveBalanceCalculation(t *testing.T) {
  376. var (
  377. clock mclock.Simulated
  378. db = rawdb.NewMemoryDatabase()
  379. kicked = make(chan int, 10)
  380. )
  381. removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
  382. pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
  383. pool.ns.Start()
  384. defer pool.stop()
  385. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  386. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  387. addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
  388. testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
  389. clock.Run(time.Minute)
  390. pool.disconnect(newPoolTestPeer(0, kicked))
  391. pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
  392. if checkDiff(pb, uint64(time.Minute*2)) {
  393. t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
  394. }
  395. }
// TestDowngradePriorityClient checks that a priority client is downgraded to
// free-client capacity once its positive balance runs out, and that a fresh
// deposit restores the positive balance.
func TestDowngradePriorityClient(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn)
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	// fund the peer for one minute and connect at capacity 10
	p := newPoolTestPeer(0, kicked)
	addBalance(pool, p.node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, p, 10, true)
	if p.cap != 10 {
		t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
	}
	clock.Run(time.Minute)             // All positive balance should be used up.
	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
	// the peer should now be demoted to the minimal free capacity
	if p.cap != 1 {
		t.Fatalf("The capcacity of peer should be downgraded, got: %d", p.cap)
	}
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if pb != 0 {
		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
	}
	// a new deposit must show up as positive balance again
	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
	pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
	}
}
  429. func TestNegativeBalanceCalculation(t *testing.T) {
  430. var (
  431. clock mclock.Simulated
  432. db = rawdb.NewMemoryDatabase()
  433. )
  434. pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
  435. pool.ns.Start()
  436. defer pool.stop()
  437. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  438. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
  439. for i := 0; i < 10; i++ {
  440. pool.connect(newPoolTestPeer(i, nil))
  441. }
  442. clock.Run(time.Second)
  443. for i := 0; i < 10; i++ {
  444. pool.disconnect(newPoolTestPeer(i, nil))
  445. _, nb := getBalance(pool, newPoolTestPeer(i, nil))
  446. if nb != 0 {
  447. t.Fatalf("Short connection shouldn't be recorded")
  448. }
  449. }
  450. for i := 0; i < 10; i++ {
  451. pool.connect(newPoolTestPeer(i, nil))
  452. }
  453. clock.Run(time.Minute)
  454. for i := 0; i < 10; i++ {
  455. pool.disconnect(newPoolTestPeer(i, nil))
  456. _, nb := getBalance(pool, newPoolTestPeer(i, nil))
  457. if checkDiff(nb, uint64(time.Minute)/1000) {
  458. t.Fatalf("Negative balance mismatch, want %v, got %v", uint64(time.Minute)/1000, nb)
  459. }
  460. }
  461. }
// TestInactiveClient checks the inactive peer mechanism: with only two active
// slots, the two highest-balance peers that allow inactive mode stay active
// while the others are demoted to zero capacity instead of being
// disconnected, and balance changes reshuffle the active set accordingly.
func TestInactiveClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	pool.ns.Start()
	defer pool.stop()
	pool.setLimits(2, uint64(2)) // only two active slots
	p1 := newPoolTestPeer(1, nil)
	p1.inactiveAllowed = true
	p2 := newPoolTestPeer(2, nil)
	p2.inactiveAllowed = true
	p3 := newPoolTestPeer(3, nil)
	p3.inactiveAllowed = true
	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
	// p1: 1000  p2: 0  p3: 2000
	p1.cap, _ = pool.connect(p1)
	if p1.cap != 1 {
		t.Fatalf("Failed to connect peer #1")
	}
	p2.cap, _ = pool.connect(p2)
	if p2.cap != 1 {
		t.Fatalf("Failed to connect peer #2")
	}
	// connecting p3 (highest balance) demotes the poorest peer p2; the
	// demotion is reported back through updateCapacity, i.e. p2.cap
	p3.cap, _ = pool.connect(p3)
	if p3.cap != 1 {
		t.Fatalf("Failed to connect peer #3")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
	// p1: 1000  p2: 3000  p3: 2000 — p2 now outranks p1
	if p2.cap != 1 {
		t.Fatalf("Failed to activate peer #2")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000 — p1 outranks p2 again
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	// enable balance drain through connection time
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
	p4 := newPoolTestPeer(4, nil)
	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000  p4: 1500
	p4.cap, _ = pool.connect(p4)
	if p4.cap != 1 {
		t.Fatalf("Failed to activate peer #4")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	// let the active peers spend balance so p4 drops below p1
	clock.Run(time.Second * 600)
	// manually trigger a check to avoid a long real-time wait
	pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0)
	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0)
	// p1: 1000  p2: 500  p3: 2000  p4: 900
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
	pool.disconnect(p2)
	pool.disconnect(p4)
	// draining p1's balance must not deactivate it while slots are free,
	// and must not resurrect the disconnected p2
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Should not activate peer #2")
	}
}