clientpool_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"fmt"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
)

const defaultConnectedBias = time.Minute * 3

func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}

func TestClientPoolL40C200Free(t *testing.T) {
	testClientPool(t, 40, 200, 0, true)
}

func TestClientPoolL100C300Free(t *testing.T) {
	testClientPool(t, 100, 300, 0, true)
}

func TestClientPoolL10C100P4(t *testing.T) {
	testClientPool(t, 10, 100, 4, false)
}

func TestClientPoolL40C200P30(t *testing.T) {
	testClientPool(t, 40, 200, 30, false)
}

func TestClientPoolL100C300P20(t *testing.T) {
	testClientPool(t, 100, 300, 20, false)
}

const testClientPoolTicks = 100000
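
// poolTestPeer is a dummy client peer used by the tests; its index determines
// both the node ID and the free client identifier, and UpdateCapacity records
// the last capacity assigned by the pool.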
type poolTestPeer struct {
	node            *enode.Node
	index           int
	disconnCh       chan int
	cap             uint64
	inactiveAllowed bool
}

func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
	return &poolTestPeer{
		index:     i,
		disconnCh: disconnCh,
		node:      enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
	}
}

func (i *poolTestPeer) Node() *enode.Node {
	return i.node
}

func (i *poolTestPeer) FreeClientId() string {
	return fmt.Sprintf("addr #%d", i.index)
}

func (i *poolTestPeer) InactiveAllowance() time.Duration {
	if i.inactiveAllowed {
		return time.Second * 10
	}
	return 0
}

func (i *poolTestPeer) UpdateCapacity(capacity uint64, requested bool) {
	i.cap = capacity
}
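
// Disconnect reports the disconnected peer's index on disconnCh (if set),
// reconstructing it from the first two bytes of the node ID.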
func (i *poolTestPeer) Disconnect() {
	if i.disconnCh == nil {
		return
	}
	id := i.node.ID()
	i.disconnCh <- int(id[0]) + int(id[1])<<8
}

func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) {
	pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb AtomicBalanceOperator) {
		pos, neg = nb.GetBalance()
	})
	return
}

func addBalance(pool *ClientPool, id enode.ID, amount int64) {
	pool.BalanceOperation(id, "", func(nb AtomicBalanceOperator) {
		nb.AddBalance(amount)
	})
}
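
// checkDiff reports whether a and b differ by more than roughly 0.1% of their
// mean (with a minimum tolerance of 1), i.e. it returns true on a mismatch.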
func checkDiff(a, b uint64) bool {
	maxDiff := (a + b) / 2000
	if maxDiff < 1 {
		maxDiff = 1
	}
	return a > b+maxDiff || b > a+maxDiff
}

func connect(pool *ClientPool, peer *poolTestPeer) uint64 {
	pool.Register(peer)
	return peer.cap
}

func disconnect(pool *ClientPool, peer *poolTestPeer) {
	pool.Unregister(peer)
}

func alwaysTrueFn() bool {
	return true
}
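
// testClientPool runs a randomized connect/disconnect simulation with clientCount
// clients competing for activeLimit active slots. During the simulation the first
// paidCount clients receive a positive balance; at the end the accumulated
// connection time of each client is checked against the expected range.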
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	rand.Seed(time.Now().UnixNano())
	var (
		clock     mclock.Simulated
		db        = rawdb.NewMemoryDatabase()
		connected = make([]bool, clientCount)
		connTicks = make([]int, clientCount)
		disconnCh = make(chan int, clientCount)
		pool      = NewClientPool(db, 1, 0, &clock, alwaysTrueFn)
	)
	pool.Start()
	pool.SetExpirationTCs(0, 1000)
	pool.SetLimits(uint64(activeLimit), uint64(activeLimit))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// pool should accept new peers up to its connected limit
	for i := 0; i < activeLimit; i++ {
		if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
			connected[i] = true
		} else {
			t.Fatalf("Test peer #%d rejected", i)
		}
	}
	// randomly connect and disconnect peers, expecting a similar total connection time at the end
	for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
		clock.Run(1 * time.Second)
		if tickCounter == testClientPoolTicks/4 {
			// give a positive balance to some of the peers
			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
			for i := 0; i < paidCount; i++ {
				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
			}
		}
		i := rand.Intn(clientCount)
		if connected[i] {
			if randomDisconnect {
				disconnect(pool, newPoolTestPeer(i, disconnCh))
				connected[i] = false
				connTicks[i] += tickCounter
			}
		} else {
			if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
				connected[i] = true
				connTicks[i] -= tickCounter
			} else {
				disconnect(pool, newPoolTestPeer(i, disconnCh))
			}
		}
	pollDisconnects:
		for {
			select {
			case i := <-disconnCh:
				disconnect(pool, newPoolTestPeer(i, disconnCh))
				if connected[i] {
					connTicks[i] += tickCounter
					connected[i] = false
				}
			default:
				break pollDisconnects
			}
		}
	}
	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
	expMin := expTicks - expTicks/5
	expMax := expTicks + expTicks/5
	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
	paidMin := paidTicks - paidTicks/5
	paidMax := paidTicks + paidTicks/5

	// check that the total connected time of each peer is in the expected range
	for i, c := range connected {
		if c {
			connTicks[i] += testClientPoolTicks
		}
		min, max := expMin, expMax
		if i < paidCount {
			// expect a higher amount for clients with a positive balance
			min, max = paidMin, paidMax
		}
		if connTicks[i] < min || connTicks[i] > max {
			t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
		}
	}
	pool.Stop()
}
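
// testPriorityConnect registers p and then tries to raise its capacity to cap,
// requiring the whole sequence to either succeed or fail according to expSuccess.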
func testPriorityConnect(t *testing.T, pool *ClientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
	if cap := connect(pool, p); cap == 0 {
		if expSuccess {
			t.Fatalf("Failed to connect paid client")
		} else {
			return
		}
	}
	if newCap, _ := pool.SetCapacity(p.node, cap, defaultConnectedBias, true); newCap != cap {
		if expSuccess {
			t.Fatalf("Failed to raise capacity of paid client")
		} else {
			return
		}
	}
	if !expSuccess {
		t.Fatalf("Should reject high capacity paid client")
	}
}

func TestConnectPaidClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// Add balance for an external client and mark it as a paid client
	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
}

func TestConnectPaidClientToSmallPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	// Add balance for an external client and mark it as a paid client
	addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))

	// connect a fat paid client to the pool, it should be rejected
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
}

func TestConnectPaidClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
		connect(pool, newPoolTestPeer(i, nil))
	}
	addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
	if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("Low balance paid client should be rejected")
	}
	clock.Run(time.Second)
	addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
	if cap := connect(pool, newPoolTestPeer(12, nil)); cap == 0 {
		t.Fatalf("High balance paid client should be accepted")
	}
}
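
// TestPaidClientKickedOut checks that once the paid balances are exhausted,
// the oldest connected client is kicked out to make room for a newly
// connecting peer.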
func TestPaidClientKickedOut(t *testing.T) {
	var (
		clock    mclock.Simulated
		db       = rawdb.NewMemoryDatabase()
		kickedCh = make(chan int, 100)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	pool.SetExpirationTCs(0, 0)
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
		connect(pool, newPoolTestPeer(i, kickedCh))
		clock.Run(time.Millisecond)
	}
	clock.Run(defaultConnectedBias + time.Second*11)
	if cap := connect(pool, newPoolTestPeer(11, kickedCh)); cap == 0 {
		t.Fatalf("Free client should be accepted")
	}
	select {
	case id := <-kickedCh:
		if id != 0 {
			t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
		}
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
}

func TestConnectFreeClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10))
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	if cap := connect(pool, newPoolTestPeer(0, nil)); cap == 0 {
		t.Fatalf("Failed to connect free client")
	}
	testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
}

func TestConnectFreeClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Minute)
	if cap := connect(pool, newPoolTestPeer(12, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	clock.Run(time.Millisecond)
	clock.Run(4 * time.Minute)
	if cap := connect(pool, newPoolTestPeer(13, nil)); cap == 0 {
		t.Fatalf("Old client connected for more than 5 minutes should have been kicked")
	}
}
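
// TestFreeClientKickedOut checks that a full pool rejects a brand-new free
// client, and that sufficiently old free clients are kicked out in favour of
// newly connecting ones.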
func TestFreeClientKickedOut(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 100)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, kicked))
		clock.Run(time.Millisecond)
	}
	if cap := connect(pool, newPoolTestPeer(10, kicked)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	select {
	case <-kicked:
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
	disconnect(pool, newPoolTestPeer(10, kicked))
	clock.Run(5 * time.Minute)
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i+10, kicked))
	}
	for i := 0; i < 10; i++ {
		select {
		case id := <-kicked:
			if id >= 10 {
				t.Fatalf("Old client should be kicked, got: %d", id)
			}
		case <-time.NewTimer(time.Second).C:
			t.Fatalf("timeout")
		}
	}
}

func TestPositiveBalanceCalculation(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
	testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
	clock.Run(time.Minute)

	disconnect(pool, newPoolTestPeer(0, kicked))
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute*2)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
	}
}
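
// TestDowngradePriorityClient checks that a priority client is downgraded to
// the minimal capacity once its positive balance is used up.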
func TestDowngradePriorityClient(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})

	p := newPoolTestPeer(0, kicked)
	addBalance(pool, p.node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, p, 10, true)
	if p.cap != 10 {
		t.Fatalf("The capacity of the priority peer hasn't been updated, got: %d", p.cap)
	}

	clock.Run(time.Minute)             // All positive balance should be used up.
	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
	if p.cap != 1 {
		t.Fatalf("The capacity of the peer should be downgraded, got: %d", p.cap)
	}
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if pb != 0 {
		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
	}
	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
	pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
	}
}
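
// TestNegativeBalanceCalculation checks that free clients accumulate negative
// balance in proportion to their connection time, while very short connections
// are not recorded.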
func TestNegativeBalanceCalculation(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetExpirationTCs(0, 3600)
	pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})

	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	clock.Run(time.Second)

	for i := 0; i < 10; i++ {
		disconnect(pool, newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		if nb != 0 {
			t.Fatalf("Short connection shouldn't be recorded")
		}
	}
	for i := 0; i < 10; i++ {
		connect(pool, newPoolTestPeer(i, nil))
	}
	clock.Run(time.Minute)
	for i := 0; i < 10; i++ {
		disconnect(pool, newPoolTestPeer(i, nil))
		_, nb := getBalance(pool, newPoolTestPeer(i, nil))
		exp := uint64(time.Minute) / 1000
		exp -= exp / 120 // correct for negative balance expiration
		if checkDiff(nb, exp) {
			t.Fatalf("Negative balance mismatch, want %v, got %v", exp, nb)
		}
	}
}
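
// TestInactiveClient checks that peers allowing inactive mode are switched
// between active and inactive state based on their relative priorities as
// balances change, instead of being disconnected.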
func TestInactiveClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
	pool.Start()
	defer pool.Stop()
	pool.SetLimits(2, uint64(2))

	p1 := newPoolTestPeer(1, nil)
	p1.inactiveAllowed = true
	p2 := newPoolTestPeer(2, nil)
	p2.inactiveAllowed = true
	p3 := newPoolTestPeer(3, nil)
	p3.inactiveAllowed = true
	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
	// p1: 1000  p2: 0  p3: 2000
	p1.cap = connect(pool, p1)
	if p1.cap != 1 {
		t.Fatalf("Failed to connect peer #1")
	}
	p2.cap = connect(pool, p2)
	if p2.cap != 1 {
		t.Fatalf("Failed to connect peer #2")
	}
	p3.cap = connect(pool, p3)
	if p3.cap != 1 {
		t.Fatalf("Failed to connect peer #3")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
	// p1: 1000  p2: 3000  p3: 2000
	if p2.cap != 1 {
		t.Fatalf("Failed to activate peer #2")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
	p4 := newPoolTestPeer(4, nil)
	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000  p4: 1500
	p4.cap = connect(pool, p4)
	if p4.cap != 1 {
		t.Fatalf("Failed to activate peer #4")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	clock.Run(time.Second * 600)
	// manually trigger a check to avoid a long real-time wait
	pool.ns.SetState(p1.node, pool.setup.updateFlag, nodestate.Flags{}, 0)
	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.setup.updateFlag, 0)
	// p1: 1000  p2: 500  p3: 2000  p4: 900
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
	disconnect(pool, p2)
	disconnect(pool, p4)
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Should not activate peer #2")
	}
}