clientpool_test.go 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581
  1. // Copyright 2019 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package les
  17. import (
  18. "fmt"
  19. "math/rand"
  20. "testing"
  21. "time"
  22. "github.com/ethereum/go-ethereum/common/mclock"
  23. "github.com/ethereum/go-ethereum/core/rawdb"
  24. lps "github.com/ethereum/go-ethereum/les/lespay/server"
  25. "github.com/ethereum/go-ethereum/p2p/enode"
  26. "github.com/ethereum/go-ethereum/p2p/enr"
  27. "github.com/ethereum/go-ethereum/p2p/nodestate"
  28. )
// The tests below run the randomized pool simulation in testClientPool with
// different parameter sets. The name encodes the configuration:
// L<n> = active peer limit, C<n> = total client count, P<n> = number of
// clients that receive a positive balance halfway through the run.
// "Free" variants have no paid clients and enable random disconnects;
// "P" variants keep peers connected until the pool kicks them out.
func TestClientPoolL10C100Free(t *testing.T) {
	testClientPool(t, 10, 100, 0, true)
}

func TestClientPoolL40C200Free(t *testing.T) {
	testClientPool(t, 40, 200, 0, true)
}

func TestClientPoolL100C300Free(t *testing.T) {
	testClientPool(t, 100, 300, 0, true)
}

func TestClientPoolL10C100P4(t *testing.T) {
	testClientPool(t, 10, 100, 4, false)
}

func TestClientPoolL40C200P30(t *testing.T) {
	testClientPool(t, 40, 200, 30, false)
}

func TestClientPoolL100C300P20(t *testing.T) {
	testClientPool(t, 100, 300, 20, false)
}
// testClientPoolTicks is the number of simulation ticks (one simulated
// second each) that the randomized pool test runs for.
const testClientPoolTicks = 100000

// poolTestPeer is a minimal peer implementation used to exercise clientPool.
type poolTestPeer struct {
	node            *enode.Node // null-signed node whose ID encodes the index
	index           int         // test peer index
	disconnCh       chan int    // receives the peer index when the pool drops the peer (may be nil)
	cap             uint64      // last capacity reported via updateCapacity
	inactiveAllowed bool        // value returned by allowInactive
}
  55. func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
  56. return &poolTestPeer{
  57. index: i,
  58. disconnCh: disconnCh,
  59. node: enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
  60. }
  61. }
// Node returns the peer's enode, as required by the pool's peer interface.
func (i *poolTestPeer) Node() *enode.Node {
	return i.node
}

// freeClientId returns a fake address string uniquely identifying the peer
// for free-client accounting.
func (i *poolTestPeer) freeClientId() string {
	return fmt.Sprintf("addr #%d", i.index)
}

// updateCapacity records the capacity assigned by the pool so tests can
// assert on activation and deactivation.
func (i *poolTestPeer) updateCapacity(cap uint64) {
	i.cap = cap
}

// freeze is a no-op; these tests do not exercise peer freezing.
func (i *poolTestPeer) freeze() {}

// allowInactive reports whether the peer accepts staying in the pool with
// zero capacity instead of being disconnected.
func (i *poolTestPeer) allowInactive() bool {
	return i.inactiveAllowed
}
// getBalance reads the positive and negative balance of the given test peer.
// If the peer is not currently tracked by the pool (its client field is
// unset), the peer's free-client address is temporarily registered so the
// balance field becomes available, and removed again before returning.
func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {
	// NOTE(review): checks clientField but reads pool.BalanceField below;
	// presumably setting connAddressField is what materializes the balance
	// object — confirm against the nodestate setup in clientPool.
	temp := pool.ns.GetField(p.node, clientField) == nil
	if temp {
		pool.ns.SetField(p.node, connAddressField, p.freeClientId())
	}
	n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*lps.NodeBalance)
	pos, neg = n.GetBalance()
	if temp {
		// Undo the temporary registration so the pool state is unchanged.
		pool.ns.SetField(p.node, connAddressField, nil)
	}
	return
}
  87. func addBalance(pool *clientPool, id enode.ID, amount int64) {
  88. pool.forClients([]enode.ID{id}, func(c *clientInfo) {
  89. c.balance.AddBalance(amount)
  90. })
  91. }
  92. func checkDiff(a, b uint64) bool {
  93. maxDiff := (a + b) / 2000
  94. if maxDiff < 1 {
  95. maxDiff = 1
  96. }
  97. return a > b+maxDiff || b > a+maxDiff
  98. }
// testClientPool runs a randomized simulation of the client pool:
// clientCount peers connect and disconnect over testClientPoolTicks
// simulated seconds while at most activeLimit of them may be active at
// once. A quarter of the way in, the first paidCount peers receive a
// positive balance large enough for half the simulation. At the end, the
// accumulated connection time of every peer must fall within the
// statistically expected range, with paid peers getting more time.
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
	rand.Seed(time.Now().UnixNano())
	var (
		clock     mclock.Simulated
		db        = rawdb.NewMemoryDatabase()
		connected = make([]bool, clientCount)
		connTicks = make([]int, clientCount) // per-peer connection time accumulator (see below)
		disconnCh = make(chan int, clientCount)
		// disconnFn recovers the peer index from the first two ID bytes,
		// mirroring the encoding in newPoolTestPeer.
		disconnFn = func(id enode.ID) {
			disconnCh <- int(id[0]) + int(id[1])<<8
		}
		pool = newClientPool(db, 1, 0, &clock, disconnFn)
	)
	pool.setLimits(activeLimit, uint64(activeLimit))
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	// pool should accept new peers up to its connected limit
	for i := 0; i < activeLimit; i++ {
		if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
			connected[i] = true
		} else {
			t.Fatalf("Test peer #%d rejected", i)
		}
	}
	// randomly connect and disconnect peers, expect to have a similar total connection time at the end
	for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
		clock.Run(1 * time.Second)
		if tickCounter == testClientPoolTicks/4 {
			// give a positive balance to some of the peers
			amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
			for i := 0; i < paidCount; i++ {
				addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
			}
		}
		i := rand.Intn(clientCount)
		if connected[i] {
			if randomDisconnect {
				// connTicks bookkeeping: subtract the tick at connect time and
				// add it at disconnect time, so the net contribution is the
				// number of ticks the peer spent connected.
				pool.disconnect(newPoolTestPeer(i, disconnCh))
				connected[i] = false
				connTicks[i] += tickCounter
			}
		} else {
			if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {
				connected[i] = true
				connTicks[i] -= tickCounter
			} else {
				// Rejected: explicitly disconnect to clean up any partial state.
				pool.disconnect(newPoolTestPeer(i, disconnCh))
			}
		}
		// Drain kick notifications issued by the pool during this tick and
		// account the forced disconnects.
	pollDisconnects:
		for {
			select {
			case i := <-disconnCh:
				pool.disconnect(newPoolTestPeer(i, disconnCh))
				if connected[i] {
					connTicks[i] += tickCounter
					connected[i] = false
				}
			default:
				break pollDisconnects
			}
		}
	}
	// Expected connection time: fair share before balances are handed out,
	// then the unpaid peers share what the paid peers do not occupy.
	expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
	expMin := expTicks - expTicks/5
	expMax := expTicks + expTicks/5
	// Paid peers are expected to stay connected for the whole second half.
	paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
	paidMin := paidTicks - paidTicks/5
	paidMax := paidTicks + paidTicks/5
	// check if the total connected time of peers are all in the expected range
	for i, c := range connected {
		if c {
			// Still connected at the end: close the open interval.
			connTicks[i] += testClientPoolTicks
		}
		min, max := expMin, expMax
		if i < paidCount {
			// expect a higher amount for clients with a positive balance
			min, max = paidMin, paidMax
		}
		if connTicks[i] < min || connTicks[i] > max {
			t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
		}
	}
	pool.stop()
}
// testPriorityConnect connects peer p to the pool and then tries to raise
// its capacity to cap via setCapacity. If expSuccess is true, both steps
// must succeed; if false, at least one step is expected to fail and
// reaching the end of the function is a test failure.
func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
	// Note: this cap shadows the parameter inside the if; the setCapacity
	// call below uses the parameter value.
	if cap, _ := pool.connect(p); cap == 0 {
		if expSuccess {
			t.Fatalf("Failed to connect paid client")
		} else {
			return
		}
	}
	if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil {
		if expSuccess {
			t.Fatalf("Failed to raise capacity of paid client")
		} else {
			return
		}
	}
	if !expSuccess {
		t.Fatalf("Should reject high capacity paid client")
	}
}
  202. func TestConnectPaidClient(t *testing.T) {
  203. var (
  204. clock mclock.Simulated
  205. db = rawdb.NewMemoryDatabase()
  206. )
  207. pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
  208. defer pool.stop()
  209. pool.setLimits(10, uint64(10))
  210. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  211. // Add balance for an external client and mark it as paid client
  212. addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
  213. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
  214. }
  215. func TestConnectPaidClientToSmallPool(t *testing.T) {
  216. var (
  217. clock mclock.Simulated
  218. db = rawdb.NewMemoryDatabase()
  219. )
  220. pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
  221. defer pool.stop()
  222. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  223. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  224. // Add balance for an external client and mark it as paid client
  225. addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
  226. // Connect a fat paid client to pool, should reject it.
  227. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
  228. }
  229. func TestConnectPaidClientToFullPool(t *testing.T) {
  230. var (
  231. clock mclock.Simulated
  232. db = rawdb.NewMemoryDatabase()
  233. )
  234. removeFn := func(enode.ID) {} // Noop
  235. pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
  236. defer pool.stop()
  237. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  238. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  239. for i := 0; i < 10; i++ {
  240. addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
  241. pool.connect(newPoolTestPeer(i, nil))
  242. }
  243. addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
  244. if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
  245. t.Fatalf("Low balance paid client should be rejected")
  246. }
  247. clock.Run(time.Second)
  248. addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
  249. if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 {
  250. t.Fatalf("High balance paid client should be accepted")
  251. }
  252. }
// TestPaidClientKickedOut connects ten paid clients whose balances cover
// roughly ten seconds of connection time each, then advances the clock far
// enough for all balances to be spent. A new free client must then be
// accepted, and the longest-connected client (#0) must be the one kicked.
func TestPaidClientKickedOut(t *testing.T) {
	var (
		clock    mclock.Simulated
		db       = rawdb.NewMemoryDatabase()
		kickedCh = make(chan int, 100)
	)
	// Report the index (low ID byte) of every kicked client.
	removeFn := func(id enode.ID) {
		kickedCh <- int(id[0])
	}
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	// Zero time constants: presumably disables balance expiration so the
	// arithmetic below is exact — confirm against lps.BalanceTracker.
	pool.bt.SetExpirationTCs(0, 0)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	// Stagger connects by 1ms so client #0 is unambiguously the oldest.
	for i := 0; i < 10; i++ {
		addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
		pool.connect(newPoolTestPeer(i, kickedCh))
		clock.Run(time.Millisecond)
	}
	// Burn through every allowance plus the connection bias.
	clock.Run(defaultConnectedBias + time.Second*11)
	if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 {
		t.Fatalf("Free client should be accepted")
	}
	select {
	case id := <-kickedCh:
		if id != 0 {
			t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
		}
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
}
  285. func TestConnectFreeClient(t *testing.T) {
  286. var (
  287. clock mclock.Simulated
  288. db = rawdb.NewMemoryDatabase()
  289. )
  290. pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
  291. defer pool.stop()
  292. pool.setLimits(10, uint64(10))
  293. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  294. if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 {
  295. t.Fatalf("Failed to connect free client")
  296. }
  297. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
  298. }
// TestConnectFreeClientToFullPool fills the pool with free clients and
// checks that newcomers are rejected while the existing clients are recent,
// but accepted once the members have been connected for over five minutes.
func TestConnectFreeClientToFullPool(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	removeFn := func(enode.ID) {} // Noop
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, nil))
	}
	// Pool is full of fresh members: a newcomer must be refused.
	if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	// One minute of membership is still not enough to be outbid.
	clock.Run(time.Minute)
	if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	// Advance past the five-minute mark (the extra millisecond nudges the
	// total strictly beyond the threshold); now a newcomer must displace
	// one of the old members.
	clock.Run(time.Millisecond)
	clock.Run(4 * time.Minute)
	if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 {
		t.Fatalf("Old client connects more than 5min should be kicked")
	}
}
// TestFreeClientKickedOut fills the pool with free clients, verifies that a
// fresh newcomer is rejected (and itself reported through the kick
// callback), then, after five minutes, connects ten new clients and checks
// that exactly the ten original (old) clients are the ones kicked out.
func TestFreeClientKickedOut(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 100)
	)
	// Report the index (low ID byte) of every kicked client.
	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	// Stagger connects by 1ms so seniority ordering is unambiguous.
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i, kicked))
		clock.Run(time.Millisecond)
	}
	if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 {
		t.Fatalf("New free client should be rejected")
	}
	// The rejected peer is expected to surface through the kick callback;
	// NOTE(review): presumably because allowInactive is false here — confirm.
	select {
	case <-kicked:
	case <-time.NewTimer(time.Second).C:
		t.Fatalf("timeout")
	}
	pool.disconnect(newPoolTestPeer(10, kicked))
	// After five minutes the original members become replaceable.
	clock.Run(5 * time.Minute)
	for i := 0; i < 10; i++ {
		pool.connect(newPoolTestPeer(i+10, kicked))
	}
	// Every kicked peer must be one of the originals (index < 10).
	for i := 0; i < 10; i++ {
		select {
		case id := <-kicked:
			if id >= 10 {
				t.Fatalf("Old client should be kicked, now got: %d", id)
			}
		case <-time.NewTimer(time.Second).C:
			t.Fatalf("timeout")
		}
	}
}
  364. func TestPositiveBalanceCalculation(t *testing.T) {
  365. var (
  366. clock mclock.Simulated
  367. db = rawdb.NewMemoryDatabase()
  368. kicked = make(chan int, 10)
  369. )
  370. removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop
  371. pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
  372. defer pool.stop()
  373. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  374. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
  375. addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
  376. testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
  377. clock.Run(time.Minute)
  378. pool.disconnect(newPoolTestPeer(0, kicked))
  379. pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
  380. if checkDiff(pb, uint64(time.Minute*2)) {
  381. t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
  382. }
  383. }
// TestDowngradePriorityClient checks that a priority client is downgraded
// to the minimal capacity of 1 once its positive balance runs out, and that
// the balance can be topped up again afterwards.
func TestDowngradePriorityClient(t *testing.T) {
	var (
		clock  mclock.Simulated
		db     = rawdb.NewMemoryDatabase()
		kicked = make(chan int, 10)
	)
	// Report the index (low ID byte) of every kicked client.
	removeFn := func(id enode.ID) { kicked <- int(id[0]) }
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, removeFn)
	defer pool.stop()
	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
	p := newPoolTestPeer(0, kicked)
	addBalance(pool, p.node.ID(), int64(time.Minute))
	testPriorityConnect(t, pool, p, 10, true)
	if p.cap != 10 {
		t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
	}
	clock.Run(time.Minute) // All positive balance should be used up.
	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
	if p.cap != 1 {
		t.Fatalf("The capcacity of peer should be downgraded, got: %d", p.cap)
	}
	pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
	if pb != 0 {
		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
	}
	// Top up again and verify the new balance is visible.
	addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
	pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
	if checkDiff(pb, uint64(time.Minute)) {
		t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
	}
}
  416. func TestNegativeBalanceCalculation(t *testing.T) {
  417. var (
  418. clock mclock.Simulated
  419. db = rawdb.NewMemoryDatabase()
  420. )
  421. pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
  422. defer pool.stop()
  423. pool.setLimits(10, uint64(10)) // Total capacity limit is 10
  424. pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
  425. for i := 0; i < 10; i++ {
  426. pool.connect(newPoolTestPeer(i, nil))
  427. }
  428. clock.Run(time.Second)
  429. for i := 0; i < 10; i++ {
  430. pool.disconnect(newPoolTestPeer(i, nil))
  431. _, nb := getBalance(pool, newPoolTestPeer(i, nil))
  432. if nb != 0 {
  433. t.Fatalf("Short connection shouldn't be recorded")
  434. }
  435. }
  436. for i := 0; i < 10; i++ {
  437. pool.connect(newPoolTestPeer(i, nil))
  438. }
  439. clock.Run(time.Minute)
  440. for i := 0; i < 10; i++ {
  441. pool.disconnect(newPoolTestPeer(i, nil))
  442. _, nb := getBalance(pool, newPoolTestPeer(i, nil))
  443. if checkDiff(nb, uint64(time.Minute)/1000) {
  444. t.Fatalf("Negative balance mismatch, want %v, got %v", uint64(time.Minute)/1000, nb)
  445. }
  446. }
  447. }
// TestInactiveClient exercises the inactive (zero-capacity) peer mechanism
// with a pool of active-limit 2: peers that tolerate inactivity stay in the
// pool with cap 0 when outbid, and the two highest-balance peers are the
// active ones after every balance change. Balance comments below track the
// expected standings after each step.
func TestInactiveClient(t *testing.T) {
	var (
		clock mclock.Simulated
		db    = rawdb.NewMemoryDatabase()
	)
	pool := newClientPool(db, 1, defaultConnectedBias, &clock, func(id enode.ID) {})
	defer pool.stop()
	pool.setLimits(2, uint64(2)) // only two peers can be active at once
	p1 := newPoolTestPeer(1, nil)
	p1.inactiveAllowed = true
	p2 := newPoolTestPeer(2, nil)
	p2.inactiveAllowed = true
	p3 := newPoolTestPeer(3, nil)
	p3.inactiveAllowed = true
	addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
	addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
	// p1: 1000  p2: 0  p3: 2000
	p1.cap, _ = pool.connect(p1)
	if p1.cap != 1 {
		t.Fatalf("Failed to connect peer #1")
	}
	p2.cap, _ = pool.connect(p2)
	if p2.cap != 1 {
		t.Fatalf("Failed to connect peer #2")
	}
	// Connecting p3 (highest balance) must demote p2 (lowest balance).
	p3.cap, _ = pool.connect(p3)
	if p3.cap != 1 {
		t.Fatalf("Failed to connect peer #3")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
	// p1: 1000  p2: 3000  p3: 2000 -> p2 becomes top priority, p1 drops out.
	if p2.cap != 1 {
		t.Fatalf("Failed to activate peer #2")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000 -> p1 overtakes p2 again.
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Failed to deactivate peer #2")
	}
	// From here on, connection time is charged (TimeFactor 1).
	pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
	p4 := newPoolTestPeer(4, nil)
	addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
	// p1: 1000  p2: 500  p3: 2000  p4: 1500 -> p4 outbids p1.
	p4.cap, _ = pool.connect(p4)
	if p4.cap != 1 {
		t.Fatalf("Failed to activate peer #4")
	}
	if p1.cap != 0 {
		t.Fatalf("Failed to deactivate peer #1")
	}
	// 600s of active time drains p4's balance below p1's (p1 is inactive and
	// not charged).
	clock.Run(time.Second * 600)
	// manually trigger a check to avoid a long real-time wait
	pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0)
	pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0)
	// p1: 1000  p2: 500  p3: 2000  p4: 900 -> p1 back in, p4 out.
	if p1.cap != 1 {
		t.Fatalf("Failed to activate peer #1")
	}
	if p4.cap != 0 {
		t.Fatalf("Failed to deactivate peer #4")
	}
	pool.disconnect(p2)
	pool.disconnect(p4)
	// Draining p1's balance must not deactivate it: no better candidate
	// remains connected.
	addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
	if p1.cap != 1 {
		t.Fatalf("Should not deactivate peer #1")
	}
	if p2.cap != 0 {
		t.Fatalf("Should not activate peer #2")
	}
}