serverpool_test.go 9.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350
  1. // Copyright 2020 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package les
  17. import (
  18. "math/rand"
  19. "sync/atomic"
  20. "testing"
  21. "time"
  22. "github.com/ethereum/go-ethereum/common/mclock"
  23. "github.com/ethereum/go-ethereum/ethdb"
  24. "github.com/ethereum/go-ethereum/ethdb/memorydb"
  25. vfc "github.com/ethereum/go-ethereum/les/vflux/client"
  26. "github.com/ethereum/go-ethereum/p2p"
  27. "github.com/ethereum/go-ethereum/p2p/enode"
  28. "github.com/ethereum/go-ethereum/p2p/enr"
  29. )
const (
	spTestNodes  = 1000  // size of the simulated node set
	spTestTarget = 5     // number of simultaneous connections the test tries to maintain
	spTestLength = 10000 // number of simulated one-second cycles per run()
	spMinTotal   = 40000 // lower bound accepted by checkNodes for total connection amount
	spMaxTotal   = 50000 // upper bound accepted by checkNodes for total connection amount
)
  37. func testNodeID(i int) enode.ID {
  38. return enode.ID{42, byte(i % 256), byte(i / 256)}
  39. }
  40. func testNodeIndex(id enode.ID) int {
  41. if id[0] != 42 {
  42. return -1
  43. }
  44. return int(id[1]) + int(id[2])*256
  45. }
// serverPoolTest is a harness for exercising serverPool against a scripted
// set of simulated server nodes driven by a simulated clock.
type serverPoolTest struct {
	db    ethdb.KeyValueStore // backing store passed to the value tracker and server pool
	clock *mclock.Simulated   // simulated clock driving all pool timing
	quit  chan struct{}       // closed by stop() to terminate the watchdog goroutine
	// preNeg enables the pre-negotiation query function; preNegFail makes
	// every query time out, simulating broken UDP connectivity.
	preNeg, preNegFail bool
	vt                 *vfc.ValueTracker
	sp                 *serverPool
	input              enode.Iterator // discovery input iterator (nil = no discovery)
	testNodes          []spTestNode   // scripted state of each simulated node, indexed by testNodeID index
	trusted            []string       // ENR strings of trusted nodes, handed to newServerPool on start()
	// waitCount/waitEnded are atomic counters: currently blocked simulated
	// waits and total finished waits (progress signal for the watchdog).
	waitCount, waitEnded int32
	cycle, conn, servedConn  int // current cycle; open connections; open connections to service nodes
	serviceCycles, dialCount int // accumulated served connection-cycles; dial attempts made
	disconnect               map[int][]int // cycle number -> node indices scheduled to disconnect then
}
// spTestNode holds the scripted behavior and bookkeeping of one simulated server.
type spTestNode struct {
	// connectCycles: how many cycles a successful connection lasts
	// (0 means the node never accepts connections).
	// waitCycles: cooldown after a disconnect before reconnection may succeed.
	connectCycles, waitCycles int
	// nextConnCycle: earliest cycle at which a reconnect can succeed.
	// totalConn: accumulated connected cycles (debited on connect, credited
	// on disconnect by run()).
	nextConnCycle, totalConn int
	connected, service       bool // currently connected / provides useful service when connected
	peer                     *serverPeer
}
  67. func newServerPoolTest(preNeg, preNegFail bool) *serverPoolTest {
  68. nodes := make([]*enode.Node, spTestNodes)
  69. for i := range nodes {
  70. nodes[i] = enode.SignNull(&enr.Record{}, testNodeID(i))
  71. }
  72. return &serverPoolTest{
  73. clock: &mclock.Simulated{},
  74. db: memorydb.New(),
  75. input: enode.CycleNodes(nodes),
  76. testNodes: make([]spTestNode, spTestNodes),
  77. preNeg: preNeg,
  78. preNegFail: preNegFail,
  79. }
  80. }
  81. func (s *serverPoolTest) beginWait() {
  82. // ensure that dialIterator and the maximal number of pre-neg queries are not all stuck in a waiting state
  83. for atomic.AddInt32(&s.waitCount, 1) > preNegLimit {
  84. atomic.AddInt32(&s.waitCount, -1)
  85. s.clock.Run(time.Second)
  86. }
  87. }
// endWait releases the waiter slot taken by beginWait and bumps the
// finished-wait counter, which the watchdog goroutine started in start()
// reads to detect whether the test is still making progress.
func (s *serverPoolTest) endWait() {
	atomic.AddInt32(&s.waitCount, -1)
	atomic.AddInt32(&s.waitEnded, 1)
}
  92. func (s *serverPoolTest) addTrusted(i int) {
  93. s.trusted = append(s.trusted, enode.SignNull(&enr.Record{}, testNodeID(i)).String())
  94. }
// start creates the value tracker and server pool on the simulated clock,
// optionally installing a pre-negotiation query function, and launches a
// watchdog goroutine that advances the clock whenever no simulated wait has
// completed recently (i.e. the test appears stuck).
func (s *serverPoolTest) start() {
	var testQuery queryFunc
	if s.preNeg {
		// The query answers based on the scripted node state: 1 = positive,
		// 0 = negative, -1 = failure/timeout.
		testQuery = func(node *enode.Node) int {
			idx := testNodeIndex(node.ID())
			n := &s.testNodes[idx]
			canConnect := !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle
			if s.preNegFail {
				// simulate a scenario where UDP queries never work
				s.beginWait()
				s.clock.Sleep(time.Second * 5)
				s.endWait()
				return -1
			}
			// Nodes behave differently depending on their index so all three
			// query outcomes are exercised.
			switch idx % 3 {
			case 0:
				// pre-neg returns true only if connection is possible
				if canConnect {
					return 1
				}
				return 0
			case 1:
				// pre-neg returns true but connection might still fail
				return 1
			case 2:
				// pre-neg returns true if connection is possible, otherwise timeout (node unresponsive)
				if canConnect {
					return 1
				}
				s.beginWait()
				s.clock.Sleep(time.Second * 5)
				s.endWait()
				return -1
			}
			return -1
		}
	}
	s.vt = vfc.NewValueTracker(s.db, s.clock, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000))
	s.sp = newServerPool(s.db, []byte("serverpool:"), s.vt, 0, testQuery, s.clock, s.trusted)
	s.sp.addSource(s.input)
	s.sp.validSchemes = enode.ValidSchemesForTesting
	// Derive unix time from the simulated clock so persisted timestamps match.
	s.sp.unixTime = func() int64 { return int64(s.clock.Now()) / int64(time.Second) }
	s.disconnect = make(map[int][]int)
	s.sp.start()
	s.quit = make(chan struct{})
	// Watchdog: if waitEnded has not moved in the last 100ms of real time,
	// push the simulated clock forward to unblock pending waits.
	go func() {
		last := int32(-1)
		for {
			select {
			case <-time.After(time.Millisecond * 100):
				c := atomic.LoadInt32(&s.waitEnded)
				if c == last {
					// advance clock if test is stuck (might happen in rare cases)
					s.clock.Run(time.Second)
				}
				last = c
			case <-s.quit:
				return
			}
		}
	}()
}
  157. func (s *serverPoolTest) stop() {
  158. close(s.quit)
  159. s.sp.stop()
  160. s.vt.Stop()
  161. for i := range s.testNodes {
  162. n := &s.testNodes[i]
  163. if n.connected {
  164. n.totalConn += s.cycle
  165. }
  166. n.connected = false
  167. n.peer = nil
  168. n.nextConnCycle = 0
  169. }
  170. s.conn, s.servedConn = 0, 0
  171. }
// run simulates spTestLength one-second cycles. Each cycle first performs
// the disconnections scheduled for this cycle, then — while below the
// connection target — draws one candidate from the pool's dial iterator and
// connects if the scripted node state allows it, scheduling the future
// disconnection. totalConn accumulates connected cycles as
// (disconnect cycle - connect cycle): debited on connect, credited on
// disconnect.
func (s *serverPoolTest) run() {
	for count := spTestLength; count > 0; count-- {
		// process the disconnections scheduled for this cycle
		if dcList := s.disconnect[s.cycle]; dcList != nil {
			for _, idx := range dcList {
				n := &s.testNodes[idx]
				s.sp.unregisterPeer(n.peer)
				n.totalConn += s.cycle
				n.connected = false
				n.peer = nil
				s.conn--
				if n.service {
					s.servedConn--
				}
				// enforce the scripted cooldown before reconnection
				n.nextConnCycle = s.cycle + n.waitCycles
			}
			delete(s.disconnect, s.cycle)
		}
		if s.conn < spTestTarget {
			s.dialCount++
			s.beginWait()
			s.sp.dialIterator.Next()
			s.endWait()
			dial := s.sp.dialIterator.Node()
			id := dial.ID()
			idx := testNodeIndex(id)
			n := &s.testNodes[idx]
			// connection succeeds only if the node is scripted as connectable
			// and its cooldown has expired
			if !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle {
				s.conn++
				if n.service {
					s.servedConn++
				}
				n.totalConn -= s.cycle
				n.connected = true
				dc := s.cycle + n.connectCycles
				s.disconnect[dc] = append(s.disconnect[dc], idx)
				n.peer = &serverPeer{peerCommons: peerCommons{Peer: p2p.NewPeer(id, "", nil)}}
				s.sp.registerPeer(n.peer)
				if n.service {
					// report served requests so the value tracker learns this node is useful
					s.vt.Served(s.vt.GetNode(id), []vfc.ServedRequest{{ReqType: 0, Amount: 100}}, 0)
				}
			}
		}
		s.serviceCycles += s.servedConn
		s.clock.Run(time.Second)
		s.cycle++
	}
}
  219. func (s *serverPoolTest) setNodes(count, conn, wait int, service, trusted bool) (res []int) {
  220. for ; count > 0; count-- {
  221. idx := rand.Intn(spTestNodes)
  222. for s.testNodes[idx].connectCycles != 0 || s.testNodes[idx].connected {
  223. idx = rand.Intn(spTestNodes)
  224. }
  225. res = append(res, idx)
  226. s.testNodes[idx] = spTestNode{
  227. connectCycles: conn,
  228. waitCycles: wait,
  229. service: service,
  230. }
  231. if trusted {
  232. s.addTrusted(idx)
  233. }
  234. }
  235. return
  236. }
  237. func (s *serverPoolTest) resetNodes() {
  238. for i, n := range s.testNodes {
  239. if n.connected {
  240. n.totalConn += s.cycle
  241. s.sp.unregisterPeer(n.peer)
  242. }
  243. s.testNodes[i] = spTestNode{totalConn: n.totalConn}
  244. }
  245. s.conn, s.servedConn = 0, 0
  246. s.disconnect = make(map[int][]int)
  247. s.trusted = nil
  248. }
  249. func (s *serverPoolTest) checkNodes(t *testing.T, nodes []int) {
  250. var sum int
  251. for _, idx := range nodes {
  252. n := &s.testNodes[idx]
  253. if n.connected {
  254. n.totalConn += s.cycle
  255. }
  256. sum += n.totalConn
  257. n.totalConn = 0
  258. if n.connected {
  259. n.totalConn -= s.cycle
  260. }
  261. }
  262. if sum < spMinTotal || sum > spMaxTotal {
  263. t.Errorf("Total connection amount %d outside expected range %d to %d", sum, spMinTotal, spMaxTotal)
  264. }
  265. }
  266. func TestServerPool(t *testing.T) { testServerPool(t, false, false) }
  267. func TestServerPoolWithPreNeg(t *testing.T) { testServerPool(t, true, false) }
  268. func TestServerPoolWithPreNegFail(t *testing.T) { testServerPool(t, true, true) }
  269. func testServerPool(t *testing.T, preNeg, fail bool) {
  270. s := newServerPoolTest(preNeg, fail)
  271. nodes := s.setNodes(100, 200, 200, true, false)
  272. s.setNodes(100, 20, 20, false, false)
  273. s.start()
  274. s.run()
  275. s.stop()
  276. s.checkNodes(t, nodes)
  277. }
  278. func TestServerPoolChangedNodes(t *testing.T) { testServerPoolChangedNodes(t, false) }
  279. func TestServerPoolChangedNodesWithPreNeg(t *testing.T) { testServerPoolChangedNodes(t, true) }
  280. func testServerPoolChangedNodes(t *testing.T, preNeg bool) {
  281. s := newServerPoolTest(preNeg, false)
  282. nodes := s.setNodes(100, 200, 200, true, false)
  283. s.setNodes(100, 20, 20, false, false)
  284. s.start()
  285. s.run()
  286. s.checkNodes(t, nodes)
  287. for i := 0; i < 3; i++ {
  288. s.resetNodes()
  289. nodes := s.setNodes(100, 200, 200, true, false)
  290. s.setNodes(100, 20, 20, false, false)
  291. s.run()
  292. s.checkNodes(t, nodes)
  293. }
  294. s.stop()
  295. }
  296. func TestServerPoolRestartNoDiscovery(t *testing.T) { testServerPoolRestartNoDiscovery(t, false) }
  297. func TestServerPoolRestartNoDiscoveryWithPreNeg(t *testing.T) {
  298. testServerPoolRestartNoDiscovery(t, true)
  299. }
  300. func testServerPoolRestartNoDiscovery(t *testing.T, preNeg bool) {
  301. s := newServerPoolTest(preNeg, false)
  302. nodes := s.setNodes(100, 200, 200, true, false)
  303. s.setNodes(100, 20, 20, false, false)
  304. s.start()
  305. s.run()
  306. s.stop()
  307. s.checkNodes(t, nodes)
  308. s.input = nil
  309. s.start()
  310. s.run()
  311. s.stop()
  312. s.checkNodes(t, nodes)
  313. }
  314. func TestServerPoolTrustedNoDiscovery(t *testing.T) { testServerPoolTrustedNoDiscovery(t, false) }
  315. func TestServerPoolTrustedNoDiscoveryWithPreNeg(t *testing.T) {
  316. testServerPoolTrustedNoDiscovery(t, true)
  317. }
  318. func testServerPoolTrustedNoDiscovery(t *testing.T, preNeg bool) {
  319. s := newServerPoolTest(preNeg, false)
  320. trusted := s.setNodes(200, 200, 200, true, true)
  321. s.input = nil
  322. s.start()
  323. s.run()
  324. s.stop()
  325. s.checkNodes(t, trusted)
  326. }