
les: fix and slim the unit tests of les (#20247)

* les: loosen restrictions in the unit tests

* les: update unit tests

* les, light: slim the unit tests
gary rong, 6 years ago
commit
b9bac1f384

+ 3 - 0
les/clientpool.go

@@ -459,6 +459,9 @@ func (f *clientPool) addBalance(id enode.ID, amount uint64, setTotal bool) {
 		defer func() {
 			c.balanceTracker.setBalance(pb.value, negBalance)
 			if !c.priority && pb.value > 0 {
+				// The capacity should be adjusted based on the requirement,
+				// but we don't know the new capacity yet, so a second
+				// call is needed to update it.
 				c.priority = true
 				c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
 			}
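
The two-step flow the new comment describes can be sketched as follows. This is a minimal toy model, not the actual clientPool API: addBalance only flips the priority flag, and a hypothetical second call pushes the recomputed capacity to the peer.

package main

import "fmt"

// toyClient is a stand-in for the pool's client entry; the real
// clientPool in les tracks balances and capacity per peer.
type toyClient struct {
	priority bool
	capacity uint64
}

// addBalance promotes the client but cannot know the right capacity.
func addBalance(c *toyClient, amount uint64) {
	if !c.priority && amount > 0 {
		c.priority = true
	}
}

// setCapacity is the hypothetical second call that updates capacity.
func setCapacity(c *toyClient, cap uint64) {
	c.capacity = cap
}

func main() {
	c := &toyClient{capacity: 1}
	addBalance(c, 100) // first call: promote to priority
	setCapacity(c, 10) // second call: adjust the capacity
	fmt.Println(c.priority, c.capacity) // true 10
}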

+ 24 - 7
les/clientpool_test.go

@@ -68,6 +68,14 @@ func (i poolTestPeer) freeClientId() string {
 
 func (i poolTestPeer) updateCapacity(uint64) {}
 
+type poolTestPeerWithCap struct {
+	poolTestPeer
+
+	cap uint64
+}
+
+func (i *poolTestPeerWithCap) updateCapacity(cap uint64) { i.cap = cap }
+
 func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
 	rand.Seed(time.Now().UnixNano())
 	var (
@@ -308,9 +316,9 @@ func TestFreeClientKickedOut(t *testing.T) {
 
 	for i := 0; i < 10; i++ {
 		pool.connect(poolTestPeer(i), 1)
-		clock.Run(100 * time.Millisecond)
+		clock.Run(time.Millisecond)
 	}
-	if pool.connect(poolTestPeer(11), 1) {
+	if pool.connect(poolTestPeer(10), 1) {
 		t.Fatalf("New free client should be rejected")
 	}
 	clock.Run(5 * time.Minute)
@@ -320,8 +328,8 @@ func TestFreeClientKickedOut(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		select {
 		case id := <-kicked:
-			if id != i {
-				t.Fatalf("Kicked client mismatch, want %v, got %v", i, id)
+			if id >= 10 {
+				t.Fatalf("Old client should be kicked out, got: %d", id)
 			}
 		case <-time.NewTimer(time.Second).C:
 			t.Fatalf("timeout")
@@ -364,11 +372,20 @@ func TestDowngradePriorityClient(t *testing.T) {
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
 
-	pool.addBalance(poolTestPeer(0).ID(), uint64(time.Minute), false)
-	pool.connect(poolTestPeer(0), 10)
+	p := &poolTestPeerWithCap{
+		poolTestPeer: poolTestPeer(0),
+	}
+	pool.addBalance(p.ID(), uint64(time.Minute), false)
+	pool.connect(p, 10)
+	if p.cap != 10 {
+		t.Fatalf("The capacity of the priority peer hasn't been updated, got: %d", p.cap)
+	}
+
 	clock.Run(time.Minute)             // All positive balance should be used up.
 	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
-
+	if p.cap != 1 {
+		t.Fatalf("The capacity of the peer should be downgraded, got: %d", p.cap)
+	}
 	pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID())
 	if pb.value != 0 {
 		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb.value)

+ 8 - 6
les/distributor.go

@@ -110,13 +110,15 @@ func (d *requestDistributor) registerTestPeer(p distPeer) {
 	d.peerLock.Unlock()
 }
 
-// distMaxWait is the maximum waiting time after which further necessary waiting
-// times are recalculated based on new feedback from the servers
-const distMaxWait = time.Millisecond * 50
+var (
+	// distMaxWait is the maximum waiting time after which further necessary waiting
+	// times are recalculated based on new feedback from the servers
+	distMaxWait = time.Millisecond * 50
 
-// waitForPeers is the time window in which a request does not fail even if it
-// has no suitable peers to send to at the moment
-const waitForPeers = time.Second * 3
+	// waitForPeers is the time window in which a request does not fail even if it
+	// has no suitable peers to send to at the moment
+	waitForPeers = time.Second * 3
+)
 
 // main event loop
 func (d *requestDistributor) loop() {
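
Converting these constants to variables is what lets the tests zero out waitForPeers below. A common save/override/restore pattern for such package-level knobs looks like this (the t.Cleanup restore is an assumption for illustration; the tests in this commit assign waitForPeers = 0 directly):

package les

import (
	"testing"
	"time"
)

// waitForPeers is a package-level timing knob; as a var rather than
// a const, tests can tune it.
var waitForPeers = 3 * time.Second

func TestFailsFastWithoutPeers(t *testing.T) {
	old := waitForPeers
	waitForPeers = 0 // fail immediately instead of waiting the window
	t.Cleanup(func() { waitForPeers = old })

	// ... exercise the request distributor here ...
}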

+ 5 - 2
les/distributor_test.go

@@ -86,8 +86,8 @@ func (p *testDistPeer) worker(t *testing.T, checkOrder bool, stop chan struct{})
 const (
 	testDistBufLimit       = 10000000
 	testDistMaxCost        = 1000000
-	testDistPeerCount      = 5
-	testDistReqCount       = 5000
+	testDistPeerCount      = 2
+	testDistReqCount       = 10
 	testDistMaxResendCount = 3
 )
 
@@ -128,6 +128,9 @@ func testRequestDistributor(t *testing.T, resend bool) {
 		go peers[i].worker(t, !resend, stop)
 		dist.registerTestPeer(peers[i])
 	}
+	// Disable the mechanism that waits a while for a request even
+	// when there is no suitable peer to send it to right now.
+	waitForPeers = 0
 
 	var wg sync.WaitGroup
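
The comment above refers to the wait window controlled by waitForPeers: with a positive window, a request with no suitable peer keeps retrying until the deadline; with a zero window it fails immediately. A toy illustration of those semantics (not the actual distributor loop):

package main

import (
	"errors"
	"fmt"
	"time"
)

// trySend retries until a suitable peer shows up or the wait window
// expires; a zero window means a single attempt.
func trySend(havePeer func() bool, window time.Duration) error {
	deadline := time.Now().Add(window)
	for {
		if havePeer() {
			return nil
		}
		if !time.Now().Before(deadline) {
			return errors.New("no suitable peer")
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	err := trySend(func() bool { return false }, 0)
	fmt.Println(err) // fails immediately with a zero window
}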
 

+ 6 - 1
les/odr_test.go

@@ -193,6 +193,9 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
 	if clientHead.Number.Uint64() != 4 {
 		t.Fatalf("Failed to sync the chain with server, head: %v", clientHead.Number.Uint64())
 	}
+	// Disable the mechanism that waits a while for a request even
+	// when there is no suitable peer to send it to right now.
+	waitForPeers = 0
 
 	test := func(expFail uint64) {
 		// Mark this as a helper to put the failures at the correct lines
@@ -202,7 +205,9 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
 			bhash := rawdb.ReadCanonicalHash(server.db, i)
 			b1 := fn(light.NoOdr, server.db, server.handler.server.chainConfig, server.handler.blockchain, nil, bhash)
 
-			ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+			// Set the timeout to 1 second here to ensure there is enough
+			// time for Travis to complete the request.
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 			b2 := fn(ctx, client.db, client.handler.backend.chainConfig, nil, client.handler.backend.blockchain, bhash)
 			cancel()
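
Bumping the per-request timeout from 200ms to a full second trades test speed for robustness on slow CI machines. A hedged alternative (illustrative, not part of this commit) is to scale a base timeout with an environment knob so local runs stay fast:

package main

import (
	"context"
	"fmt"
	"os"
	"strconv"
	"time"
)

// testTimeout scales a base timeout by the hypothetical
// TEST_TIMEOUT_SCALE environment variable, defaulting to 1.
func testTimeout(base time.Duration) time.Duration {
	if s := os.Getenv("TEST_TIMEOUT_SCALE"); s != "" {
		if f, err := strconv.Atoi(s); err == nil && f > 0 {
			return base * time.Duration(f)
		}
	}
	return base
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), testTimeout(time.Second))
	defer cancel()
	fmt.Println(ctx.Err()) // <nil> until the deadline passes
}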
 

+ 1 - 1
les/sync_test.go

@@ -89,7 +89,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {
 			for {
 				_, hash, _, err := server.handler.server.oracle.contract.Contract().GetLatestCheckpoint(nil)
 				if err != nil || hash == [32]byte{} {
-					time.Sleep(100 * time.Millisecond)
+					time.Sleep(10 * time.Millisecond)
 					continue
 				}
 				break
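
Shrinking the sleep from 100ms to 10ms makes this poll loop react faster once the checkpoint is registered. Poll-until loops like this are often factored into a helper with an explicit deadline so they cannot spin forever; a generic sketch (not a helper that exists in les):

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond at the given interval until it returns true or
// the overall deadline passes.
func waitFor(cond func() bool, interval, deadline time.Duration) bool {
	end := time.Now().Add(deadline)
	for time.Now().Before(end) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}

func main() {
	start := time.Now()
	ok := waitFor(func() bool { return time.Since(start) > 30*time.Millisecond },
		10*time.Millisecond, time.Second)
	fmt.Println(ok) // true once the condition flips
}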

+ 2 - 2
les/test_helper.go

@@ -71,10 +71,10 @@ var (
 
 var (
 	// The block frequency for creating checkpoints (only used in test).
-	sectionSize = big.NewInt(512)
+	sectionSize = big.NewInt(128)
 
 	// The number of confirmations needed to generate a checkpoint (only used in test).
-	processConfirms = big.NewInt(4)
+	processConfirms = big.NewInt(1)
 
 	// The token bucket buffer limit for testing purpose.
 	testBufLimit = uint64(1000000)
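
The smaller section size and confirmation count shrink the number of blocks a test chain has to generate before the first checkpoint can be produced. Assuming the simple model that a checkpoint for section 0 becomes available once the full section plus its confirmations are on-chain, the change is roughly 512+4 blocks down to 128+1:

package main

import "fmt"

// blocksForFirstCheckpoint is an assumed model: a checkpoint for the
// first section needs the whole section plus its confirmations.
func blocksForFirstCheckpoint(sectionSize, confirms uint64) uint64 {
	return sectionSize + confirms
}

func main() {
	fmt.Println(blocksForFirstCheckpoint(512, 4)) // before: 516
	fmt.Println(blocksForFirstCheckpoint(128, 1)) // after:  129
}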

+ 12 - 12
light/postprocess.go

@@ -79,21 +79,21 @@ var (
 	}
 	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
 	TestServerIndexerConfig = &IndexerConfig{
-		ChtSize:           512,
-		ChtConfirms:       4,
-		BloomSize:         64,
-		BloomConfirms:     4,
-		BloomTrieSize:     512,
-		BloomTrieConfirms: 4,
+		ChtSize:           128,
+		ChtConfirms:       1,
+		BloomSize:         16,
+		BloomConfirms:     1,
+		BloomTrieSize:     128,
+		BloomTrieConfirms: 1,
 	}
 	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
 	TestClientIndexerConfig = &IndexerConfig{
-		ChtSize:           512,
-		ChtConfirms:       32,
-		BloomSize:         512,
-		BloomConfirms:     32,
-		BloomTrieSize:     512,
-		BloomTrieConfirms: 32,
+		ChtSize:           128,
+		ChtConfirms:       8,
+		BloomSize:         128,
+		BloomConfirms:     8,
+		BloomTrieSize:     128,
+		BloomTrieConfirms: 8,
 	}
 )
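
The new test sizes stay internally consistent: ChtSize matches the sectionSize of 128 used in les/test_helper.go, and in both configs BloomTrieSize remains a multiple of BloomSize, since the bloom trie aggregates whole bloom sections. A small illustrative check (an assumed invariant sketched here, not a check performed by the light package):

package main

import "fmt"

type testIndexerConfig struct {
	ChtSize, BloomSize, BloomTrieSize uint64
}

// consistent checks the assumed invariant that the bloom trie section
// size covers a whole number of bloom sections.
func consistent(c testIndexerConfig) bool {
	return c.BloomSize > 0 && c.BloomTrieSize%c.BloomSize == 0
}

func main() {
	server := testIndexerConfig{ChtSize: 128, BloomSize: 16, BloomTrieSize: 128}
	client := testIndexerConfig{ChtSize: 128, BloomSize: 128, BloomTrieSize: 128}
	fmt.Println(consistent(server), consistent(client)) // true true
}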