diff --git a/.golangci.yml b/.golangci.yml index ddca6d63d2a..e13feb04ba5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -18,6 +18,16 @@ linters: - copyloopvar - goimports - depguard + - asasalint + - asciicheck + - bidichk + - durationcheck + - gocheckcompilerdirectives + - gochecksumtype + - makezero + - protogetter + - reassign + - intrange linters-settings: gocritic: # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty @@ -36,16 +46,21 @@ linters-settings: - G115 testifylint: enable: + - blank-import - bool-compare - compares - empty - error-is-as - error-nil - expected-actual + - formatter - len + - negative-positive - require-error - suite-dont-use-pkg - suite-extra-assert-call + - suite-subtest-run + - useless-assert disable: - float-compare - go-require diff --git a/client/client.go b/client/client.go index 9ced7284153..d9451ef3ffc 100644 --- a/client/client.go +++ b/client/client.go @@ -739,7 +739,7 @@ func (c *client) dispatchTSORequestWithRetry(ctx context.Context, dcLocation str err error req *tsoRequest ) - for i := 0; i < dispatchRetryCount; i++ { + for i := range dispatchRetryCount { // Do not delay for the first time. if i > 0 { time.Sleep(dispatchRetryDelay) diff --git a/client/pd_service_discovery.go b/client/pd_service_discovery.go index 83bc8e612a3..872b8e0ad0b 100644 --- a/client/pd_service_discovery.go +++ b/client/pd_service_discovery.go @@ -354,7 +354,7 @@ func (c *pdServiceBalancer) set(clients []ServiceClient) { func (c *pdServiceBalancer) check() { c.mu.Lock() defer c.mu.Unlock() - for i := 0; i < c.totalNode; i++ { + for range c.totalNode { c.now.markAsAvailable() c.next() } @@ -523,7 +523,7 @@ func (c *pdServiceDiscovery) initRetry(f func() error) error { var err error ticker := time.NewTicker(time.Second) defer ticker.Stop() - for i := 0; i < c.option.maxRetryTimes; i++ { + for range c.option.maxRetryTimes { if err = f(); err == nil { return nil } @@ -1093,7 +1093,7 @@ func (c *pdServiceDiscovery) updateServiceClient(members []*pdpb.Member, leader }) c.all.Store(clients) // create candidate services for all kinds of request. 
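The bulk of this patch is driven by the newly enabled `intrange` linter: once the language version is Go 1.22 or later, `for i := 0; i < n; i++` loops over a plain integer bound can be written as `for i := range n`, and the loop variable can be dropped entirely when the index is unused. A minimal standalone sketch of the rewrite (not code from this repository):

```go
package main

import "fmt"

func main() {
	// Pre-Go 1.22 form that the intrange linter flags.
	for i := 0; i < 3; i++ {
		fmt.Println("classic", i)
	}
	// Go 1.22 "range over int": i takes the values 0, 1, 2.
	for i := range 3 {
		fmt.Println("range", i)
	}
	// When the index is unused the variable is dropped entirely,
	// which is the shape most hunks in this patch take.
	for range 3 {
		fmt.Println("tick")
	}
}
```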
- for i := 0; i < int(apiKindCount); i++ { + for i := range apiKindCount { c.apiCandidateNodes[i].set(clients) } return err diff --git a/client/pd_service_discovery_test.go b/client/pd_service_discovery_test.go index 44171873b1a..794b03cc4aa 100644 --- a/client/pd_service_discovery_test.go +++ b/client/pd_service_discovery_test.go @@ -137,7 +137,7 @@ func (suite *serviceClientTestSuite) SetupSuite() { suite.followerServer = newTestServer(false) go suite.leaderServer.run() go suite.followerServer.run() - for i := 0; i < 10; i++ { + for range 10 { leaderConn, err1 := grpc.Dial(suite.leaderServer.addr, grpc.WithTransportCredentials(insecure.NewCredentials())) followerConn, err2 := grpc.Dial(suite.followerServer.addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err1 == nil && err2 == nil { @@ -278,7 +278,7 @@ func (suite *serviceClientTestSuite) TestServiceClientBalancer() { b.set([]ServiceClient{leader, follower}) re.Equal(2, b.totalNode) - for i := 0; i < 10; i++ { + for range 10 { client := b.get() ctx := client.BuildGRPCTargetContext(suite.ctx, false) conn := client.GetClientConn() @@ -292,7 +292,7 @@ func (suite *serviceClientTestSuite) TestServiceClientBalancer() { suite.followerServer.server.resetCount() suite.leaderServer.server.resetCount() - for i := 0; i < 10; i++ { + for range 10 { client := b.get() ctx := client.BuildGRPCTargetContext(suite.ctx, true) conn := client.GetClientConn() diff --git a/client/resource_group/controller/controller.go b/client/resource_group/controller/controller.go index 6cb7995a42e..49372927442 100644 --- a/client/resource_group/controller/controller.go +++ b/client/resource_group/controller/controller.go @@ -1347,7 +1347,7 @@ func (gc *groupCostController) acquireTokens(ctx context.Context, delta *rmpb.Co d time.Duration ) retryLoop: - for i := 0; i < gc.mainCfg.WaitRetryTimes; i++ { + for range gc.mainCfg.WaitRetryTimes { now := time.Now() switch gc.mode { case rmpb.GroupMode_RawMode: diff --git a/client/resource_group/controller/limiter_test.go b/client/resource_group/controller/limiter_test.go index 4ce46703de8..9afebcb3d53 100644 --- a/client/resource_group/controller/limiter_test.go +++ b/client/resource_group/controller/limiter_test.go @@ -246,7 +246,7 @@ func testQPSCase(concurrency int, reserveN int64, limit int64) (qps float64, ru var totalRequests int64 start := time.Now() - for i := 0; i < concurrency; i++ { + for range concurrency { wg.Add(1) go func() { defer wg.Done() diff --git a/client/resource_manager_client.go b/client/resource_manager_client.go index 11e798a89fb..cc1739e4097 100644 --- a/client/resource_manager_client.go +++ b/client/resource_manager_client.go @@ -382,7 +382,7 @@ func (c *client) tryResourceManagerConnect(ctx context.Context, connection *reso ) ticker := time.NewTicker(retryInterval) defer ticker.Stop() - for i := 0; i < maxRetryTimes; i++ { + for range maxRetryTimes { cc, err := c.resourceManagerClient() if err != nil { continue @@ -406,7 +406,7 @@ func (c *client) tryResourceManagerConnect(ctx context.Context, connection *reso } func (tbc *tokenBatchController) revokePendingTokenRequest(err error) { - for i := 0; i < len(tbc.tokenRequestCh); i++ { + for range len(tbc.tokenRequestCh) { req := <-tbc.tokenRequestCh req.done <- err } diff --git a/client/retry/backoff_test.go b/client/retry/backoff_test.go index 22d487b1885..12891d822aa 100644 --- a/client/retry/backoff_test.go +++ b/client/retry/backoff_test.go @@ -70,7 +70,7 @@ func TestBackoffer(t *testing.T) { bo = InitialBackoffer(base, max, total) 
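A detail worth calling out in hunks like the `apiKindCount` one above (and the `endpoint.UserKindCount` change later in the patch): the range operand may be any integer type, and the loop variable takes that type, so the explicit conversions the old loops needed, such as `int(apiKindCount)` or `endpoint.UserKind(i)`, disappear. A sketch with a hypothetical typed constant standing in for those enums:

```go
package main

import "fmt"

// apiKind is a stand-in for enum-style typed constants such as apiKindCount
// or endpoint.UserKindCount in the hunks above; it is not the real type.
type apiKind int

const (
	forwardAPIKind apiKind = iota
	regionAPIKind
	apiKindCount
)

func main() {
	names := make([]string, apiKindCount)
	// i has type apiKind, not int; any integer type is a valid slice or map
	// index, so the int(...) conversion from the old loop header is gone.
	for i := range apiKindCount {
		names[i] = fmt.Sprintf("kind-%d", i)
	}
	fmt.Println(names)
}
```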
re.Equal(bo.nextInterval(), base) re.Equal(bo.nextInterval(), 2*base) - for i := 0; i < 10; i++ { + for range 10 { re.LessOrEqual(bo.nextInterval(), max) } re.Equal(bo.nextInterval(), max) diff --git a/client/testutil/tempurl.go b/client/testutil/tempurl.go index ac8f29fa345..71d51176106 100644 --- a/client/testutil/tempurl.go +++ b/client/testutil/tempurl.go @@ -31,7 +31,7 @@ var ( // Alloc allocates a local URL for testing. func Alloc() string { - for i := 0; i < 10; i++ { + for range 10 { if u := tryAllocTestURL(); u != "" { return u } diff --git a/client/timerpool/pool_test.go b/client/timerpool/pool_test.go index d6dffc723a9..e10291f1027 100644 --- a/client/timerpool/pool_test.go +++ b/client/timerpool/pool_test.go @@ -14,7 +14,7 @@ import ( func TestTimerPool(t *testing.T) { var tp TimerPool - for i := 0; i < 100; i++ { + for range 100 { timer := tp.Get(20 * time.Millisecond) select { diff --git a/client/tso_batch_controller.go b/client/tso_batch_controller.go index b810e108667..0b5c1f9616d 100644 --- a/client/tso_batch_controller.go +++ b/client/tso_batch_controller.go @@ -211,7 +211,7 @@ func (tbc *tsoBatchController) adjustBestBatchSize() { } func (tbc *tsoBatchController) finishCollectedRequests(physical, firstLogical int64, suffixBits uint32, streamID string, err error) { - for i := 0; i < tbc.collectedRequestCount; i++ { + for i := range tbc.collectedRequestCount { tsoReq := tbc.collectedRequests[i] // Retrieve the request context before the request is done to trace without race. requestCtx := tsoReq.requestCtx diff --git a/client/tso_client.go b/client/tso_client.go index 6801aee3a11..28f6482c627 100644 --- a/client/tso_client.go +++ b/client/tso_client.go @@ -329,7 +329,7 @@ func (c *tsoClient) backupClientConn() (*grpc.ClientConn, string) { cc *grpc.ClientConn err error ) - for i := 0; i < len(urls); i++ { + for range urls { url := urls[rand.Intn(len(urls))] if cc, err = c.svcDiscovery.GetOrCreateGRPCConn(url); err != nil { continue @@ -403,7 +403,7 @@ func (c *tsoClient) tryConnectToTSO( ticker := time.NewTicker(retryInterval) defer ticker.Stop() // Retry several times before falling back to the follower when the network problem happens - for i := 0; i < maxRetryTimes; i++ { + for range maxRetryTimes { c.svcDiscovery.ScheduleCheckMemberChanged() cc, url = c.GetTSOAllocatorClientConnByDCLocation(dc) if _, ok := connectionCtxs.Load(url); ok { diff --git a/client/tso_dispatcher.go b/client/tso_dispatcher.go index 7febf194f3c..b30dab7242b 100644 --- a/client/tso_dispatcher.go +++ b/client/tso_dispatcher.go @@ -168,7 +168,7 @@ func (td *tsoDispatcher) scheduleUpdateConnectionCtxs() { } func (td *tsoDispatcher) revokePendingRequests(err error) { - for i := 0; i < len(td.tsoRequestCh); i++ { + for range len(td.tsoRequestCh) { req := <-td.tsoRequestCh req.tryDone(err) } diff --git a/client/tso_dispatcher_test.go b/client/tso_dispatcher_test.go index bf038e7b7f3..194c9bde455 100644 --- a/client/tso_dispatcher_test.go +++ b/client/tso_dispatcher_test.go @@ -231,7 +231,7 @@ func (s *testTSODispatcherSuite) testStaticConcurrencyImpl(concurrency int) { // and will be batched together once there is a free token. reqs := make([]*tsoRequest, 0, tokenCount+3) - for i := 0; i < tokenCount+3; i++ { + for range tokenCount + 3 { req := s.sendReq(ctx) s.reqMustNotReady(req) reqs = append(reqs, req) @@ -242,7 +242,7 @@ func (s *testTSODispatcherSuite) testStaticConcurrencyImpl(concurrency int) { // second batch but not finished yet. 
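Two of the converted loops (`revokePendingTokenRequest` above and `revokePendingRequests` here) drain a buffered channel. Unlike the old three-clause form, which re-evaluates `len(ch)` before every iteration, `for range len(ch)` evaluates the operand exactly once; the same once-only evaluation applies to any range expression, e.g. `value.NumField()` further down. Whether the difference is observable for these helpers depends on what else touches the channels, but the evaluation rule itself is easy to demonstrate with a standalone sketch:

```go
package main

import "fmt"

func main() {
	ch := make(chan int, 4)
	for i := range 4 {
		ch <- i
	}
	// The three-clause form re-evaluates len(ch) on every iteration, so it
	// stops once i catches up with the shrinking length (2 receives here).
	for i := 0; i < len(ch); i++ {
		<-ch
	}
	fmt.Println("left after classic loop:", len(ch)) // 2
	// `for range len(ch)` evaluates the length once up front, so it drains
	// everything that was buffered at that moment (the remaining 2).
	for range len(ch) {
		<-ch
	}
	fmt.Println("left after range loop:", len(ch)) // 0
}
```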
// Also note that in current implementation, the tsoStream tries to receive the next result before checking // the `tsoStream.pendingRequests` queue. Changing this behavior may need to update this test. - for i := 0; i < tokenCount+3; i++ { + for i := range tokenCount + 3 { expectedPending := tokenCount + 1 - i if expectedPending > tokenCount { expectedPending = tokenCount diff --git a/client/tso_service_discovery.go b/client/tso_service_discovery.go index 0380ddb4c28..fe690ed5f70 100644 --- a/client/tso_service_discovery.go +++ b/client/tso_service_discovery.go @@ -211,7 +211,7 @@ func (c *tsoServiceDiscovery) retry( var err error ticker := time.NewTicker(retryInterval) defer ticker.Stop() - for i := 0; i < maxRetryTimes; i++ { + for range maxRetryTimes { if err = f(); err == nil { return nil } diff --git a/client/tso_stream_test.go b/client/tso_stream_test.go index ab6f2786ff3..6595ed2c13a 100644 --- a/client/tso_stream_test.go +++ b/client/tso_stream_test.go @@ -366,7 +366,7 @@ func (s *testTSOStreamSuite) TestTSOStreamBasic() { func (s *testTSOStreamSuite) testTSOStreamBrokenImpl(err error, pendingRequests int) { var resultCh []<-chan callbackInvocation - for i := 0; i < pendingRequests; i++ { + for range pendingRequests { ch := s.mustProcessRequestWithResultCh(1) resultCh = append(resultCh, ch) s.noResult(ch) @@ -414,7 +414,7 @@ func (s *testTSOStreamSuite) TestTSOStreamCanceledWithPendingReq() { func (s *testTSOStreamSuite) TestTSOStreamFIFO() { var resultChs []<-chan callbackInvocation const count = 5 - for i := 0; i < count; i++ { + for i := range count { ch := s.mustProcessRequestWithResultCh(int64(i + 1)) resultChs = append(resultChs, ch) } @@ -423,7 +423,7 @@ func (s *testTSOStreamSuite) TestTSOStreamFIFO() { s.noResult(ch) } - for i := 0; i < count; i++ { + for i := range count { s.inner.returnResult(int64((i+1)*10), int64(i), uint32(i+1)) } @@ -505,7 +505,7 @@ func (s *testTSOStreamSuite) TestEstimatedLatency() { reqStartTimeCh := make(chan time.Time, maxPendingRequestsInTSOStream) // Limit concurrent requests to be less than the capacity of tsoStream.pendingRequests. tokenCh := make(chan struct{}, maxPendingRequestsInTSOStream-1) - for i := 0; i < 40; i++ { + for range 40 { tokenCh <- struct{}{} } // Return a result after 50ms delay for each requests @@ -594,7 +594,7 @@ func TestRCFilter(t *testing.T) { re.Equal(0.0, f.update(now, 0)) lastOutput := 0.0 // 10000 even samples in 1 second. 
- for i := 0; i < 10000; i++ { + for range 10000 { now = now.Add(time.Microsecond * 100) output := f.update(now, 1.0) re.Greater(output, lastOutput) diff --git a/pkg/autoscaling/prometheus_test.go b/pkg/autoscaling/prometheus_test.go index 9fe69e810d1..39e48928fa7 100644 --- a/pkg/autoscaling/prometheus_test.go +++ b/pkg/autoscaling/prometheus_test.go @@ -49,7 +49,7 @@ var podNameTemplate = map[ComponentType]string{ func generatePodNames(component ComponentType) []string { names := make([]string, 0, instanceCount) pattern := podNameTemplate[component] - for i := 0; i < instanceCount; i++ { + for i := range instanceCount { names = append(names, fmt.Sprintf(pattern, mockClusterName, i)) } return names @@ -119,7 +119,7 @@ func (c *normalClient) buildCPUMockData(component ComponentType) { cpuQuotaQuery := cpuQuotaPromQLTemplate[component] var results []result - for i := 0; i < instanceCount; i++ { + for i := range instanceCount { results = append(results, result{ Value: []any{time.Now().Unix(), fmt.Sprintf("%f", mockResultValue)}, Metric: metric{ @@ -192,7 +192,7 @@ func TestRetrieveCPUMetrics(t *testing.T) { options := NewQueryOptions(component, metric, addresses[:len(addresses)-1], time.Now(), mockDuration) result, err := querier.Query(options) re.NoError(err) - for i := 0; i < len(addresses)-1; i++ { + for i := range len(addresses) - 1 { value, ok := result[addresses[i]] re.True(ok) re.Less(math.Abs(value-mockResultValue), 1e-6) diff --git a/pkg/balancer/balancer_test.go b/pkg/balancer/balancer_test.go index 2c760c6220c..1fdaf629219 100644 --- a/pkg/balancer/balancer_test.go +++ b/pkg/balancer/balancer_test.go @@ -30,7 +30,7 @@ func TestBalancerPutAndDelete(t *testing.T) { re.Equal(uint32(0), balancer.Next()) // test put exists := make(map[uint32]struct{}) - for i := 0; i < 100; i++ { + for range 100 { num := rand.Uint32() balancer.Put(num) exists[num] = struct{}{} @@ -77,12 +77,12 @@ func TestBalancerDuplicate(t *testing.T) { func TestRoundRobin(t *testing.T) { re := require.New(t) balancer := NewRoundRobin[uint32]() - for i := 0; i < 100; i++ { + for range 100 { num := rand.Uint32() balancer.Put(num) } statistics := make(map[uint32]int) - for i := 0; i < 1000; i++ { + for range 1000 { statistics[balancer.Next()]++ } min := 1000 diff --git a/pkg/btree/btree_generic_test.go b/pkg/btree/btree_generic_test.go index fd0df3e5aaf..8b432229470 100644 --- a/pkg/btree/btree_generic_test.go +++ b/pkg/btree/btree_generic_test.go @@ -50,7 +50,7 @@ func perm(n int) (out []Int) { // rang returns an ordered list of Int items in the range [0, n). 
func rang(n int) (out []Int) { - for i := 0; i < n; i++ { + for i := range n { out = append(out, Int(i)) } return @@ -101,10 +101,10 @@ func TestBTreeSizeInfo(t *testing.T) { max, _ := tr.Max() assertEq(t, "check max", tr.GetAt(tr.Len()-1), max) } - for k := 0; k < treeSize; k++ { + for k := range treeSize { assertEq(t, "get k-th", tr.GetAt(k), Int(k)) } - for x := Int(0); x < treeSize; x++ { + for x := range Int(treeSize) { y, rk := tr.GetWithIndex(x) assertEq(t, "get", y, x) assertEq(t, "get rank", rk, int(x)) @@ -128,10 +128,10 @@ func TestBTreeSizeInfo(t *testing.T) { max, _ := tr.Max() assertEq(t, "after delete check max", tr.GetAt(tr.Len()-1), max) } - for k := 0; k < treeSize/3; k++ { + for k := range treeSize / 3 { assertEq(t, "after delete get k-th", tr.GetAt(k), Int(3*k)) } - for x := Int(0); x < treeSize; x++ { + for x := range Int(treeSize) { y, rk := tr.GetWithIndex(x) if x%3 == 0 { assertEq(t, "after delete get", y, x) @@ -169,7 +169,7 @@ func TestBTreeSizeInfo(t *testing.T) { func TestBTreeG(t *testing.T) { tr := NewG[Int](*btreeDegree) const treeSize = 10000 - for i := 0; i < 10; i++ { + for range 10 { if min, found := tr.Min(); found { t.Fatalf("empty min, got %+v", min) } @@ -281,7 +281,7 @@ func TestDeleteMaxG(t *testing.T) { got = append(got, v) } // Reverse our list. - for i := 0; i < len(got)/2; i++ { + for i := range len(got) / 2 { got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i] } if want := rang(100); !reflect.DeepEqual(got, want) { @@ -786,7 +786,7 @@ func TestCloneConcurrentOperationsG(t *testing.T) { } t.Log("Removing half from first half") toRemove := rang(cloneTestSize)[cloneTestSize/2:] - for i := 0; i < len(trees)/2; i++ { + for i := range len(trees) / 2 { tree := trees[i] wg.Add(1) go func() { diff --git a/pkg/cgroup/cgroup_cpu_test.go b/pkg/cgroup/cgroup_cpu_test.go index c373f803210..265291163c3 100644 --- a/pkg/cgroup/cgroup_cpu_test.go +++ b/pkg/cgroup/cgroup_cpu_test.go @@ -67,7 +67,7 @@ func TestGetCgroupCPU(t *testing.T) { re := require.New(t) exit := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/cgroup/cgroup_mock_test.go b/pkg/cgroup/cgroup_mock_test.go index 5a7ca9a73dc..bfd4f1d12c9 100644 --- a/pkg/cgroup/cgroup_mock_test.go +++ b/pkg/cgroup/cgroup_mock_test.go @@ -371,7 +371,7 @@ const ( ) func TestCgroupsGetCPU(t *testing.T) { - for i := 0; i < 2; i++ { + for i := range 2 { if i == 1 { // The field in /proc/self/cgroup and /proc/self/mountinfo may appear as "cpuacct,cpu" or "rw,cpuacct,cpu" // while the input controller is "cpu,cpuacct" diff --git a/pkg/codec/codec_test.go b/pkg/codec/codec_test.go index 50bf552a60d..0e74f54e279 100644 --- a/pkg/codec/codec_test.go +++ b/pkg/codec/codec_test.go @@ -23,7 +23,7 @@ import ( func TestDecodeBytes(t *testing.T) { re := require.New(t) key := "abcdefghijklmnopqrstuvwxyz" - for i := 0; i < len(key); i++ { + for i := range key { _, k, err := DecodeBytes(EncodeBytes([]byte(key[:i]))) re.NoError(err) re.Equal(key[:i], string(k)) diff --git a/pkg/core/region.go b/pkg/core/region.go index 53268589c8a..b5ead86b3f1 100644 --- a/pkg/core/region.go +++ b/pkg/core/region.go @@ -2108,7 +2108,7 @@ func DiffRegionKeyInfo(origin *RegionInfo, other *RegionInfo) string { // ToUpperASCIIInplace bytes.ToUpper but zero-cost func ToUpperASCIIInplace(s []byte) []byte { hasLower := false - for i := 0; i < len(s); i++ { + for i := range s { c := s[i] hasLower = hasLower || ('a' <= c && c <= 'z') } @@ -2117,7 +2117,7 @@ func 
ToUpperASCIIInplace(s []byte) []byte { return s } var c byte - for i := 0; i < len(s); i++ { + for i := range s { c = s[i] if 'a' <= c && c <= 'z' { c -= 'a' - 'A' diff --git a/pkg/core/region_test.go b/pkg/core/region_test.go index 845944780e4..8c30efb2769 100644 --- a/pkg/core/region_test.go +++ b/pkg/core/region_test.go @@ -470,7 +470,7 @@ func TestSetRegionConcurrence(t *testing.T) { func TestSetRegion(t *testing.T) { re := require.New(t) regions := NewRegionsInfo() - for i := 0; i < 100; i++ { + for i := range 100 { peer1 := &metapb.Peer{StoreId: uint64(i%5 + 1), Id: uint64(i*5 + 1)} peer2 := &metapb.Peer{StoreId: uint64((i+1)%5 + 1), Id: uint64(i*5 + 2)} peer3 := &metapb.Peer{StoreId: uint64((i+2)%5 + 1), Id: uint64(i*5 + 3)} @@ -646,7 +646,7 @@ func BenchmarkUpdateBuckets(b *testing.B) { func BenchmarkRandomRegion(b *testing.B) { for _, size := range []int{10, 100, 1000, 10000, 100000, 1000000, 10000000} { regions := NewRegionsInfo() - for i := 0; i < size; i++ { + for i := range size { peer := &metapb.Peer{StoreId: 1, Id: uint64(i + 1)} region := NewRegionInfo(&metapb.Region{ Id: uint64(i + 1), @@ -708,7 +708,7 @@ func BenchmarkRandomRegion(b *testing.B) { func BenchmarkRandomSetRegion(b *testing.B) { regions := NewRegionsInfo() var items []*RegionInfo - for i := 0; i < 1000000; i++ { + for i := range 1000000 { peer := &metapb.Peer{StoreId: 1, Id: uint64(i + 1)} region := NewRegionInfo(&metapb.Region{ Id: uint64(i + 1), @@ -733,7 +733,7 @@ func BenchmarkRandomSetRegion(b *testing.B) { func TestGetRegionSizeByRange(t *testing.T) { regions := NewRegionsInfo() nums := 100001 - for i := 0; i < nums; i++ { + for i := range nums { peer := &metapb.Peer{StoreId: 1, Id: uint64(i + 1)} endKey := []byte(fmt.Sprintf("%20d", i+1)) if i == nums-1 { @@ -761,7 +761,7 @@ func TestGetRegionSizeByRange(t *testing.T) { func BenchmarkRandomSetRegionWithGetRegionSizeByRange(b *testing.B) { regions := NewRegionsInfo() var items []*RegionInfo - for i := 0; i < 1000000; i++ { + for i := range 1000000 { peer := &metapb.Peer{StoreId: 1, Id: uint64(i + 1)} region := NewRegionInfo(&metapb.Region{ Id: uint64(i + 1), @@ -791,7 +791,7 @@ func BenchmarkRandomSetRegionWithGetRegionSizeByRange(b *testing.B) { func BenchmarkRandomSetRegionWithGetRegionSizeByRangeParallel(b *testing.B) { regions := NewRegionsInfo() var items []*RegionInfo - for i := 0; i < 1000000; i++ { + for i := range 1000000 { peer := &metapb.Peer{StoreId: 1, Id: uint64(i + 1)} region := NewRegionInfo(&metapb.Region{ Id: uint64(i + 1), @@ -836,7 +836,7 @@ func newRegionInfoIDRandom(idAllocator id.Allocator) *RegionInfo { ) // Randomly select a peer as the leader. leaderIdx := mrand.Intn(peerNum) - for i := 0; i < peerNum; i++ { + for i := range peerNum { id, _ := idAllocator.Alloc() // Randomly distribute the peers to different stores. 
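A few hunks range over the collection itself rather than an integer, e.g. `for i := range key` in codec_test.go and `for i := range s` in ToUpperASCIIInplace. For a slice this is always equivalent to indexing from 0 to len-1; for a string it yields the starting byte offset of each rune, which only matches every byte index when the string is pure ASCII, as the test key here is. A small standalone illustration of that caveat:

```go
package main

import "fmt"

func main() {
	b := []byte("pd")
	// Ranging over a slice always yields the indices 0..len(b)-1, so it is a
	// drop-in replacement for `for i := 0; i < len(b); i++`.
	for i := range b {
		fmt.Println("byte index", i, b[i])
	}

	s := "héllo"
	// Ranging over a string yields the starting byte offset of each rune
	// (0, 1, 3, 4, 5 here), so the indices are not contiguous once multi-byte
	// runes appear. The ASCII-only keys in the diff above make the two forms
	// equivalent, but the rewrite is not universally safe for strings.
	for i := range s {
		fmt.Println("rune start", i)
	}
}
```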
p := &metapb.Peer{Id: id, StoreId: uint64(mrand.Intn(storeNum) + 1)} @@ -921,7 +921,7 @@ func BenchmarkUpdateSubTreeOrderInsensitive(b *testing.B) { func generateRegionItems(idAllocator *mockid.IDAllocator, size int) []*RegionInfo { items := make([]*RegionInfo, size) - for i := 0; i < size; i++ { + for i := range size { items[i] = newRegionInfoIDRandom(idAllocator) } return items @@ -1022,7 +1022,7 @@ func TestUpdateRegionEquivalence(t *testing.T) { func generateTestRegions(count int, storeNum int) []*RegionInfo { var items []*RegionInfo - for i := 0; i < count; i++ { + for i := range count { peer1 := &metapb.Peer{StoreId: uint64(i%storeNum + 1), Id: uint64(i*storeNum + 1)} peer2 := &metapb.Peer{StoreId: uint64((i+1)%storeNum + 1), Id: uint64(i*storeNum + 2)} peer3 := &metapb.Peer{StoreId: uint64((i+2)%storeNum + 1), Id: uint64(i*storeNum + 3)} diff --git a/pkg/core/region_tree_test.go b/pkg/core/region_tree_test.go index a2b1bfab7a7..0dedd91be9e 100644 --- a/pkg/core/region_tree_test.go +++ b/pkg/core/region_tree_test.go @@ -29,7 +29,7 @@ func TestRegionInfo(t *testing.T) { n := uint64(3) peers := make([]*metapb.Peer, 0, n) - for i := uint64(0); i < n; i++ { + for i := range n { p := &metapb.Peer{ Id: i, StoreId: i, @@ -50,7 +50,7 @@ func TestRegionInfo(t *testing.T) { r := info.Clone() re.Equal(info, r) - for i := uint64(0); i < n; i++ { + for i := range n { re.Equal(r.meta.Peers[i], r.GetPeer(i)) } re.Nil(r.GetPeer(n)) @@ -59,7 +59,7 @@ func TestRegionInfo(t *testing.T) { re.Nil(r.GetPendingPeer(n)) re.Equal(pendingPeer, r.GetPendingPeer(pendingPeer.GetId())) - for i := uint64(0); i < n; i++ { + for i := range n { re.Equal(i, r.GetStorePeer(i).GetStoreId()) } re.Nil(r.GetStorePeer(n)) @@ -82,7 +82,7 @@ func TestRegionInfo(t *testing.T) { stores := r.GetStoreIDs() re.Len(stores, int(n)) - for i := uint64(0); i < n; i++ { + for i := range n { _, ok := stores[i] re.True(ok) } @@ -249,19 +249,19 @@ func TestRegionTreeSplitAndMerge(t *testing.T) { n := 7 // Split. - for i := 0; i < n; i++ { + for range n { regions = SplitRegions(regions) updateRegions(re, tree, regions) } // Merge. - for i := 0; i < n; i++ { + for range n { regions = MergeRegions(regions) updateRegions(re, tree, regions) } // Split twice and merge once. 
- for i := 0; i < n*2; i++ { + for i := range n * 2 { if (i+1)%3 == 0 { regions = MergeRegions(regions) } else { @@ -413,7 +413,7 @@ func (m *mockRegionTreeData) shuffleItems() *mockRegionTreeData { func mock1MRegionTree() *mockRegionTreeData { data := &mockRegionTreeData{newRegionTree(), make([]*RegionInfo, 1000000)} - for i := 0; i < 1_000_000; i++ { + for i := range 1_000_000 { region := &RegionInfo{meta: &metapb.Region{Id: uint64(i), StartKey: []byte(fmt.Sprintf("%20d", i)), EndKey: []byte(fmt.Sprintf("%20d", i+1))}} updateNewItem(data.tree, region) data.items[i] = region @@ -443,7 +443,7 @@ func BenchmarkRegionTreeRandomInsert(b *testing.B) { func BenchmarkRegionTreeRandomOverlapsInsert(b *testing.B) { tree := newRegionTree() var items []*RegionInfo - for i := 0; i < MaxCount; i++ { + for range MaxCount { var startKey, endKey int key1 := rand.Intn(MaxCount) key2 := rand.Intn(MaxCount) @@ -493,7 +493,7 @@ func BenchmarkRegionTreeRandomLookUpRegion(b *testing.B) { func BenchmarkRegionTreeScan(b *testing.B) { data := mock1MRegionTree().shuffleItems() b.ResetTimer() - for i := 0; i < 1; i++ { + for i := 0; i < b.N; i++ { data.tree.scanRanges() } } diff --git a/pkg/core/storelimit/limit_test.go b/pkg/core/storelimit/limit_test.go index e11618767a1..cfe805935a5 100644 --- a/pkg/core/storelimit/limit_test.go +++ b/pkg/core/storelimit/limit_test.go @@ -85,7 +85,7 @@ func TestSlidingWindow(t *testing.T) { re.Equal([]int64{capacity - minSnapSize, 0, 0, 0}, s.GetUsed()) // case 3: skip the type is not the SendSnapshot - for i := 0; i < 10; i++ { + for range 10 { re.True(s.Take(capacity, AddPeer, constant.Low)) } } diff --git a/pkg/core/storelimit/sliding_window.go b/pkg/core/storelimit/sliding_window.go index 8feb0a2094d..6f6e39167f6 100644 --- a/pkg/core/storelimit/sliding_window.go +++ b/pkg/core/storelimit/sliding_window.go @@ -41,7 +41,7 @@ type SlidingWindows struct { // NewSlidingWindows is the construct of SlidingWindows. func NewSlidingWindows() *SlidingWindows { windows := make([]*window, constant.PriorityLevelLen) - for i := 0; i < int(constant.PriorityLevelLen); i++ { + for i := range constant.PriorityLevelLen { windows[i] = newWindow(int64(defaultWindowSize) >> i) } return &SlidingWindows{ diff --git a/pkg/election/leadership_test.go b/pkg/election/leadership_test.go index 1846d671a61..eab842ca6e5 100644 --- a/pkg/election/leadership_test.go +++ b/pkg/election/leadership_test.go @@ -274,7 +274,7 @@ func TestCampaignTimes(t *testing.T) { defer func() { campaignTimesRecordTimeout = 5 * time.Minute }() - for i := 0; i < 3; i++ { + for range 3 { leadership.AddCampaignTimes() time.Sleep(100 * time.Millisecond) } @@ -282,7 +282,7 @@ func TestCampaignTimes(t *testing.T) { // only the last 2 records are valid. 
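One hunk just above, in BenchmarkRegionTreeScan, is not an intrange rewrite at all: the benchmark previously ran a fixed single iteration, and the patch switches it to the standard b.N loop so the testing harness can scale the workload and report a meaningful per-operation time. The conventional shape, sketched with a placeholder body:

```go
package core_test

import "testing"

// The testing package runs a benchmark repeatedly, increasing b.N until the
// timing stabilizes, so the body must execute exactly b.N times.
func BenchmarkScan(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = make([]int, 0, 8) // placeholder workload, not the real scanRanges
	}
}
```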
campaignTimesRecordTimeout = 200 * time.Millisecond - for i := 0; i < 3; i++ { + for range 3 { leadership.AddCampaignTimes() time.Sleep(100 * time.Millisecond) } diff --git a/pkg/encryption/key_manager.go b/pkg/encryption/key_manager.go index 5a029847a9d..621b1b9742f 100644 --- a/pkg/encryption/key_manager.go +++ b/pkg/encryption/key_manager.go @@ -402,7 +402,7 @@ func (m *Manager) rotateKeyIfNeeded(forceUpdate bool) error { } if needRotate { rotated := false - for attempt := 0; attempt < keyRotationRetryLimit; attempt += 1 { + for range keyRotationRetryLimit { keyID, key, err := NewDataKey(m.method, uint64(m.helper.now().Unix())) if err != nil { return nil diff --git a/pkg/gc/safepoint_test.go b/pkg/gc/safepoint_test.go index bc1e551594c..62ce9c086fc 100644 --- a/pkg/gc/safepoint_test.go +++ b/pkg/gc/safepoint_test.go @@ -66,7 +66,7 @@ func TestGCSafePointUpdateCurrently(t *testing.T) { re := require.New(t) // update gc safePoint concurrently - for id := 0; id < 20; id++ { + for id := range 20 { wg.Add(1) go func(step uint64) { for safePoint := step; safePoint <= maxSafePoint; safePoint += step { diff --git a/pkg/gctuner/tuner_test.go b/pkg/gctuner/tuner_test.go index 7018634c5d1..d46bb8edf94 100644 --- a/pkg/gctuner/tuner_test.go +++ b/pkg/gctuner/tuner_test.go @@ -37,7 +37,7 @@ func TestTuner(t *testing.T) { testHeap = make([]byte, 1) runtime.GC() runtime.GC() - for i := 0; i < 100; i++ { + for range 100 { runtime.GC() require.Eventually(t, func() bool { return maxGCPercent.Load() == tn.getGCPercent() }, 1*time.Second, 50*time.Microsecond) @@ -45,7 +45,7 @@ func TestTuner(t *testing.T) { // 1/4 threshold testHeap = make([]byte, threshold/4) - for i := 0; i < 100; i++ { + for range 100 { runtime.GC() require.GreaterOrEqual(t, tn.getGCPercent(), maxGCPercent.Load()/2) require.LessOrEqual(t, tn.getGCPercent(), maxGCPercent.Load()) @@ -54,7 +54,7 @@ func TestTuner(t *testing.T) { // 1/2 threshold testHeap = make([]byte, threshold/2) runtime.GC() - for i := 0; i < 100; i++ { + for range 100 { runtime.GC() require.Eventually(t, func() bool { return tn.getGCPercent() >= minGCPercent.Load() }, 1*time.Second, 50*time.Microsecond) @@ -65,7 +65,7 @@ func TestTuner(t *testing.T) { // 3/4 threshold testHeap = make([]byte, threshold/4*3) runtime.GC() - for i := 0; i < 100; i++ { + for range 100 { runtime.GC() require.Eventually(t, func() bool { return minGCPercent.Load() == tn.getGCPercent() }, 1*time.Second, 50*time.Microsecond) @@ -74,7 +74,7 @@ func TestTuner(t *testing.T) { // out of threshold testHeap = make([]byte, threshold+1024) runtime.GC() - for i := 0; i < 100; i++ { + for range 100 { runtime.GC() require.Eventually(t, func() bool { return minGCPercent.Load() == tn.getGCPercent() }, 1*time.Second, 50*time.Microsecond) diff --git a/pkg/id/id_test.go b/pkg/id/id_test.go index 94f0670b979..d46ac5a963e 100644 --- a/pkg/id/id_test.go +++ b/pkg/id/id_test.go @@ -45,7 +45,7 @@ func TestMultipleAllocator(t *testing.T) { re.NoError(err) wg := sync.WaitGroup{} - for i := 0; i < 3; i++ { + for i := range 3 { iStr := strconv.Itoa(i) wg.Add(1) // All allocators share rootPath and memberVal, but they have different allocPaths, labels and steps. 
diff --git a/pkg/keyspace/keyspace_test.go b/pkg/keyspace/keyspace_test.go index b322def6bad..3eee9e13a65 100644 --- a/pkg/keyspace/keyspace_test.go +++ b/pkg/keyspace/keyspace_test.go @@ -103,7 +103,7 @@ func (suite *keyspaceTestSuite) TearDownSuite() { func makeCreateKeyspaceRequests(count int) []*CreateKeyspaceRequest { now := time.Now().Unix() requests := make([]*CreateKeyspaceRequest, count) - for i := 0; i < count; i++ { + for i := range count { requests[i] = &CreateKeyspaceRequest{ Name: fmt.Sprintf("test_keyspace_%d", i), Config: map[string]string{ diff --git a/pkg/keyspace/tso_keyspace_group.go b/pkg/keyspace/tso_keyspace_group.go index a09098b3a47..68409bd471c 100644 --- a/pkg/keyspace/tso_keyspace_group.go +++ b/pkg/keyspace/tso_keyspace_group.go @@ -90,8 +90,8 @@ func NewKeyspaceGroupManager( ) *GroupManager { ctx, cancel := context.WithCancel(ctx) groups := make(map[endpoint.UserKind]*indexedHeap) - for i := 0; i < int(endpoint.UserKindCount); i++ { - groups[endpoint.UserKind(i)] = newIndexedHeap(int(constant.MaxKeyspaceGroupCountInUse)) + for i := range endpoint.UserKindCount { + groups[i] = newIndexedHeap(int(constant.MaxKeyspaceGroupCountInUse)) } m := &GroupManager{ ctx: ctx, diff --git a/pkg/keyspace/tso_keyspace_group_test.go b/pkg/keyspace/tso_keyspace_group_test.go index 4dcb85b2939..2661cde9e7a 100644 --- a/pkg/keyspace/tso_keyspace_group_test.go +++ b/pkg/keyspace/tso_keyspace_group_test.go @@ -136,7 +136,7 @@ func (suite *keyspaceGroupTestSuite) TestKeyspaceAssignment() { re.NoError(err) re.Len(kgs, 4) - for i := 0; i < 99; i++ { + for i := range 99 { _, err := suite.kg.CreateKeyspace(&CreateKeyspaceRequest{ Name: fmt.Sprintf("test%d", i), Config: map[string]string{ diff --git a/pkg/mcs/discovery/register_test.go b/pkg/mcs/discovery/register_test.go index 3ea0148d75f..bf35393a814 100644 --- a/pkg/mcs/discovery/register_test.go +++ b/pkg/mcs/discovery/register_test.go @@ -63,7 +63,7 @@ func TestRegister(t *testing.T) { re.NoError(err) fname := testutil.InitTempFileLogger("info") defer os.Remove(fname) - for i := 0; i < 3; i++ { + for i := range 3 { re.Equal("127.0.0.1:2", getKeyAfterLeaseExpired(re, client, sr.key)) etcd.Server.HardStop() // close the etcd to make the keepalive failed // ensure that the request is timeout diff --git a/pkg/mcs/resourcemanager/server/manager.go b/pkg/mcs/resourcemanager/server/manager.go index 618b7ac6dba..7f7e710b3fb 100644 --- a/pkg/mcs/resourcemanager/server/manager.go +++ b/pkg/mcs/resourcemanager/server/manager.go @@ -344,7 +344,7 @@ func (m *Manager) persistResourceGroupRunningState() { keys = append(keys, k) } m.RUnlock() - for idx := 0; idx < len(keys); idx++ { + for idx := range keys { m.RLock() group, ok := m.groups[keys[idx]] if ok { diff --git a/pkg/mcs/resourcemanager/server/metrics_test.go b/pkg/mcs/resourcemanager/server/metrics_test.go index 62d07286eaf..d69d364b64b 100644 --- a/pkg/mcs/resourcemanager/server/metrics_test.go +++ b/pkg/mcs/resourcemanager/server/metrics_test.go @@ -30,7 +30,7 @@ func TestMaxPerSecCostTracker(t *testing.T) { expectedMaxRU := []float64{19, 39, 59} expectedSum := []float64{190, 780, 1770} - for i := 0; i < 60; i++ { + for i := range 60 { // Record data consumption := &rmpb.Consumption{ RRU: float64(i), diff --git a/pkg/mcs/resourcemanager/server/resource_group_test.go b/pkg/mcs/resourcemanager/server/resource_group_test.go index 87ff6da2632..96325ea3653 100644 --- a/pkg/mcs/resourcemanager/server/resource_group_test.go +++ b/pkg/mcs/resourcemanager/server/resource_group_test.go @@ -50,7 
+50,7 @@ func resetSizeCacheRecursive(value reflect.Value) { return } - for i := 0; i < value.NumField(); i++ { + for i := range value.NumField() { fieldValue := value.Field(i) fieldType := value.Type().Field(i) diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index 8d7317f547b..1584f0fbc28 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -359,7 +359,7 @@ func (mc *Cluster) AddRegionStoreWithLeader(storeID uint64, regionCount int, lea leaderCount = leaderCounts[0] } mc.AddRegionStore(storeID, regionCount) - for i := 0; i < leaderCount; i++ { + for range leaderCount { id, _ := mc.AllocID() mc.AddLeaderRegion(id, storeID) } @@ -463,7 +463,7 @@ func (mc *Cluster) AddRegionWithReadInfo( } var items []*statistics.HotPeerStat - for i := 0; i < filledNum; i++ { + for range filledNum { items = mc.CheckRegionRead(r) for _, item := range items { mc.HotCache.Update(item, utils.Read) @@ -483,7 +483,7 @@ func (mc *Cluster) AddRegionWithPeerReadInfo(regionID, leaderStoreID, targetStor filledNum = filledNums[0] } var items []*statistics.HotPeerStat - for i := 0; i < filledNum; i++ { + for range filledNum { items = mc.CheckRegionRead(r) for _, item := range items { if item.StoreID == targetStoreID { @@ -512,7 +512,7 @@ func (mc *Cluster) AddRegionLeaderWithReadInfo( } var items []*statistics.HotPeerStat - for i := 0; i < filledNum; i++ { + for range filledNum { items = mc.CheckRegionLeaderRead(r) for _, item := range items { mc.HotCache.Update(item, utils.Read) @@ -540,7 +540,7 @@ func (mc *Cluster) AddLeaderRegionWithWriteInfo( } var items []*statistics.HotPeerStat - for i := 0; i < filledNum; i++ { + for range filledNum { items = mc.CheckRegionWrite(r) for _, item := range items { mc.HotCache.Update(item, utils.Write) diff --git a/pkg/movingaverage/avg_over_time.go b/pkg/movingaverage/avg_over_time.go index 664466fe98c..07e21ee9456 100644 --- a/pkg/movingaverage/avg_over_time.go +++ b/pkg/movingaverage/avg_over_time.go @@ -115,7 +115,7 @@ func (aot *AvgOverTime) IsFull() bool { // Clone returns a copy of AvgOverTime func (aot *AvgOverTime) Clone() *AvgOverTime { q := queue.New() - for i := 0; i < aot.que.Len(); i++ { + for range aot.que.Len() { v := aot.que.PopFront() aot.que.PushBack(v) q.PushBack(v) diff --git a/pkg/movingaverage/avg_over_time_test.go b/pkg/movingaverage/avg_over_time_test.go index 4a54e33d449..faa4f5f88e9 100644 --- a/pkg/movingaverage/avg_over_time_test.go +++ b/pkg/movingaverage/avg_over_time_test.go @@ -26,11 +26,11 @@ func TestPulse(t *testing.T) { re := require.New(t) aot := NewAvgOverTime(5 * time.Second) // warm up - for i := 0; i < 5; i++ { + for range 5 { aot.Add(1000, time.Second) aot.Add(0, time.Second) } - for i := 0; i < 100; i++ { + for i := range 100 { if i%2 == 0 { aot.Add(1000, time.Second) } else { @@ -59,24 +59,24 @@ func TestChange(t *testing.T) { aot := NewAvgOverTime(5 * time.Second) // phase 1: 1000 - for i := 0; i < 20; i++ { + for range 20 { aot.Add(1000, time.Second) } re.LessOrEqual(aot.Get(), 1010.) re.GreaterOrEqual(aot.Get(), 990.) // phase 2: 500 - for i := 0; i < 5; i++ { + for range 5 { aot.Add(500, time.Second) } re.LessOrEqual(aot.Get(), 900.) re.GreaterOrEqual(aot.Get(), 495.) - for i := 0; i < 15; i++ { + for range 15 { aot.Add(500, time.Second) } // phase 3: 100 - for i := 0; i < 5; i++ { + for range 5 { aot.Add(100, time.Second) } re.LessOrEqual(aot.Get(), 678.) 
@@ -94,7 +94,7 @@ func TestMinFilled(t *testing.T) { for aotSize := 2; aotSize < 10; aotSize++ { for mfSize := 2; mfSize < 10; mfSize++ { tm := NewTimeMedian(aotSize, mfSize, interval) - for i := 0; i < aotSize; i++ { + for range aotSize { re.Equal(0.0, tm.Get()) tm.Add(rate*interval.Seconds(), interval) } @@ -108,22 +108,22 @@ func TestUnstableInterval(t *testing.T) { aot := NewAvgOverTime(5 * time.Second) re.Equal(0., aot.Get()) // warm up - for i := 0; i < 5; i++ { + for range 5 { aot.Add(1000, time.Second) } // same rate, different interval - for i := 0; i < 1000; i++ { + for range 1000 { r := float64(rand.Intn(5)) aot.Add(1000*r, time.Second*time.Duration(r)) re.LessOrEqual(aot.Get(), 1010.) re.GreaterOrEqual(aot.Get(), 990.) } // warm up - for i := 0; i < 5; i++ { + for range 5 { aot.Add(500, time.Second) } // different rate, same interval - for i := 0; i < 1000; i++ { + for i := range 1000 { rate := float64(i%5*100) + 500 aot.Add(rate*3, time.Second*3) re.LessOrEqual(aot.Get(), 910.) diff --git a/pkg/movingaverage/moving_average_test.go b/pkg/movingaverage/moving_average_test.go index fd0a1a9fcf3..882f3c2266f 100644 --- a/pkg/movingaverage/moving_average_test.go +++ b/pkg/movingaverage/moving_average_test.go @@ -25,7 +25,7 @@ import ( func addRandData(ma MovingAvg, n int, mx float64) { r := rand.New(rand.NewSource(time.Now().UnixNano())) - for i := 0; i < n; i++ { + for range n { ma.Add(r.Float64() * mx) } } diff --git a/pkg/movingaverage/weight_allocator.go b/pkg/movingaverage/weight_allocator.go index e0427d84645..6af93c10417 100644 --- a/pkg/movingaverage/weight_allocator.go +++ b/pkg/movingaverage/weight_allocator.go @@ -40,7 +40,7 @@ func NewWeightAllocator(length, segNum int) *WeightAllocator { segIndexes := make([]int, 0, segNum) weights := make([]float64, 0, length) unitCount := 0 - for i := 0; i < segNum; i++ { + for i := range segNum { next := segLength if segMod > i { next++ @@ -49,8 +49,8 @@ func NewWeightAllocator(length, segNum int) *WeightAllocator { segIndexes = append(segIndexes, next) } unitWeight := 1.0 / float64(unitCount) - for i := 0; i < segNum; i++ { - for j := 0; j < segIndexes[i]; j++ { + for i := range segNum { + for range segIndexes[i] { weights = append(weights, unitWeight*float64(segNum-i)) } } diff --git a/pkg/movingaverage/weight_allocator_test.go b/pkg/movingaverage/weight_allocator_test.go index 405d8f72876..e102bdb59d5 100644 --- a/pkg/movingaverage/weight_allocator_test.go +++ b/pkg/movingaverage/weight_allocator_test.go @@ -25,7 +25,7 @@ func TestWeightAllocator(t *testing.T) { checkSumFunc := func(wa *WeightAllocator, length int) { sum := 0. 
- for i := 0; i < length; i++ { + for i := range length { sum += wa.Get(i) } re.Less(1-sum, 1e-8) @@ -42,7 +42,7 @@ func TestWeightAllocator(t *testing.T) { wa = NewWeightAllocator(10, 10) checkSumFunc(wa, 10) - for i := 0; i < 10; i++ { + for i := range 10 { re.Equal(1./55.*float64(10-i), wa.Get(i)) } checkTimeFunc(wa.Get(0), wa.Get(9), 10) diff --git a/pkg/progress/progress_test.go b/pkg/progress/progress_test.go index a7b159bc907..5fa486a0577 100644 --- a/pkg/progress/progress_test.go +++ b/pkg/progress/progress_test.go @@ -43,7 +43,7 @@ func TestProgress(t *testing.T) { re.Less(math.Abs(ls-30.0/7.0), 1e-6) re.Less(math.Abs(cs-7), 1e-6) // there is no scheduling - for i := 0; i < 1000; i++ { + for range 1000 { m.UpdateProgress(n, 30, 30, false) } re.Equal(721, m.progresses[n].history.Len()) @@ -124,7 +124,7 @@ func TestProgressWithDynamicWindow(t *testing.T) { re.Less(math.Abs(ls-30.0/(7.0/2)), 1e-6) re.Less(math.Abs(cs-3.5), 1e-6) - for i := 0; i < 1000; i++ { + for range 1000 { m.UpdateProgress(n, 30, 30, false) } re.Equal(721, m.progresses[n].history.Len()) @@ -138,7 +138,7 @@ func TestProgressWithDynamicWindow(t *testing.T) { re.Equal(30.0, m.progresses[n].front.Value.(float64)) re.Equal(721, m.progresses[n].history.Len()) - for i := 0; i < 60; i++ { + for range 60 { m.UpdateProgress(n, 28, 28, false) } re.Equal(721, m.progresses[n].history.Len()) @@ -193,7 +193,7 @@ func TestProgressWithDynamicWindow(t *testing.T) { re.Equal(0.99, p) re.Equal(float64(1/(29./720)*10.), ls) re.Equal(float64(29./720/10.), cs) - for i := 0; i < 2000; i++ { + for range 2000 { m.UpdateProgress(n, 1, 1, false) } re.Equal(721, m.progresses[n].history.Len()) diff --git a/pkg/ratelimit/concurrency_limiter_test.go b/pkg/ratelimit/concurrency_limiter_test.go index f0af1125d21..72c15101f5f 100644 --- a/pkg/ratelimit/concurrency_limiter_test.go +++ b/pkg/ratelimit/concurrency_limiter_test.go @@ -28,7 +28,7 @@ import ( func TestConcurrencyLimiter(t *testing.T) { re := require.New(t) cl := NewConcurrencyLimiter(10) - for i := 0; i < 10; i++ { + for range 10 { re.True(cl.allow()) } re.False(cl.allow()) @@ -42,15 +42,15 @@ func TestConcurrencyLimiter(t *testing.T) { re.Equal(uint64(10), cl.GetRunningTasksNum()) cl.release() re.Equal(uint64(9), cl.GetRunningTasksNum()) - for i := 0; i < 9; i++ { + for range 9 { cl.release() } re.Equal(uint64(10), cl.getMaxConcurrency()) - for i := 0; i < 5; i++ { + for range 5 { re.True(cl.allow()) } re.Equal(uint64(5), cl.GetRunningTasksNum()) - for i := 0; i < 5; i++ { + for range 5 { cl.release() } re.Equal(uint64(5), cl.getMaxConcurrency()) @@ -106,7 +106,7 @@ func TestConcurrencyLimiterAcquire(t *testing.T) { start := time.Now() wg := &sync.WaitGroup{} wg.Add(100) - for i := 0; i < 100; i++ { + for i := range 100 { go func(i int) { defer wg.Done() token, err := limiter.AcquireToken(ctx) diff --git a/pkg/ratelimit/controller_test.go b/pkg/ratelimit/controller_test.go index a25b152c48e..5efa6ec1190 100644 --- a/pkg/ratelimit/controller_test.go +++ b/pkg/ratelimit/controller_test.go @@ -55,7 +55,7 @@ func runMulitLabelLimiter(t *testing.T, limiter *Controller, testCase []labelCas for _, rd := range cas.round { rd.checkOptionStatus(cas.label, rd.opt) time.Sleep(rd.waitDuration) - for i := 0; i < rd.totalRequest; i++ { + for range rd.totalRequest { wg.Add(1) go func() { countRateLimiterHandleResult(limiter, cas.label, &successCount, &failedCount, &lock, &wg, r) @@ -64,7 +64,7 @@ func runMulitLabelLimiter(t *testing.T, limiter *Controller, testCase []labelCas wg.Wait() 
re.Equal(rd.fail, failedCount) re.Equal(rd.success, successCount) - for i := 0; i < rd.release; i++ { + for range rd.release { r.release() } rd.checkStatusFunc(cas.label) @@ -204,7 +204,7 @@ func TestBlockList(t *testing.T) { status := UpdateQPSLimiter(float64(rate.Every(time.Second)), 1)(label, limiter) re.NotZero(status & InAllowList) - for i := 0; i < 10; i++ { + for range 10 { _, err := limiter.Allow(label) re.NoError(err) } diff --git a/pkg/ratelimit/limiter_test.go b/pkg/ratelimit/limiter_test.go index 520ad3d13d1..256f9ea9ab4 100644 --- a/pkg/ratelimit/limiter_test.go +++ b/pkg/ratelimit/limiter_test.go @@ -49,7 +49,7 @@ func TestWithConcurrencyLimiter(t *testing.T) { successCount, failedCount := 0, 0 var wg sync.WaitGroup r := &releaseUtil{} - for i := 0; i < 15; i++ { + for range 15 { wg.Add(1) go func() { countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) @@ -58,7 +58,7 @@ func TestWithConcurrencyLimiter(t *testing.T) { wg.Wait() re.Equal(5, failedCount) re.Equal(10, successCount) - for i := 0; i < 10; i++ { + for range 10 { r.release() } @@ -73,14 +73,14 @@ func TestWithConcurrencyLimiter(t *testing.T) { re.NotZero(status & LimiterUpdated) failedCount = 0 successCount = 0 - for i := 0; i < 15; i++ { + for range 15 { wg.Add(1) go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) } wg.Wait() re.Equal(10, failedCount) re.Equal(5, successCount) - for i := 0; i < 5; i++ { + for range 5 { r.release() } @@ -88,7 +88,7 @@ func TestWithConcurrencyLimiter(t *testing.T) { re.NotZero(status & LimiterDeleted) failedCount = 0 successCount = 0 - for i := 0; i < 15; i++ { + for range 15 { wg.Add(1) go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) } @@ -112,7 +112,7 @@ func TestWithQPSLimiter(t *testing.T) { var wg sync.WaitGroup r := &releaseUtil{} wg.Add(3) - for i := 0; i < 3; i++ { + for range 3 { go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) } wg.Wait() @@ -133,7 +133,7 @@ func TestWithQPSLimiter(t *testing.T) { re.Equal(5, burst) time.Sleep(time.Second) - for i := 0; i < 10; i++ { + for i := range 10 { if i < 5 { _, err := limiter.allow() re.NoError(err) @@ -146,7 +146,7 @@ func TestWithQPSLimiter(t *testing.T) { status = limiter.updateQPSConfig(0, 0) re.NotZero(status & LimiterDeleted) - for i := 0; i < 10; i++ { + for range 10 { _, err := limiter.allow() re.NoError(err) } @@ -159,7 +159,7 @@ func TestWithQPSLimiter(t *testing.T) { status = limiter.updateQPSConfig(float64(rate.Every(3*time.Second)), 100) re.NotZero(status & LimiterUpdated) wg.Add(200) - for i := 0; i < 200; i++ { + for range 200 { go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) } wg.Wait() @@ -191,7 +191,7 @@ func TestWithTwoLimiters(t *testing.T) { var wg sync.WaitGroup r := &releaseUtil{} wg.Add(200) - for i := 0; i < 200; i++ { + for range 200 { go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) } wg.Wait() @@ -200,20 +200,20 @@ func TestWithTwoLimiters(t *testing.T) { time.Sleep(time.Second) wg.Add(100) - for i := 0; i < 100; i++ { + for range 100 { go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) } wg.Wait() re.Equal(200, failedCount) re.Equal(100, successCount) - for i := 0; i < 100; i++ { + for range 100 { r.release() } status = limiter.updateQPSConfig(float64(rate.Every(10*time.Second)), 1) re.NotZero(status & LimiterUpdated) wg.Add(100) - for i := 0; i < 100; 
i++ { + for range 100 { go countSingleLimiterHandleResult(limiter, &successCount, &failedCount, &lock, &wg, r) } wg.Wait() diff --git a/pkg/ratelimit/runner_test.go b/pkg/ratelimit/runner_test.go index a9090804a08..f7e7020aa94 100644 --- a/pkg/ratelimit/runner_test.go +++ b/pkg/ratelimit/runner_test.go @@ -30,7 +30,7 @@ func TestConcurrentRunner(t *testing.T) { defer runner.Stop() var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for i := range 10 { time.Sleep(50 * time.Millisecond) wg.Add(1) err := runner.RunTask( @@ -51,7 +51,7 @@ func TestConcurrentRunner(t *testing.T) { runner.Start(context.TODO()) defer runner.Stop() var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for i := range 10 { wg.Add(1) err := runner.RunTask( uint64(i), diff --git a/pkg/schedule/checker/merge_checker_test.go b/pkg/schedule/checker/merge_checker_test.go index 61b8cd579df..03b3a5f83a3 100644 --- a/pkg/schedule/checker/merge_checker_test.go +++ b/pkg/schedule/checker/merge_checker_test.go @@ -498,7 +498,7 @@ func (suite *mergeCheckerTestSuite) TestStoreLimitWithMerge() { tc.SetAllStoresLimit(storelimit.RemovePeer, 0.0000001) tc.PutRegion(regions[2]) // The size of Region is less or equal than 1MB. - for i := 0; i < 50; i++ { + for range 50 { ops := mc.Check(regions[2]) re.NotNil(ops) re.True(oc.AddOperator(ops...)) @@ -512,7 +512,7 @@ func (suite *mergeCheckerTestSuite) TestStoreLimitWithMerge() { ) tc.PutRegion(regions[2]) // The size of Region is more than 1MB but no more than 20MB. - for i := 0; i < 5; i++ { + for range 5 { ops := mc.Check(regions[2]) re.NotNil(ops) re.True(oc.AddOperator(ops...)) diff --git a/pkg/schedule/checker/rule_checker_test.go b/pkg/schedule/checker/rule_checker_test.go index b24a95e2ade..5ac67122de1 100644 --- a/pkg/schedule/checker/rule_checker_test.go +++ b/pkg/schedule/checker/rule_checker_test.go @@ -187,7 +187,7 @@ func (suite *ruleCheckerTestSuite) TestFixPeer() { nr1 := r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetStorePeer(3)})) suite.cluster.PutRegion(nr1) hasTransferLeader := false - for i := 0; i < 100; i++ { + for range 100 { op = suite.rc.Check(suite.cluster.GetRegion(1)) re.NotNil(op) if step, ok := op.Step(0).(operator.TransferLeader); ok { @@ -838,14 +838,14 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule // disconnect any two stores and change rule to 3 replicas stores := []uint64{1, 2, 3, 4, 5} testCases := [][]uint64{} - for i := 0; i < len(stores); i++ { + for i := range stores { for j := i + 1; j < len(stores); j++ { testCases = append(testCases, []uint64{stores[i], stores[j]}) } } for _, leader := range stores { var followers []uint64 - for i := 0; i < len(stores); i++ { + for i := range stores { if stores[i] != leader { followers = append(followers, stores[i]) } @@ -896,7 +896,7 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule re.Contains(op.Desc(), "orphan") var removedPeerStoreID uint64 newLeaderStoreID := r1.GetLeader().GetStoreId() - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { if s, ok := op.Step(i).(operator.RemovePeer); ok { removedPeerStoreID = s.FromStore } @@ -931,7 +931,7 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule // and there is a learner in the disconnected store. 
stores := []uint64{1, 2, 3, 4, 5, 6} testCases := [][]uint64{} - for i := 0; i < len(stores); i++ { + for i := range stores { for j := i + 1; j < len(stores); j++ { for k := j + 1; k < len(stores); k++ { testCases = append(testCases, []uint64{stores[i], stores[j], stores[k]}) @@ -940,7 +940,7 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule } for _, leader := range stores { var followers []uint64 - for i := 0; i < len(stores); i++ { + for i := range stores { if stores[i] != leader { followers = append(followers, stores[i]) } @@ -1020,7 +1020,7 @@ func (suite *ruleCheckerTestSuite) TestFixOrphanPeerWithDisconnectedStoreAndRule re.Contains(op.Desc(), "orphan") var removedPeerStoreID uint64 newLeaderStoreID := r1.GetLeader().GetStoreId() - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { if s, ok := op.Step(i).(operator.RemovePeer); ok { removedPeerStoreID = s.FromStore } diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go index 344621c8b5b..739ca5c84b6 100644 --- a/pkg/schedule/coordinator.go +++ b/pkg/schedule/coordinator.go @@ -253,7 +253,7 @@ func (c *Coordinator) InitSchedulers(needRun bool) { configs []string err error ) - for i := 0; i < maxLoadConfigRetries; i++ { + for i := range maxLoadConfigRetries { scheduleNames, configs, err = c.cluster.GetStorage().LoadAllSchedulerConfigs() select { case <-c.ctx.Done(): diff --git a/pkg/schedule/filter/region_filters_test.go b/pkg/schedule/filter/region_filters_test.go index f7ad224ade3..a7dd1fa932a 100644 --- a/pkg/schedule/filter/region_filters_test.go +++ b/pkg/schedule/filter/region_filters_test.go @@ -98,7 +98,7 @@ func TestRegionEmptyFilter(t *testing.T) { re.Equal(filter.Select(region), statusOK) region = region.Clone(core.SetApproximateSize(0)) - for i := uint64(0); i < 100; i++ { + for i := range uint64(100) { testCluster.PutRegion(core.NewRegionInfo(&metapb.Region{ Id: i, Peers: []*metapb.Peer{ diff --git a/pkg/schedule/hbstream/heartbeat_streams.go b/pkg/schedule/hbstream/heartbeat_streams.go index d9bf3209bec..01beacab449 100644 --- a/pkg/schedule/hbstream/heartbeat_streams.go +++ b/pkg/schedule/hbstream/heartbeat_streams.go @@ -269,7 +269,7 @@ func (s *HeartbeatStreams) Drain(count int) error { if s.needRun { return errors.Normalize("hbstream running enabled") } - for i := 0; i < count; i++ { + for range count { <-s.msgCh } return nil diff --git a/pkg/schedule/labeler/labeler_test.go b/pkg/schedule/labeler/labeler_test.go index 34490a7249d..ebd57708e47 100644 --- a/pkg/schedule/labeler/labeler_test.go +++ b/pkg/schedule/labeler/labeler_test.go @@ -290,7 +290,7 @@ func expectSameRegionLabels(re *require.Assertions, r1, r2 *RegionLabel) { func expectSameRules(re *require.Assertions, r1, r2 *LabelRule) { re.Len(r1.Labels, len(r2.Labels)) - for id := 0; id < len(r1.Labels); id++ { + for id := range r1.Labels { expectSameRegionLabels(re, &r1.Labels[id], &r2.Labels[id]) } diff --git a/pkg/schedule/operator/builder_test.go b/pkg/schedule/operator/builder_test.go index b010dcf935b..5e9ff7f89bb 100644 --- a/pkg/schedule/operator/builder_test.go +++ b/pkg/schedule/operator/builder_test.go @@ -555,7 +555,7 @@ func (suite *operatorBuilderTestSuite) TestBuild() { re.NoError(err) re.Equal(testCase.kind, op.Kind()) re.Len(testCase.steps, op.Len()) - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { switch step := op.Step(i).(type) { case TransferLeader: re.Equal(testCase.steps[i].(TransferLeader).FromStore, step.FromStore) diff --git 
a/pkg/schedule/operator/create_operator_test.go b/pkg/schedule/operator/create_operator_test.go index d481334bbcb..845255e713c 100644 --- a/pkg/schedule/operator/create_operator_test.go +++ b/pkg/schedule/operator/create_operator_test.go @@ -150,7 +150,7 @@ func (suite *createOperatorTestSuite) TestCreateSplitRegionOperator() { re.NoError(err) re.Equal(OpSplit, op.Kind()) re.Len(op.steps, 1) - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { switch step := op.Step(i).(type) { case SplitRegion: re.Equal(testCase.startKey, step.StartKey) @@ -307,7 +307,7 @@ func (suite *createOperatorTestSuite) TestCreateMergeRegionOperator() { re.Equal(MergeRegion{source.GetMeta(), target.GetMeta(), true}, ops[1].Step(0).(MergeRegion)) expectedSteps := append(testCase.prepareSteps, MergeRegion{source.GetMeta(), target.GetMeta(), false}) - for i := 0; i < ops[0].Len(); i++ { + for i := range ops[0].Len() { switch step := ops[0].Step(i).(type) { case TransferLeader: re.Equal(expectedSteps[i].(TransferLeader).FromStore, step.FromStore) @@ -604,7 +604,7 @@ func (suite *createOperatorTestSuite) TestCreateLeaveJointStateOperator() { re.NoError(err) re.Equal(testCase.kind, op.Kind()) re.Len(op.steps, len(testCase.steps)) - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { switch step := op.Step(i).(type) { case TransferLeader: re.Equal(testCase.steps[i].(TransferLeader).FromStore, step.FromStore) @@ -947,7 +947,7 @@ func (suite *createOperatorTestSuite) TestCreateMoveRegionOperator() { re.Len(testCase.steps, op.Len()) // Since the peer id may be generated by allocator in runtime, we only check store id. - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { switch step := op.Step(i).(type) { case TransferLeader: re.Equal(testCase.steps[i].(TransferLeader).FromStore, step.FromStore) @@ -1123,7 +1123,7 @@ func (suite *createOperatorTestSuite) TestMoveRegionWithoutJointConsensus() { re.Len(testCase.steps, op.Len()) // Since the peer id may be generated by allocator in runtime, we only check store id. 
- for i := 0; i < op.Len(); i++ { + for i := range op.Len() { switch step := op.Step(i).(type) { case TransferLeader: re.Equal(testCase.steps[i].(TransferLeader).FromStore, step.FromStore) @@ -1246,7 +1246,7 @@ func (suite *createOperatorTestSuite) TestCreateNonWitnessPeerOperator() { re.Equal(testCase.kind, op.kind) expectedSteps := testCase.prepareSteps - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { switch step := op.Step(i).(type) { case ChangePeerV2Enter: re.Len(step.DemoteVoters, len(expectedSteps[i].(ChangePeerV2Enter).DemoteVoters)) diff --git a/pkg/schedule/operator/operator.go b/pkg/schedule/operator/operator.go index f89f6606412..8abeae54f6b 100644 --- a/pkg/schedule/operator/operator.go +++ b/pkg/schedule/operator/operator.go @@ -430,7 +430,7 @@ func (o *Operator) TotalInfluence(opInfluence OpInfluence, region *core.RegionIn } if o.influence == nil { o.influence = NewOpInfluence() - for step := 0; step < len(o.steps); step++ { + for step := range o.steps { o.steps[step].Influence(*o.influence, region) } } diff --git a/pkg/schedule/operator/operator_controller.go b/pkg/schedule/operator/operator_controller.go index e4da6ead0ef..0478ef2b6ae 100644 --- a/pkg/schedule/operator/operator_controller.go +++ b/pkg/schedule/operator/operator_controller.go @@ -341,7 +341,7 @@ func (oc *Controller) AddWaitingOperator(ops ...*Operator) int { needPromoted++ } operatorCounter.WithLabelValues(ops[0].Desc(), "promote-add").Add(float64(needPromoted)) - for i := 0; i < needPromoted; i++ { + for range needPromoted { oc.PromoteWaitingOperator() } return added diff --git a/pkg/schedule/operator/operator_controller_test.go b/pkg/schedule/operator/operator_controller_test.go index 16ba899db1d..6d26613f640 100644 --- a/pkg/schedule/operator/operator_controller_test.go +++ b/pkg/schedule/operator/operator_controller_test.go @@ -892,7 +892,7 @@ func (suite *operatorControllerTestSuite) TestAddWaitingOperator() { // a batch of operators should be added atomically var batch []*Operator - for i := uint64(0); i < cluster.GetSchedulerMaxWaitingOperator(); i++ { + for i := range cluster.GetSchedulerMaxWaitingOperator() { batch = append(batch, addPeerOp(i)) } added := controller.AddWaitingOperator(batch...) diff --git a/pkg/schedule/operator/operator_test.go b/pkg/schedule/operator/operator_test.go index 1f44d813f1e..22a86c789fc 100644 --- a/pkg/schedule/operator/operator_test.go +++ b/pkg/schedule/operator/operator_test.go @@ -586,7 +586,7 @@ func TestOperatorCheckConcurrently(t *testing.T) { checkSteps(re, op, steps) op.Start() var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/schedule/operator/status_tracker_test.go b/pkg/schedule/operator/status_tracker_test.go index 8c897d1e545..99e3853fff3 100644 --- a/pkg/schedule/operator/status_tracker_test.go +++ b/pkg/schedule/operator/status_tracker_test.go @@ -185,7 +185,7 @@ func TestAdditionalInfoConcurrent(t *testing.T) { op := NewOperator("test", "test", 0, nil, OpAdmin, 0) var wg sync.WaitGroup - for i := 0; i < 1000; i++ { + for i := range 1000 { wg.Add(1) go func(i int) { defer wg.Done() diff --git a/pkg/schedule/operator/waiting_operator.go b/pkg/schedule/operator/waiting_operator.go index f75dcf25cd8..2c4b41f6566 100644 --- a/pkg/schedule/operator/waiting_operator.go +++ b/pkg/schedule/operator/waiting_operator.go @@ -47,7 +47,7 @@ type randBuckets struct { // newRandBuckets creates a random buckets. 
func newRandBuckets() *randBuckets { var buckets []*bucket - for i := 0; i < len(priorityWeight); i++ { + for i := range priorityWeight { buckets = append(buckets, &bucket{ weight: priorityWeight[i], }) diff --git a/pkg/schedule/operator/waiting_operator_test.go b/pkg/schedule/operator/waiting_operator_test.go index 897f416cf38..8a0d1875cd7 100644 --- a/pkg/schedule/operator/waiting_operator_test.go +++ b/pkg/schedule/operator/waiting_operator_test.go @@ -26,7 +26,7 @@ func TestRandBuckets(t *testing.T) { re := require.New(t) rb := newRandBuckets() addOperators(rb) - for i := 0; i < len(priorityWeight); i++ { + for range priorityWeight { op := rb.GetOperator() re.NotNil(op) } @@ -67,7 +67,7 @@ func TestRandomBucketsWithMergeRegion(t *testing.T) { re := require.New(t) rb := newRandBuckets() descs := []string{"merge-region", "admin-merge-region", "random-merge"} - for j := 0; j < 100; j++ { + for j := range 100 { // adds operators desc := descs[j%3] op := NewTestOperator(uint64(1), &metapb.RegionEpoch{}, OpRegion|OpMerge, []OpStep{ @@ -109,7 +109,7 @@ func TestRandomBucketsWithMergeRegion(t *testing.T) { op.SetPriorityLevel(constant.High) rb.PutOperator(op) - for i := 0; i < 2; i++ { + for range 2 { op := rb.GetOperator() re.NotNil(op) } diff --git a/pkg/schedule/placement/fit_region_test.go b/pkg/schedule/placement/fit_region_test.go index 2006801e71a..284674b79a2 100644 --- a/pkg/schedule/placement/fit_region_test.go +++ b/pkg/schedule/placement/fit_region_test.go @@ -200,7 +200,7 @@ func BenchmarkFitRegionMorePeersSplitRules(b *testing.B) { LocationLabels: []string{}, }, } - for i := 0; i < 4; i++ { + for i := range 4 { rules = append(rules, &Rule{ GroupID: DefaultGroupID, ID: fmt.Sprintf("%v", i), @@ -228,7 +228,7 @@ func BenchmarkFitRegionMoreVotersSplitRules(b *testing.B) { LocationLabels: []string{}, }, } - for i := 0; i < 4; i++ { + for i := range 4 { rules = append(rules, &Rule{ GroupID: DefaultGroupID, ID: fmt.Sprintf("%v", i), @@ -266,7 +266,7 @@ func BenchmarkFitRegionCrossRegion(b *testing.B) { Count: 1, LocationLabels: []string{}, }) - for i := 0; i < 2; i++ { + for i := range 2 { rules = append(rules, &Rule{ GroupID: DefaultGroupID, ID: fmt.Sprintf("%v", i), @@ -287,7 +287,7 @@ func BenchmarkFitRegionWithMoreRulesAndStoreLabels(b *testing.B) { region := mockRegion(5, 0) rules := []*Rule{} // create 100 rules, with each rule has 101 LabelConstraints. - for i := 0; i < 100; i++ { + for i := range 100 { rule := &Rule{ GroupID: DefaultGroupID, ID: fmt.Sprintf("%v", i), @@ -319,7 +319,7 @@ func BenchmarkFitRegionWithMoreRulesAndStoreLabels(b *testing.B) { // create stores, with each store has 101 normal labels(1 exclusive label). 
lists := make([]*core.StoreInfo, 0) labels := make([]*metapb.StoreLabel, 0, 101) - for labID := 0; labID < 100; labID++ { + for labID := range 100 { label := &metapb.StoreLabel{Key: fmt.Sprintf("store_%08d", labID), Value: fmt.Sprintf("value_%08d", labID)} labels = append(labels, label) } diff --git a/pkg/schedule/placement/fit_test.go b/pkg/schedule/placement/fit_test.go index cc49d25640c..b12bcd7451a 100644 --- a/pkg/schedule/placement/fit_test.go +++ b/pkg/schedule/placement/fit_test.go @@ -271,7 +271,7 @@ func TestPickPeersFromBinaryInt(t *testing.T) { re.NoError(err) selected := pickPeersFromBinaryInt(candidates, uint(binaryNumber)) re.Len(selected, len(c.expectedPeers)) - for id := 0; id < len(selected); id++ { + for id := range selected { re.Equal(selected[id].Id, c.expectedPeers[id]) } } diff --git a/pkg/schedule/placement/rule_list.go b/pkg/schedule/placement/rule_list.go index 73b2f5271a1..f5ee0dada0e 100644 --- a/pkg/schedule/placement/rule_list.go +++ b/pkg/schedule/placement/rule_list.go @@ -81,7 +81,7 @@ func buildRuleList(rules ruleContainer) (ruleList, error) { rl := ruleList{ rangeList: rangeList, } - for i := 0; i < rangeList.Len(); i++ { + for i := range rangeList.Len() { start, data := rangeList.Get(i) var end []byte if i < rangeList.Len()-1 { diff --git a/pkg/schedule/placement/rule_manager_test.go b/pkg/schedule/placement/rule_manager_test.go index 5494b3c5a9d..2e1883640d8 100644 --- a/pkg/schedule/placement/rule_manager_test.go +++ b/pkg/schedule/placement/rule_manager_test.go @@ -480,7 +480,7 @@ func TestCacheManager(t *testing.T) { re.Nil(cache.bestFit) } // Store bestFit when the total number of hits is sufficient. - for i := 0; i < minHitCountToCacheHit; i++ { + for range minHitCountToCacheHit { manager.FitRegion(stores, region) } cache := manager.cache.regionCaches[1] diff --git a/pkg/schedule/rangelist/range_list_test.go b/pkg/schedule/rangelist/range_list_test.go index 5baa7f16f28..b1d95c01c55 100644 --- a/pkg/schedule/rangelist/range_list_test.go +++ b/pkg/schedule/rangelist/range_list_test.go @@ -86,7 +86,7 @@ func TestRangeList2(t *testing.T) { rl := b.Build() re.Len(expectKeys, rl.Len()) - for i := 0; i < rl.Len(); i++ { + for i := range rl.Len() { key, data := rl.Get(i) re.Equal(expectKeys[i], key) re.Equal(expectData[i], data) diff --git a/pkg/schedule/scatter/region_scatterer_test.go b/pkg/schedule/scatter/region_scatterer_test.go index 4b31e81aacb..4dbe60d764e 100644 --- a/pkg/schedule/scatter/region_scatterer_test.go +++ b/pkg/schedule/scatter/region_scatterer_test.go @@ -74,7 +74,7 @@ func TestScatterRegions(t *testing.T) { } func checkOperator(re *require.Assertions, op *operator.Operator) { - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { if rp, ok := op.Step(i).(operator.RemovePeer); ok { for j := i + 1; j < op.Len(); j++ { if tr, ok := op.Step(j).(operator.TransferLeader); ok { @@ -353,7 +353,7 @@ func TestSomeStoresFilteredScatterGroupInConcurrency(t *testing.T) { re.True(tc.GetStore(uint64(6)).IsDisconnected()) scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddPendingProcessedRegions) var wg sync.WaitGroup - for j := 0; j < 10; j++ { + for j := range 10 { wg.Add(1) go scatterOnce(tc, scatterer, fmt.Sprintf("group-%v", j), &wg) } @@ -362,7 +362,7 @@ func TestSomeStoresFilteredScatterGroupInConcurrency(t *testing.T) { func scatterOnce(tc *mockcluster.Cluster, scatter *RegionScatterer, group string, wg *sync.WaitGroup) { regionID := 1 - for i := 0; i < 100; i++ { + for range 100 { 
scatter.scatterRegion(tc.AddLeaderRegion(uint64(regionID), 1, 2, 3), group, false) regionID++ } @@ -407,8 +407,8 @@ func TestScatterGroupInConcurrency(t *testing.T) { t.Log(testCase.name) scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddPendingProcessedRegions) regionID := 1 - for i := 0; i < 100; i++ { - for j := 0; j < testCase.groupCount; j++ { + for range 100 { + for j := range testCase.groupCount { scatterer.scatterRegion(tc.AddLeaderRegion(uint64(regionID), 1, 2, 3), fmt.Sprintf("group-%v", j), false) regionID++ @@ -416,7 +416,7 @@ func TestScatterGroupInConcurrency(t *testing.T) { } checker := func(ss *selectedStores, expected uint64, delta float64) { - for i := 0; i < testCase.groupCount; i++ { + for i := range testCase.groupCount { // comparing the leader distribution group := fmt.Sprintf("group-%v", i) max := uint64(0) @@ -703,14 +703,14 @@ func TestSelectedStoresTooManyPeers(t *testing.T) { group := "group" scatterer := NewRegionScatterer(ctx, tc, oc, tc.AddPendingProcessedRegions) // priority 4 > 1 > 5 > 2 == 3 - for i := 0; i < 1200; i++ { + for range 1200 { scatterer.ordinaryEngine.selectedPeer.Put(2, group) scatterer.ordinaryEngine.selectedPeer.Put(3, group) } - for i := 0; i < 800; i++ { + for range 800 { scatterer.ordinaryEngine.selectedPeer.Put(5, group) } - for i := 0; i < 400; i++ { + for range 400 { scatterer.ordinaryEngine.selectedPeer.Put(1, group) } // test region with peer 1 2 3 @@ -792,7 +792,7 @@ func isPeerCountChanged(op *operator.Operator) bool { return false } add, remove := 0, 0 - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { step := op.Step(i) switch step.(type) { case operator.AddPeer, operator.AddLearner: diff --git a/pkg/schedule/schedulers/balance_benchmark_test.go b/pkg/schedule/schedulers/balance_benchmark_test.go index 4fb6a4fb781..aaafa7be8ca 100644 --- a/pkg/schedule/schedulers/balance_benchmark_test.go +++ b/pkg/schedule/schedulers/balance_benchmark_test.go @@ -70,7 +70,7 @@ func newBenchCluster(ruleEnable, labelEnable bool, tombstoneEnable bool) (contex tc.AddLabelsStore(storeID, regionCount-int(storeID), label) storeID++ } - for j := 0; j < regionCount; j++ { + for range regionCount { if ruleEnable { learnID := regionID%uint64(tiflashCount) + uint64(storeCount) tc.AddRegionWithLearner(regionID, storeID-1, []uint64{storeID - 2, storeID - 3}, []uint64{learnID}) @@ -82,7 +82,7 @@ func newBenchCluster(ruleEnable, labelEnable bool, tombstoneEnable bool) (contex } } if tombstoneEnable { - for i := uint64(0); i < uint64(storeCount*2/3); i++ { + for i := range uint64(storeCount * 2 / 3) { s := tc.GetStore(i) s.GetMeta().State = metapb.StoreState_Tombstone } @@ -105,7 +105,7 @@ func newBenchBigCluster(storeNumInOneRack, regionNum int) (context.CancelFunc, * storeID, regionID := uint64(0), uint64(0) hosts := make([]string, 0) - for i := 0; i < storeNumInOneRack; i++ { + for i := range storeNumInOneRack { hosts = append(hosts, fmt.Sprintf("host%d", i+1)) } for _, host := range hosts { @@ -118,7 +118,7 @@ func newBenchBigCluster(storeNumInOneRack, regionNum int) (context.CancelFunc, * storeID++ tc.AddLabelsStore(storeID, regionNum, label) } - for j := 0; j < regionCount; j++ { + for range regionCount { tc.AddRegionWithLearner(regionID, storeID, []uint64{storeID - 1, storeID - 2}, nil) regionID++ } @@ -129,7 +129,7 @@ func newBenchBigCluster(storeNumInOneRack, regionNum int) (context.CancelFunc, * func addTiflash(tc *mockcluster.Cluster) { tc.SetPlacementRuleEnabled(true) - for i := 0; i < tiflashCount; i++ { + for i := range tiflashCount { 
label := make(map[string]string, 3) label["engine"] = "tiflash" if i == tiflashCount-1 { diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 44605f9c5b8..8788be98797 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -389,7 +389,7 @@ func createTransferLeaderOperator(cs *candidateStores, dir string, s *balanceLea creator = s.transferLeaderIn } var op *operator.Operator - for i := 0; i < retryLimit; i++ { + for range retryLimit { if op = creator(ssolver, collector); op != nil { if _, ok := usedRegions[op.RegionID()]; !ok { break diff --git a/pkg/schedule/schedulers/balance_leader_test.go b/pkg/schedule/schedulers/balance_leader_test.go index 4aa8a7aca26..940f75f78bc 100644 --- a/pkg/schedule/schedulers/balance_leader_test.go +++ b/pkg/schedule/schedulers/balance_leader_test.go @@ -228,7 +228,7 @@ func (suite *balanceLeaderSchedulerTestSuite) TestTransferLeaderOut() { 1: 2, 2: 1, } - for i := 0; i < 20; i++ { + for range 20 { if len(suite.schedule()) == 0 { continue } @@ -605,7 +605,7 @@ func checkBalanceLeaderLimit(re *require.Assertions, enablePlacementRules bool) id uint64 regions []*metapb.Region ) - for i := 0; i < 50; i++ { + for i := range 50 { peers := []*metapb.Peer{ {Id: id + 1, StoreId: 1}, {Id: id + 2, StoreId: 2}, @@ -634,7 +634,7 @@ func checkBalanceLeaderLimit(re *require.Assertions, enablePlacementRules bool) tc.UpdateSubTree(regionInfo, origin, overlaps, rangeChanged) } - for i := 0; i < 100; i++ { + for range 100 { _, err := tc.AllocPeer(1) re.NoError(err) } diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 5fdfa29d96d..2bee521364d 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -141,7 +141,7 @@ func (s *balanceRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun if sourceIndex == len(sourceStores)-1 { break } - for i := 0; i < retryLimit; i++ { + for range retryLimit { // Priority pick the region that has a pending peer. // Pending region may mean the disk is overload, remove the pending region firstly. 
solver.Region = filter.SelectOneRegion(cluster.RandPendingRegions(solver.sourceStoreID(), s.conf.Ranges), collector, diff --git a/pkg/schedule/schedulers/balance_region_test.go b/pkg/schedule/schedulers/balance_region_test.go index cb5ad14ef58..48a4959a170 100644 --- a/pkg/schedule/schedulers/balance_region_test.go +++ b/pkg/schedule/schedulers/balance_region_test.go @@ -682,7 +682,7 @@ func TestConcurrencyUpdateConfig(t *testing.T) { re.NoError(sche.config.persist()) } }() - for i := 0; i < 1000; i++ { + for range 1000 { sche.Schedule(tc, false) } ch <- struct{}{} @@ -700,7 +700,7 @@ func TestBalanceWhenRegionNotHeartbeat(t *testing.T) { id uint64 regions []*metapb.Region ) - for i := 0; i < 10; i++ { + for i := range 10 { peers := []*metapb.Peer{ {Id: id + 1, StoreId: 1}, {Id: id + 2, StoreId: 2}, diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 1fedb2769ee..cfcafb56c57 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -268,7 +268,7 @@ func createTransferWitnessOperator(cs *candidateStores, s *balanceWitnessSchedul retryLimit := s.retryQuota.getLimit(store) ssolver.Source, ssolver.Target = store, nil var op *operator.Operator - for i := 0; i < retryLimit; i++ { + for range retryLimit { schedulerCounter.WithLabelValues(s.GetName(), "total").Inc() if op = s.transferWitnessOut(ssolver, collector); op != nil { if _, ok := usedRegions[op.RegionID()]; !ok { diff --git a/pkg/schedule/schedulers/balance_witness_test.go b/pkg/schedule/schedulers/balance_witness_test.go index 03fcac77ccc..b1449821236 100644 --- a/pkg/schedule/schedulers/balance_witness_test.go +++ b/pkg/schedule/schedulers/balance_witness_test.go @@ -114,7 +114,7 @@ func (suite *balanceWitnessSchedulerTestSuite) TestTransferWitnessOut() { 1: 2, 2: 1, } - for i := 0; i < 20; i++ { + for range 20 { if len(suite.schedule()) == 0 { continue } diff --git a/pkg/schedule/schedulers/diagnostic_recorder.go b/pkg/schedule/schedulers/diagnostic_recorder.go index cd99262ee48..93bc714f83d 100644 --- a/pkg/schedule/schedulers/diagnostic_recorder.go +++ b/pkg/schedule/schedulers/diagnostic_recorder.go @@ -104,7 +104,7 @@ func (d *DiagnosticRecorder) GetLastResult() *DiagnosticResult { if firstStatus == Pending || firstStatus == Normal { wa := movingaverage.NewWeightAllocator(length, 3) counter := make(map[uint64]map[plan.Status]float64) - for i := 0; i < length; i++ { + for i := range length { item := items[i].Value.(*DiagnosticResult) for storeID, status := range item.StoreStatus { if _, ok := counter[storeID]; !ok { diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index eb74ae09a12..73256b6102f 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -313,7 +313,7 @@ type evictLeaderStoresConf interface { func scheduleEvictLeaderBatch(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator { var ops []*operator.Operator batchSize := conf.getBatch() - for i := 0; i < batchSize; i++ { + for range batchSize { once := scheduleEvictLeaderOnce(r, name, cluster, conf) // no more regions if len(once) == 0 { diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index 195effaecab..45cba6d4e46 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -590,7 +590,7 @@ func 
checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace // Region 1 and 2 are the same, cannot move peer to store 5 due to the label. // Region 3 can only move peer to store 5. // Region 5 can only move peer to store 6. - for i := 0; i < 30; i++ { + for range 30 { ops, _ = hb.Schedule(tc, false) op := ops[0] clearPendingInfluence(hb.(*hotScheduler)) @@ -739,7 +739,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { pdServerCfg.FlowRoundByDigit = 3 // Disable for TiFlash hb.conf.setEnableForTiFlash(false) - for i := 0; i < 20; i++ { + for range 20 { clearPendingInfluence(hb) ops, _ := hb.Schedule(tc, false) if len(ops) == 0 { @@ -820,7 +820,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { } // Will transfer a hot region from store 1, because the total count of peers // which is hot for store 1 is larger than other stores. - for i := 0; i < 20; i++ { + for range 20 { clearPendingInfluence(hb) ops, _ := hb.Schedule(tc, false) op := ops[0] @@ -870,7 +870,7 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) { {2, []uint64{1, 2, 3}, 500, 0, 500}, {3, []uint64{2, 1, 3}, 500, 0, 500}, }) - for i := 0; i < 100; i++ { + for range 100 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) if len(ops) == 0 { @@ -913,7 +913,7 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { {3, []uint64{2, 4, 3}, 0.05 * units.MiB, 0.1 * units.MiB, 0}, }) - for i := 0; i < 100; i++ { + for range 100 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) op := ops[0] @@ -1052,7 +1052,7 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { {7, []uint64{3, 1, 2}, 0.5 * units.MiB, 1 * units.MiB, 0}, }) - for i := 0; i < 100; i++ { + for range 100 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) re.Empty(ops) @@ -1066,7 +1066,7 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { // store2 has 4 peer as leader // store3 has 2 peer as leader // We expect to transfer leader from store2 to store1 or store3 - for i := 0; i < 100; i++ { + for range 100 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) op := ops[0] @@ -1128,11 +1128,11 @@ func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim }) } - for i := 0; i < 20; i++ { + for range 20 { clearPendingInfluence(hb.(*hotScheduler)) cnt := 0 testLoop: - for j := 0; j < 1000; j++ { + for range 1000 { re.LessOrEqual(cnt, 5) emptyCnt := 0 ops, _ := hb.Schedule(tc, false) @@ -1232,7 +1232,7 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { {7, []uint64{2, 1, 3}, 0.5 * units.MiB, 1 * units.MiB, 0}, }) - for i := 0; i < 100; i++ { + for range 100 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) if len(ops) == 0 { @@ -1392,7 +1392,7 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) { {2, []uint64{2, 1, 3}, 0, 0, 500}, }) - for i := 0; i < 100; i++ { + for range 100 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) op := ops[0] @@ -1431,7 +1431,7 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { {3, []uint64{3, 4, 5}, 0.05 * units.MiB, 0.1 * units.MiB, 0}, }) - for i := 0; i < 100; i++ { + for range 100 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) op := ops[0] @@ -1517,7 +1517,7 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim // Before schedule, store byte/key rate: 7.1 | 6.1 | 6 | 5 // Min and max from 
storeLoadPred. They will be generated in the comparison of current and future. - for j := 0; j < 20; j++ { + for range 20 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) @@ -1540,7 +1540,7 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim } // Before schedule, store byte/key rate: 7.1 | 6.1 | 6 | 5 - for j := 0; j < 20; j++ { + for range 20 { clearPendingInfluence(hb.(*hotScheduler)) ops, _ := hb.Schedule(tc, false) @@ -1964,8 +1964,8 @@ func checkHotCacheCheckRegionFlowWithDifferentThreshold(re *require.Assertions, // some peers are hot, and some are cold #3198 rate := uint64(512 * units.KiB) - for i := 0; i < statistics.TopNN; i++ { - for j := 0; j < utils.DefaultAotSize; j++ { + for i := range statistics.TopNN { + for range utils.DefaultAotSize { tc.AddLeaderRegionWithWriteInfo(uint64(i+100), 1, rate*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}, 1) } } diff --git a/pkg/schedule/schedulers/scatter_range_test.go b/pkg/schedule/schedulers/scatter_range_test.go index d8e7a37ffad..26ac48a36bb 100644 --- a/pkg/schedule/schedulers/scatter_range_test.go +++ b/pkg/schedule/schedulers/scatter_range_test.go @@ -51,7 +51,7 @@ func checkScatterRangeBalance(re *require.Assertions, enablePlacementRules bool) id uint64 regions []*metapb.Region ) - for i := 0; i < 50; i++ { + for i := range 50 { peers := []*metapb.Peer{ {Id: id + 1, StoreId: 1}, {Id: id + 2, StoreId: 2}, @@ -78,7 +78,7 @@ func checkScatterRangeBalance(re *require.Assertions, enablePlacementRules bool) origin, overlaps, rangeChanged := tc.SetRegion(regionInfo) tc.UpdateSubTree(regionInfo, origin, overlaps, rangeChanged) } - for i := 0; i < 100; i++ { + for range 100 { _, err := tc.AllocPeer(1) re.NoError(err) } diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 464abdcacc9..03e38d14da6 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -81,7 +81,7 @@ func TestShuffleLeader(t *testing.T) { tc.AddLeaderRegion(3, 3, 4, 1, 2) tc.AddLeaderRegion(4, 4, 1, 2, 3) - for i := 0; i < 4; i++ { + for range 4 { ops, _ = sl.Schedule(tc, false) re.NotEmpty(ops) re.Equal(operator.OpLeader|operator.OpAdmin, ops[0].Kind()) @@ -197,7 +197,7 @@ func checkBalance(re *require.Assertions, enablePlacementRules bool) { // try to get an operator var ops []*operator.Operator - for i := 0; i < 100; i++ { + for range 100 { ops, _ = hb.Schedule(tc, false) if ops != nil { break @@ -256,7 +256,7 @@ func TestShuffleRegion(t *testing.T) { tc.AddLeaderRegion(3, 3, 4, 1) tc.AddLeaderRegion(4, 4, 1, 2) - for i := 0; i < 4; i++ { + for range 4 { ops, _ = sl.Schedule(tc, false) re.NotEmpty(ops) re.Equal(operator.OpRegion, ops[0].Kind()) @@ -363,7 +363,7 @@ func TestSpecialUseHotRegion(t *testing.T) { tc.AddLeaderRegionWithWriteInfo(5, 3, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{1, 2}) hs, err := CreateScheduler(writeType, oc, storage, cd) re.NoError(err) - for i := 0; i < 100; i++ { + for range 100 { ops, _ = hs.Schedule(tc, false) if len(ops) == 0 { continue diff --git a/pkg/schedule/schedulers/split_bucket_test.go b/pkg/schedule/schedulers/split_bucket_test.go index c2ea498f99a..840dfa97c19 100644 --- a/pkg/schedule/schedulers/split_bucket_test.go +++ b/pkg/schedule/schedulers/split_bucket_test.go @@ -33,7 +33,7 @@ func TestSplitBucket(t *testing.T) { hotBuckets := 
make(map[uint64][]*buckets.BucketStat, 10) // init cluster: there are 8 regions and their size is 600MB, // their key range is [1 10][11 20]....[71 80] - for i := uint64(0); i < 8; i++ { + for i := range uint64(8) { peers := []*metapb.Peer{{ Id: i * 100, StoreId: i, diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index 994559843a7..90191dd355c 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -66,7 +66,7 @@ func (s *transferWitnessLeaderScheduler) Schedule(cluster sche.SchedulerCluster, func (s *transferWitnessLeaderScheduler) scheduleTransferWitnessLeaderBatch(name string, cluster sche.SchedulerCluster, batchSize int) []*operator.Operator { var ops []*operator.Operator batchLoop: - for i := 0; i < batchSize; i++ { + for range batchSize { select { case region := <-s.regions: op, err := scheduleTransferWitnessLeader(s.R, name, cluster, region) diff --git a/pkg/schedule/splitter/region_splitter_test.go b/pkg/schedule/splitter/region_splitter_test.go index 40347bb310d..6f49707217e 100644 --- a/pkg/schedule/splitter/region_splitter_test.go +++ b/pkg/schedule/splitter/region_splitter_test.go @@ -52,7 +52,7 @@ func (m *mockSplitRegionsHandler) ScanRegionsByKeyRange(groupKeys *regionGroupKe for regionID, keyRange := range m.regions { if bytes.Equal(startKey, keyRange[0]) && bytes.Equal(endKey, keyRange[1]) { regions := make(map[uint64][]byte) - for i := 0; i < len(splitKeys); i++ { + for i := range splitKeys { regions[regionID+uint64(i)+1000] = splitKeys[i] } results.addRegionsID(regions) diff --git a/pkg/slice/slice.go b/pkg/slice/slice.go index b3741593670..f5c8aa5d230 100644 --- a/pkg/slice/slice.go +++ b/pkg/slice/slice.go @@ -16,7 +16,7 @@ package slice // AnyOf returns true if any element in the slice matches the predict func. 
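// Two further variants of the same rewrite appear below, sketched here for
// reference (illustrative only, not part of this patch; `keys` is a made-up
// slice). When every index of a slice is visited, the slice itself becomes the
// range operand, as in the AnyOf change that follows; when the bound is an
// arithmetic expression such as len(...)-1, the integer expression is ranged
// over instead, as in the hot_bucket_cache change further down.
package main

import "fmt"

func main() {
	keys := []string{"a", "b", "c", "d"}
	for i := range keys { // index-only range over the slice: 0..len(keys)-1
		fmt.Println(i, keys[i])
	}
	for i := range len(keys) - 1 { // stops one element early: 0..len(keys)-2
		fmt.Println("adjacent pair:", keys[i], keys[i+1])
	}
}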
func AnyOf[T any](s []T, p func(int) bool) bool { - for i := 0; i < len(s); i++ { + for i := range s { if p(i) { return true } diff --git a/pkg/statistics/buckets/hot_bucket_cache.go b/pkg/statistics/buckets/hot_bucket_cache.go index c4ae785bfa4..3fc640e8c1f 100644 --- a/pkg/statistics/buckets/hot_bucket_cache.go +++ b/pkg/statistics/buckets/hot_bucket_cache.go @@ -222,7 +222,7 @@ func convertToBucketTreeItem(buckets *metapb.Buckets) *BucketTreeItem { if interval == 0 { interval = 10 * 1000 } - for i := 0; i < len(buckets.Keys)-1; i++ { + for i := range len(buckets.Keys) - 1 { loads := []uint64{ buckets.Stats.ReadBytes[i] * 1000 / interval, buckets.Stats.ReadKeys[i] * 1000 / interval, diff --git a/pkg/statistics/buckets/hot_bucket_task_test.go b/pkg/statistics/buckets/hot_bucket_task_test.go index 49741fdc83f..100ed7c818d 100644 --- a/pkg/statistics/buckets/hot_bucket_task_test.go +++ b/pkg/statistics/buckets/hot_bucket_task_test.go @@ -47,7 +47,7 @@ func TestColdHot(t *testing.T) { isHot: true, }} for _, v := range testdata { - for i := 0; i < 20; i++ { + for i := range 20 { task := NewCheckPeerTask(v.buckets) re.True(hotCache.CheckAsync(task)) hotBuckets := getAllBucketStats(ctx, hotCache) @@ -110,7 +110,7 @@ func TestCollectBucketStatsTask(t *testing.T) { defer cancelFn() hotCache := NewBucketsCache(ctx) // case1: add bucket successfully - for i := uint64(0); i < 10; i++ { + for i := range uint64(10) { buckets := convertToBucketTreeItem(newTestBuckets(i, 1, [][]byte{[]byte(strconv.FormatUint(i*10, 10)), []byte(strconv.FormatUint((i+1)*10, 10))}, 0)) hotCache.putItem(buckets, hotCache.getBucketsByKeyRange(buckets.startKey, buckets.endKey)) diff --git a/pkg/statistics/hot_cache_test.go b/pkg/statistics/hot_cache_test.go index fbd28c94683..ac19ee19ad2 100644 --- a/pkg/statistics/hot_cache_test.go +++ b/pkg/statistics/hot_cache_test.go @@ -31,7 +31,7 @@ func TestIsHot(t *testing.T) { region := buildRegion(i, 3, 60) stats := cache.CheckReadPeerSync(region, region.GetPeers(), []float64{100000000, 1000, 1000}, 60) cache.Update(stats[0], i) - for i := 0; i < 100; i++ { + for range 100 { re.True(cache.IsRegionHot(region, 1)) } } diff --git a/pkg/statistics/hot_peer.go b/pkg/statistics/hot_peer.go index 8f92fbff542..c17ad5c246f 100644 --- a/pkg/statistics/hot_peer.go +++ b/pkg/statistics/hot_peer.go @@ -183,7 +183,7 @@ func (stat *HotPeerStat) GetLoads() []float64 { func (stat *HotPeerStat) Clone() *HotPeerStat { ret := *stat ret.Loads = make([]float64, utils.DimLen) - for i := 0; i < utils.DimLen; i++ { + for i := range utils.DimLen { ret.Loads[i] = stat.GetLoad(i) // replace with denoising loads } ret.rollingLoads = nil diff --git a/pkg/statistics/hot_peer_cache_test.go b/pkg/statistics/hot_peer_cache_test.go index 38a185fa483..cb0fc9751b9 100644 --- a/pkg/statistics/hot_peer_cache_test.go +++ b/pkg/statistics/hot_peer_cache_test.go @@ -371,7 +371,7 @@ func TestUpdateHotPeerStat(t *testing.T) { re.Equal(1, newItem[0].HotDegree) re.Equal(2*m-1, newItem[0].AntiCount) // sum of interval is larger than report interval, and cold - for i := 0; i < 2*m-1; i++ { + for range 2*m - 1 { cache.UpdateStat(newItem[0]) newItem = cache.CheckPeerFlow(region, []*metapb.Peer{peer}, deltaLoads, interval) } @@ -667,13 +667,13 @@ func TestHotPeerCacheTopNThreshold(t *testing.T) { ThresholdsUpdateInterval = interval cache := NewHotPeerCache(context.Background(), utils.Write) now := time.Now() - for id := uint64(0); id < 100; id++ { + for id := range uint64(100) { meta := &metapb.Region{ Id: id, Peers: 
[]*metapb.Peer{{Id: id, StoreId: 1}}, } region := core.NewRegionInfo(meta, meta.Peers[0], core.SetWrittenBytes(id*6000), core.SetWrittenKeys(id*6000), core.SetWrittenQuery(id*6000)) - for i := 0; i < 10; i++ { + for i := range 10 { start := uint64(now.Add(time.Minute * time.Duration(i)).Unix()) end := uint64(now.Add(time.Minute * time.Duration(i+1)).Unix()) newRegion := region.Clone(core.WithInterval(&pdpb.TimeInterval{ diff --git a/pkg/statistics/region_collection.go b/pkg/statistics/region_collection.go index 7e51a8a7bdd..958ba3be5df 100644 --- a/pkg/statistics/region_collection.go +++ b/pkg/statistics/region_collection.go @@ -261,7 +261,7 @@ func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.Store } // Check if the region meets any of the conditions and update the corresponding info. regionID := region.GetID() - for i := 0; i < len(regionStatisticTypes); i++ { + for i := range regionStatisticTypes { condition := RegionStatisticType(1 << i) if conditions&condition == 0 { continue diff --git a/pkg/statistics/store_collection_test.go b/pkg/statistics/store_collection_test.go index e9fd1bba1fb..6a0ef24aff5 100644 --- a/pkg/statistics/store_collection_test.go +++ b/pkg/statistics/store_collection_test.go @@ -122,8 +122,8 @@ func TestSummaryStoreInfos(t *testing.T) { expectHistoryLoads := []float64{1, 2, 5} for _, storeID := range []uint64{1, 3} { loads := storeHistoryLoad.Get(storeID, rw, kind) - for i := 0; i < len(loads); i++ { - for j := 0; j < len(loads[0]); j++ { + for i := range loads { + for j := range loads[0] { if loads[i][j] != 0 { re.Equal(loads[i][j]/float64(storeID), expectHistoryLoads[i]) } @@ -139,8 +139,8 @@ func TestSummaryStoreInfos(t *testing.T) { for _, detail := range details { loads := detail.LoadPred.Current.HistoryLoads storeID := detail.GetID() - for i := 0; i < len(loads); i++ { - for j := 0; j < len(loads[0]); j++ { + for i := range loads { + for j := range loads[0] { if loads[i][j] != 0 { re.Equal(loads[i][j]/float64(storeID), expectHistoryLoads[i]) } diff --git a/pkg/statistics/store_load_test.go b/pkg/statistics/store_load_test.go index 67c9e53482f..9d958151182 100644 --- a/pkg/statistics/store_load_test.go +++ b/pkg/statistics/store_load_test.go @@ -33,10 +33,10 @@ func TestHistoryLoads(t *testing.T) { re.Len(historyLoads.Get(1, rwTp, kind)[0], 10) expectLoads := make([][]float64, utils.DimLen) - for i := 0; i < len(loads); i++ { + for i := range loads { expectLoads[i] = make([]float64, 10) } - for i := 0; i < 10; i++ { + for i := range 10 { historyLoads.Add(1, rwTp, kind, loads) expectLoads[utils.ByteDim][i] = 1.0 expectLoads[utils.KeyDim][i] = 2.0 diff --git a/pkg/statistics/utils/topn.go b/pkg/statistics/utils/topn.go index cb97251edd9..fba0cd829f4 100644 --- a/pkg/statistics/utils/topn.go +++ b/pkg/statistics/utils/topn.go @@ -48,7 +48,7 @@ func NewTopN(k, n int, ttl time.Duration) *TopN { topns: make([]*singleTopN, k), ttlLst: newTTLList(ttl), } - for i := 0; i < k; i++ { + for i := range k { ret.topns[i] = newSingleTopN(i, n) } return ret diff --git a/pkg/statistics/utils/topn_test.go b/pkg/statistics/utils/topn_test.go index f92d5a61f34..111b7d023ae 100644 --- a/pkg/statistics/utils/topn_test.go +++ b/pkg/statistics/utils/topn_test.go @@ -50,7 +50,7 @@ func TestPut(t *testing.T) { }, true /*update*/) // check GetTopNMin - for k := 0; k < DimLen; k++ { + for k := range DimLen { re.Equal(float64(1-N), tn.GetTopNMin(k).(*item).values[k]) } @@ -99,7 +99,7 @@ func TestPut(t *testing.T) { } // check Get - for i := uint64(0); i < Total; 
i++ { + for i := range uint64(Total) { it := tn.Get(i).(*item) re.Equal(i, it.id) re.Equal(-float64(i), it.values[0]) @@ -109,15 +109,15 @@ func TestPut(t *testing.T) { func putPerm(re *require.Assertions, tn *TopN, total int, f func(x int) float64, isUpdate bool) { { // insert dims := make([][]int, DimLen) - for k := 0; k < DimLen; k++ { + for k := range DimLen { dims[k] = rand.Perm(total) } - for i := 0; i < total; i++ { + for i := range total { item := &item{ id: uint64(dims[0][i]), values: make([]float64, DimLen), } - for k := 0; k < DimLen; k++ { + for k := range DimLen { item.values[k] = f(dims[k][i]) } re.Equal(isUpdate, tn.Put(item)) @@ -135,7 +135,7 @@ func TestRemove(t *testing.T) { }, false /*insert*/) // check Remove - for i := 0; i < Total; i++ { + for i := range Total { if i%3 != 0 { it := tn.Remove(uint64(i)).(*item) re.Equal(uint64(i), it.id) @@ -143,7 +143,7 @@ func TestRemove(t *testing.T) { } // check Remove worked - for i := 0; i < Total; i++ { + for i := range Total { if i%3 != 0 { re.Nil(tn.Remove(uint64(i))) } diff --git a/pkg/storage/hot_region_storage_test.go b/pkg/storage/hot_region_storage_test.go index 4e98f2059d6..aeda8e450e7 100644 --- a/pkg/storage/hot_region_storage_test.go +++ b/pkg/storage/hot_region_storage_test.go @@ -59,7 +59,7 @@ func (m *MockPackHotRegionInfo) IsLeader() bool { // GenHistoryHotRegions generate history hot region for test. func (m *MockPackHotRegionInfo) GenHistoryHotRegions(num int, updateTime time.Time) { - for i := 0; i < num; i++ { + for i := range num { historyHotRegion := HistoryHotRegion{ UpdateTime: updateTime.UnixNano() / int64(time.Millisecond), RegionID: uint64(i), @@ -180,7 +180,7 @@ func TestHotRegionDelete(t *testing.T) { re.NoError(err) defer clean() historyHotRegions := make([]HistoryHotRegion, 0) - for i := 0; i < defaultDeleteData; i++ { + for range defaultDeleteData { historyHotRegion := HistoryHotRegion{ UpdateTime: deleteDate.UnixNano() / int64(time.Millisecond), RegionID: 1, @@ -273,7 +273,7 @@ func BenchmarkRead(b *testing.B) { } func newTestHotRegions(storage *HotRegionStorage, mock *MockPackHotRegionInfo, cycleTimes, num int, updateTime time.Time) time.Time { - for i := 0; i < cycleTimes; i++ { + for range cycleTimes { mock.GenHistoryHotRegions(num, updateTime) storage.pullHotRegionInfo() storage.flush() diff --git a/pkg/storage/kv/kv_test.go b/pkg/storage/kv/kv_test.go index 3bc3120af95..f57c8145bd0 100644 --- a/pkg/storage/kv/kv_test.go +++ b/pkg/storage/kv/kv_test.go @@ -114,7 +114,7 @@ func testRange(re *require.Assertions, kv Base) { func testSaveMultiple(re *require.Assertions, kv Base, count int) { err := kv.RunInTxn(context.Background(), func(txn Txn) error { var saveErr error - for i := 0; i < count; i++ { + for i := range count { saveErr = txn.Save("key"+strconv.Itoa(i), "val"+strconv.Itoa(i)) if saveErr != nil { return saveErr @@ -123,7 +123,7 @@ func testSaveMultiple(re *require.Assertions, kv Base, count int) { return nil }) re.NoError(err) - for i := 0; i < count; i++ { + for i := range count { val, loadErr := kv.Load("key" + strconv.Itoa(i)) re.NoError(loadErr) re.Equal("val"+strconv.Itoa(i), val) diff --git a/pkg/storage/leveldb_backend_test.go b/pkg/storage/leveldb_backend_test.go index 45af7201c85..c6e0fcef607 100644 --- a/pkg/storage/leveldb_backend_test.go +++ b/pkg/storage/leveldb_backend_test.go @@ -100,7 +100,7 @@ func TestLevelDBBackend(t *testing.T) { backend.flushRate = defaultFlushRate // Test the flush when the cache is full. 
backend.flushRate = time.Minute - for i := 0; i < backend.batchSize; i++ { + for i := range backend.batchSize { key, value = fmt.Sprintf("k%d", i), fmt.Sprintf("v%d", i) err = backend.SaveIntoBatch(key, []byte(value)) re.NoError(err) diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go index bd9a587e239..ca1c8e275eb 100644 --- a/pkg/storage/storage_test.go +++ b/pkg/storage/storage_test.go @@ -82,7 +82,7 @@ func TestBasic(t *testing.T) { func mustSaveStores(re *require.Assertions, s Storage, n int) []*metapb.Store { stores := make([]*metapb.Store, 0, n) - for i := 0; i < n; i++ { + for i := range n { store := &metapb.Store{Id: uint64(i)} stores = append(stores, store) } @@ -121,7 +121,7 @@ func TestStoreWeight(t *testing.T) { re.NoError(storage.LoadStores(cache.PutStore)) leaderWeights := []float64{1.0, 2.0, 0.2} regionWeights := []float64{1.0, 3.0, 0.3} - for i := 0; i < n; i++ { + for i := range n { re.Equal(leaderWeights[i], cache.GetStore(uint64(i)).GetLeaderWeight()) re.Equal(regionWeights[i], cache.GetStore(uint64(i)).GetRegionWeight()) } @@ -278,7 +278,7 @@ func TestLoadRegions(t *testing.T) { func mustSaveRegions(re *require.Assertions, s endpoint.RegionStorage, n int) []*metapb.Region { regions := make([]*metapb.Region, 0, n) - for i := 0; i < n; i++ { + for i := range n { region := newTestRegionMeta(uint64(i)) regions = append(regions, region) } @@ -392,7 +392,7 @@ func generateKeys(size int) []string { func randomMerge(regions []*metapb.Region, n int, ratio int) { rand.New(rand.NewSource(6)) note := make(map[int]bool) - for i := 0; i < n*ratio/100; i++ { + for range n * ratio / 100 { pos := rand.Intn(n - 1) for { if _, ok := note[pos]; !ok { @@ -422,7 +422,7 @@ func randomMerge(regions []*metapb.Region, n int, ratio int) { func saveRegions(storage endpoint.RegionStorage, n int, ratio int) error { keys := generateKeys(n) regions := make([]*metapb.Region, 0, n) - for i := uint64(0); i < uint64(n); i++ { + for i := range uint64(n) { var region *metapb.Region if i == 0 { region = &metapb.Region{ diff --git a/pkg/syncer/client_test.go b/pkg/syncer/client_test.go index e7be77d2bb0..eab3fa58182 100644 --- a/pkg/syncer/client_test.go +++ b/pkg/syncer/client_test.go @@ -42,7 +42,7 @@ func TestLoadRegion(t *testing.T) { storage: storage.NewCoreStorage(storage.NewStorageWithMemoryBackend(), rs), bc: core.NewBasicCluster(), } - for i := 0; i < 30; i++ { + for i := range 30 { rs.SaveRegion(&metapb.Region{Id: uint64(i) + 1}) } re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/storage/endpoint/slowLoadRegion", "return(true)")) diff --git a/pkg/syncer/history_buffer_test.go b/pkg/syncer/history_buffer_test.go index 70a1caf13dc..6bcd58b1689 100644 --- a/pkg/syncer/history_buffer_test.go +++ b/pkg/syncer/history_buffer_test.go @@ -53,7 +53,7 @@ func TestBufferSize(t *testing.T) { // size equals 100 kvMem := kv.NewMemoryKV() h1 := newHistoryBuffer(100, kvMem) - for i := 0; i < 6; i++ { + for i := range 6 { h1.record(regions[i]) } re.Equal(6, h1.len()) diff --git a/pkg/timerpool/pool_test.go b/pkg/timerpool/pool_test.go index d6dffc723a9..e10291f1027 100644 --- a/pkg/timerpool/pool_test.go +++ b/pkg/timerpool/pool_test.go @@ -14,7 +14,7 @@ import ( func TestTimerPool(t *testing.T) { var tp TimerPool - for i := 0; i < 100; i++ { + for range 100 { timer := tp.Get(20 * time.Millisecond) select { diff --git a/pkg/tso/global_allocator.go b/pkg/tso/global_allocator.go index 5c7c905089c..d44297b803e 100644 --- a/pkg/tso/global_allocator.go +++ b/pkg/tso/global_allocator.go @@ -233,7 
+233,7 @@ func (gta *GlobalTSOAllocator) GenerateTSO(ctx context.Context, count uint32) (p // (whit synchronization with other Local TSO Allocators) ctx, cancel := context.WithCancel(gta.ctx) defer cancel() - for i := 0; i < maxRetryCount; i++ { + for range maxRetryCount { var ( err error shouldRetry, skipCheck bool @@ -366,7 +366,7 @@ func (gta *GlobalTSOAllocator) SyncMaxTS( ) error { defer trace.StartRegion(ctx, "GlobalTSOAllocator.SyncMaxTS").End() originalMaxTSO := *maxTSO - for i := 0; i < syncMaxRetryCount; i++ { + for i := range syncMaxRetryCount { // Collect all allocator leaders' client URLs allocatorLeaders := make(map[string]*pdpb.Member) for dcLocation := range dcLocationMap { diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index c19d790efc5..9288e70d968 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -868,7 +868,7 @@ func (kgm *KeyspaceGroupManager) updateKeyspaceGroupMembership( if oldLen != newLen { sameMembership = false } else { - for i := 0; i < oldLen; i++ { + for i := range oldLen { if oldKeyspaces[i] != newKeyspaces[i] { sameMembership = false break diff --git a/pkg/tso/keyspace_group_manager_test.go b/pkg/tso/keyspace_group_manager_test.go index 13a50084eac..3e80ca609b3 100644 --- a/pkg/tso/keyspace_group_manager_test.go +++ b/pkg/tso/keyspace_group_manager_test.go @@ -882,7 +882,7 @@ func collectAssignedKeyspaceGroupIDs(re *require.Assertions, kgm *KeyspaceGroupM defer kgm.RUnlock() ids := []uint32{} - for i := 0; i < len(kgm.kgs); i++ { + for i := range kgm.kgs { kg := kgm.kgs[i] if kg == nil { re.Nil(kgm.ams[i], fmt.Sprintf("ksg is nil but am is not nil for id %d", i)) @@ -909,7 +909,7 @@ func collectAllLoadedKeyspaceGroupIDs(kgm *KeyspaceGroupManager) []uint32 { defer kgm.RUnlock() ids := []uint32{} - for i := 0; i < len(kgm.kgs); i++ { + for i := range kgm.kgs { kg := kgm.kgs[i] if kg != nil { ids = append(ids, uint32(i)) @@ -964,7 +964,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestUpdateKeyspaceGroupMembership() // Verify the keyspaces loaded is sorted. re.Equal(len(keyspaces), len(newGroup.Keyspaces)) - for i := 0; i < len(newGroup.Keyspaces); i++ { + for i := range newGroup.Keyspaces { if i > 0 { re.Less(newGroup.Keyspaces[i-1], newGroup.Keyspaces[i]) } @@ -1089,7 +1089,7 @@ func (suite *keyspaceGroupManagerTestSuite) TestPrimaryPriorityChange() { } // And the primaries on TSO Server 1 should continue to serve TSO requests without any failures. 
- for i := 0; i < 100; i++ { + for range 100 { for _, id := range ids { _, keyspaceGroupBelongTo, err := mgr1.HandleTSORequest(suite.ctx, id, id, GlobalDCLocation, 1) re.NoError(err) diff --git a/pkg/tso/tso.go b/pkg/tso/tso.go index 427f6771461..87d5f9a14ae 100644 --- a/pkg/tso/tso.go +++ b/pkg/tso/tso.go @@ -394,7 +394,7 @@ func (t *timestampOracle) getTS(ctx context.Context, leadership *election.Leader if count == 0 { return resp, errs.ErrGenerateTimestamp.FastGenByArgs("tso count should be positive") } - for i := 0; i < maxRetryCount; i++ { + for i := range maxRetryCount { currentPhysical, _ := t.getTSO() if currentPhysical == typeutil.ZeroTime { // If it's leader, maybe SyncTimestamp hasn't completed yet diff --git a/pkg/utils/etcdutil/etcdutil.go b/pkg/utils/etcdutil/etcdutil.go index 3eb1afabeac..b63d5b5d0d9 100644 --- a/pkg/utils/etcdutil/etcdutil.go +++ b/pkg/utils/etcdutil/etcdutil.go @@ -504,7 +504,7 @@ func (lw *LoopWatcher) initFromEtcd(ctx context.Context) int64 { ) ticker := time.NewTicker(defaultEtcdRetryInterval) defer ticker.Stop() - for i := 0; i < lw.loadRetryTimes; i++ { + for i := range lw.loadRetryTimes { failpoint.Inject("loadTemporaryFail", func(val failpoint.Value) { if maxFailTimes, ok := val.(int); ok && i < maxFailTimes { err = errors.New("fail to read from etcd") diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index 99f71ffde05..623da37aa98 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -261,7 +261,7 @@ func TestRandomKillEtcd(t *testing.T) { // Randomly kill an etcd server and restart it cfgs := []embed.Config{etcds[0].Config(), etcds[1].Config(), etcds[2].Config()} - for i := 0; i < len(cfgs)*2; i++ { + for range len(cfgs) * 2 { killIndex := rand.Intn(len(etcds)) etcds[killIndex].Close() checkEtcdEndpointNum(re, client1, 2) @@ -541,7 +541,7 @@ func (suite *loopWatcherTestSuite) TestCallBack() { re.NoError(err) // put 10 keys - for i := 0; i < 10; i++ { + for i := range 10 { suite.put(re, fmt.Sprintf("TestCallBack%d", i), "") } time.Sleep(time.Second) @@ -550,7 +550,7 @@ func (suite *loopWatcherTestSuite) TestCallBack() { cache.RUnlock() // delete 10 keys - for i := 0; i < 10; i++ { + for i := range 10 { key := fmt.Sprintf("TestCallBack%d", i) _, err = suite.client.Delete(suite.ctx, key) re.NoError(err) @@ -564,9 +564,9 @@ func (suite *loopWatcherTestSuite) TestCallBack() { func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { re := suite.Require() for count := 1; count < 10; count++ { - for limit := 0; limit < 10; limit++ { + for limit := range 10 { ctx, cancel := context.WithCancel(suite.ctx) - for i := 0; i < count; i++ { + for i := range count { suite.put(re, fmt.Sprintf("TestWatcherLoadLimit%d", i), "") } cache := make([]string, 0) diff --git a/pkg/utils/etcdutil/health_checker.go b/pkg/utils/etcdutil/health_checker.go index a933279ff2b..d75814ec380 100644 --- a/pkg/utils/etcdutil/health_checker.go +++ b/pkg/utils/etcdutil/health_checker.go @@ -247,7 +247,7 @@ func (checker *healthChecker) pickEps(probeCh <-chan healthProbe) []string { // - [9s, 10s) // Then the picked endpoints will be {A, B} and if C is in the last used endpoints, it will be evicted later. 
factor := int(DefaultRequestTimeout / DefaultSlowRequestTime) - for i := 0; i < factor; i++ { + for i := range factor { minLatency, maxLatency := DefaultSlowRequestTime*time.Duration(i), DefaultSlowRequestTime*time.Duration(i+1) for _, probe := range probes { if minLatency <= probe.took && probe.took < maxLatency { diff --git a/pkg/utils/keypath/key_path.go b/pkg/utils/keypath/key_path.go index 7e1355c7b56..3696a35c4d3 100644 --- a/pkg/utils/keypath/key_path.go +++ b/pkg/utils/keypath/key_path.go @@ -172,7 +172,7 @@ func RegionPath(regionID uint64) string { if len(s) < keyLen { diff := keyLen - len(s) copy(b[diff:], s) - for i := 0; i < diff; i++ { + for i := range diff { b[i] = '0' } } else if len(s) > keyLen { @@ -381,7 +381,7 @@ func encodeKeyspaceGroupID(groupID uint32) string { func buildPath(withSuffix bool, str ...string) string { var sb strings.Builder - for i := 0; i < len(str); i++ { + for i := range str { if i != 0 { sb.WriteString("/") } diff --git a/pkg/utils/keypath/key_path_test.go b/pkg/utils/keypath/key_path_test.go index 5dc230d43bb..18096ca47bb 100644 --- a/pkg/utils/keypath/key_path_test.go +++ b/pkg/utils/keypath/key_path_test.go @@ -30,7 +30,7 @@ func TestRegionPath(t *testing.T) { return path.Join(regionPathPrefix, fmt.Sprintf("%020d", id)) } rand.New(rand.NewSource(time.Now().Unix())) - for i := 0; i < 1000; i++ { + for range 1000 { id := rand.Uint64() re.Equal(f(id), RegionPath(id)) } diff --git a/pkg/utils/metricutil/metricutil.go b/pkg/utils/metricutil/metricutil.go index f0f0220c311..0d73c7678a8 100644 --- a/pkg/utils/metricutil/metricutil.go +++ b/pkg/utils/metricutil/metricutil.go @@ -52,7 +52,7 @@ func camelCaseToSnakeCase(str string) string { length := len(runes) var ret []rune - for i := 0; i < length; i++ { + for i := range length { if i > 0 && unicode.IsUpper(runes[i]) && runesHasLowerNeighborAt(runes, i) { ret = append(ret, '_') } diff --git a/pkg/utils/operatorutil/operator_check.go b/pkg/utils/operatorutil/operator_check.go index b9428369109..ef0f6af37b2 100644 --- a/pkg/utils/operatorutil/operator_check.go +++ b/pkg/utils/operatorutil/operator_check.go @@ -51,7 +51,7 @@ func CheckMultiTargetTransferLeader(re *require.Assertions, op *operator.Operato } func trimTransferLeaders(op *operator.Operator) (steps []operator.OpStep, lastLeader uint64) { - for i := 0; i < op.Len(); i++ { + for i := range op.Len() { step := op.Step(i) if s, ok := step.(operator.TransferLeader); ok { lastLeader = s.ToStore diff --git a/pkg/utils/reflectutil/tag.go b/pkg/utils/reflectutil/tag.go index 6b5987ea900..7fc4b3fde9b 100644 --- a/pkg/utils/reflectutil/tag.go +++ b/pkg/utils/reflectutil/tag.go @@ -23,7 +23,7 @@ import ( // If we have both "a.c" and "b.c" config items, for a given c, it's hard for us to decide which config item it represents. // We'd better to naming a config item without duplication. 
func FindJSONFullTagByChildTag(t reflect.Type, tag string) string { - for i := 0; i < t.NumField(); i++ { + for i := range t.NumField() { field := t.Field(i) column := field.Tag.Get("json") @@ -46,7 +46,7 @@ func FindJSONFullTagByChildTag(t reflect.Type, tag string) string { // FindSameFieldByJSON is used to check whether there is same field between `m` and `v` func FindSameFieldByJSON(v any, m map[string]any) bool { t := reflect.TypeOf(v).Elem() - for i := 0; i < t.NumField(); i++ { + for i := range t.NumField() { jsonTag := t.Field(i).Tag.Get("json") if i := strings.Index(jsonTag, ","); i != -1 { // trim 'foobar,string' to 'foobar' jsonTag = jsonTag[:i] @@ -68,7 +68,7 @@ func FindFieldByJSONTag(t reflect.Type, tags []string) reflect.Type { } tag := tags[0] tagRemain := tags[1:] - for i := 0; i < t.NumField(); i++ { + for i := range t.NumField() { jsonTag := t.Field(i).Tag.Get("json") if j := strings.Index(jsonTag, ","); j != -1 { // trim 'foobar,string' to 'foobar' jsonTag = jsonTag[:j] diff --git a/pkg/utils/syncutil/lock_group_test.go b/pkg/utils/syncutil/lock_group_test.go index 897e6b777a6..91235f46888 100644 --- a/pkg/utils/syncutil/lock_group_test.go +++ b/pkg/utils/syncutil/lock_group_test.go @@ -28,7 +28,7 @@ func TestLockGroup(t *testing.T) { concurrency := 50 var wg sync.WaitGroup wg.Add(concurrency) - for i := 0; i < concurrency; i++ { + for range concurrency { go func(spaceID uint32) { defer wg.Done() mustSequentialUpdateSingle(re, spaceID, group, concurrency) @@ -47,7 +47,7 @@ func TestLockGroupWithRemoveEntryOnUnlock(t *testing.T) { // Test Concurrent lock/unlock. var wg sync.WaitGroup wg.Add(maxID) - for i := 0; i < maxID; i++ { + for i := range maxID { go func(spaceID uint32) { defer wg.Done() mustSequentialUpdateSingle(re, spaceID, group, 10) @@ -57,11 +57,11 @@ func TestLockGroupWithRemoveEntryOnUnlock(t *testing.T) { // Test range lock in a scenario with non-consecutive large key space. One of example is // keyspace group split loads non-consecutive keyspace meta in batches and lock all loaded // keyspace meta within a batch at the same time. - for i := 0; i < maxID; i++ { + for i := range maxID { group.Lock(uint32(i)) } re.Len(group.entries, maxID) - for i := 0; i < maxID; i++ { + for i := range maxID { group.Unlock(uint32(i)) } @@ -75,7 +75,7 @@ func mustSequentialUpdateSingle(re *require.Assertions, spaceID uint32, group *L total := 0 var wg sync.WaitGroup wg.Add(concurrency) - for i := 0; i < concurrency; i++ { + for range concurrency { go func() { defer wg.Done() group.Lock(spaceID) diff --git a/pkg/utils/tempurl/tempurl.go b/pkg/utils/tempurl/tempurl.go index cd5cd498f95..fae6f90af91 100644 --- a/pkg/utils/tempurl/tempurl.go +++ b/pkg/utils/tempurl/tempurl.go @@ -37,7 +37,7 @@ const AllocURLFromUT = "allocURLFromUT" // Alloc allocates a local URL for testing. 
func Alloc() string { - for i := 0; i < 10; i++ { + for range 10 { if u := tryAllocTestURL(); u != "" { return u } diff --git a/pkg/utils/testutil/testutil.go b/pkg/utils/testutil/testutil.go index 55e44039591..363b3f14aef 100644 --- a/pkg/utils/testutil/testutil.go +++ b/pkg/utils/testutil/testutil.go @@ -109,7 +109,7 @@ func GenerateTestDataConcurrently(count int, f func(int)) { var wg sync.WaitGroup tasks := make(chan int, count) workers := runtime.NumCPU() - for w := 0; w < workers; w++ { + for range workers { wg.Add(1) go func() { defer wg.Done() @@ -118,7 +118,7 @@ func GenerateTestDataConcurrently(count int, f func(int)) { } }() } - for i := 0; i < count; i++ { + for i := range count { tasks <- i } close(tasks) diff --git a/pkg/utils/tsoutil/tso_dispatcher.go b/pkg/utils/tsoutil/tso_dispatcher.go index 9dfb2515dc1..4240ce875b4 100644 --- a/pkg/utils/tsoutil/tso_dispatcher.go +++ b/pkg/utils/tsoutil/tso_dispatcher.go @@ -186,7 +186,7 @@ func addLogical(logical, count int64, suffixBits uint32) int64 { func (*TSODispatcher) finishRequest(requests []Request, physical, firstLogical int64, suffixBits uint32) error { countSum := int64(0) - for i := 0; i < len(requests); i++ { + for i := range requests { newCountSum, err := requests[i].postProcess(countSum, physical, firstLogical, suffixBits) if err != nil { return err diff --git a/pkg/utils/typeutil/time_test.go b/pkg/utils/typeutil/time_test.go index b8078f63fa8..d7de7d56a9f 100644 --- a/pkg/utils/typeutil/time_test.go +++ b/pkg/utils/typeutil/time_test.go @@ -24,7 +24,7 @@ import ( func TestParseTimestamp(t *testing.T) { re := require.New(t) - for i := 0; i < 3; i++ { + for range 3 { t := time.Now().Add(time.Second * time.Duration(rand.Int31n(1000))) data := Uint64ToBytes(uint64(t.UnixNano())) nt, err := ParseTimestamp(data) @@ -39,7 +39,7 @@ func TestParseTimestamp(t *testing.T) { func TestSubTimeByWallClock(t *testing.T) { re := require.New(t) - for i := 0; i < 100; i++ { + for range 100 { r := rand.Int63n(1000) t1 := time.Now() // Add r seconds. diff --git a/pkg/window/counter_test.go b/pkg/window/counter_test.go index dd0041ae79f..dc43e4fee3f 100644 --- a/pkg/window/counter_test.go +++ b/pkg/window/counter_test.go @@ -77,7 +77,7 @@ func TestRollingCounterReduce(t *testing.T) { BucketDuration: bucketDuration, } r := NewRollingCounter(opts) - for x := 0; x < size; x++ { + for x := range size { for i := 0; i <= x; i++ { r.Add(1) } diff --git a/pkg/window/window.go b/pkg/window/window.go index a5c4b0dfe3c..80fb5bb5714 100644 --- a/pkg/window/window.go +++ b/pkg/window/window.go @@ -111,7 +111,7 @@ func (w *Window) ResetBucket(offset int) { // ResetBuckets empties the buckets based on the given offsets. 
func (w *Window) ResetBuckets(offset int, count int) { - for i := 0; i < count; i++ { + for i := range count { w.ResetBucket(offset + i) } } diff --git a/pkg/window/window_test.go b/pkg/window/window_test.go index f4df861fc2f..59fb2ee0bbb 100644 --- a/pkg/window/window_test.go +++ b/pkg/window/window_test.go @@ -27,11 +27,11 @@ func TestWindowResetWindow(t *testing.T) { re := require.New(t) opts := Options{Size: 3} window := NewWindow(opts) - for i := 0; i < opts.Size; i++ { + for i := range opts.Size { window.Append(i, 1.0) } window.ResetWindow() - for i := 0; i < opts.Size; i++ { + for i := range opts.Size { re.Empty(window.Bucket(i).Points) } } @@ -40,7 +40,7 @@ func TestWindowResetBucket(t *testing.T) { re := require.New(t) opts := Options{Size: 3} window := NewWindow(opts) - for i := 0; i < opts.Size; i++ { + for i := range opts.Size { window.Append(i, 1.0) } window.ResetBucket(1) @@ -53,11 +53,11 @@ func TestWindowResetBuckets(t *testing.T) { re := require.New(t) opts := Options{Size: 3} window := NewWindow(opts) - for i := 0; i < opts.Size; i++ { + for i := range opts.Size { window.Append(i, 1.0) } window.ResetBuckets(0, 3) - for i := 0; i < opts.Size; i++ { + for i := range opts.Size { re.Empty(window.Bucket(i).Points) } } @@ -66,13 +66,13 @@ func TestWindowAppend(t *testing.T) { re := require.New(t) opts := Options{Size: 3} window := NewWindow(opts) - for i := 0; i < opts.Size; i++ { + for i := range opts.Size { window.Append(i, 1.0) } for i := 1; i < opts.Size; i++ { window.Append(i, 2.0) } - for i := 0; i < opts.Size; i++ { + for i := range opts.Size { re.Equal(float64(1.0), window.Bucket(i).Points[0]) } for i := 1; i < opts.Size; i++ { diff --git a/server/api/admin_test.go b/server/api/admin_test.go index f3b3dd64bd3..4995c57619b 100644 --- a/server/api/admin_test.go +++ b/server/api/admin_test.go @@ -108,9 +108,9 @@ func (suite *adminTestSuite) TestDropRegions() { np := uint64(3) regions := make([]*core.RegionInfo, 0, n) - for i := uint64(0); i < n; i++ { + for i := range n { peers := make([]*metapb.Peer, 0, np) - for j := uint64(0); j < np; j++ { + for j := range np { peer := &metapb.Peer{ Id: i*np + j, } @@ -130,7 +130,7 @@ func (suite *adminTestSuite) TestDropRegions() { } // Region epoch cannot decrease. 
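// The counters below were previously typed explicitly (`for i := uint64(0); ...`).
// With range over an integer, the loop variable adopts the operand's type, so a
// uint64 bound yields a uint64 index with no conversion in the body. A standalone
// sketch, illustrative only and not part of this patch:
package main

import "fmt"

func main() {
	var n uint64 = 3
	for i := range n { // i is uint64: 0, 1, 2
		fmt.Printf("%T %d\n", i, i)
	}
	for i := range uint64(2) { // a converted constant works as the bound, too
		fmt.Println(i)
	}
}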
- for i := uint64(0); i < n; i++ { + for i := range n { region := regions[i].Clone( core.SetRegionConfVer(50), core.SetRegionVersion(50), @@ -140,7 +140,7 @@ func (suite *adminTestSuite) TestDropRegions() { re.Error(err) } - for i := uint64(0); i < n; i++ { + for i := range n { region := cluster.GetRegionByKey([]byte(fmt.Sprintf("%d", i))) re.Equal(uint64(100), region.GetRegionEpoch().ConfVer) @@ -161,7 +161,7 @@ func (suite *adminTestSuite) TestDropRegions() { re.NoError(err) } - for i := uint64(0); i < n; i++ { + for i := range n { region := cluster.GetRegionByKey([]byte(fmt.Sprintf("%d", i))) re.Equal(uint64(50), region.GetRegionEpoch().ConfVer) diff --git a/server/api/region_test.go b/server/api/region_test.go index 482208261e3..c73dc02587d 100644 --- a/server/api/region_test.go +++ b/server/api/region_test.go @@ -658,7 +658,7 @@ func BenchmarkGetRegions(b *testing.B) { url := fmt.Sprintf("%s%s/api/v1/regions", addr, apiPrefix) mustBootstrapCluster(re, svr) regionCount := 1000000 - for i := 0; i < regionCount; i++ { + for i := range regionCount { r := core.NewTestRegionInfo(uint64(i+2), 1, []byte(fmt.Sprintf("%09d", i)), []byte(fmt.Sprintf("%09d", i+1)), diff --git a/server/api/server_test.go b/server/api/server_test.go index af41905ad86..96c793df527 100644 --- a/server/api/server_test.go +++ b/server/api/server_test.go @@ -103,7 +103,7 @@ func mustNewCluster(re *require.Assertions, num int, opts ...func(cfg *config.Co }(cfg) } - for i := 0; i < num; i++ { + for range num { svr := <-ch svrs = append(svrs, svr) } diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 69b815e6b95..ce9f06c9ba0 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -1075,7 +1075,7 @@ func (c *RaftCluster) processReportBuckets(buckets *metapb.Buckets) error { // the two request(A:3,B:2) get the same region and need to update the buckets. // the A will pass the check and set the version to 3, the B will fail because the region.bucket has changed. // the retry should keep the old version and the new version will be set to the region.bucket, like two requests (A:2,B:3). - for retry := 0; retry < 3; retry++ { + for range 3 { old := region.GetBuckets() // region should not update if the version of the buckets is less than the old one. 
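// The comment above describes a version check with a bounded retry: of two
// concurrent updates, only the one still holding the latest version wins, and a
// stale or losing writer retries at most a few times. A generic standalone sketch
// of that shape, illustrative only and not the PD implementation (`version` and
// `newVersion` are hypothetical names):
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var version atomic.Uint64
	version.Store(2)
	newVersion := uint64(3)
	for range 3 { // bounded retry, mirroring the rewritten loop above
		old := version.Load()
		if newVersion <= old {
			break // stale update: keep the old version, drop this one
		}
		if version.CompareAndSwap(old, newVersion) {
			fmt.Println("updated to", version.Load())
			break
		}
		// lost the race to a concurrent writer: re-read and retry
	}
}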
if old != nil && buckets.GetVersion() <= old.GetVersion() { @@ -2170,7 +2170,7 @@ func (c *RaftCluster) AddStoreLimit(store *metapb.Store) { cfg.StoreLimit[storeID] = slc c.opt.SetScheduleConfig(cfg) var err error - for i := 0; i < persistLimitRetryTimes; i++ { + for range persistLimitRetryTimes { if err = c.opt.Persist(c.storage); err == nil { log.Info("store limit added", zap.Uint64("store-id", storeID)) return @@ -2189,7 +2189,7 @@ func (c *RaftCluster) RemoveStoreLimit(storeID uint64) { delete(cfg.StoreLimit, storeID) c.opt.SetScheduleConfig(cfg) var err error - for i := 0; i < persistLimitRetryTimes; i++ { + for range persistLimitRetryTimes { if err = c.opt.Persist(c.storage); err == nil { log.Info("store limit removed", zap.Uint64("store-id", storeID)) id := strconv.FormatUint(storeID, 10) diff --git a/server/cluster/cluster_stat_test.go b/server/cluster/cluster_stat_test.go index e5352b7ac0a..01d937334f0 100644 --- a/server/cluster/cluster_stat_test.go +++ b/server/cluster/cluster_stat_test.go @@ -26,7 +26,7 @@ func cpu(usage int64) []*pdpb.RecordPair { n := 10 name := "cpu" pairs := make([]*pdpb.RecordPair, n) - for i := 0; i < n; i++ { + for i := range n { pairs[i] = &pdpb.RecordPair{ Key: fmt.Sprintf("%s:%d", name, i), Value: uint64(usage), @@ -42,7 +42,7 @@ func TestCPUEntriesAppend(t *testing.T) { checkAppend := func(appended bool, usage int64, threads ...string) { entries := NewCPUEntries(N) re.NotNil(entries) - for i := 0; i < N; i++ { + for range N { entry := &StatEntry{ CpuUsages: cpu(usage), } @@ -63,7 +63,7 @@ func TestCPUEntriesCPU(t *testing.T) { re.NotNil(entries) usages := cpu(20) - for i := 0; i < N; i++ { + for range N { entry := &StatEntry{ CpuUsages: usages, } @@ -80,7 +80,7 @@ func TestStatEntriesAppend(t *testing.T) { ThreadsCollected = []string{"cpu:"} // fill 2*N entries, 2 entries for each store - for i := 0; i < 2*N; i++ { + for i := range 2 * N { entry := &StatEntry{ StoreId: uint64(i % N), CpuUsages: cpu(20), @@ -89,7 +89,7 @@ func TestStatEntriesAppend(t *testing.T) { } // use i as the store ID - for i := 0; i < N; i++ { + for i := range N { re.Equal(float64(20), cst.stats[uint64(i)].CPU()) } } @@ -105,7 +105,7 @@ func TestStatEntriesCPU(t *testing.T) { ThreadsCollected = []string{"cpu:"} // 2 entries per store - for i := 0; i < 2*N; i++ { + for i := range 2 * N { entry := &StatEntry{ StoreId: uint64(i % N), CpuUsages: usages, @@ -126,7 +126,7 @@ func TestStatEntriesCPUStale(t *testing.T) { usages := cpu(20) ThreadsCollected = []string{"cpu:"} - for i := 0; i < 2*N; i++ { + for i := range 2 * N { entry := &StatEntry{ StoreId: uint64(i % N), CpuUsages: usages, @@ -145,7 +145,7 @@ func TestStatEntriesState(t *testing.T) { usages := cpu(usage) ThreadsCollected = []string{"cpu:"} - for i := 0; i < NumberOfEntries; i++ { + for range NumberOfEntries { entry := &StatEntry{ StoreId: 0, CpuUsages: usages, diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index c83f485ad3d..33ac13f4495 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -1286,19 +1286,19 @@ func TestRegionSplitAndMerge(t *testing.T) { n := 7 // Split. - for i := 0; i < n; i++ { + for range n { regions = core.SplitRegions(regions) heartbeatRegions(re, cluster, regions) } // Merge. - for i := 0; i < n; i++ { + for range n { regions = core.MergeRegions(regions) heartbeatRegions(re, cluster, regions) } // Split twice and merge once. 
- for i := 0; i < n*2; i++ { + for i := range n * 2 { if (i+1)%3 == 0 { regions = core.MergeRegions(regions) } else { @@ -1363,14 +1363,14 @@ func TestOfflineAndMerge(t *testing.T) { // Split. n := 7 - for i := 0; i < n; i++ { + for range n { regions = core.SplitRegions(regions) } heartbeatRegions(re, cluster, regions) re.Len(cluster.GetRegionStatsByType(statistics.OfflinePeer), len(regions)) // Merge. - for i := 0; i < n; i++ { + for range n { regions = core.MergeRegions(regions) heartbeatRegions(re, cluster, regions) re.Len(cluster.GetRegionStatsByType(statistics.OfflinePeer), len(regions)) @@ -1881,7 +1881,7 @@ func Test(t *testing.T) { tc := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opts, storage.NewStorageWithMemoryBackend()) cache := tc.BasicCluster - for i := uint64(0); i < n; i++ { + for i := range n { region := regions[i] regionKey := []byte(fmt.Sprintf("a%20d", i+1)) @@ -1927,7 +1927,7 @@ func Test(t *testing.T) { pendingFilter := filter.NewRegionPendingFilter() downFilter := filter.NewRegionDownFilter() - for i := uint64(0); i < n; i++ { + for i := range n { region := filter.SelectOneRegion(tc.RandLeaderRegions(i, []core.KeyRange{core.NewKeyRange("", "")}), nil, pendingFilter, downFilter) re.Equal(i, region.GetLeader().GetStoreId()) @@ -1946,8 +1946,8 @@ func Test(t *testing.T) { re.NotNil(cache.GetRegion(n - 1)) // All regions will be filtered out if they have pending peers. - for i := uint64(0); i < n; i++ { - for j := 0; j < cache.GetStoreLeaderCount(i); j++ { + for i := range n { + for range cache.GetStoreLeaderCount(i) { region := filter.SelectOneRegion(tc.RandLeaderRegions(i, []core.KeyRange{core.NewKeyRange("", "")}), nil, pendingFilter, downFilter) newRegion := region.Clone(core.WithPendingPeers(region.GetPeers())) origin, overlaps, rangeChanged = cache.SetRegion(newRegion) @@ -1955,7 +1955,7 @@ func Test(t *testing.T) { } re.Nil(filter.SelectOneRegion(tc.RandLeaderRegions(i, []core.KeyRange{core.NewKeyRange("", "")}), nil, pendingFilter, downFilter)) } - for i := uint64(0); i < n; i++ { + for i := range n { re.Nil(filter.SelectOneRegion(tc.RandFollowerRegions(i, []core.KeyRange{core.NewKeyRange("", "")}), nil, pendingFilter, downFilter)) } } @@ -2213,9 +2213,9 @@ func newTestStores(n uint64, version string) []*core.StoreInfo { // Each region contains np peers, the first peer is the leader. 
func newTestRegions(n, m, np uint64) []*core.RegionInfo { regions := make([]*core.RegionInfo, 0, n) - for i := uint64(0); i < n; i++ { + for i := range n { peers := make([]*metapb.Peer, 0, np) - for j := uint64(0); j < np; j++ { + for j := range np { peer := &metapb.Peer{ Id: 100000000 + i*np + j, } @@ -2542,12 +2542,12 @@ func TestCollectMetricsConcurrent(t *testing.T) { for i := 0; i <= count; i++ { go func(i int) { defer wg.Done() - for j := 0; j < 1000; j++ { + for range 1000 { re.NoError(tc.addRegionStore(uint64(i%5), rand.Intn(200))) } }(i) } - for i := 0; i < 1000; i++ { + for range 1000 { co.CollectHotSpotMetrics() controller.CollectSchedulerMetrics() rc.collectSchedulingMetrics() @@ -2575,7 +2575,7 @@ func TestCollectMetrics(t *testing.T) { controller := co.GetSchedulersController() count := 10 for i := 0; i <= count; i++ { - for k := 0; k < 200; k++ { + for k := range 200 { item := &statistics.HotPeerStat{ StoreID: uint64(i % 5), RegionID: uint64(i*1000 + k), @@ -2587,7 +2587,7 @@ func TestCollectMetrics(t *testing.T) { } } - for i := 0; i < 1000; i++ { + for range 1000 { co.CollectHotSpotMetrics() controller.CollectSchedulerMetrics() rc.collectSchedulingMetrics() @@ -2741,7 +2741,7 @@ func TestCheckerIsBusy(t *testing.T) { operator.OpReplica, operator.OpRegion | operator.OpMerge, } for i, operatorKind := range operatorKinds { - for j := uint64(0); j < num; j++ { + for j := range num { regionID := j + uint64(i+1)*num re.NoError(tc.addLeaderRegion(regionID, 1)) switch operatorKind { @@ -3064,7 +3064,7 @@ func TestShouldRunWithNonLeaderRegions(t *testing.T) { re.NoError(tc.addLeaderStore(1, 10)) re.NoError(tc.addLeaderStore(2, 0)) re.NoError(tc.addLeaderStore(3, 0)) - for i := 0; i < 10; i++ { + for i := range 10 { re.NoError(tc.LoadRegion(uint64(i+1), 1, 2, 3)) } re.False(co.ShouldRun()) @@ -3421,7 +3421,7 @@ func BenchmarkPatrolRegion(b *testing.B) { return } } - for i := 0; i < regionNum; i++ { + for i := range regionNum { if err := tc.addLeaderRegion(uint64(i), 1, 2, 3); err != nil { return } @@ -3525,7 +3525,7 @@ func TestStoreOverloaded(t *testing.T) { opt.SetAllStoresLimit(storelimit.AddPeer, 600) opt.SetAllStoresLimit(storelimit.RemovePeer, 600) time.Sleep(time.Second) - for i := 0; i < 10; i++ { + for range 10 { ops, _ := lb.Schedule(tc, false /* dryRun */) re.Len(ops, 1) op := ops[0] @@ -3534,7 +3534,7 @@ func TestStoreOverloaded(t *testing.T) { } // sleep 1 seconds to make sure that the token is filled up time.Sleep(time.Second) - for i := 0; i < 100; i++ { + for range 100 { ops, _ := lb.Schedule(tc, false /* dryRun */) re.NotEmpty(ops) } @@ -3880,7 +3880,7 @@ func BenchmarkHandleRegionHeartbeat(b *testing.B) { pendingPeers := []*metapb.Peer{peers[1], peers[2]} var requests []*pdpb.RegionHeartbeatRequest - for i := 0; i < 1000000; i++ { + for i := range 1000000 { request := &pdpb.RegionHeartbeatRequest{ Region: &metapb.Region{Id: 10, Peers: peers, StartKey: []byte{byte(i)}, EndKey: []byte{byte(i + 1)}}, Leader: peers[0], diff --git a/server/forward.go b/server/forward.go index 7fbbb8e04f8..26e3869806d 100644 --- a/server/forward.go +++ b/server/forward.go @@ -451,7 +451,7 @@ func (s *GrpcServer) getGlobalTSO(ctx context.Context) (pdpb.Timestamp, error) { } return false } - for i := 0; i < maxRetryTimesRequestTSOServer; i++ { + for i := range maxRetryTimesRequestTSOServer { if i > 0 { time.Sleep(retryIntervalRequestTSOServer) } diff --git a/server/join/join.go b/server/join/join.go index bdc2704cd47..e77675f2196 100644 --- a/server/join/join.go +++ b/server/join/join.go 
@@ -174,7 +174,7 @@ func PrepareJoinCluster(cfg *config.Config) error { listSucc bool ) - for i := 0; i < listMemberRetryTimes; i++ { + for range listMemberRetryTimes { listResp, err = etcdutil.ListEtcdMembers(client.Ctx(), client) if err != nil { return err diff --git a/server/server.go b/server/server.go index 96e359e40d8..cc3270de950 100644 --- a/server/server.go +++ b/server/server.go @@ -1984,7 +1984,7 @@ func (s *Server) UnmarkSnapshotRecovering(ctx context.Context) error { func (s *Server) GetServicePrimaryAddr(ctx context.Context, serviceName string) (string, bool) { ticker := time.NewTicker(retryIntervalGetServicePrimary) defer ticker.Stop() - for i := 0; i < maxRetryTimesGetServicePrimary; i++ { + for range maxRetryTimesGetServicePrimary { if v, ok := s.servicePrimaryMap.Load(serviceName); ok { return v.(string), true } diff --git a/server/server_test.go b/server/server_test.go index af76146df36..7dd91b9f61f 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -61,7 +61,7 @@ func (suite *leaderServerTestSuite) SetupSuite() { cfgs := NewTestMultiConfig(assertutil.CheckerWithNilAssert(re), 3) ch := make(chan *Server, 3) - for i := 0; i < 3; i++ { + for i := range 3 { cfg := cfgs[i] go func() { @@ -74,7 +74,7 @@ func (suite *leaderServerTestSuite) SetupSuite() { }() } - for i := 0; i < 3; i++ { + for range 3 { svr := <-ch suite.svrs[svr.GetAddr()] = svr suite.leaderPath = svr.GetMember().GetLeaderPath() @@ -117,7 +117,7 @@ func newTestServersWithCfgs( }(cfg) } - for i := 0; i < len(cfgs); i++ { + for range cfgs { svr := <-ch re.NotNil(svr) svrs = append(svrs, svr) diff --git a/tests/cluster.go b/tests/cluster.go index 5cd90d8e03e..652db045d34 100644 --- a/tests/cluster.go +++ b/tests/cluster.go @@ -430,7 +430,7 @@ func (s *TestServer) BootstrapCluster() error { // make a test know the PD leader has been elected as soon as possible. // If it exceeds the maximum number of loops, it will return nil. 
func (s *TestServer) WaitLeader() bool { - for i := 0; i < WaitLeaderRetryTimes; i++ { + for range WaitLeaderRetryTimes { if s.server.GetMember().IsLeader() { return true } @@ -661,7 +661,7 @@ func (c *TestCluster) WaitLeader(ops ...WaitOption) string { for _, op := range ops { op(option) } - for i := 0; i < option.retryTimes; i++ { + for range option.retryTimes { counter := make(map[string]int) running := 0 for _, s := range c.servers { @@ -693,7 +693,7 @@ func (c *TestCluster) WaitRegionSyncerClientsReady(n int) bool { retryTimes: 40, waitInterval: WaitLeaderCheckInterval, } - for i := 0; i < option.retryTimes; i++ { + for range option.retryTimes { name := c.GetLeader() if len(name) == 0 { time.Sleep(option.waitInterval) @@ -730,7 +730,7 @@ func (c *TestCluster) WaitAllocatorLeader(dcLocation string, ops ...WaitOption) for _, op := range ops { op(option) } - for i := 0; i < option.retryTimes; i++ { + for range option.retryTimes { counter := make(map[string]int) running := 0 for _, s := range c.servers { diff --git a/tests/config.go b/tests/config.go index ee184f0f90e..fcda28d4a3d 100644 --- a/tests/config.go +++ b/tests/config.go @@ -104,7 +104,7 @@ type clusterConfig struct { func newClusterConfig(n int) *clusterConfig { var cc clusterConfig - for i := 0; i < n; i++ { + for range n { c := newServerConfig(cc.nextServerName(), &cc, false) cc.InitialServers = append(cc.InitialServers, c) } diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index 9574918a74a..dfa7c15120e 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -217,7 +217,7 @@ func TestLeaderTransferAndMoveCluster(t *testing.T) { }() // Transfer leader. - for i := 0; i < 3; i++ { + for range 3 { oldLeaderName := cluster.WaitLeader() err := cluster.GetServer(oldLeaderName).ResignLeader() re.NoError(err) @@ -228,7 +228,7 @@ func TestLeaderTransferAndMoveCluster(t *testing.T) { // ABC->ABCDEF oldServers := cluster.GetServers() oldLeaderName := cluster.WaitLeader() - for i := 0; i < 3; i++ { + for range 3 { newPD, err := cluster.Join(ctx) re.NoError(err) re.NoError(newPD.Run()) @@ -366,11 +366,11 @@ func TestTSOFollowerProxy(t *testing.T) { var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { go func() { defer wg.Done() var lastTS uint64 - for i := 0; i < tsoRequestRound; i++ { + for range tsoRequestRound { physical, logical, err := cli2.GetTS(context.Background()) re.NoError(err) ts := tsoutil.ComposeTS(physical, logical) @@ -392,11 +392,11 @@ func TestTSOFollowerProxy(t *testing.T) { re.NoError(err) wg.Add(tsoRequestConcurrencyNumber) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { go func() { defer wg.Done() var lastTS uint64 - for i := 0; i < tsoRequestRound; i++ { + for range tsoRequestRound { physical, logical, err := cli2.GetTS(context.Background()) if err != nil { // It can only be the context canceled error caused by the stale stream cleanup. 
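// Editor's note (not part of the patch): the loop hunks throughout this change apply the same
// Go 1.22 "range over int" rewrite, so a minimal standalone sketch of the semantics being relied
// on may help reviewers; it assumes only a Go 1.22+ toolchain and uses illustrative names.
// `for i := range n` iterates i = 0 .. n-1 and i takes the type of n, which is why loops over
// uint64 counters keep compiling without extra casts, and a loop that never uses its index can
// drop the variable entirely (`for range n`).
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		n := uint64(3)
//		// Equivalent to: for i := uint64(0); i < n; i++ { ... }
//		for i := range n {
//			fmt.Printf("i=%d (%T)\n", i, i) // i is uint64, matching n
//		}
//		// Index unused: the old `for i := 0; i < 5; i++` form collapses to a bare counted loop.
//		for range 5 {
//			fmt.Println("tick")
//		}
//	}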
@@ -463,7 +463,7 @@ func TestUnavailableTimeAfterLeaderIsReady(t *testing.T) { getTsoFunc := func() { defer wg.Done() var lastTS uint64 - for i := 0; i < tsoRequestRound; i++ { + for range tsoRequestRound { var physical, logical int64 var ts uint64 physical, logical, err = cli.GetTS(context.Background()) @@ -602,11 +602,11 @@ func requestGlobalAndLocalTSO( ) { for _, dcLocation := range dcLocationConfig { wg.Add(tsoRequestConcurrencyNumber) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { go func(dc string) { defer wg.Done() var lastTS uint64 - for i := 0; i < tsoRequestRound; i++ { + for range tsoRequestRound { globalPhysical1, globalLogical1, err := cli.GetTS(context.TODO()) // The allocator leader may be changed due to the environment issue. if err != nil { @@ -942,7 +942,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromFollower() { }) // follower have no region cnt := 0 - for i := 0; i < 100; i++ { + for range 100 { resp, err := cli.GetRegion(ctx, []byte("a"), pd.WithAllowFollowerHandle()) if err == nil && resp != nil { cnt++ @@ -956,7 +956,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromFollower() { re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork1", fmt.Sprintf("return(\"%s\")", leader.GetAddr()))) time.Sleep(150 * time.Millisecond) cnt = 0 - for i := 0; i < 100; i++ { + for range 100 { resp, err := cli.GetRegion(ctx, []byte("a"), pd.WithAllowFollowerHandle()) if err == nil && resp != nil { cnt++ @@ -971,7 +971,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromFollower() { re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork1", fmt.Sprintf("return(\"%s\")", follower.GetAddr()))) time.Sleep(100 * time.Millisecond) cnt = 0 - for i := 0; i < 100; i++ { + for range 100 { resp, err := cli.GetRegion(ctx, []byte("a"), pd.WithAllowFollowerHandle()) if err == nil && resp != nil { cnt++ @@ -984,7 +984,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromFollower() { // follower client failed will retry by leader service client. 
re.NoError(failpoint.Enable("github.com/tikv/pd/server/followerHandleError", "return(true)")) cnt = 0 - for i := 0; i < 100; i++ { + for range 100 { resp, err := cli.GetRegion(ctx, []byte("a"), pd.WithAllowFollowerHandle()) if err == nil && resp != nil { cnt++ @@ -999,7 +999,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromFollower() { re.NoError(failpoint.Enable("github.com/tikv/pd/client/fastCheckAvailable", "return(true)")) time.Sleep(100 * time.Millisecond) cnt = 0 - for i := 0; i < 100; i++ { + for range 100 { resp, err := cli.GetRegion(ctx, []byte("a"), pd.WithAllowFollowerHandle()) if err == nil && resp != nil { cnt++ @@ -1022,7 +1022,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTSFuture() { ctxs := make([]context.Context, 20) cancels := make([]context.CancelFunc, 20) - for i := 0; i < 20; i++ { + for i := range 20 { ctxs[i], cancels[i] = context.WithCancel(ctx) } start := time.Now() @@ -1032,7 +1032,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTSFuture() { wg1.Add(1) go func() { <-time.After(time.Second) - for i := 0; i < 20; i++ { + for i := range 20 { cancels[i]() } wg1.Done() @@ -1044,7 +1044,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTSFuture() { }() wg3.Add(1) go func() { - for i := 0; i < 20; i++ { + for i := range 20 { cli.GetTSAsync(ctxs[i]) } wg3.Done() @@ -1057,7 +1057,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTSFuture() { } func checkTS(re *require.Assertions, cli pd.Client, lastTS uint64) uint64 { - for i := 0; i < tsoRequestRound; i++ { + for range tsoRequestRound { physical, logical, err := cli.GetTS(context.TODO()) if err == nil { ts := tsoutil.ComposeTS(physical, logical) @@ -1383,7 +1383,7 @@ func (suite *clientTestSuite) TestGetPrevRegion() { re := suite.Require() regionLen := 10 regions := make([]*metapb.Region, 0, regionLen) - for i := 0; i < regionLen; i++ { + for i := range regionLen { regionID := regionIDAllocator.alloc() r := &metapb.Region{ Id: regionID, @@ -1405,7 +1405,7 @@ func (suite *clientTestSuite) TestGetPrevRegion() { re.NoError(err) } time.Sleep(500 * time.Millisecond) - for i := 0; i < 20; i++ { + for i := range 20 { testutil.Eventually(re, func() bool { r, err := suite.client.GetPrevRegion(context.Background(), []byte{byte(i)}) re.NoError(err) @@ -1422,7 +1422,7 @@ func (suite *clientTestSuite) TestScanRegions() { re := suite.Require() regionLen := 10 regions := make([]*metapb.Region, 0, regionLen) - for i := 0; i < regionLen; i++ { + for i := range regionLen { regionID := regionIDAllocator.alloc() r := &metapb.Region{ Id: regionID, @@ -1950,7 +1950,7 @@ func TestClientWatchWithRevision(t *testing.T) { re.LessOrEqual(r.Header.GetRevision(), res.GetHeader().GetRevision()) // Mock when start watcher there are existed some keys, will load firstly - for i := 0; i < 6; i++ { + for i := range 6 { _, err = s.GetEtcdClient().Put(context.Background(), watchPrefix+strconv.Itoa(i), strconv.Itoa(i)) re.NoError(err) } @@ -1958,7 +1958,7 @@ func TestClientWatchWithRevision(t *testing.T) { ch, err := client.Watch(context.Background(), []byte(watchPrefix), pd.WithRev(res.GetHeader().GetRevision()), pd.WithPrefix(), pd.WithPrevKV()) re.NoError(err) // Mock delete - for i := 0; i < 3; i++ { + for i := range 3 { _, err = s.GetEtcdClient().Delete(context.Background(), watchPrefix+strconv.Itoa(i)) re.NoError(err) } @@ -2042,7 +2042,7 @@ func (suite *clientTestSuite) TestBatchScanRegions() { regions = make([]*metapb.Region, 0, regionLen) ) - for i := 0; i < regionLen; i++ { + for i 
:= range regionLen { regionID := regionIDAllocator.alloc() r := &metapb.Region{ Id: regionID, diff --git a/tests/integrations/client/gc_client_test.go b/tests/integrations/client/gc_client_test.go index 7f96e59d9aa..ba1c5a58a4b 100644 --- a/tests/integrations/client/gc_client_test.go +++ b/tests/integrations/client/gc_client_test.go @@ -110,7 +110,7 @@ func (suite *gcClientTestSuite) TestWatch1() { }, receiver) // Init gc safe points as index value of keyspace 0 ~ 5. - for i := 0; i < 6; i++ { + for i := range 6 { suite.mustUpdateSafePoint(re, uint32(i), uint64(i)) } @@ -120,7 +120,7 @@ func (suite *gcClientTestSuite) TestWatch1() { } // check gc safe point equal to keyspace id for keyspace 0 ~ 2 . - for i := 0; i < 3; i++ { + for i := range 3 { re.Equal(uint64(i), suite.mustLoadSafePoint(re, uint32(i))) } diff --git a/tests/integrations/client/global_config_test.go b/tests/integrations/client/global_config_test.go index 5eed743d8ba..c73e714cc2a 100644 --- a/tests/integrations/client/global_config_test.go +++ b/tests/integrations/client/global_config_test.go @@ -131,12 +131,12 @@ func (suite *globalConfigTestSuite) TestLoadWithoutConfigPath() { func (suite *globalConfigTestSuite) TestLoadOtherConfigPath() { re := suite.Require() defer func() { - for i := 0; i < 3; i++ { + for i := range 3 { _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() - for i := 0; i < 3; i++ { + for i := range 3 { _, err := suite.server.GetClient().Put(suite.server.Context(), path.Join("OtherConfigPath", strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } @@ -154,7 +154,7 @@ func (suite *globalConfigTestSuite) TestLoadOtherConfigPath() { func (suite *globalConfigTestSuite) TestLoadAndStore() { re := suite.Require() defer func() { - for i := 0; i < 3; i++ { + for range 3 { _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("test")) re.NoError(err) } @@ -178,7 +178,7 @@ func (suite *globalConfigTestSuite) TestLoadAndStore() { func (suite *globalConfigTestSuite) TestStore() { re := suite.Require() defer func() { - for i := 0; i < 3; i++ { + for range 3 { _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath("test")) re.NoError(err) } @@ -189,7 +189,7 @@ func (suite *globalConfigTestSuite) TestStore() { Changes: changes, }) re.NoError(err) - for i := 0; i < 3; i++ { + for i := range 3 { res, err := suite.server.GetClient().Get(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) re.Equal(getEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) @@ -199,7 +199,7 @@ func (suite *globalConfigTestSuite) TestStore() { func (suite *globalConfigTestSuite) TestWatch() { re := suite.Require() defer func() { - for i := 0; i < 3; i++ { + for i := range 3 { // clean up _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) @@ -212,7 +212,7 @@ func (suite *globalConfigTestSuite) TestWatch() { ConfigPath: globalConfigPath, Revision: 0, }, server) - for i := 0; i < 6; i++ { + for i := range 6 { _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } @@ -230,12 +230,12 @@ func (suite *globalConfigTestSuite) TestWatch() { func (suite *globalConfigTestSuite) TestClientLoadWithoutNames() { re := suite.Require() defer func() { - for i := 0; i < 3; i++ { + for i := range 3 { _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) 
re.NoError(err) } }() - for i := 0; i < 3; i++ { + for i := range 3 { _, err := suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } @@ -264,12 +264,12 @@ func (suite *globalConfigTestSuite) TestClientLoadWithoutConfigPath() { func (suite *globalConfigTestSuite) TestClientLoadOtherConfigPath() { re := suite.Require() defer func() { - for i := 0; i < 3; i++ { + for i := range 3 { _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } }() - for i := 0; i < 3; i++ { + for i := range 3 { _, err := suite.server.GetClient().Put(suite.server.Context(), path.Join("OtherConfigPath", strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } @@ -284,7 +284,7 @@ func (suite *globalConfigTestSuite) TestClientLoadOtherConfigPath() { func (suite *globalConfigTestSuite) TestClientStore() { re := suite.Require() defer func() { - for i := 0; i < 3; i++ { + for i := range 3 { _, err := suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } @@ -292,7 +292,7 @@ func (suite *globalConfigTestSuite) TestClientStore() { err := suite.client.StoreGlobalConfig(suite.server.Context(), globalConfigPath, []pd.GlobalConfigItem{{Name: "0", Value: "0"}, {Name: "1", Value: "1"}, {Name: "2", Value: "2"}}) re.NoError(err) - for i := 0; i < 3; i++ { + for i := range 3 { res, err := suite.server.GetClient().Get(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) re.Equal(getEtcdPath(string(res.Kvs[0].Value)), string(res.Kvs[0].Key)) @@ -320,7 +320,7 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { suite.LessOrEqual(r.Header.GetRevision(), revision) re.Equal(pd.GlobalConfigItem{EventType: pdpb.EventType_PUT, Name: getEtcdPath("test"), PayLoad: []byte("test"), Value: "test"}, res[0]) // Mock when start watcher there are existed some keys, will load firstly - for i := 0; i < 6; i++ { + for i := range 6 { _, err = suite.server.GetClient().Put(suite.server.Context(), getEtcdPath(strconv.Itoa(i)), strconv.Itoa(i)) re.NoError(err) } @@ -328,7 +328,7 @@ func (suite *globalConfigTestSuite) TestClientWatchWithRevision() { configChan, err := suite.client.WatchGlobalConfig(suite.server.Context(), globalConfigPath, revision) re.NoError(err) // Mock delete - for i := 0; i < 3; i++ { + for i := range 3 { _, err = suite.server.GetClient().Delete(suite.server.Context(), getEtcdPath(strconv.Itoa(i))) re.NoError(err) } diff --git a/tests/integrations/client/http_client_test.go b/tests/integrations/client/http_client_test.go index e0e09dd6bc5..a6b6616483c 100644 --- a/tests/integrations/client/http_client_test.go +++ b/tests/integrations/client/http_client_test.go @@ -725,7 +725,7 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { failureCnt, err := metricCnt.GetMetricWithLabelValues([]string{"CreateScheduler", "network error"}...) re.NoError(err) failureCnt.Write(&out) - re.Equal(float64(2), out.Counter.GetValue()) + re.Equal(float64(2), out.GetCounter().GetValue()) c.Close() leader := sd.GetServingURL() @@ -741,7 +741,7 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { successCnt, err := metricCnt.GetMetricWithLabelValues([]string{"CreateScheduler", ""}...) 
re.NoError(err) successCnt.Write(&out) - re.Equal(float64(1), out.Counter.GetValue()) + re.Equal(float64(1), out.GetCounter().GetValue()) c.Close() httpClient = pd.NewHTTPClientWithRequestChecker(func(req *http.Request) error { @@ -756,11 +756,11 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { successCnt, err = metricCnt.GetMetricWithLabelValues([]string{"CreateScheduler", ""}...) re.NoError(err) successCnt.Write(&out) - re.Equal(float64(2), out.Counter.GetValue()) + re.Equal(float64(2), out.GetCounter().GetValue()) failureCnt, err = metricCnt.GetMetricWithLabelValues([]string{"CreateScheduler", "network error"}...) re.NoError(err) failureCnt.Write(&out) - re.Equal(float64(3), out.Counter.GetValue()) + re.Equal(float64(3), out.GetCounter().GetValue()) c.Close() } @@ -840,7 +840,7 @@ func (suite *httpClientTestSuite) TestRetryOnLeaderChange() { leader := suite.cluster.GetLeaderServer() re.NotNil(leader) - for i := 0; i < 3; i++ { + for range 3 { leader.ResignLeader() re.NotEmpty(suite.cluster.WaitLeader()) leader = suite.cluster.GetLeaderServer() @@ -907,7 +907,7 @@ func (suite *httpClientTestSuite) TestGetGCSafePoint() { } // delete the safepoints - for i := 0; i < 3; i++ { + for i := range 3 { msg, err := client.DeleteGCSafePoint(ctx, list.ServiceGCSafepoints[i].ServiceID) re.NoError(err) re.Equal("Delete service GC safepoint successfully.", msg) diff --git a/tests/integrations/client/keyspace_test.go b/tests/integrations/client/keyspace_test.go index 573302f0695..841859bbb80 100644 --- a/tests/integrations/client/keyspace_test.go +++ b/tests/integrations/client/keyspace_test.go @@ -38,7 +38,7 @@ func mustMakeTestKeyspaces(re *require.Assertions, server *server.Server, start var err error keyspaces := make([]*keyspacepb.KeyspaceMeta, testKeyspaceCount) manager := server.GetKeyspaceManager() - for i := 0; i < testKeyspaceCount; i++ { + for i := range testKeyspaceCount { keyspaces[i], err = manager.CreateKeyspace(&keyspace.CreateKeyspaceRequest{ Name: fmt.Sprintf("test_keyspace_%d", start+i), Config: map[string]string{ diff --git a/tests/integrations/mcs/discovery/register_test.go b/tests/integrations/mcs/discovery/register_test.go index ce019b29da8..7251531eb39 100644 --- a/tests/integrations/mcs/discovery/register_test.go +++ b/tests/integrations/mcs/discovery/register_test.go @@ -72,7 +72,7 @@ func (suite *serverRegisterTestSuite) TearDownSuite() { } func (suite *serverRegisterTestSuite) TestServerRegister() { - for i := 0; i < 3; i++ { + for range 3 { suite.checkServerRegister(constant.TSOServiceName) } } @@ -124,7 +124,7 @@ func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName strin cleanup() } }() - for i := 0; i < serverNum; i++ { + for range serverNum { s, cleanup := suite.addServer(serviceName) cleanups = append(cleanups, cleanup) serverMap[s.GetAddr()] = s diff --git a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go index 49f1566de93..c6b6070fda5 100644 --- a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go +++ b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go @@ -88,7 +88,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodesUpdate() { cleanup() } }() - for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount+1; i++ { + for range constant.DefaultKeyspaceGroupReplicaCount + 1 { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -144,7 
+144,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocReplica() { cleanup() } }() - for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount; i++ { + for range constant.DefaultKeyspaceGroupReplicaCount { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -244,7 +244,7 @@ func (suite *keyspaceGroupTestSuite) TestSetNodes() { cleanup() } }() - for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount; i++ { + for range constant.DefaultKeyspaceGroupReplicaCount { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -311,7 +311,7 @@ func (suite *keyspaceGroupTestSuite) TestDefaultKeyspaceGroup() { cleanup() } }() - for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount; i++ { + for range constant.DefaultKeyspaceGroupReplicaCount { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s @@ -345,7 +345,7 @@ func (suite *keyspaceGroupTestSuite) TestAllocNodes() { cleanup() } }() - for i := 0; i < constant.DefaultKeyspaceGroupReplicaCount+1; i++ { + for range constant.DefaultKeyspaceGroupReplicaCount + 1 { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) cleanups = append(cleanups, cleanup) nodes[s.GetAddr()] = s diff --git a/tests/integrations/mcs/members/member_test.go b/tests/integrations/mcs/members/member_test.go index 4e1e6534416..e1953de4e62 100644 --- a/tests/integrations/mcs/members/member_test.go +++ b/tests/integrations/mcs/members/member_test.go @@ -75,7 +75,7 @@ func (suite *memberTestSuite) SetupTest() { // TSO nodes := make(map[string]bs.Server) // mock 3 tso nodes, which is more than the default replica count(DefaultKeyspaceGroupReplicaCount). 
- for i := 0; i < 3; i++ { + for range 3 { s, cleanup := tests.StartSingleTSOTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) nodes[s.GetAddr()] = s suite.cleanupFunc = append(suite.cleanupFunc, func() { @@ -95,7 +95,7 @@ func (suite *memberTestSuite) SetupTest() { // Scheduling nodes = make(map[string]bs.Server) - for i := 0; i < 3; i++ { + for range 3 { s, cleanup := tests.StartSingleSchedulingTestServer(suite.ctx, re, suite.backendEndpoints, tempurl.Alloc()) nodes[s.GetAddr()] = s suite.cleanupFunc = append(suite.cleanupFunc, func() { diff --git a/tests/integrations/mcs/resourcemanager/resource_manager_test.go b/tests/integrations/mcs/resourcemanager/resource_manager_test.go index 4b2e2040ab0..6a8584c4d68 100644 --- a/tests/integrations/mcs/resourcemanager/resource_manager_test.go +++ b/tests/integrations/mcs/resourcemanager/resource_manager_test.go @@ -205,7 +205,7 @@ func (suite *resourceManagerClientTestSuite) TestWatchResourceGroup() { // Mock add resource groups var meta *rmpb.ResourceGroup groupsNum := 10 - for i := 0; i < groupsNum; i++ { + for i := range groupsNum { group.Name = groupNamePrefix + strconv.Itoa(i) resp, err := cli.AddResourceGroup(suite.ctx, group) re.NoError(err) @@ -228,14 +228,14 @@ func (suite *resourceManagerClientTestSuite) TestWatchResourceGroup() { }, } } - for i := 0; i < groupsNum; i++ { + for i := range groupsNum { group.Name = groupNamePrefix + strconv.Itoa(i) modifySettings(group, 20000) resp, err := cli.ModifyResourceGroup(suite.ctx, group) re.NoError(err) re.Contains(resp, "Success!") } - for i := 0; i < groupsNum; i++ { + for i := range groupsNum { testutil.Eventually(re, func() bool { name := groupNamePrefix + strconv.Itoa(i) meta = controller.GetActiveResourceGroup(name) @@ -268,7 +268,7 @@ func (suite *resourceManagerClientTestSuite) TestWatchResourceGroup() { // Mock delete resource groups suite.cleanupResourceGroups(re) - for i := 0; i < groupsNum; i++ { + for i := range groupsNum { testutil.Eventually(re, func() bool { name := groupNamePrefix + strconv.Itoa(i) meta = controller.GetActiveResourceGroup(name) @@ -439,7 +439,7 @@ func (suite *resourceManagerClientTestSuite) TestResourceGroupController() { } v = true sum := time.Duration(0) - for j := 0; j < cas.tcs[i].times; j++ { + for range cas.tcs[i].times { rreq := cas.tcs[i].makeReadRequest() wreq := cas.tcs[i].makeWriteRequest() rres := cas.tcs[i].makeReadResponse() @@ -539,7 +539,7 @@ func (suite *resourceManagerClientTestSuite) TestSwitchBurst() { controller.Start(suite.ctx) resourceGroupName := suite.initGroups[1].Name tcs := tokenConsumptionPerSecond{rruTokensAtATime: 1, wruTokensAtATime: 2, times: 100, waitDuration: 0} - for j := 0; j < tcs.times; j++ { + for range tcs.times { rreq := tcs.makeReadRequest() wreq := tcs.makeWriteRequest() rres := tcs.makeReadResponse() @@ -577,7 +577,7 @@ func (suite *resourceManagerClientTestSuite) TestSwitchBurst() { } v = true sum := time.Duration(0) - for j := 0; j < cas.tcs[i].times; j++ { + for range cas.tcs[i].times { rreq := cas.tcs[i].makeReadRequest() wreq := cas.tcs[i].makeWriteRequest() rres := cas.tcs[i].makeReadResponse() @@ -615,7 +615,7 @@ func (suite *resourceManagerClientTestSuite) TestSwitchBurst() { time.Sleep(110 * time.Millisecond) tcs = tokenConsumptionPerSecond{rruTokensAtATime: 1, wruTokensAtATime: 10, times: 1010, waitDuration: 0} duration := time.Duration(0) - for i := 0; i < tcs.times; i++ { + for range tcs.times { wreq = tcs.makeWriteRequest() startTime := time.Now() _, _, _, _, err = 
controller.OnRequestWait(suite.ctx, resourceGroupName3, wreq) @@ -750,7 +750,7 @@ func (suite *resourceManagerClientTestSuite) TestAcquireTokenBucket() { } re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/resourcemanager/server/fastPersist", `return(true)`)) suite.resignAndWaitLeader(re) - for i := 0; i < 3; i++ { + for range 3 { for _, group := range groups { requests := make([]*rmpb.RequestUnitItem, 0) requests = append(requests, &rmpb.RequestUnitItem{ @@ -1179,7 +1179,7 @@ func (suite *resourceManagerClientTestSuite) TestResourceManagerClientFailover() re.Equal(*group, *getResp) // Change the leader after each time we modify the resource group. - for i := 0; i < 4; i++ { + for i := range 4 { group.RUSettings.RU.Settings.FillRate += uint64(i) modifyResp, err := cli.ModifyResourceGroup(suite.ctx, group) re.NoError(err) @@ -1237,10 +1237,10 @@ func (suite *resourceManagerClientTestSuite) TestResourceManagerClientDegradedMo time.Sleep(time.Second * 2) beginTime := time.Now() // This is used to make sure resource group in lowRU. - for i := 0; i < 100; i++ { + for range 100 { controller.OnRequestWait(suite.ctx, groupName, tc2.makeWriteRequest()) } - for i := 0; i < 100; i++ { + for range 100 { controller.OnRequestWait(suite.ctx, groupName, tc.makeWriteRequest()) } endTime := time.Now() @@ -1326,7 +1326,7 @@ func (suite *resourceManagerClientTestSuite) TestRemoveStaleResourceGroup() { // Mock client binds one resource group and then closed rreq := testConfig.tcs.makeReadRequest() rres := testConfig.tcs.makeReadResponse() - for j := 0; j < testConfig.times; j++ { + for range testConfig.times { controller.OnRequestWait(suite.ctx, group.Name, rreq) controller.OnResponse(group.Name, rreq, rres) time.Sleep(100 * time.Microsecond) diff --git a/tests/integrations/mcs/tso/proxy_test.go b/tests/integrations/mcs/tso/proxy_test.go index e865d8b8a0c..1d3ba31a8ab 100644 --- a/tests/integrations/mcs/tso/proxy_test.go +++ b/tests/integrations/mcs/tso/proxy_test.go @@ -108,15 +108,15 @@ func (s *tsoProxyTestSuite) TestTSOProxyWorksWithCancellation() { defer wg.Done() go func() { defer wg.Done() - for i := 0; i < 3; i++ { + for range 3 { streams, cleanupFuncs := createTSOStreams(s.ctx, re, s.backendEndpoints, 10) - for j := 0; j < 10; j++ { + for range 10 { s.verifyTSOProxy(s.ctx, streams, cleanupFuncs, 10, true) } cleanupGRPCStreams(cleanupFuncs) } }() - for i := 0; i < 10; i++ { + for range 10 { s.verifyTSOProxy(s.ctx, s.streams, s.cleanupFuncs, 10, true) } }() @@ -146,7 +146,7 @@ func TestTSOProxyStress(_ *testing.T) { defer cancel() // Push load from many concurrent clients in multiple rounds and increase the #client each round. 
- for i := 0; i < totalRounds; i++ { + for i := range totalRounds { log.Info("start a new round of stress test", zap.Int("round-id", i), zap.Int("clients-count", len(streams)+clientsIncr)) streamsTemp, cleanupFuncsTemp := @@ -178,7 +178,7 @@ func (s *tsoProxyTestSuite) TestTSOProxyClientsWithSameContext() { ctx, cancel := context.WithCancel(s.ctx) defer cancel() - for i := 0; i < clientCount; i++ { + for i := range clientCount { conn, err := grpc.Dial(strings.TrimPrefix(s.backendEndpoints, "http://"), grpc.WithTransportCredentials(insecure.NewCredentials())) re.NoError(err) grpcPDClient := pdpb.NewPDClient(conn) @@ -308,7 +308,7 @@ func (s *tsoProxyTestSuite) verifyTSOProxy( var respErr atomic.Value wg := &sync.WaitGroup{} - for i := 0; i < len(streams); i++ { + for i := range streams { if streams[i] == nil { continue } @@ -316,7 +316,7 @@ func (s *tsoProxyTestSuite) verifyTSOProxy( go func(i int) { defer wg.Done() lastPhysical, lastLogical := int64(0), int64(0) - for j := 0; j < requestsPerClient; j++ { + for range requestsPerClient { select { case <-ctx.Done(): cleanupGRPCStream(streams, cleanupFuncs, i) @@ -358,7 +358,7 @@ func (s *tsoProxyTestSuite) verifyTSOProxy( func (s *tsoProxyTestSuite) generateRequests(requestsPerClient int) []*pdpb.TsoRequest { reqs := make([]*pdpb.TsoRequest, requestsPerClient) - for i := 0; i < requestsPerClient; i++ { + for i := range requestsPerClient { reqs[i] = &pdpb.TsoRequest{ Header: &pdpb.RequestHeader{ClusterId: s.apiLeader.GetClusterID()}, Count: uint32(i) + 1, // Make sure the count is positive. @@ -376,7 +376,7 @@ func createTSOStreams( cleanupFuncs := make([]testutil.CleanupFunc, clientCount) streams := make([]pdpb.PD_TsoClient, clientCount) - for i := 0; i < clientCount; i++ { + for i := range clientCount { conn, err := grpc.Dial(strings.TrimPrefix(backendEndpoints, "http://"), grpc.WithTransportCredentials(insecure.NewCredentials())) re.NoError(err) grpcPDClient := pdpb.NewPDClient(conn) @@ -407,7 +407,7 @@ func tsoProxy( wg.Add(1) go func(index int, streamCopy pdpb.PD_TsoClient) { defer wg.Done() - for i := 0; i < requestsPerClient; i++ { + for range requestsPerClient { if err := streamCopy.Send(tsoReq); err != nil { errsReturned[index] = err return @@ -426,7 +426,7 @@ func tsoProxy( } } else { for _, stream := range streams { - for i := 0; i < requestsPerClient; i++ { + for range requestsPerClient { if err := stream.Send(tsoReq); err != nil { return err } diff --git a/tests/integrations/mcs/tso/server_test.go b/tests/integrations/mcs/tso/server_test.go index 43b0405923c..e4d1ff319db 100644 --- a/tests/integrations/mcs/tso/server_test.go +++ b/tests/integrations/mcs/tso/server_test.go @@ -328,11 +328,11 @@ func TestResignTSOPrimaryForward(t *testing.T) { defer tc.Destroy() tc.WaitForDefaultPrimaryServing(re) - for j := 0; j < 10; j++ { + for range 10 { tc.ResignPrimary(constant.DefaultKeyspaceID, constant.DefaultKeyspaceGroupID) tc.WaitForDefaultPrimaryServing(re) var err error - for i := 0; i < 3; i++ { // try 3 times + for range 3 { // try 3 times _, _, err = suite.pdClient.GetTS(suite.ctx) if err == nil { break @@ -359,7 +359,7 @@ func TestResignAPIPrimaryForward(t *testing.T) { re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/member/skipCampaignLeaderCheck")) }() - for j := 0; j < 10; j++ { + for range 10 { suite.pdLeader.ResignLeader() suite.pdLeader = suite.cluster.GetServer(suite.cluster.WaitLeader()) suite.backendEndpoints = suite.pdLeader.GetAddr() @@ -444,7 +444,7 @@ func (suite *APIServerForward) 
checkForwardTSOUnexpectedToFollower(checkTSO func func (suite *APIServerForward) addRegions() { leader := suite.cluster.GetServer(suite.cluster.WaitLeader()) rc := leader.GetServer().GetRaftCluster() - for i := 0; i < 3; i++ { + for i := range 3 { region := &metapb.Region{ Id: uint64(i*4 + 1), Peers: []*metapb.Peer{{Id: uint64(i*4 + 2), StoreId: uint64(i*4 + 3)}}, diff --git a/tests/integrations/realcluster/real_cluster.go b/tests/integrations/realcluster/real_cluster.go index 1843b78a528..441a13f4a73 100644 --- a/tests/integrations/realcluster/real_cluster.go +++ b/tests/integrations/realcluster/real_cluster.go @@ -152,7 +152,7 @@ func waitTiupReady(t *testing.T, tag string) { maxTimes = 20 ) log.Info("start to wait TiUP ready", zap.String("tag", tag)) - for i := 0; i < maxTimes; i++ { + for i := range maxTimes { err := runCommand(tiupBin, "playground", "display", "--tag", tag) if err == nil { log.Info("TiUP is ready", zap.String("tag", tag)) diff --git a/tests/integrations/realcluster/scheduler_test.go b/tests/integrations/realcluster/scheduler_test.go index 69da846b491..c0aff2669e9 100644 --- a/tests/integrations/realcluster/scheduler_test.go +++ b/tests/integrations/realcluster/scheduler_test.go @@ -54,7 +54,7 @@ func (s *schedulerSuite) TestTransferLeader() { oldLeader := resp.Name var newLeader string - for i := 0; i < 2; i++ { + for i := range 2 { if resp.Name != fmt.Sprintf("pd-%d", i) { newLeader = fmt.Sprintf("pd-%d", i) } diff --git a/tests/integrations/tso/client_test.go b/tests/integrations/tso/client_test.go index d1a649cbfa6..35a54a2a8ad 100644 --- a/tests/integrations/tso/client_test.go +++ b/tests/integrations/tso/client_test.go @@ -207,12 +207,12 @@ func (suite *tsoClientTestSuite) TestGetTS() { re := suite.Require() var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber * len(suite.clients)) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { for _, client := range suite.clients { go func(client pd.Client) { defer wg.Done() var lastTS uint64 - for j := 0; j < tsoRequestRound; j++ { + for range tsoRequestRound { physical, logical, err := client.GetTS(suite.ctx) re.NoError(err) ts := tsoutil.ComposeTS(physical, logical) @@ -229,7 +229,7 @@ func (suite *tsoClientTestSuite) TestGetTSAsync() { re := suite.Require() var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber * len(suite.clients)) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { for _, client := range suite.clients { go func(client pd.Client) { defer wg.Done() @@ -272,7 +272,7 @@ func (suite *tsoClientTestSuite) TestDiscoverTSOServiceWithLegacyPath() { ctx, re, keyspaceID, suite.getBackendEndpoints()) defer client.Close() var lastTS uint64 - for j := 0; j < tsoRequestRound; j++ { + for range tsoRequestRound { physical, logical, err := client.GetTS(ctx) re.NoError(err) ts := tsoutil.ComposeTS(physical, logical) @@ -286,12 +286,12 @@ func (suite *tsoClientTestSuite) TestGetMinTS() { re := suite.Require() var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber * len(suite.clients)) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { for _, client := range suite.clients { go func(client pd.Client) { defer wg.Done() var lastMinTS uint64 - for j := 0; j < tsoRequestRound; j++ { + for range tsoRequestRound { physical, logical, err := client.GetMinTS(suite.ctx) re.NoError(err) minTS := tsoutil.ComposeTS(physical, logical) @@ -332,7 +332,7 @@ func (suite *tsoClientTestSuite) 
TestUpdateAfterResetTSO() { defer func() { re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/member/skipCampaignLeaderCheck")) }() - for i := 0; i < len(suite.clients); i++ { + for i := range suite.clients { client := suite.clients[i] testutil.Eventually(re, func() bool { _, _, err := client.GetTS(ctx) @@ -446,7 +446,7 @@ func (suite *tsoClientTestSuite) TestGetTSWhileResettingTSOClient() { ) wg.Add(tsoRequestConcurrencyNumber * len(suite.clients)) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { for _, client := range suite.clients { go func(client pd.Client) { defer wg.Done() @@ -465,7 +465,7 @@ func (suite *tsoClientTestSuite) TestGetTSWhileResettingTSOClient() { } } // Reset the TSO clients while requesting TSO concurrently. - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { for _, client := range suite.clients { client.(interface{ ResetTSOClient() }).ResetTSOClient() } @@ -514,7 +514,7 @@ func TestMixedTSODeployment(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - for i := 0; i < 2; i++ { + for range 2 { n := r.Intn(2) + 1 time.Sleep(time.Duration(n) * time.Second) leaderServer.ResignLeader() @@ -578,7 +578,7 @@ func checkTSO( ctx context.Context, re *require.Assertions, wg *sync.WaitGroup, backendEndpoints string, ) { wg.Add(tsoRequestConcurrencyNumber) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { go func() { defer wg.Done() cli := mcs.SetupClientWithAPIContext(ctx, re, pd.NewAPIContextV1(), strings.Split(backendEndpoints, ",")) diff --git a/tests/integrations/tso/consistency_test.go b/tests/integrations/tso/consistency_test.go index f82f58ee6c8..f9bafb9e71a 100644 --- a/tests/integrations/tso/consistency_test.go +++ b/tests/integrations/tso/consistency_test.go @@ -156,7 +156,7 @@ func (suite *tsoConsistencyTestSuite) requestTSOConcurrently() { var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { go func() { defer wg.Done() last := &pdpb.Timestamp{ @@ -164,7 +164,7 @@ func (suite *tsoConsistencyTestSuite) requestTSOConcurrently() { Logical: 0, } var ts *pdpb.Timestamp - for j := 0; j < tsoRequestRound; j++ { + for range tsoRequestRound { ts = suite.request(ctx, tsoCount) // Check whether the TSO fallbacks re.Equal(1, tsoutil.CompareTimestamp(ts, last)) @@ -191,7 +191,7 @@ func (suite *tsoConsistencyTestSuite) TestFallbackTSOConsistency() { defer cancel() var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { go func() { defer wg.Done() last := &pdpb.Timestamp{ @@ -199,7 +199,7 @@ func (suite *tsoConsistencyTestSuite) TestFallbackTSOConsistency() { Logical: 0, } var ts *pdpb.Timestamp - for j := 0; j < tsoRequestRound; j++ { + for range tsoRequestRound { ts = suite.request(ctx, tsoCount) re.Equal(1, tsoutil.CompareTimestamp(ts, last)) last = ts diff --git a/tests/integrations/tso/server_test.go b/tests/integrations/tso/server_test.go index 651a1df96b4..828518d72c0 100644 --- a/tests/integrations/tso/server_test.go +++ b/tests/integrations/tso/server_test.go @@ -151,7 +151,7 @@ func (suite *tsoServerTestSuite) TestConcurrentlyReset() { var wg sync.WaitGroup wg.Add(2) now := time.Now() - for i := 0; i < 2; i++ { + for range 2 { go func() { defer wg.Done() for j := 0; j <= 50; j++ { diff --git a/tests/scheduling_cluster.go 
b/tests/scheduling_cluster.go index 434a6bd9a48..3f7c39eb81c 100644 --- a/tests/scheduling_cluster.go +++ b/tests/scheduling_cluster.go @@ -44,7 +44,7 @@ func NewTestSchedulingCluster(ctx context.Context, initialServerCount int, backe servers: make(map[string]*scheduling.Server, initialServerCount), cleanupFuncs: make(map[string]testutil.CleanupFunc, initialServerCount), } - for i := 0; i < initialServerCount; i++ { + for range initialServerCount { err = tc.AddServer(tempurl.Alloc()) if err != nil { return nil, err diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index 8d48221784a..828213587d6 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -254,7 +254,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { re.NoError(err) re.Equal(http.StatusOK, resp.StatusCode) - for i := 0; i < 3; i++ { + for i := range 3 { req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = tests.TestDialClient.Do(req) re.NoError(err) @@ -271,7 +271,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { // qps = 0.5, so sleep 2s time.Sleep(time.Second * 2) - for i := 0; i < 2; i++ { + for i := range 2 { req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = tests.TestDialClient.Do(req) re.NoError(err) @@ -288,7 +288,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { // test only sleep 1s time.Sleep(time.Second) - for i := 0; i < 2; i++ { + for range 2 { req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = tests.TestDialClient.Do(req) re.NoError(err) @@ -315,7 +315,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { re.Equal(0.5, cfg.QPS) re.Equal(1, cfg.QPSBurst) - for i := 0; i < 3; i++ { + for i := range 3 { req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = tests.TestDialClient.Do(req) re.NoError(err) @@ -332,7 +332,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { // qps = 0.5, so sleep 2s time.Sleep(time.Second * 2) - for i := 0; i < 2; i++ { + for i := range 2 { req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = tests.TestDialClient.Do(req) re.NoError(err) @@ -349,7 +349,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { // test only sleep 1s time.Sleep(time.Second) - for i := 0; i < 2; i++ { + for range 2 { req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = tests.TestDialClient.Do(req) re.NoError(err) @@ -371,7 +371,7 @@ func (suite *middlewareTestSuite) TestRateLimitMiddleware() { resp.Body.Close() re.False(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled()) - for i := 0; i < 3; i++ { + for range 3 { req, _ = http.NewRequest(http.MethodPost, leader.GetAddr()+"/pd/api/v1/admin/log", strings.NewReader("\"info\"")) resp, err = tests.TestDialClient.Do(req) re.NoError(err) @@ -998,7 +998,7 @@ func TestPreparingProgress(t *testing.T) { for _, store := range stores[:2] { tests.MustPutStore(re, cluster, store) } - for i := 0; i < core.InitClusterRegionThreshold; i++ { + for i := range core.InitClusterRegionThreshold { tests.MustPutRegion(re, cluster, uint64(i+1), uint64(i)%3+1, []byte(fmt.Sprintf("%20d", i)), 
[]byte(fmt.Sprintf("%20d", i+1)), core.SetApproximateSize(10)) } testutil.Eventually(re, func() bool { diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go index 303264d7057..3e8adabd4ca 100644 --- a/tests/server/api/rule_test.go +++ b/tests/server/api/rule_test.go @@ -173,7 +173,7 @@ func (suite *ruleTestSuite) checkSet(cluster *tests.TestCluster) { if testCase.success { err = tu.CheckPostJSON(tests.TestDialClient, urlPrefix+"/rule", testCase.rawData, tu.StatusOK(re)) popKeyRangeMap := map[string]struct{}{} - for i := 0; i < len(testCase.popKeyRange)/2; i++ { + for range len(testCase.popKeyRange) / 2 { v, got := leaderServer.GetRaftCluster().PopOneSuspectKeyRange() re.True(got) popKeyRangeMap[hex.EncodeToString(v[0])] = struct{}{} @@ -614,7 +614,7 @@ func (suite *ruleTestSuite) checkDelete(cluster *tests.TestCluster) { re.NoError(err) if len(testCase.popKeyRange) > 0 { popKeyRangeMap := map[string]struct{}{} - for i := 0; i < len(testCase.popKeyRange)/2; i++ { + for range len(testCase.popKeyRange) / 2 { v, got := leaderServer.GetRaftCluster().PopOneSuspectKeyRange() re.True(got) popKeyRangeMap[hex.EncodeToString(v[0])] = struct{}{} @@ -1136,7 +1136,7 @@ func (suite *ruleTestSuite) checkConcurrencyWith(cluster *tests.TestCluster, bundle := genBundle(i) data, err := json.Marshal(bundle) re.NoError(err) - for j := 0; j < 10; j++ { + for range 10 { expectResult.Lock() err = tu.CheckPostJSON(tests.TestDialClient, urlPrefix+"/config/placement-rule", data, tu.StatusOK(re)) re.NoError(err) @@ -1176,7 +1176,7 @@ func (suite *ruleTestSuite) checkLargeRules(cluster *tests.TestCluster) { Rules: make([]*placement.Rule, 0), }, } - for i := 0; i < num; i++ { + for i := range num { bundle[0].Rules = append(bundle[0].Rules, &placement.Rule{ ID: strconv.Itoa(i), Index: i, Role: placement.Voter, Count: 1, GroupID: "1", StartKey: []byte(strconv.Itoa(i)), EndKey: []byte(strconv.Itoa(i + 1)), diff --git a/tests/server/apiv2/handlers/keyspace_test.go b/tests/server/apiv2/handlers/keyspace_test.go index f3aa55bbe43..f3fa01851ac 100644 --- a/tests/server/apiv2/handlers/keyspace_test.go +++ b/tests/server/apiv2/handlers/keyspace_test.go @@ -149,7 +149,7 @@ func mustMakeTestKeyspaces(re *require.Assertions, server *tests.TestServer, cou "config2": "200", } resultMeta := make([]*keyspacepb.KeyspaceMeta, count) - for i := 0; i < count; i++ { + for i := range count { createRequest := &handlers.CreateKeyspaceParams{ Name: fmt.Sprintf("test_keyspace_%d", i), Config: testConfig, diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index e1a56982f2d..82c8a5766d3 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -614,7 +614,7 @@ func TestRaftClusterMultipleRestart(t *testing.T) { // let the job run at small interval re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) - for i := 0; i < 100; i++ { + for range 100 { // See https://github.com/tikv/pd/issues/8543 rc.Wait() err = rc.Start(leaderServer.GetServer()) @@ -795,7 +795,7 @@ func TestConcurrentHandleRegion(t *testing.T) { } concurrent := 1000 - for i := 0; i < concurrent; i++ { + for i := range concurrent { peerID, err := id.Alloc() re.NoError(err) regionID, err := id.Alloc() @@ -927,7 +927,7 @@ func TestLoadClusterInfo(t *testing.T) { meta := &metapb.Cluster{Id: 123} re.NoError(testStorage.SaveMeta(meta)) stores := make([]*metapb.Store, 0, n) - for i := 0; i < n; i++ { + for i := range n { store := 
&metapb.Store{Id: uint64(i)} stores = append(stores, store) } @@ -937,7 +937,7 @@ func TestLoadClusterInfo(t *testing.T) { } regions := make([]*metapb.Region, 0, n) - for i := uint64(0); i < uint64(n); i++ { + for i := range uint64(n) { region := &metapb.Region{ Id: i, StartKey: []byte(fmt.Sprintf("%20d", i)), @@ -971,7 +971,7 @@ func TestLoadClusterInfo(t *testing.T) { m := 20 regions = make([]*metapb.Region, 0, n) - for i := uint64(0); i < uint64(m); i++ { + for i := range uint64(m) { region := &metapb.Region{ Id: i, StartKey: []byte(fmt.Sprintf("%20d", i)), @@ -1493,7 +1493,7 @@ func checkEvictLeaderStoreIDs(re *require.Assertions, sc *schedulers.Controller, } func putRegionWithLeader(re *require.Assertions, rc *cluster.RaftCluster, id id.Allocator, storeID uint64) { - for i := 0; i < 3; i++ { + for i := range 3 { regionID, err := id.Alloc() re.NoError(err) peerID, err := id.Alloc() diff --git a/tests/server/id/id_test.go b/tests/server/id/id_test.go index 8b0e7ec60b7..465259063bf 100644 --- a/tests/server/id/id_test.go +++ b/tests/server/id/id_test.go @@ -48,7 +48,7 @@ func TestID(t *testing.T) { leaderServer := cluster.GetLeaderServer() var last uint64 - for i := uint64(0); i < allocStep; i++ { + for range allocStep { id, err := leaderServer.GetAllocator().Alloc() re.NoError(err) re.Greater(id, last) @@ -60,12 +60,12 @@ func TestID(t *testing.T) { var m syncutil.Mutex ids := make(map[uint64]struct{}) - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { defer wg.Done() - for i := 0; i < 200; i++ { + for range 200 { id, err := leaderServer.GetAllocator().Alloc() re.NoError(err) m.Lock() @@ -97,7 +97,7 @@ func TestCommand(t *testing.T) { grpcPDClient := testutil.MustNewGrpcClient(re, leaderServer.GetAddr()) var last uint64 - for i := uint64(0); i < 2*allocStep; i++ { + for range 2 * allocStep { resp, err := grpcPDClient.AllocID(context.Background(), req) re.NoError(err) re.Equal(pdpb.ErrorType_OK, resp.GetHeader().GetError().GetType()) @@ -124,7 +124,7 @@ func TestMonotonicID(t *testing.T) { leaderServer := cluster.GetLeaderServer() var last1 uint64 - for i := uint64(0); i < 10; i++ { + for range 10 { id, err := leaderServer.GetAllocator().Alloc() re.NoError(err) re.Greater(id, last1) @@ -135,7 +135,7 @@ func TestMonotonicID(t *testing.T) { re.NotEmpty(cluster.WaitLeader()) leaderServer = cluster.GetLeaderServer() var last2 uint64 - for i := uint64(0); i < 10; i++ { + for range 10 { id, err := leaderServer.GetAllocator().Alloc() re.NoError(err) re.Greater(id, last2) @@ -149,7 +149,7 @@ func TestMonotonicID(t *testing.T) { re.NoError(err) re.Greater(id, last2) var last3 uint64 - for i := uint64(0); i < 1000; i++ { + for range 1000 { id, err := leaderServer.GetAllocator().Alloc() re.NoError(err) re.Greater(id, last3) @@ -171,7 +171,7 @@ func TestPDRestart(t *testing.T) { leaderServer := cluster.GetLeaderServer() var last uint64 - for i := uint64(0); i < 10; i++ { + for range 10 { id, err := leaderServer.GetAllocator().Alloc() re.NoError(err) re.Greater(id, last) @@ -182,7 +182,7 @@ func TestPDRestart(t *testing.T) { re.NoError(leaderServer.Run()) re.NotEmpty(cluster.WaitLeader()) - for i := uint64(0); i < 10; i++ { + for range 10 { id, err := leaderServer.GetAllocator().Alloc() re.NoError(err) re.Greater(id, last) diff --git a/tests/server/keyspace/keyspace_test.go b/tests/server/keyspace/keyspace_test.go index d6e188359ce..a0175c1b727 100644 --- a/tests/server/keyspace/keyspace_test.go +++ b/tests/server/keyspace/keyspace_test.go @@ -80,7 +80,7 @@ func (suite 
*keyspaceTestSuite) TestRegionLabeler() { keyspaces := make([]*keyspacepb.KeyspaceMeta, count) manager := suite.manager var err error - for i := 0; i < count; i++ { + for i := range count { keyspaces[i], err = manager.CreateKeyspace(&keyspace.CreateKeyspaceRequest{ Name: fmt.Sprintf("test_keyspace_%d", i), CreateTime: now, diff --git a/tests/server/member/member_test.go b/tests/server/member/member_test.go index 2d0c9098153..13f7fffe083 100644 --- a/tests/server/member/member_test.go +++ b/tests/server/member/member_test.go @@ -355,7 +355,7 @@ func TestCampaignLeaderFrequently(t *testing.T) { re.NotEmpty(cluster.GetLeader()) // need to prevent 3 times(including the above 1st time) campaign leader in 5 min. - for i := 0; i < 2; i++ { + for range 2 { cluster.GetLeaderServer().ResetPDLeader() re.NotEmpty(cluster.WaitLeader()) re.Equal(leader, cluster.GetLeader()) @@ -383,7 +383,7 @@ func TestGrantLeaseFailed(t *testing.T) { re.NotEmpty(cluster.GetLeader()) re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/election/skipGrantLeader", fmt.Sprintf("return(\"%s\")", leader))) - for i := 0; i < 3; i++ { + for range 3 { cluster.GetLeaderServer().ResetPDLeader() re.NotEmpty(cluster.WaitLeader()) } diff --git a/tests/server/server_test.go b/tests/server/server_test.go index 68c4d5da65e..3f1769a97f8 100644 --- a/tests/server/server_test.go +++ b/tests/server/server_test.go @@ -151,7 +151,7 @@ func TestGRPCRateLimit(t *testing.T) { addr := leaderServer.GetAddr() grpcPDClient := testutil.MustNewGrpcClient(re, addr) leaderServer.BootstrapCluster() - for i := 0; i < 100; i++ { + for range 100 { resp, err := grpcPDClient.GetRegion(context.Background(), &pdpb.GetRegionRequest{ Header: &pdpb.RequestHeader{ClusterId: clusterID}, RegionKey: []byte(""), @@ -170,7 +170,7 @@ func TestGRPCRateLimit(t *testing.T) { err = testutil.CheckPostJSON(tests.TestDialClient, urlPrefix, jsonBody, testutil.StatusOK(re), testutil.StringContain(re, "gRPC limiter is updated")) re.NoError(err) - for i := 0; i < 2; i++ { + for i := range 2 { resp, err := grpcPDClient.GetRegion(context.Background(), &pdpb.GetRegionRequest{ Header: &pdpb.RequestHeader{ClusterId: leaderServer.GetClusterID()}, RegionKey: []byte(""), @@ -190,7 +190,7 @@ func TestGRPCRateLimit(t *testing.T) { err = testutil.CheckPostJSON(tests.TestDialClient, urlPrefix, jsonBody, testutil.StatusOK(re), testutil.StringContain(re, "gRPC limiter is deleted")) re.NoError(err) - for i := 0; i < 100; i++ { + for range 100 { resp, err := grpcPDClient.GetRegion(context.Background(), &pdpb.GetRegionRequest{ Header: &pdpb.RequestHeader{ClusterId: leaderServer.GetClusterID()}, RegionKey: []byte(""), diff --git a/tests/server/tso/consistency_test.go b/tests/server/tso/consistency_test.go index 1bf20cce20d..c7acc69fa60 100644 --- a/tests/server/tso/consistency_test.go +++ b/tests/server/tso/consistency_test.go @@ -92,7 +92,7 @@ func (suite *tsoConsistencyTestSuite) TestSynchronizedGlobalTSO() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() maxGlobalTSO := &pdpb.Timestamp{} - for i := 0; i < tsoRequestRound; i++ { + for range tsoRequestRound { // Get some local TSOs first oldLocalTSOs := make([]*pdpb.Timestamp, 0, dcLocationNum) for _, dcLocation := range dcLocationConfig { @@ -303,7 +303,7 @@ func (suite *tsoConsistencyTestSuite) testTSO(cluster *tests.TestCluster, dcLoca var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber) - for i := 0; i < tsoRequestConcurrencyNumber; i++ { + for range tsoRequestConcurrencyNumber { go func() { defer wg.Done() lastList 
:= make(map[string]*pdpb.Timestamp) @@ -313,7 +313,7 @@ func (suite *tsoConsistencyTestSuite) testTSO(cluster *tests.TestCluster, dcLoca Logical: 0, } } - for j := 0; j < tsoRequestRound; j++ { + for range tsoRequestRound { for _, dcLocation := range dcLocationConfig { req := &pdpb.TsoRequest{ Header: testutil.NewRequestHeader(leaderServer.GetClusterID()), diff --git a/tests/server/tso/global_tso_test.go b/tests/server/tso/global_tso_test.go index c340c44d3d2..bf39c57e3d6 100644 --- a/tests/server/tso/global_tso_test.go +++ b/tests/server/tso/global_tso_test.go @@ -157,7 +157,7 @@ func TestLogicalOverflow(t *testing.T) { defer tsoClient.CloseSend() begin := time.Now() - for i := 0; i < 3; i++ { + for i := range 3 { req := &pdpb.TsoRequest{ Header: testutil.NewRequestHeader(clusterID), Count: 150000, diff --git a/tests/testutil.go b/tests/testutil.go index 98a64c4686c..03ec7bbe805 100644 --- a/tests/testutil.go +++ b/tests/testutil.go @@ -68,7 +68,7 @@ func SetRangePort(start, end int) { dialer := &net.Dialer{} randomPort := strconv.Itoa(rand.Intn(portRange[1]-portRange[0]) + portRange[0]) testPortMutex.Lock() - for i := 0; i < 10; i++ { + for range 10 { if _, ok := testPortMap[randomPort]; !ok { break } @@ -431,7 +431,7 @@ func (i *idAllocator) alloc() uint64 { func InitRegions(regionLen int) []*core.RegionInfo { allocator := &idAllocator{allocator: mockid.NewIDAllocator()} regions := make([]*core.RegionInfo, 0, regionLen) - for i := 0; i < regionLen; i++ { + for i := range regionLen { r := &metapb.Region{ Id: allocator.alloc(), RegionEpoch: &metapb.RegionEpoch{ diff --git a/tests/tso_cluster.go b/tests/tso_cluster.go index 2087e9d4422..28a52580a1f 100644 --- a/tests/tso_cluster.go +++ b/tests/tso_cluster.go @@ -46,7 +46,7 @@ func NewTestTSOCluster(ctx context.Context, initialServerCount int, backendEndpo servers: make(map[string]*tso.Server, initialServerCount), cleanupFuncs: make(map[string]testutil.CleanupFunc, initialServerCount), } - for i := 0; i < initialServerCount; i++ { + for range initialServerCount { err = tc.AddServer(tempurl.Alloc()) if err != nil { return nil, err diff --git a/tools/pd-analysis/analysis/parse_log_test.go b/tools/pd-analysis/analysis/parse_log_test.go index 345f70959f8..c97f6f9e594 100644 --- a/tools/pd-analysis/analysis/parse_log_test.go +++ b/tools/pd-analysis/analysis/parse_log_test.go @@ -27,7 +27,7 @@ func transferCounterParseLog(operator, content string, expect []uint64) bool { if len(results) != len(expect) { return false } - for i := 0; i < len(results); i++ { + for i := range results { if results[i] != expect[i] { return false } diff --git a/tools/pd-analysis/analysis/transfer_counter.go b/tools/pd-analysis/analysis/transfer_counter.go index 8f472ae1e03..98d131117b4 100644 --- a/tools/pd-analysis/analysis/transfer_counter.go +++ b/tools/pd-analysis/analysis/transfer_counter.go @@ -125,7 +125,7 @@ func (c *TransferCounter) prepare() { } c.graphMat = nil - for i := 0; i < c.scheduledStoreNum; i++ { + for range c.scheduledStoreNum { tmp := make([]uint64, c.scheduledStoreNum) c.graphMat = append(c.graphMat, tmp) } @@ -157,7 +157,7 @@ func (c *TransferCounter) dfs(cur int, path []int) { if path[0] == target { // is a loop // get curMinFlow curMinFlow := flow - for i := 0; i < len(path)-1; i++ { + for i := range len(path) - 1 { pathFlow := c.graphMat[path[i]][path[i+1]] if curMinFlow > pathFlow { curMinFlow = pathFlow @@ -167,7 +167,7 @@ func (c *TransferCounter) dfs(cur int, path []int) { if curMinFlow != 0 { c.loopResultPath = append(c.loopResultPath, 
path) c.loopResultCount = append(c.loopResultCount, curMinFlow*uint64(len(path))) - for i := 0; i < len(path)-1; i++ { + for i := range len(path) - 1 { c.graphMat[path[i]][path[i+1]] -= curMinFlow } c.graphMat[cur][target] -= curMinFlow @@ -186,7 +186,7 @@ func (c *TransferCounter) Result() { c.prepare() } - for i := 0; i < c.scheduledStoreNum; i++ { + for i := range c.scheduledStoreNum { c.dfs(i, make([]int, 0)) } diff --git a/tools/pd-analysis/analysis/transfer_counter_test.go b/tools/pd-analysis/analysis/transfer_counter_test.go index 092767cd49d..39fb9d5ede8 100644 --- a/tools/pd-analysis/analysis/transfer_counter_test.go +++ b/tools/pd-analysis/analysis/transfer_counter_test.go @@ -23,7 +23,7 @@ import ( func addData(test [][]uint64) { for i, row := range test { for j, flow := range row { - for k := uint64(0); k < flow; k++ { + for range flow { GetTransferCounter().AddTarget(64, uint64(j)) GetTransferCounter().AddSource(64, uint64(i)) } diff --git a/tools/pd-api-bench/cases/cases.go b/tools/pd-api-bench/cases/cases.go index f863d3248c6..54c1247c208 100644 --- a/tools/pd-api-bench/cases/cases.go +++ b/tools/pd-api-bench/cases/cases.go @@ -452,7 +452,7 @@ func newGetKV() func() EtcdCase { } func (*getKV) init(ctx context.Context, cli *clientv3.Client) error { - for i := 0; i < 100; i++ { + for i := range 100 { _, err := cli.Put(ctx, fmt.Sprintf("/test/0001/%4d", i), fmt.Sprintf("%4d", i)) if err != nil { return err diff --git a/tools/pd-api-bench/cases/controller.go b/tools/pd-api-bench/cases/controller.go index 75c3c25f7ab..d6bc39d6d35 100644 --- a/tools/pd-api-bench/cases/controller.go +++ b/tools/pd-api-bench/cases/controller.go @@ -221,7 +221,7 @@ func (c *httpController) run() { go func(hCli pdHttp.Client) { defer c.wg.Done() c.wg.Add(int(burst)) - for i := int64(0); i < burst; i++ { + for range burst { go func() { defer c.wg.Done() ticker := time.NewTicker(tt) @@ -290,7 +290,7 @@ func (c *gRPCController) run() { go func(cli pd.Client) { defer c.wg.Done() c.wg.Add(int(burst)) - for i := int64(0); i < burst; i++ { + for range burst { go func() { defer c.wg.Done() ticker := time.NewTicker(tt) @@ -364,7 +364,7 @@ func (c *etcdController) run() { go func(cli *clientv3.Client) { defer c.wg.Done() c.wg.Add(int(burst)) - for i := int64(0); i < burst; i++ { + for range burst { go func() { defer c.wg.Done() ticker := time.NewTicker(tt) diff --git a/tools/pd-api-bench/main.go b/tools/pd-api-bench/main.go index b30b21aab1d..c7af4c61ac6 100644 --- a/tools/pd-api-bench/main.go +++ b/tools/pd-api-bench/main.go @@ -116,16 +116,16 @@ func main() { return } pdClis := make([]pd.Client, cfg.Client) - for i := int64(0); i < cfg.Client; i++ { + for i := range cfg.Client { pdClis[i] = newPDClient(ctx, cfg) pdClis[i].UpdateOption(pd.EnableFollowerHandle, true) } etcdClis := make([]*clientv3.Client, cfg.Client) - for i := int64(0); i < cfg.Client; i++ { + for i := range cfg.Client { etcdClis[i] = newEtcdClient(cfg) } httpClis := make([]pdHttp.Client, cfg.Client) - for i := int64(0); i < cfg.Client; i++ { + for i := range cfg.Client { sd := pdClis[i].GetServiceDiscovery() httpClis[i] = pdHttp.NewClientWithServiceDiscovery("tools-api-bench", sd, pdHttp.WithTLSConfig(loadTLSConfig(cfg)), pdHttp.WithMetrics(pdAPIRequestCounter, pdAPIExecutionHistogram)) } diff --git a/tools/pd-ctl/pdctl/ctl.go b/tools/pd-ctl/pdctl/ctl.go index 5f8c6485c42..77f1601c8f5 100644 --- a/tools/pd-ctl/pdctl/ctl.go +++ b/tools/pd-ctl/pdctl/ctl.go @@ -173,7 +173,7 @@ func genCompleter(cmd *cobra.Command) 
[]readline.PrefixCompleterInterface { if v.HasFlags() { flagsPc := []readline.PrefixCompleterInterface{} flagUsages := strings.Split(strings.Trim(v.Flags().FlagUsages(), " "), "\n") - for i := 0; i < len(flagUsages)-1; i++ { + for i := range len(flagUsages) - 1 { flagsPc = append(flagsPc, readline.PcItem(strings.Split(strings.Trim(flagUsages[i], " "), " ")[0])) } flagsPc = append(flagsPc, genCompleter(v)...) diff --git a/tools/pd-ctl/pdctl/ctl_test.go b/tools/pd-ctl/pdctl/ctl_test.go index d9cea460e21..ecc9385d045 100644 --- a/tools/pd-ctl/pdctl/ctl_test.go +++ b/tools/pd-ctl/pdctl/ctl_test.go @@ -57,7 +57,7 @@ func TestGenCompleter(t *testing.T) { if len(runArray) != len(v.GetName())-1 { continue } - for i := 0; i < len(runArray); i++ { + for i := range runArray { if runArray[i] != v.GetName()[i] { inPrefixArray = false } diff --git a/tools/pd-ctl/tests/config/config_test.go b/tools/pd-ctl/tests/config/config_test.go index 5a9e077d7e4..c3697c065e7 100644 --- a/tools/pd-ctl/tests/config/config_test.go +++ b/tools/pd-ctl/tests/config/config_test.go @@ -1272,7 +1272,7 @@ func (suite *configTestSuite) checkRegionRules(cluster *pdTests.TestCluster) { func assertBundles(re *require.Assertions, a, b []placement.GroupBundle) { re.Len(b, len(a)) - for i := 0; i < len(a); i++ { + for i := range a { assertBundle(re, a[i], b[i]) } } @@ -1282,7 +1282,7 @@ func assertBundle(re *require.Assertions, a, b placement.GroupBundle) { re.Equal(a.Index, b.Index) re.Equal(a.Override, b.Override) re.Len(b.Rules, len(a.Rules)) - for i := 0; i < len(a.Rules); i++ { + for i := range a.Rules { assertRule(re, a.Rules[i], b.Rules[i]) } } diff --git a/tools/pd-ctl/tests/hot/hot_test.go b/tools/pd-ctl/tests/hot/hot_test.go index 641bab686a4..e12b6a39a60 100644 --- a/tools/pd-ctl/tests/hot/hot_test.go +++ b/tools/pd-ctl/tests/hot/hot_test.go @@ -343,7 +343,7 @@ func (suite *hotTestSuite) checkHotWithoutHotPeer(cluster *pdTests.TestCluster) load := 1024.0 s := &server.GrpcServer{Server: leaderServer.GetServer()} for _, store := range stores { - for i := 0; i < 5; i++ { + for i := range 5 { resp1, err := s.StoreHeartbeat( context.Background(), &pdpb.StoreHeartbeatRequest{ Header: &pdpb.RequestHeader{ClusterId: leaderServer.GetClusterID()}, diff --git a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go index 2acb38af47e..9c16b0751f6 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go @@ -97,7 +97,7 @@ func TestSplitKeyspaceGroup(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`)) keyspaces := make([]string, 0) // we test the case which exceed the default max txn ops limit in etcd, which is 128. 
- for i := 0; i < 129; i++ { + for i := range 129 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { @@ -152,7 +152,7 @@ func TestExternalAllocNodeWhenStart(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`)) keyspaces := make([]string, 0) - for i := 0; i < 10; i++ { + for i := range 10 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { @@ -192,7 +192,7 @@ func TestSetNodeAndPriorityKeyspaceGroup(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() keyspaces := make([]string, 0) - for i := 0; i < 10; i++ { + for i := range 10 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } tc, err := pdTests.NewTestAPICluster(ctx, 3, func(conf *config.Config, _ string) { @@ -296,7 +296,7 @@ func TestMergeKeyspaceGroup(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`)) keyspaces := make([]string, 0) // we test the case which exceed the default max txn ops limit in etcd, which is 128. - for i := 0; i < 129; i++ { + for i := range 129 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { @@ -415,7 +415,7 @@ func TestKeyspaceGroupState(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`)) keyspaces := make([]string, 0) - for i := 0; i < 10; i++ { + for i := range 10 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { @@ -506,7 +506,7 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/fastGroupSplitPatroller", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`)) keyspaces := make([]string, 0) - for i := 0; i < 10; i++ { + for i := range 10 { keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i)) } tc, err := pdTests.NewTestAPICluster(ctx, 1, func(conf *config.Config, _ string) { diff --git a/tools/pd-ctl/tests/keyspace/keyspace_test.go b/tools/pd-ctl/tests/keyspace/keyspace_test.go index b0cb1a2293e..4aa3be1d21c 100644 --- a/tools/pd-ctl/tests/keyspace/keyspace_test.go +++ b/tools/pd-ctl/tests/keyspace/keyspace_test.go @@ -283,7 +283,7 @@ func (suite *keyspaceTestSuite) TestUpdateKeyspaceState() { func (suite *keyspaceTestSuite) TestListKeyspace() { re := suite.Require() var param api.CreateKeyspaceParams - for i := 0; i < 10; i++ { + for i := range 10 { param = api.CreateKeyspaceParams{ Name: fmt.Sprintf("test_keyspace_%d", i), Config: map[string]string{ diff --git a/tools/pd-ctl/tests/region/region_test.go b/tools/pd-ctl/tests/region/region_test.go index 49f1eaa0a58..03a1c04ef19 100644 --- a/tools/pd-ctl/tests/region/region_test.go +++ b/tools/pd-ctl/tests/region/region_test.go @@ -292,7 +292,7 @@ func TestRegionNoLeader(t *testing.T) { leaderServer := cluster.GetLeaderServer() re.NoError(leaderServer.BootstrapCluster()) - for i := 0; i < len(stores); i++ { + for i := range stores { 
pdTests.MustPutStore(re, cluster, stores[i]) } diff --git a/tools/pd-ctl/tests/safepoint/safepoint_test.go b/tools/pd-ctl/tests/safepoint/safepoint_test.go index 5551cce1fff..9a9c54460bd 100644 --- a/tools/pd-ctl/tests/safepoint/safepoint_test.go +++ b/tools/pd-ctl/tests/safepoint/safepoint_test.go @@ -99,7 +99,7 @@ func TestSafepoint(t *testing.T) { } // delete the safepoints - for i := 0; i < 3; i++ { + for i := range 3 { args = []string{"-u", pdAddr, "service-gc-safepoint", "delete", list.ServiceGCSafepoints[i].ServiceID} output, err = tests.ExecuteCommand(cmd, args...) re.NoError(err) diff --git a/tools/pd-ctl/tests/store/store_test.go b/tools/pd-ctl/tests/store/store_test.go index 983c86b242a..2f68827a816 100644 --- a/tools/pd-ctl/tests/store/store_test.go +++ b/tools/pd-ctl/tests/store/store_test.go @@ -211,7 +211,7 @@ func TestStore(t *testing.T) { re.NoError(json.Unmarshal(output, &storeInfo)) labels := storeInfo.Store.Labels re.Len(labels, testcase.expectLabelLength) - for i := 0; i < testcase.expectLabelLength; i++ { + for i := range testcase.expectLabelLength { re.Equal(testcase.expectKeys[i], labels[i].Key) re.Equal(testcase.expectValues[i], labels[i].Value) } diff --git a/tools/pd-heartbeat-bench/main.go b/tools/pd-heartbeat-bench/main.go index e77f0797bef..a3200bf773a 100644 --- a/tools/pd-heartbeat-bench/main.go +++ b/tools/pd-heartbeat-bench/main.go @@ -202,7 +202,7 @@ func (rs *Regions) init(cfg *config.Config) { id := uint64(1) now := uint64(time.Now().Unix()) - for i := 0; i < cfg.RegionCount; i++ { + for i := range cfg.RegionCount { region := &pdpb.RegionHeartbeatRequest{ Header: header(), Region: &metapb.Region{ @@ -229,7 +229,7 @@ func (rs *Regions) init(cfg *config.Config) { } peers := make([]*metapb.Peer, 0, cfg.Replica) - for j := 0; j < cfg.Replica; j++ { + for j := range cfg.Replica { peers = append(peers, &metapb.Peer{Id: id, StoreId: uint64((i+j)%cfg.StoreCount + 1)}) id += 1 } diff --git a/tools/pd-heartbeat-bench/metrics/util.go b/tools/pd-heartbeat-bench/metrics/util.go index f747e5b507f..9a61feee420 100644 --- a/tools/pd-heartbeat-bench/metrics/util.go +++ b/tools/pd-heartbeat-bench/metrics/util.go @@ -133,7 +133,7 @@ func CollectMetrics(curRound int, wait time.Duration) { sum float64 count int }, len(metrics2Collect)) - for i := 0; i < 5; i++ { + for range 5 { for j, m := range metrics2Collect { r, err := getMetric(prometheusCli, m.promSQL, time.Now()) if err != nil { @@ -151,7 +151,7 @@ func CollectMetrics(curRound int, wait time.Duration) { } return res[index].sum / float64(res[index].count) } - for i := 0; i < len(metrics2Collect); i++ { + for i := range metrics2Collect { metrics2Collect[i].value = getRes(i) if metrics2Collect[i].max { finalMetrics2Collect[i].value = max(finalMetrics2Collect[i].value, metrics2Collect[i].value) diff --git a/tools/pd-simulator/simulator/cases/balance_leader.go b/tools/pd-simulator/simulator/cases/balance_leader.go index 1dad09850a5..a6790548dc1 100644 --- a/tools/pd-simulator/simulator/cases/balance_leader.go +++ b/tools/pd-simulator/simulator/cases/balance_leader.go @@ -30,7 +30,7 @@ func newBalanceLeader(config *sc.SimConfig) *Case { totalRegion := config.TotalRegion allStores := make(map[uint64]struct{}, totalStore) replica := int(config.ServerConfig.Replication.MaxReplicas) - for i := 0; i < totalStore; i++ { + for range totalStore { id := simutil.IDAllocator.NextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -40,7 +40,7 @@ func newBalanceLeader(config *sc.SimConfig) *Case { } leaderStoreID := 
simCase.Stores[totalStore-1].ID - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), diff --git a/tools/pd-simulator/simulator/cases/balance_region.go b/tools/pd-simulator/simulator/cases/balance_region.go index 8798a656fd7..d4ef7ad986f 100644 --- a/tools/pd-simulator/simulator/cases/balance_region.go +++ b/tools/pd-simulator/simulator/cases/balance_region.go @@ -32,7 +32,7 @@ func newRedundantBalanceRegion(config *sc.SimConfig) *Case { replica := int(config.ServerConfig.Replication.MaxReplicas) allStores := make(map[uint64]struct{}, totalStore) - for i := 0; i < totalStore; i++ { + for i := range totalStore { s := &Store{ ID: simutil.IDAllocator.NextID(), Status: metapb.StoreState_Up, @@ -44,9 +44,9 @@ func newRedundantBalanceRegion(config *sc.SimConfig) *Case { allStores[s.ID] = struct{}{} } - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), StoreId: uint64((i+j)%totalStore + 1), diff --git a/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go b/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go index 9fe65a3d56a..8e65feefea4 100644 --- a/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go +++ b/tools/pd-simulator/simulator/cases/diagnose_label_isolation.go @@ -34,7 +34,7 @@ func newLabelNotMatch1(_ *sc.SimConfig) *Case { num1, num2 := 3, 1 storeNum, regionNum := num1+num2, 200 allStores := make(map[uint64]struct{}, storeNum+1) - for i := 0; i < num1; i++ { + for range num1 { id := IDAllocator.nextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -50,7 +50,7 @@ func newLabelNotMatch1(_ *sc.SimConfig) *Case { }) allStores[id] = struct{}{} - for i := 0; i < regionNum; i++ { + for i := range regionNum { peers := []*metapb.Peer{ {Id: IDAllocator.nextID(), StoreId: uint64(i%num1 + 1)}, {Id: IDAllocator.nextID(), StoreId: uint64((i+1)%num1 + 1)}, @@ -106,7 +106,7 @@ func newLabelIsolation1(_ *sc.SimConfig) *Case { num1, num2 := 2, 2 storeNum, regionNum := num1+num2, 300 allStores := make(map[uint64]struct{}, storeNum+1) - for i := 0; i < num1; i++ { + for range num1 { id := IDAllocator.nextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -115,7 +115,7 @@ func newLabelIsolation1(_ *sc.SimConfig) *Case { }) allStores[id] = struct{}{} } - for i := 0; i < num2; i++ { + for range num2 { id := IDAllocator.nextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -125,7 +125,7 @@ func newLabelIsolation1(_ *sc.SimConfig) *Case { allStores[id] = struct{}{} } - for i := 0; i < regionNum; i++ { + for i := range regionNum { peers := []*metapb.Peer{ {Id: IDAllocator.nextID(), StoreId: uint64(i%num1 + 1)}, {Id: IDAllocator.nextID(), StoreId: uint64((i+1)%num1 + 1)}, @@ -180,7 +180,7 @@ func newLabelIsolation2(_ *sc.SimConfig) *Case { storeNum, regionNum := 5, 200 allStores := make(map[uint64]struct{}, storeNum) - for i := 0; i < storeNum; i++ { + for range storeNum { id := IDAllocator.nextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -194,7 +194,7 @@ func newLabelIsolation2(_ *sc.SimConfig) *Case { simCase.Stores[3].Labels = []*metapb.StoreLabel{{Key: "dc", Value: "dc1"}, {Key: "zone", Value: "zone2"}, {Key: "host", Value: "host4"}} simCase.Stores[4].Labels = []*metapb.StoreLabel{{Key: "dc", Value: "dc1"}, 
{Key: "zone", Value: "zone3"}, {Key: "host", Value: "host5"}} - for i := 0; i < regionNum; i++ { + for i := range regionNum { peers := []*metapb.Peer{ {Id: IDAllocator.nextID(), StoreId: uint64(i%storeNum + 1)}, {Id: IDAllocator.nextID(), StoreId: uint64((i+1)%storeNum + 1)}, diff --git a/tools/pd-simulator/simulator/cases/diagnose_rule.go b/tools/pd-simulator/simulator/cases/diagnose_rule.go index 26f563297ae..4e7031a3a01 100644 --- a/tools/pd-simulator/simulator/cases/diagnose_rule.go +++ b/tools/pd-simulator/simulator/cases/diagnose_rule.go @@ -66,7 +66,7 @@ func newRule1(_ *sc.SimConfig) *Case { storeNum, regionNum := 9, 300 allStores := make(map[uint64]struct{}, storeNum) - for i := 0; i < storeNum; i++ { + for range storeNum { id := IDAllocator.nextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -84,7 +84,7 @@ func newRule1(_ *sc.SimConfig) *Case { simCase.Stores[7].Labels = []*metapb.StoreLabel{{Key: "region", Value: "region2"}, {Key: "idc", Value: "idc5"}} simCase.Stores[8].Labels = []*metapb.StoreLabel{{Key: "region", Value: "region1"}} - for i := 0; i < regionNum; i++ { + for i := range regionNum { peers := []*metapb.Peer{ {Id: IDAllocator.nextID(), StoreId: uint64(i%(storeNum-5) + 5)}, {Id: IDAllocator.nextID(), StoreId: uint64((i+1)%(storeNum-5) + 5)}, @@ -159,7 +159,7 @@ func newRule2(_ *sc.SimConfig) *Case { storeNum, regionNum := 6, 300 allStores := make(map[uint64]struct{}, storeNum) - for i := 0; i < storeNum; i++ { + for range storeNum { id := IDAllocator.nextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -174,7 +174,7 @@ func newRule2(_ *sc.SimConfig) *Case { simCase.Stores[4].Labels = []*metapb.StoreLabel{{Key: "region", Value: "region2"}} simCase.Stores[5].Labels = []*metapb.StoreLabel{{Key: "region", Value: "region2"}} - for i := 0; i < regionNum; i++ { + for i := range regionNum { peers := []*metapb.Peer{ {Id: IDAllocator.nextID(), StoreId: uint64(i%storeNum + 1)}, {Id: IDAllocator.nextID(), StoreId: uint64((i+1)%storeNum + 1)}, diff --git a/tools/pd-simulator/simulator/cases/hot_read.go b/tools/pd-simulator/simulator/cases/hot_read.go index 7f4d93fb43b..22ff70d9312 100644 --- a/tools/pd-simulator/simulator/cases/hot_read.go +++ b/tools/pd-simulator/simulator/cases/hot_read.go @@ -34,7 +34,7 @@ func newHotRead(config *sc.SimConfig) *Case { replica := int(config.ServerConfig.Replication.MaxReplicas) allStores := make(map[uint64]struct{}, totalStore) // Initialize the cluster - for i := 0; i < totalStore; i++ { + for range totalStore { id := simutil.IDAllocator.NextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -43,9 +43,9 @@ func newHotRead(config *sc.SimConfig) *Case { allStores[id] = struct{}{} } - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), StoreId: uint64((i+j)%totalStore + 1), diff --git a/tools/pd-simulator/simulator/cases/hot_write.go b/tools/pd-simulator/simulator/cases/hot_write.go index 54e944e540d..adb6eb0756a 100644 --- a/tools/pd-simulator/simulator/cases/hot_write.go +++ b/tools/pd-simulator/simulator/cases/hot_write.go @@ -34,7 +34,7 @@ func newHotWrite(config *sc.SimConfig) *Case { replica := int(config.ServerConfig.Replication.MaxReplicas) allStores := make(map[uint64]struct{}, totalStore) // Initialize the cluster - for i := 0; i < totalStore; i++ { + for range totalStore { id := simutil.IDAllocator.NextID() 
simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -43,9 +43,9 @@ func newHotWrite(config *sc.SimConfig) *Case { allStores[id] = struct{}{} } - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), StoreId: uint64((i+j)%totalStore + 1), diff --git a/tools/pd-simulator/simulator/cases/import_data.go b/tools/pd-simulator/simulator/cases/import_data.go index e37aadcfeba..3d329081f9e 100644 --- a/tools/pd-simulator/simulator/cases/import_data.go +++ b/tools/pd-simulator/simulator/cases/import_data.go @@ -38,7 +38,7 @@ func newImportData(config *sc.SimConfig) *Case { replica := int(config.ServerConfig.Replication.MaxReplicas) allStores := make(map[uint64]struct{}, totalStore) // Initialize the cluster - for i := 0; i < totalStore; i++ { + for range totalStore { id := simutil.IDAllocator.NextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -47,9 +47,9 @@ func newImportData(config *sc.SimConfig) *Case { allStores[id] = struct{}{} } - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: IDAllocator.nextID(), StoreId: uint64((i+j)%totalStore + 1), diff --git a/tools/pd-simulator/simulator/cases/makeup_down_replica.go b/tools/pd-simulator/simulator/cases/makeup_down_replica.go index ede3c4ba083..ec664e91254 100644 --- a/tools/pd-simulator/simulator/cases/makeup_down_replica.go +++ b/tools/pd-simulator/simulator/cases/makeup_down_replica.go @@ -30,16 +30,16 @@ func newMakeupDownReplicas(config *sc.SimConfig) *Case { replica := int(config.ServerConfig.Replication.MaxReplicas) noEmptyStoreNum := totalStore - 1 - for i := 0; i < totalStore; i++ { + for range totalStore { simCase.Stores = append(simCase.Stores, &Store{ ID: simutil.IDAllocator.NextID(), Status: metapb.StoreState_Up, }) } - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), StoreId: uint64((i+j)%totalStore + 1), diff --git a/tools/pd-simulator/simulator/cases/region_merge.go b/tools/pd-simulator/simulator/cases/region_merge.go index 3d050070203..9a278c851bd 100644 --- a/tools/pd-simulator/simulator/cases/region_merge.go +++ b/tools/pd-simulator/simulator/cases/region_merge.go @@ -30,7 +30,7 @@ func newRegionMerge(config *sc.SimConfig) *Case { replica := int(config.ServerConfig.Replication.MaxReplicas) allStores := make(map[uint64]struct{}, totalStore) - for i := 0; i < totalStore; i++ { + for range totalStore { id := simutil.IDAllocator.NextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -39,9 +39,9 @@ func newRegionMerge(config *sc.SimConfig) *Case { allStores[id] = struct{}{} } - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), StoreId: uint64((i+j)%totalStore + 1), diff --git a/tools/pd-simulator/simulator/cases/region_split.go b/tools/pd-simulator/simulator/cases/region_split.go index 264d9d9f442..8c1f3ac7759 100644 --- a/tools/pd-simulator/simulator/cases/region_split.go +++ 
b/tools/pd-simulator/simulator/cases/region_split.go @@ -28,7 +28,7 @@ func newRegionSplit(config *sc.SimConfig) *Case { totalStore := config.TotalStore allStores := make(map[uint64]struct{}, totalStore) - for i := 0; i < totalStore; i++ { + for range totalStore { storeID := simutil.IDAllocator.NextID() simCase.Stores = append(simCase.Stores, &Store{ ID: storeID, @@ -38,7 +38,7 @@ func newRegionSplit(config *sc.SimConfig) *Case { } replica := int(config.ServerConfig.Replication.MaxReplicas) peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), StoreId: uint64((j)%(totalStore-1) + 1), diff --git a/tools/pd-simulator/simulator/cases/scale_tikv.go b/tools/pd-simulator/simulator/cases/scale_tikv.go index 96d44513ae7..9cfe8d9dcad 100644 --- a/tools/pd-simulator/simulator/cases/scale_tikv.go +++ b/tools/pd-simulator/simulator/cases/scale_tikv.go @@ -32,7 +32,7 @@ func newScaleInOut(config *sc.SimConfig) *Case { totalStore, totalRegion = 6, 4000 } - for i := 0; i < totalStore; i++ { + for i := range totalStore { s := &Store{ ID: IDAllocator.nextID(), Status: metapb.StoreState_Up, @@ -43,9 +43,9 @@ func newScaleInOut(config *sc.SimConfig) *Case { simCase.Stores = append(simCase.Stores, s) } - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), StoreId: uint64((i+j)%totalStore + 1), diff --git a/tools/pd-simulator/simulator/cases/stable_env.go b/tools/pd-simulator/simulator/cases/stable_env.go index 8a015c90ca8..54a9f84341f 100644 --- a/tools/pd-simulator/simulator/cases/stable_env.go +++ b/tools/pd-simulator/simulator/cases/stable_env.go @@ -32,7 +32,7 @@ func newStableEnv(config *sc.SimConfig) *Case { allStores := make(map[uint64]struct{}, totalStore) arrStoresID := make([]uint64, 0, totalStore) replica := int(config.ServerConfig.Replication.MaxReplicas) - for i := 0; i < totalStore; i++ { + for range totalStore { id := simutil.IDAllocator.NextID() simCase.Stores = append(simCase.Stores, &Store{ ID: id, @@ -42,9 +42,9 @@ func newStableEnv(config *sc.SimConfig) *Case { arrStoresID = append(arrStoresID, id) } - for i := 0; i < totalRegion; i++ { + for i := range totalRegion { peers := make([]*metapb.Peer, 0, replica) - for j := 0; j < replica; j++ { + for j := range replica { peers = append(peers, &metapb.Peer{ Id: simutil.IDAllocator.NextID(), StoreId: arrStoresID[(i+j)%totalStore], diff --git a/tools/pd-simulator/simulator/client.go b/tools/pd-simulator/simulator/client.go index 224d54b05d6..39c2633cec5 100644 --- a/tools/pd-simulator/simulator/client.go +++ b/tools/pd-simulator/simulator/client.go @@ -169,7 +169,7 @@ func (c *client) heartbeatStreamLoop() { wg.Wait() // update connection to recreate heartbeat stream - for i := 0; i < retryTimes; i++ { + for range retryTimes { SD.ScheduleCheckMemberChanged() time.Sleep(leaderChangedWaitTime) if client := SD.GetServiceClient(); client != nil { @@ -324,7 +324,7 @@ func newRetryClient(node *Node) *retryClient { ) // Client should wait if PD server is not ready. 
- for i := 0; i < maxInitClusterRetries; i++ { + for range maxInitClusterRetries { client, receiveRegionHeartbeatCh, err = NewClient(tag) if err == nil { break @@ -359,7 +359,7 @@ func (rc *retryClient) requestWithRetry(f func() (any, error)) (any, error) { return res, nil } // retry to get leader URL - for i := 0; i < rc.retryCount; i++ { + for range rc.retryCount { SD.ScheduleCheckMemberChanged() time.Sleep(100 * time.Millisecond) if client := SD.GetServiceClient(); client != nil { @@ -460,7 +460,7 @@ func Bootstrap(ctx context.Context, pdAddrs string, store *metapb.Store, region } retry: - for i := 0; i < maxInitClusterRetries; i++ { + for range maxInitClusterRetries { time.Sleep(100 * time.Millisecond) for _, url := range urls { conn, err := createConn(url) @@ -497,7 +497,7 @@ retry: newStore := typeutil.DeepClone(store, core.StoreFactory) newRegion := typeutil.DeepClone(region, core.RegionFactory) var res *pdpb.BootstrapResponse - for i := 0; i < maxInitClusterRetries; i++ { + for range maxInitClusterRetries { // Bootstrap the cluster. res, err = pdCli.Bootstrap(ctx, &pdpb.BootstrapRequest{ Header: requestHeader(), diff --git a/tools/pd-simulator/simulator/simutil/key_test.go b/tools/pd-simulator/simulator/simutil/key_test.go index 9db7c597751..be07037501f 100644 --- a/tools/pd-simulator/simulator/simutil/key_test.go +++ b/tools/pd-simulator/simulator/simutil/key_test.go @@ -34,7 +34,7 @@ func TestGenerateTableKeys(t *testing.T) { re.Less(keys[i-1], keys[i]) s := []byte(keys[i-1]) e := []byte(keys[i]) - for j := 0; j < 1000; j++ { + for range 1000 { split, err := GenerateTiDBEncodedSplitKey(s, e) re.NoError(err) re.Less(string(s), string(split)) diff --git a/tools/pd-tso-bench/main.go b/tools/pd-tso-bench/main.go index 96ff2a51d0a..dda4f364519 100644 --- a/tools/pd-tso-bench/main.go +++ b/tools/pd-tso-bench/main.go @@ -91,7 +91,7 @@ func main() { cancel() }() - for i := 0; i < *count; i++ { + for i := range *count { fmt.Printf("\nStart benchmark #%d, duration: %+vs\n", i, duration.Seconds()) bench(ctx) } @@ -125,13 +125,13 @@ func bench(mainCtx context.Context) { if *enableFaultInjection { fmt.Printf("Enable fault injection, failure rate: %f\n", *faultInjectionRate) wg.Add(*clientNumber) - for i := 0; i < *clientNumber; i++ { + for i := range *clientNumber { go reqWorker(ctx, pdClients, i, durCh) } } else { wg.Add((*concurrency) * (*clientNumber)) - for i := 0; i < *clientNumber; i++ { - for j := 0; j < *concurrency; j++ { + for i := range *clientNumber { + for range *concurrency { go reqWorker(ctx, pdClients, i, durCh) } } diff --git a/tools/pd-ut/alloc/tempurl.go b/tools/pd-ut/alloc/tempurl.go index 6be69dfe056..2131699133a 100644 --- a/tools/pd-ut/alloc/tempurl.go +++ b/tools/pd-ut/alloc/tempurl.go @@ -31,7 +31,7 @@ var ( // Alloc allocates a local URL for testing. 
func Alloc() string { - for i := 0; i < 50; i++ { + for range 50 { if u := tryAllocTestURL(); u != "" { return u } diff --git a/tools/pd-ut/ut.go b/tools/pd-ut/ut.go index d7786feac1d..df8bff526f2 100644 --- a/tools/pd-ut/ut.go +++ b/tools/pd-ut/ut.go @@ -350,7 +350,7 @@ func cmdRun(args ...string) bool { taskCh := make(chan task, 100) works := make([]numa, parallel) var wg sync.WaitGroup - for i := 0; i < parallel; i++ { + for i := range parallel { wg.Add(1) go works[i].worker(&wg, taskCh) } @@ -465,7 +465,7 @@ func stripFlag(flag string) string { func handleFlag(f string) (found bool) { tmp := os.Args[:0] - for i := 0; i < len(os.Args); i++ { + for i := range os.Args { if os.Args[i] == f { found = true continue @@ -585,7 +585,7 @@ func (n *numa) runTestCase(pkg string, fn string) testResult { var buf bytes.Buffer var err error var start time.Time - for i := 0; i < 3; i++ { + for range 3 { cmd := n.testCommand(pkg, fn) cmd.Dir = filepath.Join(workDir, pkg) // Combine the test case output, so the run result for failed cases can be displayed.
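
For readers scanning the pattern rather than any single hunk: every change above is the same mechanical rewrite, replacing a counting for loop with Go 1.22's range-over-int form. The short standalone sketch below (not part of the patch, purely illustrative) shows the three shapes that recur throughout the diff — index discarded, index used, and a non-int count such as uint64(n), where the loop variable takes the type of the range expression.

// Standalone illustration of the loop shapes used in this patch (Go 1.22+).
package main

import "fmt"

func main() {
	// Index unused: "for range n" runs the body n times,
	// replacing "for i := 0; i < n; i++" when i is never read.
	count := 0
	for range 3 {
		count++
	}
	fmt.Println(count) // 3

	// Index used: "for i := range n" yields i = 0, 1, ..., n-1,
	// identical to the classic counting loop.
	for i := range 3 {
		fmt.Println(i)
	}

	// The loop variable takes the type of the range expression,
	// so ranging over a uint64 count gives a uint64 index.
	var n uint64 = 3
	for i := range n {
		var _ uint64 = i // compiles: i is uint64
		fmt.Println(i)
	}
}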