enable more linters
Signed-off-by: Ryan Leung <[email protected]>
rleungx committed Oct 23, 2024
1 parent d82e41d commit 25d7078
Showing 199 changed files with 601 additions and 585 deletions.
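
The bulk of the diff below comes from the newly enabled intrange linter: counting loops written as "for i := 0; i < n; i++" are rewritten to the range-over-integer form added in Go 1.22. A minimal, self-contained sketch of the transformation (an illustration, not code from this repository):

    package main

    import "fmt"

    func main() {
    	// Before (flagged by intrange): the classic counting loop.
    	for i := 0; i < 3; i++ {
    		fmt.Println("classic:", i)
    	}

    	// After (Go 1.22+): ranging over an integer yields 0, 1, ..., n-1.
    	for i := range 3 {
    		fmt.Println("range:", i)
    	}

    	// When the index is unused, the loop variable is dropped entirely.
    	for range 3 {
    		fmt.Println("tick")
    	}
    }

Both forms iterate the same number of times; the range expression is evaluated once, and the loop variable takes the type of the bound (defaulting to int for untyped constants).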
16 changes: 16 additions & 0 deletions .golangci.yml
@@ -18,6 +18,17 @@ linters:
- copyloopvar
- goimports
- depguard
+ - asasalint
+ - asciicheck
+ - bidichk
+ - durationcheck
+ - gocheckcompilerdirectives
+ - gochecksumtype
+ - makezero
+ - protogetter
+ - reassign
+ - zerologlint
+ - intrange
linters-settings:
gocritic:
# Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
@@ -36,16 +47,21 @@ linters-settings:
- G115
testifylint:
enable:
+ - blank-import
- bool-compare
- compares
- empty
- error-is-as
- error-nil
- expected-actual
+ - formatter
- len
+ - negative-positive
- require-error
- suite-dont-use-pkg
- suite-extra-assert-call
+ - suite-subtest-run
+ - useless-assert
disable:
- float-compare
- go-require
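
For reference, the other newly enabled linters each target a narrow bug class: asasalint flags a []any slice passed as the sole argument to a ...any variadic function, durationcheck flags multiplying two time.Duration values, makezero flags appending to a slice created with a non-zero length, bidichk rejects dangerous Unicode bidirectional control characters, protogetter prefers protobuf getters over direct field access, and reassign forbids reassigning variables exported by other packages. A hypothetical snippet (not from this repository) that several of them would flag:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// asasalint: a []any passed as the single variadic argument usually
    	// means the caller forgot the args... spread.
    	args := []any{"a", "b"}
    	fmt.Println(args) // probably intended: fmt.Println(args...)

    	// durationcheck: multiplying two Durations is rarely meaningful.
    	d := 2 * time.Second
    	_ = d * time.Second // suspicious; one operand should normally be a plain integer

    	// makezero: make with a non-zero length followed by append duplicates elements.
    	s := make([]int, 3)    // s == [0 0 0]
    	s = append(s, 1, 2, 3) // s == [0 0 0 1 2 3]; probably wanted make([]int, 0, 3)
    	fmt.Println(s)
    }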
2 changes: 1 addition & 1 deletion client/client.go
@@ -739,7 +739,7 @@ func (c *client) dispatchTSORequestWithRetry(ctx context.Context, dcLocation str
err error
req *tsoRequest
)
- for i := 0; i < dispatchRetryCount; i++ {
+ for i := range dispatchRetryCount {
// Do not delay for the first time.
if i > 0 {
time.Sleep(dispatchRetryDelay)
6 changes: 3 additions & 3 deletions client/pd_service_discovery.go
@@ -354,7 +354,7 @@ func (c *pdServiceBalancer) set(clients []ServiceClient) {
func (c *pdServiceBalancer) check() {
c.mu.Lock()
defer c.mu.Unlock()
- for i := 0; i < c.totalNode; i++ {
+ for range c.totalNode {
c.now.markAsAvailable()
c.next()
}
@@ -523,7 +523,7 @@ func (c *pdServiceDiscovery) initRetry(f func() error) error {
var err error
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
- for i := 0; i < c.option.maxRetryTimes; i++ {
+ for range c.option.maxRetryTimes {
if err = f(); err == nil {
return nil
}
@@ -1093,7 +1093,7 @@ func (c *pdServiceDiscovery) updateServiceClient(members []*pdpb.Member, leader
})
c.all.Store(clients)
// create candidate services for all kinds of request.
- for i := 0; i < int(apiKindCount); i++ {
+ for i := range apiKindCount {
c.apiCandidateNodes[i].set(clients)
}
return err
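
Note that the hunk above also drops an int(...) conversion: with range-over-integer, the loop variable adopts the type of the bound, so i can index apiCandidateNodes directly even when the bound is a typed constant. A small sketch of that behavior — apiKind and the constant names here are illustrative stand-ins, not the repository's actual definitions:

    package main

    import "fmt"

    type apiKind int

    const (
    	forwardAPIKind apiKind = iota
    	regionAPIKind
    	apiKindCount // sentinel: the number of kinds
    )

    func main() {
    	// An array sized by the typed constant.
    	var candidates [apiKindCount]string

    	// i has type apiKind, not int, so it indexes the array with no conversion.
    	for i := range apiKindCount {
    		candidates[i] = fmt.Sprintf("node-%d", i)
    	}
    	fmt.Println(candidates)
    }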
6 changes: 3 additions & 3 deletions client/pd_service_discovery_test.go
@@ -137,7 +137,7 @@ func (suite *serviceClientTestSuite) SetupSuite() {
suite.followerServer = newTestServer(false)
go suite.leaderServer.run()
go suite.followerServer.run()
- for i := 0; i < 10; i++ {
+ for range 10 {
leaderConn, err1 := grpc.Dial(suite.leaderServer.addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
followerConn, err2 := grpc.Dial(suite.followerServer.addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err1 == nil && err2 == nil {
@@ -278,7 +278,7 @@ func (suite *serviceClientTestSuite) TestServiceClientBalancer() {
b.set([]ServiceClient{leader, follower})
re.Equal(2, b.totalNode)

- for i := 0; i < 10; i++ {
+ for range 10 {
client := b.get()
ctx := client.BuildGRPCTargetContext(suite.ctx, false)
conn := client.GetClientConn()
@@ -292,7 +292,7 @@ func (suite *serviceClientTestSuite) TestServiceClientBalancer() {
suite.followerServer.server.resetCount()
suite.leaderServer.server.resetCount()

- for i := 0; i < 10; i++ {
+ for range 10 {
client := b.get()
ctx := client.BuildGRPCTargetContext(suite.ctx, true)
conn := client.GetClientConn()
2 changes: 1 addition & 1 deletion client/resource_group/controller/controller.go
@@ -1347,7 +1347,7 @@ func (gc *groupCostController) acquireTokens(ctx context.Context, delta *rmpb.Co
d time.Duration
)
retryLoop:
- for i := 0; i < gc.mainCfg.WaitRetryTimes; i++ {
+ for range gc.mainCfg.WaitRetryTimes {
now := time.Now()
switch gc.mode {
case rmpb.GroupMode_RawMode:
2 changes: 1 addition & 1 deletion client/resource_group/controller/limiter_test.go
@@ -246,7 +246,7 @@ func testQPSCase(concurrency int, reserveN int64, limit int64) (qps float64, ru
var totalRequests int64
start := time.Now()

- for i := 0; i < concurrency; i++ {
+ for range concurrency {
wg.Add(1)
go func() {
defer wg.Done()
4 changes: 2 additions & 2 deletions client/resource_manager_client.go
@@ -382,7 +382,7 @@ func (c *client) tryResourceManagerConnect(ctx context.Context, connection *reso
)
ticker := time.NewTicker(retryInterval)
defer ticker.Stop()
- for i := 0; i < maxRetryTimes; i++ {
+ for range maxRetryTimes {
cc, err := c.resourceManagerClient()
if err != nil {
continue
@@ -406,7 +406,7 @@ func (c *client) tryResourceManagerConnect(ctx context.Context, connection *reso
}

func (tbc *tokenBatchController) revokePendingTokenRequest(err error) {
- for i := 0; i < len(tbc.tokenRequestCh); i++ {
+ for range len(tbc.tokenRequestCh) {
req := <-tbc.tokenRequestCh
req.done <- err
}
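
One subtlety in the hunk above: when draining a channel, only the loop bound changes form. Writing for range len(ch) receives exactly as many values as are currently queued, whereas for range ch would itself receive from the channel, fighting the <-ch in the body, and then block until the channel is closed. As a side effect, the bound is now evaluated once up front, while the old i < len(ch) condition re-checked a shrinking length on every iteration. A minimal sketch of the drain pattern, assuming a buffered channel of pending requests (the names are illustrative):

    package main

    import (
    	"errors"
    	"fmt"
    )

    type request struct {
    	done chan error
    }

    // revoke fails every request queued in ch at the time of the call.
    func revoke(ch chan *request, err error) {
    	// len(ch) is evaluated once; receive exactly that many requests.
    	// for range ch would be wrong here: it performs its own receive
    	// and keeps waiting for more values until ch is closed.
    	for range len(ch) {
    		req := <-ch
    		req.done <- err
    	}
    }

    func main() {
    	ch := make(chan *request, 4)
    	for range 3 {
    		ch <- &request{done: make(chan error, 1)}
    	}
    	revoke(ch, errors.New("client closed"))
    	fmt.Println("queued after revoke:", len(ch)) // 0
    }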
2 changes: 1 addition & 1 deletion client/retry/backoff_test.go
@@ -70,7 +70,7 @@ func TestBackoffer(t *testing.T) {
bo = InitialBackoffer(base, max, total)
re.Equal(bo.nextInterval(), base)
re.Equal(bo.nextInterval(), 2*base)
- for i := 0; i < 10; i++ {
+ for range 10 {
re.LessOrEqual(bo.nextInterval(), max)
}
re.Equal(bo.nextInterval(), max)
2 changes: 1 addition & 1 deletion client/testutil/tempurl.go
@@ -31,7 +31,7 @@ var (

// Alloc allocates a local URL for testing.
func Alloc() string {
- for i := 0; i < 10; i++ {
+ for range 10 {
if u := tryAllocTestURL(); u != "" {
return u
}
2 changes: 1 addition & 1 deletion client/timerpool/pool_test.go
@@ -14,7 +14,7 @@ import (
func TestTimerPool(t *testing.T) {
var tp TimerPool

- for i := 0; i < 100; i++ {
+ for range 100 {
timer := tp.Get(20 * time.Millisecond)

select {
2 changes: 1 addition & 1 deletion client/tso_batch_controller.go
@@ -211,7 +211,7 @@ func (tbc *tsoBatchController) adjustBestBatchSize() {
}

func (tbc *tsoBatchController) finishCollectedRequests(physical, firstLogical int64, suffixBits uint32, streamID string, err error) {
- for i := 0; i < tbc.collectedRequestCount; i++ {
+ for i := range tbc.collectedRequestCount {
tsoReq := tbc.collectedRequests[i]
// Retrieve the request context before the request is done to trace without race.
requestCtx := tsoReq.requestCtx
4 changes: 2 additions & 2 deletions client/tso_client.go
@@ -329,7 +329,7 @@ func (c *tsoClient) backupClientConn() (*grpc.ClientConn, string) {
cc *grpc.ClientConn
err error
)
- for i := 0; i < len(urls); i++ {
+ for range urls {
url := urls[rand.Intn(len(urls))]
if cc, err = c.svcDiscovery.GetOrCreateGRPCConn(url); err != nil {
continue
@@ -403,7 +403,7 @@ func (c *tsoClient) tryConnectToTSO(
ticker := time.NewTicker(retryInterval)
defer ticker.Stop()
// Retry several times before falling back to the follower when the network problem happens
- for i := 0; i < maxRetryTimes; i++ {
+ for range maxRetryTimes {
c.svcDiscovery.ScheduleCheckMemberChanged()
cc, url = c.GetTSOAllocatorClientConnByDCLocation(dc)
if _, ok := connectionCtxs.Load(url); ok {
2 changes: 1 addition & 1 deletion client/tso_dispatcher.go
@@ -168,7 +168,7 @@ func (td *tsoDispatcher) scheduleUpdateConnectionCtxs() {
}

func (td *tsoDispatcher) revokePendingRequests(err error) {
- for i := 0; i < len(td.tsoRequestCh); i++ {
+ for range len(td.tsoRequestCh) {
req := <-td.tsoRequestCh
req.tryDone(err)
}
4 changes: 2 additions & 2 deletions client/tso_dispatcher_test.go
@@ -231,7 +231,7 @@ func (s *testTSODispatcherSuite) testStaticConcurrencyImpl(concurrency int) {
// and will be batched together once there is a free token.
reqs := make([]*tsoRequest, 0, tokenCount+3)

- for i := 0; i < tokenCount+3; i++ {
+ for range tokenCount + 3 {
req := s.sendReq(ctx)
s.reqMustNotReady(req)
reqs = append(reqs, req)
@@ -242,7 +242,7 @@ func (s *testTSODispatcherSuite) testStaticConcurrencyImpl(concurrency int) {
// second batch but not finished yet.
// Also note that in current implementation, the tsoStream tries to receive the next result before checking
// the `tsoStream.pendingRequests` queue. Changing this behavior may need to update this test.
- for i := 0; i < tokenCount+3; i++ {
+ for i := range tokenCount + 3 {
expectedPending := tokenCount + 1 - i
if expectedPending > tokenCount {
expectedPending = tokenCount
2 changes: 1 addition & 1 deletion client/tso_service_discovery.go
@@ -211,7 +211,7 @@ func (c *tsoServiceDiscovery) retry(
var err error
ticker := time.NewTicker(retryInterval)
defer ticker.Stop()
- for i := 0; i < maxRetryTimes; i++ {
+ for range maxRetryTimes {
if err = f(); err == nil {
return nil
}
10 changes: 5 additions & 5 deletions client/tso_stream_test.go
@@ -366,7 +366,7 @@ func (s *testTSOStreamSuite) TestTSOStreamBasic() {
func (s *testTSOStreamSuite) testTSOStreamBrokenImpl(err error, pendingRequests int) {
var resultCh []<-chan callbackInvocation

- for i := 0; i < pendingRequests; i++ {
+ for range pendingRequests {
ch := s.mustProcessRequestWithResultCh(1)
resultCh = append(resultCh, ch)
s.noResult(ch)
@@ -414,7 +414,7 @@ func (s *testTSOStreamSuite) TestTSOStreamCanceledWithPendingReq() {
func (s *testTSOStreamSuite) TestTSOStreamFIFO() {
var resultChs []<-chan callbackInvocation
const count = 5
- for i := 0; i < count; i++ {
+ for i := range count {
ch := s.mustProcessRequestWithResultCh(int64(i + 1))
resultChs = append(resultChs, ch)
}
@@ -423,7 +423,7 @@ func (s *testTSOStreamSuite) TestTSOStreamFIFO() {
s.noResult(ch)
}

- for i := 0; i < count; i++ {
+ for i := range count {
s.inner.returnResult(int64((i+1)*10), int64(i), uint32(i+1))
}

@@ -505,7 +505,7 @@ func (s *testTSOStreamSuite) TestEstimatedLatency() {
reqStartTimeCh := make(chan time.Time, maxPendingRequestsInTSOStream)
// Limit concurrent requests to be less than the capacity of tsoStream.pendingRequests.
tokenCh := make(chan struct{}, maxPendingRequestsInTSOStream-1)
- for i := 0; i < 40; i++ {
+ for range 40 {
tokenCh <- struct{}{}
}
// Return a result after 50ms delay for each requests
@@ -594,7 +594,7 @@ func TestRCFilter(t *testing.T) {
re.Equal(0.0, f.update(now, 0))
lastOutput := 0.0
// 10000 even samples in 1 second.
- for i := 0; i < 10000; i++ {
+ for range 10000 {
now = now.Add(time.Microsecond * 100)
output := f.update(now, 1.0)
re.Greater(output, lastOutput)
6 changes: 3 additions & 3 deletions pkg/autoscaling/prometheus_test.go
@@ -49,7 +49,7 @@ var podNameTemplate = map[ComponentType]string{
func generatePodNames(component ComponentType) []string {
names := make([]string, 0, instanceCount)
pattern := podNameTemplate[component]
- for i := 0; i < instanceCount; i++ {
+ for i := range instanceCount {
names = append(names, fmt.Sprintf(pattern, mockClusterName, i))
}
return names
@@ -119,7 +119,7 @@ func (c *normalClient) buildCPUMockData(component ComponentType) {
cpuQuotaQuery := cpuQuotaPromQLTemplate[component]

var results []result
- for i := 0; i < instanceCount; i++ {
+ for i := range instanceCount {
results = append(results, result{
Value: []any{time.Now().Unix(), fmt.Sprintf("%f", mockResultValue)},
Metric: metric{
@@ -192,7 +192,7 @@ func TestRetrieveCPUMetrics(t *testing.T) {
options := NewQueryOptions(component, metric, addresses[:len(addresses)-1], time.Now(), mockDuration)
result, err := querier.Query(options)
re.NoError(err)
- for i := 0; i < len(addresses)-1; i++ {
+ for i := range len(addresses) - 1 {
value, ok := result[addresses[i]]
re.True(ok)
re.Less(math.Abs(value-mockResultValue), 1e-6)
6 changes: 3 additions & 3 deletions pkg/balancer/balancer_test.go
@@ -30,7 +30,7 @@ func TestBalancerPutAndDelete(t *testing.T) {
re.Equal(uint32(0), balancer.Next())
// test put
exists := make(map[uint32]struct{})
- for i := 0; i < 100; i++ {
+ for range 100 {
num := rand.Uint32()
balancer.Put(num)
exists[num] = struct{}{}
@@ -77,12 +77,12 @@ func TestBalancerDuplicate(t *testing.T) {
func TestRoundRobin(t *testing.T) {
re := require.New(t)
balancer := NewRoundRobin[uint32]()
- for i := 0; i < 100; i++ {
+ for range 100 {
num := rand.Uint32()
balancer.Put(num)
}
statistics := make(map[uint32]int)
- for i := 0; i < 1000; i++ {
+ for range 1000 {
statistics[balancer.Next()]++
}
min := 1000
16 changes: 8 additions & 8 deletions pkg/btree/btree_generic_test.go
@@ -50,7 +50,7 @@ func perm(n int) (out []Int) {

// rang returns an ordered list of Int items in the range [0, n).
func rang(n int) (out []Int) {
- for i := 0; i < n; i++ {
+ for i := range n {
out = append(out, Int(i))
}
return
@@ -101,10 +101,10 @@ func TestBTreeSizeInfo(t *testing.T) {
max, _ := tr.Max()
assertEq(t, "check max", tr.GetAt(tr.Len()-1), max)
}
- for k := 0; k < treeSize; k++ {
+ for k := range treeSize {
assertEq(t, "get k-th", tr.GetAt(k), Int(k))
}
- for x := Int(0); x < treeSize; x++ {
+ for x := range Int(treeSize) {
y, rk := tr.GetWithIndex(x)
assertEq(t, "get", y, x)
assertEq(t, "get rank", rk, int(x))
@@ -128,10 +128,10 @@ func TestBTreeSizeInfo(t *testing.T) {
max, _ := tr.Max()
assertEq(t, "after delete check max", tr.GetAt(tr.Len()-1), max)
}
- for k := 0; k < treeSize/3; k++ {
+ for k := range treeSize / 3 {
assertEq(t, "after delete get k-th", tr.GetAt(k), Int(3*k))
}
- for x := Int(0); x < treeSize; x++ {
+ for x := range Int(treeSize) {
y, rk := tr.GetWithIndex(x)
if x%3 == 0 {
assertEq(t, "after delete get", y, x)
@@ -169,7 +169,7 @@ func TestBTreeSizeInfo(t *testing.T) {
func TestBTreeG(t *testing.T) {
tr := NewG[Int](*btreeDegree)
const treeSize = 10000
- for i := 0; i < 10; i++ {
+ for range 10 {
if min, found := tr.Min(); found {
t.Fatalf("empty min, got %+v", min)
}
@@ -281,7 +281,7 @@ func TestDeleteMaxG(t *testing.T) {
got = append(got, v)
}
// Reverse our list.
- for i := 0; i < len(got)/2; i++ {
+ for i := range len(got) / 2 {
got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i]
}
if want := rang(100); !reflect.DeepEqual(got, want) {
@@ -786,7 +786,7 @@ func TestCloneConcurrentOperationsG(t *testing.T) {
}
t.Log("Removing half from first half")
toRemove := rang(cloneTestSize)[cloneTestSize/2:]
- for i := 0; i < len(trees)/2; i++ {
+ for i := range len(trees) / 2 {
tree := trees[i]
wg.Add(1)
go func() {
2 changes: 1 addition & 1 deletion pkg/cgroup/cgroup_cpu_test.go
@@ -67,7 +67,7 @@ func TestGetCgroupCPU(t *testing.T) {
re := require.New(t)
exit := make(chan struct{})
var wg sync.WaitGroup
- for i := 0; i < 10; i++ {
+ for range 10 {
wg.Add(1)
go func() {
defer wg.Done()
(Diff truncated: the remaining changed files are not shown here.)
