Mirror of https://github.com/vmware-tanzu/pinniped.git
Enable 'intrange' linter
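For context: intrange (a golangci-lint linter) reports three-clause for loops that can use the integer range syntax added in Go 1.22, which is the mechanical rewrite applied throughout this commit. A minimal sketch of the pattern, with a hypothetical loop body:

package main

import "fmt"

func main() {
	// Before: the classic three-clause loop flagged by intrange.
	for i := 0; i < 3; i++ {
		fmt.Println(i)
	}

	// After: Go 1.22+ integer range; also yields i = 0, 1, 2.
	for i := range 3 {
		fmt.Println(i)
	}

	// When the body never uses the index, the variable is dropped entirely.
	for range 3 {
		fmt.Println("tick")
	}
}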
@@ -46,6 +46,7 @@ linters:
     - unconvert
     - whitespace
     - copyloopvar
+    - intrange
 
 issues:
   exclude-rules:
@@ -22,7 +22,7 @@ func TestInfiniteBackoff(t *testing.T) {
 			stepper: &InfiniteBackoff{},
 			expectedSequence: func() []time.Duration {
 				results := make([]time.Duration, 1000)
-				for i := 0; i < 1000; i++ {
+				for i := range 1000 {
 					results[i] = time.Duration(0)
 				}
 				return results
@@ -69,7 +69,7 @@ func TestInfiniteBackoff(t *testing.T) {
 			},
 			expectedSequence: func() []time.Duration {
 				results := make([]time.Duration, 1000)
-				for i := 0; i < 1000; i++ {
+				for i := range 1000 {
 					results[i] = 20 * time.Nanosecond
 				}
 				return results
@@ -19,9 +19,9 @@ import (
 )
 
 func TestTransformer(t *testing.T) {
-	var veryLargeGroupList []string
-	for i := 0; i < 10000; i++ {
-		veryLargeGroupList = append(veryLargeGroupList, fmt.Sprintf("g%d", i))
+	veryLargeGroupList := make([]string, 10000)
+	for i := range 10000 {
+		veryLargeGroupList[i] = fmt.Sprintf("g%d", i)
 	}
 
 	alreadyCancelledContext, cancel := context.WithCancel(context.Background())
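Worth noting: this hunk changes more than the loop header; the slice is now preallocated with make and filled by index rather than grown with append. A small runnable sketch of the two equivalent constructions (the names grown and filled are illustrative, not from the diff):

package main

import "fmt"

func main() {
	// Old style: start nil and grow with append.
	var grown []string
	for i := 0; i < 4; i++ {
		grown = append(grown, fmt.Sprintf("g%d", i))
	}

	// New style: preallocate the final length and assign by index,
	// avoiding intermediate reallocations for large slices.
	filled := make([]string, 4)
	for i := range 4 {
		filled[i] = fmt.Sprintf("g%d", i)
	}

	fmt.Println(grown, filled) // both print [g0 g1 g2 g3]
}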
@@ -849,11 +849,11 @@ func TestTypicalPerformanceAndThreadSafety(t *testing.T) {
 	require.NoError(t, err)
 	pipeline.AppendTransformation(compiledTransform)
 
-	var groups []string
-	var wantGroups []string
-	for i := 0; i < 100; i++ {
-		groups = append(groups, fmt.Sprintf("g%d", i))
-		wantGroups = append(wantGroups, fmt.Sprintf("group_prefix:g%d", i))
+	groups := make([]string, 100)
+	wantGroups := make([]string, 100)
+	for i := range 100 {
+		groups[i] = fmt.Sprintf("g%d", i)
+		wantGroups[i] = fmt.Sprintf("group_prefix:g%d", i)
 	}
 	sort.Strings(wantGroups)
 
@@ -869,7 +869,7 @@ func TestTypicalPerformanceAndThreadSafety(t *testing.T) {
 	// and 100 group names. It is not meant to be a pass/fail test or scientific benchmark test.
 	iterations := 1000
 	start := time.Now()
-	for i := 0; i < iterations; i++ {
+	for range iterations {
 		_, _ = pipeline.Evaluate(context.Background(), "ryan", groups)
 	}
 	elapsed := time.Since(start)
@@ -884,11 +884,11 @@ func TestTypicalPerformanceAndThreadSafety(t *testing.T) {
 	var wg sync.WaitGroup
 	numGoroutines := runtime.NumCPU() / 2
 	t.Logf("Running tight loops in %d simultaneous goroutines", numGoroutines)
-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		wg.Add(1) // increment WaitGroup counter for each goroutine
 		go func() {
 			defer wg.Done() // decrement WaitGroup counter when this goroutine finishes
-			for j := 0; j < iterations*2; j++ {
+			for range iterations * 2 {
 				localResult, localErr := pipeline.Evaluate(context.Background(), "ryan", groups)
 				require.NoError(t, localErr)
 				require.Equal(t, "username_prefix:ryan", localResult.Username)
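Both rewrites above drop loop variables (i and j) that the bodies never used, so for range n keeps the iteration count while making that explicit. A self-contained sketch of the same WaitGroup fan-out shape, with placeholder work standing in for pipeline.Evaluate:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	numGoroutines := 4

	for range numGoroutines { // outer index was unused, so no loop variable
		wg.Add(1)
		go func() {
			defer wg.Done()
			for range 3 { // inner repetition count; index also unused
				fmt.Println("work")
			}
		}()
	}
	wg.Wait()
}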
@@ -55,7 +55,7 @@ func TestCache(t *testing.T) {
 		{APIGroup: "b", Kind: "b", Name: "a"},
 		{APIGroup: "b", Kind: "b", Name: "b"},
 	}
-	for tries := 0; tries < 10; tries++ {
+	for range 10 {
 		cache := New()
 		for _, i := range rand.Perm(len(keysInExpectedOrder)) {
 			cache.Store(keysInExpectedOrder[i], nil)
@@ -1,4 +1,4 @@
-// Copyright 2020-2021 the Pinniped contributors. All Rights Reserved.
+// Copyright 2020-2024 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 
 package secret
@@ -74,7 +74,7 @@ func TestCacheSynchronized(t *testing.T) {
 	eg, _ := errgroup.WithContext(ctx)
 
 	eg.Go(func() error {
-		for i := 0; i < 100; i++ {
+		for range 100 {
 			require.Equal(t, csrfCookieEncoderHashKey, c.GetCSRFCookieEncoderHashKey())
 			require.Equal(t, tokenHMACKey, c.GetTokenHMACKey(issuer))
 			require.Equal(t, stateEncoderHashKey, c.GetStateEncoderHashKey(issuer))
@@ -84,7 +84,7 @@ func TestCacheSynchronized(t *testing.T) {
 	})
 
 	eg.Go(func() error {
-		for i := 0; i < 100; i++ {
+		for range 100 {
 			require.Equal(t, csrfCookieEncoderHashKey, c.GetCSRFCookieEncoderHashKey())
 			require.Equal(t, tokenHMACKey, c.GetTokenHMACKey(issuer))
 			require.Equal(t, stateEncoderHashKey, c.GetStateEncoderHashKey(issuer))
@@ -94,7 +94,7 @@ func TestCacheSynchronized(t *testing.T) {
 	})
 
 	eg.Go(func() error {
-		for i := 0; i < 100; i++ {
+		for range 100 {
 			require.Nil(t, c.GetTokenHMACKey(otherIssuer))
 			require.Nil(t, c.GetStateEncoderHashKey(otherIssuer))
 			require.Nil(t, c.GetStateEncoderBlockKey(otherIssuer))
@@ -150,7 +150,7 @@ func TestAPIServingCertificateAutoCreationAndRotation_Disruptive(t *testing.T) {
 	// our code changes all the certs immediately thus this should be healthy fairly quickly
 	// if this starts flaking, check for bugs in our dynamiccertificates.Notifier implementation
 	testlib.RequireEventually(t, func(requireEventually *require.Assertions) {
-		for i := 0; i < 10; i++ {
+		for range 10 {
 			_, err := conciergeClient.LoginV1alpha1().TokenCredentialRequests().Create(ctx, &loginv1alpha1.TokenCredentialRequest{
 				TypeMeta:   metav1.TypeMeta{},
 				ObjectMeta: metav1.ObjectMeta{},
@@ -781,7 +781,7 @@ func TestSimultaneousLDAPRequestsOnSingleProvider(t *testing.T) {
 	// without triggering the race detector.
 	iterations := 150
 	resultCh := make(chan authUserResult, iterations)
-	for i := 0; i < iterations; i++ {
+	for range iterations {
 		go func() {
 			authUserCtx, authUserCtxCancelFunc := context.WithTimeout(context.Background(), 2*time.Minute)
 			defer authUserCtxCancelFunc()
@@ -794,7 +794,7 @@ func TestSimultaneousLDAPRequestsOnSingleProvider(t *testing.T) {
 			}
 		}()
 	}
-	for i := 0; i < iterations; i++ {
+	for range iterations {
 		result := <-resultCh
 		// Record failures but allow the test to keep running so that all the background goroutines have a chance to try.
 		assert.NoError(t, result.err)
@@ -854,7 +854,7 @@ func findRecentlyUnusedLocalhostPorts(t *testing.T, howManyPorts int) []string {
 	t.Helper()
 
 	listeners := []net.Listener{}
-	for i := 0; i < howManyPorts; i++ {
+	for range howManyPorts {
 		unusedPortGrabbingListener, err := net.Listen("tcp", "127.0.0.1:0")
 		require.NoError(t, err)
 		listeners = append(listeners, unusedPortGrabbingListener)
@@ -170,7 +170,7 @@ func leaderElectionClients(t *testing.T, namespace *corev1.Namespace, leaseName
 	clients := make(map[string]*kubeclient.Client, count)
 	cancels := make(map[string]context.CancelFunc, count)
 
-	for i := 0; i < count; i++ {
+	for range count {
 		identity := "leader-election-client-" + rand.String(5)
 		clients[identity], cancels[identity] = leaderElectionClient(t, namespace, leaseName, identity)
 	}
@@ -1,4 +1,4 @@
-// Copyright 2023 the Pinniped contributors. All Rights Reserved.
+// Copyright 2023-2024 the Pinniped contributors. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 
 package integration
@@ -929,7 +929,7 @@ func TestSupervisorFederationDomainCRDValidations_Parallel(t *testing.T) {
 			// For the above tests, it should be enough to assume that there will only be indices up to 10.
 			// This is useful when the only difference in the message between old and new is the missing indices.
 			// Otherwise, use wantOldKubeErr to say what the expected message should be for old versions.
-			for i := 0; i < 10; i++ {
+			for i := range 10 {
 				wantErr = strings.ReplaceAll(wantErr, fmt.Sprintf("[%d]", i), "")
 			}
 		}
@@ -533,7 +533,7 @@ func makeErrFix(reallyOld bool) []string {
 	out := make([]string, 0, total*6) // good enough allocation
 
 	// these servers do not show the actual index of where the error occurred
-	for i := 0; i < total; i++ {
+	for i := range total {
 		idx := fmt.Sprintf("[%d]", i)
 		out = append(out, idx+":", ":")
 		out = append(out, idx+" ", " ")
@@ -139,9 +139,9 @@ func LockADTestUser(t *testing.T, env *TestEnv, testUserName string) {
 	// our password policy allows 20 wrong attempts before locking the account, so do 21.
 	// these wrong password attempts could go to different domain controllers, but account
 	// lockout changes are urgently replicated, meaning that the domain controllers will be
-	// synced asap rather than in the usual 15 second interval.
+	// synced asap rather than in the usual 15-second interval.
 	// See https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-2000-server/cc961787(v=technet.10)#urgent-replication-of-account-lockout-changes
-	for i := 0; i <= 21; i++ {
+	for i := range 22 {
 		err := conn.Bind(userDN, "not-the-right-password-"+fmt.Sprint(i))
 		require.Error(t, err) // this should be an error
 	}
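One subtlety in this last hunk: for i := range n iterates i from 0 through n-1, so the old for i := 0; i <= 21; i++ (22 iterations) maps to range 22, not range 21. A quick equivalence check:

package main

import "fmt"

func main() {
	countA := 0
	for i := 0; i <= 21; i++ { // old form: i = 0..21 inclusive
		countA++
	}

	countB := 0
	for range 22 { // range n runs exactly n times
		countB++
	}

	fmt.Println(countA, countB) // prints: 22 22
}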