diff --git a/Makefile b/Makefile
index 371277d07..3dc5e1025 100644
--- a/Makefile
+++ b/Makefile
@@ -20,6 +20,7 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
 	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
+	@echo "Installing gofumpt" && go install mvdan.cc/gofumpt@latest
 	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.7-0.20211026165309-e818a1881b0e
 	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

@@ -34,13 +35,14 @@ check-gen: ## check for updated autogenerated files
 lint: ## runs golangci-lint suite of linters
 	@echo "Running $@ check"
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@${GOPATH}/bin/golangci-lint cache clean
+	@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+	@${GOPATH}/bin/gofumpt -s -l .

 check: test
 test: verifiers build ## builds minio, runs linters, tests
 	@echo "Running unit tests"
-	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./...
+	@CGO_ENABLED=0 go test -tags kqueue ./...

 test-upgrade: build
 	@echo "Running minio upgrade tests"
@@ -66,18 +68,18 @@ test-site-replication: install ## verify automatic site replication
 verify: ## verify minio various setups
 	@echo "Verifying build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)

 verify-healing: ## verify healing and replacing disks with minio binary
 	@echo "Verify healing build with race"
-	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-healing.sh)
 	@(env bash $(PWD)/buildscripts/unaligned-healing.sh)

 build: checks ## builds minio to $(PWD)
 	@echo "Building minio binary to './minio'"
-	@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

 hotfix-vars:
 	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
diff --git a/cmd/admin-bucket-handlers.go b/cmd/admin-bucket-handlers.go
index a3a544b51..b3f678f47 100644
--- a/cmd/admin-bucket-handlers.go
+++ b/cmd/admin-bucket-handlers.go
@@ -155,7 +155,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
 		return
 	}
 	var target madmin.BucketTarget
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(reqBytes, &target); err != nil {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
 		return
diff --git a/cmd/admin-handlers-config-kv.go b/cmd/admin-handlers-config-kv.go
index 23c385f56..c955cc067 100644
--- a/cmd/admin-handlers-config-kv.go
+++ b/cmd/admin-handlers-config-kv.go
@@ -170,7 +170,7 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ
 	cfg := globalServerConfig.Clone()
 	vars := mux.Vars(r)
-	var buf = &bytes.Buffer{}
+	buf := &bytes.Buffer{}
 	cw := config.NewConfigWriteTo(cfg, vars["key"])
 	if _, err := cw.WriteTo(buf); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
diff --git a/cmd/admin-handlers-site-replication.go b/cmd/admin-handlers-site-replication.go
index 7addd35f4..95e3d6177 100644
--- a/cmd/admin-handlers-site-replication.go
+++ b/cmd/admin-handlers-site-replication.go
@@ -134,7 +134,6 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
-
 }

 // SRPeerReplicateIAMItem - PUT /minio/admin/v3/site-replication/iam-item
diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go
index 98f6504f2..4ba282c3d 100644
--- a/cmd/admin-handlers-users.go
+++ b/cmd/admin-handlers-users.go
@@ -613,7 +613,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
 		}
 	}

-	var createResp = madmin.AddServiceAccountResp{
+	createResp := madmin.AddServiceAccountResp{
 		Credentials: madmin.Credentials{
 			AccessKey: newCred.AccessKey,
 			SecretKey: newCred.SecretKey,
@@ -814,7 +814,7 @@ func (a adminAPIHandlers) InfoServiceAccount(w http.ResponseWriter, r *http.Requ
 		return
 	}

-	var infoResp = madmin.InfoServiceAccountResp{
+	infoResp := madmin.InfoServiceAccountResp{
 		ParentUser:    svcAccount.ParentUser,
 		AccountStatus: svcAccount.Status,
 		ImpliedPolicy: impliedPolicy,
@@ -891,7 +891,7 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
 		serviceAccountsNames = append(serviceAccountsNames, svc.AccessKey)
 	}

-	var listResp = madmin.ListServiceAccountsResp{
+	listResp := madmin.ListServiceAccountsResp{
 		Accounts: serviceAccountsNames,
 	}

@@ -1251,7 +1251,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
 		return
 	}

-	var newPolicies = make(map[string]iampolicy.Policy)
+	newPolicies := make(map[string]iampolicy.Policy)
 	for name, p := range policies {
 		_, err = json.Marshal(p)
 		if err != nil {
@@ -1283,7 +1283,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
 		return
 	}

-	var newPolicies = make(map[string]iampolicy.Policy)
+	newPolicies := make(map[string]iampolicy.Policy)
 	for name, p := range policies {
 		_, err = json.Marshal(p)
 		if err != nil {
diff --git a/cmd/admin-handlers-users_test.go b/cmd/admin-handlers-users_test.go
index 3f28e52f7..1f9f94316 100644
--- a/cmd/admin-handlers-users_test.go
+++ b/cmd/admin-handlers-users_test.go
@@ -653,7 +653,6 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
 	if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
 		c.Fatalf("policy contains unexpected content!")
 	}
-
 }

 func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go
index d3362bdcf..8ba0a8c5a 100644
--- a/cmd/admin-handlers.go
+++ b/cmd/admin-handlers.go
@@ -333,7 +333,6 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
 	// Reply with storage information (across nodes in a
 	// distributed setup) as json.
 	writeSuccessResponseJSON(w, jsonBytes)
-
 }

 // DataUsageInfoHandler - GET /minio/admin/v3/datausage
@@ -1332,7 +1331,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
 	if keyID == "" {
 		keyID = stat.DefaultKey
 	}
-	var response = madmin.KMSKeyStatus{
+	response := madmin.KMSKeyStatus{
 		KeyID: keyID,
 	}

@@ -1816,7 +1815,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 			anonNetwork[anonEndpoint] = status
 		}
 		return anonNetwork
-
 	}

 	anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
@@ -1916,7 +1914,6 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 			return
 		}
 	}
-
 }

 func getTLSInfo() madmin.TLSInfo {
@@ -2042,7 +2039,6 @@ func assignPoolNumbers(servers []madmin.ServerProperties) {
 }

 func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
-
 	lambdaMap := make(map[string][]madmin.TargetIDStatus)

 	for _, tgt := range globalConfigTargetList.Targets() {
@@ -2284,7 +2280,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
 	}
 	if si.Mode == 0 {
 		// Not, set it to default.
-		si.Mode = 0600
+		si.Mode = 0o600
 	}
 	header, zerr := zip.FileInfoHeader(dummyFileInfo{
 		name: filename,
diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go
index 53901f5f6..fa66f060e 100644
--- a/cmd/admin-handlers_test.go
+++ b/cmd/admin-handlers_test.go
@@ -236,8 +236,8 @@ func TestServiceRestartHandler(t *testing.T) {

 // buildAdminRequest - helper function to build an admin API request.
 func buildAdminRequest(queryVal url.Values, method, path string,
-	contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
-
+	contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error,
+) {
 	req, err := newTestRequest(method,
 		adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
 		contentLength, bodySeeker)
@@ -380,5 +380,4 @@ func TestExtractHealInitParams(t *testing.T) {
 			}
 		}
 	}
-
 }
diff --git a/cmd/admin-heal-ops.go b/cmd/admin-heal-ops.go
index e7c61c961..c287a08ce 100644
--- a/cmd/admin-heal-ops.go
+++ b/cmd/admin-heal-ops.go
@@ -278,8 +278,8 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
 // background routine to clean up heal results after the
 // aforementioned duration.
 func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLayer) (
-	respBytes []byte, apiErr APIError, errMsg string) {
-
+	respBytes []byte, apiErr APIError, errMsg string,
+) {
 	if h.forceStarted {
 		_, apiErr = ahs.stopHealSequence(pathJoin(h.bucket, h.object))
 		if apiErr.Code != "" {
@@ -338,8 +338,8 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
 // representation. The clientToken helps ensure there aren't
 // conflicting clients fetching status.
 func (ahs *allHealState) PopHealStatusJSON(hpath string,
-	clientToken string) ([]byte, APIErrorCode) {
-
+	clientToken string) ([]byte, APIErrorCode,
+) {
 	// fetch heal state for given path
 	h, exists := ahs.getHealSequence(hpath)
 	if !exists {
@@ -453,8 +453,8 @@ type healSequence struct {
 // NewHealSequence - creates healSettings, assumes bucket and
 // objPrefix are already validated.
 func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
-	hs madmin.HealOpts, forceStart bool) *healSequence {
-
+	hs madmin.HealOpts, forceStart bool,
+) *healSequence {
 	reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
 	reqInfo.AppendTags("prefix", objPrefix)
 	ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
diff --git a/cmd/admin-router.go b/cmd/admin-router.go
index 4729d606e..d8b32b67a 100644
--- a/cmd/admin-router.go
+++ b/cmd/admin-router.go
@@ -38,7 +38,6 @@ type adminAPIHandlers struct{}

 // registerAdminRouter - Add handler functions for each service REST API routes.
 func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
-
 	adminAPI := adminAPIHandlers{}
 	// Admin router
 	adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()
diff --git a/cmd/api-errors.go b/cmd/api-errors.go
index 9312b2c4e..eaa2b2c45 100644
--- a/cmd/api-errors.go
+++ b/cmd/api-errors.go
@@ -2125,7 +2125,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 		return noError
 	}

-	var apiErr = errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
+	apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
 	e, ok := err.(dns.ErrInvalidBucketName)
 	if ok {
 		code := toAPIErrorCode(ctx, e)
@@ -2238,7 +2238,6 @@ func toAPIError(ctx context.Context, err error) APIError {
 			// since S3 only sends one Error XML response.
 			if len(e.Errors) >= 1 {
 				apiErr.Code = e.Errors[0].Reason
-
 			}
 		case azblob.StorageError:
 			apiErr = APIError{
diff --git a/cmd/api-headers_test.go b/cmd/api-headers_test.go
index cd589908f..3c3030f09 100644
--- a/cmd/api-headers_test.go
+++ b/cmd/api-headers_test.go
@@ -23,7 +23,7 @@ import (

 func TestNewRequestID(t *testing.T) {
 	// Ensure that it returns an alphanumeric result of length 16.
-	var id = mustGetRequestID(UTCNow())
+	id := mustGetRequestID(UTCNow())

 	if len(id) != 16 {
 		t.Fail()
diff --git a/cmd/api-response.go b/cmd/api-response.go
index f4fa79c42..86fa2d857 100644
--- a/cmd/api-response.go
+++ b/cmd/api-response.go
@@ -268,7 +268,6 @@ type StringMap map[string]string

 // MarshalXML - StringMap marshals into XML.
 func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
-
 	tokens := []xml.Token{start}

 	for key, value := range s {
@@ -417,8 +416,8 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
 // serialized to match XML and JSON API spec output.
 func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 	listbuckets := make([]Bucket, 0, len(buckets))
-	var data = ListBucketsResponse{}
-	var owner = Owner{
+	data := ListBucketsResponse{}
+	owner := Owner{
 		ID:          globalMinioDefaultOwnerID,
 		DisplayName: "minio",
 	}
@@ -439,14 +438,14 @@
 // generates an ListBucketVersions response for the said bucket with other enumerated options.
 func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
 	versions := make([]ObjectVersion, 0, len(resp.Objects))
-	var owner = Owner{
+	owner := Owner{
 		ID:          globalMinioDefaultOwnerID,
 		DisplayName: "minio",
 	}
-	var data = ListVersionsResponse{}
+	data := ListVersionsResponse{}

 	for _, object := range resp.Objects {
-		var content = ObjectVersion{}
+		content := ObjectVersion{}
 		if object.Name == "" {
 			continue
 		}
@@ -486,7 +485,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim

 	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
 	for _, prefix := range resp.Prefixes {
-		var prefixItem = CommonPrefix{}
+		prefixItem := CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
 		prefixes = append(prefixes, prefixItem)
 	}
@@ -497,14 +496,14 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 // generates an ListObjectsV1 response for the said bucket with other enumerated options.
 func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
 	contents := make([]Object, 0, len(resp.Objects))
-	var owner = Owner{
+	owner := Owner{
 		ID:          globalMinioDefaultOwnerID,
 		DisplayName: "minio",
 	}
-	var data = ListObjectsResponse{}
+	data := ListObjectsResponse{}

 	for _, object := range resp.Objects {
-		var content = Object{}
+		content := Object{}
 		if object.Name == "" {
 			continue
 		}
@@ -535,7 +534,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy

 	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
 	for _, prefix := range resp.Prefixes {
-		var prefixItem = CommonPrefix{}
+		prefixItem := CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
 		prefixes = append(prefixes, prefixItem)
 	}
@@ -546,14 +545,14 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
 // generates an ListObjectsV2 response for the said bucket with other enumerated options.
 func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
 	contents := make([]Object, 0, len(objects))
-	var owner = Owner{
+	owner := Owner{
 		ID:          globalMinioDefaultOwnerID,
 		DisplayName: "minio",
 	}
-	var data = ListObjectsV2Response{}
+	data := ListObjectsV2Response{}

 	for _, object := range objects {
-		var content = Object{}
+		content := Object{}
 		if object.Name == "" {
 			continue
 		}
@@ -608,7 +607,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,

 	commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
 	for _, prefix := range prefixes {
-		var prefixItem = CommonPrefix{}
+		prefixItem := CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
 		commonPrefixes = append(commonPrefixes, prefixItem)
 	}
@@ -821,8 +820,8 @@ func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIE
 // but accepts the error message directly (this allows messages to be
 // dynamically generated.)
 func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
-	errBody string, reqURL *url.URL) {
-
+	errBody string, reqURL *url.URL,
+) {
 	reqInfo := logger.GetReqInfo(ctx)
 	errorResponse := APIErrorResponse{
 		Code: err.Code,
diff --git a/cmd/api-router.go b/cmd/api-router.go
index f19ba7b52..58c5ef363 100644
--- a/cmd/api-router.go
+++ b/cmd/api-router.go
@@ -484,7 +484,6 @@ func registerAPIRouter(router *mux.Router) {
 	// If none of the routes match add default error handler routes
 	apiRouter.NotFoundHandler = collectAPIStats("notfound", httpTraceAll(errorResponseHandler))
 	apiRouter.MethodNotAllowedHandler = collectAPIStats("methodnotallowed", httpTraceAll(methodNotAllowedHandler("S3")))
-
 }

 // corsHandler handler for CORS (Cross Origin Resource Sharing)
diff --git a/cmd/api-utils_test.go b/cmd/api-utils_test.go
index 9608b282a..b8bc050fe 100644
--- a/cmd/api-utils_test.go
+++ b/cmd/api-utils_test.go
@@ -44,7 +44,6 @@ func TestS3EncodeName(t *testing.T) {
 			if testCase.expectedOutput != outputText {
 				t.Errorf("Expected `%s`, got `%s`", testCase.expectedOutput, outputText)
 			}
-
 		})
 	}
 }
diff --git a/cmd/background-heal-ops.go b/cmd/background-heal-ops.go
index fe2b3bfa6..aa4f540ee 100644
--- a/cmd/background-heal-ops.go
+++ b/cmd/background-heal-ops.go
@@ -115,7 +115,6 @@ func newHealRoutine() *healRoutine {
 		tasks:   make(chan healTask),
 		workers: workers,
 	}
-
 }

 // healDiskFormat - heals format.json, return value indicates if a
diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go
index 19e7f042f..16a345dfe 100644
--- a/cmd/background-newdisks-heal-ops.go
+++ b/cmd/background-newdisks-heal-ops.go
@@ -305,7 +305,6 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
 		}
 	}
 	return disksToHeal
-
 }

 // monitorLocalDisksAndHeal - ensures that detected new disks are healed
diff --git a/cmd/bitrot.go b/cmd/bitrot.go
index 81119c1da..a31ce6a5b 100644
--- a/cmd/bitrot.go
+++ b/cmd/bitrot.go
@@ -212,7 +212,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
 // bitrotSelfTest tries to catch any issue in the bitrot implementation
 // early instead of silently corrupting data.
 func bitrotSelfTest() {
-	var checksums = map[BitrotAlgorithm]string{
+	checksums := map[BitrotAlgorithm]string{
 		SHA256:         "a7677ff19e0182e4d52e3a3db727804abc82a5818749336369552e54b838b004",
 		BLAKE2b512:     "e519b7d84b1c3c917985f544773a35cf265dcab10948be3550320d156bab612124a5ae2ae5a8c73c0eea360f68b0e28136f26e858756dbfe7375a7389f26c669",
 		HighwayHash256: "39c0407ed3f01b18d22c85db4aeff11e060ca5f43131b0126731ca197cd42313",
diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go
index 9a1fa03cf..5283a99bf 100644
--- a/cmd/bucket-handlers.go
+++ b/cmd/bucket-handlers.go
@@ -449,7 +449,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		return
 	}

-	var objectsToDelete = map[ObjectToDelete]int{}
+	objectsToDelete := map[ObjectToDelete]int{}
 	getObjectInfoFn := objectAPI.GetObjectInfo
 	if api.CacheAPI() != nil {
 		getObjectInfoFn = api.CacheAPI().GetObjectInfo
@@ -606,8 +606,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 	}

 	// Generate response
-	var deleteErrors = make([]DeleteError, 0, len(deleteObjectsReq.Objects))
-	var deletedObjects = make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
+	deleteErrors := make([]DeleteError, 0, len(deleteObjectsReq.Objects))
+	deletedObjects := make([]DeletedObject, 0, len(deleteObjectsReq.Objects))
 	for _, deleteResult := range deleteResults {
 		if deleteResult.errInfo.Code != "" {
 			deleteErrors = append(deleteErrors, deleteResult.errInfo)
@@ -1806,7 +1806,8 @@ func (api objectAPIHandlers) ResetBucketReplicationStateHandler(w http.ResponseW
 	tgtArns := config.FilterTargetArns(
 		replication.ObjectOpts{
 			OpType:    replication.ResyncReplicationType,
-			TargetArn: arn})
+			TargetArn: arn,
+		})

 	if len(tgtArns) == 0 {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrBadRequest, InvalidArgument{
diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go
index 0c69f1907..f0e85d883 100644
--- a/cmd/bucket-handlers_test.go
+++ b/cmd/bucket-handlers_test.go
@@ -81,8 +81,8 @@ func TestGetBucketLocationHandler(t *testing.T) {
 }

 func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	// test cases with sample input and expected output.
 	testCases := []struct {
 		bucketName string
@@ -163,7 +163,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 		recV2 := httptest.NewRecorder()
 		// construct HTTP request for PUT bucket policy endpoint.
 		reqV2, err := newTestSignedRequestV2(http.MethodGet, getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
-
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: %v", i+1, instanceType, err)
 		}
@@ -210,7 +209,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 	nilBucket := "dummy-bucket"

 	nilReq, err := newTestRequest(http.MethodGet, getBucketLocationURL("", nilBucket), 0, nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -225,8 +223,8 @@ func TestHeadBucketHandler(t *testing.T) {
 }

 func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	// test cases with sample input and expected output.
 	testCases := []struct {
 		bucketName string
@@ -282,7 +280,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 		recV2 := httptest.NewRecorder()
 		// construct HTTP request for PUT bucket policy endpoint.
 		reqV2, err := newTestSignedRequestV2(http.MethodHead, getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
-
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: %v", i+1, instanceType, err)
 		}
@@ -297,7 +294,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api

 	// Test for Anonymous/unsigned http request.
 	anonReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", bucketName), 0, nil)
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": %v",
 			instanceType, bucketName, err)
@@ -315,7 +311,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 	nilBucket := "dummy-bucket"

 	nilReq, err := newTestRequest(http.MethodHead, getHEADBucketURL("", nilBucket), 0, nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -331,8 +326,8 @@ func TestListMultipartUploadsHandler(t *testing.T) {
 }
 // testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
 func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
 	// and success responses.
 	testCases := []struct {
@@ -552,7 +547,6 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 		testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)

 	nilReq, err := newTestRequest(http.MethodGet, url, 0, nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -568,8 +562,8 @@ func TestListBucketsHandler(t *testing.T) {
 }
 // testListBucketsHandler - Tests validate listing of buckets.
 func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	testCases := []struct {
 		bucketName string
 		accessKey  string
@@ -615,7 +609,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap

 		// verify response for V2 signed HTTP request.
 		reqV2, err := newTestSignedRequestV2(http.MethodGet, getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
-
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: %v", i+1, instanceType, err)
 		}
@@ -630,7 +623,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 	// Test for Anonymous/unsigned http request.
 	// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
 	anonReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
 	}
@@ -646,7 +638,6 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 	// The only aim is to generate an HTTP request in a way that the relevant/registered end point is evoked/called.
 	nilReq, err := newTestRequest(http.MethodGet, getListBucketURL(""), 0, nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -661,8 +652,8 @@ func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
 }

 func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	var err error

 	contentBytes := []byte("hello")
diff --git a/cmd/bucket-lifecycle-handlers_test.go b/cmd/bucket-lifecycle-handlers_test.go
index 28f0b85bd..1ecc5f532 100644
--- a/cmd/bucket-lifecycle-handlers_test.go
+++ b/cmd/bucket-lifecycle-handlers_test.go
@@ -150,8 +150,8 @@ func TestBucketLifecycle(t *testing.T) {
 // Simple tests of bucket lifecycle: PUT, GET, DELETE.
 // Tests are related and the order is important.
 func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	creds auth.Credentials, t *testing.T) {
-
+	creds auth.Credentials, t *testing.T,
+) {
 	// test cases with sample input and expected output.
 	testCases := []struct {
 		method string
@@ -266,8 +266,8 @@ func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRo
 		lifecycleResponse []byte
 		errorResponse     APIErrorResponse
 		shouldPass        bool
-	}) {
-
+	},
+) {
 	for i, testCase := range testCases {
 		// initialize httptest Recorder, this records any mutations to response writer inside the handler.
 		rec := httptest.NewRecorder()
diff --git a/cmd/bucket-lifecycle.go b/cmd/bucket-lifecycle.go
index d6eee67ea..3f2206a96 100644
--- a/cmd/bucket-lifecycle.go
+++ b/cmd/bucket-lifecycle.go
@@ -178,9 +178,7 @@ func (t *transitionState) queueTransitionTask(oi ObjectInfo) {
 	}
 }

-var (
-	globalTransitionState *transitionState
-)
+var globalTransitionState *transitionState

 func newTransitionState(ctx context.Context, objAPI ObjectLayer) *transitionState {
 	return &transitionState{
@@ -466,9 +464,7 @@ func (sp *SelectParameters) IsEmpty() bool {
 	return sp == nil
 }

-var (
-	selectParamsXMLName = "SelectParameters"
-)
+var selectParamsXMLName = "SelectParameters"

 // UnmarshalXML - decodes XML data.
 func (sp *SelectParameters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go
index 03b8acad1..85ef66ff6 100644
--- a/cmd/bucket-policy-handlers_test.go
+++ b/cmd/bucket-policy-handlers_test.go
@@ -105,7 +105,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
 	bucketName1 := fmt.Sprintf("%s-1", bucketName)

 	const n = 100
-	var start = make(chan struct{})
+	start := make(chan struct{})
 	var ok, errs int
 	var wg sync.WaitGroup
 	var mu sync.Mutex
@@ -147,8 +147,8 @@ func TestPutBucketPolicyHandler(t *testing.T) {
 // testPutBucketPolicyHandler - Test for Bucket policy end point.
 func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	bucketName1 := fmt.Sprintf("%s-1", bucketName)
 	if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
 		t.Fatal(err)
 	}
@@ -333,7 +333,6 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 	// create unsigned HTTP request for PutBucketPolicyHandler.
 	anonReq, err := newTestRequest(http.MethodPut, getPutPolicyURL("", bucketName),
 		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": %v",
 			instanceType, bucketName, err)
@@ -352,14 +351,12 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

 	nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", nilBucket),
 		0, nil, "", "", nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
 	// execute the object layer set to `nil` test.
 	// `ExecObjectLayerAPINilTest` manages the operation.
 	ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
-
 }

 // Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
@@ -465,7 +462,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// construct HTTP request for PUT bucket policy endpoint.
 		reqV4, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", testCase.bucketName),
 			0, nil, testCase.accessKey, testCase.secretKey, nil)
-
 		if err != nil {
 			t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: %v", i+1, err)
 		}
@@ -540,7 +536,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 	// Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
 	// create unsigned HTTP request for PutBucketPolicyHandler.
 	anonReq, err := newTestRequest(http.MethodGet, getPutPolicyURL("", bucketName), 0, nil)
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": %v",
 			instanceType, bucketName, err)
@@ -559,7 +554,6 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string

 	nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetPolicyURL("", nilBucket),
 		0, nil, "", "", nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
@@ -575,8 +569,8 @@ func TestDeleteBucketPolicyHandler(t *testing.T) {

 // testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.
 func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
-
+	credentials auth.Credentials, t *testing.T,
+) {
 	// template for constructing HTTP request body for PUT bucket policy.
 	bucketPolicyTemplate := `{
    "Version": "2012-10-17",
@@ -743,7 +737,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str
 	// Bucket policy related functions doesn't support anonymous requests, setting policies shouldn't make a difference.
 	// create unsigned HTTP request for PutBucketPolicyHandler.
 	anonReq, err := newTestRequest(http.MethodDelete, getPutPolicyURL("", bucketName), 0, nil)
-
 	if err != nil {
 		t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": %v",
 			instanceType, bucketName, err)
@@ -762,7 +755,6 @@ func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName str

 	nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeletePolicyURL("", nilBucket),
 		0, nil, "", "", nil)
-
 	if err != nil {
 		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
 	}
diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go
index b16c5c2d4..048e37635 100644
--- a/cmd/bucket-policy.go
+++ b/cmd/bucket-policy.go
@@ -200,7 +200,7 @@ func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.Buc
 	}

 	var policyInfo miniogopolicy.BucketAccessPolicy
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(data, &policyInfo); err != nil {
 		// This should not happen because data is valid to JSON data.
 		return nil, err
@@ -218,7 +218,7 @@ func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*
 	}

 	var bucketPolicy policy.Policy
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	if err = json.Unmarshal(data, &bucketPolicy); err != nil {
 		// This should not happen because data is valid to JSON data.
 		return nil, err
diff --git a/cmd/bucket-replication-stats.go b/cmd/bucket-replication-stats.go
index 8959fd520..3f0c26721 100644
--- a/cmd/bucket-replication-stats.go
+++ b/cmd/bucket-replication-stats.go
@@ -55,7 +55,6 @@ func (r *ReplicationStats) Delete(bucket string) {
 	r.ulock.Lock()
 	defer r.ulock.Unlock()
 	delete(r.UsageCache, bucket)
-
 }

 // UpdateReplicaStat updates in-memory replica statistics with new values.
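Editor's note: nearly every hunk above is a mechanical formatting rewrite rather than a behavior change. A minimal, self-contained Go sketch of the three most common rewrites in this patch follows; the file and identifiers are hypothetical, for illustration only, and the rule descriptions are inferred from the hunks themselves:

package main

import (
	"fmt"
	"os"
)

// Inside function bodies, this patch replaces `var x = expr` with a
// short variable declaration, and octal literals gain the 0o prefix.
func writeConfig(path string) error {
	data := []byte("key=value")            // was: var data = []byte("key=value")
	return os.WriteFile(path, data, 0o600) // was: ..., 0600)
}

// When a signature spans multiple lines, the closing parenthesis of the
// declaration moves to its own line (note the trailing comma) and the
// blank line that used to follow the opening brace is dropped.
func buildRequest(method, path string,
	contentLength int64,
) (string, error) {
	return fmt.Sprintf("%s %s (%d bytes)", method, path, contentLength), nil
}

func main() {
	if err := writeConfig("example.conf"); err != nil {
		fmt.Println(err)
	}
	fmt.Println(buildRequest("GET", "/bucket/object", 0))
}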
diff --git a/cmd/bucket-replication-utils_test.go b/cmd/bucket-replication-utils_test.go index c1b821ff8..beed63a70 100644 --- a/cmd/bucket-replication-utils_test.go +++ b/cmd/bucket-replication-utils_test.go @@ -77,7 +77,8 @@ var replicatedInfosTests = []struct { ReplicationStatus: replication.Failed, OpType: replication.ObjectReplicationType, ReplicationAction: replicateAll, - }}, + }, + }, expectedCompletedSize: 249, expectedReplicationStatusInternal: "arn1=COMPLETED;arn2=FAILED;", expectedReplicationStatus: replication.Failed, @@ -102,7 +103,8 @@ var replicatedInfosTests = []struct { ReplicationStatus: replication.Failed, OpType: replication.ObjectReplicationType, ReplicationAction: replicateAll, - }}, + }, + }, expectedCompletedSize: 0, expectedReplicationStatusInternal: "arn1=PENDING;arn2=FAILED;", expectedReplicationStatus: replication.Failed, @@ -182,7 +184,6 @@ var parseReplicationDecisionTest = []struct { func TestParseReplicateDecision(t *testing.T) { for i, test := range parseReplicationDecisionTest { dsc, err := parseReplicateDecision(test.expDsc.String()) - if err != nil { if test.expErr != err { t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr) diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 26e08c46e..90e16eae6 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -139,6 +139,7 @@ func (o mustReplicateOptions) ReplicationStatus() (s replication.StatusType) { } return s } + func (o mustReplicateOptions) isExistingObjectReplication() bool { return o.opType == replication.ExistingObjectReplicationType } @@ -146,6 +147,7 @@ func (o mustReplicateOptions) isExistingObjectReplication() bool { func (o mustReplicateOptions) isMetadataReplication() bool { return o.opType == replication.MetadataReplicationType } + func getMustReplicateOptions(o ObjectInfo, op replication.Type, opts ObjectOptions) mustReplicateOptions { if !op.Valid() { op = replication.ObjectReplicationType @@ -441,7 +443,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj } } - var eventName = event.ObjectReplicationComplete + eventName := event.ObjectReplicationComplete if replicationStatus == replication.Failed { eventName = event.ObjectReplicationFailed } @@ -523,7 +525,8 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI VersionID: versionID, Internal: miniogo.AdvancedGetOptions{ ReplicationProxyRequest: "false", - }}); isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) { + }, + }); isErrMethodNotAllowed(ErrorRespToObjectError(err, dobj.Bucket, dobj.ObjectName)) { if dobj.VersionID == "" { rinfo.ReplicationStatus = replication.Completed return @@ -902,7 +905,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje // FIXME: add support for missing replication events // - event.ObjectReplicationMissedThreshold // - event.ObjectReplicationReplicatedAfterThreshold - var eventName = event.ObjectReplicationComplete + eventName := event.ObjectReplicationComplete if rinfos.ReplicationStatus() == replication.Failed { eventName = event.ObjectReplicationFailed } @@ -1058,7 +1061,8 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object VersionID: objInfo.VersionID, Internal: miniogo.AdvancedGetOptions{ ReplicationProxyRequest: "false", - }}) + }, + }) if cerr == nil { rAction = getReplicationAction(objInfo, oi, ri.OpType) rinfo.ReplicationStatus = replication.Completed @@ -1117,7 
+1121,8 @@ func replicateObjectToTarget(ctx context.Context, ri ReplicateObjectInfo, object Internal: miniogo.AdvancedPutOptions{ SourceVersionID: objInfo.VersionID, ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside - }} + }, + } if _, err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); err != nil { rinfo.ReplicationStatus = replication.Failed logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err)) @@ -1213,7 +1218,8 @@ func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket, SourceMTime: objInfo.ModTime, // always set this to distinguish between `mc mirror` replication and serverside ReplicationRequest: true, - }}) + }, + }) return err } @@ -1357,7 +1363,6 @@ func (p *ReplicationPool) AddWorker() { return } } - } // AddExistingObjectReplicateWorker adds a worker to queue existing objects that need to be sync'd @@ -1671,6 +1676,7 @@ type replicationConfig struct { func (c replicationConfig) Empty() bool { return c.Config == nil } + func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool { return c.Config.Replicate(opts) } @@ -1694,7 +1700,8 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc *Repli DeleteMarker: oi.DeleteMarker, VersionID: oi.VersionID, OpType: replication.DeleteReplicationType, - ExistingObject: true} + ExistingObject: true, + } tgtArns := c.Config.FilterTargetArns(opts) // indicates no matching target with Existing object replication enabled. diff --git a/cmd/bucket-replication_test.go b/cmd/bucket-replication_test.go index 8ffae9723..7ded8fc8b 100644 --- a/cmd/bucket-replication_test.go +++ b/cmd/bucket-replication_test.go @@ -75,7 +75,8 @@ var replicationConfigTests = []struct { }, { // 4. existing object replication enabled, versioning enabled; no reset in progress name: "existing object replication enabled, versioning enabled; no reset in progress", - info: ObjectInfo{Size: 100, + info: ObjectInfo{ + Size: 100, ReplicationStatus: replication.Completed, VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", }, @@ -93,174 +94,192 @@ func TestReplicationResync(t *testing.T) { } } -var start = UTCNow().AddDate(0, 0, -1) -var replicationConfigTests2 = []struct { - info ObjectInfo - name string - rcfg replicationConfig - dsc ReplicateDecision - tgtStatuses map[string]replication.StatusType - expectedSync bool -}{ - { // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies - // 1: Pending replication - name: "existing object replication on object in Pending replication status", - info: ObjectInfo{Size: 100, - ReplicationStatusInternal: "arn1:PENDING;", - ReplicationStatus: replication.Pending, - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - }, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - }}}}, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - expectedSync: true, - }, - - { // 2. 
replication status Failed - name: "existing object replication on object in Failed replication status", - info: ObjectInfo{Size: 100, - ReplicationStatusInternal: "arn1:FAILED", - ReplicationStatus: replication.Failed, - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - }, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - }}}}, - expectedSync: true, - }, - { // 3. replication status unset - name: "existing object replication on pre-existing unreplicated object", - info: ObjectInfo{Size: 100, - ReplicationStatus: replication.StatusType(""), - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - }, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - }}}}, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - expectedSync: true, - }, - { // 4. replication status Complete - name: "existing object replication on object in Completed replication status", - info: ObjectInfo{Size: 100, - ReplicationStatusInternal: "arn1:COMPLETED", - ReplicationStatus: replication.Completed, - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - }, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}}, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - }}}}, - expectedSync: false, - }, - { // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present - name: "existing object replication with reset in progress and object in Pending status", - info: ObjectInfo{Size: 100, - ReplicationStatusInternal: "arn1:PENDING;", - ReplicationStatus: replication.Pending, - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())}, - }, - expectedSync: true, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - ResetID: "xyz", - ResetBeforeDate: UTCNow(), - }}}, - }, - }, - { // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present - name: "existing object replication with reset in progress and object in Failed status", - info: ObjectInfo{Size: 100, - ReplicationStatusInternal: "arn1:FAILED;", - ReplicationStatus: replication.Failed, - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())}, - }, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - ResetID: "xyz", - ResetBeforeDate: UTCNow(), - }}}, - }, - expectedSync: true, - }, - { // 7. 
existing object replication enabled, versioning enabled, replication status unset & reset ID present - name: "existing object replication with reset in progress and object never replicated before", - info: ObjectInfo{Size: 100, - ReplicationStatus: replication.StatusType(""), - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())}, - }, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - ResetID: "xyz", - ResetBeforeDate: UTCNow(), - }}}, +var ( + start = UTCNow().AddDate(0, 0, -1) + replicationConfigTests2 = []struct { + info ObjectInfo + name string + rcfg replicationConfig + dsc ReplicateDecision + tgtStatuses map[string]replication.StatusType + expectedSync bool + }{ + { // Cases 1-4: existing object replication enabled, versioning enabled, no reset - replication status varies + // 1: Pending replication + name: "existing object replication on object in Pending replication status", + info: ObjectInfo{ + Size: 100, + ReplicationStatusInternal: "arn1:PENDING;", + ReplicationStatus: replication.Pending, + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + }, + rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + }}}}, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + expectedSync: true, }, - expectedSync: true, - }, + { // 2. replication status Failed + name: "existing object replication on object in Failed replication status", + info: ObjectInfo{ + Size: 100, + ReplicationStatusInternal: "arn1:FAILED", + ReplicationStatus: replication.Failed, + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + }, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + }}}}, + expectedSync: true, + }, + { // 3. replication status unset + name: "existing object replication on pre-existing unreplicated object", + info: ObjectInfo{ + Size: 100, + ReplicationStatus: replication.StatusType(""), + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + }, + rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + }}}}, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + expectedSync: true, + }, + { // 4. replication status Complete + name: "existing object replication on object in Completed replication status", + info: ObjectInfo{ + Size: 100, + ReplicationStatusInternal: "arn1:COMPLETED", + ReplicationStatus: replication.Completed, + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + }, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", false, false)}}, + rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + }}}}, + expectedSync: false, + }, + { // 5. 
existing object replication enabled, versioning enabled, replication status Pending & reset ID present + name: "existing object replication with reset in progress and object in Pending status", + info: ObjectInfo{ + Size: 100, + ReplicationStatusInternal: "arn1:PENDING;", + ReplicationStatus: replication.Pending, + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())}, + }, + expectedSync: true, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + rcfg: replicationConfig{ + remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + ResetID: "xyz", + ResetBeforeDate: UTCNow(), + }}}, + }, + }, + { // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present + name: "existing object replication with reset in progress and object in Failed status", + info: ObjectInfo{ + Size: 100, + ReplicationStatusInternal: "arn1:FAILED;", + ReplicationStatus: replication.Failed, + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())}, + }, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + rcfg: replicationConfig{ + remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + ResetID: "xyz", + ResetBeforeDate: UTCNow(), + }}}, + }, + expectedSync: true, + }, + { // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present + name: "existing object replication with reset in progress and object never replicated before", + info: ObjectInfo{ + Size: 100, + ReplicationStatus: replication.StatusType(""), + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())}, + }, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + rcfg: replicationConfig{ + remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + ResetID: "xyz", + ResetBeforeDate: UTCNow(), + }}}, + }, - { // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present - name: "existing object replication enabled - reset in progress for an object in Completed status", - info: ObjectInfo{Size: 100, - ReplicationStatusInternal: "arn1:COMPLETED;", - ReplicationStatus: replication.Completed, - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df8", - UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())}, + expectedSync: true, }, - expectedSync: true, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - ResetID: "xyz", - ResetBeforeDate: UTCNow(), - }}}, - }, - }, - { // 9. 
existing object replication enabled, versioning enabled, replication status Pending & reset ID different - name: "existing object replication enabled, newer reset in progress on object in Pending replication status", - info: ObjectInfo{Size: 100, - ReplicationStatusInternal: "arn1:PENDING;", - ReplicationStatus: replication.Pending, - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")}, - ModTime: UTCNow().AddDate(0, 0, -2), + { // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present + name: "existing object replication enabled - reset in progress for an object in Completed status", + info: ObjectInfo{ + Size: 100, + ReplicationStatusInternal: "arn1:COMPLETED;", + ReplicationStatus: replication.Completed, + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df8", + UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;abc", UTCNow().AddDate(0, -1, 0).String())}, + }, + expectedSync: true, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + rcfg: replicationConfig{ + remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + ResetID: "xyz", + ResetBeforeDate: UTCNow(), + }}}, + }, }, - expectedSync: true, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - ResetID: "xyz", - ResetBeforeDate: UTCNow(), - }}}, + { // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different + name: "existing object replication enabled, newer reset in progress on object in Pending replication status", + info: ObjectInfo{ + Size: 100, + ReplicationStatusInternal: "arn1:PENDING;", + + ReplicationStatus: replication.Pending, + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", UTCNow().AddDate(0, 0, -1).Format(http.TimeFormat), "abc")}, + ModTime: UTCNow().AddDate(0, 0, -2), + }, + expectedSync: true, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + rcfg: replicationConfig{ + remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + ResetID: "xyz", + ResetBeforeDate: UTCNow(), + }}}, + }, }, - }, - { // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done - name: "reset done on object in Completed Status - ineligbile for re-replication", - info: ObjectInfo{Size: 100, - ReplicationStatusInternal: "arn1:COMPLETED;", - ReplicationStatus: replication.Completed, - VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", - UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")}, + { // 10. 
existing object replication enabled, versioning enabled, replication status Complete & reset done + name: "reset done on object in Completed Status - ineligbile for re-replication", + info: ObjectInfo{ + Size: 100, + ReplicationStatusInternal: "arn1:COMPLETED;", + ReplicationStatus: replication.Completed, + VersionID: "a3348c34-c352-4498-82f0-1098e8b34df9", + UserDefined: map[string]string{xhttp.MinIOReplicationResetStatus: fmt.Sprintf("%s;%s", start.Format(http.TimeFormat), "xyz")}, + }, + expectedSync: false, + dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, + rcfg: replicationConfig{ + remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ + Arn: "arn1", + ResetID: "xyz", + ResetBeforeDate: start, + }}}, + }, }, - expectedSync: false, - dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, - rcfg: replicationConfig{remotes: &madmin.BucketTargets{Targets: []madmin.BucketTarget{{ - Arn: "arn1", - ResetID: "xyz", - ResetBeforeDate: start, - }}}, - }, - }, -} + } +) func TestReplicationResyncwrapper(t *testing.T) { for i, test := range replicationConfigTests2 { diff --git a/cmd/bucket-targets.go b/cmd/bucket-targets.go index 5e38c9ea1..ade109c9b 100644 --- a/cmd/bucket-targets.go +++ b/cmd/bucket-targets.go @@ -414,7 +414,7 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu return nil, nil } data = cdata - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if len(cmetadata) != 0 { if err := json.Unmarshal(cmetadata, &meta); err != nil { return nil, err diff --git a/cmd/bucket-versioning-handler.go b/cmd/bucket-versioning-handler.go index d3e74d4b2..49e9d815d 100644 --- a/cmd/bucket-versioning-handler.go +++ b/cmd/bucket-versioning-handler.go @@ -144,5 +144,4 @@ func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r // Write bucket versioning configuration to client writeSuccessResponseXML(w, configData) - } diff --git a/cmd/common-main.go b/cmd/common-main.go index d2225d179..ec71f067d 100644 --- a/cmd/common-main.go +++ b/cmd/common-main.go @@ -68,8 +68,11 @@ import ( // serverDebugLog will enable debug printing var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn -var shardDiskTimeDelta time.Duration -var defaultAWSCredProvider []credentials.Provider + +var ( + shardDiskTimeDelta time.Duration + defaultAWSCredProvider []credentials.Provider +) func init() { if runtime.GOOS == "windows" { @@ -362,7 +365,6 @@ func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() s } func handleCommonCmdArgs(ctx *cli.Context) { - // Get "json" flag from command line argument and // enable json and quite modes if json flag is turned on. globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json") @@ -669,7 +671,7 @@ func handleCommonEnvVars() { publicIPs := env.Get(config.EnvPublicIPs, "") if len(publicIPs) != 0 { minioEndpoints := strings.Split(publicIPs, config.ValueSeparator) - var domainIPs = set.NewStringSet() + domainIPs := set.NewStringSet() for _, endpoint := range minioEndpoints { if net.ParseIP(endpoint) == nil { // Checking if the IP is a DNS entry. 
diff --git a/cmd/common-main.go b/cmd/common-main.go
index d2225d179..ec71f067d 100644
--- a/cmd/common-main.go
+++ b/cmd/common-main.go
@@ -68,8 +68,11 @@ import (
// serverDebugLog will enable debug printing
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
-var shardDiskTimeDelta time.Duration
-var defaultAWSCredProvider []credentials.Provider
+
+var (
+	shardDiskTimeDelta     time.Duration
+	defaultAWSCredProvider []credentials.Provider
+)

func init() {
	if runtime.GOOS == "windows" {
@@ -362,7 +365,6 @@ func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() s
}

func handleCommonCmdArgs(ctx *cli.Context) {
-
	// Get "json" flag from command line argument and
	// enable json and quite modes if json flag is turned on.
	globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
@@ -669,7 +671,7 @@ func handleCommonEnvVars() {
	publicIPs := env.Get(config.EnvPublicIPs, "")
	if len(publicIPs) != 0 {
		minioEndpoints := strings.Split(publicIPs, config.ValueSeparator)
-		var domainIPs = set.NewStringSet()
+		domainIPs := set.NewStringSet()
		for _, endpoint := range minioEndpoints {
			if net.ParseIP(endpoint) == nil {
				// Checking if the IP is a DNS entry.
@@ -786,7 +788,7 @@ func handleCommonEnvVars() {
			logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(config.EnvKESServerCA, globalCertsCADir.Get())))
		}
-		var defaultKeyID = env.Get(config.EnvKESKeyName, "")
+		defaultKeyID := env.Get(config.EnvKESKeyName, "")
		KMS, err := kms.NewWithConfig(kms.Config{
			Endpoints:    endpoints,
			DefaultKeyID: defaultKeyID,
diff --git a/cmd/common-main_test.go b/cmd/common-main_test.go
index 49bd5e0ae..f7abdfb9a 100644
--- a/cmd/common-main_test.go
+++ b/cmd/common-main_test.go
@@ -73,7 +73,8 @@ func Test_minioEnvironFromFile(t *testing.T) {
		expectedErr  bool
		expectedEkvs []envKV
	}{
-		{`
+		{
+			`
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123`,
			false,
@@ -89,7 +90,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
			},
		},
		// Value with double quotes
-		{`export MINIO_ROOT_USER="minio"`,
+		{
+			`export MINIO_ROOT_USER="minio"`,
			false,
			[]envKV{
				{
@@ -99,7 +101,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
			},
		},
		// Value with single quotes
-		{`export MINIO_ROOT_USER='minio'`,
+		{
+			`export MINIO_ROOT_USER='minio'`,
			false,
			[]envKV{
				{
@@ -108,7 +111,8 @@ export MINIO_ROOT_PASSWORD=minio123`,
				},
			},
		},
-		{`
+		{
+			`
MINIO_ROOT_USER=minio
MINIO_ROOT_PASSWORD=minio123`,
			false,
@@ -123,7 +127,8 @@ MINIO_ROOT_PASSWORD=minio123`,
			},
		},
	},
-	{`
+	{
+		`
export MINIO_ROOT_USERminio
export MINIO_ROOT_PASSWORD=minio123`,
		true,
diff --git a/cmd/config-current.go b/cmd/config-current.go
index 0e4176294..0790b22db 100644
--- a/cmd/config-current.go
+++ b/cmd/config-current.go
@@ -50,7 +50,7 @@ import (
)

func initHelp() {
-	var kvs = map[string]config.KVS{
+	kvs := map[string]config.KVS{
		config.EtcdSubSys:        etcd.DefaultKVS,
		config.CacheSubSys:       cache.DefaultKVS,
		config.CompressionSubSys: compress.DefaultKVS,
@@ -78,7 +78,7 @@ func initHelp() {
	config.RegisterDefaultKVS(kvs)

	// Captures help for each sub-system
-	var helpSubSys = config.HelpKVS{
+	helpSubSys := config.HelpKVS{
		config.HelpKV{
			Key:         config.SiteSubSys,
			Description: "label the server and its location",
@@ -205,7 +205,7 @@ func initHelp() {
		}
	}

-	var helpMap = map[string]config.HelpKVS{
+	helpMap := map[string]config.HelpKVS{
		"":                   helpSubSys, // Help for all sub-systems.
		config.SiteSubSys:    config.SiteHelp,
		config.RegionSubSys:  config.RegionHelp,
diff --git a/cmd/config-dir.go b/cmd/config-dir.go
index df3ab9d13..b03b175b8 100644
--- a/cmd/config-dir.go
+++ b/cmd/config-dir.go
@@ -84,7 +84,7 @@ func (dir *ConfigDir) Get() string {

// Attempts to create all directories, ignores any permission denied errors.
func mkdirAllIgnorePerm(path string) error {
-	err := os.MkdirAll(path, 0700)
+	err := os.MkdirAll(path, 0o700)
	if err != nil {
		// It is possible in kubernetes like deployments this directory
		// is already mounted and is not writable, ignore any write errors.
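The `mkdirAllIgnorePerm` hunk above introduces the `0o` prefix for octal literals (available since Go 1.13). The two spellings denote the same number; only the notation changes. A small self-contained illustration, with a hypothetical path:

package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Println(0o700 == 0700) // true: same value, clearer prefix
	// Same permission bits either way; the process umask may still mask them.
	if err := os.MkdirAll("/tmp/demo-config-dir", 0o700); err != nil {
		fmt.Println("mkdir failed:", err)
	}
}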
diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go
index 500392168..68d51bf3d 100644
--- a/cmd/config-migrate.go
+++ b/cmd/config-migrate.go
@@ -2445,12 +2445,12 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
		return err
	}
	// if errConfigNotFound proceed to migrate..
-	var configFiles = []string{
+	configFiles := []string{
		getConfigFile(),
		getConfigFile() + ".deprecated",
		configFile,
	}
-	var config = &serverConfigV27{}
+	config := &serverConfigV27{}
	for _, cfgFile := range configFiles {
		if _, err = Load(cfgFile, config); err != nil {
			if !osIsNotExist(err) && !osIsPermission(err) {
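The `configFiles`/`config` rewrite above is the most common pattern in this diff: `var x = <expr>` inside a function body becomes `x := <expr>`, including for slice and pointer literals. A sketch under hypothetical names:

package main

import "fmt"

type serverConfig struct{ Version string }

func main() {
	// Before: var paths = []string{"a.json", "b.json"}
	paths := []string{"a.json", "b.json"} // preferred short declaration
	// Before: var cfg = &serverConfig{}
	cfg := &serverConfig{Version: "27"}
	fmt.Println(paths, cfg.Version)
}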
diff --git a/cmd/config-migrate_test.go b/cmd/config-migrate_test.go
index 70ecb2b4b..0132c198d 100644
--- a/cmd/config-migrate_test.go
+++ b/cmd/config-migrate_test.go
@@ -51,7 +51,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
	// Create a V1 config json file and store it
	configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
	configPath := rootPath + "/fsUsers.json"
-	if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
+	if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
		t.Fatal("Unexpected error: ", err)
	}
@@ -181,7 +181,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
	configPath := rootPath + SlashSeparator + minioConfigFile

	// Create a corrupted config file
-	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
+	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
		t.Fatal("Unexpected error: ", err)
	}
	// Fire a migrateConfig()
@@ -194,7 +194,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {

	// Create a V2 config json file and store it
	configJSON := "{ \"version\":\"2\", \"credentials\": {\"accessKeyId\":\"" + accessKey + "\", \"secretAccessKey\":\"" + secretKey + "\", \"region\":\"us-east-1\"}, \"mongoLogger\":{\"addr\":\"127.0.0.1:3543\", \"db\":\"foodb\", \"collection\":\"foo\"}, \"syslogLogger\":{\"network\":\"127.0.0.1:543\", \"addr\":\"addr\"}, \"fileLogger\":{\"filename\":\"log.out\"}}"
-	if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
+	if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
		t.Fatal("Unexpected error: ", err)
	}
@@ -244,7 +244,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
	configPath := rootPath + SlashSeparator + minioConfigFile

	// Create a corrupted config file
-	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
+	if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
		t.Fatal("Unexpected error: ", err)
	}
@@ -343,7 +343,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
	for i := 3; i <= 17; i++ {
		// Create a corrupted config file
		if err = ioutil.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
-			0644); err != nil {
+			0o644); err != nil {
			t.Fatal("Unexpected error: ", err)
		}
@@ -354,7 +354,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
	}

	// Create a corrupted config file for version '2'.
-	if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0644); err != nil {
+	if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
		t.Fatal("Unexpected error: ", err)
	}
diff --git a/cmd/config.go b/cmd/config.go
index b1f76d642..2b15eb518 100644
--- a/cmd/config.go
+++ b/cmd/config.go
@@ -46,8 +46,8 @@ const (
)

func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
-	[]madmin.ConfigHistoryEntry, error) {
-
+	[]madmin.ConfigHistoryEntry, error,
+) {
	var configHistory []madmin.ConfigHistoryEntry

	// List all kvs
@@ -140,7 +140,7 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{})
		return err
	}

-	var configFile = path.Join(minioConfigPrefix, minioConfigFile)
+	configFile := path.Join(minioConfigPrefix, minioConfigFile)
	if GlobalKMS != nil {
		data, err = config.EncryptBytes(GlobalKMS, data, kms.Context{
			minioMetaBucket: path.Join(minioMetaBucket, configFile),
@@ -153,7 +153,7 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{})
}

func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, error) {
-	var srvCfg = config.New()
+	srvCfg := config.New()
	configFile := path.Join(minioConfigPrefix, minioConfigFile)
	data, err := readConfig(ctx, objAPI, configFile)
	if err != nil {
@@ -174,7 +174,7 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, e
		}
	}

-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(data, &srvCfg); err != nil {
		return nil, err
	}
diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go
index 2857994e4..e1613dae2 100644
--- a/cmd/data-scanner.go
+++ b/cmd/data-scanner.go
@@ -277,7 +277,6 @@ func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
		defer func() {
			console.Debugf(logPrefix+" Scanner time: %v %s\n", time.Since(t), logSuffix)
		}()
-
	}

	switch cache.Info.Name {
@@ -875,8 +874,10 @@ func (i *scannerItem) transformMetaDir() {
	i.objectName = split[len(split)-1]
}

-var applyActionsLogPrefix = color.Green("applyActions:")
-var applyVersionActionsLogPrefix = color.Green("applyVersionActions:")
+var (
+	applyActionsLogPrefix        = color.Green("applyActions:")
+	applyVersionActionsLogPrefix = color.Green("applyVersionActions:")
+)

func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi ObjectInfo) (size int64) {
	if i.debug {
@@ -979,7 +980,6 @@ func (i *scannerItem) applyTierObjSweep(ctx context.Context, o ObjectLayer, oi O
	if ignoreNotFoundErr(err) != nil {
		logger.LogIf(ctx, err)
	}
-
}

// applyNewerNoncurrentVersionLimit removes noncurrent versions older than the most recent NewerNoncurrentVersions configured.
@@ -1100,7 +1100,6 @@ func applyTransitionRule(obj ObjectInfo) bool {
	}
	globalTransitionState.queueTransitionTask(obj)
	return true
-
}

func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject bool) bool {
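The `data-scanner.go` hunk above also shows adjacent top-level `var` declarations being folded into a single parenthesized block, as with the two log prefixes. Illustrative only:

package main

import "fmt"

// Before:
//   var applyPrefix = "applyActions:"
//   var versionPrefix = "applyVersionActions:"
// After: one grouped block, with gofmt aligning the assignments.
var (
	applyPrefix   = "applyActions:"
	versionPrefix = "applyVersionActions:"
)

func main() { fmt.Println(applyPrefix, versionPrefix) }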
diff --git a/cmd/data-update-tracker_test.go b/cmd/data-update-tracker_test.go
index e4b5a032e..70d05fd15 100644
--- a/cmd/data-update-tracker_test.go
+++ b/cmd/data-update-tracker_test.go
@@ -105,7 +105,7 @@ func TestDataUpdateTracker(t *testing.T) {
	defer cancel()
	dut.start(ctx, tmpDir)

-	var tests = []struct {
+	tests := []struct {
		in    string
		check []string // if not empty, check against these instead.
		exist bool
diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go
index 8aceb316d..0776f6623 100644
--- a/cmd/data-usage-cache.go
+++ b/cmd/data-usage-cache.go
@@ -624,7 +624,7 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
	// Appears to be printed with _MINIO_SERVER_DEBUG=off
	// console.Debugf(" %d children found, compacting %v\n", total, path)

-	var leaves = make([]struct {
+	leaves := make([]struct {
		objects uint64
		path    dataUsageHash
	}, total)
@@ -774,7 +774,7 @@ func (d *dataUsageCache) tiersUsageInfo(buckets []BucketInfo) *allTierStats {
// bucketsUsageInfo returns the buckets usage info as a map, with
// key as bucket name
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo {
-	var dst = make(map[string]BucketUsageInfo, len(buckets))
+	dst := make(map[string]BucketUsageInfo, len(buckets))
	for _, bucket := range buckets {
		e := d.find(bucket.Name)
		if e == nil {
@@ -797,7 +797,6 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
				ReplicationPendingCount: stat.PendingCount,
				ReplicationFailedCount:  stat.FailedCount,
			}
-
		}
	}
	dst[bucket.Name] = bui
diff --git a/cmd/data-usage.go b/cmd/data-usage.go
index 48b64302e..d3a834f8c 100644
--- a/cmd/data-usage.go
+++ b/cmd/data-usage.go
@@ -40,7 +40,7 @@ const (
// storeDataUsageInBackend will store all objects sent on the gui channel until closed.
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan DataUsageInfo) {
	for dataUsageInfo := range dui {
-		var json = jsoniter.ConfigCompatibleWithStandardLibrary
+		json := jsoniter.ConfigCompatibleWithStandardLibrary
		dataUsageJSON, err := json.Marshal(dataUsageInfo)
		if err != nil {
			logger.LogIf(ctx, err)
@@ -105,7 +105,7 @@ func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsag
	defer r.Close()

	var dataUsageInfo DataUsageInfo
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.NewDecoder(r).Decode(&dataUsageInfo); err != nil {
		return DataUsageInfo{}, err
	}
diff --git a/cmd/data-usage_test.go b/cmd/data-usage_test.go
index 843b9c6e6..573eea69f 100644
--- a/cmd/data-usage_test.go
+++ b/cmd/data-usage_test.go
@@ -41,7 +41,7 @@ func TestDataUsageUpdate(t *testing.T) {
	}
	const bucket = "bucket"
	defer os.RemoveAll(base)
-	var files = []usageTestFile{
+	files := []usageTestFile{
		{name: "rootfile", size: 10000},
		{name: "rootfile2", size: 10000},
		{name: "dir1/d1file", size: 2000},
@@ -73,7 +73,7 @@ func TestDataUsageUpdate(t *testing.T) {
	}

	// Test dirs
-	var want = []struct {
+	want := []struct {
		path       string
		isNil      bool
		size, objs int
@@ -257,7 +257,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
	}
	scannerSleeper.Update(0, 0)
	defer os.RemoveAll(base)
-	var files = []usageTestFile{
+	files := []usageTestFile{
		{name: "bucket/rootfile", size: 10000},
		{name: "bucket/rootfile2", size: 10000},
		{name: "bucket/dir1/d1file", size: 2000},
@@ -302,7 +302,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
	}

	// Test dirs
-	var want = []struct {
+	want := []struct {
		path       string
		isNil      bool
		size, objs int
@@ -543,7 +543,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
	}
	const bucket = "abucket"
	defer os.RemoveAll(base)
-	var files = []usageTestFile{
+	files := []usageTestFile{
		{name: "rootfile", size: 10000},
		{name: "rootfile2", size: 10000},
		{name: "dir1/d1file", size: 2000},
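The repeated `json := jsoniter.ConfigCompatibleWithStandardLibrary` lines above work because jsoniter exposes an API mirroring `encoding/json`; binding it to a local named `json` shadows the standard package name within that function. A minimal sketch of the pattern, with a made-up struct and payload:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type usageInfo struct {
	ObjectsCount uint64 `json:"objectsCount"`
}

func main() {
	json := jsoniter.ConfigCompatibleWithStandardLibrary // drop-in for encoding/json
	var info usageInfo
	if err := json.Unmarshal([]byte(`{"objectsCount": 42}`), &info); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(info.ObjectsCount)
}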
diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go
index c36b93985..7cdba615a 100644
--- a/cmd/disk-cache-backend.go
+++ b/cmd/disk-cache-backend.go
@@ -190,7 +190,7 @@ func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCa
		quotaPct = config.Quota
	}

-	if err := os.MkdirAll(dir, 0777); err != nil {
+	if err := os.MkdirAll(dir, 0o777); err != nil {
		return nil, fmt.Errorf("Unable to initialize '%s' dir, %w", dir, err)
	}
	cache := diskCache{
@@ -619,10 +619,10 @@ func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, met
	cachedPath := getCacheSHADir(c.dir, bucket, object)
	metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
	// Create cache directory if needed
-	if err := os.MkdirAll(cachedPath, 0777); err != nil {
+	if err := os.MkdirAll(cachedPath, 0o777); err != nil {
		return err
	}
-	f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
+	f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
	if err != nil {
		return err
	}
@@ -682,10 +682,10 @@ func (c *diskCache) updateMetadata(ctx context.Context, bucket, object, etag str
	cachedPath := getCacheSHADir(c.dir, bucket, object)
	metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
	// Create cache directory if needed
-	if err := os.MkdirAll(cachedPath, 0777); err != nil {
+	if err := os.MkdirAll(cachedPath, 0o777); err != nil {
		return err
	}
-	f, err := os.OpenFile(metaPath, os.O_RDWR, 0666)
+	f, err := os.OpenFile(metaPath, os.O_RDWR, 0o666)
	if err != nil {
		return err
	}
@@ -732,7 +732,7 @@ func getCacheWriteBackSHADir(dir, bucket, object string) string {

// Cache data to disk with bitrot checksum added for each block of 1MB
func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Reader, size uint64) (int64, string, error) {
-	if err := os.MkdirAll(cachePath, 0777); err != nil {
+	if err := os.MkdirAll(cachePath, 0o777); err != nil {
		return 0, "", err
	}
	filePath := pathJoin(cachePath, fileName)
@@ -807,6 +807,7 @@ func newCacheEncryptReader(content io.Reader, bucket, object string, metadata ma
	}
	return reader, nil
}
+
func newCacheEncryptMetadata(bucket, object string, metadata map[string]string) ([]byte, error) {
	var sealedKey crypto.SealedKey
	if globalCacheKMS == nil {
@@ -827,6 +828,7 @@ func newCacheEncryptMetadata(bucket, object string, metadata map[string]string)
	metadata[SSECacheEncrypted] = ""
	return objectKey[:], nil
}
+
func (c *diskCache) GetLockContext(ctx context.Context, bucket, object string) (RWLocker, LockContext, error) {
	cachePath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(cachePath)
@@ -879,12 +881,12 @@ func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Read
		cachePath = getCacheWriteBackSHADir(c.dir, bucket, object)
	}

-	if err := os.MkdirAll(cachePath, 0777); err != nil {
+	if err := os.MkdirAll(cachePath, 0o777); err != nil {
		return oi, err
	}
-	var metadata = cloneMSS(opts.UserDefined)
-	var reader = data
-	var actualSize = uint64(size)
+	metadata := cloneMSS(opts.UserDefined)
+	reader := data
+	actualSize := uint64(size)
	if globalCacheKMS != nil {
		reader, err = newCacheEncryptReader(data, bucket, object, metadata)
		if err != nil {
@@ -933,14 +935,14 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io
		return errDiskFull
	}
	cachePath := getCacheSHADir(c.dir, bucket, object)
-	if err := os.MkdirAll(cachePath, 0777); err != nil {
+	if err := os.MkdirAll(cachePath, 0o777); err != nil {
		return err
	}
-	var metadata = cloneMSS(opts.UserDefined)
-	var reader = data
-	var actualSize = uint64(rlen)
+	metadata := cloneMSS(opts.UserDefined)
+	reader := data
+	actualSize := uint64(rlen)
	// objSize is the actual size of object (with encryption overhead if any)
-	var objSize = uint64(size)
+	objSize := uint64(size)
	if globalCacheKMS != nil {
		reader, err = newCacheEncryptReader(data, bucket, object, metadata)
		if err != nil {
@@ -1269,12 +1271,12 @@ func (c *diskCache) NewMultipartUpload(ctx context.Context, bucket, object, uID
	cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
	uploadIDDir := path.Join(cachePath, uploadID)

-	if err := os.MkdirAll(uploadIDDir, 0777); err != nil {
+	if err := os.MkdirAll(uploadIDDir, 0o777); err != nil {
		return uploadID, err
	}
	metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)

-	f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
+	f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
	if err != nil {
		return uploadID, err
	}
@@ -1331,7 +1333,7 @@ func (c *diskCache) PutObjectPart(ctx context.Context, bucket, object, uploadID
		return oi, errDiskFull
	}
	reader := data
-	var actualSize = uint64(size)
+	actualSize := uint64(size)
	if globalCacheKMS != nil {
		reader, err = newCachePartEncryptReader(ctx, bucket, object, partID, data, size, meta.Meta)
		if err != nil {
@@ -1380,7 +1382,7 @@ func (c *diskCache) SavePartMetadata(ctx context.Context, bucket, object, upload
	defer uploadLock.Unlock(ulkctx.Cancel)

	metaPath := pathJoin(uploadDir, cacheMetaJSONFile)
-	f, err := os.OpenFile(metaPath, os.O_RDWR, 0666)
+	f, err := os.OpenFile(metaPath, os.O_RDWR, 0o666)
	if err != nil {
		return err
	}
@@ -1558,7 +1560,7 @@ func (c *diskCache) CompleteMultipartUpload(ctx context.Context, bucket, object,
	uploadMeta.Hits++

	metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)
-	f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
+	f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0o666)
	if err != nil {
		return oi, err
	}
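disk-cache-backend.go above is dominated by the same octal-literal rewrite applied to directory and file modes. If the `0o666`/`0o777` values look permissive, note that the process umask still applies at creation time. A hypothetical sketch:

package main

import (
	"fmt"
	"os"
)

func main() {
	// 0o666 requested; with a typical 022 umask the file lands as 0o644.
	f, err := os.OpenFile("/tmp/demo-cache.json", os.O_RDWR|os.O_CREATE, 0o666)
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f.Close()

	fi, _ := f.Stat()
	fmt.Printf("mode: %o\n", fi.Mode().Perm())
}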
diff --git a/cmd/disk-cache-utils.go b/cmd/disk-cache-utils.go
index 7cd0989c7..6e600e43c 100644
--- a/cmd/disk-cache-utils.go
+++ b/cmd/disk-cache-utils.go
@@ -113,7 +113,6 @@ func cacheControlOpts(o ObjectInfo) *cacheControl {
		if strings.EqualFold(k, "cache-control") {
			headerVal = v
		}
-
	}
	if headerVal == "" {
		return nil
@@ -581,6 +580,7 @@ func (t *multiWriter) Write(p []byte) (n int, err error) {
	}
	return len(p), nil
}
+
func cacheMultiWriter(w1 io.Writer, w2 *io.PipeWriter) io.Writer {
	return &multiWriter{backendWriter: w1, cacheWriter: w2}
}
diff --git a/cmd/disk-cache-utils_test.go b/cmd/disk-cache-utils_test.go
index e5643ef26..eea70a6aa 100644
--- a/cmd/disk-cache-utils_test.go
+++ b/cmd/disk-cache-utils_test.go
@@ -38,7 +38,7 @@ func TestGetCacheControlOpts(t *testing.T) {
		{"max-age=2592000, no-store", timeSentinel, &cacheControl{maxAge: 2592000, sMaxAge: 0, noStore: true, minFresh: 0, expiry: time.Time{}}, false},
		{"must-revalidate, max-age=600", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 0, minFresh: 0, expiry: time.Time{}}, false},
		{"s-maxAge=2500, max-age=600", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, false},
-		{"s-maxAge=2500, max-age=600", expiry, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 07, 28, 00, 00, time.UTC)}, false},
+		{"s-maxAge=2500, max-age=600", expiry, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 0o7, 28, 0o0, 0o0, time.UTC)}, false},
		{"s-maxAge=2500, max-age=600s", timeSentinel, &cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}}, true},
	}
@@ -61,7 +61,6 @@ func TestGetCacheControlOpts(t *testing.T) {
}

func TestIsMetadataSame(t *testing.T) {
-
	testCases := []struct {
		m1 map[string]string
		m2 map[string]string
@@ -148,6 +147,7 @@ func TestNewFileScorer(t *testing.T) {
		t.Fatal("unexpected file list", fs.queueString())
	}
}
+
func TestBytesToClear(t *testing.T) {
	testCases := []struct {
		total int64
diff --git a/cmd/disk-cache.go b/cmd/disk-cache.go
index 1e52e0001..50b1fd059 100644
--- a/cmd/disk-cache.go
+++ b/cmd/disk-cache.go
@@ -137,7 +137,6 @@ func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, buc

// Backend metadata could have changed through server side copy - reset cache metadata if that is the case
func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {
-
	bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
	cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
	for k, v := range bkObjectInfo.UserDefined {
diff --git a/cmd/dynamic-timeouts_test.go b/cmd/dynamic-timeouts_test.go
index 1b4388e4c..1cb5e16ae 100644
--- a/cmd/dynamic-timeouts_test.go
+++ b/cmd/dynamic-timeouts_test.go
@@ -26,7 +26,6 @@ import (
)

func TestDynamicTimeoutSingleIncrease(t *testing.T) {
-
	timeout := newDynamicTimeout(time.Minute, time.Second)

	initial := timeout.Timeout()
@@ -43,7 +42,6 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
}

func TestDynamicTimeoutDualIncrease(t *testing.T) {
-
	timeout := newDynamicTimeout(time.Minute, time.Second)

	initial := timeout.Timeout()
@@ -66,7 +64,6 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
}

func TestDynamicTimeoutSingleDecrease(t *testing.T) {
-
	timeout := newDynamicTimeout(time.Minute, time.Second)

	initial := timeout.Timeout()
@@ -83,7 +80,6 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
}

func TestDynamicTimeoutDualDecrease(t *testing.T) {
-
	timeout := newDynamicTimeout(time.Minute, time.Second)

	initial := timeout.Timeout()
@@ -106,7 +102,6 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
}

func TestDynamicTimeoutManyDecreases(t *testing.T) {
-
	timeout := newDynamicTimeout(time.Minute, time.Second)

	initial := timeout.Timeout()
@@ -116,7 +111,6 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
		for i := 0; i < dynamicTimeoutLogSize; i++ {
			timeout.LogSuccess(successTimeout)
		}
-
	}

	adjusted := timeout.Timeout()
@@ -151,7 +145,6 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
}

func TestDynamicTimeoutHitMinimum(t *testing.T) {
-
	const minimum = 30 * time.Second
	timeout := newDynamicTimeout(time.Minute, minimum)
@@ -172,7 +165,6 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
}

func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
-
	const successTimeout = 20 * time.Second

	for i := 0; i < dynamicTimeoutLogSize; i++ {
@@ -192,7 +184,6 @@ func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() fl
}

func TestDynamicTimeoutAdjustExponential(t *testing.T) {
-
	timeout := newDynamicTimeout(time.Minute, time.Second)

	rand.Seed(0)
@@ -200,9 +191,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
	initial := timeout.Timeout()

	for try := 0; try < 10; try++ {
-
		testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
-
	}

	adjusted := timeout.Timeout()
@@ -212,7 +201,6 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
}

func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
-
	timeout := newDynamicTimeout(time.Minute, time.Second)

	rand.Seed(0)
@@ -220,11 +208,9 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
	initial := timeout.Timeout()

	for try := 0; try < 10; try++ {
-
		testDynamicTimeoutAdjust(t, timeout, func() float64 {
			return 1.0 + rand.NormFloat64()
		})
-
	}

	adjusted := timeout.Timeout()
diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go
index f2b4d1a9b..70a5f25dd 100644
--- a/cmd/encryption-v1.go
+++ b/cmd/encryption-v1.go
@@ -186,7 +186,7 @@ func rotateKey(oldKey []byte, newKeyID string, newKey []byte, bucket, object str
		// client provided it. Therefore, we create a copy
		// of the client provided context and add the bucket
		// key, if not present.
-		var kmsCtx = kms.Context{}
+		kmsCtx := kms.Context{}
		for k, v := range ctx {
			kmsCtx[k] = v
		}
@@ -253,7 +253,7 @@ func newEncryptMetadata(kind crypto.Type, keyID string, key []byte, bucket, obje
		// client provided it. Therefore, we create a copy
		// of the client provided context and add the bucket
		// key, if not present.
-		var kmsCtx = kms.Context{}
+		kmsCtx := kms.Context{}
		for k, v := range ctx {
			kmsCtx[k] = v
		}
@@ -443,7 +443,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
// DecryptBlocksRequestR - same as DecryptBlocksRequest but with a
// reader
func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, seqNumber uint32, partStart int, oi ObjectInfo, copySource bool) (io.Reader, error) {
-
	bucket, object := oi.Bucket, oi.Name
	// Single part case
	if !oi.isMultipart() {
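The `kmsCtx := kms.Context{}` hunks above copy the caller's context map before adding the bucket key, because mutating a caller-supplied map in place would be visible to the caller. The general pattern, with a plain map standing in for `kms.Context`:

package main

import "fmt"

func withBucketKey(ctx map[string]string, bucket string) map[string]string {
	// Copy first: the caller keeps its map unchanged.
	kmsCtx := map[string]string{}
	for k, v := range ctx {
		kmsCtx[k] = v
	}
	if _, ok := kmsCtx["bucket"]; !ok {
		kmsCtx["bucket"] = bucket
	}
	return kmsCtx
}

func main() {
	orig := map[string]string{"tenant": "a"}
	fmt.Println(withBucketKey(orig, "photos"), orig)
}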
diff --git a/cmd/encryption-v1_test.go b/cmd/encryption-v1_test.go
index d1195ecad..97d66f01b 100644
--- a/cmd/encryption-v1_test.go
+++ b/cmd/encryption-v1_test.go
@@ -64,7 +64,6 @@ func TestEncryptRequest(t *testing.T) {
			req.Header.Set(k, v)
		}
		_, _, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
-
		if err != nil {
			t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
		}
@@ -285,14 +284,13 @@ func TestGetDecryptedRange(t *testing.T) {
	)

	// Single part object tests
-	var (
-		mkSPObj = func(s int64) ObjectInfo {
-			return ObjectInfo{
-				Size: getEncSize(s),
-				UserDefined: udMap(false),
-			}
+
+	mkSPObj := func(s int64) ObjectInfo {
+		return ObjectInfo{
+			Size: getEncSize(s),
+			UserDefined: udMap(false),
		}
-	)
+	}

	testSP := []struct {
		decSz int64
@@ -325,7 +323,7 @@ func TestGetDecryptedRange(t *testing.T) {
		if err != nil {
			t.Errorf("Case %d: unexpected err: %v", i, err)
		}
-		var rLen = pkgSz + 32
+		rLen := pkgSz + 32
		if test.decSz < pkgSz {
			rLen = test.decSz + 32
		}
@@ -341,7 +339,7 @@ func TestGetDecryptedRange(t *testing.T) {
		if err != nil {
			t.Errorf("Case %d: unexpected err: %v", i, err)
		}
-		var rLen = (pkgSz + 32) * 2
+		rLen := (pkgSz + 32) * 2
		if test.decSz < 2*pkgSz {
			rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32)
		}
@@ -356,7 +354,7 @@ func TestGetDecryptedRange(t *testing.T) {
		if err != nil {
			t.Errorf("Case %d: unexpected err: %v", i, err)
		}
-		var rLen = (pkgSz + 32) * 2
+		rLen := (pkgSz + 32) * 2
		if test.decSz-pkgSz < 2*pkgSz {
			rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32*2)
		}
@@ -551,60 +549,90 @@ var getDefaultOptsTests = []struct {
	encryptionType encrypt.Type
	err            error
}{
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
-		xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
+			xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
		copySource: false,
		metadata: nil,
		encryptionType: encrypt.SSEC,
-		err: nil}, // 0
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
-		xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+		err: nil,
+	}, // 0
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
+			xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
		copySource: true,
		metadata: nil,
		encryptionType: "",
-		err: nil}, // 1
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCustomerKey: []string{"Mz"},
-		xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+		err: nil,
+	}, // 1
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCustomerKey: []string{"Mz"},
+			xhttp.AmzServerSideEncryptionCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
		copySource: false,
		metadata: nil,
		encryptionType: "",
-		err: crypto.ErrInvalidCustomerKey}, // 2
-	{headers: http.Header{xhttp.AmzServerSideEncryption: []string{"AES256"}},
+		err: crypto.ErrInvalidCustomerKey,
+	}, // 2
+	{
+		headers: http.Header{xhttp.AmzServerSideEncryption: []string{"AES256"}},
		copySource: false,
		metadata: nil,
		encryptionType: encrypt.S3,
-		err: nil}, // 3
-	{headers: http.Header{},
+		err: nil,
+	}, // 3
+	{
+		headers: http.Header{},
		copySource: false,
-		metadata: map[string]string{crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
+		metadata: map[string]string{
+			crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
			crypto.MetaKeyID: "kms-key",
-			crypto.MetaDataEncryptionKey: "m-key"},
+			crypto.MetaDataEncryptionKey: "m-key",
+		},
		encryptionType: encrypt.S3,
-		err: nil}, // 4
-	{headers: http.Header{},
+		err: nil,
+	}, // 4
+	{
+		headers: http.Header{},
		copySource: true,
-		metadata: map[string]string{crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
+		metadata: map[string]string{
+			crypto.MetaSealedKeyS3: base64.StdEncoding.EncodeToString(make([]byte, 64)),
			crypto.MetaKeyID: "kms-key",
-			crypto.MetaDataEncryptionKey: "m-key"},
+			crypto.MetaDataEncryptionKey: "m-key",
+		},
		encryptionType: "",
-		err: nil}, // 5
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
-		xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+		err: nil,
+	}, // 5
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
+			xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
		copySource: true,
		metadata: nil,
		encryptionType: encrypt.SSEC,
-		err: nil}, // 6
-	{headers: http.Header{xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
-		xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
-		xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
+		err: nil,
+	}, // 6
+	{
+		headers: http.Header{
+			xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm: []string{"AES256"},
+			xhttp.AmzServerSideEncryptionCopyCustomerKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
+			xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="},
+		},
		copySource: false,
		metadata: nil,
		encryptionType: "",
-		err: nil}, // 7
+		err: nil,
+	}, // 7
}

func TestGetDefaultOpts(t *testing.T) {
diff --git a/cmd/endpoint-ellipses.go b/cmd/endpoint-ellipses.go
index 9feb48a03..8e3398762 100644
--- a/cmd/endpoint-ellipses.go
+++ b/cmd/endpoint-ellipses.go
@@ -90,7 +90,7 @@ func commonSetDriveCount(divisibleSize uint64, setCounts []uint64) (setSize uint
// input argument patterns, the symmetry calculation is to ensure that
// we also use uniform number of drives common across all ellipses patterns.
func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.ArgPattern) []uint64 {
-	var newSetCounts = make(map[uint64]struct{})
+	newSetCounts := make(map[uint64]struct{})
	for _, ss := range setCounts {
		var symmetry bool
		for _, argPattern := range argPatterns {
@@ -224,7 +224,7 @@ func (s endpointSet) getEndpoints() (endpoints []string) {
// this function also intelligently decides on what will
// be the right set size etc.
func (s endpointSet) Get() (sets [][]string) {
-	var k = uint64(0)
+	k := uint64(0)
	endpoints := s.getEndpoints()
	for i := range s.setIndexes {
		for j := range s.setIndexes[i] {
@@ -253,7 +253,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
// of endpoints following the ellipses pattern, this is what is used
// by the object layer for initializing itself.
func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSet, err error) {
-	var argPatterns = make([]ellipses.ArgPattern, len(args))
+	argPatterns := make([]ellipses.ArgPattern, len(args))
	for i, arg := range args {
		patterns, perr := ellipses.FindEllipsesPatterns(arg)
		if perr != nil {
@@ -332,15 +332,13 @@ const (
	EnvErasureSetDriveCount = "MINIO_ERASURE_SET_DRIVE_COUNT"
)

-var (
-	globalCustomErasureDriveCount = false
-)
+var globalCustomErasureDriveCount = false

// CreateServerEndpoints - validates and creates new endpoints from input args, supports
// both ellipses and without ellipses transparently.
func createServerEndpoints(serverAddr string, args ...string) (
-	endpointServerPools EndpointServerPools, setupType SetupType, err error) {
-
+	endpointServerPools EndpointServerPools, setupType SetupType, err error,
+) {
	if len(args) == 0 {
		return nil, -1, errInvalidArgument
	}
diff --git a/cmd/endpoint-ellipses_test.go b/cmd/endpoint-ellipses_test.go
index c4cc76f94..1cf317c09 100644
--- a/cmd/endpoint-ellipses_test.go
+++ b/cmd/endpoint-ellipses_test.go
@@ -72,7 +72,8 @@ func TestGetDivisibleSize(t *testing.T) {
	testCases := []struct {
		totalSizes []uint64
		result     uint64
-	}{{[]uint64{24, 32, 16}, 8},
+	}{
+		{[]uint64{24, 32, 16}, 8},
		{[]uint64{32, 8, 4}, 4},
		{[]uint64{8, 8, 8}, 8},
		{[]uint64{24}, 24},
@@ -168,7 +169,7 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
-			var argPatterns = make([]ellipses.ArgPattern, len(testCase.args))
+			argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
			for i, arg := range testCase.args {
				patterns, err := ellipses.FindEllipsesPatterns(arg)
				if err != nil {
@@ -290,7 +291,7 @@ func TestGetSetIndexes(t *testing.T) {
	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
-			var argPatterns = make([]ellipses.ArgPattern, len(testCase.args))
+			argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
			for i, arg := range testCase.args {
				patterns, err := ellipses.FindEllipsesPatterns(arg)
				if err != nil {
@@ -552,8 +553,10 @@ func TestParseEndpointSet(t *testing.T) {
				},
			},
			nil,
-			[][]uint64{{16, 16, 16, 16, 16, 16, 16, 16,
-				16, 16, 16, 16, 16, 16, 16, 16}},
+			[][]uint64{{
+				16, 16, 16, 16, 16, 16, 16, 16,
+				16, 16, 16, 16, 16, 16, 16, 16,
+			}},
		},
		true,
	},
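`createServerEndpoints` above shows the wrapped-signature style applied throughout this diff: when parameters or results span lines, the result list gains a trailing comma and the closing `) {` moves to its own line, replacing the old blank line after the brace. A sketch of the shape, with placeholder logic:

package main

import "fmt"

// Wrapped signature in the style used by this change: trailing comma on
// the last result, closing parenthesis and brace on their own line.
func splitHostPort(addr string) (
	host string, port string, err error,
) {
	// Trivial placeholder logic, for illustration only.
	if addr == "" {
		return "", "", fmt.Errorf("empty address")
	}
	return addr, "9000", nil
}

func main() {
	fmt.Println(splitHostPort("localhost"))
}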
diff --git a/cmd/endpoint_test.go b/cmd/endpoint_test.go
index 5c03529d7..073d9e41c 100644
--- a/cmd/endpoint_test.go
+++ b/cmd/endpoint_test.go
@@ -238,13 +238,18 @@ func TestCreateEndpoints(t *testing.T) {
		{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},

		// Erasure Setup with PathEndpointType
-		{":1234", [][]string{{"/d1", "/d2", "/d3", "/d4"}}, ":1234",
+		{
+			":1234",
+			[][]string{{"/d1", "/d2", "/d3", "/d4"}},
+			":1234",
			Endpoints{
				Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true},
				Endpoint{URL: &url.URL{Path: mustAbs("/d2")}, IsLocal: true},
				Endpoint{URL: &url.URL{Path: mustAbs("/d3")}, IsLocal: true},
				Endpoint{URL: &url.URL{Path: mustAbs("/d4")}, IsLocal: true},
-			}, ErasureSetupType, nil},
+			},
+			ErasureSetupType, nil,
+		},
		// DistErasure Setup with URLEndpointType
		{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{
			Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d1"}, IsLocal: true},
@@ -350,12 +355,18 @@ func TestGetLocalPeer(t *testing.T) {
		expectedResult string
	}{
		{[]string{"/d1", "/d2", "d3", "d4"}, "127.0.0.1:9000"},
-		{[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"},
-			"localhost:9000"},
-		{[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"},
-			"localhost:9000"},
-		{[]string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://localhost:9002/d3", "http://localhost:9003/d4"},
-			"localhost:9000"},
+		{
+			[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"},
+			"localhost:9000",
+		},
+		{
+			[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"},
+			"localhost:9000",
+		},
+		{
+			[]string{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://localhost:9002/d3", "http://localhost:9003/d4"},
+			"localhost:9000",
+		},
	}

	for i, testCase := range testCases {
diff --git a/cmd/erasure-bucket.go b/cmd/erasure-bucket.go
index 5f4e654d6..763e2291e 100644
--- a/cmd/erasure-bucket.go
+++ b/cmd/erasure-bucket.go
@@ -91,7 +91,7 @@ func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (
	storageDisks := er.getDisks()

	g := errgroup.WithNErrs(len(storageDisks))
-	var bucketsInfo = make([]BucketInfo, len(storageDisks))
+	bucketsInfo := make([]BucketInfo, len(storageDisks))
	// Undo previous make bucket entry on all underlying storage disks.
	for index := range storageDisks {
		index := index
diff --git a/cmd/erasure-coding.go b/cmd/erasure-coding.go
index 145300d50..b52eba69b 100644
--- a/cmd/erasure-coding.go
+++ b/cmd/erasure-coding.go
@@ -94,7 +94,7 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error)
// It only decodes the data blocks but does not verify them.
// It returns an error if the decoding failed.
func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
-	var isZero = 0
+	isZero := 0
	for _, b := range data {
		if len(b) == 0 {
			isZero++
diff --git a/cmd/erasure-common.go b/cmd/erasure-common.go
index aeba29c45..575192674 100644
--- a/cmd/erasure-common.go
+++ b/cmd/erasure-common.go
@@ -50,7 +50,7 @@ func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
	var wg sync.WaitGroup
	var mu sync.Mutex
-	var newDisks = map[uint64][]StorageAPI{}
+	newDisks := map[uint64][]StorageAPI{}
	// Based on the random shuffling return back randomized disks.
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
		i := i
diff --git a/cmd/erasure-healing-common.go b/cmd/erasure-healing-common.go
index 4518aa877..79a878630 100644
--- a/cmd/erasure-healing-common.go
+++ b/cmd/erasure-healing-common.go
@@ -226,8 +226,8 @@ func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []err
// a not-found error or a hash-mismatch error.
func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo,
	errs []error, latestMeta FileInfo, bucket, object string,
-	scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time) {
-
+	scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time,
+) {
	var diskMTime time.Time
	var shardFix bool
	if !latestMeta.DataShardFixed() {
diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go
index 4412772b0..20f26a826 100644
--- a/cmd/erasure-healing.go
+++ b/cmd/erasure-healing.go
@@ -811,8 +811,8 @@ func isObjectDirDangling(errs []error) (ok bool) {
}

func (er erasureObjects) purgeObjectDangling(ctx context.Context, bucket, object, versionID string,
-	metaArr []FileInfo, errs []error, dataErrs []error, opts madmin.HealOpts) (madmin.HealResultItem, error) {
-
+	metaArr []FileInfo, errs []error, dataErrs []error, opts madmin.HealOpts) (madmin.HealResultItem, error,
+) {
	storageDisks := er.getDisks()
	storageEndpoints := er.getEndpoints()
	// Check if the object is dangling, if yes and user requested
diff --git a/cmd/erasure-metadata-utils_test.go b/cmd/erasure-metadata-utils_test.go
index a64976481..640e9ff94 100644
--- a/cmd/erasure-metadata-utils_test.go
+++ b/cmd/erasure-metadata-utils_test.go
@@ -87,9 +87,13 @@ func TestReduceErrs(t *testing.T) {
			errDiskNotFound,
		}, []error{errDiskNotFound}, errVolumeNotFound},
		{[]error{}, []error{}, errErasureReadQuorum},
-		{[]error{errFileNotFound, errFileNotFound, errFileNotFound,
-			errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil},
-			nil, nil},
+		{
+			[]error{
+				errFileNotFound, errFileNotFound, errFileNotFound,
+				errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil,
+			},
+			nil, nil,
+		},
		// Checks if wrapped context cancelation errors are grouped as one.
		{canceledErrs, nil, context.Canceled},
	}
diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go
index c0702c988..d8c94592e 100644
--- a/cmd/erasure-multipart.go
+++ b/cmd/erasure-multipart.go
@@ -820,7 +820,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi)

	// Save current erasure metadata for validation.
-	var currentFI = fi
+	currentFI := fi

	// Allocate parts similar to incoming slice.
	fi.Parts = make([]ObjectPartInfo, len(parts))
diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go
index 61cc5a623..5d2c8c5b0 100644
--- a/cmd/erasure-object.go
+++ b/cmd/erasure-object.go
@@ -145,7 +145,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
// Read(Closer). When err != nil, the returned reader is always nil.
func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	var unlockOnDefer bool
-	var nsUnlocker = func() {}
+	nsUnlocker := func() {}
	defer func() {
		if unlockOnDefer {
			nsUnlocker()
@@ -475,7 +475,6 @@ func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object strin
	fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		return objInfo, toObjectErr(err, bucket, object)
-
	}
	objInfo = fi.ToObjectInfo(bucket, object)
	if opts.VersionID != "" && !fi.VersionPurgeStatus().Empty() {
@@ -1177,7 +1176,7 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
	}

	// Initialize list of errors.
-	var delObjErrs = make([][]error, len(storageDisks))
+	delObjErrs := make([][]error, len(storageDisks))

	var wg sync.WaitGroup
	// Remove versions in bulk for each disk
@@ -1820,6 +1819,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
		})
	}
	_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
-		MTime: oi.ModTime})
+		MTime: oi.ModTime,
+	})
	return setRestoreHeaderFn(oi, err)
}
diff --git a/cmd/erasure-object_test.go b/cmd/erasure-object_test.go
index 29b6adc71..a55bc1b0e 100644
--- a/cmd/erasure-object_test.go
+++ b/cmd/erasure-object_test.go
@@ -505,7 +505,6 @@ func TestGetObjectNoQuorum(t *testing.T) {
			gr.Close()
		}
	}
-
}

func TestHeadObjectNoQuorum(t *testing.T) {
diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go
index e07ef194e..f1891822e 100644
--- a/cmd/erasure-server-pool.go
+++ b/cmd/erasure-server-pool.go
@@ -243,7 +243,7 @@ func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, bucket, ob
// If there is not enough space the pool will return 0 bytes available.
// Negative sizes are seen as 0 bytes.
func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, bucket, object string, size int64) serverPoolsAvailableSpace {
-	var serverPools = make(serverPoolsAvailableSpace, len(z.serverPools))
+	serverPools := make(serverPoolsAvailableSpace, len(z.serverPools))

	storageInfos := make([][]*DiskInfo, len(z.serverPools))
	g := errgroup.WithNErrs(len(z.serverPools))
@@ -659,7 +659,6 @@ func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket

	// Success.
	return nil
-
}

func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
@@ -674,7 +673,7 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object
	}

	var unlockOnDefer bool
-	var nsUnlocker = func() {}
+	nsUnlocker := func() {}
	defer func() {
		if unlockOnDefer {
			nsUnlocker()
@@ -1168,7 +1167,7 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
		return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	}

-	var poolResult = ListMultipartsInfo{}
+	poolResult := ListMultipartsInfo{}
	poolResult.MaxUploads = maxUploads
	poolResult.KeyMarker = keyMarker
	poolResult.Prefix = prefix
@@ -1287,7 +1286,6 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec
		Object:   object,
		UploadID: uploadID,
	}
-
}

// ListObjectParts - lists all uploaded parts to an object in hashedSet.
@@ -1529,7 +1527,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi
	ctx = lkctx.Context()
	defer formatLock.Unlock(lkctx.Cancel)

-	var r = madmin.HealResultItem{
+	r := madmin.HealResultItem{
		Type:   madmin.HealItemMetadata,
		Detail: "disk-format",
	}
@@ -1561,7 +1559,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi
}

func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
-	var r = madmin.HealResultItem{
+	r := madmin.HealResultItem{
		Type:   madmin.HealItemBucket,
		Bucket: bucket,
	}
diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go
index 638079982..a5ec8bc03 100644
--- a/cmd/erasure-sets.go
+++ b/cmd/erasure-sets.go
@@ -206,7 +206,7 @@ func (s *erasureSets) connectDisks() {
	}()

	var wg sync.WaitGroup
-	var setsJustConnected = make([]bool, s.setCount)
+	setsJustConnected := make([]bool, s.setCount)
	diskMap := s.getDiskMap()
	for _, endpoint := range s.endpoints {
		if isEndpointConnectionStable(diskMap, endpoint, s.lastConnectDisksOpTime) {
@@ -398,7 +398,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
		s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
	}

-	var erasureLockers = map[string]dsync.NetLocker{}
+	erasureLockers := map[string]dsync.NetLocker{}
	for _, endpoint := range endpoints {
		if _, ok := erasureLockers[endpoint.Host]; !ok {
			erasureLockers[endpoint.Host] = newLockAPI(endpoint)
@@ -406,7 +406,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
	}

	for i := 0; i < setCount; i++ {
-		var lockerEpSet = set.NewStringSet()
+		lockerEpSet := set.NewStringSet()
		for j := 0; j < setDriveCount; j++ {
			endpoint := endpoints[i*setDriveCount+j]
			// Only add lockers per endpoint.
@@ -865,7 +865,7 @@ func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObj
// that all buckets are present on all sets.
func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	var listBuckets []BucketInfo
-	var healBuckets = map[string]VolInfo{}
+	healBuckets := map[string]VolInfo{}
	for _, set := range s.sets {
		// lists all unique buckets across drives.
		if err := listAllBuckets(ctx, set.getDisks(), healBuckets, s.defaultParityCount); err != nil {
@@ -958,13 +958,13 @@ func (s *erasureSets) DeleteObjects(ctx context.Context, bucket string, objects
	}

	// The result of delete operation on all passed objects
-	var delErrs = make([]error, len(objects))
+	delErrs := make([]error, len(objects))

	// The result of delete objects
-	var delObjects = make([]DeletedObject, len(objects))
+	delObjects := make([]DeletedObject, len(objects))

	// A map between a set and its associated objects
-	var objSetMap = make(map[int][]delObj)
+	objSetMap := make(map[int][]delObj)

	// Group objects by set index
	for i, object := range objects {
@@ -1147,7 +1147,7 @@ func formatsToDrivesInfo(endpoints Endpoints, formats []*formatErasureV3, sErrs
	// result, also populate disks to be healed.
	for i, format := range formats {
		drive := endpoints.GetString(i)
-		var state = madmin.DriveStateCorrupt
+		state := madmin.DriveStateCorrupt
		switch {
		case format != nil:
			state = madmin.DriveStateOk
@@ -1274,7 +1274,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
	newFormatSets := newHealFormatSets(refFormat, s.setCount, s.setDriveCount, formats, sErrs)

	if !dryRun {
-		var tmpNewFormats = make([]*formatErasureV3, s.setCount*s.setDriveCount)
+		tmpNewFormats := make([]*formatErasureV3, s.setCount*s.setDriveCount)
		for i := range newFormatSets {
			for j := range newFormatSets[i] {
				if newFormatSets[i][j] == nil {
diff --git a/cmd/erasure-sets_test.go b/cmd/erasure-sets_test.go
index cf151a235..84ee2a738 100644
--- a/cmd/erasure-sets_test.go
+++ b/cmd/erasure-sets_test.go
@@ -162,7 +162,7 @@ func TestNewErasureSets(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

-	var nDisks = 16 // Maximum disks.
+	nDisks := 16 // Maximum disks.
	var erasureDisks []string
	for i := 0; i < nDisks; i++ {
		// Do not attempt to create this path, the test validates
diff --git a/cmd/erasure.go b/cmd/erasure.go
index 53aa06b0f..e601d46a9 100644
--- a/cmd/erasure.go
+++ b/cmd/erasure.go
@@ -417,7 +417,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
	saverWg.Add(1)
	go func() {
		// Add jitter to the update time so multiple sets don't sync up.
-		var updateTime = 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
+		updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
		t := time.NewTicker(updateTime)
		defer t.Stop()
		defer saverWg.Done()
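The `updateTime` hunk in `erasure.go` above preserves an idea worth noting: each scanner goroutine jitters its 30-second update interval by up to 10 seconds so multiple sets do not synchronize their writes. A standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Base interval plus up to 10s of random jitter, as in the hunk above.
	updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
	t := time.NewTicker(updateTime)
	defer t.Stop()
	fmt.Println("tick interval:", updateTime)
}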
diff --git a/cmd/format-disk-cache.go b/cmd/format-disk-cache.go
index 05b2bb93c..878c79be3 100644
--- a/cmd/format-disk-cache.go
+++ b/cmd/format-disk-cache.go
@@ -77,9 +77,9 @@ type formatCacheVersionDetect struct {
// Return a slice of format, to be used to format uninitialized disks.
func newFormatCacheV2(drives []string) []*formatCacheV2 {
	diskCount := len(drives)
-	var disks = make([]string, diskCount)
+	disks := make([]string, diskCount)

-	var formats = make([]*formatCacheV2, diskCount)
+	formats := make([]*formatCacheV2, diskCount)

	for i := 0; i < diskCount; i++ {
		format := &formatCacheV2{}
@@ -110,7 +110,7 @@ func formatCacheGetVersion(r io.ReadSeeker) (string, error) {
// Creates a new cache format.json if unformatted.
func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
	// open file using READ & WRITE permission
-	var file, err = os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
+	file, err := os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0o600)
	if err != nil {
		return err
	}
@@ -133,7 +133,7 @@ func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
func initFormatCache(ctx context.Context, drives []string) (formats []*formatCacheV2, err error) {
	nformats := newFormatCacheV2(drives)
	for i, drive := range drives {
-		if err = os.MkdirAll(pathJoin(drive, minioMetaBucket), 0777); err != nil {
+		if err = os.MkdirAll(pathJoin(drive, minioMetaBucket), 0o777); err != nil {
			logger.GetReqInfo(ctx).AppendTags("drive", drive)
			logger.LogIf(ctx, err)
			return nil, err
@@ -156,7 +156,6 @@ func loadFormatCache(ctx context.Context, drives []string) ([]*formatCacheV2, bo
	for i, drive := range drives {
		cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
		f, err := os.OpenFile(cacheFormatPath, os.O_RDWR, 0)
-
		if err != nil {
			if osIsNotExist(err) {
				continue
@@ -232,7 +231,7 @@ func checkFormatCacheValues(migrating bool, formats []*formatCacheV2) (int, erro
// checkCacheDisksConsistency - checks if "This" disk uuid on each disk is consistent with all "Disks" slices
// across disks.
func checkCacheDiskConsistency(formats []*formatCacheV2) error {
-	var disks = make([]string, len(formats))
+	disks := make([]string, len(formats))
	// Collect currently available disk uuids.
	for index, format := range formats {
		if format == nil {
@@ -413,7 +412,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
			object = strings.TrimSuffix(object, "/")

			destdir := getCacheSHADir(c.dir, bucket, object)
-			if err := os.MkdirAll(destdir, 0777); err != nil {
+			if err := os.MkdirAll(destdir, 0o777); err != nil {
				return err
			}
			prevCachedPath := path.Join(c.dir, bucket, object)
@@ -427,7 +426,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
			}
			// marshal cache metadata after adding version and stat info
			meta := &cacheMeta{}
-			var json = jsoniter.ConfigCompatibleWithStandardLibrary
+			json := jsoniter.ConfigCompatibleWithStandardLibrary
			if err = json.Unmarshal(metaBytes, &meta); err != nil {
				return err
			}
@@ -459,7 +458,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
				return err
			}

-			if err = ioutil.WriteFile(metaPath, jsonData, 0644); err != nil {
+			if err = ioutil.WriteFile(metaPath, jsonData, 0o644); err != nil {
				return err
			}
		}
@@ -475,7 +474,6 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
	removeAll(path.Join(c.dir, minioMetaBucket, "buckets"))

	return migrateCacheFormatJSON(cacheFormatPath)
-
}

func migrateCacheFormatJSON(cacheFormatPath string) error {
diff --git a/cmd/format-disk-cache_test.go b/cmd/format-disk-cache_test.go
index 85115ea73..f927558c6 100644
--- a/cmd/format-disk-cache_test.go
+++ b/cmd/format-disk-cache_test.go
@@ -245,7 +245,7 @@ func genFormatCacheInvalidDisksOrder() []*formatCacheV2 {
		formatConfigs[index] = format
	}
	// Re order disks for failure case.
-	var disks1 = make([]string, 8)
+	disks1 := make([]string, 8)
	copy(disks1, disks)
	disks1[1], disks1[2] = disks[2], disks[1]
	formatConfigs[2].Cache.Disks = disks1
diff --git a/cmd/format-erasure.go b/cmd/format-erasure.go
index 81b9eaee2..bc7450ba4 100644
--- a/cmd/format-erasure.go
+++ b/cmd/format-erasure.go
@@ -242,7 +242,7 @@ func formatErasureMigrateV1ToV2(export, version string) error {
	if err != nil {
		return err
	}
-	return ioutil.WriteFile(formatPath, b, 0666)
+	return ioutil.WriteFile(formatPath, b, 0o666)
}

// Migrates V2 for format.json to V3 (Flat hierarchy for multipart)
@@ -266,7 +266,7 @@ func formatErasureMigrateV2ToV3(export, version string) error {
		return err
	}

-	if err = mkdirAll(pathJoin(export, minioMetaMultipartBucket), 0755); err != nil {
+	if err = mkdirAll(pathJoin(export, minioMetaMultipartBucket), 0o755); err != nil {
		return err
	}
@@ -284,12 +284,12 @@ func formatErasureMigrateV2ToV3(export, version string) error {
	if err != nil {
		return err
	}
-	return ioutil.WriteFile(formatPath, b, 0666)
+	return ioutil.WriteFile(formatPath, b, 0o666)
}

// countErrs - count a specific error.
func countErrs(errs []error, err error) int {
-	var i = 0
+	i := 0
	for _, err1 := range errs {
		if err1 == err {
			i++
@@ -314,7 +314,7 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur
	g := errgroup.WithNErrs(len(storageDisks))

	// Initialize format configs.
-	var formats = make([]*formatErasureV3, len(storageDisks))
+	formats := make([]*formatErasureV3, len(storageDisks))

	// Load format from each disk in parallel
	for index := range storageDisks {
@@ -534,7 +534,6 @@ func formatErasureFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI
	// Deployment ID needs to be set on all the disks.
	// Save `format.json` across all disks.
	return saveFormatErasureAll(GlobalContext, storageDisks, formats)
-
}

// Update only the valid local disks which have not been updated before.
@@ -662,7 +661,6 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e

// Initializes meta volume only on local storage disks.
func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error {
-
	// Compute the local disks eligible for meta volumes (re)initialization
	disksToInit := make([]StorageAPI, 0, len(storageDisks))
	for index := range storageDisks {
@@ -811,7 +809,6 @@ func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats
		}
	}
	return nil
-
}

// initFormatErasure - save Erasure format configuration on all disks.
diff --git a/cmd/format-erasure_test.go b/cmd/format-erasure_test.go
index 63e18dab9..d36b5f56e 100644
--- a/cmd/format-erasure_test.go
+++ b/cmd/format-erasure_test.go
@@ -124,11 +124,11 @@ func TestFormatErasureMigrate(t *testing.T) {
		t.Fatal(err)
	}

-	if err = os.MkdirAll(pathJoin(rootPath, minioMetaBucket), os.FileMode(0755)); err != nil {
+	if err = os.MkdirAll(pathJoin(rootPath, minioMetaBucket), os.FileMode(0o755)); err != nil {
		t.Fatal(err)
	}

-	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
+	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
		t.Fatal(err)
	}
@@ -175,7 +175,7 @@ func TestFormatErasureMigrate(t *testing.T) {
		t.Fatal(err)
	}

-	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
+	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
		t.Fatal(err)
	}
@@ -195,7 +195,7 @@ func TestFormatErasureMigrate(t *testing.T) {
		t.Fatal(err)
	}

-	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0644)); err != nil {
+	if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
		t.Fatal(err)
	}
- lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600) + lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0o600) if err != nil { return err } diff --git a/cmd/fs-v1-helpers.go b/cmd/fs-v1-helpers.go index 1a996beff..34dddffd3 100644 --- a/cmd/fs-v1-helpers.go +++ b/cmd/fs-v1-helpers.go @@ -123,7 +123,7 @@ func fsMkdir(ctx context.Context, dirPath string) (err error) { return err } - if err = os.Mkdir((dirPath), 0777); err != nil { + if err = os.Mkdir((dirPath), 0o777); err != nil { switch { case osIsExist(err): return errVolumeExists @@ -309,7 +309,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc return 0, err } - if err := mkdirAll(pathutil.Dir(filePath), 0777); err != nil { + if err := mkdirAll(pathutil.Dir(filePath), 0o777); err != nil { switch { case osIsPermission(err): return 0, errFileAccessDenied @@ -329,7 +329,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc if globalFSOSync { flags |= os.O_SYNC } - writer, err := lock.Open(filePath, flags, 0666) + writer, err := lock.Open(filePath, flags, 0o666) if err != nil { return 0, osErrToFileErr(err) } diff --git a/cmd/fs-v1-helpers_test.go b/cmd/fs-v1-helpers_test.go index 219aadf48..cf46962ec 100644 --- a/cmd/fs-v1-helpers_test.go +++ b/cmd/fs-v1-helpers_test.go @@ -75,7 +75,7 @@ func TestFSStats(t *testing.T) { t.Fatalf("Unable to create volume, %s", err) } - var reader = bytes.NewReader([]byte("Hello, world")) + reader := bytes.NewReader([]byte("Hello, world")) if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -201,7 +201,7 @@ func TestFSCreateAndOpen(t *testing.T) { t.Fatal("Unexpected error", err) } - var reader = bytes.NewReader([]byte("Hello, world")) + reader := bytes.NewReader([]byte("Hello, world")) if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -259,7 +259,7 @@ func TestFSDeletes(t *testing.T) { t.Fatalf("Unable to create directory, %s", err) } - var reader = bytes.NewReader([]byte("Hello, world")) + reader := bytes.NewReader([]byte("Hello, world")) if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, reader.Size()); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -271,7 +271,7 @@ func TestFSDeletes(t *testing.T) { if err != nil { t.Fatal(err) } - err = ioutil.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0777) + err = ioutil.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0o777) if err != nil { t.Fatal(err) } @@ -368,7 +368,7 @@ func BenchmarkFSDeleteFile(b *testing.B) { // We need to create and delete the file sequentially inside the benchmark. 
for i := 0; i < b.N; i++ { b.StopTimer() - err = ioutil.WriteFile(filename, []byte("data"), 0777) + err = ioutil.WriteFile(filename, []byte("data"), 0o777) if err != nil { b.Fatal(err) } @@ -395,7 +395,7 @@ func TestFSRemoves(t *testing.T) { t.Fatalf("Unable to create directory, %s", err) } - var reader = bytes.NewReader([]byte("Hello, world")) + reader := bytes.NewReader([]byte("Hello, world")) if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -514,7 +514,7 @@ func TestFSRemoveMeta(t *testing.T) { filePath := pathJoin(fsPath, "success-vol", "success-file") - var reader = bytes.NewReader([]byte("Hello, world")) + reader := bytes.NewReader([]byte("Hello, world")) if _, err = fsCreateFile(GlobalContext, filePath, reader, 0); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -556,7 +556,7 @@ func TestFSIsFile(t *testing.T) { filePath := pathJoin(dirPath, "tmpfile") - if err = ioutil.WriteFile(filePath, nil, 0777); err != nil { + if err = ioutil.WriteFile(filePath, nil, 0o777); err != nil { t.Fatalf("Unable to create file %s", filePath) } diff --git a/cmd/fs-v1-metadata.go b/cmd/fs-v1-metadata.go index 5db3c2587..2b8b4df34 100644 --- a/cmd/fs-v1-metadata.go +++ b/cmd/fs-v1-metadata.go @@ -91,7 +91,7 @@ func (c *FSChecksumInfoV1) UnmarshalJSON(data []byte) error { } var info checksuminfo - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary err := json.Unmarshal(data, &info) if err != nil { return err @@ -230,7 +230,7 @@ func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64, return 0, io.EOF } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(fsMetaBuf, m); err != nil { return 0, err } diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go index 3cc490e51..6bdc5bb42 100644 --- a/cmd/fs-v1-multipart.go +++ b/cmd/fs-v1-multipart.go @@ -224,7 +224,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri uploadID := mustGetUUID() uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) - err := mkdirAll(uploadIDDir, 0755) + err := mkdirAll(uploadIDDir, 0o755) if err != nil { logger.LogIf(ctx, err) return "", err @@ -240,7 +240,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri return "", err } - if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0666); err != nil { + if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil { logger.LogIf(ctx, err) return "", err } @@ -252,8 +252,8 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri // object. Internally incoming data is written to '.minio.sys/tmp' location // and safely renamed to '.minio.sys/multipart' for reach parts. 
func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, - startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { - + startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error, +) { if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID { return pi, VersionNotFound{ Bucket: srcBucket, @@ -397,7 +397,7 @@ func (fs *FSObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploa } var fsMeta fsMetaV1 - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil { return minfo, toObjectErr(err, bucket, object) } @@ -526,7 +526,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload } var fsMeta fsMetaV1 - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil { return result, err } @@ -542,7 +542,6 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload // // Implements S3 compatible Complete multipart API. func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) { - var actualSize int64 if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil { diff --git a/cmd/fs-v1-rwpool.go b/cmd/fs-v1-rwpool.go index 3c3fe412d..3ea10ec3a 100644 --- a/cmd/fs-v1-rwpool.go +++ b/cmd/fs-v1-rwpool.go @@ -148,7 +148,7 @@ func (fsi *fsIOPool) Write(path string) (wlk *lock.LockedFile, err error) { return nil, err } - wlk, err = lock.LockedOpenFile(path, os.O_RDWR, 0666) + wlk, err = lock.LockedOpenFile(path, os.O_RDWR, 0o666) if err != nil { switch { case osIsNotExist(err): @@ -175,12 +175,12 @@ func (fsi *fsIOPool) Create(path string) (wlk *lock.LockedFile, err error) { } // Creates parent if missing. - if err = mkdirAll(pathutil.Dir(path), 0777); err != nil { + if err = mkdirAll(pathutil.Dir(path), 0o777); err != nil { return nil, err } // Attempt to create the file. - wlk, err = lock.LockedOpenFile(path, os.O_RDWR|os.O_CREATE, 0666) + wlk, err = lock.LockedOpenFile(path, os.O_RDWR|os.O_CREATE, 0o666) if err != nil { switch { case osIsPermission(err): @@ -220,7 +220,6 @@ func (fsi *fsIOPool) Close(path string) error { // If the file is closed, remove it from the reader pool map. if rlkFile.IsClosed() { - // Purge the cached lock path from map. delete(fsi.readersMap, path) } diff --git a/cmd/fs-v1-rwpool_test.go b/cmd/fs-v1-rwpool_test.go index d9ee6891e..c39a117fd 100644 --- a/cmd/fs-v1-rwpool_test.go +++ b/cmd/fs-v1-rwpool_test.go @@ -110,5 +110,4 @@ func TestRWPool(t *testing.T) { t.Fatal("Unexpected error", err) } } - } diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index bf20f73a2..ae29e61ca 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -97,22 +97,21 @@ func initMetaVolumeFS(fsPath, fsUUID string) error { // if it doesn't exist yet. 
metaBucketPath := pathJoin(fsPath, minioMetaBucket) - if err := os.MkdirAll(metaBucketPath, 0777); err != nil { + if err := os.MkdirAll(metaBucketPath, 0o777); err != nil { return err } metaTmpPath := pathJoin(fsPath, minioMetaTmpBucket, fsUUID) - if err := os.MkdirAll(metaTmpPath, 0777); err != nil { + if err := os.MkdirAll(metaTmpPath, 0o777); err != nil { return err } - if err := os.MkdirAll(pathJoin(fsPath, dataUsageBucket), 0777); err != nil { + if err := os.MkdirAll(pathJoin(fsPath, dataUsageBucket), 0o777); err != nil { return err } metaMultipartPath := pathJoin(fsPath, minioMetaMultipartBucket) - return os.MkdirAll(metaMultipartPath, 0777) - + return os.MkdirAll(metaMultipartPath, 0o777) } // NewFSObjectLayer - initialize new fs object layer. @@ -366,7 +365,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs fsMeta := newFSMetaV1() metaOk := false if len(fsMetaBytes) > 0 { - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(fsMetaBytes, &fsMeta); err == nil { metaOk = true } @@ -474,7 +473,7 @@ func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, p *poli return err } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary configData, err := json.Marshal(p) if err != nil { return err @@ -542,7 +541,7 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) { // Ignore any errors returned here. continue } - var created = fi.ModTime() + created := fi.ModTime() meta, err := globalBucketMetadataSys.Get(fi.Name()) if err == nil { created = meta.Created @@ -705,7 +704,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, return nil, toObjectErr(err, bucket) } - var nsUnlocker = func() {} + nsUnlocker := func() {} if lockType != noLock { // Lock the object before reading. @@ -843,7 +842,7 @@ func (fs *FSObjects) getObjectInfoNoFSLock(ctx context.Context, bucket, object s fsMetaBuf, rerr := ioutil.ReadAll(rc) rc.Close() if rerr == nil { - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if rerr = json.Unmarshal(fsMetaBuf, &fsMeta); rerr != nil { // For any error to read fsMeta, set default ETag and proceed. fsMeta = fs.defaultFsJSON(object) @@ -1029,7 +1028,7 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string // with a slash separator, we treat it like a valid operation // and return success. 
if isObjectDir(object, data.Size()) { - if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil { + if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0o777); err != nil { logger.LogIf(ctx, err) return ObjectInfo{}, toObjectErr(err, bucket, object) } diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go index 9f427e957..785523451 100644 --- a/cmd/fs-v1_test.go +++ b/cmd/fs-v1_test.go @@ -205,7 +205,6 @@ func TestFSDeleteObject(t *testing.T) { t.Fatal("Unexpected error: ", err) } } - } // TestFSDeleteBucket - tests for fs DeleteBucket @@ -263,7 +262,7 @@ func TestFSListBuckets(t *testing.T) { } // Create a bucket with invalid name - if err := os.MkdirAll(pathJoin(fs.fsPath, "vo^"), 0777); err != nil { + if err := os.MkdirAll(pathJoin(fs.fsPath, "vo^"), 0o777); err != nil { t.Fatal("Unexpected error: ", err) } f, err := os.Create(pathJoin(fs.fsPath, "test")) diff --git a/cmd/gateway-common.go b/cmd/gateway-common.go index 23e711c14..cce5df346 100644 --- a/cmd/gateway-common.go +++ b/cmd/gateway-common.go @@ -130,7 +130,6 @@ func FromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) Li CommonPrefixes: commonPrefixes, EncodingType: lmur.EncodingType, } - } // FromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo diff --git a/cmd/gateway-main.go b/cmd/gateway-main.go index f00ce1fdc..bb5726242 100644 --- a/cmd/gateway-main.go +++ b/cmd/gateway-main.go @@ -37,14 +37,12 @@ import ( "github.com/minio/pkg/env" ) -var ( - gatewayCmd = cli.Command{ - Name: "gateway", - Usage: "start object storage gateway", - Flags: append(ServerFlags, GlobalFlags...), - HideHelpCommand: true, - } -) +var gatewayCmd = cli.Command{ + Name: "gateway", + Usage: "start object storage gateway", + Flags: append(ServerFlags, GlobalFlags...), + HideHelpCommand: true, +} // GatewayLocker implements custom NewNSLock implementation type GatewayLocker struct { diff --git a/cmd/gateway/azure/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go index 22bba7721..8c61a3da9 100644 --- a/cmd/gateway/azure/gateway-azure.go +++ b/cmd/gateway/azure/gateway-azure.go @@ -349,7 +349,6 @@ func azureTierToS3StorageClass(tierType string) string { default: return "STANDARD" } - } // azurePropertiesToS3Meta converts Azure metadata/properties to S3 @@ -578,7 +577,6 @@ func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi min resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{ Prefix: bucket, }) - if err != nil { return bi, azureToObjectError(err, bucket) } @@ -604,7 +602,6 @@ func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketI for marker.NotDone() { resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{}) - if err != nil { return nil, azureToObjectError(err) } diff --git a/cmd/gateway/azure/gateway-azure_test.go b/cmd/gateway/azure/gateway-azure_test.go index df3e4c5c7..706d449b8 100644 --- a/cmd/gateway/azure/gateway-azure_test.go +++ b/cmd/gateway/azure/gateway-azure_test.go @@ -192,34 +192,41 @@ func TestAzureCodesToObjectError(t *testing.T) { }{ { nil, "ContainerAlreadyExists", 0, - minio.BucketExists{Bucket: "bucket"}, "bucket", "", + minio.BucketExists{Bucket: "bucket"}, + "bucket", "", }, { nil, "InvalidResourceName", 0, - minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", + minio.BucketNameInvalid{Bucket: "bucket."}, + "bucket.", "", }, { nil, "RequestBodyTooLarge", 0, - minio.PartTooBig{}, "", "", + minio.PartTooBig{}, + "", "", }, { nil, 
"InvalidMetadata", 0, - minio.UnsupportedMetadata{}, "", "", + minio.UnsupportedMetadata{}, + "", "", }, { nil, "", http.StatusNotFound, minio.ObjectNotFound{ Bucket: "bucket", Object: "object", - }, "bucket", "object", + }, + "bucket", "object", }, { nil, "", http.StatusNotFound, - minio.BucketNotFound{Bucket: "bucket"}, "bucket", "", + minio.BucketNotFound{Bucket: "bucket"}, + "bucket", "", }, { nil, "", http.StatusBadRequest, - minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "", + minio.BucketNameInvalid{Bucket: "bucket."}, + "bucket.", "", }, { fmt.Errorf("unhandled azure error"), "", http.StatusForbidden, diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go index 7054d9e9a..8838da916 100644 --- a/cmd/gateway/gcs/gateway-gcs.go +++ b/cmd/gateway/gcs/gateway-gcs.go @@ -1121,7 +1121,6 @@ func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key strin LastModified: minio.UTCNow(), Size: data.Size(), }, nil - } // gcsGetPartInfo returns PartInfo of a given object part diff --git a/cmd/gateway/gcs/gateway-gcs_test.go b/cmd/gateway/gcs/gateway-gcs_test.go index 6745cf171..a725ead4a 100644 --- a/cmd/gateway/gcs/gateway-gcs_test.go +++ b/cmd/gateway/gcs/gateway-gcs_test.go @@ -72,7 +72,6 @@ func TestToGCSPageToken(t *testing.T) { t.Errorf("Test %d: Expected %s, got %s", i+1, toGCSPageToken(testCase.Name), testCase.Token) } } - } // TestIsValidGCSProjectIDFormat tests isValidGCSProjectIDFormat @@ -166,7 +165,6 @@ func TestGCSMultipartDataName(t *testing.T) { } func TestFromMinioClientListBucketResultToV2Info(t *testing.T) { - listBucketResult := miniogo.ListBucketResult{ IsTruncated: false, Marker: "testMarker", diff --git a/cmd/gateway/hdfs/gateway-hdfs.go b/cmd/gateway/hdfs/gateway-hdfs.go index 9012a5dc0..7fd1e44be 100644 --- a/cmd/gateway/hdfs/gateway-hdfs.go +++ b/cmd/gateway/hdfs/gateway-hdfs.go @@ -133,7 +133,6 @@ func getKerberosClient() (*krb.Client, error) { realm := env.Get("KRB5REALM", "") if username == "" || realm == "" { return nil, errors.New("empty KRB5USERNAME or KRB5REALM") - } return krb.NewWithKeytab(username, realm, kt, cfg), nil @@ -216,7 +215,7 @@ func (g *HDFS) NewGatewayLayer(creds madmin.Credentials) (minio.ObjectLayer, err return nil, fmt.Errorf("unable to initialize hdfsClient: %v", err) } - if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0755)); err != nil { + if err = clnt.MkdirAll(minio.PathJoin(commonPath, hdfsSeparator, minioMetaTmpBucket), os.FileMode(0o755)); err != nil { return nil, err } @@ -324,7 +323,7 @@ func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket string, if !hdfsIsValidBucketName(bucket) { return minio.BucketNameInvalid{Bucket: bucket} } - return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0755)), bucket) + return hdfsToObjectErr(ctx, n.clnt.Mkdir(n.hdfsPathJoin(bucket), os.FileMode(0o755)), bucket) } func (n *hdfsObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) { @@ -480,7 +479,6 @@ func fileInfoToObjectInfo(bucket string, entry string, fi os.FileInfo) minio.Obj // a path entry to an `os.FileInfo`. It also saves the listed path's `os.FileInfo` in the cache. 
func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[string]os.FileInfo) (os.FileInfo, error) { dirReader, err := n.clnt.Open(filePath) - if err != nil { return nil, err } @@ -494,7 +492,6 @@ func (n *hdfsObjects) populateDirectoryListing(filePath string, fileInfos map[st fileInfos[key] = dirStat infos, err := dirReader.Readdir(0) - if err != nil { return nil, err } @@ -602,7 +599,6 @@ func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string, // exit in case of partial read pipeCloser := func() { pr.Close() } return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser) - } func (n *hdfsObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) { @@ -689,7 +685,7 @@ func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object strin // If its a directory create a prefix { if strings.HasSuffix(object, hdfsSeparator) && r.Size() == 0 { - if err = n.clnt.MkdirAll(name, os.FileMode(0755)); err != nil { + if err = n.clnt.MkdirAll(name, os.FileMode(0o755)); err != nil { n.deleteObject(n.hdfsPathJoin(bucket), name) return objInfo, hdfsToObjectErr(ctx, err, bucket, object) } @@ -707,7 +703,7 @@ func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object strin } dir := path.Dir(name) if dir != "" { - if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil { + if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil { w.Close() n.deleteObject(n.hdfsPathJoin(bucket), dir) return objInfo, hdfsToObjectErr(ctx, err, bucket, object) @@ -839,7 +835,7 @@ func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, objec name := n.hdfsPathJoin(bucket, object) dir := path.Dir(name) if dir != "" { - if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil { + if err = n.clnt.MkdirAll(dir, os.FileMode(0o755)); err != nil { return objInfo, hdfsToObjectErr(ctx, err, bucket, object) } } diff --git a/cmd/gateway/s3/gateway-s3-chain.go b/cmd/gateway/s3/gateway-s3-chain.go index f3b5830e0..2033420b0 100644 --- a/cmd/gateway/s3/gateway-s3-chain.go +++ b/cmd/gateway/s3/gateway-s3-chain.go @@ -89,7 +89,6 @@ func (c *Chain) Retrieve() (credentials.Value, error) { } return credentials.Value{}, fmt.Errorf("no credentials found in %s cannot proceed", providers) - } // IsExpired will returned the expired state of the currently cached provider diff --git a/cmd/gateway/s3/gateway-s3-metadata.go b/cmd/gateway/s3/gateway-s3-metadata.go index 1d98073aa..c82dd055c 100644 --- a/cmd/gateway/s3/gateway-s3-metadata.go +++ b/cmd/gateway/s3/gateway-s3-metadata.go @@ -139,7 +139,7 @@ func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partInd // Constructs GWMetaV1 using `jsoniter` lib to retrieve each field. 
func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, err error) { - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary err = json.Unmarshal(gwMetaBuf, &gwMeta) return gwMeta, err } diff --git a/cmd/gateway/s3/gateway-s3-sse.go b/cmd/gateway/s3/gateway-s3-sse.go index bfaa86516..7596636c0 100644 --- a/cmd/gateway/s3/gateway-s3-sse.go +++ b/cmd/gateway/s3/gateway-s3-sse.go @@ -75,12 +75,10 @@ func (l *s3EncObjects) ListObjects(ctx context.Context, bucket string, prefix st loi.Objects = res.Objects loi.Prefixes = res.Prefixes return loi, nil - } // ListObjectsV2 lists all blobs in S3 bucket filtered by prefix func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) { - var objects []minio.ObjectInfo var prefixes []string var isTruncated bool @@ -423,7 +421,6 @@ func (l *s3EncObjects) DeleteObjects(ctx context.Context, bucket string, objects // ListMultipartUploads lists all multipart uploads. func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) { - lmi, e = l.s3Objects.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) if e != nil { return @@ -505,7 +502,6 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri // PutObjectPart puts a part of object in bucket func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) { - if opts.ServerSideEncryption == nil { return l.s3Objects.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts) } @@ -630,7 +626,6 @@ func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string, // CompleteMultipartUpload completes ongoing multipart upload and finalizes object func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) { - tmpMeta, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID)) if err != nil { oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts) diff --git a/cmd/global-heal.go b/cmd/global-heal.go index 0ee0406df..ff138736c 100644 --- a/cmd/global-heal.go +++ b/cmd/global-heal.go @@ -89,7 +89,7 @@ func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealS } } - var healDisksMap = map[string]struct{}{} + healDisksMap := map[string]struct{}{} for _, ep := range getLocalDisksToHeal() { healDisksMap[ep.String()] = struct{}{} } @@ -139,7 +139,6 @@ func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealS status.SCParity[storageclass.RRS] = backendInfo.RRSCParity return status, true - } func mustGetHealSequence(ctx context.Context) *healSequence { @@ -306,7 +305,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, }, finished: nil, }) - if err != nil { // Set this such that when we return this function // we let the caller retry this disk again for the diff --git a/cmd/handler-api.go b/cmd/handler-api.go index 2a5ccf691..8839cf54a 100644 --- a/cmd/handler-api.go +++ 
b/cmd/handler-api.go @@ -77,7 +77,6 @@ func availableMemory() (available uint64) { if available != 9223372036854771712 { // This means cgroup memory limit is configured. return - } // no-limit set proceed to set the limits based on virtual memory. } // for all other platforms limits are based on virtual memory. diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index 3f0958cb3..2442c22ac 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -307,7 +307,7 @@ func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (fil // an ugly way of handling this situation. Refer here // https://golang.org/src/mime/multipart/formdata.go#L61 if len(form.File) == 0 { - var b = &bytes.Buffer{} + b := &bytes.Buffer{} for _, v := range formValues["File"] { b.WriteString(v) } @@ -544,7 +544,6 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) { HTTPStatusCode: http.StatusBadRequest, }, r.URL) } - } // gets host name for current node diff --git a/cmd/healthcheck-router.go b/cmd/healthcheck-router.go index 8b921ba2e..3eff394ec 100644 --- a/cmd/healthcheck-router.go +++ b/cmd/healthcheck-router.go @@ -34,7 +34,6 @@ const ( // registerHealthCheckRouter - add handler functions for liveness and readiness routes. func registerHealthCheckRouter(router *mux.Router) { - // Healthcheck router healthRouter := router.PathPrefix(healthCheckPathPrefix).Subrouter() diff --git a/cmd/http-stats.go b/cmd/http-stats.go index 626413544..4db09b571 100644 --- a/cmd/http-stats.go +++ b/cmd/http-stats.go @@ -128,7 +128,7 @@ func (stats *HTTPAPIStats) Dec(api string) { func (stats *HTTPAPIStats) Load() map[string]int { stats.Lock() defer stats.Unlock() - var apiStats = make(map[string]int, len(stats.apiStats)) + apiStats := make(map[string]int, len(stats.apiStats)) for k, v := range stats.apiStats { apiStats[k] = v } diff --git a/cmd/http-tracer_test.go b/cmd/http-tracer_test.go index 8567089b3..4979afea7 100644 --- a/cmd/http-tracer_test.go +++ b/cmd/http-tracer_test.go @@ -28,13 +28,16 @@ func TestRedactLDAPPwd(t *testing.T) { expectedQuery string }{ {"", ""}, - {"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=can+youreadthis%3F&Version=2011-06-15", + { + "?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=can+youreadthis%3F&Version=2011-06-15", "?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&LDAPPassword=*REDACTED*&Version=2011-06-15", }, - {"LDAPPassword=can+youreadthis%3F&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername", + { + "LDAPPassword=can+youreadthis%3F&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername", "LDAPPassword=*REDACTED*&Version=2011-06-15&?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername", }, - {"?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=can+youreadthis%3F", + { + "?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=can+youreadthis%3F", "?Action=AssumeRoleWithLDAPIdentity&LDAPUsername=myusername&Version=2011-06-15&LDAPPassword=*REDACTED*", }, { diff --git a/cmd/iam-etcd-store.go b/cmd/iam-etcd-store.go index 53d266c7d..0f0215803 100644 --- a/cmd/iam-etcd-store.go +++ b/cmd/iam-etcd-store.go @@ -139,7 +139,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error { if err != nil { return err } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary return json.Unmarshal(data, 
item) } @@ -416,7 +416,6 @@ func (ies *IAMEtcdStore) loadGroup(ctx context.Context, group string, m map[stri } m[group] = gi return nil - } func (ies *IAMEtcdStore) loadGroups(ctx context.Context, m map[string]GroupInfo) error { @@ -437,7 +436,6 @@ func (ies *IAMEtcdStore) loadGroups(ctx context.Context, m map[string]GroupInfo) } } return nil - } func (ies *IAMEtcdStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error { @@ -497,7 +495,6 @@ func (ies *IAMEtcdStore) loadMappedPolicies(ctx context.Context, userType IAMUse } } return nil - } func (ies *IAMEtcdStore) savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error { @@ -601,5 +598,4 @@ func (ies *IAMEtcdStore) watch(ctx context.Context, keyPath string) <-chan iamWa } }() return ch - } diff --git a/cmd/iam-object-store.go b/cmd/iam-object-store.go index a288254d0..736c523e2 100644 --- a/cmd/iam-object-store.go +++ b/cmd/iam-object-store.go @@ -128,7 +128,7 @@ func (iamOS *IAMObjectStore) migrateUsersConfigToV1(ctx context.Context) error { next: // 4. check if user identity has old format. identityPath := pathJoin(basePrefix, user, iamIdentityFile) - var cred = auth.Credentials{ + cred := auth.Credentials{ AccessKey: user, } if err := iamOS.loadIAMConfig(ctx, &cred, identityPath); err != nil { @@ -159,7 +159,6 @@ func (iamOS *IAMObjectStore) migrateUsersConfigToV1(ctx context.Context) error { // has not changed. } return nil - } func (iamOS *IAMObjectStore) migrateToV1(ctx context.Context) error { @@ -201,7 +200,7 @@ func (iamOS *IAMObjectStore) migrateBackendFormat(ctx context.Context) error { } func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error { - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary data, err := json.Marshal(item) if err != nil { return err @@ -238,7 +237,7 @@ func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{} if err != nil { return err } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary return json.Unmarshal(data, item) } @@ -364,8 +363,8 @@ func (iamOS *IAMObjectStore) loadGroups(ctx context.Context, m map[string]GroupI } func (iamOS *IAMObjectStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, - m map[string]MappedPolicy) error { - + m map[string]MappedPolicy, +) error { var p MappedPolicy err := iamOS.loadIAMConfig(ctx, &p, getMappedPolicyPath(name, userType, isGroup)) if err != nil { diff --git a/cmd/iam-store.go b/cmd/iam-store.go index e659f2192..01b5fbed2 100644 --- a/cmd/iam-store.go +++ b/cmd/iam-store.go @@ -213,7 +213,7 @@ func (d *PolicyDoc) update(p iampolicy.Policy) { // from iampolicy.Policy to PolicyDoc. To avoid a migration, loading supports // both the old and the new formats. func (d *PolicyDoc) parseJSON(data []byte) error { - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary var doc PolicyDoc err := json.Unmarshal(data, &doc) if err != nil { @@ -378,7 +378,6 @@ func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([] // IAMStorageAPI defines an interface for the IAM persistence layer type IAMStorageAPI interface { - // The role of the read-write lock is to prevent go routines from // concurrently reading and writing the IAM storage. 
The (r)lock() // functions return the iamCache. The cache can be safely written to @@ -387,32 +386,23 @@ type IAMStorageAPI interface { unlock() rlock() *iamCache runlock() - migrateBackendFormat(context.Context) error - getUsersSysType() UsersSysType - loadPolicyDoc(ctx context.Context, policy string, m map[string]PolicyDoc) error loadPolicyDocs(ctx context.Context, m map[string]PolicyDoc) error - loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]auth.Credentials) error loadUsers(ctx context.Context, userType IAMUserType, m map[string]auth.Credentials) error - loadGroup(ctx context.Context, group string, m map[string]GroupInfo) error loadGroups(ctx context.Context, m map[string]GroupInfo) error - loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error - saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error loadIAMConfig(ctx context.Context, item interface{}, path string) error deleteIAMConfig(ctx context.Context, path string) error - savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error saveUserIdentity(ctx context.Context, name string, userType IAMUserType, u UserIdentity, opts ...options) error saveGroupInfo(ctx context.Context, group string, gi GroupInfo) error - deletePolicyDoc(ctx context.Context, policyName string) error deleteMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool) error deleteUserIdentity(ctx context.Context, name string, userType IAMUserType) error @@ -639,7 +629,6 @@ func (store *IAMStoreSys) AddUsersToGroup(ctx context.Context, group string, mem } return nil - } // helper function - does not take any locks. Updates only cache if @@ -880,7 +869,6 @@ func (store *IAMStoreSys) PolicyDBSet(ctx context.Context, name, policy string, cache.iamGroupPolicyMap[name] = mp } return nil - } // PolicyNotificationHandler - loads given policy from storage. If not present, @@ -1034,7 +1022,6 @@ func (store *IAMStoreSys) GetPolicyDoc(name string) (r PolicyDoc, err error) { // SetPolicy - creates a policy with name. func (store *IAMStoreSys) SetPolicy(ctx context.Context, name string, policy iampolicy.Policy) error { - if policy.IsEmpty() || name == "" { return errInvalidArgument } @@ -1058,7 +1045,6 @@ func (store *IAMStoreSys) SetPolicy(ctx context.Context, name string, policy iam cache.iamPolicyDocsMap[name] = d return nil - } // ListPolicies - fetches all policies from storage and updates cache as well. 
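A subtler rewrite visible above (CopyObjectPart, loadMappedPolicy, and NewGetObjectReader further down): when a function signature wraps, the closing parenthesis of the parameter list moves onto its own line with a trailing comma before it, and the blank line that used to pad the body is dropped. A minimal sketch with hypothetical names, shaped like those hunks:

package main

import "fmt"

// Hypothetical signature: the wrapped parameter list now ends with a
// trailing comma and a lone ") error {" line, with no blank line opening
// the body.
func loadMapped(name string, isGroup bool,
	m map[string]string,
) error {
	m[name] = fmt.Sprint(isGroup)
	return nil
}

func main() {
	m := map[string]string{}
	fmt.Println(loadMapped("readwrite", false, m), m)
}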
@@ -1118,7 +1104,6 @@ func (store *IAMStoreSys) FilterPolicies(policyName string, bucketName string) ( defer store.runlock() return filterPolicies(cache, policyName, bucketName) - } // GetBucketUsers - returns users (not STS or service accounts) that have access diff --git a/cmd/iam.go b/cmd/iam.go index 00afa4a5e..de7776c70 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -170,7 +170,6 @@ func (sys *IAMSys) initStore(objAPI ObjectLayer, etcdClient *etcd.Client) { } else { sys.store = &IAMStoreSys{newIAMEtcdStore(etcdClient, sys.usersSysType)} } - } // Initialized checks if IAM is initialized @@ -801,9 +800,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro } } - var ( - cred auth.Credentials - ) + var cred auth.Credentials var err error if len(opts.accessKey) > 0 { diff --git a/cmd/local-locker.go b/cmd/local-locker.go index 403c75858..21df92ef4 100644 --- a/cmd/local-locker.go +++ b/cmd/local-locker.go @@ -140,7 +140,6 @@ func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool } } return - } // removeEntry based on the uid of the lock message, removes a single entry from the diff --git a/cmd/metacache-entries_test.go b/cmd/metacache-entries_test.go index 3f88eba46..3857724d8 100644 --- a/cmd/metacache-entries_test.go +++ b/cmd/metacache-entries_test.go @@ -228,7 +228,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) { if err != nil { t.Fatal(err) } - var inputs = []xlMetaV2{ + inputs := []xlMetaV2{ 0: { versions: []xlMetaV2ShallowVersion{ {header: xlMetaV2VersionHeader{ @@ -379,7 +379,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) { for i, xl := range inputs { xl.sortByModTime() var err error - var entry = metaCacheEntry{ + entry := metaCacheEntry{ name: "testobject", } entry.metadata, err = xl.AppendTo(nil) diff --git a/cmd/metacache-set.go b/cmd/metacache-set.go index c87997b12..624d67e39 100644 --- a/cmd/metacache-set.go +++ b/cmd/metacache-set.go @@ -137,7 +137,7 @@ func (o *listPathOptions) debugln(data ...interface{}) { // The returned function will return the results once there is enough or input is closed, // or the context is canceled. func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCacheEntry) func() (metaCacheEntriesSorted, error) { - var resultsDone = make(chan metaCacheEntriesSorted) + resultsDone := make(chan metaCacheEntriesSorted) // Copy so we can mutate resCh := resultsDone var done bool @@ -218,7 +218,7 @@ func (o *listPathOptions) findFirstPart(fi FileInfo) (int, error) { } o.debugln("searching for ", search) var tmp metacacheBlock - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary i := 0 for { partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, i) diff --git a/cmd/metacache-stream.go b/cmd/metacache-stream.go index e692d4863..5c42424a6 100644 --- a/cmd/metacache-stream.go +++ b/cmd/metacache-stream.go @@ -163,7 +163,7 @@ func (w *metacacheWriter) stream() (chan<- metaCacheEntry, error) { return nil, errors.New("metacacheWriter: writer not initialized") } } - var objs = make(chan metaCacheEntry, 100) + objs := make(chan metaCacheEntry, 100) w.streamErr = nil w.streamWg.Add(1) go func() { @@ -406,7 +406,7 @@ func (r *metacacheReader) forwardTo(s string) error { r.current.metadata = nil } // temporary name buffer. 
- var tmp = make([]byte, 0, 256) + tmp := make([]byte, 0, 256) for { if more, err := r.mr.ReadBool(); !more { switch err { @@ -838,7 +838,7 @@ type metacacheBlock struct { } func (b metacacheBlock) headerKV() map[string]string { - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary v, err := json.Marshal(b) if err != nil { logger.LogIf(context.Background(), err) // Unlikely diff --git a/cmd/metacache-stream_test.go b/cmd/metacache-stream_test.go index 5e67a015f..3738fc665 100644 --- a/cmd/metacache-stream_test.go +++ b/cmd/metacache-stream_test.go @@ -339,6 +339,7 @@ func Test_metacacheReader_next(t *testing.T) { } } } + func Test_metacacheReader_peek(t *testing.T) { r := loadMetacacheSample(t) defer r.Close() diff --git a/cmd/metrics-v2.go b/cmd/metrics-v2.go index cba30d464..ee303b463 100644 --- a/cmd/metrics-v2.go +++ b/cmd/metrics-v2.go @@ -379,6 +379,7 @@ func getBucketUsageTotalBytesMD() MetricDescription { Type: gaugeMetric, } } + func getBucketUsageObjectsTotalMD() MetricDescription { return MetricDescription{ Namespace: bucketMetricNamespace, @@ -1029,7 +1030,6 @@ func getS3TTFBMetric() *MetricsGroup { metrics = append(metrics, metric) } } - }() httpRequestsDuration.Collect(ch) @@ -1744,7 +1744,6 @@ func populateAndPublish(metricsGroups []*MetricsGroup, publish func(m Metric) bo // Collect is called by the Prometheus registry when collecting metrics. func (c *minioNodeCollector) Collect(ch chan<- prometheus.Metric) { - // Expose MinIO's version information minioVersionInfo.WithLabelValues(Version, CommitID).Set(1.0) diff --git a/cmd/metrics.go b/cmd/metrics.go index a919aa820..dc7956dbf 100644 --- a/cmd/metrics.go +++ b/cmd/metrics.go @@ -92,7 +92,6 @@ func (c *minioCollector) Describe(ch chan<- *prometheus.Desc) { // Collect is called by the Prometheus registry when collecting metrics. func (c *minioCollector) Collect(ch chan<- prometheus.Metric) { - // Expose MinIO's version information minioVersionInfo.WithLabelValues(Version, CommitID).Set(1.0) @@ -647,7 +646,6 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) { } func metricsHandler() http.Handler { - registry := prometheus.NewRegistry() err := registry.Register(minioVersionInfo) @@ -671,7 +669,6 @@ func metricsHandler() http.Handler { ErrorHandling: promhttp.ContinueOnError, }), ) - } // AuthMiddleware checks if the bearer token is valid and authorized. 
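Several of the hunks above change only vertical whitespace: the blank line that opened a function body disappears, and a single blank line is inserted between adjacent top-level functions (see Test_metacacheReader_peek and getBucketUsageObjectsTotalMD). A minimal sketch of the spacing the tool appears to enforce:

package main

import "fmt"

func collect() {
	fmt.Println("collect") // no blank line permitted right after the opening brace
}

// one blank line separates multiline top-level declarations
func describe() {
	fmt.Println("describe")
}

func main() {
	collect()
	describe()
}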
diff --git a/cmd/mrf.go b/cmd/mrf.go index d628a2dbf..c7df7a20d 100644 --- a/cmd/mrf.go +++ b/cmd/mrf.go @@ -183,7 +183,7 @@ func (m *mrfState) healRoutine() { idler := time.NewTimer(mrfInfoResetInterval) defer idler.Stop() - var mrfHealingOpts = madmin.HealOpts{ + mrfHealingOpts := madmin.HealOpts{ ScanMode: globalHealConfig.ScanMode(), Remove: healDeleteDangling, } diff --git a/cmd/namespace-lock.go b/cmd/namespace-lock.go index d111d2ff7..54e024953 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -20,13 +20,12 @@ package cmd import ( "context" "errors" + "fmt" pathutil "path" "runtime" "sort" "strings" "sync" - - "fmt" "time" "github.com/minio/minio/internal/dsync" diff --git a/cmd/naughty-disk_test.go b/cmd/naughty-disk_test.go index 335e2bb15..5f31e11ee 100644 --- a/cmd/naughty-disk_test.go +++ b/cmd/naughty-disk_test.go @@ -148,6 +148,7 @@ func (d *naughtyDisk) StatVol(ctx context.Context, volume string) (vol VolInfo, } return d.disk.StatVol(ctx, volume) } + func (d *naughtyDisk) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) { if err := d.calcError(); err != nil { return err diff --git a/cmd/net.go b/cmd/net.go index ab4ec9c25..a3f8c5f6f 100644 --- a/cmd/net.go +++ b/cmd/net.go @@ -325,7 +325,6 @@ func isLocalHost(host string, port string, localPort string) (bool, error) { // formats, point to the same machine, e.g: // ':9000' and 'http://localhost:9000/' will return true func sameLocalAddrs(addr1, addr2 string) (bool, error) { - // Extract host & port from given parameters host1, port1, err := extractHostPort(addr1) if err != nil { diff --git a/cmd/net_test.go b/cmd/net_test.go index d7e029113..515c22005 100644 --- a/cmd/net_test.go +++ b/cmd/net_test.go @@ -342,6 +342,7 @@ func TestSameLocalAddrs(t *testing.T) { }) } } + func TestIsHostIP(t *testing.T) { testCases := []struct { args string diff --git a/cmd/notification-summary.go b/cmd/notification-summary.go index ac00d125a..8327da76e 100644 --- a/cmd/notification-summary.go +++ b/cmd/notification-summary.go @@ -23,7 +23,6 @@ import ( // GetTotalCapacity gets the total capacity in the cluster. func GetTotalCapacity(diskInfo []madmin.Disk) (capacity uint64) { - for _, disk := range diskInfo { capacity += disk.TotalSpace } diff --git a/cmd/notification.go b/cmd/notification.go index 84cc446fe..a5c6af8ba 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -326,7 +326,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io header, zerr := zip.FileInfoHeader(dummyFileInfo{ name: fmt.Sprintf("profile-%s-%s", client.host.String(), typ), size: int64(len(data)), - mode: 0600, + mode: 0o600, modTime: UTCNow(), isDir: false, sys: nil, @@ -376,7 +376,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io header, zerr := zip.FileInfoHeader(dummyFileInfo{ name: fmt.Sprintf("profile-%s-%s", thisAddr, typ), size: int64(len(data)), - mode: 0600, + mode: 0o600, modTime: UTCNow(), isDir: false, sys: nil, @@ -432,7 +432,7 @@ func (sys *NotificationSys) SignalService(sig serviceSignal) []NotificationPeerE // updateBloomFilter will cycle all servers to the current index and // return a merged bloom filter if a complete one can be retrieved. 
func (sys *NotificationSys) updateBloomFilter(ctx context.Context, current uint64) (*bloomFilter, error) { - var req = bloomFilterRequest{ + req := bloomFilterRequest{ Current: current, Oldest: current - dataUsageUpdateDirCycles, } diff --git a/cmd/object-api-deleteobject_test.go b/cmd/object-api-deleteobject_test.go index c962f8ea8..218cd1f70 100644 --- a/cmd/object-api-deleteobject_test.go +++ b/cmd/object-api-deleteobject_test.go @@ -32,7 +32,6 @@ func TestDeleteObject(t *testing.T) { // Unit test for DeleteObject in general. func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) { - type objectUpload struct { name string content string diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index 34cfc5dd8..0b2d4ce35 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -562,8 +562,7 @@ func (e ObjectTooSmall) Error() string { } // OperationTimedOut - a timeout occurred. -type OperationTimedOut struct { -} +type OperationTimedOut struct{} func (e OperationTimedOut) Error() string { return "Operation timed out" diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index c64a9d365..576d924d4 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -167,7 +167,6 @@ type ObjectLayer interface { // Storage operations. Shutdown(context.Context) error NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32) error - BackendInfo() madmin.BackendInfo StorageInfo(ctx context.Context) (StorageInfo, []error) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) @@ -222,7 +221,6 @@ type ObjectLayer interface { IsEncryptionSupported() bool IsTaggingSupported() bool IsCompressionSupported() bool - SetDriveCounts() []int // list of erasure stripe size for each pool in order. // Healing operations. diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index 2809118c1..8e8e84b5a 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -1008,7 +1008,7 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v } func objInfoNames(o []ObjectInfo) []string { - var res = make([]string, len(o)) + res := make([]string, len(o)) for i := range o { res[i] = o[i].Name } @@ -1830,7 +1830,7 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { var foundObjects []ObjectInfo var foundPrefixes []string - var marker = "" + marker := "" for { result, err := obj.ListObjects(context.Background(), testCase.bucketName, testCase.prefix, marker, testCase.delimiter, testCase.page) diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index 40b21f10f..ec10d8c96 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -36,7 +36,6 @@ func TestObjectNewMultipartUpload(t *testing.T) { // Tests validate creation of new multipart upload instance. func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestErrHandler) { - bucket := "minio-bucket" object := "minio-object" opts := ObjectOptions{} @@ -85,7 +84,6 @@ func TestObjectAbortMultipartUpload(t *testing.T) { // Tests validate creation of abort multipart upload instance. 
func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t TestErrHandler) { - bucket := "minio-bucket" object := "minio-object" opts := ObjectOptions{} @@ -207,8 +205,10 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH // Cases with invalid bucket name. {".test", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: .test")}, {"------", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: ------")}, - {"$this-is-not-valid-too", "obj", "", 1, "", "", "", 0, false, "", - fmt.Errorf("%s", "Bucket not found: $this-is-not-valid-too")}, + { + "$this-is-not-valid-too", "obj", "", 1, "", "", "", 0, false, "", + fmt.Errorf("%s", "Bucket not found: $this-is-not-valid-too"), + }, {"a", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: a")}, // Test case - 5. // Case with invalid object names. @@ -238,21 +238,31 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH {bucket, "none-object", uploadID, 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id "+uploadID)}, // Test case - 12. // Input to replicate Md5 mismatch. - {bucket, object, uploadID, 1, "", "d41d8cd98f00b204e9800998ecf8427f", "", 0, false, "", - hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}}, + { + bucket, object, uploadID, 1, "", "d41d8cd98f00b204e9800998ecf8427f", "", 0, false, "", + hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}, + }, // Test case - 13. // When incorrect sha256 is provided. - {bucket, object, uploadID, 1, "", "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854", 0, false, "", - hash.SHA256Mismatch{ExpectedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854", - CalculatedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}}, + { + bucket, object, uploadID, 1, "", "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854", 0, false, "", + hash.SHA256Mismatch{ + ExpectedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854", + CalculatedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + }, // Test case - 14. // Input with size more than the size of actual data inside the reader. - {bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f3335", "", int64(len("abcd") + 1), false, "", - hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f3335", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"}}, + { + bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f3335", "", int64(len("abcd") + 1), false, "", + hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f3335", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"}, + }, // Test case - 15. // Input with size less than the size of actual data inside the reader. - {bucket, object, uploadID, 1, "abcd", "900150983cd24fb0d6963f7d28e17f73", "", int64(len("abcd") - 1), false, "", - hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"}}, + { + bucket, object, uploadID, 1, "abcd", "900150983cd24fb0d6963f7d28e17f73", "", int64(len("abcd") - 1), false, "", + hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"}, + }, // Test case - 16-19. // Validating for success cases. 
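The bulkiest test churn above is a single rewrap: a table entry that spans multiple lines now opens with `{` on its own line and closes with `},` on its own line, instead of letting fields trail after the opening brace. A minimal sketch of the entry style, with hypothetical fields rather than the real test table:

package main

import (
	"errors"
	"fmt"
)

type putCase struct {
	bucket, object string
	size           int64
	expectedErr    error
}

var cases = []putCase{
	{"minio-bucket", "obj", 4, nil}, // a short entry may stay on one line
	{
		"minio-bucket", "minio-object",
		int64(len("abcd") + 1),
		errors.New("Bucket not found: .test"),
	},
}

func main() {
	fmt.Println(len(cases), "test cases")
}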
@@ -295,7 +305,6 @@ func TestListMultipartUploads(t *testing.T) { // testListMultipartUploads - Tests validate listing of multipart uploads. func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHandler) { - bucketNames := []string{"minio-bucket", "minio-2-bucket", "minio-3-bucket"} objectNames := []string{"minio-object-1.txt", "minio-object.txt", "neymar-1.jpeg", "neymar.jpeg", "parrot-1.png", "parrot.png"} uploadIDs := []string{} @@ -1028,13 +1037,22 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan {bucketNames[0], "", "", "", "-", 0, ListMultipartsInfo{Delimiter: "-"}, nil, true}, // Testing for failure cases with both perfix and marker (Test number 10). // The prefix and marker combination to be valid it should satisfy strings.HasPrefix(marker, prefix). - {bucketNames[0], "asia", "europe-object", "", "", 0, ListMultipartsInfo{}, - fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", "europe-object", "asia"), false}, + { + bucketNames[0], "asia", "europe-object", "", "", 0, + ListMultipartsInfo{}, + fmt.Errorf("Invalid combination of marker '%s' and prefix '%s'", "europe-object", "asia"), false, + }, // Setting an invalid combination of uploadIDMarker and Marker (Test number 11-12). - {bucketNames[0], "asia", "asia/europe/", "abc", "", 0, ListMultipartsInfo{}, - fmt.Errorf("Invalid combination of uploadID marker '%s' and marker '%s'", "abc", "asia/europe/"), false}, - {bucketNames[0], "asia", "asia/europe", "abc", "", 0, ListMultipartsInfo{}, - fmt.Errorf("Malformed upload id %s", "abc"), false}, + { + bucketNames[0], "asia", "asia/europe/", "abc", "", 0, + ListMultipartsInfo{}, + fmt.Errorf("Invalid combination of uploadID marker '%s' and marker '%s'", "abc", "asia/europe/"), false, + }, + { + bucketNames[0], "asia", "asia/europe", "abc", "", 0, + ListMultipartsInfo{}, + fmt.Errorf("Malformed upload id %s", "abc"), false, + }, // Setting up valid case of ListMultiPartUploads. // Test case with multiple parts for a single uploadID (Test number 13). @@ -1159,7 +1177,6 @@ func TestListObjectPartsDiskNotFound(t *testing.T) { // testListObjectParts - Tests validate listing of object parts when disks go offline. func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { - bucketNames := []string{"minio-bucket", "minio-2-bucket"} objectNames := []string{"minio-object-1.txt"} uploadIDs := []string{} @@ -1404,7 +1421,6 @@ func TestListObjectParts(t *testing.T) { // testListObjectParts - test validate listing of object parts. func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler) { - bucketNames := []string{"minio-bucket", "minio-2-bucket"} objectNames := []string{"minio-object-1.txt"} uploadIDs := []string{} @@ -1819,7 +1835,6 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T } // Passes as expected, but asserting the results. if actualErr == nil && testCase.shouldPass { - // Asserting IsTruncated. if actualResult.ETag != testCase.expectedS3MD5 { t.Errorf("%s: Expected the result to be \"%v\", but found it to \"%v\"", instanceType, testCase.expectedS3MD5, actualResult) @@ -1856,7 +1871,6 @@ func BenchmarkPutObjectPart10MbErasure(b *testing.B) { // BenchmarkPutObjectPart25MbFS - Benchmark FS.PutObjectPart() for object size of 25MB. 
func BenchmarkPutObjectPart25MbFS(b *testing.B) { benchmarkPutObjectPart(b, "FS", 25*humanize.MiByte) - } // BenchmarkPutObjectPart25MbErasure - Benchmark Erasure.PutObjectPart() for object size of 25MB. diff --git a/cmd/object-api-options.go b/cmd/object-api-options.go index 49f29351f..f7da0f29b 100644 --- a/cmd/object-api-options.go +++ b/cmd/object-api-options.go @@ -156,7 +156,6 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec Err: err, } } - } return opts, nil } diff --git a/cmd/object-api-putobject_test.go b/cmd/object-api-putobject_test.go index 4be2cd187..f4354a991 100644 --- a/cmd/object-api-putobject_test.go +++ b/cmd/object-api-putobject_test.go @@ -84,8 +84,10 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl // Cases with invalid bucket name. {".test", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: ".test"}}, {"------", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "------"}}, - {"$this-is-not-valid-too", "obj", []byte(""), nil, "", 0, "", - BucketNotFound{Bucket: "$this-is-not-valid-too"}}, + { + "$this-is-not-valid-too", "obj", []byte(""), nil, "", 0, "", + BucketNotFound{Bucket: "$this-is-not-valid-too"}, + }, {"a", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "a"}}, // Test case - 5. @@ -98,25 +100,43 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl // Test case - 7. // Input to replicate Md5 mismatch. - {bucket, object, []byte(""), map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"}, "", 0, "", - hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}}, + { + bucket, object, []byte(""), + map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"}, + "", 0, "", + hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}, + }, // Test case - 8. // With incorrect sha256. - {bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, + { + bucket, object, []byte("abcd"), + map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", int64(len("abcd")), - "", hash.SHA256Mismatch{ExpectedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", - CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589"}}, + "", + hash.SHA256Mismatch{ + ExpectedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", + CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", + }, + }, // Test case - 9. // Input with size more than the size of actual data inside the reader. - {bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, "", int64(len("abcd") + 1), "", - hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"}}, + { + bucket, object, []byte("abcd"), + map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, + "", int64(len("abcd") + 1), "", + hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"}, + }, // Test case - 10. // Input with size less than the size of actual data inside the reader. 
- {bucket, object, []byte("abcd"), map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, "", int64(len("abcd") - 1), "", - hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"}}, + { + bucket, object, []byte("abcd"), + map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, + "", int64(len("abcd") - 1), "", + hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"}, + }, // Test case - 11-14. // Validating for success cases. @@ -145,12 +165,18 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl // Test case 24-26. // data with invalid md5sum in header - {bucket, object, data, invalidMD5Header, "", int64(len(data)), getMD5Hash(data), - hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)}}, - {bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), getMD5Hash(nilBytes), - hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)}}, - {bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), - hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)}}, + { + bucket, object, data, invalidMD5Header, "", int64(len(data)), getMD5Hash(data), + hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)}, + }, + { + bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), getMD5Hash(nilBytes), + hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)}, + }, + { + bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), + hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)}, + }, // Test case 27-29. // data with size different from the actual number of bytes available in the reader @@ -512,7 +538,6 @@ func BenchmarkPutObject10MbErasure(b *testing.B) { // BenchmarkPutObject25MbFS - Benchmark FS.PutObject() for object size of 25MB. func BenchmarkPutObject25MbFS(b *testing.B) { benchmarkPutObject(b, "FS", 25*humanize.MiByte) - } // BenchmarkPutObject25MbErasure - Benchmark Erasure.PutObject() for object size of 25MB. @@ -595,7 +620,6 @@ func BenchmarkParallelPutObject10MbErasure(b *testing.B) { // BenchmarkParallelPutObject25MbFS - BenchmarkParallel FS.PutObject() for object size of 25MB. func BenchmarkParallelPutObject25MbFS(b *testing.B) { benchmarkPutObjectParallel(b, "FS", 25*humanize.MiByte) - } // BenchmarkParallelPutObject25MbErasure - BenchmarkParallel Erasure.PutObject() for object size of 25MB. diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index 08ede6053..8d6642581 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -278,7 +278,7 @@ func removeStandardStorageClass(metadata map[string]string) map[string]string { // cleanMetadataKeys takes keyNames to be filtered // and returns a new map with all the entries with keyNames removed. 
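The cleanMetadataKeys hunk that follows shows this patch's most common rewrite: inside a function body, an untyped var declaration becomes a short variable declaration. The rule recast as a sketch with hypothetical names:

package demo

func copyMeta(src map[string]string) map[string]string {
	// Before: var dst = make(map[string]string, len(src))
	dst := make(map[string]string, len(src))
	for k, v := range src {
		dst[k] = v
	}
	return dst
}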
func cleanMetadataKeys(metadata map[string]string, keyNames ...string) map[string]string { - var newMeta = make(map[string]string, len(metadata)) + newMeta := make(map[string]string, len(metadata)) for k, v := range metadata { if contains(keyNames, k) { continue @@ -513,7 +513,7 @@ func partNumberToRangeSpec(oi ObjectInfo, partNumber int) *HTTPRangeSpec { } var start int64 - var end = int64(-1) + end := int64(-1) for i := 0; i < len(oi.Parts) && i < partNumber; i++ { start = end + 1 end = start + oi.Parts[i].ActualSize - 1 @@ -591,8 +591,8 @@ type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func() // assumed that clean up functions do not panic (otherwise, they may // not all run!). func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) ( - fn ObjReaderFn, off, length int64, err error) { - + fn ObjReaderFn, off, length int64, err error, +) { if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) { return nil, 0, 0, PreConditionFailed{} } @@ -873,7 +873,7 @@ func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn { // CleanMinioInternalMetadataKeys removes X-Amz-Meta- prefix from minio internal // encryption metadata that was sent by minio gateway func CleanMinioInternalMetadataKeys(metadata map[string]string) map[string]string { - var newMeta = make(map[string]string, len(metadata)) + newMeta := make(map[string]string, len(metadata)) for k, v := range metadata { if strings.HasPrefix(k, "X-Amz-Meta-X-Minio-Internal-") { newMeta[strings.TrimPrefix(k, "X-Amz-Meta-")] = v @@ -957,7 +957,6 @@ func compressSelfTest() { failOnErr(err) if !bytes.Equal(got, data[skip:]) { logger.Fatal(errSelfTestFailure, "compress: self-test roundtrip mismatch.") - } } diff --git a/cmd/object-api-utils_test.go b/cmd/object-api-utils_test.go index ab2529746..db62b74c2 100644 --- a/cmd/object-api-utils_test.go +++ b/cmd/object-api-utils_test.go @@ -320,35 +320,42 @@ func TestIsCompressed(t *testing.T) { }{ 0: { objInfo: ObjectInfo{ - UserDefined: map[string]string{"X-Minio-Internal-compression": compressionAlgorithmV1, - "content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"}, + UserDefined: map[string]string{ + "X-Minio-Internal-compression": compressionAlgorithmV1, + "content-type": "application/octet-stream", + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + }, }, result: true, }, 1: { objInfo: ObjectInfo{ - UserDefined: map[string]string{"X-Minio-Internal-compression": compressionAlgorithmV2, - "content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"}, + UserDefined: map[string]string{ + "X-Minio-Internal-compression": compressionAlgorithmV2, + "content-type": "application/octet-stream", + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + }, }, result: true, }, 2: { objInfo: ObjectInfo{ - UserDefined: map[string]string{"X-Minio-Internal-compression": "unknown/compression/type", - "content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"}, + UserDefined: map[string]string{ + "X-Minio-Internal-compression": "unknown/compression/type", + "content-type": "application/octet-stream", + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + }, }, result: true, err: true, }, 3: { objInfo: ObjectInfo{ - UserDefined: map[string]string{"X-Minio-Internal-compression": compressionAlgorithmV2, - "content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", - crypto.MetaIV: "yes", + UserDefined: map[string]string{ + "X-Minio-Internal-compression": 
compressionAlgorithmV2, + "content-type": "application/octet-stream", + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + crypto.MetaIV: "yes", }, }, result: true, @@ -356,16 +363,20 @@ func TestIsCompressed(t *testing.T) { }, 4: { objInfo: ObjectInfo{ - UserDefined: map[string]string{"X-Minio-Internal-XYZ": "klauspost/compress/s2", - "content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"}, + UserDefined: map[string]string{ + "X-Minio-Internal-XYZ": "klauspost/compress/s2", + "content-type": "application/octet-stream", + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + }, }, result: false, }, 5: { objInfo: ObjectInfo{ - UserDefined: map[string]string{"content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"}, + UserDefined: map[string]string{ + "content-type": "application/octet-stream", + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + }, }, result: false, }, @@ -468,10 +479,12 @@ func TestGetActualSize(t *testing.T) { }{ { objInfo: ObjectInfo{ - UserDefined: map[string]string{"X-Minio-Internal-compression": "klauspost/compress/s2", + UserDefined: map[string]string{ + "X-Minio-Internal-compression": "klauspost/compress/s2", "X-Minio-Internal-actual-size": "100000001", "content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"}, + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + }, Parts: []ObjectPartInfo{ { Size: 39235668, @@ -487,19 +500,23 @@ func TestGetActualSize(t *testing.T) { }, { objInfo: ObjectInfo{ - UserDefined: map[string]string{"X-Minio-Internal-compression": "klauspost/compress/s2", + UserDefined: map[string]string{ + "X-Minio-Internal-compression": "klauspost/compress/s2", "X-Minio-Internal-actual-size": "841", "content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"}, + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + }, Parts: []ObjectPartInfo{}, }, result: 841, }, { objInfo: ObjectInfo{ - UserDefined: map[string]string{"X-Minio-Internal-compression": "klauspost/compress/s2", - "content-type": "application/octet-stream", - "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2"}, + UserDefined: map[string]string{ + "X-Minio-Internal-compression": "klauspost/compress/s2", + "content-type": "application/octet-stream", + "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", + }, Parts: []ObjectPartInfo{}, }, result: -1, diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 251a0ec51..f1444f26e 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -29,9 +29,7 @@ import ( "github.com/minio/minio/internal/logger" ) -var ( - etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$") -) +var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$") // Validates the preconditions for CopyObjectPart, returns true if CopyObjectPart // operation should not proceed. 
Preconditions supported are: diff --git a/cmd/object-handlers-common_test.go b/cmd/object-handlers-common_test.go index d2b1e74ff..6d190a95e 100644 --- a/cmd/object-handlers-common_test.go +++ b/cmd/object-handlers-common_test.go @@ -48,7 +48,6 @@ func TestCanonicalizeETag(t *testing.T) { etag := canonicalizeETag(test.etag) if test.canonicalizedETag != etag { t.Fatalf("Expected %s , got %s", test.canonicalizedETag, etag) - } } } diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 6fc5a9560..06d1401cf 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -1013,7 +1013,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re srcOpts.VersionID = vid // convert copy src encryption options for GET calls - var getOpts = ObjectOptions{VersionID: srcOpts.VersionID, Versioned: srcOpts.Versioned} + getOpts := ObjectOptions{VersionID: srcOpts.VersionID, Versioned: srcOpts.Versioned} getSSE := encrypt.SSE(srcOpts.ServerSideEncryption) if getSSE != srcOpts.ServerSideEncryption { getOpts.ServerSideEncryption = getSSE @@ -1148,7 +1148,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re pReader := NewPutObjReader(srcInfo.Reader) // Handle encryption - var encMetadata = make(map[string]string) + encMetadata := make(map[string]string) if objectAPI.IsEncryptionSupported() { // Encryption parameters not applicable for this object. if _, ok := crypto.IsEncrypted(srcInfo.UserDefined); !ok && crypto.SSECopy.IsRequested(r.Header) { @@ -1511,7 +1511,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re // Schedule object for immediate transition if eligible. enqueueTransitionImmediate(objInfo) } - } // PutObjectHandler - PUT Object @@ -2222,7 +2221,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r } } - var encMetadata = map[string]string{} + encMetadata := map[string]string{} if objectAPI.IsEncryptionSupported() { if _, ok := crypto.IsRequested(r.Header); ok { @@ -2398,7 +2397,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt srcOpts.VersionID = vid // convert copy src and dst encryption options for GET/PUT calls - var getOpts = ObjectOptions{VersionID: srcOpts.VersionID} + getOpts := ObjectOptions{VersionID: srcOpts.VersionID} if srcOpts.ServerSideEncryption != nil { getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption) } @@ -3083,7 +3082,6 @@ func sendWhiteSpace(w http.ResponseWriter) <-chan bool { return } } - }() return doneCh } @@ -3592,7 +3590,6 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r UserAgent: r.UserAgent(), Host: handlers.GetSourceIP(r), }) - } // GetObjectLegalHoldHandler - get legal hold configuration to object, @@ -3961,7 +3958,6 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h UserAgent: r.UserAgent(), Host: handlers.GetSourceIP(r), }) - } // DeleteObjectTaggingHandler - DELETE object tagging diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index a04253201..a7029f49c 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -26,15 +26,14 @@ import ( "encoding/xml" "fmt" "io" - "path" - "runtime" - "strings" - "io/ioutil" "net/http" "net/http/httptest" "net/url" + "path" + "runtime" "strconv" + "strings" "sync" "testing" @@ -158,7 +157,6 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, // construct HTTP request for Head Object endpoint. 
reqV2, err := newTestSignedRequestV2(http.MethodHead, getHeadObjectURL("", testCase.bucketName, testCase.objectName), 0, nil, testCase.accessKey, testCase.secretKey, nil) - if err != nil { t.Fatalf("Test %d: %s: Failed to create HTTP request for Head Object: %v", i+1, instanceType, err) } @@ -173,7 +171,6 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, // Test for Anonymous/unsigned http request. anonReq, err := newTestRequest(http.MethodHead, getHeadObjectURL("", bucketName, objectName), 0, nil) - if err != nil { t.Fatalf("MinIO %s: Failed to create an anonymous request for %s/%s: %v", instanceType, bucketName, objectName, err) @@ -193,7 +190,6 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, nilObject := "dummy-object" nilReq, err := newTestSignedRequestV4(http.MethodHead, getGetObjectURL("", nilBucket, nilObject), 0, nil, "", "", nil) - if err != nil { t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } @@ -211,8 +207,8 @@ func TestAPIHeadObjectHandlerWithEncryption(t *testing.T) { } func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { // Set SSL to on to do encryption tests globalIsTLS = true defer func() { globalIsTLS = false }() @@ -333,8 +329,8 @@ func TestAPIGetObjectHandler(t *testing.T) { } func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { objectName := "test-object" // set of byte data for PutObject. // object has to be created before running tests for GetObject. @@ -525,7 +521,6 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a // construct HTTP request for Get Object end point. req, err := newTestSignedRequestV4(http.MethodGet, getGetObjectURL("", testCase.bucketName, testCase.objectName), 0, nil, testCase.accessKey, testCase.secretKey, nil) - if err != nil { t.Fatalf("Test %d: Failed to create HTTP request for Get Object: %v", i+1, err) } @@ -568,7 +563,6 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a // construct HTTP request for GET Object endpoint. reqV2, err := newTestSignedRequestV2(http.MethodGet, getGetObjectURL("", testCase.bucketName, testCase.objectName), 0, nil, testCase.accessKey, testCase.secretKey, nil) - if err != nil { t.Fatalf("Test %d: %s: Failed to create HTTP request for GetObject: %v", i+1, instanceType, err) } @@ -610,7 +604,6 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a // Test for Anonymous/unsigned http request. 
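The blank-line deletions between a request-building call and its if err != nil test, repeated throughout these handler tests, match gofumpt's rule that there be no empty line before a simple error check. Sketch with hypothetical helpers:

package demo

import "fmt"

func fetch(path string) ([]byte, error) { return []byte(path), nil }

func load(path string) ([]byte, error) {
	buf, err := fetch(path) // no blank line allowed before the error check
	if err != nil {
		return nil, fmt.Errorf("load %s: %w", path, err)
	}
	return buf, nil
}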
anonReq, err := newTestRequest(http.MethodGet, getGetObjectURL("", bucketName, objectName), 0, nil) - if err != nil { t.Fatalf("MinIO %s: Failed to create an anonymous request for %s/%s: %v", instanceType, bucketName, objectName, err) @@ -630,7 +623,6 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a nilObject := "dummy-object" nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetObjectURL("", nilBucket, nilObject), 0, nil, "", "", nil) - if err != nil { t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } @@ -649,8 +641,8 @@ func TestAPIGetObjectWithMPHandler(t *testing.T) { } func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { // Set SSL to on to do encryption tests globalIsTLS = true defer func() { globalIsTLS = false }() @@ -827,14 +819,12 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str nilObject := "dummy-object" nilReq, err := newTestSignedRequestV4(http.MethodGet, getGetObjectURL("", nilBucket, nilObject), 0, nil, "", "", nil) - if err != nil { t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } // execute the object layer set to `nil` test. // `ExecObjectLayerAPINilTest` manages the operation. ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) - } // Wrapper for calling GetObject API handler tests for both Erasure multiple disks and FS single drive setup. @@ -847,8 +837,8 @@ func TestAPIGetObjectWithPartNumberHandler(t *testing.T) { } func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { // Set SSL to on to do encryption tests globalIsTLS = true defer func() { globalIsTLS = false }() @@ -975,8 +965,8 @@ func TestAPIPutObjectStreamSigV4Handler(t *testing.T) { } func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { objectName := "test-object" bytesDataLen := 65 * humanize.KiByte bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen) @@ -1208,7 +1198,6 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam getPutObjectURL("", testCase.bucketName, testCase.objectName), int64(testCase.dataLen), testCase.chunkSize, bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey) - } else if testCase.contentEncoding == "" { req, err = newTestStreamingSignedRequest(http.MethodPut, getPutObjectURL("", testCase.bucketName, testCase.objectName), @@ -1297,8 +1286,8 @@ func TestAPIPutObjectHandler(t *testing.T) { } func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { var err error objectName := "test-object" opts := ObjectOptions{} @@ -1536,14 +1525,12 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutObjectURL("", nilBucket, nilObject), 0, nil, "", "", nil) - if err != nil { t.Errorf("MinIO 
%s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } // execute the object layer set to `nil` test. // `ExecObjectLayerAPINilTest` manages the operation. ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) - } // Tests sanity of attempting to copying each parts at offsets from an existing @@ -1555,8 +1542,8 @@ func TestAPICopyObjectPartHandlerSanity(t *testing.T) { } func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { objectName := "test-object" var err error opts := ObjectOptions{} @@ -1672,8 +1659,8 @@ func TestAPICopyObjectPartHandler(t *testing.T) { } func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { objectName := "test-object" var err error opts := ObjectOptions{} @@ -1978,7 +1965,6 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri // execute the object layer set to `nil` test. // `ExecObjectLayerAPINilTest` manages the operation. ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) - } // Wrapper for calling Copy Object API handler tests for both Erasure multiple disks and single node setup. @@ -1988,8 +1974,8 @@ func TestAPICopyObjectHandler(t *testing.T) { } func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { objectName := "test?object" // use file with ? to test URL parsing... if runtime.GOOS == "windows" { objectName = "test-object" // ...except on Windows @@ -2439,7 +2425,6 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, // execute the object layer set to `nil` test. // `ExecObjectLayerAPINilTest` manages the operation. ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) - } // Wrapper for calling NewMultipartUpload tests for both Erasure multiple disks and single node setup. @@ -2451,14 +2436,13 @@ func TestAPINewMultipartHandler(t *testing.T) { } func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { objectName := "test-object-new-multipart" rec := httptest.NewRecorder() // construct HTTP request for NewMultipart upload. req, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", bucketName, objectName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil) - if err != nil { t.Fatalf("Failed to create HTTP request for NewMultipart Request: %v", err) } @@ -2510,7 +2494,6 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string // construct HTTP request for NewMultipartUpload endpoint. reqV2, err := newTestSignedRequestV2(http.MethodPost, getNewMultipartURL("", bucketName, objectName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil) - if err != nil { t.Fatalf("Failed to create HTTP request for NewMultipart Request: %v", err) } @@ -2558,7 +2541,6 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string // Test for Anonymous/unsigned http request. 
anonReq, err := newTestRequest(http.MethodPost, getNewMultipartURL("", bucketName, objectName), 0, nil) - if err != nil { t.Fatalf("MinIO %s: Failed to create an anonymous request for %s/%s: %v", instanceType, bucketName, objectName, err) @@ -2578,14 +2560,12 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string nilReq, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", nilBucket, nilObject), 0, nil, "", "", nil) - if err != nil { t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } // execute the object layer set to `nil` test. // `ExecObjectLayerAPINilTest` manages the operation. ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq) - } // Wrapper for calling NewMultipartUploadParallel tests for both Erasure multiple disks and single node setup. @@ -2614,7 +2594,6 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam rec := httptest.NewRecorder() // construct HTTP request NewMultipartUpload. req, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", bucketName, objectName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil) - if err != nil { t.Errorf("Failed to create HTTP request for NewMultipart request: %v", err) return @@ -2660,8 +2639,8 @@ func TestAPICompleteMultipartHandler(t *testing.T) { } func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { var err error var opts ObjectOptions @@ -3016,7 +2995,6 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s nilReq, err := newTestSignedRequestV4(http.MethodPost, getCompleteMultipartUploadURL("", nilBucket, nilObject, "dummy-uploadID"), 0, nil, "", "", nil) - if err != nil { t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } @@ -3032,8 +3010,8 @@ func TestAPIAbortMultipartHandler(t *testing.T) { } func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { var err error opts := ObjectOptions{} // object used for the test. @@ -3179,7 +3157,6 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri nilReq, err := newTestSignedRequestV4(http.MethodDelete, getAbortMultipartUploadURL("", nilBucket, nilObject, "dummy-uploadID"), 0, nil, "", "", nil) - if err != nil { t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } @@ -3195,8 +3172,8 @@ func TestAPIDeleteObjectHandler(t *testing.T) { } func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { var err error objectName := "test-object" // Object used for anonymous API request test. 
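The signature rewrites in these test helpers, which now end with `t *testing.T,` followed by `) {` alone on a line, apply gofumpt's rule for function signatures that span multiple lines: the final parameter gets a trailing comma and the closing parenthesis moves to its own line, which also eliminates the stray blank line that used to follow the opening brace. A trimmed sketch (parameters simplified from the real helpers):

package demo

import "testing"

// Before:
//
//	func testHandler(instanceType, bucketName string,
//		t *testing.T) {
//
func testHandler(instanceType, bucketName string,
	t *testing.T,
) {
	t.Log(instanceType, bucketName)
}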
@@ -3339,7 +3316,6 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string nilReq, err := newTestSignedRequestV4(http.MethodDelete, getDeleteObjectURL("", nilBucket, nilObject), 0, nil, "", "", nil) - if err != nil { t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } @@ -3372,7 +3348,6 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN mpartRespBytes, err := ioutil.ReadAll(rec.Result().Body) if err != nil { t.Fatalf("[%s] Failed to read NewMultipartUpload response %v", instanceType, err) - } err = xml.Unmarshal(mpartRespBytes, &mpartResp) if err != nil { @@ -3430,7 +3405,6 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN if rec.Code != http.StatusOK { t.Errorf("Test %d %s expected to succeed, but failed with HTTP status code %d", i+1, instanceType, rec.Code) - } } } @@ -3444,8 +3418,8 @@ func TestAPIPutObjectPartHandler(t *testing.T) { } func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - + credentials auth.Credentials, t *testing.T, +) { // Initiate Multipart upload for testing PutObjectPartHandler. testObject := "testobject" var opts ObjectOptions @@ -3616,7 +3590,6 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin for i, test := range testCases { // Using sub-tests introduced in Go 1.7. t.Run(fmt.Sprintf("MinIO %s : Test case %d.", instanceType, i+1), func(t *testing.T) { - var reqV4, reqV2 *http.Request var recV4, recV2 *httptest.ResponseRecorder @@ -3743,7 +3716,6 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin nilReq, err := newTestSignedRequestV4(http.MethodPut, getPutObjectPartURL("", nilBucket, nilObject, "0", "0"), 0, bytes.NewReader([]byte("testNilObjLayer")), "", "", nil) - if err != nil { t.Errorf("MinIO %s: Failed to create http request for testing the response when object Layer is set to `nil`.", instanceType) } @@ -3777,7 +3749,6 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN mpartRespBytes, err := ioutil.ReadAll(rec.Result().Body) if err != nil { t.Fatalf("[%s] Failed to read NewMultipartUpload response %v", instanceType, err) - } err = xml.Unmarshal(mpartRespBytes, &mpartResp) if err != nil { diff --git a/cmd/os-readdir_test.go b/cmd/os-readdir_test.go index 5d58b708a..28cd59c74 100644 --- a/cmd/os-readdir_test.go +++ b/cmd/os-readdir_test.go @@ -36,7 +36,7 @@ func TestReadDirFail(t *testing.T) { } file := path.Join(os.TempDir(), "issue") - if err := ioutil.WriteFile(file, []byte(""), 0644); err != nil { + if err := ioutil.WriteFile(file, []byte(""), 0o644); err != nil { t.Fatal(err) } defer os.RemoveAll(file) @@ -49,7 +49,7 @@ func TestReadDirFail(t *testing.T) { // Only valid for linux. if runtime.GOOS == "linux" { permDir := path.Join(os.TempDir(), "perm-dir") - if err := os.MkdirAll(permDir, os.FileMode(0200)); err != nil { + if err := os.MkdirAll(permDir, os.FileMode(0o200)); err != nil { t.Fatal(err) } defer os.RemoveAll(permDir) @@ -108,7 +108,7 @@ func setupTestReadDirFiles(t *testing.T) (testResults []result) { // Test to read non-empty directory with directories and files. 
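The permission-bit changes in the os tests below are gofumpt rewriting octal literals to the 0o prefix introduced in Go 1.13; the value is unchanged, only the notation. For instance:

package demo

import "os"

func mkScratch(dir string) error {
	// 0755 == 0o755; the prefix just makes the octal base explicit.
	return os.MkdirAll(dir, 0o755)
}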
func setupTestReadDirGeneric(t *testing.T) (testResults []result) { dir := mustSetupDir(t) - if err := os.MkdirAll(filepath.Join(dir, "mydir"), 0777); err != nil { + if err := os.MkdirAll(filepath.Join(dir, "mydir"), 0o777); err != nil { t.Fatalf("Unable to create prefix directory \"mydir\", %s", err) } entries := []string{"mydir/"} @@ -153,7 +153,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) { // Symlinks are preserved for regular files entries = append(entries, name2) } - if err := os.MkdirAll(filepath.Join(dir, "mydir"), 0777); err != nil { + if err := os.MkdirAll(filepath.Join(dir, "mydir"), 0o777); err != nil { t.Fatalf("Unable to create \"mydir\", %s", err) } entries = append(entries, "mydir/") diff --git a/cmd/os-reliable.go b/cmd/os-reliable.go index 621da0a13..4ba92b8f4 100644 --- a/cmd/os-reliable.go +++ b/cmd/os-reliable.go @@ -163,7 +163,7 @@ func renameAll(srcFilePath, dstFilePath string) (err error) { // Reliably retries os.RenameAll if for some reason os.RenameAll returns // syscall.ENOENT (parent does not exist). func reliableRename(srcFilePath, dstFilePath string) (err error) { - if err = reliableMkdirAll(path.Dir(dstFilePath), 0777); err != nil { + if err = reliableMkdirAll(path.Dir(dstFilePath), 0o777); err != nil { return err } i := 0 diff --git a/cmd/os-reliable_test.go b/cmd/os-reliable_test.go index 3816007b2..4b02b85ae 100644 --- a/cmd/os-reliable_test.go +++ b/cmd/os-reliable_test.go @@ -31,15 +31,15 @@ func TestOSMkdirAll(t *testing.T) { } defer os.RemoveAll(path) - if err = mkdirAll("", 0777); err != errInvalidArgument { + if err = mkdirAll("", 0o777); err != errInvalidArgument { t.Fatal("Unexpected error", err) } - if err = mkdirAll(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), 0777); err != errFileNameTooLong { + if err = mkdirAll(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), 0o777); err != errFileNameTooLong { t.Fatal("Unexpected error", err) } - if err = mkdirAll(pathJoin(path, "success-vol", "success-object"), 0777); err != nil { + if err = mkdirAll(pathJoin(path, "success-vol", "success-object"), 0o777); err != nil { t.Fatal("Unexpected error", err) } } @@ -53,7 +53,7 @@ func TestOSRenameAll(t *testing.T) { } defer os.RemoveAll(path) - if err = mkdirAll(pathJoin(path, "testvolume1"), 0777); err != nil { + if err = mkdirAll(pathJoin(path, "testvolume1"), 0o777); err != nil { t.Fatal(err) } if err = renameAll("", "foo"); err != errInvalidArgument { diff --git a/cmd/peer-rest-client.go b/cmd/peer-rest-client.go index 09469eb1d..4be655a27 100644 --- a/cmd/peer-rest-client.go +++ b/cmd/peer-rest-client.go @@ -303,7 +303,6 @@ func maxLatencyForSizeThreads(size int64, threadCount uint) float64 { // GetNetPerfInfo - fetch network information for a remote node. 
func (client *peerRESTClient) GetNetPerfInfo(ctx context.Context) (info madmin.PeerNetPerfInfo, err error) { - // 100 Gbit -> 256 MiB * 50 threads // 40 Gbit -> 256 MiB * 20 threads // 25 Gbit -> 128 MiB * 25 threads @@ -757,7 +756,6 @@ func (client *peerRESTClient) UpdateMetacacheListing(ctx context.Context, m meta defer http.DrainBody(respBody) var resp metacache return resp, msgp.Decode(respBody, &resp) - } func (client *peerRESTClient) LoadTransitionTierConfig(ctx context.Context) error { diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index 8973a3efa..70d17e5ea 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -241,7 +241,7 @@ func (s *peerRESTServer) LoadUserHandler(w http.ResponseWriter, r *http.Request) return } - var userType = regUser + userType := regUser if temp { userType = stsUser } diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 213595dfe..f94f4e98d 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -422,7 +422,6 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code) } } - } // Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both Erasure multiple disks and single node setup. @@ -507,7 +506,6 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t if rec.Header().Get("Location") != expectedLocation { t.Errorf("Unexpected location, expected = %s, found = `%s`", rec.Header().Get("Location"), expectedLocation) } - } // postPresignSignatureV4 - presigned signature for PostPolicy requests. diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index f7fd82081..1b972c778 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -96,7 +96,7 @@ func formatErasureCleanupTmp(diskPath string) error { // Removal of tmp-old folder is backgrounded completely. 
go removeAll(pathJoin(diskPath, minioMetaTmpBucket+"-old")) - if err := mkdirAll(pathJoin(diskPath, minioMetaTmpDeletedBucket), 0777); err != nil { + if err := mkdirAll(pathJoin(diskPath, minioMetaTmpDeletedBucket), 0o777); err != nil { logger.LogIf(GlobalContext, fmt.Errorf("unable to create (%s) %w, drive may be faulty please investigate", pathJoin(diskPath, minioMetaTmpBucket), err)) diff --git a/cmd/s3-zip-handlers.go b/cmd/s3-zip-handlers.go index 6427dc366..c82656039 100644 --- a/cmd/s3-zip-handlers.go +++ b/cmd/s3-zip-handlers.go @@ -323,7 +323,7 @@ func listObjectsV2InArchive(ctx context.Context, objectAPI ObjectLayer, bucket, // getFilesFromZIPObject reads a partial stream of a zip file to build the zipindex.Files index func getFilesListFromZIPObject(ctx context.Context, objectAPI ObjectLayer, bucket, object string, opts ObjectOptions) (zipindex.Files, ObjectInfo, error) { - var size = 1 << 20 + size := 1 << 20 var objSize int64 for { rs := &HTTPRangeSpec{IsSuffixLength: true, Start: int64(-size)} diff --git a/cmd/server_test.go b/cmd/server_test.go index 29c5fb4cf..8bdc48055 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -257,7 +257,6 @@ func (s *TestSuiteCommon) TestCors(c *check) { } } } - } func (s *TestSuiteCommon) TestObjectDir(c *check) { @@ -476,7 +475,6 @@ func (s *TestSuiteCommon) TestDeleteBucketNotEmpty(c *check) { response, err = s.client.Do(request) c.Assert(err, nil) c.Assert(response.StatusCode, http.StatusConflict) - } func (s *TestSuiteCommon) TestListenNotificationHandler(c *check) { @@ -591,7 +589,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { c.Assert(err, nil) c.Assert(response.StatusCode, http.StatusOK) - var deleteResp = DeleteObjectsResponse{} + deleteResp := DeleteObjectsResponse{} delRespBytes, err := ioutil.ReadAll(response.Body) c.Assert(err, nil) err = xml.Unmarshal(delRespBytes, &deleteResp) @@ -1405,7 +1403,6 @@ func (s *TestSuiteCommon) TestHeadOnObjectLastModified(c *check) { // Since the "If-Modified-Since" header was ahead in time compared to the actual // modified time of the object expecting the response status to be http.StatusNotModified. c.Assert(response.StatusCode, http.StatusOK) - } // TestHeadOnBucket - Validates response for HEAD on the bucket. @@ -1594,20 +1591,22 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *check) { c.Assert(response.StatusCode, http.StatusOK) } - var testCases = []struct { + testCases := []struct { getURL string expectedStrings []string }{ {getListObjectsV1URL(s.endPoint, bucketName, "", "1000", ""), []string{"foo bar 1", "foo bar 2"}}, {getListObjectsV1URL(s.endPoint, bucketName, "", "1000", "url"), []string{"foo+bar+1", "foo+bar+2"}}, - {getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", ""), + { + getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", ""), []string{ "foo bar 1", "foo bar 2", fmt.Sprintf("%sminio", globalMinioDefaultOwnerID), }, }, - {getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "true", ""), + { + getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "true", ""), []string{ "foo bar 1", "foo bar 2", @@ -1669,7 +1668,6 @@ func (s *TestSuiteCommon) TestListObjectsHandlerErrors(c *check) { c.Assert(err, nil) // validating the error response. 
verifyError(c, response, "InvalidArgument", "Argument maxKeys must be an integer between 0 and 2147483647", http.StatusBadRequest) - } // TestPutBucketErrors - request for non valid bucket operation @@ -1879,7 +1877,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectMisAligned(c *check) { // test Cases containing data to make partial range requests. // also has expected response data. - var testCases = []struct { + testCases := []struct { byteRange string expectedString string }{ diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 0b8b3bfbe..4a00ad2c0 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -178,14 +178,12 @@ func TestValidateV2AuthHeader(t *testing.T) { // Test case - 1. // Case with empty V2AuthString. { - authString: "", expectedError: ErrAuthHeaderEmpty, }, // Test case - 2. // Test case with `signV2Algorithm` ("AWS") not being the prefix. { - authString: "NoV2Prefix", expectedError: ErrSignatureVersionNotSupported, }, @@ -194,28 +192,24 @@ func TestValidateV2AuthHeader(t *testing.T) { // below is the correct format of V2 Authorization header. // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature { - authString: signV2Algorithm, expectedError: ErrMissingFields, }, // Test case - 4. // Test case with signature part missing. { - authString: fmt.Sprintf("%s %s", signV2Algorithm, accessID), expectedError: ErrMissingFields, }, // Test case - 5. // Test case with wrong accessID. { - authString: fmt.Sprintf("%s %s:%s", signV2Algorithm, "InvalidAccessID", "signature"), expectedError: ErrInvalidAccessKeyID, }, // Test case - 6. // Case with right accessID and format. { - authString: fmt.Sprintf("%s %s:%s", signV2Algorithm, accessID, "signature"), expectedError: ErrNone, }, @@ -223,7 +217,6 @@ func TestValidateV2AuthHeader(t *testing.T) { for i, testCase := range testCases { t.Run(fmt.Sprintf("Case %d AuthStr \"%s\".", i+1, testCase.authString), func(t *testing.T) { - req := &http.Request{ Header: make(http.Header), URL: &url.URL{}, @@ -236,7 +229,6 @@ func TestValidateV2AuthHeader(t *testing.T) { } }) } - } func TestDoesPolicySignatureV2Match(t *testing.T) { diff --git a/cmd/signature-v4-parser.go b/cmd/signature-v4-parser.go index 1ee156971..9e28f60cf 100644 --- a/cmd/signature-v4-parser.go +++ b/cmd/signature-v4-parser.go @@ -107,7 +107,6 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) // Should validate region, only if region is set. if !isValidRegion(sRegion, region) { return ch, ErrAuthorizationHeaderMalformed - } if credElements[2] != string(stype) { switch stype { diff --git a/cmd/signature-v4-parser_test.go b/cmd/signature-v4-parser_test.go index 2c25eb9a3..8c32a4f0a 100644 --- a/cmd/signature-v4-parser_test.go +++ b/cmd/signature-v4-parser_test.go @@ -37,7 +37,8 @@ func joinWithSlash(accessKey, date, region, service, requestVersion string) stri date, region, service, - requestVersion}, SlashSeparator) + requestVersion, + }, SlashSeparator) } // generate CredentialHeader from its fields. @@ -59,7 +60,6 @@ func generateCredentials(t *testing.T, accessKey string, date string, region, se // validates the credential fields against the expected credential. 
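The deletions in TestValidateV2AuthHeader above remove the empty line each table entry opened with; as with function bodies, gofumpt does not allow a composite-literal element to begin with a blank line. An illustrative sketch (field names hypothetical):

package demo

var authCases = []struct {
	authString  string
	expectedErr int
}{
	// Before, each element read:
	//
	//	{
	//
	//		authString: "",
	//
	// After, the leading blank line is dropped:
	{
		authString:  "",
		expectedErr: 0,
	},
}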
func validateCredentialfields(t *testing.T, testNum int, expectedCredentials credentialHeader, actualCredential credentialHeader) { - if expectedCredentials.accessKey != actualCredential.accessKey { t.Errorf("Test %d: AccessKey mismatch: Expected \"%s\", got \"%s\"", testNum, expectedCredentials.accessKey, actualCredential.accessKey) } @@ -87,7 +87,6 @@ func validateCredentialfields(t *testing.T, testNum int, expectedCredentials cre // "aws4_request", // },SlashSeparator) func TestParseCredentialHeader(t *testing.T) { - sampleTimeStr := UTCNow().Format(yyyymmdd) testCases := []struct { @@ -296,7 +295,6 @@ func TestParseSignature(t *testing.T) { if actualErrCode == ErrNone { if testCase.expectedSignStr != actualSignStr { t.Errorf("Test %d: Expected the result to be \"%s\", but got \"%s\". ", i+1, testCase.expectedSignStr, actualSignStr) - } } @@ -342,7 +340,6 @@ func TestParseSignedHeaders(t *testing.T) { if actualErrCode == ErrNone { if strings.Join(testCase.expectedSignedHeaders, ",") != strings.Join(actualSignedHeaders, ",") { t.Errorf("Test %d: Expected the result to be \"%v\", but got \"%v\". ", i+1, testCase.expectedSignedHeaders, actualSignedHeaders) - } } @@ -518,7 +515,6 @@ func TestParseSignV4(t *testing.T) { } } - } // TestDoesV4PresignParamsExist - tests validate the logic to @@ -619,7 +615,6 @@ func TestDoesV4PresignParamsExist(t *testing.T) { inputQuery := url.Values{} // iterating through input query key value and setting the inputQuery of type url.Values. for j := 0; j < len(testCase.inputQueryKeyVals)-1; j += 2 { - inputQuery.Set(testCase.inputQueryKeyVals[j], testCase.inputQueryKeyVals[j+1]) } @@ -629,7 +624,6 @@ func TestDoesV4PresignParamsExist(t *testing.T) { t.Fatalf("Test %d: Expected the APIErrCode to be %d, got %d", i+1, testCase.expectedErrCode, actualErrCode) } } - } // TestParsePreSignV4 - Validates the parsing logic of Presignied v4 request from its url query values. @@ -668,7 +662,6 @@ func TestParsePreSignV4(t *testing.T) { // The other query params should exist, other wise ErrInvalidQueryParams will be returned because of missing fields. 
{ inputQueryKeyVals: []string{ - "X-Amz-Algorithm", "InvalidValue", "X-Amz-Credential", "", "X-Amz-Signature", "", diff --git a/cmd/signature-v4-utils_test.go b/cmd/signature-v4-utils_test.go index 874431493..602fe6aa1 100644 --- a/cmd/signature-v4-utils_test.go +++ b/cmd/signature-v4-utils_test.go @@ -173,7 +173,6 @@ func TestIsValidRegion(t *testing.T) { expectedResult bool }{ - {"", "", true}, {globalMinioDefaultRegion, "", true}, {globalMinioDefaultRegion, "US", true}, diff --git a/cmd/site-replication.go b/cmd/site-replication.go index b8a352ece..be5a37c3d 100644 --- a/cmd/site-replication.go +++ b/cmd/site-replication.go @@ -110,7 +110,6 @@ func errSRBucketConfigError(err error) SRError { Cause: err, Code: ErrSiteReplicationBucketConfigError, } - } func errSRBucketMetaError(err error) SRError { @@ -127,12 +126,10 @@ func errSRIAMError(err error) SRError { } } -var ( - errSRObjectLayerNotReady = SRError{ - Cause: fmt.Errorf("object layer not ready"), - Code: ErrServerNotInitialized, - } -) +var errSRObjectLayerNotReady = SRError{ + Cause: fmt.Errorf("object layer not ready"), + Code: ErrServerNotInitialized, +} func getSRStateFilePath() string { return srStatePrefix + SlashSeparator + srStateFile @@ -345,7 +342,7 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi deploymentIDsSet := set.NewStringSet() localHasBuckets := false nonLocalPeerWithBuckets := "" - var selfIdx = -1 + selfIdx := -1 for i, v := range sites { // deploymentIDs must be unique if deploymentIDsSet.Contains(v.DeploymentID) { diff --git a/cmd/site-replication_test.go b/cmd/site-replication_test.go index 1e967df61..c19275342 100644 --- a/cmd/site-replication_test.go +++ b/cmd/site-replication_test.go @@ -34,9 +34,11 @@ func TestGetMissingSiteNames(t *testing.T) { }{ // Test1: missing some sites in replicated setup { - []madmin.PeerInfo{{Endpoint: "minio1:9000", Name: "minio1", DeploymentID: "dep1"}, + []madmin.PeerInfo{ + {Endpoint: "minio1:9000", Name: "minio1", DeploymentID: "dep1"}, {Endpoint: "minio2:9000", Name: "minio2", DeploymentID: "dep2"}, - {Endpoint: "minio3:9000", Name: "minio3", DeploymentID: "dep3"}}, + {Endpoint: "minio3:9000", Name: "minio3", DeploymentID: "dep3"}, + }, set.CreateStringSet("dep1", "dep2", "dep3"), set.CreateStringSet("dep1"), []string{"minio2", "minio3"}, diff --git a/cmd/storage-interface.go b/cmd/storage-interface.go index 2e6d4ba47..b81dcef54 100644 --- a/cmd/storage-interface.go +++ b/cmd/storage-interface.go @@ -33,7 +33,6 @@ type StorageAPI interface { LastConn() time.Time // Returns the last time this disk (re)-connected IsLocal() bool - Hostname() string // Returns host name if remote host. Endpoint() Endpoint // Returns endpoint. @@ -81,7 +80,6 @@ type StorageAPI interface { // Read all. ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) - GetDiskLoc() (poolIdx, setIdx, diskIdx int) // Retrieve location indexes. SetDiskLoc(poolIdx, setIdx, diskIdx int) // Set location indexes. 
} diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go index 5cef991eb..58c19f408 100644 --- a/cmd/storage-rest-server.go +++ b/cmd/storage-rest-server.go @@ -746,7 +746,7 @@ func keepHTTPReqResponseAlive(w http.ResponseWriter, r *http.Request) (resp func doneCh := make(chan error) ctx := r.Context() go func() { - var canWrite = true + canWrite := true write := func(b []byte) { if canWrite { n, err := w.Write(b) @@ -820,7 +820,7 @@ func keepHTTPReqResponseAlive(w http.ResponseWriter, r *http.Request) (resp func func keepHTTPResponseAlive(w http.ResponseWriter) func(error) { doneCh := make(chan error) go func() { - var canWrite = true + canWrite := true write := func(b []byte) { if canWrite { n, err := w.Write(b) @@ -940,7 +940,7 @@ func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse { blockCh := make(chan []byte) h := httpStreamResponse{done: doneCh, block: blockCh} go func() { - var canWrite = true + canWrite := true write := func(b []byte) { if canWrite { n, err := w.Write(b) diff --git a/cmd/sts-handlers.go b/cmd/sts-handlers.go index 1aa2cf185..830bd1c5a 100644 --- a/cmd/sts-handlers.go +++ b/cmd/sts-handlers.go @@ -139,7 +139,6 @@ func registerSTSRouter(router *mux.Router) { stsRouter.Methods(http.MethodPost).HandlerFunc(httpTraceAll(sts.AssumeRoleWithCertificate)). Queries(stsAction, clientCertificate). Queries(stsVersion, stsAPIVersion) - } func checkAssumeRoleAuth(ctx context.Context, r *http.Request) (user auth.Credentials, isErrCodeSTS bool, stsErr STSErrorCode) { @@ -684,7 +683,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r * // // API endpoint: https://minio:9000?Action=AssumeRoleWithCertificate&Version=2011-06-15 func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *http.Request) { - var ctx = newContext(r, w, "AssumeRoleWithCertificate") + ctx := newContext(r, w, "AssumeRoleWithCertificate") if !globalSTSTLSConfig.Enabled { writeSTSErrorResponse(ctx, w, true, ErrSTSNotInitialized, errors.New("STS API 'AssumeRoleWithCertificate' is disabled")) @@ -706,7 +705,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h // policy mapping would be ambigious. // However, we can filter all CA certificates and only check // whether they client has sent exactly one (non-CA) leaf certificate. - var peerCertificates = make([]*x509.Certificate, 0, len(r.TLS.PeerCertificates)) + peerCertificates := make([]*x509.Certificate, 0, len(r.TLS.PeerCertificates)) for _, cert := range r.TLS.PeerCertificates { if cert.IsCA { continue @@ -726,7 +725,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h return } - var certificate = r.TLS.PeerCertificates[0] + certificate := r.TLS.PeerCertificates[0] if !globalSTSTLSConfig.InsecureSkipVerify { // Verify whether the client certificate has been issued by a trusted CA. 
_, err := certificate.Verify(x509.VerifyOptions{ KeyUsages: []x509.ExtKeyUsage{ @@ -812,7 +811,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h return } - var response = new(AssumeRoleWithCertificateResponse) + response := new(AssumeRoleWithCertificateResponse) response.Result.Credentials = tmpCredentials response.Metadata.RequestID = w.Header().Get(xhttp.AmzRequestID) writeSuccessResponseXML(w, encodeResponse(response)) diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index f00e6e048..fd8d29028 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -723,8 +723,8 @@ func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64, } func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize int64, - secretKey, signature string, currTime time.Time) (*http.Request, error) { - + secretKey, signature string, currTime time.Time) (*http.Request, error, +) { regionStr := globalSite.Region var stream []byte var buffer []byte @@ -1204,13 +1204,11 @@ func randString(n int) string { // generate random object name. func getRandomObjectName() string { return randString(16) - } // generate random bucket name. func getRandomBucketName() string { return randString(60) - } // construct URL for http requests for bucket operations. @@ -1262,7 +1260,6 @@ func getMultiDeleteObjectURL(endPoint, bucketName string) string { queryValue := url.Values{} queryValue.Set("delete", "") return makeTestTargetURL(endPoint, bucketName, "", queryValue) - } // return URL for HEAD on the object. @@ -1575,8 +1572,8 @@ func prepareTestBackend(ctx context.Context, instanceType string) (ObjectLayer, // The test works in 2 steps, here is the description of the steps. // STEP 1: Call the handler with the unsigned HTTP request (anonReq), assert for the `ErrAccessDenied` error response. func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketName, objectName, instanceType string, apiRouter http.Handler, - anonReq *http.Request, bucketPolicy *policy.Policy) { - + anonReq *http.Request, bucketPolicy *policy.Policy, +) { anonTestStr := "Anonymous HTTP request test" unknownSignTestStr := "Unknown HTTP signature test" @@ -1664,7 +1661,6 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN if rec.Code != unsupportedSignature { t.Fatal(failTestStr(unknownSignTestStr, fmt.Sprintf("Object API Unknow auth test for \"%s\", expected to fail with %d, but failed with %d", testName, unsupportedSignature, rec.Code))) } - } // ExecObjectLayerAPINilTest - Sets the object layer to `nil`, and calls rhe registered object layer API endpoint, @@ -2227,8 +2223,8 @@ func TestToErrIsNil(t *testing.T) { // All upload failures are considered test errors - this function is // intended as a helper for other tests. 
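The var-block rewrites nearby (errSRObjectLayerNotReady above, errUnsupportedJournalVersion and the minioReleaseInfoURL declarations below) are gofumpt un-grouping parenthesized declarations that contain only a single spec. Sketch:

package demo

import "errors"

// Before:
//
//	var (
//		errNotReady = errors.New("object layer not ready")
//	)
//
// After, the lone declaration loses its parentheses:
var errNotReady = errors.New("object layer not ready")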
func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentials, bucketName, objectName string, - partSizes []int64, metadata map[string]string, asMultipart bool) { - + partSizes []int64, metadata map[string]string, asMultipart bool, +) { if len(partSizes) == 0 { t.Fatalf("Cannot upload an object without part sizes") } diff --git a/cmd/tier-handlers.go b/cmd/tier-handlers.go index a51471d63..a9cf3d87f 100644 --- a/cmd/tier-handlers.go +++ b/cmd/tier-handlers.go @@ -93,7 +93,7 @@ func (api adminAPIHandlers) AddTierHandler(w http.ResponseWriter, r *http.Reques } var cfg madmin.TierConfig - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(reqBytes, &cfg); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return @@ -179,7 +179,7 @@ func (api adminAPIHandlers) EditTierHandler(w http.ResponseWriter, r *http.Reque } var creds madmin.TierCreds - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(reqBytes, &creds); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return diff --git a/cmd/tier-journal.go b/cmd/tier-journal.go index 15fb632fc..c84ef2984 100644 --- a/cmd/tier-journal.go +++ b/cmd/tier-journal.go @@ -57,9 +57,7 @@ const ( tierJournalHdrLen = 2 // 2 bytes ) -var ( - errUnsupportedJournalVersion = errors.New("unsupported pending deletes journal version") -) +var errUnsupportedJournalVersion = errors.New("unsupported pending deletes journal version") func newTierDiskJournal() *tierDiskJournal { return &tierDiskJournal{} @@ -75,7 +73,7 @@ func initTierDeletionJournal(ctx context.Context) (*tierJournal, error) { } for _, diskPath := range globalEndpoints.LocalDisksPaths() { j.diskPath = diskPath - if err := os.MkdirAll(filepath.Dir(j.JournalPath()), os.FileMode(0700)); err != nil { + if err := os.MkdirAll(filepath.Dir(j.JournalPath()), os.FileMode(0o700)); err != nil { logger.LogIf(ctx, err) continue } @@ -255,7 +253,7 @@ func (jd *tierDiskJournal) Open() error { } var err error - jd.file, err = os.OpenFile(jd.JournalPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY|writeMode, 0666) + jd.file, err = os.OpenFile(jd.JournalPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY|writeMode, 0o666) if err != nil { return err } diff --git a/cmd/tier.go b/cmd/tier.go index d4cb3a53e..221323087 100644 --- a/cmd/tier.go +++ b/cmd/tier.go @@ -359,7 +359,6 @@ func loadTierConfig(ctx context.Context, objAPI ObjectLayer) (*TierConfigMgr, er return nil, decErr } return cfg, nil - } // Reset clears remote tier configured and clears tier driver cache. @@ -372,7 +371,6 @@ func (config *TierConfigMgr) Reset() { delete(config.Tiers, k) } config.Unlock() - } // Init initializes tier configuration reading from objAPI diff --git a/cmd/tree-walk_test.go b/cmd/tree-walk_test.go index ad80e4607..f99efff15 100644 --- a/cmd/tree-walk_test.go +++ b/cmd/tree-walk_test.go @@ -147,7 +147,7 @@ func TestTreeWalk(t *testing.T) { t.Fatalf("Unable to create StorageAPI: %s", err) } - var files = []string{ + files := []string{ "d/e", "d/f", "d/g/h", @@ -284,7 +284,7 @@ func TestRecursiveTreeWalk(t *testing.T) { listDir := listDirFactory(context.Background(), disk1, isLeaf) // Create the namespace. - var files = []string{ + files := []string{ "d/e", "d/f", "d/g/h", @@ -398,7 +398,7 @@ func TestSortedness(t *testing.T) { listDir := listDirFactory(context.Background(), disk1, isLeaf) // Create the namespace. 
- var files = []string{ + files := []string{ "d/e", "d/f", "d/g/h", @@ -478,7 +478,7 @@ func TestTreeWalkIsEnd(t *testing.T) { listDir := listDirFactory(context.Background(), disk1, isLeaf) // Create the namespace. - var files = []string{ + files := []string{ "d/e", "d/f", "d/g/h", diff --git a/cmd/update.go b/cmd/update.go index 64c67bf10..687efd0ab 100644 --- a/cmd/update.go +++ b/cmd/update.go @@ -51,10 +51,8 @@ const ( updateTimeout = 10 * time.Second ) -var ( - // For windows our files have .exe additionally. - minioReleaseWindowsInfoURL = minioReleaseURL + "minio.exe.sha256sum" -) +// For windows our files have .exe additionally. +var minioReleaseWindowsInfoURL = minioReleaseURL + "minio.exe.sha256sum" // minioVersionToReleaseTime - parses a standard official release // MinIO version string. @@ -225,7 +223,6 @@ func IsPCFTile() bool { // Any change here should be discussed by opening an issue at // https://github.com/minio/minio/issues. func getUserAgent(mode string) string { - userAgentParts := []string{} // Helper function to concisely append a pair of strings to a // the user-agent slice. diff --git a/cmd/update_fips.go b/cmd/update_fips.go index 55c8915d7..8813c26a6 100644 --- a/cmd/update_fips.go +++ b/cmd/update_fips.go @@ -20,7 +20,5 @@ package cmd -var ( - // Newer official download info URLs appear earlier below. - minioReleaseInfoURL = minioReleaseURL + "minio.fips.sha256sum" -) +// Newer official download info URLs appear earlier below. +var minioReleaseInfoURL = minioReleaseURL + "minio.fips.sha256sum" diff --git a/cmd/update_nofips.go b/cmd/update_nofips.go index 148f8168c..5b1e0e639 100644 --- a/cmd/update_nofips.go +++ b/cmd/update_nofips.go @@ -20,7 +20,5 @@ package cmd -var ( - // Newer official download info URLs appear earlier below. - minioReleaseInfoURL = minioReleaseURL + "minio.sha256sum" -) +// Newer official download info URLs appear earlier below. 
+var minioReleaseInfoURL = minioReleaseURL + "minio.sha256sum" diff --git a/cmd/update_test.go b/cmd/update_test.go index 7d6b6c9d8..86e853cbf 100644 --- a/cmd/update_test.go +++ b/cmd/update_test.go @@ -56,14 +56,22 @@ func TestReleaseTagToNFromTimeConversion(t *testing.T) { tag string errStr string }{ - {time.Date(2017, time.September, 29, 19, 16, 56, 0, utcLoc), - "RELEASE.2017-09-29T19-16-56Z", ""}, - {time.Date(2017, time.August, 5, 0, 0, 53, 0, utcLoc), - "RELEASE.2017-08-05T00-00-53Z", ""}, - {time.Now().UTC(), "2017-09-29T19:16:56Z", - "2017-09-29T19:16:56Z is not a valid release tag"}, - {time.Now().UTC(), "DEVELOPMENT.GOGET", - "DEVELOPMENT.GOGET is not a valid release tag"}, + { + time.Date(2017, time.September, 29, 19, 16, 56, 0, utcLoc), + "RELEASE.2017-09-29T19-16-56Z", "", + }, + { + time.Date(2017, time.August, 5, 0, 0, 53, 0, utcLoc), + "RELEASE.2017-08-05T00-00-53Z", "", + }, + { + time.Now().UTC(), "2017-09-29T19:16:56Z", + "2017-09-29T19:16:56Z is not a valid release tag", + }, + { + time.Now().UTC(), "DEVELOPMENT.GOGET", + "DEVELOPMENT.GOGET is not a valid release tag", + }, } for i, testCase := range testCases { if testCase.errStr != "" { @@ -80,7 +88,6 @@ func TestReleaseTagToNFromTimeConversion(t *testing.T) { t.Errorf("Test %d: Expected %v but got %v", i+1, testCase.t, tagTime) } } - } func TestDownloadURL(t *testing.T) { @@ -310,10 +317,14 @@ func TestParseReleaseData(t *testing.T) { {"more than.two.fields", time.Time{}, "", "", true}, {"more minio.RELEASE.fields", time.Time{}, "", "", true}, {"more minio.RELEASE.2016-10-07T01-16-39Z", time.Time{}, "", "", true}, - {"fbe246edbd382902db9a4035df7dce8cb441357d minio.RELEASE.2016-10-07T01-16-39Z\n", releaseTime, "fbe246edbd382902db9a4035df7dce8cb441357d", - "minio.RELEASE.2016-10-07T01-16-39Z", false}, - {"fbe246edbd382902db9a4035df7dce8cb441357d minio.RELEASE.2016-10-07T01-16-39Z.customer-hotfix\n", releaseTime, "fbe246edbd382902db9a4035df7dce8cb441357d", - "minio.RELEASE.2016-10-07T01-16-39Z.customer-hotfix", false}, + { + "fbe246edbd382902db9a4035df7dce8cb441357d minio.RELEASE.2016-10-07T01-16-39Z\n", releaseTime, "fbe246edbd382902db9a4035df7dce8cb441357d", + "minio.RELEASE.2016-10-07T01-16-39Z", false, + }, + { + "fbe246edbd382902db9a4035df7dce8cb441357d minio.RELEASE.2016-10-07T01-16-39Z.customer-hotfix\n", releaseTime, "fbe246edbd382902db9a4035df7dce8cb441357d", + "minio.RELEASE.2016-10-07T01-16-39Z.customer-hotfix", false, + }, } for i, testCase := range testCases { diff --git a/cmd/utils_test.go b/cmd/utils_test.go index b9931a696..355edc9a1 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -293,7 +293,6 @@ func TestToS3ETag(t *testing.T) { // Test contains func TestContains(t *testing.T) { - testErr := errors.New("test err") testCases := []struct { @@ -398,8 +397,8 @@ func TestCeilFrac(t *testing.T) { // Test if isErrIgnored works correctly. 
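
The update_test.go hunks above are pure formatting: gofumpt places each multi-field element of a test-table composite literal in its own brace pair with a trailing comma. A sketch of the resulting style, using an invented test table:

package main

import "fmt"

func main() {
	testCases := []struct {
		input string
		want  int
	}{
		{
			input: "a",
			want:  1,
		},
		{
			input: "bb",
			want:  2,
		},
	}
	for _, tc := range testCases {
		fmt.Println(tc.input, len(tc.input) == tc.want)
	}
}
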
func TestIsErrIgnored(t *testing.T) { - var errIgnored = fmt.Errorf("ignored error") - var testCases = []struct { + errIgnored := fmt.Errorf("ignored error") + testCases := []struct { err error ignored bool }{ @@ -425,7 +424,7 @@ func TestIsErrIgnored(t *testing.T) { // Test queries() func TestQueries(t *testing.T) { - var testCases = []struct { + testCases := []struct { keys []string keyvalues []string }{ @@ -446,7 +445,7 @@ func TestQueries(t *testing.T) { } func TestLCP(t *testing.T) { - var testCases = []struct { + testCases := []struct { prefixes []string commonPrefix string }{ @@ -485,7 +484,6 @@ func TestGetMinioMode(t *testing.T) { globalIsGateway, globalGatewayName = true, "azure" testMinioMode(globalMinioModeGatewayPrefix + globalGatewayName) - } func TestTimedValue(t *testing.T) { diff --git a/cmd/warm-backend-azure.go b/cmd/warm-backend-azure.go index 4f080352d..f9c804497 100644 --- a/cmd/warm-backend-azure.go +++ b/cmd/warm-backend-azure.go @@ -45,6 +45,7 @@ func (az *warmBackendAzure) getDest(object string) string { } return destObj } + func (az *warmBackendAzure) tier() azblob.AccessTierType { for _, t := range azblob.PossibleAccessTierTypeValues() { if strings.EqualFold(az.StorageClass, string(t)) { diff --git a/cmd/warm-backend-gcs.go b/cmd/warm-backend-gcs.go index 58225d4c1..2edba0559 100644 --- a/cmd/warm-backend-gcs.go +++ b/cmd/warm-backend-gcs.go @@ -51,7 +51,7 @@ func (gcs *warmBackendGCS) getDest(object string) string { func (gcs *warmBackendGCS) Put(ctx context.Context, key string, data io.Reader, length int64) (remoteVersionID, error) { object := gcs.client.Bucket(gcs.Bucket).Object(gcs.getDest(key)) - //TODO: set storage class + // TODO: set storage class w := object.NewWriter(ctx) if gcs.StorageClass != "" { w.ObjectAttrs.StorageClass = gcs.StorageClass @@ -74,7 +74,6 @@ func (gcs *warmBackendGCS) Get(ctx context.Context, key string, rv remoteVersion r, err = object.NewRangeReader(ctx, opts.startOffset, opts.length) if err != nil { return nil, gcsToObjectError(err, gcs.Bucket, key) - } return r, nil } diff --git a/cmd/xl-storage-format-utils.go b/cmd/xl-storage-format-utils.go index b0b540cc8..f372d6216 100644 --- a/cmd/xl-storage-format-utils.go +++ b/cmd/xl-storage-format-utils.go @@ -118,7 +118,7 @@ func getXLDiskLoc(diskID string) (poolIdx, setIdx, diskIdx int) { // Trivial collisions are avoided, but this is by no means a strong hash. func hashDeterministicString(m map[string]string) uint64 { // Seed (random) - var crc = uint64(0xc2b40bbac11a7295) + crc := uint64(0xc2b40bbac11a7295) // Xor each value to make order independent for k, v := range m { // Separate key and value with an individual xor with a random number. @@ -131,7 +131,7 @@ func hashDeterministicString(m map[string]string) uint64 { // hashDeterministicBytes will return a deterministic (weak) hash for the map values. // Trivial collisions are avoided, but this is by no means a strong hash. 
func hashDeterministicBytes(m map[string][]byte) uint64 { - var crc = uint64(0x1bbc7e1dde654743) + crc := uint64(0x1bbc7e1dde654743) for k, v := range m { crc ^= (xxh3.HashString(k) ^ 0x4ee3bbaf7ab2506b) + (xxh3.Hash(v) ^ 0x8da4c8da66194257) } diff --git a/cmd/xl-storage-format-utils_test.go b/cmd/xl-storage-format-utils_test.go index 0310bde4f..8a45d2732 100644 --- a/cmd/xl-storage-format-utils_test.go +++ b/cmd/xl-storage-format-utils_test.go @@ -107,7 +107,6 @@ func Test_hashDeterministicString(t *testing.T) { if got := hashDeterministicString(m); got == want { t.Errorf("hashDeterministicString() = %v, does not want %v", got, want) } - }) } } diff --git a/cmd/xl-storage-format-v1.go b/cmd/xl-storage-format-v1.go index 9df9e715e..03aa6feea 100644 --- a/cmd/xl-storage-format-v1.go +++ b/cmd/xl-storage-format-v1.go @@ -160,7 +160,7 @@ func (c ChecksumInfo) MarshalJSON() ([]byte, error) { // UnmarshalJSON - custom checksum info unmarshaller func (c *ChecksumInfo) UnmarshalJSON(data []byte) error { var info checksumInfoJSON - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(data, &info); err != nil { return err } diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go index fa0848f7d..34a5178ac 100644 --- a/cmd/xl-storage-format-v2.go +++ b/cmd/xl-storage-format-v2.go @@ -834,7 +834,7 @@ func (x *xlMetaV2) LoadOrConvert(buf []byte) error { } xlMeta := &xlMetaV1Object{} - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(buf, xlMeta); err != nil { return errFileCorrupt } @@ -1790,7 +1790,7 @@ func mergeXLV2Versions(quorum int, strict bool, versions ...[]xlMetaV2ShallowVer } // Sanity check. Enable if duplicates show up. if false { - var found = make(map[[16]byte]struct{}) + found := make(map[[16]byte]struct{}) for _, ver := range merged { if _, ok := found[ver.header.VersionID]; ok { panic("found dupe") @@ -1927,7 +1927,6 @@ func (x xlMetaBuf) IsLatestDeleteMarker() bool { } isDeleteMarker = xl.Type == DeleteType return errDoneForNow - }) return isDeleteMarker } diff --git a/cmd/xl-storage-format-v2_test.go b/cmd/xl-storage-format-v2_test.go index 0c8548562..feb510d4e 100644 --- a/cmd/xl-storage-format-v2_test.go +++ b/cmd/xl-storage-format-v2_test.go @@ -147,7 +147,6 @@ func TestXLV2FormatData(t *testing.T) { failOnErr(xl2.Load(trimmed)) if len(xl2.data) != 0 { t.Fatal("data, was not trimmed, bytes left:", len(xl2.data)) - } // Corrupt metadata, last 5 bytes is the checksum, so go a bit further back. trimmed[len(trimmed)-10] += 10 @@ -415,7 +414,6 @@ func Benchmark_xlMetaV2Shallow_Load(b *testing.B) { } } }) - } func Test_xlMetaV2Shallow_Load(t *testing.T) { diff --git a/cmd/xl-storage-format_test.go b/cmd/xl-storage-format_test.go index 8f4f82f36..61232d737 100644 --- a/cmd/xl-storage-format_test.go +++ b/cmd/xl-storage-format_test.go @@ -238,7 +238,7 @@ func TestGetXLMetaV1Jsoniter1(t *testing.T) { } var jsoniterXLMeta xlMetaV1Object - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil { t.Errorf("jsoniter parsing of XLMeta failed: %v", err) } @@ -248,7 +248,6 @@ func TestGetXLMetaV1Jsoniter1(t *testing.T) { // Tests the correctness of constructing XLMetaV1 using jsoniter lib for XLMetaV1 of size 10 parts. 
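
hashDeterministicBytes, touched just above, accumulates one XOR term per map entry so the result is independent of Go's randomized map iteration order. A runnable sketch of the same idea, with the standard library's FNV-1a swapped in for xxh3 so it needs no external module (the seed constants are arbitrary, mirroring the original's style):

package main

import (
	"fmt"
	"hash/fnv"
)

func hash64(b []byte) uint64 {
	h := fnv.New64a()
	h.Write(b)
	return h.Sum64()
}

// deterministicMapHash XORs one term per entry, so iteration order does
// not affect the result; like the original, this is a weak hash.
func deterministicMapHash(m map[string][]byte) uint64 {
	crc := uint64(0x1bbc7e1dde654743) // arbitrary seed
	for k, v := range m {
		crc ^= (hash64([]byte(k)) ^ 0x4ee3bbaf7ab2506b) + (hash64(v) ^ 0x8da4c8da66194257)
	}
	return crc
}

func main() {
	m := map[string][]byte{"a": []byte("1"), "b": []byte("2")}
	fmt.Printf("%#x\n", deterministicMapHash(m))
}
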
// The result will be compared with the result obtained from json.unMarshal of the byte data. func TestGetXLMetaV1Jsoniter10(t *testing.T) { - xlMetaJSON := getXLMetaBytes(10) var unMarshalXLMeta xlMetaV1Object @@ -257,7 +256,7 @@ func TestGetXLMetaV1Jsoniter10(t *testing.T) { } var jsoniterXLMeta xlMetaV1Object - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(xlMetaJSON, &jsoniterXLMeta); err != nil { t.Errorf("jsoniter parsing of XLMeta failed: %v", err) } @@ -341,11 +340,12 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { xhttp.AmzBucketReplicationStatus: "PENDING", xhttp.ContentType: "application/json", }, - Parts: []ObjectPartInfo{{ - Number: 1, - Size: 1234345, - ActualSize: 1234345, - }, + Parts: []ObjectPartInfo{ + { + Number: 1, + Size: 1234345, + ActualSize: 1234345, + }, { Number: 2, Size: 1234345, @@ -359,11 +359,12 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { BlockSize: 10000, Index: 1, Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8}, - Checksums: []ChecksumInfo{{ - PartNumber: 1, - Algorithm: HighwayHash256S, - Hash: nil, - }, + Checksums: []ChecksumInfo{ + { + PartNumber: 1, + Algorithm: HighwayHash256S, + Hash: nil, + }, { PartNumber: 2, Algorithm: HighwayHash256S, @@ -390,7 +391,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { } b.Logf("Serialized size: %d bytes", len(enc)) rng := rand.New(rand.NewSource(0)) - var dump = make([]byte, len(enc)) + dump := make([]byte, len(enc)) b.Run("UpdateObjectVersion", func(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() diff --git a/cmd/xl-storage-free-version_test.go b/cmd/xl-storage-free-version_test.go index ea81b2c24..6456a8f9d 100644 --- a/cmd/xl-storage-free-version_test.go +++ b/cmd/xl-storage-free-version_test.go @@ -84,8 +84,8 @@ func TestFreeVersion(t *testing.T) { }}, }, MarkDeleted: false, - //DeleteMarkerReplicationStatus: "", - //VersionPurgeStatus: "", + // DeleteMarkerReplicationStatus: "", + // VersionPurgeStatus: "", NumVersions: 1, SuccessorModTime: time.Time{}, } diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index 4832d7bf9..04964270e 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -170,7 +170,7 @@ func getValidPath(path string) (string, error) { } if osIsNotExist(err) { // Disk not found create it. - if err = mkdirAll(path, 0777); err != nil { + if err = mkdirAll(path, 0o777); err != nil { return path, err } } @@ -269,7 +269,7 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) { _, _ = rand.Read(rnd[:]) tmpFile := ".writable-check-" + hex.EncodeToString(rnd[:]) + ".tmp" filePath := pathJoin(p.diskPath, minioMetaTmpBucket, tmpFile) - w, err := OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666) + w, err := OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o666) if err != nil { switch { case isSysErrInvalidArg(err): @@ -524,7 +524,6 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates } return sizeS, nil }) - if err != nil { return dataUsageInfo, err } @@ -663,7 +662,7 @@ func (s *xlStorage) GetDiskID() (string, error) { } format := &formatErasureV3{} - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(b, &format); err != nil { logger.LogIf(GlobalContext, err) // log unexpected errors return "", errCorruptedFormat @@ -708,7 +707,7 @@ func (s *xlStorage) MakeVol(ctx context.Context, volume string) error { // Volume does not exist we proceed to create. 
if osIsNotExist(err) { // Make a volume entry, with mode 0777 mkdir honors system umask. - err = mkdirAll(volumeDir, 0777) + err = mkdirAll(volumeDir, 0o777) } if osIsPermission(err) { return errDiskAccessDenied @@ -1329,7 +1328,7 @@ func (s *xlStorage) readAllData(ctx context.Context, volumeDir string, filePath return nil, time.Time{}, ctx.Err() } - f, err := OpenFileDirectIO(filePath, readMode, 0666) + f, err := OpenFileDirectIO(filePath, readMode, 0o666) if err != nil { if osIsNotExist(err) { // Check if the object doesn't exist because its bucket @@ -1521,11 +1520,11 @@ func (s *xlStorage) ReadFile(ctx context.Context, volume string, path string, of func (s *xlStorage) openFileSync(filePath string, mode int) (f *os.File, err error) { // Create top level directories if they don't exist. // with mode 0777 mkdir honors system umask. - if err = mkdirAll(pathutil.Dir(filePath), 0777); err != nil { + if err = mkdirAll(pathutil.Dir(filePath), 0o777); err != nil { return nil, osErrToFileErr(err) } - w, err := OpenFile(filePath, mode|writeMode, 0666) + w, err := OpenFile(filePath, mode|writeMode, 0o666) if err != nil { // File path cannot be verified since one of the parents is a file. switch { @@ -1548,11 +1547,11 @@ func (s *xlStorage) openFileSync(filePath string, mode int) (f *os.File, err err func (s *xlStorage) openFileNoSync(filePath string, mode int) (f *os.File, err error) { // Create top level directories if they don't exist. // with mode 0777 mkdir honors system umask. - if err = mkdirAll(pathutil.Dir(filePath), 0777); err != nil { + if err = mkdirAll(pathutil.Dir(filePath), 0o777); err != nil { return nil, osErrToFileErr(err) } - w, err := OpenFile(filePath, mode, 0666) + w, err := OpenFile(filePath, mode, 0o666) if err != nil { // File path cannot be verified since one of the parents is a file. switch { @@ -1589,7 +1588,7 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off return nil, err } - file, err := OpenFileDirectIO(filePath, readMode, 0666) + file, err := OpenFileDirectIO(filePath, readMode, 0o666) if err != nil { switch { case osIsNotExist(err): @@ -1733,11 +1732,11 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz // Create top level directories if they don't exist. // with mode 0777 mkdir honors system umask. - if err = mkdirAll(parentFilePath, 0777); err != nil { + if err = mkdirAll(parentFilePath, 0o777); err != nil { return osErrToFileErr(err) } - w, err := OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666) + w, err := OpenFileDirectIO(filePath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o666) if err != nil { return osErrToFileErr(err) } @@ -2087,7 +2086,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f } else { // This code-path is to preserve the legacy data. xlMetaLegacy := &xlMetaV1Object{} - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(dstBuf, xlMetaLegacy); err != nil { logger.LogIf(ctx, err) // Data appears corrupt. Drop data. @@ -2138,7 +2137,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f } // legacy data dir means its old content, honor system umask. - if err = mkdirAll(legacyDataPath, 0777); err != nil { + if err = mkdirAll(legacyDataPath, 0o777); err != nil { // any failed mkdir-calls delete them. 
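
The xl-storage hunks only respell the permission literals, but the calls themselves are worth one runnable illustration: MkdirAll with mode 0o777 defers to the process umask (as the patched comments note), and O_EXCL turns file creation into fail-if-exists. A hypothetical standalone sketch with invented paths:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	base, err := os.MkdirTemp("", "xl-demo")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(base)

	// With mode 0o777, MkdirAll honors the umask; the directory
	// typically ends up 0o755.
	dir := filepath.Join(base, "bucket")
	if err := os.MkdirAll(dir, 0o777); err != nil {
		fmt.Println(err)
		return
	}

	// O_EXCL makes creation fail if the file already exists.
	name := filepath.Join(dir, "data")
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o666)
	if err != nil {
		fmt.Println(err)
		return
	}
	f.Close()

	_, err = os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o666)
	fmt.Println("second O_EXCL create:", err) // expected: file exists
}
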
s.deleteFile(dstVolumeDir, legacyDataPath, true) return osErrToFileErr(err) @@ -2435,7 +2434,7 @@ func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string, glob } return stat, err } - var files = []string{pathJoin(volumeDir, path)} + files := []string{pathJoin(volumeDir, path)} if glob { files, err = filepathx.Glob(pathJoin(volumeDir, path)) if err != nil { diff --git a/cmd/xl-storage_test.go b/cmd/xl-storage_test.go index aaf5f2e10..e76fe7e01 100644 --- a/cmd/xl-storage_test.go +++ b/cmd/xl-storage_test.go @@ -158,22 +158,22 @@ func createPermDeniedFile(t *testing.T) (permDeniedDir string) { return permDeniedDir } - if err = os.Mkdir(slashpath.Join(permDeniedDir, "mybucket"), 0775); err != nil { + if err = os.Mkdir(slashpath.Join(permDeniedDir, "mybucket"), 0o775); err != nil { errMsg = fmt.Sprintf("Unable to create temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err) return permDeniedDir } - if err = ioutil.WriteFile(slashpath.Join(permDeniedDir, "mybucket", "myobject"), []byte(""), 0400); err != nil { + if err = ioutil.WriteFile(slashpath.Join(permDeniedDir, "mybucket", "myobject"), []byte(""), 0o400); err != nil { errMsg = fmt.Sprintf("Unable to create file %v. %v", slashpath.Join(permDeniedDir, "mybucket", "myobject"), err) return permDeniedDir } - if err = os.Chmod(slashpath.Join(permDeniedDir, "mybucket"), 0400); err != nil { + if err = os.Chmod(slashpath.Join(permDeniedDir, "mybucket"), 0o400); err != nil { errMsg = fmt.Sprintf("Unable to change permission to temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err) return permDeniedDir } - if err = os.Chmod(permDeniedDir, 0400); err != nil { + if err = os.Chmod(permDeniedDir, 0o400); err != nil { errMsg = fmt.Sprintf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -182,8 +182,8 @@ func createPermDeniedFile(t *testing.T) (permDeniedDir string) { // removePermDeniedFile - removes temporary directory and file with path '/mybucket/myobject' func removePermDeniedFile(permDeniedDir string) { - if err := os.Chmod(permDeniedDir, 0775); err == nil { - if err = os.Chmod(slashpath.Join(permDeniedDir, "mybucket"), 0775); err == nil { + if err := os.Chmod(permDeniedDir, 0o775); err == nil { + if err = os.Chmod(slashpath.Join(permDeniedDir, "mybucket"), 0o775); err == nil { os.RemoveAll(permDeniedDir) } } @@ -228,7 +228,7 @@ func TestXLStorageIsDirEmpty(t *testing.T) { // Should give false for not-a-directory. dir2 := slashpath.Join(tmp, "file") - err = ioutil.WriteFile(dir2, []byte("hello"), 0777) + err = ioutil.WriteFile(dir2, []byte("hello"), 0o777) if err != nil { t.Fatal(err) } @@ -239,7 +239,7 @@ func TestXLStorageIsDirEmpty(t *testing.T) { // Should give true for a real empty directory. dir3 := slashpath.Join(tmp, "empty") - err = os.Mkdir(dir3, 0777) + err = os.Mkdir(dir3, 0o777) if err != nil { t.Fatal(err) } @@ -514,7 +514,7 @@ func TestXLStorageMakeVol(t *testing.T) { t.Fatalf("Unable to create file, %s", err) } // Create a directory. - if err := os.Mkdir(slashpath.Join(path, "existing-vol"), 0777); err != nil { + if err := os.Mkdir(slashpath.Join(path, "existing-vol"), 0o777); err != nil { t.Fatalf("Unable to create directory, %s", err) } @@ -560,7 +560,7 @@ func TestXLStorageMakeVol(t *testing.T) { t.Fatalf("Unable to create temporary directory. 
%v", err) } defer os.RemoveAll(permDeniedDir) - if err = os.Chmod(permDeniedDir, 0400); err != nil { + if err = os.Chmod(permDeniedDir, 0o400); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -570,7 +570,7 @@ func TestXLStorageMakeVol(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = os.Chmod(permDeniedDir, 0755); err != nil { + if err = os.Chmod(permDeniedDir, 0o755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -580,7 +580,7 @@ func TestXLStorageMakeVol(t *testing.T) { } // change backend permissions for MakeVol error. - if err = os.Chmod(permDeniedDir, 0400); err != nil { + if err = os.Chmod(permDeniedDir, 0o400); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -606,7 +606,7 @@ func TestXLStorageDeleteVol(t *testing.T) { // TestXLStorage failure cases. vol := slashpath.Join(path, "nonempty-vol") - if err = os.Mkdir(vol, 0777); err != nil { + if err = os.Mkdir(vol, 0o777); err != nil { t.Fatalf("Unable to create directory, %s", err) } if err = ioutil.WriteFile(slashpath.Join(vol, "test-file"), []byte{}, os.ModePerm); err != nil { @@ -656,10 +656,10 @@ func TestXLStorageDeleteVol(t *testing.T) { t.Fatalf("Unable to create temporary directory. %v", err) } defer removePermDeniedFile(permDeniedDir) - if err = os.Mkdir(slashpath.Join(permDeniedDir, "mybucket"), 0400); err != nil { + if err = os.Mkdir(slashpath.Join(permDeniedDir, "mybucket"), 0o400); err != nil { t.Fatalf("Unable to create temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err) } - if err = os.Chmod(permDeniedDir, 0400); err != nil { + if err = os.Chmod(permDeniedDir, 0o400); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -669,7 +669,7 @@ func TestXLStorageDeleteVol(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = os.Chmod(permDeniedDir, 0755); err != nil { + if err = os.Chmod(permDeniedDir, 0o755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -679,7 +679,7 @@ func TestXLStorageDeleteVol(t *testing.T) { } // change backend permissions for MakeVol error. - if err = os.Chmod(permDeniedDir, 0400); err != nil { + if err = os.Chmod(permDeniedDir, 0o400); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -921,7 +921,7 @@ func TestXLStorageListDir(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = os.Chmod(permDeniedDir, 0755); err != nil { + if err = os.Chmod(permDeniedDir, 0o755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -978,7 +978,7 @@ func TestXLStorageDeleteFile(t *testing.T) { t.Fatalf("Unable to create file, %s", err.Error()) } // Parent directory must have write permissions, this is read + execute. 
- if err = os.Chmod(pathJoin(path, "no-permissions"), 0555); err != nil { + if err = os.Chmod(pathJoin(path, "no-permissions"), 0o555); err != nil { t.Fatalf("Unable to chmod directory, %s", err.Error()) } @@ -1049,7 +1049,7 @@ func TestXLStorageDeleteFile(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = os.Chmod(permDeniedDir, 0755); err != nil { + if err = os.Chmod(permDeniedDir, 0o755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -1087,7 +1087,7 @@ func TestXLStorageReadFile(t *testing.T) { } // Create directory to make errIsNotRegular - if err = os.Mkdir(slashpath.Join(path, "success-vol", "object-as-dir"), 0777); err != nil { + if err = os.Mkdir(slashpath.Join(path, "success-vol", "object-as-dir"), 0o777); err != nil { t.Fatalf("Unable to create directory, %s", err) } @@ -1112,15 +1112,18 @@ func TestXLStorageReadFile(t *testing.T) { // Object is a directory. - 3 { volume, "object-as-dir", - 0, 5, nil, errIsNotRegular}, + 0, 5, nil, errIsNotRegular, + }, // One path segment length is > 255 chars long. - 4 { volume, "path/to/my/object0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", - 0, 5, nil, errFileNameTooLong}, + 0, 5, nil, errFileNameTooLong, + }, // Path length is > 1024 chars long. - 5 { volume, "level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001/level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002/level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003/object000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", - 0, 5, nil, errFileNameTooLong}, + 0, 5, nil, errFileNameTooLong, + }, // Buffer size greater than object size. - 6 { volume, "myobject", 0, 16, @@ -1185,7 +1188,7 @@ func TestXLStorageReadFile(t *testing.T) { for i, testCase := range testCases { var n int64 // Common read buffer. - var buf = make([]byte, testCase.bufSize) + buf := make([]byte, testCase.bufSize) n, err = xlStorage.ReadFile(context.Background(), testCase.volume, testCase.fileName, testCase.offset, buf, v) if err != nil && testCase.expectedErr != nil { // Validate if the type string of the errors are an exact match. @@ -1252,7 +1255,7 @@ func TestXLStorageReadFile(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = os.Chmod(permDeniedDir, 0755); err != nil { + if err = os.Chmod(permDeniedDir, 0o755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. 
%v", permDeniedDir, err) } @@ -1262,7 +1265,7 @@ func TestXLStorageReadFile(t *testing.T) { } // Common read buffer. - var buf = make([]byte, 10) + buf := make([]byte, 10) if _, err = xlStoragePermStorage.ReadFile(context.Background(), "mybucket", "myobject", 0, buf, v); err != errFileAccessDenied { t.Errorf("expected: %s, got: %s", errFileAccessDenied, err) } @@ -1355,7 +1358,7 @@ func TestXLStorageFormatFileChange(t *testing.T) { } // Change the format.json such that "this" is changed to "randomid". - if err = ioutil.WriteFile(pathJoin(xlStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0644); err != nil { + if err = ioutil.WriteFile(pathJoin(xlStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0o644); err != nil { t.Fatalf("ioutil.WriteFile failed with %s", err) } @@ -1380,7 +1383,7 @@ func TestXLStorageAppendFile(t *testing.T) { } // Create directory to make errIsNotRegular - if err = os.Mkdir(slashpath.Join(path, "success-vol", "object-as-dir"), 0777); err != nil { + if err = os.Mkdir(slashpath.Join(path, "success-vol", "object-as-dir"), 0o777); err != nil { t.Fatalf("Unable to create directory, %s", err) } @@ -1422,7 +1425,7 @@ func TestXLStorageAppendFile(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = os.Chmod(permDeniedDir, 0755); err != nil { + if err = os.Chmod(permDeniedDir, 0o755); err != nil { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } @@ -1836,7 +1839,7 @@ func TestXLStorageVerifyFile(t *testing.T) { // 4) Streaming bitrot check on corrupted file filePath := pathJoin(storage.String(), volName, fileName) - f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0644) + f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0o644) if err != nil { t.Fatal(err) } diff --git a/cmd/xl-storage_unix_test.go b/cmd/xl-storage_unix_test.go index bd8e1b668..e781f3095 100644 --- a/cmd/xl-storage_unix_test.go +++ b/cmd/xl-storage_unix_test.go @@ -71,7 +71,7 @@ func TestIsValidUmaskVol(t *testing.T) { } // Get umask of the bits stored. - currentUmask := 0777 - uint32(st.Mode().Perm()) + currentUmask := 0o777 - uint32(st.Mode().Perm()) // Verify if umask is correct. 
if int(currentUmask) != testCase.expectedUmask { diff --git a/cmd/xl-storage_windows_test.go b/cmd/xl-storage_windows_test.go index 1b44ed275..189852f05 100644 --- a/cmd/xl-storage_windows_test.go +++ b/cmd/xl-storage_windows_test.go @@ -31,7 +31,7 @@ import ( // Test if various paths work as expected when converted to UNC form func TestUNCPaths(t *testing.T) { - var testCases = []struct { + testCases := []struct { objName string pass bool }{ diff --git a/internal/arn/arn.go b/internal/arn/arn.go index daa9bb8c4..96e9020a1 100644 --- a/internal/arn/arn.go +++ b/internal/arn/arn.go @@ -58,11 +58,9 @@ type ARN struct { ResourceID string } -var ( - // Allows english letters, numbers, '.', '-', '_' and '/'. Starts with a - // letter or digit. At least 1 character long. - validResourceIDRegex = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9_/\.-]*$`) -) +// Allows english letters, numbers, '.', '-', '_' and '/'. Starts with a +// letter or digit. At least 1 character long. +var validResourceIDRegex = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9_/\.-]*$`) // NewIAMRoleARN - returns an ARN for a role in MinIO. func NewIAMRoleARN(resourceID, serverRegion string) (ARN, error) { diff --git a/internal/bpool/bpool_test.go b/internal/bpool/bpool_test.go index 300bbf951..2525cdb70 100644 --- a/internal/bpool/bpool_test.go +++ b/internal/bpool/bpool_test.go @@ -21,9 +21,9 @@ import "testing" // Tests - bytePool functionality. func TestBytePool(t *testing.T) { - var size = 4 - var width = 10 - var capWidth = 16 + size := 4 + width := 10 + capWidth := 16 bufPool := NewBytePoolCap(size, width, capWidth) diff --git a/internal/bucket/bandwidth/monitor.go b/internal/bucket/bandwidth/monitor.go index bd50c94d3..48dacccbd 100644 --- a/internal/bucket/bandwidth/monitor.go +++ b/internal/bucket/bandwidth/monitor.go @@ -111,6 +111,7 @@ func (m *Monitor) getReport(selectBucket SelectionFunction) *madmin.BucketBandwi } return report } + func (m *Monitor) trackEWMA() { for { select { diff --git a/internal/bucket/bandwidth/monitor_test.go b/internal/bucket/bandwidth/monitor_test.go index 5a253c4f1..d7eb6a49b 100644 --- a/internal/bucket/bandwidth/monitor_test.go +++ b/internal/bucket/bandwidth/monitor_test.go @@ -80,7 +80,8 @@ func TestMonitor_GetReport(t *testing.T) { want2: &madmin.BucketBandwidthReport{ BucketStats: map[string]madmin.BandwidthDetails{"bucket": { LimitInBytesPerSecond: 1024 * 1024, - CurrentBandwidthInBytesPerSecond: exponentialMovingAverage(betaBucket, float64(oneMiB), 2*float64(oneMiB))}}, + CurrentBandwidthInBytesPerSecond: exponentialMovingAverage(betaBucket, float64(oneMiB), 2*float64(oneMiB)), + }}, }, }, } diff --git a/internal/bucket/bandwidth/reader.go b/internal/bucket/bandwidth/reader.go index ed143ddae..6b4935a89 100644 --- a/internal/bucket/bandwidth/reader.go +++ b/internal/bucket/bandwidth/reader.go @@ -64,7 +64,6 @@ func (r *MonitoredReader) Read(buf []byte) (n int, err error) { need = 1 // to ensure we read at least one byte for every Read tokens = b } - } else { // all tokens go towards payload need = int(math.Min(float64(b), float64(need))) tokens = need diff --git a/internal/bucket/lifecycle/expiration_test.go b/internal/bucket/lifecycle/expiration_test.go index 30164b21c..6d632194e 100644 --- a/internal/bucket/lifecycle/expiration_test.go +++ b/internal/bucket/lifecycle/expiration_test.go @@ -57,7 +57,6 @@ func TestInvalidExpiration(t *testing.T) { t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err) } }) - } validationTestCases := []struct { diff --git 
a/internal/bucket/lifecycle/filter.go b/internal/bucket/lifecycle/filter.go index ad7995d7a..f881e99b1 100644 --- a/internal/bucket/lifecycle/filter.go +++ b/internal/bucket/lifecycle/filter.go @@ -22,9 +22,7 @@ import ( "io" ) -var ( - errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified") -) +var errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified") // Filter - a filter for a lifecycle configuration Rule. type Filter struct { diff --git a/internal/bucket/lifecycle/lifecycle.go b/internal/bucket/lifecycle/lifecycle.go index 02bd93140..570fa5a94 100644 --- a/internal/bucket/lifecycle/lifecycle.go +++ b/internal/bucket/lifecycle/lifecycle.go @@ -285,7 +285,7 @@ func (o ObjectOpts) ExpiredObjectDeleteMarker() bool { // ComputeAction returns the action to perform by evaluating all lifecycle rules // against the object name and its modification time. func (lc Lifecycle) ComputeAction(obj ObjectOpts) Action { - var action = NoneAction + action := NoneAction if obj.ModTime.IsZero() { return action } diff --git a/internal/bucket/lifecycle/lifecycle_test.go b/internal/bucket/lifecycle/lifecycle_test.go index 9519e645a..f5fd3d8a7 100644 --- a/internal/bucket/lifecycle/lifecycle_test.go +++ b/internal/bucket/lifecycle/lifecycle_test.go @@ -219,7 +219,6 @@ func TestExpectedExpiryTime(t *testing.T) { } }) } - } func TestComputeActions(t *testing.T) { @@ -520,7 +519,6 @@ func TestHasActiveRules(t *testing.T) { if got := lc.HasActiveRules(tc.prefix, true); got != tc.expectedRec { t.Fatalf("Expected result with recursive set to true: `%v`, got: `%v`", tc.expectedRec, got) } - }) } diff --git a/internal/bucket/object/lock/lock.go b/internal/bucket/object/lock/lock.go index 8024848ed..734f65dfd 100644 --- a/internal/bucket/object/lock/lock.go +++ b/internal/bucket/object/lock/lock.go @@ -121,9 +121,7 @@ const ( ntpServerEnv = "MINIO_NTP_SERVER" ) -var ( - ntpServer = env.Get(ntpServerEnv, "") -) +var ntpServer = env.Get(ntpServerEnv, "") // UTCNowNTP - is similar in functionality to UTCNow() // but only used when we do not wish to rely on system @@ -424,7 +422,6 @@ func ParseObjectLockRetentionHeaders(h http.Header) (rmode RetMode, r RetentionD } return rmode, RetentionDate{retDate}, nil - } // GetObjectRetentionMeta constructs ObjectRetention from metadata @@ -480,7 +477,6 @@ func ParseObjectLockLegalHoldHeaders(h http.Header) (lhold ObjectLegalHold, err lhold = ObjectLegalHold{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Status: lh} } return lhold, nil - } // ObjectLegalHold specified in diff --git a/internal/bucket/object/lock/lock_test.go b/internal/bucket/object/lock/lock_test.go index a85392fcf..7add6d48a 100644 --- a/internal/bucket/object/lock/lock_test.go +++ b/internal/bucket/object/lock/lock_test.go @@ -55,6 +55,7 @@ func TestParseMode(t *testing.T) { } } } + func TestParseLegalHoldStatus(t *testing.T) { tests := []struct { value string @@ -510,6 +511,7 @@ func TestParseObjectLegalHold(t *testing.T) { } } } + func TestFilterObjectLockMetadata(t *testing.T) { tests := []struct { metadata map[string]string @@ -576,9 +578,11 @@ func TestFilterObjectLockMetadata(t *testing.T) { "x-amz-object-lock-mode": "governance", "x-amz-object-lock-retain-until-date": "2020-02-01", }, - expected: map[string]string{"x-amz-object-lock-legal-hold": "on", + expected: map[string]string{ + "x-amz-object-lock-legal-hold": "on", "x-amz-object-lock-mode": "governance", - "x-amz-object-lock-retain-until-date": "2020-02-01"}, + 
"x-amz-object-lock-retain-until-date": "2020-02-01", + }, }, } diff --git a/internal/bucket/replication/destination.go b/internal/bucket/replication/destination.go index 0557bed82..ba6469eb2 100644 --- a/internal/bucket/replication/destination.go +++ b/internal/bucket/replication/destination.go @@ -54,7 +54,6 @@ func (d Destination) IsValid() bool { func (d Destination) String() string { return d.ARN - } // LegacyArn returns true if arn format has prefix "arn:aws:s3:::" which was diff --git a/internal/bucket/replication/filter.go b/internal/bucket/replication/filter.go index 475c70893..dc362dc13 100644 --- a/internal/bucket/replication/filter.go +++ b/internal/bucket/replication/filter.go @@ -21,9 +21,7 @@ import ( "encoding/xml" ) -var ( - errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified") -) +var errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified") // Filter - a filter for a replication configuration Rule. type Filter struct { diff --git a/internal/bucket/replication/replication_test.go b/internal/bucket/replication/replication_test.go index d1e882af6..71dc1a43a 100644 --- a/internal/bucket/replication/replication_test.go +++ b/internal/bucket/replication/replication_test.go @@ -39,21 +39,24 @@ func TestParseAndValidateReplicationConfig(t *testing.T) { expectedValidationErr: errInvalidDeleteMarkerReplicationStatus, }, // 2 Invalid delete replication status in replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errDeleteReplicationMissing, }, // 3 valid replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: nil, }, // 4 missing role in config and destination ARN is in legacy format - {inputConfig: `EnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `EnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, // destination bucket in config different from bucket specified destBucket: "destinationbucket", sameTarget: false, @@ -61,63 +64,72 @@ func TestParseAndValidateReplicationConfig(t *testing.T) { expectedValidationErr: errDestinationArnMissing, }, // 5 replication destination in different rules not identical - {inputConfig: `EnabledDisabledDisabledkey-prefixarn:minio:replication:::destinationbucketEnabled3DisabledDisabledkey-prefixarn:minio:replication:::destinationbucket2`, + { + inputConfig: `EnabledDisabledDisabledkey-prefixarn:minio:replication:::destinationbucketEnabled3DisabledDisabledkey-prefixarn:minio:replication:::destinationbucket2`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: nil, }, // 6 missing rule status in replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-nameDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, 
expectedValidationErr: errEmptyRuleStatus, }, // 7 invalid rule status in replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnssabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnssabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errInvalidRuleStatus, }, // 8 invalid rule id exceeds length allowed in replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-namevsUVERgOc8zZYagLSzSa5lE8qeI6nh1lyLNS4R9W052yfecrhhepGboswSWMMNO8CPcXM4GM3nKyQ72EadlMzzZBFoYWKn7ju5GoE5w9c57a0piHR1vexpdd9FrMquiruvAJ0MTGVupm0EegMVxoIOdjx7VgZhGrmi2XDvpVEFT7WmYMA9fSK297XkTHWyECaNHBySJ1Qp4vwX8tPNauKpfHx4kzUpnKe1PZbptGMWbY5qTcwlNuMhVSmgFffShqEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-namevsUVERgOc8zZYagLSzSa5lE8qeI6nh1lyLNS4R9W052yfecrhhepGboswSWMMNO8CPcXM4GM3nKyQ72EadlMzzZBFoYWKn7ju5GoE5w9c57a0piHR1vexpdd9FrMquiruvAJ0MTGVupm0EegMVxoIOdjx7VgZhGrmi2XDvpVEFT7WmYMA9fSK297XkTHWyECaNHBySJ1Qp4vwX8tPNauKpfHx4kzUpnKe1PZbptGMWbY5qTcwlNuMhVSmgFffShqEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errInvalidRuleID, }, // 9 invalid priority status in replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errReplicationUniquePriority, }, // 10 no rule in replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-name`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-name`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errReplicationNoRule, }, // 11 no destination in replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefix`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefix`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: Errorf("invalid destination '%v'", ""), expectedValidationErr: nil, }, // 12 destination not matching ARN in replication config - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixdestinationbucket2`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixdestinationbucket2`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: fmt.Errorf("invalid destination '%v'", "destinationbucket2"), expectedValidationErr: nil, }, // 13 missing role in config and destination ARN has target ARN - {inputConfig: `EnabledDisabledDisabledkey-prefixarn:minio:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket`, + { + inputConfig: `EnabledDisabledDisabledkey-prefixarn:minio:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket`, // destination bucket in config 
different from bucket specified destBucket: "destinationbucket", sameTarget: false, @@ -125,7 +137,8 @@ func TestParseAndValidateReplicationConfig(t *testing.T) { expectedValidationErr: nil, }, // 14 role absent in config and destination ARN has target ARN in invalid format - {inputConfig: `EnabledDisabledDisabledkey-prefixarn:xx:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket`, + { + inputConfig: `EnabledDisabledDisabledkey-prefixarn:xx:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket`, // destination bucket in config different from bucket specified destBucket: "destinationbucket", sameTarget: false, @@ -154,6 +167,7 @@ func TestParseAndValidateReplicationConfig(t *testing.T) { }) } } + func TestReplicate(t *testing.T) { cfgs := []Config{ { // Config0 - Replication config has no filters, all replication enabled @@ -297,33 +311,37 @@ func TestHasActiveRules(t *testing.T) { expectedNonRec bool expectedRec bool }{ - // case 1 - only one rule which is in Disabled status - {inputConfig: `arn:aws:iam::AcctID:role/role-nameDisabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameDisabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, prefix: "miss/prefix", expectedNonRec: false, expectedRec: false, }, // case 2 - only one rule which matches prefix filter - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey/prefixarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey/prefixarn:aws:s3:::destinationbucket`, prefix: "key/prefix1", expectedNonRec: true, expectedRec: true, }, // case 3 - empty prefix - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledarn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledarn:aws:s3:::destinationbucket`, prefix: "key-prefix", expectedNonRec: true, expectedRec: true, }, // case 4 - has Filter based on prefix - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledtestdir/dir1/arn:aws:s3:::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledtestdir/dir1/arn:aws:s3:::destinationbucket`, prefix: "testdir/", expectedNonRec: false, expectedRec: true, }, // case 5 - has filter with prefix and tags, here we are not matching on tags - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabled + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabled key-prefixkey1value1key2value2arn:aws:s3:::destinationbucket`, prefix: "testdir/", expectedNonRec: true, @@ -344,7 +362,6 @@ func TestHasActiveRules(t *testing.T) { if got := cfg.HasActiveRules(tc.prefix, true); got != tc.expectedRec { t.Fatalf("Expected result with recursive set to true: `%v`, got: `%v`", tc.expectedRec, got) } - }) } @@ -357,21 +374,24 @@ func TestFilterActionableRules(t *testing.T) { ExpectedRules []Rule }{ // case 1 - only one rule - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledprefix1arn:minio:replication:xxx::destinationbucket`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledprefix1arn:minio:replication:xxx::destinationbucket`, prefix: "prefix", ExpectedRules: []Rule{{Status: Enabled, Priority: 1, DeleteMarkerReplication: DeleteMarkerReplication{Status: Enabled}, DeleteReplication: DeleteReplication{Status: Disabled}, Destination: 
Destination{Bucket: "destinationbucket", ARN: "arn:minio:replication:xxx::destinationbucket"}}}, }, // case 2 - multiple rules for same target, overlapping rules with different priority - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledprefix3arn:minio:replication:xxx::destinationbucketEnabledDisabledDisabledprefix1arn:minio:replication:xxx::destinationbucket`, - prefix: "prefix", + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledprefix3arn:minio:replication:xxx::destinationbucketEnabledDisabledDisabledprefix1arn:minio:replication:xxx::destinationbucket`, + prefix: "prefix", ExpectedRules: []Rule{ {Status: Enabled, Priority: 3, DeleteMarkerReplication: DeleteMarkerReplication{Status: Enabled}, DeleteReplication: DeleteReplication{Status: Disabled}, Destination: Destination{Bucket: "destinationbucket", ARN: "arn:minio:replication:xxx::destinationbucket"}}, {Status: Enabled, Priority: 1, DeleteMarkerReplication: DeleteMarkerReplication{Status: Enabled}, DeleteReplication: DeleteReplication{Status: Disabled}, Destination: Destination{Bucket: "destinationbucket", ARN: "arn:minio:replication:xxx::destinationbucket"}}, }, }, // case 3 - multiple rules for different target, overlapping rules on a target - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledprefix2arn:minio:replication:xxx::destinationbucket2EnabledDisabledDisabledprefix4arn:minio:replication:xxx::destinationbucket2EnabledDisabledDisabledprefix3arn:minio:replication:xxx::destinationbucketEnabledDisabledDisabledprefix1arn:minio:replication:xxx::destinationbucket`, - prefix: "prefix", + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledprefix2arn:minio:replication:xxx::destinationbucket2EnabledDisabledDisabledprefix4arn:minio:replication:xxx::destinationbucket2EnabledDisabledDisabledprefix3arn:minio:replication:xxx::destinationbucketEnabledDisabledDisabledprefix1arn:minio:replication:xxx::destinationbucket`, + prefix: "prefix", ExpectedRules: []Rule{ {Status: Enabled, Priority: 4, DeleteMarkerReplication: DeleteMarkerReplication{Status: Enabled}, DeleteReplication: DeleteReplication{Status: Disabled}, Destination: Destination{Bucket: "destinationbucket2", ARN: "arn:minio:replication:xxx::destinationbucket2"}}, {Status: Enabled, Priority: 2, DeleteMarkerReplication: DeleteMarkerReplication{Status: Enabled}, DeleteReplication: DeleteReplication{Status: Disabled}, Destination: Destination{Bucket: "destinationbucket2", ARN: "arn:minio:replication:xxx::destinationbucket2"}}, @@ -389,7 +409,6 @@ func TestFilterActionableRules(t *testing.T) { got := cfg.FilterActionableRules(ObjectOpts{Name: tc.prefix}) if len(got) != len(tc.ExpectedRules) { t.Fatalf("Expected matching number of actionable rules: `%v`, got: `%v`", tc.ExpectedRules, got) - } for i := range got { if got[i].Destination.ARN != tc.ExpectedRules[i].Destination.ARN || got[i].Priority != tc.ExpectedRules[i].Priority { diff --git a/internal/bucket/replication/rule_test.go b/internal/bucket/replication/rule_test.go index d51456330..df7192553 100644 --- a/internal/bucket/replication/rule_test.go +++ b/internal/bucket/replication/rule_test.go @@ -30,23 +30,27 @@ func TestMetadataReplicate(t *testing.T) { expectedResult bool }{ // case 1 - rule with replica modification enabled; not a replica - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabled`, + { + inputConfig: 
`arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabled`, opts: ObjectOpts{Name: "c1test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, // 1. Replica mod sync enabled; not a replica expectedResult: true, }, // case 2 - rule with replica modification disabled; a replica - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketDisabled`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketDisabled`, opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: true}, // 1. Replica mod sync enabled; a replica expectedResult: false, }, // case 3 - rule with replica modification disabled; not a replica - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketDisabled`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketDisabled`, opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, // 1. Replica mod sync disabled; not a replica expectedResult: true, }, // case 4 - rule with replica modification enabled; a replica - {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabled`, + { + inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabled`, opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: MetadataReplicationType, Replica: true}, // 1. Replica mod sync enabled; a replica expectedResult: true, }, diff --git a/internal/config/cache/config.go b/internal/config/cache/config.go index efb9c00ac..361c0ca8a 100644 --- a/internal/config/cache/config.go +++ b/internal/config/cache/config.go @@ -53,7 +53,7 @@ type Config struct { // json entries for CacheConfig. func (cfg *Config) UnmarshalJSON(data []byte) (err error) { type Alias Config - var _cfg = &struct { + _cfg := &struct { *Alias }{ Alias: (*Alias)(cfg), diff --git a/internal/config/config.go b/internal/config/config.go index 92f33f3da..225c6ae97 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -252,7 +252,7 @@ func (kvs KVS) GetWithDefault(key string, defaultKVS KVS) string { // Keys returns the list of keys for the current KVS func (kvs KVS) Keys() []string { - var keys = make([]string, len(kvs)) + keys := make([]string, len(kvs)) var foundComment bool for i := range kvs { if kvs[i].Key == madmin.CommentKey { @@ -817,7 +817,7 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) (dynamic bool, err e return false, Errorf("sub-system '%s' cannot have empty keys", subSys) } - var kvs = KVS{} + kvs := KVS{} var prevK string for _, v := range fields { kv := strings.SplitN(v, KvSeparator, 2) diff --git a/internal/config/crypto.go b/internal/config/crypto.go index f27b2e577..cdf423fd7 100644 --- a/internal/config/crypto.go +++ b/internal/config/crypto.go @@ -62,7 +62,7 @@ func DecryptBytes(KMS kms.KMS, ciphertext []byte, context kms.Context) ([]byte, // The same context must be provided when decrypting the // ciphertext. 
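
The cache-config hunk above (_cfg := &struct{ *Alias }{...}) touches a classic pattern worth spelling out: a locally defined alias type sheds Config's method set, so the Unmarshal call inside UnmarshalJSON cannot recurse back into the method itself. A simplified, hypothetical sketch with invented fields and the standard library in place of jsoniter:

package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	Enabled bool   `json:"enabled"`
	Drives  string `json:"drives"`
}

// UnmarshalJSON demonstrates the local-alias trick: Alias has Config's
// fields but none of its methods, so this cannot recurse.
func (cfg *Config) UnmarshalJSON(data []byte) error {
	type Alias Config
	_cfg := &struct {
		*Alias
	}{
		Alias: (*Alias)(cfg),
	}
	return json.Unmarshal(data, _cfg)
}

func main() {
	var c Config
	_ = json.Unmarshal([]byte(`{"enabled":true,"drives":"/mnt/a"}`), &c)
	fmt.Printf("%+v\n", c)
}
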
func Encrypt(KMS kms.KMS, plaintext io.Reader, context kms.Context) (io.Reader, error) { - var algorithm = sio.AES_256_GCM + algorithm := sio.AES_256_GCM if !fips.Enabled && !sioutil.NativeAES() { algorithm = sio.ChaCha20Poly1305 } @@ -88,7 +88,7 @@ func Encrypt(KMS kms.KMS, plaintext io.Reader, context kms.Context) (io.Reader, header [5]byte buffer bytes.Buffer ) - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary metadata, err := json.Marshal(encryptedObject{ KeyID: key.KeyID, KMSKey: key.Ciphertext, @@ -140,7 +140,7 @@ func Decrypt(KMS kms.KMS, ciphertext io.Reader, context kms.Context) (io.Reader, if _, err := io.ReadFull(ciphertext, metadataBuffer); err != nil { return nil, err } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(metadataBuffer, &metadata); err != nil { return nil, err } diff --git a/internal/config/dns/etcd_dns.go b/internal/config/dns/etcd_dns.go index 8f41bfbd8..4c6b55cf0 100644 --- a/internal/config/dns/etcd_dns.go +++ b/internal/config/dns/etcd_dns.go @@ -58,7 +58,7 @@ func (c *CoreDNS) Close() error { // List - Retrieves list of DNS entries for the domain. func (c *CoreDNS) List() (map[string][]SrvRecord, error) { - var srvRecords = map[string][]SrvRecord{} + srvRecords := map[string][]SrvRecord{} for _, domainName := range c.domainNames { key := msg.Path(fmt.Sprintf("%s.", domainName), c.prefixPath) records, err := c.list(key+etcdPathSeparator, true) diff --git a/internal/config/errors-utils.go b/internal/config/errors-utils.go index 1ba2da979..f4de31449 100644 --- a/internal/config/errors-utils.go +++ b/internal/config/errors-utils.go @@ -120,7 +120,6 @@ func ErrorToErr(err error) Err { // FmtError converts a fatal error message to a more clear error // using some colors func FmtError(introMsg string, err error, jsonFlag bool) string { - renderedTxt := "" uiErr := ErrorToErr(err) // JSON print diff --git a/internal/config/etcd/etcd_test.go b/internal/config/etcd/etcd_test.go index 4470888c2..d9889ca96 100644 --- a/internal/config/etcd/etcd_test.go +++ b/internal/config/etcd/etcd_test.go @@ -38,9 +38,13 @@ func TestParseEndpoints(t *testing.T) { {"http://localhost:2379000", nil, false, false}, // Valid inputs - {"https://localhost:2379,https://localhost:2380", []string{ - "https://localhost:2379", "https://localhost:2380"}, - true, true}, + { + "https://localhost:2379,https://localhost:2380", + []string{ + "https://localhost:2379", "https://localhost:2380", + }, + true, true, + }, {"http://localhost:2379", []string{"http://localhost:2379"}, false, true}, } diff --git a/internal/config/identity/openid/jwks_test.go b/internal/config/identity/openid/jwks_test.go index fa2895bbf..e85d7fa38 100644 --- a/internal/config/identity/openid/jwks_test.go +++ b/internal/config/identity/openid/jwks_test.go @@ -92,18 +92,23 @@ func TestPublicKey(t *testing.T) { t.Fatalf("Expected RSA key[1], got %T", keys[1]) } else if key0.Curve != elliptic.P256() { t.Fatal("Key[0] is not using P-256 curve") - } else if !bytes.Equal(key0.X.Bytes(), []byte{0x30, 0xa0, 0x42, 0x4c, 0xd2, + } else if !bytes.Equal(key0.X.Bytes(), []byte{ + 0x30, 0xa0, 0x42, 0x4c, 0xd2, 0x1c, 0x29, 0x44, 0x83, 0x8a, 0x2d, 0x75, 0xc9, 0x2b, 0x37, 0xe7, 0x6e, 0xa2, - 0xd, 0x9f, 0x0, 0x89, 0x3a, 0x3b, 0x4e, 0xee, 0x8a, 0x3c, 0xa, 0xaf, 0xec, 0x3e}) { + 0xd, 0x9f, 0x0, 0x89, 0x3a, 0x3b, 0x4e, 0xee, 0x8a, 0x3c, 0xa, 0xaf, 0xec, 0x3e, + }) { t.Fatalf("Bad key[0].X, got %v", 
key0.X.Bytes()) - } else if !bytes.Equal(key0.Y.Bytes(), []byte{0xe0, 0x4b, 0x65, 0xe9, 0x24, + } else if !bytes.Equal(key0.Y.Bytes(), []byte{ + 0xe0, 0x4b, 0x65, 0xe9, 0x24, 0x56, 0xd9, 0x88, 0x8b, 0x52, 0xb3, 0x79, 0xbd, 0xfb, 0xd5, 0x1e, 0xe8, 0x69, 0xef, 0x1f, 0xf, 0xc6, 0x5b, 0x66, 0x59, 0x69, 0x5b, 0x6c, 0xce, - 0x8, 0x17, 0x23}) { + 0x8, 0x17, 0x23, + }) { t.Fatalf("Bad key[0].Y, got %v", key0.Y.Bytes()) } else if key1.E != 0x10001 { t.Fatalf("Bad key[1].E: %d", key1.E) - } else if !bytes.Equal(key1.N.Bytes(), []byte{0xd2, 0xfc, 0x7b, 0x6a, 0xa, 0x1e, + } else if !bytes.Equal(key1.N.Bytes(), []byte{ + 0xd2, 0xfc, 0x7b, 0x6a, 0xa, 0x1e, 0x6c, 0x67, 0x10, 0x4a, 0xeb, 0x8f, 0x88, 0xb2, 0x57, 0x66, 0x9b, 0x4d, 0xf6, 0x79, 0xdd, 0xad, 0x9, 0x9b, 0x5c, 0x4a, 0x6c, 0xd9, 0xa8, 0x80, 0x15, 0xb5, 0xa1, 0x33, 0xbf, 0xb, 0x85, 0x6c, 0x78, 0x71, 0xb6, 0xdf, 0x0, 0xb, 0x55, @@ -122,7 +127,8 @@ func TestPublicKey(t *testing.T) { 0xa5, 0x9e, 0x66, 0xed, 0x1f, 0x33, 0x89, 0x45, 0x77, 0x63, 0x5c, 0x47, 0xa, 0xf7, 0x5c, 0xf9, 0x2c, 0x20, 0xd1, 0xda, 0x43, 0xe1, 0xbf, 0xc4, 0x19, 0xe2, 0x22, 0xa6, 0xf0, 0xd0, 0xbb, 0x35, 0x8c, 0x5e, 0x38, 0xf9, 0xcb, 0x5, 0xa, 0xea, - 0xfe, 0x90, 0x48, 0x14, 0xf1, 0xac, 0x1a, 0xa4, 0x9c, 0xca, 0x9e, 0xa0, 0xca, 0x83}) { + 0xfe, 0x90, 0x48, 0x14, 0xf1, 0xac, 0x1a, 0xa4, 0x9c, 0xca, 0x9e, 0xa0, 0xca, 0x83, + }) { t.Fatalf("Bad key[1].N, got %v", key1.N.Bytes()) } } diff --git a/internal/config/identity/openid/jwt.go b/internal/config/identity/openid/jwt.go index 42e343f2f..4f39bf506 100644 --- a/internal/config/identity/openid/jwt.go +++ b/internal/config/identity/openid/jwt.go @@ -112,7 +112,7 @@ func (r *Config) UserInfo(accessToken string) (map[string]interface{}, error) { } dec := json.NewDecoder(resp.Body) - var claims = map[string]interface{}{} + claims := map[string]interface{}{} if err = dec.Decode(&claims); err != nil { // uncomment this for debugging when needed. diff --git a/internal/config/storageclass/storage-class_test.go b/internal/config/storageclass/storage-class_test.go index 3b9e5a133..375e99722 100644 --- a/internal/config/storageclass/storage-class_test.go +++ b/internal/config/storageclass/storage-class_test.go @@ -29,24 +29,48 @@ func TestParseStorageClass(t *testing.T) { wantSc StorageClass expectedError error }{ - {"EC:3", StorageClass{ - Parity: 3}, - nil}, - {"EC:4", StorageClass{ - Parity: 4}, - nil}, - {"AB:4", StorageClass{ - Parity: 4}, - errors.New("Unsupported scheme AB. Supported scheme is EC")}, - {"EC:4:5", StorageClass{ - Parity: 4}, - errors.New("Too many sections in EC:4:5")}, - {"EC:A", StorageClass{ - Parity: 4}, - errors.New(`strconv.Atoi: parsing "A": invalid syntax`)}, - {"AB", StorageClass{ - Parity: 4}, - errors.New("Too few sections in AB")}, + { + "EC:3", + StorageClass{ + Parity: 3, + }, + nil, + }, + { + "EC:4", + StorageClass{ + Parity: 4, + }, + nil, + }, + { + "AB:4", + StorageClass{ + Parity: 4, + }, + errors.New("Unsupported scheme AB. 
Supported scheme is EC"), }, { "EC:4:5", StorageClass{ Parity: 4, }, errors.New("Too many sections in EC:4:5"), }, { "EC:A", StorageClass{ Parity: 4, }, errors.New(`strconv.Atoi: parsing "A": invalid syntax`), }, { "AB", StorageClass{ Parity: 4, }, errors.New("Too few sections in AB"), }, } for i, tt := range tests { gotSc, err := parseStorageClass(tt.storageClassEnv) diff --git a/internal/crypto/error.go b/internal/crypto/error.go index 0a6651840..7711a05a8 100644 --- a/internal/crypto/error.go +++ b/internal/crypto/error.go @@ -86,8 +86,6 @@ var ( errInvalidInternalSealAlgorithm = Errorf("The internal seal algorithm is invalid and not supported") ) -var ( - // errOutOfEntropy indicates that the a source of randomness (PRNG) wasn't able - // to produce enough random data. This is fatal error and should cause a panic. - errOutOfEntropy = Errorf("Unable to read enough randomness from the system") -) +// errOutOfEntropy indicates that a source of randomness (PRNG) wasn't able +// to produce enough random data. This is a fatal error and should cause a panic. +var errOutOfEntropy = Errorf("Unable to read enough randomness from the system") diff --git a/internal/crypto/header.go b/internal/crypto/header.go index 58903c46a..96ad2a30b 100644 --- a/internal/crypto/header.go +++ b/internal/crypto/header.go @@ -36,11 +36,9 @@ func RemoveSensitiveHeaders(h http.Header) { h.Del(xhttp.AmzMetaUnencryptedContentMD5) } -var ( - // SSECopy represents AWS SSE-C for copy requests. It provides - // functionality to handle SSE-C copy requests. - SSECopy = ssecCopy{} -) +// SSECopy represents AWS SSE-C for copy requests. It provides +// functionality to handle SSE-C copy requests. +var SSECopy = ssecCopy{} type ssecCopy struct{} diff --git a/internal/crypto/key.go b/internal/crypto/key.go index 8e81c165d..49cc9763a 100644 --- a/internal/crypto/key.go +++ b/internal/crypto/key.go @@ -111,9 +111,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str // may be cryptographically bound to the object's path the same bucket/object as during sealing // must be provided. On success the ObjectKey contains the decrypted sealed key.
func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket, object string) error { - var ( - unsealConfig sio.Config - ) + var unsealConfig sio.Config switch sealedKey.Algorithm { default: return Errorf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm) diff --git a/internal/crypto/metadata_test.go b/internal/crypto/metadata_test.go index 96d46af75..ff59ec277 100644 --- a/internal/crypto/metadata_test.go +++ b/internal/crypto/metadata_test.go @@ -306,7 +306,6 @@ var s3CreateMetadataTests = []struct { SealedDataKey []byte SealedKey SealedKey }{ - {KeyID: "", SealedDataKey: nil, SealedKey: SealedKey{Algorithm: SealAlgorithm}}, {KeyID: "my-minio-key", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}}, {KeyID: "cafebabe", SealedDataKey: make([]byte, 48), SealedKey: SealedKey{Algorithm: SealAlgorithm}}, diff --git a/internal/crypto/sse-kms.go b/internal/crypto/sse-kms.go index 4dfec6f05..dfe0a00ba 100644 --- a/internal/crypto/sse-kms.go +++ b/internal/crypto/sse-kms.go @@ -75,7 +75,7 @@ func (ssekms) ParseHTTP(h http.Header) (string, kms.Context, error) { return "", nil, err } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(b, &ctx); err != nil { return "", nil, err } @@ -209,7 +209,7 @@ func (ssekms) ParseMetadata(metadata map[string]string) (keyID string, kmsKey [] if err != nil { return keyID, kmsKey, sealedKey, ctx, Errorf("The internal KMS context is not base64-encoded") } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(b, &ctx); err != nil { return keyID, kmsKey, sealedKey, ctx, Errorf("The internal sealed KMS context is invalid %w", err) } diff --git a/internal/crypto/sse.go b/internal/crypto/sse.go index 59635d835..ec7f33ec2 100644 --- a/internal/crypto/sse.go +++ b/internal/crypto/sse.go @@ -50,7 +50,6 @@ type Type interface { fmt.Stringer IsRequested(http.Header) bool - IsEncrypted(map[string]string) bool } diff --git a/internal/disk/health.go b/internal/disk/health.go index 2519fb417..94e321975 100644 --- a/internal/disk/health.go +++ b/internal/disk/health.go @@ -30,10 +30,9 @@ import ( // GetHealthInfo about the drive func GetHealthInfo(ctx context.Context, drive, fsPath string) (madmin.DiskLatency, madmin.DiskThroughput, error) { - // Create a file with O_DIRECT flag, choose default umask and also make sure // we are exclusively writing to a new file using O_EXCL. - w, err := OpenFileDirectIO(fsPath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666) + w, err := OpenFileDirectIO(fsPath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o666) if err != nil { return madmin.DiskLatency{}, madmin.DiskThroughput{}, err } diff --git a/internal/disk/type_windows.go b/internal/disk/type_windows.go index cd17aa01f..579f5e81f 100644 --- a/internal/disk/type_windows.go +++ b/internal/disk/type_windows.go @@ -26,10 +26,8 @@ import ( "unsafe" ) -var ( - // GetVolumeInformation provides windows drive volume information. - GetVolumeInformation = kernel32.NewProc("GetVolumeInformationW") -) +// GetVolumeInformation provides windows drive volume information. 
+var GetVolumeInformation = kernel32.NewProc("GetVolumeInformationW") // getFSType returns the filesystem type of the underlying mounted filesystem func getFSType(path string) string { @@ -37,7 +35,7 @@ func getFSType(path string) string { var lpVolumeSerialNumber uint32 var lpFileSystemFlags, lpMaximumComponentLength uint32 var lpFileSystemNameBuffer, volumeName [260]uint16 - var ps = syscall.StringToUTF16Ptr(filepath.VolumeName(path)) + ps := syscall.StringToUTF16Ptr(filepath.VolumeName(path)) // Extract values safely // BOOL WINAPI GetVolumeInformation( diff --git a/internal/dsync/drwmutex.go b/internal/dsync/drwmutex.go index 50c553aa6..9ce071560 100644 --- a/internal/dsync/drwmutex.go +++ b/internal/dsync/drwmutex.go @@ -103,7 +103,6 @@ func NewDRWMutex(clnt *Dsync, names ...string) *DRWMutex { // If the lock is already in use, the calling go routine // blocks until the mutex is available. func (dm *DRWMutex) Lock(id, source string) { - isReadLock := false dm.lockBlocking(context.Background(), nil, id, source, isReadLock, Options{ Timeout: drwMutexInfinite, @@ -121,7 +120,6 @@ type Options struct { // blocks until either the mutex becomes available and return success or // more time has passed than the timeout value and return false. func (dm *DRWMutex) GetLock(ctx context.Context, cancel context.CancelFunc, id, source string, opts Options) (locked bool) { - isReadLock := false return dm.lockBlocking(ctx, cancel, id, source, isReadLock, opts) } @@ -131,7 +129,6 @@ func (dm *DRWMutex) GetLock(ctx context.Context, cancel context.CancelFunc, id, // If one or more read locks are already in use, it will grant another lock. // Otherwise the calling go routine blocks until the mutex is available. func (dm *DRWMutex) RLock(id, source string) { - isReadLock := true dm.lockBlocking(context.Background(), nil, id, source, isReadLock, Options{ Timeout: drwMutexInfinite, @@ -145,7 +142,6 @@ func (dm *DRWMutex) RLock(id, source string) { // available and return success or more time has passed than the timeout // value and return false. 
func (dm *DRWMutex) GetRLock(ctx context.Context, cancel context.CancelFunc, id, source string, opts Options) (locked bool) { - isReadLock := true return dm.lockBlocking(ctx, cancel, id, source, isReadLock, opts) } @@ -317,7 +313,6 @@ func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int) (boo log("dsync: Refresh returned false for %#v at %s\n", args, c) } } - }(index, c) } @@ -422,7 +417,6 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is g.lockUID = args.UID } ch <- g - }(index, isReadLock, c) } diff --git a/internal/dsync/drwmutex_test.go b/internal/dsync/drwmutex_test.go index ce27d4df7..be13c97ec 100644 --- a/internal/dsync/drwmutex_test.go +++ b/internal/dsync/drwmutex_test.go @@ -32,7 +32,6 @@ const ( ) func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) { - drwm := NewDRWMutex(ds, "simplelock") ctx1, cancel1 := context.WithCancel(context.Background()) @@ -91,7 +90,6 @@ func TestSimpleWriteLockTimedOut(t *testing.T) { } func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) { - drwm := NewDRWMutex(ds, "duallock") // fmt.Println("Getting initial write lock") @@ -126,7 +124,6 @@ func TestDualWriteLockAcquired(t *testing.T) { if locked != expected { t.Errorf("TestDualWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked) } - } func TestDualWriteLockTimedOut(t *testing.T) { @@ -136,7 +133,6 @@ func TestDualWriteLockTimedOut(t *testing.T) { if locked != expected { t.Errorf("TestDualWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked) } - } // Test cases below are copied 1 to 1 from sync/rwmutex_test.go (adapted to use DRWMutex) diff --git a/internal/dsync/dsync_test.go b/internal/dsync/dsync_test.go index 0b25fa896..5ba3aba0c 100644 --- a/internal/dsync/dsync_test.go +++ b/internal/dsync/dsync_test.go @@ -36,11 +36,15 @@ import ( const numberOfNodes = 5 -var ds *Dsync -var rpcPaths []string // list of rpc paths where lock server is serving. +var ( + ds *Dsync + rpcPaths []string // list of rpc paths where lock server is serving. +) -var nodes = make([]string, numberOfNodes) // list of node IP addrs or hostname with ports. -var lockServers []*lockServer +var ( + nodes = make([]string, numberOfNodes) // list of node IP addrs or hostname with ports. 
+ lockServers []*lockServer +) func startRPCServers() { for i := range nodes { @@ -94,7 +98,6 @@ func TestMain(m *testing.M) { } func TestSimpleLock(t *testing.T) { - dm := NewDRWMutex(ds, "test") dm.Lock(id, source) @@ -106,7 +109,6 @@ func TestSimpleLock(t *testing.T) { } func TestSimpleLockUnlockMultipleTimes(t *testing.T) { - dm := NewDRWMutex(ds, "test") dm.Lock(id, source) @@ -132,7 +134,6 @@ func TestSimpleLockUnlockMultipleTimes(t *testing.T) { // Test two locks for same resource, one succeeds, one fails (after timeout) func TestTwoSimultaneousLocksForSameResource(t *testing.T) { - dm1st := NewDRWMutex(ds, "aap") dm2nd := NewDRWMutex(ds, "aap") @@ -156,7 +157,6 @@ func TestTwoSimultaneousLocksForSameResource(t *testing.T) { // Test three locks for same resource, one succeeds, one fails (after timeout) func TestThreeSimultaneousLocksForSameResource(t *testing.T) { - dm1st := NewDRWMutex(ds, "aap") dm2nd := NewDRWMutex(ds, "aap") dm3rd := NewDRWMutex(ds, "aap") @@ -221,7 +221,6 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) { // Test two locks for different resources, both succeed func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) { - dm1 := NewDRWMutex(ds, "aap") dm2 := NewDRWMutex(ds, "noot") @@ -332,7 +331,7 @@ func BenchmarkMutexUncontended(b *testing.B) { *DRWMutex } b.RunParallel(func(pb *testing.PB) { - var mu = PaddedMutex{NewDRWMutex(ds, "")} + mu := PaddedMutex{NewDRWMutex(ds, "")} for pb.Next() { mu.Lock(id, source) mu.Unlock() diff --git a/internal/event/name_test.go b/internal/event/name_test.go index 5bb67a09c..edc56a3ff 100644 --- a/internal/event/name_test.go +++ b/internal/event/name_test.go @@ -32,8 +32,10 @@ func TestNameExpand(t *testing.T) { {BucketCreated, []Name{BucketCreated}}, {BucketRemoved, []Name{BucketRemoved}}, {ObjectAccessedAll, []Name{ObjectAccessedGet, ObjectAccessedHead, ObjectAccessedGetRetention, ObjectAccessedGetLegalHold}}, - {ObjectCreatedAll, []Name{ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut, - ObjectCreatedPutRetention, ObjectCreatedPutLegalHold, ObjectCreatedPutTagging, ObjectCreatedDeleteTagging}}, + {ObjectCreatedAll, []Name{ + ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut, + ObjectCreatedPutRetention, ObjectCreatedPutLegalHold, ObjectCreatedPutTagging, ObjectCreatedDeleteTagging, + }}, {ObjectRemovedAll, []Name{ObjectRemovedDelete, ObjectRemovedDeleteMarkerCreated}}, {ObjectAccessedHead, []Name{ObjectAccessedHead}}, } diff --git a/internal/event/rulesmap_test.go b/internal/event/rulesmap_test.go index 832006b7e..0d9b279f3 100644 --- a/internal/event/rulesmap_test.go +++ b/internal/event/rulesmap_test.go @@ -158,8 +158,10 @@ func TestNewRulesMap(t *testing.T) { "*", TargetID{"1", "webhook"}) rulesMapCase2 := make(RulesMap) - rulesMapCase2.add([]Name{ObjectAccessedGet, ObjectAccessedHead, - ObjectCreatedPut, ObjectAccessedGetRetention, ObjectAccessedGetLegalHold}, "*", TargetID{"1", "webhook"}) + rulesMapCase2.add([]Name{ + ObjectAccessedGet, ObjectAccessedHead, + ObjectCreatedPut, ObjectAccessedGetRetention, ObjectAccessedGetLegalHold, + }, "*", TargetID{"1", "webhook"}) rulesMapCase3 := make(RulesMap) rulesMapCase3.add([]Name{ObjectRemovedDelete}, "2010*.jpg", TargetID{"1", "webhook"}) diff --git a/internal/event/target/elasticsearch.go b/internal/event/target/elasticsearch.go index 8344bf51a..8db0787c4 100644 --- a/internal/event/target/elasticsearch.go +++ b/internal/event/target/elasticsearch.go @@ -106,7 
+106,6 @@ type esClient interface { createIndex(ElasticsearchArgs) error ping(context.Context, ElasticsearchArgs) (bool, error) stop() - entryExists(context.Context, string, string) (bool, error) removeEntry(context.Context, string, string) error updateEntry(context.Context, string, string, event.Event) error @@ -408,7 +407,6 @@ func (c *esClientV7) getServerSupportStatus(ctx context.Context) (ESSupportStatu } } return ESSUnknown, "", fmt.Errorf("Unable to get ES Server Version - got INFO response: %v", m) - } func (c *esClientV7) isAtleastV7() bool { diff --git a/internal/event/target/kafka.go b/internal/event/target/kafka.go index 59237a902..e6b81715d 100644 --- a/internal/event/target/kafka.go +++ b/internal/event/target/kafka.go @@ -241,7 +241,6 @@ func (target *KafkaTarget) Close() error { // Check if atleast one broker in cluster is active func (k KafkaArgs) pingBrokers() bool { - for _, broker := range k.Brokers { _, dErr := net.Dial("tcp", broker.String()) if dErr == nil { @@ -276,7 +275,6 @@ func NewKafkaTarget(id string, args KafkaArgs, doneCh <-chan struct{}, loggerOnc config.Net.SASL.Enable = args.SASL.Enable tlsConfig, err := saramatls.NewConfig(args.TLS.ClientTLSCert, args.TLS.ClientTLSKey) - if err != nil { target.loggerOnce(context.Background(), err, target.ID()) return target, err diff --git a/internal/event/target/mysql.go b/internal/event/target/mysql.go index f782dfa10..c1cd2894b 100644 --- a/internal/event/target/mysql.go +++ b/internal/event/target/mysql.go @@ -240,7 +240,6 @@ func (target *MySQLTarget) send(eventData event.Event) error { // Send - reads an event from store and sends it to MySQL. func (target *MySQLTarget) Send(eventKey string) error { - _, err := target.IsActive() if err != nil { return err @@ -298,7 +297,6 @@ func (target *MySQLTarget) Close() error { // Executes the table creation statements. func (target *MySQLTarget) executeStmts() error { - _, err := target.db.Exec(fmt.Sprintf(mysqlTableExists, target.args.Table)) if err != nil { createStmt := mysqlCreateNamespaceTable @@ -329,7 +327,6 @@ func (target *MySQLTarget) executeStmts() error { } return nil - } // NewMySQLTarget - creates new MySQL target. 
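Note on the kafka.go hunk above: the change only removes a blank line inside pingBrokers, whose job is to report whether at least one broker accepts a TCP connection. A minimal self-contained sketch of that probe follows; the broker addresses are made up, and the dial timeout and Close call are additions in this sketch that the code above does not perform.

package main

import (
	"fmt"
	"net"
	"time"
)

// pingBrokers reports whether at least one broker accepts a TCP connection,
// mirroring the reachability check in kafka.go above. The timeout and the
// Close call are additions of this sketch.
func pingBrokers(brokers []string) bool {
	for _, broker := range brokers {
		conn, err := net.DialTimeout("tcp", broker, 2*time.Second)
		if err == nil {
			conn.Close() // the probe only cares that the dial succeeded
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(pingBrokers([]string{"localhost:9092", "localhost:9093"})) // hypothetical brokers
}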
diff --git a/internal/event/target/nats_contrib_test.go b/internal/event/target/nats_contrib_test.go index 0e06961ad..5413d3767 100644 --- a/internal/event/target/nats_contrib_test.go +++ b/internal/event/target/nats_contrib_test.go @@ -31,9 +31,11 @@ func TestNatsConnPlain(t *testing.T) { clientConfig := &NATSArgs{ Enable: true, - Address: xnet.Host{Name: "localhost", + Address: xnet.Host{ + Name: "localhost", Port: (xnet.Port(opts.Port)), - IsPortSet: true}, + IsPortSet: true, + }, Subject: "test", } con, err := clientConfig.connectNats() @@ -53,9 +55,11 @@ func TestNatsConnUserPass(t *testing.T) { clientConfig := &NATSArgs{ Enable: true, - Address: xnet.Host{Name: "localhost", + Address: xnet.Host{ + Name: "localhost", Port: (xnet.Port(opts.Port)), - IsPortSet: true}, + IsPortSet: true, + }, Subject: "test", Username: opts.Username, Password: opts.Password, @@ -77,9 +81,11 @@ func TestNatsConnToken(t *testing.T) { clientConfig := &NATSArgs{ Enable: true, - Address: xnet.Host{Name: "localhost", + Address: xnet.Host{ + Name: "localhost", Port: (xnet.Port(opts.Port)), - IsPortSet: true}, + IsPortSet: true, + }, Subject: "test", Token: opts.Authorization, } diff --git a/internal/event/target/nats_tls_contrib_test.go b/internal/event/target/nats_tls_contrib_test.go index e850cee4d..4bbcc0bb0 100644 --- a/internal/event/target/nats_tls_contrib_test.go +++ b/internal/event/target/nats_tls_contrib_test.go @@ -31,9 +31,11 @@ func TestNatsConnTLSCustomCA(t *testing.T) { clientConfig := &NATSArgs{ Enable: true, - Address: xnet.Host{Name: "localhost", + Address: xnet.Host{ + Name: "localhost", Port: (xnet.Port(opts.Port)), - IsPortSet: true}, + IsPortSet: true, + }, Subject: "test", Secure: true, CertAuthority: path.Join("testdata", "contrib", "certs", "root_ca_cert.pem"), @@ -52,9 +54,11 @@ func TestNatsConnTLSClientAuthorization(t *testing.T) { clientConfig := &NATSArgs{ Enable: true, - Address: xnet.Host{Name: "localhost", + Address: xnet.Host{ + Name: "localhost", Port: (xnet.Port(opts.Port)), - IsPortSet: true}, + IsPortSet: true, + }, Subject: "test", Secure: true, CertAuthority: path.Join("testdata", "contrib", "certs", "root_ca_cert.pem"), diff --git a/internal/event/target/postgresql.go b/internal/event/target/postgresql.go index d7565d40d..a865d5fd9 100644 --- a/internal/event/target/postgresql.go +++ b/internal/event/target/postgresql.go @@ -296,7 +296,6 @@ func (target *PostgreSQLTarget) Close() error { // Executes the table creation statements. func (target *PostgreSQLTarget) executeStmts() error { - _, err := target.db.Exec(fmt.Sprintf(psqlTableExists, target.args.Table)) if err != nil { createStmt := psqlCreateNamespaceTable diff --git a/internal/event/target/queuestore.go b/internal/event/target/queuestore.go index 0a0622e04..0ac1ccecd 100644 --- a/internal/event/target/queuestore.go +++ b/internal/event/target/queuestore.go @@ -59,7 +59,7 @@ func (store *QueueStore) Open() error { store.Lock() defer store.Unlock() - if err := os.MkdirAll(store.directory, os.FileMode(0770)); err != nil { + if err := os.MkdirAll(store.directory, os.FileMode(0o770)); err != nil { return err } @@ -80,7 +80,6 @@ func (store *QueueStore) Open() error { // write - writes event to the directory. func (store *QueueStore) write(key string, e event.Event) error { - // Marshalls the event. 
eventData, err := json.Marshal(e) if err != nil { @@ -88,7 +87,7 @@ func (store *QueueStore) write(key string, e event.Event) error { } path := filepath.Join(store.directory, key+eventExt) - if err := ioutil.WriteFile(path, eventData, os.FileMode(0770)); err != nil { + if err := ioutil.WriteFile(path, eventData, os.FileMode(0o770)); err != nil { return err } diff --git a/internal/event/target/queuestore_test.go b/internal/event/target/queuestore_test.go index 79751a3d6..f82395a71 100644 --- a/internal/event/target/queuestore_test.go +++ b/internal/event/target/queuestore_test.go @@ -57,7 +57,6 @@ func TestQueueStorePut(t *testing.T) { store, err := setUpStore(queueDir, 100) if err != nil { t.Fatal("Failed to create a queue store ", err) - } // Put 100 events. for i := 0; i < 100; i++ { diff --git a/internal/handlers/proxy_test.go b/internal/handlers/proxy_test.go index 240b9d6be..f014daf7f 100644 --- a/internal/handlers/proxy_test.go +++ b/internal/handlers/proxy_test.go @@ -45,7 +45,8 @@ func TestGetScheme(t *testing.T) { req := &http.Request{ Header: http.Header{ v.key: []string{v.val}, - }} + }, + } res := GetSourceScheme(req) if res != v.expected { t.Errorf("wrong header for %s: got %s want %s", v.key, res, @@ -74,7 +75,8 @@ func TestGetSourceIP(t *testing.T) { req := &http.Request{ Header: http.Header{ v.key: []string{v.val}, - }} + }, + } res := GetSourceIP(req) if res != v.expected { t.Errorf("wrong header for %s: got %s want %s", v.key, res, diff --git a/internal/http/dial_linux.go b/internal/http/dial_linux.go index bc05ef614..2ced89899 100644 --- a/internal/http/dial_linux.go +++ b/internal/http/dial_linux.go @@ -51,7 +51,6 @@ func setTCPParameters(network, address string, c syscall.RawConn) error { // Enable TCP quick ACK, John Nagle says // "Set TCP_QUICKACK. If you find a case where that makes things worse, let me know." 
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_QUICKACK, 1) - }) return nil } diff --git a/internal/http/listener.go b/internal/http/listener.go index 56c95d7bb..76c297862 100644 --- a/internal/http/listener.go +++ b/internal/http/listener.go @@ -122,7 +122,6 @@ func (listener *httpListener) Addrs() (addrs []net.Addr) { // * listen to multiple addresses // * controls incoming connections only doing HTTP protocol func newHTTPListener(ctx context.Context, serverAddrs []string) (listener *httpListener, err error) { - var tcpListeners []*net.TCPListener // Close all opened listeners on error diff --git a/internal/ioutil/append-file_nix.go b/internal/ioutil/append-file_nix.go index 757d2251b..3f53fb1b7 100644 --- a/internal/ioutil/append-file_nix.go +++ b/internal/ioutil/append-file_nix.go @@ -31,7 +31,7 @@ func AppendFile(dst string, src string, osync bool) error { if osync { flags |= os.O_SYNC } - appendFile, err := os.OpenFile(dst, flags, 0666) + appendFile, err := os.OpenFile(dst, flags, 0o666) if err != nil { return err } diff --git a/internal/ioutil/append-file_windows.go b/internal/ioutil/append-file_windows.go index d916bb218..9a27eb933 100644 --- a/internal/ioutil/append-file_windows.go +++ b/internal/ioutil/append-file_windows.go @@ -26,13 +26,13 @@ import ( // AppendFile - appends the file "src" to the file "dst" func AppendFile(dst string, src string, osync bool) error { - appendFile, err := lock.Open(dst, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + appendFile, err := lock.Open(dst, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) if err != nil { return err } defer appendFile.Close() - srcFile, err := lock.Open(src, os.O_RDONLY, 0666) + srcFile, err := lock.Open(src, os.O_RDONLY, 0o666) if err != nil { return err } diff --git a/internal/ioutil/ioutil_test.go b/internal/ioutil/ioutil_test.go index b9d0e1f50..bd192cff6 100644 --- a/internal/ioutil/ioutil_test.go +++ b/internal/ioutil/ioutil_test.go @@ -159,7 +159,7 @@ func TestSameFile(t *testing.T) { if !SameFile(fi1, fi2) { t.Fatal("Expected the files to be same") } - if err = goioutil.WriteFile(tmpFile, []byte("aaa"), 0644); err != nil { + if err = goioutil.WriteFile(tmpFile, []byte("aaa"), 0o644); err != nil { t.Fatal(err) } fi2, err = os.Stat(tmpFile) diff --git a/internal/ioutil/read_file.go b/internal/ioutil/read_file.go index e7cc7c3e3..cf487f760 100644 --- a/internal/ioutil/read_file.go +++ b/internal/ioutil/read_file.go @@ -31,11 +31,11 @@ import ( // // passes NOATIME flag for reads on Unix systems to avoid atime updates. 
func ReadFile(name string) ([]byte, error) { - f, err := disk.OpenFileDirectIO(name, readMode, 0666) + f, err := disk.OpenFileDirectIO(name, readMode, 0o666) if err != nil { // fallback if there is an error to read // 'name' with O_DIRECT - f, err = os.OpenFile(name, readMode, 0666) + f, err = os.OpenFile(name, readMode, 0o666) if err != nil { return nil, err } } diff --git a/internal/jwt/parser.go b/internal/jwt/parser.go index 30b416755..62e4138b1 100644 --- a/internal/jwt/parser.go +++ b/internal/jwt/parser.go @@ -294,7 +294,7 @@ func (c *MapClaims) Map() map[string]interface{} { // MarshalJSON marshals the MapClaims struct func (c *MapClaims) MarshalJSON() ([]byte, error) { - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary return json.Marshal(c.MapClaims) } @@ -493,7 +493,7 @@ func ParseUnverifiedMapClaims(token []byte, claims *MapClaims, buf []byte) (*Sig return nil, &jwtgo.ValidationError{Inner: err, Errors: jwtgo.ValidationErrorMalformed} } - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(buf[:n], &claims.MapClaims); err != nil { return nil, &jwtgo.ValidationError{Inner: err, Errors: jwtgo.ValidationErrorMalformed} } diff --git a/internal/kms/context.go b/internal/kms/context.go index 34f4edd6b..fd9c87e70 100644 --- a/internal/kms/context.go +++ b/internal/kms/context.go @@ -45,7 +45,7 @@ func (c Context) MarshalText() ([]byte, error) { // Pre-allocate a buffer - 128 bytes is an arbitrary // heuristic value that seems like a good starting size. - var b = bytes.NewBuffer(make([]byte, 0, 128)) + b := bytes.NewBuffer(make([]byte, 0, 128)) if len(c) == 1 { for k, v := range c { b.WriteString(`{"`) diff --git a/internal/kms/kes.go b/internal/kms/kes.go index b98de7de2..5227f0f2f 100644 --- a/internal/kms/kes.go +++ b/internal/kms/kes.go @@ -55,7 +55,7 @@ func NewWithConfig(config Config) (KMS, error) { if len(config.Endpoints) == 0 { return nil, errors.New("kms: no server endpoints") } - var endpoints = make([]string, len(config.Endpoints)) // Copy => avoid being affect by any changes to the original slice + endpoints := make([]string, len(config.Endpoints)) // Copy => avoid being affected by any changes to the original slice copy(endpoints, config.Endpoints) client := kes.NewClientWithConfig("", &tls.Config{ @@ -85,7 +85,7 @@ func (c *kesClient) Stat() (Status, error) { if _, err := c.client.Version(ctx); err != nil { return Status{}, err } - var endpoints = make([]string, len(c.client.Endpoints)) + endpoints := make([]string, len(c.client.Endpoints)) copy(endpoints, c.client.Endpoints) return Status{ Name: "KES", diff --git a/internal/kms/kms.go b/internal/kms/kms.go index 92d645f0f..d1eb8c4b5 100644 --- a/internal/kms/kms.go +++ b/internal/kms/kms.go @@ -108,7 +108,7 @@ func (d *DEK) UnmarshalText(text []byte) error { Ciphertext []byte `json:"ciphertext"` } var v JSON - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(text, &v); err != nil { return err } diff --git a/internal/kms/single-key.go b/internal/kms/single-key.go index 29d7f8713..8cafd429d 100644 --- a/internal/kms/single-key.go +++ b/internal/kms/single-key.go @@ -46,7 +46,7 @@ func Parse(s string) (KMS, error) { return nil, errors.New("kms: invalid master key format") } - var keyID, b64Key = v[0], v[1] + keyID, b64Key := v[0], v[1] key, err := base64.StdEncoding.DecodeString(b64Key) if err !=
nil { return nil, err @@ -152,7 +152,7 @@ func (kms secretKey) GenerateKey(keyID string, context Context) (DEK, error) { associatedData, _ := context.MarshalText() ciphertext := aead.Seal(nil, nonce, plaintext, associatedData) - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary ciphertext, err = json.Marshal(encryptedKey{ Algorithm: algorithm, IV: iv, @@ -175,7 +175,7 @@ func (kms secretKey) DecryptKey(keyID string, ciphertext []byte, context Context } var encryptedKey encryptedKey - var json = jsoniter.ConfigCompatibleWithStandardLibrary + json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(ciphertext, &encryptedKey); err != nil { return nil, err } diff --git a/internal/lock/lock.go b/internal/lock/lock.go index d0727596b..c480a53e6 100644 --- a/internal/lock/lock.go +++ b/internal/lock/lock.go @@ -25,10 +25,8 @@ import ( "sync" ) -var ( - // ErrAlreadyLocked is returned if the underlying fd is already locked. - ErrAlreadyLocked = errors.New("file already locked") -) +// ErrAlreadyLocked is returned if the underlying fd is already locked. +var ErrAlreadyLocked = errors.New("file already locked") // RLockedFile represents a read locked file, implements a special // closer which only closes the associated *os.File when the ref count. @@ -87,13 +85,12 @@ func newRLockedFile(lkFile *LockedFile) (*RLockedFile, error) { // RLockedOpenFile - returns a wrapped read locked file, if the file // doesn't exist at path returns an error. func RLockedOpenFile(path string) (*RLockedFile, error) { - lkFile, err := LockedOpenFile(path, os.O_RDONLY, 0666) + lkFile, err := LockedOpenFile(path, os.O_RDONLY, 0o666) if err != nil { return nil, err } return newRLockedFile(lkFile) - } // LockedFile represents a locked file diff --git a/internal/lock/lock_solaris.go b/internal/lock/lock_solaris.go index 8d7bcfd58..b2ab4dc3d 100644 --- a/internal/lock/lock_solaris.go +++ b/internal/lock/lock_solaris.go @@ -47,7 +47,7 @@ func lockedOpenFile(path string, flag int, perm os.FileMode, rlockType int) (*Lo } } - var lock = syscall.Flock_t{ + lock := syscall.Flock_t{ Start: 0, Len: 0, Pid: 0, diff --git a/internal/lock/lock_test.go b/internal/lock/lock_test.go index 0ebd5c5d2..784b5eaa4 100644 --- a/internal/lock/lock_test.go +++ b/internal/lock/lock_test.go @@ -38,7 +38,7 @@ func TestLockFail(t *testing.T) { } }() - _, err = LockedOpenFile(f.Name(), os.O_APPEND, 0600) + _, err = LockedOpenFile(f.Name(), os.O_APPEND, 0o600) if err == nil { t.Fatal("Should fail here") } @@ -57,7 +57,7 @@ func TestLockDirFail(t *testing.T) { } }() - _, err = LockedOpenFile(d, os.O_APPEND, 0600) + _, err = LockedOpenFile(d, os.O_APPEND, 0o600) if err == nil { t.Fatal("Should fail here") } @@ -141,7 +141,7 @@ func TestLockAndUnlock(t *testing.T) { }() // lock the file - l, err := LockedOpenFile(f.Name(), os.O_WRONLY, 0600) + l, err := LockedOpenFile(f.Name(), os.O_WRONLY, 0o600) if err != nil { t.Fatal(err) } @@ -152,7 +152,7 @@ func TestLockAndUnlock(t *testing.T) { } // try lock the unlocked file - dupl, err := LockedOpenFile(f.Name(), os.O_WRONLY|os.O_CREATE, 0600) + dupl, err := LockedOpenFile(f.Name(), os.O_WRONLY|os.O_CREATE, 0o600) if err != nil { t.Errorf("err = %v, want %v", err, nil) } @@ -160,7 +160,7 @@ func TestLockAndUnlock(t *testing.T) { // blocking on locked file locked := make(chan struct{}, 1) go func() { - bl, blerr := LockedOpenFile(f.Name(), os.O_WRONLY, 0600) + bl, blerr := LockedOpenFile(f.Name(), os.O_WRONLY, 0o600) if blerr != nil { 
t.Error(blerr) return diff --git a/internal/lock/lock_windows.go b/internal/lock/lock_windows.go index 6d0deb3ae..dccca6779 100644 --- a/internal/lock/lock_windows.go +++ b/internal/lock/lock_windows.go @@ -251,7 +251,7 @@ func lockFile(fd syscall.Handle, flags uint32) error { } func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - var reserved = uint32(0) + reserved := uint32(0) r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) if r1 == 0 { diff --git a/internal/logger/config.go b/internal/logger/config.go index 76d375902..0119f46ab 100644 --- a/internal/logger/config.go +++ b/internal/logger/config.go @@ -277,7 +277,6 @@ func lookupLegacyConfig() (Config, error) { } return cfg, nil - } // GetAuditKafka - returns a map of registered notification 'kafka' targets diff --git a/internal/logger/console.go b/internal/logger/console.go index b85c6f73b..a3eb98263 100644 --- a/internal/logger/console.go +++ b/internal/logger/console.go @@ -67,8 +67,7 @@ func fatal(err error, msg string, data ...interface{}) { var fatalMessage fatalMsg -type fatalMsg struct { -} +type fatalMsg struct{} func (f fatalMsg) json(msg string, args ...interface{}) { var message string @@ -89,7 +88,6 @@ func (f fatalMsg) json(msg string, args ...interface{}) { fmt.Println(string(logJSON)) os.Exit(1) - } func (f fatalMsg) quiet(msg string, args ...interface{}) { @@ -224,8 +222,7 @@ func Info(msg string, data ...interface{}) { var startupMessage startUpMsg -type startUpMsg struct { -} +type startUpMsg struct{} func (s startUpMsg) json(msg string, args ...interface{}) { } diff --git a/internal/logger/logger.go b/internal/logger/logger.go index a38d63fcd..39c6d1714 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -35,10 +35,8 @@ import ( "github.com/minio/minio/internal/logger/message/log" ) -var ( - // HighwayHash key for logging in anonymous mode - magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0") -) +// HighwayHash key for logging in anonymous mode +var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0") // Disable disables all logging, false by default. (used for "go test") var Disable = false @@ -159,7 +157,6 @@ func SetDeploymentID(deploymentID string) { // This is done to clean up the filename, when stack trace is // displayed when an error happens. 
func Init(goPath string, goRoot string) { - var goPathList []string var goRootList []string var defaultgoPathList []string @@ -377,7 +374,6 @@ func logIf(ctx context.Context, err error, errKind ...interface{}) { if err := t.Send(entry, entry.LogKind); err != nil { LogAlwaysIf(context.Background(), fmt.Errorf("event(%v) was not sent to Logger target (%v): %v", entry, t, err), entry.LogKind) } - } } diff --git a/internal/logger/message/audit/entry.go b/internal/logger/message/audit/entry.go index 527e57742..fcc52c394 100644 --- a/internal/logger/message/audit/entry.go +++ b/internal/logger/message/audit/entry.go @@ -67,7 +67,6 @@ func NewEntry(deploymentID string) Entry { // ToEntry - constructs an audit entry from a http request func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, deploymentID string) Entry { - entry := NewEntry(deploymentID) entry.RemoteHost = handlers.GetSourceIP(r) diff --git a/internal/logger/target/console/console.go b/internal/logger/target/console/console.go index eea12df5c..9aec6de34 100644 --- a/internal/logger/target/console/console.go +++ b/internal/logger/target/console/console.go @@ -124,8 +124,8 @@ func (c *Target) Send(e interface{}, logKind string) error { tagString = "\n " + tagString } - var msg = color.FgRed(color.Bold(entry.Trace.Message)) - var output = fmt.Sprintf("\n%s\n%s%s%s%s%s%s\nError: %s%s\n%s", + msg := color.FgRed(color.Bold(entry.Trace.Message)) + output := fmt.Sprintf("\n%s\n%s%s%s%s%s%s\nError: %s%s\n%s", apiString, timeString, deploymentID, requestID, remoteHost, host, userAgent, msg, tagString, strings.Join(trace, "\n")) diff --git a/internal/logger/utils.go b/internal/logger/utils.go index 301cf0d63..4c503489c 100644 --- a/internal/logger/utils.go +++ b/internal/logger/utils.go @@ -29,7 +29,7 @@ var ansiRE = regexp.MustCompile("(\x1b[^m]*m)") // Print ANSI Control escape func ansiEscape(format string, args ...interface{}) { - var Esc = "\x1b" + Esc := "\x1b" fmt.Printf("%s%s", Esc, fmt.Sprintf(format, args...)) } @@ -58,5 +58,4 @@ func ansiRestoreAttributes() { if color.IsTerminal() { ansiEscape("8") } - } diff --git a/internal/lsync/lrwmutex.go b/internal/lsync/lrwmutex.go index 19625d6c6..30442a9a0 100644 --- a/internal/lsync/lrwmutex.go +++ b/internal/lsync/lrwmutex.go @@ -44,14 +44,12 @@ func NewLRWMutex() *LRWMutex { // If the lock is already in use, the calling go routine // blocks until the mutex is available. func (lm *LRWMutex) Lock() { - const isWriteLock = true lm.lockLoop(context.Background(), lm.id, lm.source, math.MaxInt64, isWriteLock) } // GetLock tries to get a write lock on lm before the timeout occurs. func (lm *LRWMutex) GetLock(ctx context.Context, id string, source string, timeout time.Duration) (locked bool) { - const isWriteLock = true return lm.lockLoop(ctx, id, source, timeout, isWriteLock) } @@ -61,14 +59,12 @@ func (lm *LRWMutex) GetLock(ctx context.Context, id string, source string, timeo // If one or more read lock are already in use, it will grant another lock. // Otherwise the calling go routine blocks until the mutex is available. func (lm *LRWMutex) RLock() { - const isWriteLock = false lm.lockLoop(context.Background(), lm.id, lm.source, 1<<63-1, isWriteLock) } // GetRLock tries to get a read lock on lm before the timeout occurs. 
func (lm *LRWMutex) GetRLock(ctx context.Context, id string, source string, timeout time.Duration) (locked bool) { - const isWriteLock = false return lm.lockLoop(ctx, id, source, timeout, isWriteLock) } @@ -128,7 +124,6 @@ func (lm *LRWMutex) lockLoop(ctx context.Context, id, source string, timeout tim // // It is a run-time error if lm is not locked on entry to Unlock. func (lm *LRWMutex) Unlock() { - isWriteLock := true success := lm.unlock(isWriteLock) if !success { @@ -140,7 +135,6 @@ func (lm *LRWMutex) Unlock() { // // It is a run-time error if lm is not locked on entry to RUnlock. func (lm *LRWMutex) RUnlock() { - isWriteLock := false success := lm.unlock(isWriteLock) if !success { @@ -178,7 +172,6 @@ func (lm *LRWMutex) ForceUnlock() { lm.ref = 0 lm.isWriteLock = false - } // DRLocker returns a sync.Locker interface that implements diff --git a/internal/lsync/lrwmutex_test.go b/internal/lsync/lrwmutex_test.go index 0592044a3..8ad5eda7f 100644 --- a/internal/lsync/lrwmutex_test.go +++ b/internal/lsync/lrwmutex_test.go @@ -20,18 +20,16 @@ package lsync_test import ( "context" "fmt" + "runtime" "sync" "sync/atomic" "testing" "time" - "runtime" - . "github.com/minio/minio/internal/lsync" ) func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) { - ctx := context.Background() lrwm := NewLRWMutex() @@ -89,7 +87,6 @@ func TestSimpleWriteLockTimedOut(t *testing.T) { } func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) { - ctx := context.Background() lrwm := NewLRWMutex() @@ -124,7 +121,6 @@ func TestDualWriteLockAcquired(t *testing.T) { if locked != expected { t.Errorf("TestDualWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked) } - } func TestDualWriteLockTimedOut(t *testing.T) { @@ -134,7 +130,6 @@ func TestDualWriteLockTimedOut(t *testing.T) { if locked != expected { t.Errorf("TestDualWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked) } - } // Test cases below are copied 1 to 1 from sync/rwmutex_test.go (adapted to use LRWMutex) diff --git a/internal/mountinfo/mountinfo_linux.go b/internal/mountinfo/mountinfo_linux.go index c2db4c05e..4217a81a3 100644 --- a/internal/mountinfo/mountinfo_linux.go +++ b/internal/mountinfo/mountinfo_linux.go @@ -126,7 +126,7 @@ func readProcMounts(mountFilePath string) (mountInfos, error) { } func parseMountFrom(file io.Reader) (mountInfos, error) { - var mounts = mountInfos{} + mounts := mountInfos{} scanner := bufio.NewReader(file) for { line, err := scanner.ReadString('\n') diff --git a/internal/mountinfo/mountinfo_linux_test.go b/internal/mountinfo/mountinfo_linux_test.go index 3a49bd9c3..dab578497 100644 --- a/internal/mountinfo/mountinfo_linux_test.go +++ b/internal/mountinfo/mountinfo_linux_test.go @@ -32,8 +32,7 @@ import ( // Tests cross device mount verification function, for both failure // and success cases. func TestCrossDeviceMountPaths(t *testing.T) { - successCase := - `/dev/0 /path/to/0/1 type0 flags 0 0 + successCase := `/dev/0 /path/to/0/1 type0 flags 0 0 /dev/1 /path/to/1 type1 flags 1 1 /dev/2 /path/to/1/2 type2 flags,1,2=3 2 2 /dev/3 /path/to/1.1 type3 falgs,1,2=3 3 3 @@ -44,12 +43,12 @@ func TestCrossDeviceMountPaths(t *testing.T) { } defer os.RemoveAll(dir) mountsPath := filepath.Join(dir, "mounts") - if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0666); err != nil { + if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil { t.Fatal(err) } // Failure case where we detected successfully cross device mounts. 
{ - var absPaths = []string{"/path/to/1"} + absPaths := []string{"/path/to/1"} if err = checkCrossDevice(absPaths, mountsPath); err == nil { t.Fatal("Expected to fail, but found success") } @@ -64,7 +63,7 @@ func TestCrossDeviceMountPaths(t *testing.T) { } // Failure case when input path is not absolute. { - var absPaths = []string{"."} + absPaths := []string{"."} if err = checkCrossDevice(absPaths, mountsPath); err == nil { t.Fatal("Expected to fail for non absolute paths") } @@ -75,7 +74,7 @@ func TestCrossDeviceMountPaths(t *testing.T) { } // Success case, where path doesn't have any mounts. { - var absPaths = []string{"/path/to/x"} + absPaths := []string{"/path/to/x"} if err = checkCrossDevice(absPaths, mountsPath); err != nil { t.Fatalf("Expected success, failed instead (%s)", err) } @@ -85,8 +84,7 @@ func TestCrossDeviceMountPaths(t *testing.T) { // Tests cross device mount verification function, for both failure // and success cases. func TestCrossDeviceMount(t *testing.T) { - successCase := - `/dev/0 /path/to/0/1 type0 flags 0 0 + successCase := `/dev/0 /path/to/0/1 type0 flags 0 0 /dev/1 /path/to/1 type1 flags 1 1 /dev/2 /path/to/1/2 type2 flags,1,2=3 2 2 /dev/3 /path/to/1.1 type3 falgs,1,2=3 3 3 @@ -97,7 +95,7 @@ func TestCrossDeviceMount(t *testing.T) { } defer os.RemoveAll(dir) mountsPath := filepath.Join(dir, "mounts") - if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0666); err != nil { + if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil { t.Fatal(err) } mounts, err := readProcMounts(mountsPath) @@ -138,8 +136,7 @@ func TestCrossDeviceMount(t *testing.T) { // Tests read proc mounts file. func TestReadProcmountInfos(t *testing.T) { - successCase := - `/dev/0 /path/to/0 type0 flags 0 0 + successCase := `/dev/0 /path/to/0 type0 flags 0 0 /dev/1 /path/to/1 type1 flags 1 1 /dev/2 /path/to/2 type2 flags,1,2=3 2 2 ` @@ -150,7 +147,7 @@ func TestReadProcmountInfos(t *testing.T) { defer os.RemoveAll(dir) mountsPath := filepath.Join(dir, "mounts") - if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0666); err != nil { + if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil { t.Fatal(err) } // Verifies if reading each line worked properly. @@ -186,8 +183,7 @@ func TestReadProcmountInfos(t *testing.T) { // Tests read proc mounts reader. func TestReadProcMountFrom(t *testing.T) { - successCase := - `/dev/0 /path/to/0 type0 flags 0 0 + successCase := `/dev/0 /path/to/0 type0 flags 0 0 /dev/1 /path/to/1 type1 flags 1 1 /dev/2 /path/to/2 type2 flags,1,2=3 2 2 ` diff --git a/internal/s3select/csv/args.go b/internal/s3select/csv/args.go index ae126f2cf..a9cc46cea 100644 --- a/internal/s3select/csv/args.go +++ b/internal/s3select/csv/args.go @@ -142,7 +142,6 @@ func (args *WriterArgs) IsEmpty() bool { // UnmarshalXML - decodes XML data. func (args *WriterArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - args.QuoteFields = asneeded args.RecordDelimiter = defaultRecordDelimiter args.FieldDelimiter = defaultFieldDelimiter diff --git a/internal/s3select/csv/reader.go b/internal/s3select/csv/reader.go index 452484aa2..b1afbacb5 100644 --- a/internal/s3select/csv/reader.go +++ b/internal/s3select/csv/reader.go @@ -277,7 +277,6 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error { }() } return nil - } // NewReader - creates new CSV reader using readCloser. 
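The mountinfo test hunks above only reflow the /proc/mounts fixtures without changing them; the format itself is six space-separated fields per line (device, mount point, filesystem type, options, and two dump/pass fields). A small sketch of parsing that format follows, assuming well-formed input and keeping only the first four fields; the names parseMounts and mountInfo are hypothetical, while the sample data is taken from the fixtures above.

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// mountInfo holds the fields of one /proc/mounts line that matter here
// (hypothetical type for this sketch).
type mountInfo struct {
	Device, Path, FSType, Options string
}

// parseMounts reads /proc/mounts-style lines with bufio.Reader.ReadString,
// the same primitive parseMountFrom above is built on.
func parseMounts(r *bufio.Reader) ([]mountInfo, error) {
	var mounts []mountInfo
	for {
		line, err := r.ReadString('\n')
		if fields := strings.Fields(line); len(fields) >= 4 {
			mounts = append(mounts, mountInfo{fields[0], fields[1], fields[2], fields[3]})
		}
		if err == io.EOF {
			return mounts, nil // ReadString hands back any final partial line before EOF
		}
		if err != nil {
			return nil, err
		}
	}
}

func main() {
	sample := "/dev/0 /path/to/0 type0 flags 0 0\n/dev/1 /path/to/1 type1 flags 1 1\n"
	mounts, _ := parseMounts(bufio.NewReader(strings.NewReader(sample)))
	fmt.Println(mounts)
}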
diff --git a/internal/s3select/csv/reader_contrib_test.go b/internal/s3select/csv/reader_contrib_test.go index 903b24582..9cbac8066 100644 --- a/internal/s3select/csv/reader_contrib_test.go +++ b/internal/s3select/csv/reader_contrib_test.go @@ -219,7 +219,6 @@ func TestReadExtended(t *testing.T) { for i, c := range cases { t.Run(c.file, func(t *testing.T) { - var err error var record sql.Record var result bytes.Buffer @@ -435,7 +434,6 @@ func TestReadFailures(t *testing.T) { for i, c := range cases { t.Run(c.file, func(t *testing.T) { - var err error var record sql.Record var result bytes.Buffer diff --git a/internal/s3select/message.go b/internal/s3select/message.go index efad61985..61931920f 100644 --- a/internal/s3select/message.go +++ b/internal/s3select/message.go @@ -71,9 +71,7 @@ const ( maxRecordMessageLength = (128 << 10) - 256 ) -var ( - bufLength = payloadLenForMsgLen(maxRecordMessageLength) -) +var bufLength = payloadLenForMsgLen(maxRecordMessageLength) // newRecordsMessage - creates new Records Message which can contain a single record, partial records, // or multiple records. Depending on the size of the result, a response can contain one or more of these messages. diff --git a/internal/s3select/select.go b/internal/s3select/select.go index 2fc624254..d66c7f678 100644 --- a/internal/s3select/select.go +++ b/internal/s3select/select.go @@ -229,9 +229,7 @@ type S3Select struct { close func() error } -var ( - legacyXMLName = "SelectObjectContentRequest" -) +var legacyXMLName = "SelectObjectContentRequest" // UnmarshalXML - decodes XML data. func (s3Select *S3Select) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { diff --git a/internal/s3select/select_benchmark_test.go b/internal/s3select/select_benchmark_test.go index 15aac02cb..c4a6bdb73 100644 --- a/internal/s3select/select_benchmark_test.go +++ b/internal/s3select/select_benchmark_test.go @@ -61,8 +61,7 @@ func genSampleCSVData(count int) []byte { return buf.Bytes() } -type nullResponseWriter struct { -} +type nullResponseWriter struct{} func (w *nullResponseWriter) Header() http.Header { return nil @@ -79,7 +78,7 @@ func (w *nullResponseWriter) Flush() { } func benchmarkSelect(b *testing.B, count int, query string) { - var requestXML = []byte(` + requestXML := []byte(` ` + query + ` diff --git a/internal/s3select/select_test.go b/internal/s3select/select_test.go index c46183ebb..5d4a5133c 100644 --- a/internal/s3select/select_test.go +++ b/internal/s3select/select_test.go @@ -61,7 +61,7 @@ func TestJSONQueries(t *testing.T) { {"id": 2,"title": "Second Record","desc": "another text","numbers": [2, 3.0, 4]} {"id": 3,"title": "Second Record","desc": "another text","nested": [[2, 3.0, 4], [7, 8.5, 9]]}` - var testTable = []struct { + testTable := []struct { name string query string requestXML []byte // override request XML @@ -100,14 +100,16 @@ func TestJSONQueries(t *testing.T) { wantResult: `{"id":0} {"id":1} {"id":2} -{"id":3}`}, +{"id":3}`, + }, { name: "bignum-2", query: `SELECT id from s3object s WHERE s.id >= -9223372036854775808`, wantResult: `{"id":0} {"id":1} {"id":2} -{"id":3}`}, +{"id":3}`, + }, { name: "donatello-3", query: `SELECT * from s3object s WHERE 'value' IN s.synonyms[*]`, @@ -603,7 +605,7 @@ func TestCSVQueries(t *testing.T) { input := `index,ID,CaseNumber,Date,Day,Month,Year,Block,IUCR,PrimaryType,Description,LocationDescription,Arrest,Domestic,Beat,District,Ward,CommunityArea,FBI Code,XCoordinate,YCoordinate,UpdatedOn,Latitude,Longitude,Location 2700763,7732229,,2010-05-26 
00:00:00,26,May,2010,113XX S HALSTED ST,1150,,CREDIT CARD FRAUD,,False,False,2233,22.0,34.0,,11,,,,41.688043288,-87.6422444,"(41.688043288, -87.6422444)"` - var testTable = []struct { + testTable := []struct { name string query string requestXML []byte @@ -687,7 +689,7 @@ func TestCSVQueries2(t *testing.T) { 1,2010-01-01T,7867786,4565.908123,"a text, with comma" 2,2017-01-02T03:04Z,-5, 0.765111, `) - var testTable = []struct { + testTable := []struct { name string query string input []byte @@ -872,7 +874,7 @@ func TestCSVQueries3(t *testing.T) { apple,1,true mango,3,false ` - var testTable = []struct { + testTable := []struct { name string query string requestXML []byte // override request XML @@ -1014,7 +1016,7 @@ true`, } func TestCSVInput(t *testing.T) { - var testTable = []struct { + testTable := []struct { requestXML []byte expectedResult []byte }{ @@ -1114,7 +1116,7 @@ func TestCSVInput(t *testing.T) { }, } - var csvData = []byte(`one,two,three + csvData := []byte(`one,two,three -1,foo,true ,bar,false 2.5,baz,true @@ -1158,12 +1160,10 @@ func TestCSVInput(t *testing.T) { } }) } - } func TestJSONInput(t *testing.T) { - - var testTable = []struct { + testTable := []struct { requestXML []byte expectedResult []byte }{ @@ -1239,7 +1239,7 @@ func TestJSONInput(t *testing.T) { }, } - var jsonData = []byte(`{"three":true,"two":"foo","one":-1} + jsonData := []byte(`{"three":true,"two":"foo","one":-1} {"three":false,"two":"bar","one":null} {"three":true,"two":"baz","one":2.5} `) @@ -1288,7 +1288,7 @@ func TestParquetInput(t *testing.T) { os.Setenv("MINIO_API_SELECT_PARQUET", "on") defer os.Setenv("MINIO_API_SELECT_PARQUET", "off") - var testTable = []struct { + testTable := []struct { requestXML []byte expectedResult []byte }{ @@ -1405,7 +1405,7 @@ func TestParquetInputSchema(t *testing.T) { os.Setenv("MINIO_API_SELECT_PARQUET", "on") defer os.Setenv("MINIO_API_SELECT_PARQUET", "off") - var testTable = []struct { + testTable := []struct { requestXML []byte wantResult string }{ @@ -1517,7 +1517,6 @@ func TestParquetInputSchema(t *testing.T) { if !reflect.DeepEqual(gotS, testCase.wantResult) { t.Errorf("received response does not match with expected reply. Query: %s\ngot: %s\nwant:%s", testCase.requestXML, gotS, testCase.wantResult) } - }) } } @@ -1526,7 +1525,7 @@ func TestParquetInputSchemaCSV(t *testing.T) { os.Setenv("MINIO_API_SELECT_PARQUET", "on") defer os.Setenv("MINIO_API_SELECT_PARQUET", "off") - var testTable = []struct { + testTable := []struct { requestXML []byte wantResult string }{ @@ -1636,7 +1635,6 @@ func TestParquetInputSchemaCSV(t *testing.T) { if !reflect.DeepEqual(gotS, testCase.wantResult) { t.Errorf("received response does not match with expected reply. 
Query: %s\ngot: %s\nwant:%s", testCase.requestXML, gotS, testCase.wantResult) } - }) } } diff --git a/internal/s3select/simdj/reader_amd64_test.go b/internal/s3select/simdj/reader_amd64_test.go index be6c770a6..26ffadfea 100644 --- a/internal/s3select/simdj/reader_amd64_test.go +++ b/internal/s3select/simdj/reader_amd64_test.go @@ -67,7 +67,6 @@ func TestNDJSON(t *testing.T) { } for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { ref := loadCompressed(t, tt.name) diff --git a/internal/s3select/sql/parser_test.go b/internal/s3select/sql/parser_test.go index ff0ed3a84..b1af2fcbc 100644 --- a/internal/s3select/sql/parser_test.go +++ b/internal/s3select/sql/parser_test.go @@ -84,7 +84,6 @@ func TestJSONPath(t *testing.T) { } // repr.Println(j, repr.Indent(" "), repr.OmitEmpty(true)) } - } func TestIdentifierParsing(t *testing.T) { @@ -342,7 +341,6 @@ func TestFromClauseJSONPath(t *testing.T) { // repr.Println(s, repr.Indent(" "), repr.OmitEmpty(true)) } - } func TestSelectParsing(t *testing.T) { diff --git a/internal/s3select/sql/statement.go b/internal/s3select/sql/statement.go index 62a630d0b..9c500506c 100644 --- a/internal/s3select/sql/statement.go +++ b/internal/s3select/sql/statement.go @@ -26,9 +26,7 @@ import ( "github.com/minio/simdjson-go" ) -var ( - errBadLimitSpecified = errors.New("Limit value must be a positive integer") -) +var errBadLimitSpecified = errors.New("Limit value must be a positive integer") const ( baseTableName = "s3object" diff --git a/internal/s3select/sql/timestampfuncs.go b/internal/s3select/sql/timestampfuncs.go index 006f1dd5a..4622f992a 100644 --- a/internal/s3select/sql/timestampfuncs.go +++ b/internal/s3select/sql/timestampfuncs.go @@ -30,16 +30,14 @@ const ( layoutNanosecond = "2006-01-02T15:04:05.999999999Z07:00" ) -var ( - tformats = []string{ - layoutYear, - layoutMonth, - layoutDay, - layoutMinute, - layoutSecond, - layoutNanosecond, - } -) +var tformats = []string{ + layoutYear, + layoutMonth, + layoutDay, + layoutMinute, + layoutSecond, + layoutNanosecond, +} func parseSQLTimestamp(s string) (t time.Time, err error) { for _, f := range tformats { diff --git a/internal/smart/bindata.go b/internal/smart/bindata.go index a3d9df305..65bdd53ee 100644 --- a/internal/smart/bindata.go +++ b/internal/smart/bindata.go @@ -69,18 +69,23 @@ type bindataFileInfo struct { func (fi bindataFileInfo) Name() string { return fi.name } + func (fi bindataFileInfo) Size() int64 { return fi.size } + func (fi bindataFileInfo) Mode() os.FileMode { return fi.mode } + func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime } + func (fi bindataFileInfo) IsDir() bool { return false } + func (fi bindataFileInfo) Sys() interface{} { return nil } @@ -214,7 +219,7 @@ func RestoreAsset(dir, name string) error { if err != nil { return err } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0o755)) if err != nil { return err } diff --git a/internal/smart/nvme.go b/internal/smart/nvme.go index f23d3a4d8..49ea2b3c1 100644 --- a/internal/smart/nvme.go +++ b/internal/smart/nvme.go @@ -40,9 +40,7 @@ const ( NvmeAdminIdentify = 0x06 ) -var ( - nvmeIoctlAdminCmd = ioctl.Iowr('N', 0x41, unsafe.Sizeof(nvmePassthruCommand{})) -) +var nvmeIoctlAdminCmd = ioctl.Iowr('N', 0x41, unsafe.Sizeof(nvmePassthruCommand{})) // NewNVMeDevice creates a new NVMeDevice struct with name func NewNVMeDevice(name string) *NVMeDevice { @@ -51,7 +49,7 @@ func NewNVMeDevice(name string) *NVMeDevice { 
// Open - open device file to find kernel info func (d *NVMeDevice) Open() (err error) { - d.fd, err = unix.Open(d.Name, unix.O_RDWR, 0600) + d.fd, err = unix.Open(d.Name, unix.O_RDWR, 0o600) return err }
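Taken together, the hunks in this diff are mechanical formatting rewrites consistent with gofumpt's rules rather than behavior changes. A compact, hypothetical before/after illustrating the three most frequent rewrites seen above (initializer var declarations to :=, 0o-prefixed octal literals, and unwrapping var blocks that hold a single declaration); errNotReady is an invented placeholder:

package main

import (
	"errors"
	"fmt"
	"os"
)

// gofumpt unwraps a parenthesized var block holding one declaration:
//
//	var (
//		errNotReady = errors.New("not ready")
//	)
//
// into a plain top-level var, as in crypto/error.go above.
var errNotReady = errors.New("not ready")

func main() {
	// `var mode = os.FileMode(0666)` becomes a short variable declaration,
	// and octal literals gain the explicit 0o prefix.
	mode := os.FileMode(0o666)
	fmt.Println(mode, errNotReady)
}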