diff --git a/cmd/admin-bucket-handlers.go b/cmd/admin-bucket-handlers.go index 360067fe3..36cc23f00 100644 --- a/cmd/admin-bucket-handlers.go +++ b/cmd/admin-bucket-handlers.go @@ -18,15 +18,32 @@ package cmd import ( + "bytes" + "context" + "encoding/base64" "encoding/json" + "encoding/xml" + "errors" + "fmt" "io" "io/ioutil" "net/http" + "strings" + "time" "github.com/gorilla/mux" jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/zip" + "github.com/minio/kes" "github.com/minio/madmin-go" + "github.com/minio/minio-go/v7/pkg/tags" + "github.com/minio/minio/internal/bucket/lifecycle" + objectlock "github.com/minio/minio/internal/bucket/object/lock" + "github.com/minio/minio/internal/bucket/versioning" + "github.com/minio/minio/internal/event" + "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" + "github.com/minio/pkg/bucket/policy" iampolicy "github.com/minio/pkg/iam/policy" ) @@ -354,3 +371,698 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht // Write success response. writeSuccessNoContent(w) } + +// ExportBucketMetadataHandler - exports all bucket metadata as a zipped file +func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "ExportBucketMetadata") + defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) + + bucket := pathClean(r.Form.Get("bucket")) + if !globalIsErasure { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) + return + } + // Get current object layer instance. + objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ExportBucketMetadataAction) + if objectAPI == nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + var ( + buckets []BucketInfo + err error + ) + if bucket != "" { + // Check if bucket exists. 
+ if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + buckets = append(buckets, BucketInfo{Name: bucket}) + } else { + buckets, err = objectAPI.ListBuckets(ctx) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + } + + // Initialize a zip writer which will provide a zipped content + // of bucket metadata + zipWriter := zip.NewWriter(w) + defer zipWriter.Close() + rawDataFn := func(r io.Reader, filename string, sz int) error { + header, zerr := zip.FileInfoHeader(dummyFileInfo{ + name: filename, + size: int64(sz), + mode: 0o600, + modTime: time.Now(), + isDir: false, + sys: nil, + }) + if zerr != nil { + logger.LogIf(ctx, zerr) + return nil + } + header.Method = zip.Deflate + zwriter, zerr := zipWriter.CreateHeader(header) + if zerr != nil { + logger.LogIf(ctx, zerr) + return nil + } + if _, err := io.Copy(zwriter, r); err != nil { + logger.LogIf(ctx, err) + } + return nil + } + + cfgFiles := []string{ + bucketPolicyConfig, + bucketNotificationConfig, + bucketLifecycleConfig, + bucketSSEConfig, + bucketTaggingConfig, + bucketQuotaConfigFile, + objectLockConfig, + bucketVersioningConfig, + bucketReplicationConfig, + bucketTargetsFile, + } + for _, bi := range buckets { + for _, cfgFile := range cfgFiles { + cfgPath := pathJoin(bi.Name, cfgFile) + bucket := bi.Name + switch cfgFile { + case bucketNotificationConfig: + config, err := globalBucketMetadataSys.GetNotificationConfig(bucket) + if err != nil { + logger.LogIf(ctx, err) + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + case 
bucketLifecycleConfig: + config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket) + if err != nil { + if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) { + continue + } + logger.LogIf(ctx, err) + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + case bucketQuotaConfigFile: + config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket) + if err != nil { + if errors.Is(err, BucketQuotaConfigNotFound{Bucket: bucket}) { + continue + } + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + configData, err := json.Marshal(config) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + case bucketSSEConfig: + config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket) + if err != nil { + if errors.Is(err, BucketSSEConfigNotFound{Bucket: bucket}) { + continue + } + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + case bucketTaggingConfig: + config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket) + if err != nil { + if errors.Is(err, BucketTaggingNotFound{Bucket: bucket}) { + continue + } + writeErrorResponse(ctx, w, 
exportError(ctx, err, cfgFile), r.URL) + return + } + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + case objectLockConfig: + config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) + if err != nil { + if errors.Is(err, BucketObjectLockConfigNotFound{Bucket: bucket}) { + continue + } + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + case bucketVersioningConfig: + config, _, err := globalBucketMetadataSys.GetVersioningConfig(bucket) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + // ignore empty versioning configs + if config.Status != versioning.Enabled && config.Status != versioning.Suspended { + continue + } + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + case bucketReplicationConfig: + config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) + if err != nil { + if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucket}) { + continue + } + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, 
exportError(ctx, err, cfgFile), r.URL) + return + } + + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + case bucketTargetsFile: + config, err := globalBucketMetadataSys.GetBucketTargetsConfig(bucket) + if err != nil { + if errors.Is(err, BucketRemoteTargetNotFound{Bucket: bucket}) { + continue + } + + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + if err = rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)); err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile), r.URL) + return + } + } + } + } +} + +// ImportBucketMetadataHandler - imports all bucket metadata from a zipped file and overwrite bucket metadata config +// There are some caveats regarding the following: +// 1. object lock config - object lock should have been specified at time of bucket creation. Only default retention settings are imported here. +// 2. Replication config - is omitted from import as remote target credentials are not available from exported data for security reasons. +// 3. lifecycle config - if transition rules are present, tier name needs to have been defined. +func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "ImportBucketMetadata") + + defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) + bucket := pathClean(r.Form.Get("bucket")) + + if !globalIsErasure { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) + return + } + // Get current object layer instance. 
+ objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ImportBucketMetadataAction) + if objectAPI == nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + data, err := ioutil.ReadAll(r.Body) + if err != nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + reader := bytes.NewReader(data) + zr, err := zip.NewReader(reader, int64(len(data))) + if err != nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + bucketMap := make(map[string]struct{}, 1) + + // import object lock config if any - order of import matters here. + for _, file := range zr.File { + slc := strings.Split(file.Name, slashSeparator) + if len(slc) != 2 { // expecting bucket/configfile in the zipfile + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + b, fileName := slc[0], slc[1] + if bucket == "" { // use bucket requested in query parameters if specified. 
Otherwise default bucket name to directory name within zip + bucket = b + } + switch fileName { + case objectLockConfig: + reader, err := file.Open() + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + config, err := objectlock.ParseObjectLockConfig(reader) + if err != nil { + apiErr := errorCodes.ToAPIErr(ErrMalformedXML) + apiErr.Description = err.Error() + writeErrorResponse(ctx, w, apiErr, r.URL) + return + } + + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + if _, ok := bucketMap[bucket]; !ok { + opts := BucketOptions{ + LockEnabled: config.ObjectLockEnabled == "Enabled", + } + err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts) + if err != nil { + if _, ok := err.(BucketExists); !ok { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + } + bucketMap[bucket] = struct{}{} + } + + // Deny object locking configuration settings on existing buckets without object lock enabled. + if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + if err = globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, configData); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + // Call site replication hook. + // + // We encode the xml bytes as base64 to ensure there are no encoding + // errors. 
+ cfgStr := base64.StdEncoding.EncodeToString(configData) + if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypeObjectLockConfig, + Bucket: bucket, + ObjectLockConfig: &cfgStr, + }); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + } + } + + // import versioning metadata + for _, file := range zr.File { + slc := strings.Split(file.Name, slashSeparator) + if len(slc) != 2 { // expecting bucket/configfile in the zipfile + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + b, fileName := slc[0], slc[1] + if bucket == "" { // use bucket requested in query parameters if specified. Otherwise default bucket name to directory name within zip + bucket = b + } + switch fileName { + case bucketVersioningConfig: + reader, err := file.Open() + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + v, err := versioning.ParseConfig(io.LimitReader(reader, maxBucketVersioningConfigSize)) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + if _, ok := bucketMap[bucket]; !ok { + err = objectAPI.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) + if err != nil { + if _, ok := err.(BucketExists); !ok { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + } + bucketMap[bucket] = struct{}{} + } + + if globalSiteReplicationSys.isEnabled() && v.Suspended() { + writeErrorResponse(ctx, w, APIError{ + Code: "InvalidBucketState", + Description: "Cluster replication is enabled for this site, so the versioning state cannot be changed.", + HTTPStatusCode: http.StatusConflict, + }, r.URL) + return + } + + if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled && v.Suspended() { + writeErrorResponse(ctx, w, APIError{ + Code: "InvalidBucketState", + Description: "An Object Lock configuration is present on this 
bucket, so the versioning state cannot be changed.", + HTTPStatusCode: http.StatusConflict, + }, r.URL) + return + } + if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() { + writeErrorResponse(ctx, w, APIError{ + Code: "InvalidBucketState", + Description: "A replication configuration is present on this bucket, so the versioning state cannot be changed.", + HTTPStatusCode: http.StatusConflict, + }, r.URL) + return + } + + configData, err := xml.Marshal(v) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + if err = globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, configData); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + } + } + + for _, file := range zr.File { + reader, err := file.Open() + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + sz := file.FileInfo().Size() + slc := strings.Split(file.Name, slashSeparator) + if len(slc) != 2 { // expecting bucket/configfile in the zipfile + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + b, fileName := slc[0], slc[1] + if bucket == "" { // use bucket requested in query parameters if specified. Otherwise default bucket name to directory name within zip + bucket = b + } + // create bucket if it does not exist yet. 
+ if _, ok := bucketMap[bucket]; !ok { + err = objectAPI.MakeBucketWithLocation(ctx, bucket, BucketOptions{}) + if err != nil { + if _, ok := err.(BucketExists); !ok { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + } + bucketMap[bucket] = struct{}{} + } + switch fileName { + case bucketNotificationConfig: + config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalNotificationSys.targetList) + if err != nil { + apiErr := errorCodes.ToAPIErr(ErrMalformedXML) + if event.IsEventError(err) { + apiErr = importError(ctx, err, file.Name) + } + writeErrorResponse(ctx, w, apiErr, r.URL) + return + } + + configData, err := xml.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + if err = globalBucketMetadataSys.Update(ctx, bucket, bucketNotificationConfig, configData); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + rulesMap := config.ToRulesMap() + globalNotificationSys.AddRulesMap(bucket, rulesMap) + case bucketPolicyConfig: + // Error out if Content-Length is beyond allowed size. 
+ if sz > maxBucketPolicySize { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPolicyTooLarge), r.URL) + return + } + + bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(reader, sz)) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + bucketPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyBytes), bucket) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + // Version in policy must not be empty + if bucketPolicy.Version == "" { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL) + return + } + + configData, err := json.Marshal(bucketPolicy) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + if err = globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, configData); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + // Call site replication hook. 
+ if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypePolicy, + Bucket: bucket, + Policy: bucketPolicyBytes, + }); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + case bucketLifecycleConfig: + bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(reader, sz)) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + // Validate the received bucket lifecycle configuration + if err = bucketLifecycle.Validate(); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + // Validate the transition storage ARNs + if err = validateTransitionTier(bucketLifecycle); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + configData, err := xml.Marshal(bucketLifecycle) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + if err = globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, configData); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + case bucketSSEConfig: + // Parse bucket encryption xml + encConfig, err := validateBucketSSEConfig(io.LimitReader(reader, maxBucketSSEConfigSize)) + if err != nil { + apiErr := APIError{ + Code: "MalformedXML", + Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, err), + HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode, + } + writeErrorResponse(ctx, w, apiErr, r.URL) + return + } + + // Return error if KMS is not initialized + if GlobalKMS == nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) + return + } + kmsKey := encConfig.KeyID() + if kmsKey != "" { + kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation + _, err := GlobalKMS.GenerateKey(kmsKey, kmsContext) + if 
err != nil { + if errors.Is(err, kes.ErrKeyNotFound) { + writeErrorResponse(ctx, w, importError(ctx, errKMSKeyNotFound, file.Name), r.URL) + return + } + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + } + + configData, err := xml.Marshal(encConfig) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + // Store the bucket encryption configuration in the object layer + if err = globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, configData); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + // Call site replication hook. + // + // We encode the xml bytes as base64 to ensure there are no encoding + // errors. + cfgStr := base64.StdEncoding.EncodeToString(configData) + if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypeSSEConfig, + Bucket: bucket, + SSEConfig: &cfgStr, + }); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + case bucketTaggingConfig: + tags, err := tags.ParseBucketXML(io.LimitReader(reader, sz)) + if err != nil { + apiErr := errorCodes.ToAPIErrWithErr(ErrMalformedXML, fmt.Errorf("error importing %s with %w", file.Name, err)) + writeErrorResponse(ctx, w, apiErr, r.URL) + return + } + + configData, err := xml.Marshal(tags) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + if err = globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, configData); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + // Call site replication hook. + // + // We encode the xml bytes as base64 to ensure there are no encoding + // errors. 
+ cfgStr := base64.StdEncoding.EncodeToString(configData) + if err = globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypeTags, + Bucket: bucket, + Tags: &cfgStr, + }); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + case bucketQuotaConfigFile: + data, err := ioutil.ReadAll(reader) + if err != nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + + quotaConfig, err := parseBucketQuota(bucket, data) + if err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + if quotaConfig.Type == "fifo" { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + + if err = globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, data); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + + bucketMeta := madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypeQuotaConfig, + Bucket: bucket, + Quota: data, + } + if quotaConfig.Quota == 0 { + bucketMeta.Quota = nil + } + + // Call site replication hook. 
+ if err = globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta); err != nil { + writeErrorResponse(ctx, w, importError(ctx, err, file.Name), r.URL) + return + } + } + } +} + +// wraps import error for more context +func importError(ctx context.Context, err error, fname string) APIError { + return toAPIError(ctx, fmt.Errorf("error importing %s with %w", fname, err)) +} + +// wraps export error for more context +func exportError(ctx context.Context, err error, fname string) APIError { + return toAPIError(ctx, fmt.Errorf("error exporting %s with %w", fname, err)) +} diff --git a/cmd/admin-router.go b/cmd/admin-router.go index 9a62dcede..c5232400f 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -188,6 +188,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc( gz(httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler))).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}") + // Bucket migration operations + // ExportBucketMetaHandler + adminRouter.Methods(http.MethodGet).Path(adminVersion + "/export-bucket-metadata").HandlerFunc( + gz(httpTraceHdrs(adminAPI.ExportBucketMetadataHandler))) + // ImportBucketMetaHandler + adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-bucket-metadata").HandlerFunc( + gz(httpTraceHdrs(adminAPI.ImportBucketMetadataHandler))) + // Remote Tier management operations adminRouter.Methods(http.MethodPut).Path(adminVersion + "/tier").HandlerFunc(gz(httpTraceHdrs(adminAPI.AddTierHandler))) adminRouter.Methods(http.MethodPost).Path(adminVersion + "/tier/{tier}").HandlerFunc(gz(httpTraceHdrs(adminAPI.EditTierHandler))) diff --git a/cmd/bucket-metadata-sys.go b/cmd/bucket-metadata-sys.go index 73c26f8ef..c1b57646d 100644 --- a/cmd/bucket-metadata-sys.go +++ b/cmd/bucket-metadata-sys.go @@ -338,6 +338,9 @@ func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, ti func (sys 
*BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, time.Time, error) { meta, err := sys.GetConfig(ctx, bucket) if err != nil { + if errors.Is(err, errConfigNotFound) { + return nil, time.Time{}, BucketQuotaConfigNotFound{Bucket: bucket} + } return nil, time.Time{}, err } return meta.quotaConfig, meta.QuotaConfigUpdatedAt, nil diff --git a/go.mod b/go.mod index 65346e1fe..9f8bf87cb 100644 --- a/go.mod +++ b/go.mod @@ -49,8 +49,8 @@ require ( github.com/minio/highwayhash v1.0.2 github.com/minio/kes v0.19.2 github.com/minio/madmin-go v1.3.14 - github.com/minio/minio-go/v7 v7.0.29 - github.com/minio/pkg v1.1.24 + github.com/minio/minio-go/v7 v7.0.29 + github.com/minio/pkg v1.1.25 github.com/minio/selfupdate v0.4.0 github.com/minio/sha256-simd v1.0.0 github.com/minio/simdjson-go v0.4.2 diff --git a/go.sum b/go.sum index d26fc56ed..a16067ea0 100644 --- a/go.sum +++ b/go.sum @@ -636,8 +636,10 @@ github.com/minio/minio-go/v7 v7.0.23/go.mod h1:ei5JjmxwHaMrgsMrn4U/+Nmg+d8MKS1U2 github.com/minio/minio-go/v7 v7.0.29 h1:7md6lIq1s6zPzUiDRX1BVLHolA4pDM8RMQqIszaJbY0= github.com/minio/minio-go/v7 v7.0.29/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg= github.com/minio/pkg v1.1.20/go.mod h1:Xo7LQshlxGa9shKwJ7NzQbgW4s8T/Wc1cOStR/eUiMY= -github.com/minio/pkg v1.1.24 h1:a2RCb6LgsCi9DvrripuvlFQRCNb5Hp1HIssnsUqLoZY= -github.com/minio/pkg v1.1.24/go.mod h1:z9PfmEI804KFkF6eY4LoGe8IDVvTCsYGVuaf58Dr0WI= +github.com/minio/pkg v1.1.25-0.20220603141436-1067208cd84a h1:0F3rTF08Voql68oOGL27o8q5OjJ9JmJvyY1IgU24ngI= +github.com/minio/pkg v1.1.25-0.20220603141436-1067208cd84a/go.mod h1:z9PfmEI804KFkF6eY4LoGe8IDVvTCsYGVuaf58Dr0WI= +github.com/minio/pkg v1.1.25 h1:QYLzmTFUV5D3bY9qXKzDj7eW2C+HOPcdtIZft9q2Azo= +github.com/minio/pkg v1.1.25/go.mod h1:z9PfmEI804KFkF6eY4LoGe8IDVvTCsYGVuaf58Dr0WI= github.com/minio/selfupdate v0.4.0 h1:A7t07pN4Ch1tBTIRStW0KhUVyykz+2muCqFsITQeEW8= github.com/minio/selfupdate v0.4.0/go.mod 
h1:mcDkzMgq8PRcpCRJo/NlPY7U45O5dfYl2Y0Rg7IustY= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=