diff --git a/buildscripts/minio-upgrade.sh b/buildscripts/minio-upgrade.sh
index 5721a8c64..5828aaacb 100644
--- a/buildscripts/minio-upgrade.sh
+++ b/buildscripts/minio-upgrade.sh
@@ -55,6 +55,11 @@ __init__() {
 
 	go install github.com/minio/mc@latest
 
+	## this is needed because GitHub Actions don't have
+	## docker-compose on all runners
+	go install github.com/docker/compose/v2/cmd@latest
+	mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose
+
 	TAG=minio/minio:dev make docker
 
 	MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go
index 90593b928..12e8cac02 100644
--- a/cmd/bucket-replication.go
+++ b/cmd/bucket-replication.go
@@ -1593,14 +1593,10 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 	for attempts <= 3 {
 		actx, acancel := context.WithTimeout(ctx, time.Minute)
 		aerr := c.AbortMultipartUpload(actx, bucket, object, uploadID)
+		acancel()
 		if aerr == nil {
-			acancel()
 			return
 		}
-		acancel()
-		replLogIf(actx,
-			fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
-				humanize.Ordinal(attempts), uploadID, bucket, object, aerr))
 		attempts++
 		time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
 	}
@@ -1648,6 +1644,8 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
 			ETag: pInfo.ETag,
 		})
 	}
+
+	// really big value but it's okay on heavily loaded systems. This is just a tail-end timeout.
 	cctx, ccancel := context.WithTimeout(ctx, 10*time.Minute)
 	defer ccancel()
 	_, err = c.CompleteMultipartUpload(cctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{