diff --git a/cmd/gateway/gateway.go b/cmd/gateway/gateway.go
index a50ae1c1c..c351fd816 100644
--- a/cmd/gateway/gateway.go
+++ b/cmd/gateway/gateway.go
@@ -21,6 +21,7 @@ import (
 	_ "github.com/minio/minio/cmd/gateway/azure"
 	_ "github.com/minio/minio/cmd/gateway/b2"
 	_ "github.com/minio/minio/cmd/gateway/gcs"
+	_ "github.com/minio/minio/cmd/gateway/oss"
 	_ "github.com/minio/minio/cmd/gateway/s3"
 	_ "github.com/minio/minio/cmd/gateway/sia"
 )
diff --git a/cmd/gateway/oss/gateway-oss-anonymous.go b/cmd/gateway/oss/gateway-oss-anonymous.go
new file mode 100644
index 000000000..3e8442e4e
--- /dev/null
+++ b/cmd/gateway/oss/gateway-oss-anonymous.go
@@ -0,0 +1,54 @@
+/*
+ * Minio Cloud Storage, (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package oss
+
+import (
+	"io"
+
+	minio "github.com/minio/minio/cmd"
+	"github.com/minio/minio/pkg/hash"
+)
+
+// AnonPutObject creates a new object anonymously with the incoming data.
+func (l *ossObjects) AnonPutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
+	return ossPutObject(l.anonClient, bucket, object, data, metadata)
+}
+
+// AnonGetObject - Get object anonymously
+func (l *ossObjects) AnonGetObject(bucket, key string, startOffset, length int64, writer io.Writer) error {
+	return ossGetObject(l.anonClient, bucket, key, startOffset, length, writer)
+}
+
+// AnonGetObjectInfo - Get object info anonymously
+func (l *ossObjects) AnonGetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) {
+	return ossGetObjectInfo(l.anonClient, bucket, object)
+}
+
+// AnonListObjects lists objects anonymously.
+func (l *ossObjects) AnonListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
+	return ossListObjects(l.anonClient, bucket, prefix, marker, delimiter, maxKeys)
+}
+
+// AnonListObjectsV2 lists objects in V2 mode, anonymously.
+func (l *ossObjects) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
+	return ossListObjectsV2(l.anonClient, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
+}
+
+// AnonGetBucketInfo gets bucket metadata anonymously.
+func (l *ossObjects) AnonGetBucketInfo(bucket string) (bi minio.BucketInfo, err error) {
+	return ossGetBucketInfo(l.anonClient, bucket)
+}
diff --git a/cmd/gateway/oss/gateway-oss.go b/cmd/gateway/oss/gateway-oss.go
new file mode 100644
index 000000000..a87789b5d
--- /dev/null
+++ b/cmd/gateway/oss/gateway-oss.go
@@ -0,0 +1,991 @@
+/*
+ * Minio Cloud Storage, (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package oss + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/dustin/go-humanize" + + "github.com/minio/cli" + "github.com/minio/minio-go/pkg/policy" + minio "github.com/minio/minio/cmd" + "github.com/minio/minio/pkg/auth" + "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/hash" +) + +const ( + ossS3MinPartSize = 5 * humanize.MiByte + ossMaxParts = 1000 + ossMaxKeys = 1000 + ossBackend = "oss" +) + +func init() { + const ossGatewayTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT] +{{if .VisibleFlags}} +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +ENDPOINT: + OSS server endpoint. Default ENDPOINT is https://oss.aliyuncs.com + +ENVIRONMENT VARIABLES: + ACCESS: + MINIO_ACCESS_KEY: Username or access key of OSS storage. + MINIO_SECRET_KEY: Password or secret key of OSS storage. + + BROWSER: + MINIO_BROWSER: To disable web browser access, set this value to "off". + +EXAMPLES: + 1. Start minio gateway server for Aliyun OSS backend. + $ export MINIO_ACCESS_KEY=accesskey + $ export MINIO_SECRET_KEY=secretkey + $ {{.HelpName}} + + 2. Start minio gateway server for Aliyun OSS backend on custom endpoint. + $ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F + $ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG + $ {{.HelpName}} https://oss.example.com + +` + + minio.RegisterGatewayCommand(cli.Command{ + Name: "oss", + Usage: "Alibaba Cloud (Aliyun) Object Storage Service (OSS).", + Action: ossGatewayMain, + CustomHelpTemplate: ossGatewayTemplate, + HideHelpCommand: true, + }) +} + +// Handler for 'minio gateway oss' command line. +func ossGatewayMain(ctx *cli.Context) { + if ctx.Args().First() == "help" { + cli.ShowCommandHelpAndExit(ctx, ossBackend, 1) + } + + // Validate gateway arguments. + host := ctx.Args().First() + minio.FatalIf(minio.ValidateGatewayArguments(ctx.GlobalString("address"), host), "Invalid argument") + + minio.StartGateway(ctx, &OSS{host}) +} + +// OSS implements Gateway. +type OSS struct { + host string +} + +// Name implements Gateway interface. +func (g *OSS) Name() string { + return ossBackend +} + +// NewGatewayLayer implements Gateway interface and returns OSS GatewayLayer. +func (g *OSS) NewGatewayLayer(creds auth.Credentials) (minio.GatewayLayer, error) { + var err error + + // Regions and endpoints + // https://www.alibabacloud.com/help/doc-detail/31837.htm + if g.host == "" { + g.host = "https://oss.aliyuncs.com" + } + + // Initialize oss client object. + client, err := oss.New(g.host, creds.AccessKey, creds.SecretKey) + if err != nil { + return nil, err + } + + anonClient, err := oss.New(g.host, "", "") + if err != nil { + return nil, err + } + + return &ossObjects{ + Client: client, + anonClient: anonClient, + }, nil +} + +// Production - oss is not production ready yet. +func (g *OSS) Production() bool { + return false +} + +// appendS3MetaToOSSOptions converts metadata meant for S3 PUT/COPY +// object into oss.Option. 
+//
+// S3 user-metadata is translated to OSS metadata by replacing the
+// `X-Amz-Meta-` prefix with `X-Oss-Meta-`.
+//
+// Header names are canonicalized as in http.Header.
+func appendS3MetaToOSSOptions(opts []oss.Option, s3Metadata map[string]string) ([]oss.Option, error) {
+	if opts == nil {
+		opts = make([]oss.Option, 0, len(s3Metadata))
+	}
+	for k, v := range s3Metadata {
+		k = http.CanonicalHeaderKey(k)
+
+		switch {
+		case strings.HasPrefix(k, "X-Amz-Meta-"):
+			metaKey := k[len("X-Amz-Meta-"):]
+			// NOTE(timonwong): OSS won't allow headers with underscore(_).
+			if strings.Contains(metaKey, "_") {
+				return nil, errors.Trace(minio.UnsupportedMetadata{})
+			}
+			opts = append(opts, oss.Meta(metaKey, v))
+		case k == "X-Amz-Acl":
+			// Valid values: public-read, private, and public-read-write
+			opts = append(opts, oss.ObjectACL(oss.ACLType(v)))
+		case k == "X-Amz-Server-Side-Encryption":
+			opts = append(opts, oss.ServerSideEncryption(v))
+		case k == "X-Amz-Copy-Source-If-Match":
+			opts = append(opts, oss.CopySourceIfMatch(v))
+		case k == "X-Amz-Copy-Source-If-None-Match":
+			opts = append(opts, oss.CopySourceIfNoneMatch(v))
+		case k == "X-Amz-Copy-Source-If-Unmodified-Since":
+			if v, err := http.ParseTime(v); err == nil {
+				opts = append(opts, oss.CopySourceIfUnmodifiedSince(v))
+			}
+		case k == "X-Amz-Copy-Source-If-Modified-Since":
+			if v, err := http.ParseTime(v); err == nil {
+				opts = append(opts, oss.CopySourceIfModifiedSince(v))
+			}
+		case k == "Accept-Encoding":
+			opts = append(opts, oss.AcceptEncoding(v))
+		case k == "Cache-Control":
+			opts = append(opts, oss.CacheControl(v))
+		case k == "Content-Disposition":
+			opts = append(opts, oss.ContentDisposition(v))
+		case k == "Content-Encoding":
+			opts = append(opts, oss.ContentEncoding(v))
+		case k == "Content-Length":
+			if v, err := strconv.ParseInt(v, 10, 64); err == nil {
+				opts = append(opts, oss.ContentLength(v))
+			}
+		case k == "Content-MD5":
+			opts = append(opts, oss.ContentMD5(v))
+		case k == "Content-Type":
+			opts = append(opts, oss.ContentType(v))
+		case k == "Expires":
+			if v, err := http.ParseTime(v); err == nil {
+				opts = append(opts, oss.Expires(v))
+			}
+		}
+	}
+
+	return opts, nil
+}
+
+// ossHeaderToS3Meta converts OSS metadata to S3 metadata.
+// It is the reverse of appendS3MetaToOSSOptions.
+func ossHeaderToS3Meta(header http.Header) map[string]string {
+	// The decoding technique used for each key is as follows:
+	// each '__' is converted to '_', and each remaining single '_'
+	// is converted to '-'.
+	// With these rules, here are some of the expected
+	// translations for these keys.
+	//     i: 'x_s3cmd__attrs' -> o: 'x-s3cmd_attrs' (mixed)
+	//     i: 'x____test____value' -> o: 'x__test__value' (double '_')
+	decodeKey := func(key string) string {
+		tokens := strings.Split(key, "__")
+		for i := range tokens {
+			tokens[i] = strings.Replace(tokens[i], "_", "-", -1)
+		}
+		return strings.Join(tokens, "_")
+	}
+
+	s3Metadata := make(map[string]string)
+	for k := range header {
+		k = http.CanonicalHeaderKey(k)
+		switch {
+		case strings.HasPrefix(k, oss.HTTPHeaderOssMetaPrefix):
+			// Add the Amazon S3 meta prefix.
+			metaKey := k[len(oss.HTTPHeaderOssMetaPrefix):]
+			metaKey = "X-Amz-Meta-" + decodeKey(metaKey)
+			metaKey = http.CanonicalHeaderKey(metaKey)
+			s3Metadata[metaKey] = header.Get(k)
+		case k == "Cache-Control":
+			fallthrough
+		case k == "Content-Encoding":
+			fallthrough
+		case k == "Content-Disposition":
+			fallthrough
+		case k == "Content-Length":
+			fallthrough
+		case k == "Content-MD5":
+			fallthrough
+		case k == "Content-Type":
+			s3Metadata[k] = header.Get(k)
+		}
+	}
+
+	return s3Metadata
+}
+
+// ossToObjectError converts OSS errors to minio object layer errors.
+func ossToObjectError(err error, params ...string) error {
+	if err == nil {
+		return nil
+	}
+
+	e, ok := err.(*errors.Error)
+	if !ok {
+		// The caller should always wrap errors with errors.Trace() before
+		// calling this function; handling untraced errors here would make
+		// this function needlessly complicated.
+		minio.ErrorIf(err, "Expected type *Error")
+		return err
+	}
+
+	err = e.Cause
+	bucket := ""
+	object := ""
+	uploadID := ""
+	switch len(params) {
+	case 3:
+		uploadID = params[2]
+		fallthrough
+	case 2:
+		object = params[1]
+		fallthrough
+	case 1:
+		bucket = params[0]
+	}
+
+	ossErr, ok := err.(oss.ServiceError)
+	if !ok {
+		// We don't interpret non-OSS errors; only OSS service errors
+		// carry the error Code needed to convert them to object errors.
+		return e
+	}
+
+	switch ossErr.Code {
+	case "BucketAlreadyExists":
+		err = minio.BucketAlreadyOwnedByYou{Bucket: bucket}
+	case "BucketNotEmpty":
+		err = minio.BucketNotEmpty{Bucket: bucket}
+	case "InvalidBucketName":
+		err = minio.BucketNameInvalid{Bucket: bucket}
+	case "NoSuchBucket":
+		err = minio.BucketNotFound{Bucket: bucket}
+	case "NoSuchKey":
+		if object != "" {
+			err = minio.ObjectNotFound{Bucket: bucket, Object: object}
+		} else {
+			err = minio.BucketNotFound{Bucket: bucket}
+		}
+	case "InvalidObjectName":
+		err = minio.ObjectNameInvalid{Bucket: bucket, Object: object}
+	case "AccessDenied":
+		err = minio.PrefixAccessDenied{Bucket: bucket, Object: object}
+	case "NoSuchUpload":
+		err = minio.InvalidUploadID{UploadID: uploadID}
+	case "EntityTooSmall":
+		err = minio.PartTooSmall{}
+	case "SignatureDoesNotMatch":
+		err = minio.SignatureDoesNotMatch{}
+	case "InvalidPart":
+		err = minio.InvalidPart{}
+	}
+
+	e.Cause = err
+	return e
+}
+
+// ossObjects implements gateway for Aliyun Object Storage Service.
+type ossObjects struct {
+	minio.GatewayUnsupported
+	Client     *oss.Client
+	anonClient *oss.Client
+}
+
+// Shutdown saves any gateway metadata to disk
+// if necessary and reloads it upon the next restart.
+func (l *ossObjects) Shutdown() error {
+	return nil
+}
+
+// StorageInfo is not relevant to OSS backend.
+func (l *ossObjects) StorageInfo() (si minio.StorageInfo) {
+	return
+}
+
+// ossIsValidBucketName verifies whether a bucket name is valid.
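+// For example, "my.bucket" is rejected, since OSS does not allow dots
+// in bucket names.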
+func ossIsValidBucketName(bucket string) bool {
+	// Dots are not allowed in bucket names.
+	if strings.Contains(bucket, ".") {
+		return false
+	}
+	if !minio.IsValidBucketName(bucket) {
+		return false
+	}
+	return true
+}
+
+// MakeBucketWithLocation creates a new container on OSS backend.
+func (l *ossObjects) MakeBucketWithLocation(bucket, location string) error {
+	if !ossIsValidBucketName(bucket) {
+		return errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
+	}
+
+	err := l.Client.CreateBucket(bucket)
+	return ossToObjectError(errors.Trace(err), bucket)
+}
+
+// ossGetBucketInfo gets bucket metadata.
+func ossGetBucketInfo(client *oss.Client, bucket string) (bi minio.BucketInfo, err error) {
+	if !ossIsValidBucketName(bucket) {
+		return bi, errors.Trace(minio.BucketNameInvalid{Bucket: bucket})
+	}
+
+	bgir, err := client.GetBucketInfo(bucket)
+	if err != nil {
+		return bi, ossToObjectError(errors.Trace(err), bucket)
+	}
+
+	return minio.BucketInfo{
+		Name:    bgir.BucketInfo.Name,
+		Created: bgir.BucketInfo.CreationDate,
+	}, nil
+}
+
+// GetBucketInfo gets bucket metadata.
+func (l *ossObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err error) {
+	return ossGetBucketInfo(l.Client, bucket)
+}
+
+// ListBuckets lists all OSS buckets.
+func (l *ossObjects) ListBuckets() (buckets []minio.BucketInfo, err error) {
+	marker := oss.Marker("")
+	for {
+		lbr, err := l.Client.ListBuckets(marker)
+		if err != nil {
+			return nil, ossToObjectError(errors.Trace(err))
+		}
+
+		for _, bi := range lbr.Buckets {
+			buckets = append(buckets, minio.BucketInfo{
+				Name:    bi.Name,
+				Created: bi.CreationDate,
+			})
+		}
+
+		marker = oss.Marker(lbr.NextMarker)
+		if !lbr.IsTruncated {
+			break
+		}
+	}
+
+	return buckets, nil
+}
+
+// DeleteBucket deletes a bucket on OSS.
+func (l *ossObjects) DeleteBucket(bucket string) error {
+	err := l.Client.DeleteBucket(bucket)
+	if err != nil {
+		return ossToObjectError(errors.Trace(err), bucket)
+	}
+	return nil
+}
+
+// fromOSSClientObjectProperties converts oss ObjectProperties to ObjectInfo.
+func fromOSSClientObjectProperties(bucket string, o oss.ObjectProperties) minio.ObjectInfo {
+	// NOTE(timonwong): No Content-Type and user defined metadata.
+	// https://www.alibabacloud.com/help/doc-detail/31965.htm
+
+	return minio.ObjectInfo{
+		Bucket:  bucket,
+		Name:    o.Key,
+		ModTime: o.LastModified,
+		Size:    o.Size,
+		ETag:    minio.ToS3ETag(o.ETag),
+	}
+}
+
+// fromOSSClientListObjectsResult converts oss ListBucketResult to ListObjectsInfo.
+func fromOSSClientListObjectsResult(bucket string, lor oss.ListObjectsResult) minio.ListObjectsInfo {
+	objects := make([]minio.ObjectInfo, len(lor.Objects))
+	for i, oi := range lor.Objects {
+		objects[i] = fromOSSClientObjectProperties(bucket, oi)
+	}
+
+	prefixes := make([]string, len(lor.CommonPrefixes))
+	copy(prefixes, lor.CommonPrefixes)
+
+	return minio.ListObjectsInfo{
+		IsTruncated: lor.IsTruncated,
+		NextMarker:  lor.NextMarker,
+		Objects:     objects,
+		Prefixes:    prefixes,
+	}
+}
+
+// ossListObjects lists all blobs in OSS bucket filtered by prefix.
+func ossListObjects(client *oss.Client, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
+	buck, err := client.Bucket(bucket)
+	if err != nil {
+		return loi, ossToObjectError(errors.Trace(err), bucket)
+	}
+
+	// maxKeys should default to 1000 or less.
+ if maxKeys == 0 || maxKeys > ossMaxKeys { + maxKeys = ossMaxKeys + } + + lor, err := buck.ListObjects(oss.Prefix(prefix), oss.Marker(marker), oss.Delimiter(delimiter), oss.MaxKeys(maxKeys)) + if err != nil { + return loi, ossToObjectError(errors.Trace(err), bucket) + } + + return fromOSSClientListObjectsResult(bucket, lor), nil +} + +// ossListObjectsV2 lists all blobs in OSS bucket filtered by prefix. +func ossListObjectsV2(client *oss.Client, bucket, prefix, continuationToken, delimiter string, maxKeys int, + fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) { + // fetchOwner and startAfter are not supported and unused. + marker := continuationToken + + resultV1, err := ossListObjects(client, bucket, prefix, marker, delimiter, maxKeys) + if err != nil { + return loi, err + } + + return minio.ListObjectsV2Info{ + Objects: resultV1.Objects, + Prefixes: resultV1.Prefixes, + ContinuationToken: continuationToken, + NextContinuationToken: resultV1.NextMarker, + IsTruncated: resultV1.IsTruncated, + }, nil +} + +// ListObjects lists all blobs in OSS bucket filtered by prefix. +func (l *ossObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) { + return ossListObjects(l.Client, bucket, prefix, marker, delimiter, maxKeys) +} + +// ListObjectsV2 lists all blobs in OSS bucket filtered by prefix +func (l *ossObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, + fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) { + return ossListObjectsV2(l.Client, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter) +} + +// ossGetObject reads an object on OSS. Supports additional +// parameters like offset and length which are synonymous with +// HTTP Range requests. +// +// startOffset indicates the starting read location of the object. +// length indicates the total length of the object. +func ossGetObject(client *oss.Client, bucket, key string, startOffset, length int64, writer io.Writer) error { + if length < 0 && length != -1 { + return ossToObjectError(errors.Trace(fmt.Errorf("Invalid argument")), bucket, key) + } + + bkt, err := client.Bucket(bucket) + if err != nil { + return ossToObjectError(errors.Trace(err), bucket, key) + } + + var opts []oss.Option + if startOffset >= 0 && length >= 0 { + opts = append(opts, oss.Range(startOffset, startOffset+length-1)) + } + + object, err := bkt.GetObject(key, opts...) + if err != nil { + return ossToObjectError(errors.Trace(err), bucket, key) + } + defer object.Close() + + if _, err := io.Copy(writer, object); err != nil { + return ossToObjectError(errors.Trace(err), bucket, key) + } + return nil +} + +// GetObject reads an object on OSS. Supports additional +// parameters like offset and length which are synonymous with +// HTTP Range requests. +// +// startOffset indicates the starting read location of the object. +// length indicates the total length of the object. 
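+// A length of -1 reads the object through to its end.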
+func (l *ossObjects) GetObject(bucket, key string, startOffset, length int64, writer io.Writer) error { + return ossGetObject(l.Client, bucket, key, startOffset, length, writer) +} + +func translatePlainError(err error) error { + errString := err.Error() + + switch errString { + case "oss: service returned without a response body (404 Not Found)": + return oss.ServiceError{Code: "NoSuchKey"} + case "oss: service returned without a response body (400 Bad Request)": + return oss.ServiceError{Code: "AccessDenied"} + } + + return err +} + +// ossGetObjectInfo reads object info and replies back ObjectInfo. +func ossGetObjectInfo(client *oss.Client, bucket, object string) (objInfo minio.ObjectInfo, err error) { + bkt, err := client.Bucket(bucket) + if err != nil { + return objInfo, ossToObjectError(errors.Trace(err), bucket, object) + } + + header, err := bkt.GetObjectDetailedMeta(object) + if err != nil { + return objInfo, ossToObjectError(errors.Trace(translatePlainError(err)), bucket, object) + } + + // Build S3 metadata from OSS metadata + userDefined := ossHeaderToS3Meta(header) + + modTime, _ := http.ParseTime(header.Get("Last-Modified")) + size, _ := strconv.ParseInt(header.Get("Content-Length"), 10, 64) + + return minio.ObjectInfo{ + Bucket: bucket, + Name: object, + ModTime: modTime, + Size: size, + ETag: minio.ToS3ETag(header.Get("ETag")), + UserDefined: userDefined, + ContentType: header.Get("Content-Type"), + ContentEncoding: header.Get("Content-Encoding"), + }, nil +} + +// GetObjectInfo reads object info and replies back ObjectInfo. +func (l *ossObjects) GetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) { + return ossGetObjectInfo(l.Client, bucket, object) +} + +// ossPutObject creates a new object with the incoming data. +func ossPutObject(client *oss.Client, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { + bkt, err := client.Bucket(bucket) + if err != nil { + return objInfo, ossToObjectError(errors.Trace(err), bucket, object) + } + + // Build OSS metadata + opts, err := appendS3MetaToOSSOptions(nil, metadata) + if err != nil { + return objInfo, ossToObjectError(err, bucket, object) + } + + err = bkt.PutObject(object, data, opts...) + if err != nil { + return objInfo, ossToObjectError(errors.Trace(err), bucket, object) + } + + return ossGetObjectInfo(client, bucket, object) +} + +// PutObject creates a new object with the incoming data. +func (l *ossObjects) PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { + return ossPutObject(l.Client, bucket, object, data, metadata) +} + +// CopyObject copies an object from source bucket to a destination bucket. +func (l *ossObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (objInfo minio.ObjectInfo, err error) { + bkt, err := l.Client.Bucket(srcBucket) + if err != nil { + return objInfo, ossToObjectError(errors.Trace(err), srcBucket, srcObject) + } + + opts := make([]oss.Option, 0, len(metadata)+1) + // Set this header such that following CopyObject() always sets the right metadata on the destination. + // metadata input is already a trickled down value from interpreting x-oss-metadata-directive at + // handler layer. So what we have right now is supposed to be applied on the destination object anyways. + // So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API. 
+ opts = append(opts, oss.MetadataDirective(oss.MetaReplace)) + + // Build OSS metadata + opts, err = appendS3MetaToOSSOptions(opts, metadata) + if err != nil { + return objInfo, ossToObjectError(err, srcBucket, srcObject) + } + + if _, err = bkt.CopyObjectTo(dstBucket, dstObject, srcObject, opts...); err != nil { + return objInfo, ossToObjectError(errors.Trace(err), srcBucket, srcObject) + } + return l.GetObjectInfo(dstBucket, dstObject) +} + +// DeleteObject deletes a blob in bucket. +func (l *ossObjects) DeleteObject(bucket, object string) error { + bkt, err := l.Client.Bucket(bucket) + if err != nil { + return ossToObjectError(errors.Trace(err), bucket, object) + } + + err = bkt.DeleteObject(object) + if err != nil { + return ossToObjectError(errors.Trace(err), bucket, object) + } + return nil +} + +// fromOSSClientListMultipartsInfo converts oss ListMultipartUploadResult to ListMultipartsInfo +func fromOSSClientListMultipartsInfo(lmur oss.ListMultipartUploadResult) minio.ListMultipartsInfo { + uploads := make([]minio.MultipartInfo, len(lmur.Uploads)) + for i, um := range lmur.Uploads { + uploads[i] = minio.MultipartInfo{ + Object: um.Key, + UploadID: um.UploadID, + Initiated: um.Initiated, + } + } + + commonPrefixes := make([]string, len(lmur.CommonPrefixes)) + copy(commonPrefixes, lmur.CommonPrefixes) + + return minio.ListMultipartsInfo{ + KeyMarker: lmur.KeyMarker, + UploadIDMarker: lmur.UploadIDMarker, + NextKeyMarker: lmur.NextKeyMarker, + NextUploadIDMarker: lmur.NextUploadIDMarker, + MaxUploads: lmur.MaxUploads, + IsTruncated: lmur.IsTruncated, + Uploads: uploads, + Prefix: lmur.Prefix, + Delimiter: lmur.Delimiter, + CommonPrefixes: commonPrefixes, + } +} + +// ListMultipartUploads lists all multipart uploads. +func (l *ossObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) { + bkt, err := l.Client.Bucket(bucket) + if err != nil { + return lmi, ossToObjectError(errors.Trace(err), bucket) + } + + lmur, err := bkt.ListMultipartUploads(oss.Prefix(prefix), oss.KeyMarker(keyMarker), oss.UploadIDMarker(uploadIDMarker), + oss.Delimiter(delimiter), oss.MaxUploads(maxUploads)) + if err != nil { + return lmi, ossToObjectError(errors.Trace(err), bucket) + } + + return fromOSSClientListMultipartsInfo(lmur), nil +} + +// NewMultipartUpload upload object in multiple parts. +func (l *ossObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) { + bkt, err := l.Client.Bucket(bucket) + if err != nil { + return uploadID, ossToObjectError(errors.Trace(err), bucket, object) + } + + // Build OSS metadata + opts, err := appendS3MetaToOSSOptions(nil, metadata) + if err != nil { + return uploadID, ossToObjectError(err, bucket, object) + } + + lmur, err := bkt.InitiateMultipartUpload(object, opts...) + if err != nil { + return uploadID, ossToObjectError(errors.Trace(err), bucket, object) + } + + return lmur.UploadID, nil +} + +// PutObjectPart puts a part of object in bucket. 
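+// The returned ETag is normalized with minio.ToS3ETag() so that S3
+// clients recognize it.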
+func (l *ossObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, err error) { + bkt, err := l.Client.Bucket(bucket) + if err != nil { + return pi, ossToObjectError(errors.Trace(err), bucket, object) + } + + imur := oss.InitiateMultipartUploadResult{ + Bucket: bucket, + Key: object, + UploadID: uploadID, + } + size := data.Size() + up, err := bkt.UploadPart(imur, data, size, partID) + if err != nil { + return pi, ossToObjectError(errors.Trace(err), bucket, object) + } + + return minio.PartInfo{ + Size: size, + ETag: minio.ToS3ETag(up.ETag), + // NOTE(timonwong): LastModified is not supported + PartNumber: up.PartNumber, + }, nil +} + +func ossBuildListObjectPartsParams(uploadID string, partNumberMarker, maxParts int) map[string]interface{} { + return map[string]interface{}{ + "uploadId": uploadID, + "part-number-marker": strconv.Itoa(partNumberMarker), + "max-parts": strconv.Itoa(maxParts), + } +} + +// fromOSSClientListPartsInfo converts OSS ListUploadedPartsResult to ListPartsInfo +func fromOSSClientListPartsInfo(lupr oss.ListUploadedPartsResult, partNumberMarker int) minio.ListPartsInfo { + parts := make([]minio.PartInfo, len(lupr.UploadedParts)) + for i, up := range lupr.UploadedParts { + parts[i] = minio.PartInfo{ + PartNumber: up.PartNumber, + LastModified: up.LastModified, + ETag: minio.ToS3ETag(up.ETag), + Size: int64(up.Size), + } + } + + nextPartNumberMarker, _ := strconv.Atoi(lupr.NextPartNumberMarker) + return minio.ListPartsInfo{ + Bucket: lupr.Bucket, + Object: lupr.Key, + UploadID: lupr.UploadID, + PartNumberMarker: partNumberMarker, + NextPartNumberMarker: nextPartNumberMarker, + MaxParts: lupr.MaxParts, + IsTruncated: lupr.IsTruncated, + Parts: parts, + } +} + +func ossListObjectParts(client *oss.Client, bucket, object, uploadID string, partNumberMarker, maxParts int) (lupr oss.ListUploadedPartsResult, err error) { + params := ossBuildListObjectPartsParams(uploadID, partNumberMarker, maxParts) + resp, err := client.Conn.Do("GET", bucket, object, params, nil, nil, 0, nil) + if err != nil { + return lupr, err + } + + defer func() { + // always drain output (response body) + io.CopyN(ioutil.Discard, resp.Body, 512) + resp.Body.Close() + }() + + err = xml.NewDecoder(resp.Body).Decode(&lupr) + if err != nil { + return lupr, err + } + return lupr, nil +} + +// CopyObjectPart creates a part in a multipart upload by copying +// existing object or a part of it. +func (l *ossObjects) CopyObjectPart(srcBucket, srcObject, destBucket, destObject, uploadID string, + partID int, startOffset, length int64, metadata map[string]string) (p minio.PartInfo, err error) { + + bkt, err := l.Client.Bucket(destBucket) + if err != nil { + return p, ossToObjectError(errors.Trace(err), destBucket) + } + + // Build OSS metadata + opts, err := appendS3MetaToOSSOptions(nil, metadata) + if err != nil { + return p, ossToObjectError(err, srcBucket, srcObject) + } + + completePart, err := bkt.UploadPartCopy(oss.InitiateMultipartUploadResult{ + Key: destObject, + UploadID: uploadID, + }, srcBucket, srcObject, startOffset, length, partID, opts...) 
+
+	if err != nil {
+		return p, ossToObjectError(errors.Trace(err), srcBucket, srcObject)
+	}
+
+	p.PartNumber = completePart.PartNumber
+	p.ETag = minio.ToS3ETag(completePart.ETag)
+	return p, nil
+}
+
+// ListObjectParts returns all object parts for specified object in specified bucket.
+func (l *ossObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi minio.ListPartsInfo, err error) {
+	lupr, err := ossListObjectParts(l.Client, bucket, object, uploadID, partNumberMarker, maxParts)
+	if err != nil {
+		return lpi, ossToObjectError(errors.Trace(err), bucket, object, uploadID)
+	}
+
+	return fromOSSClientListPartsInfo(lupr, partNumberMarker), nil
+}
+
+// AbortMultipartUpload aborts an ongoing multipart upload.
+func (l *ossObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
+	bkt, err := l.Client.Bucket(bucket)
+	if err != nil {
+		return ossToObjectError(errors.Trace(err), bucket, object)
+	}
+
+	err = bkt.AbortMultipartUpload(oss.InitiateMultipartUploadResult{
+		Bucket:   bucket,
+		Key:      object,
+		UploadID: uploadID,
+	})
+	if err != nil {
+		return ossToObjectError(errors.Trace(err), bucket, object)
+	}
+	return nil
+}
+
+// CompleteMultipartUpload completes ongoing multipart upload and finalizes object.
+func (l *ossObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, err error) {
+	client := l.Client
+	bkt, err := client.Bucket(bucket)
+	if err != nil {
+		return oi, ossToObjectError(errors.Trace(err), bucket, object)
+	}
+
+	// Error out if any uploaded part except the last one is smaller than 5MiB.
+	// NOTE(timonwong): OSS itself won't throw EntityTooSmall; this check exists just for mint :(
+	var partNumberMarker int
+	lupr := oss.ListUploadedPartsResult{IsTruncated: true}
+	for lupr.IsTruncated {
+		lupr, err = ossListObjectParts(client, bucket, object, uploadID, partNumberMarker, ossMaxParts)
+		if err != nil {
+			return oi, ossToObjectError(errors.Trace(err), bucket, object, uploadID)
+		}
+
+		uploadedParts := lupr.UploadedParts
+		if !lupr.IsTruncated {
+			if len(uploadedParts) < 1 {
+				uploadedParts = nil
+			} else {
+				uploadedParts = uploadedParts[:len(uploadedParts)-1]
+			}
+		}
+
+		for _, part := range uploadedParts {
+			if part.Size < ossS3MinPartSize {
+				return oi, errors.Trace(minio.PartTooSmall{
+					PartNumber: part.PartNumber,
+					PartSize:   int64(part.Size),
+					PartETag:   minio.ToS3ETag(part.ETag),
+				})
+			}
+		}
+
+		partNumberMarker, _ = strconv.Atoi(lupr.NextPartNumberMarker)
+	}
+
+	imur := oss.InitiateMultipartUploadResult{
+		Bucket:   bucket,
+		Key:      object,
+		UploadID: uploadID,
+	}
+	parts := make([]oss.UploadPart, len(uploadedParts))
+	for i, up := range uploadedParts {
+		parts[i] = oss.UploadPart{
+			PartNumber: up.PartNumber,
+			ETag:       strings.TrimSuffix(up.ETag, "-1"), // Trim the "-1" suffix that minio.ToS3ETag() added in PutObjectPart().
+		}
+	}
+
+	_, err = bkt.CompleteMultipartUpload(imur, parts)
+	if err != nil {
+		return oi, ossToObjectError(errors.Trace(err), bucket, object)
+	}
+
+	return l.GetObjectInfo(bucket, object)
+}
+
+// SetBucketPolicies sets policy on bucket.
+// OSS supports three types of bucket policies:
+// oss.ACLPublicReadWrite: readwrite in minio terminology
+// oss.ACLPublicRead: readonly in minio terminology
+// oss.ACLPrivate: none in minio terminology
+func (l *ossObjects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
+	bucketPolicies := policy.GetPolicies(policyInfo.Statements, bucket)
+	if len(bucketPolicies) != 1 {
+		return errors.Trace(minio.NotImplemented{})
+	}
+
+	prefix := bucket + "/*" // For all objects inside the bucket.
+	for policyPrefix, bucketPolicy := range bucketPolicies {
+		if policyPrefix != prefix {
+			return errors.Trace(minio.NotImplemented{})
+		}
+
+		var acl oss.ACLType
+		switch bucketPolicy {
+		case policy.BucketPolicyNone:
+			acl = oss.ACLPrivate
+		case policy.BucketPolicyReadOnly:
+			acl = oss.ACLPublicRead
+		case policy.BucketPolicyReadWrite:
+			acl = oss.ACLPublicReadWrite
+		default:
+			return errors.Trace(minio.NotImplemented{})
+		}
+
+		err := l.Client.SetBucketACL(bucket, acl)
+		if err != nil {
+			return ossToObjectError(errors.Trace(err), bucket)
+		}
+	}
+
+	return nil
+}
+
+// GetBucketPolicies will get policy on bucket.
+func (l *ossObjects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
+	result, err := l.Client.GetBucketACL(bucket)
+	if err != nil {
+		return policy.BucketAccessPolicy{}, ossToObjectError(errors.Trace(err))
+	}
+
+	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
+	switch result.ACL {
+	case string(oss.ACLPrivate):
+		// By default, all buckets start with a "private" policy.
+		return policy.BucketAccessPolicy{}, ossToObjectError(errors.Trace(minio.PolicyNotFound{}), bucket)
+	case string(oss.ACLPublicRead):
+		policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
+	case string(oss.ACLPublicReadWrite):
+		policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadWrite, bucket, "")
+	default:
+		return policy.BucketAccessPolicy{}, errors.Trace(minio.NotImplemented{})
+	}
+
+	return policyInfo, nil
+}
+
+// DeleteBucketPolicies deletes all policies on bucket.
+func (l *ossObjects) DeleteBucketPolicies(bucket string) error {
+	err := l.Client.SetBucketACL(bucket, oss.ACLPrivate)
+	if err != nil {
+		return ossToObjectError(errors.Trace(err), bucket)
+	}
+	return nil
+}
diff --git a/cmd/gateway/oss/gateway-oss_test.go b/cmd/gateway/oss/gateway-oss_test.go
new file mode 100644
index 000000000..03318ae1d
--- /dev/null
+++ b/cmd/gateway/oss/gateway-oss_test.go
@@ -0,0 +1,191 @@
+/*
+ * Minio Cloud Storage, (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package oss
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+
+	minio "github.com/minio/minio/cmd"
+	"github.com/minio/minio/pkg/errors"
+)
+
+func ossErrResponse(code string) error {
+	return errors.Trace(oss.ServiceError{
+		Code: code,
+	})
+}
+
+func TestOSSToObjectError(t *testing.T) {
+	testCases := []struct {
+		inputErr       error
+		expectedErr    error
+		bucket, object string
+	}{
+		{
+			inputErr:    ossErrResponse("BucketAlreadyExists"),
+			expectedErr: minio.BucketAlreadyOwnedByYou{},
+		},
+		{
+			inputErr:    ossErrResponse("BucketNotEmpty"),
+			expectedErr: minio.BucketNotEmpty{},
+		},
+		{
+			inputErr:    ossErrResponse("InvalidBucketName"),
+			expectedErr: minio.BucketNameInvalid{},
+		},
+		{
+			inputErr:    ossErrResponse("NoSuchBucket"),
+			expectedErr: minio.BucketNotFound{},
+		},
+		// with empty object, NoSuchKey is interpreted as BucketNotFound
+		{
+			inputErr:    ossErrResponse("NoSuchKey"),
+			expectedErr: minio.BucketNotFound{},
+		},
+		{
+			inputErr:    ossErrResponse("NoSuchUpload"),
+			expectedErr: minio.InvalidUploadID{},
+		},
+		{
+			inputErr:    ossErrResponse("InvalidObjectName"),
+			expectedErr: minio.ObjectNameInvalid{},
+		},
+		{
+			inputErr:    ossErrResponse("AccessDenied"),
+			expectedErr: minio.PrefixAccessDenied{},
+		},
+		{
+			inputErr:    ossErrResponse("NoSuchUpload"),
+			expectedErr: minio.InvalidUploadID{},
+		},
+		{
+			inputErr:    ossErrResponse("EntityTooSmall"),
+			expectedErr: minio.PartTooSmall{},
+		},
+		{
+			inputErr:    nil,
+			expectedErr: nil,
+		},
+		// Special test case for NoSuchKey with object name
+		{
+			inputErr:    ossErrResponse("NoSuchKey"),
+			expectedErr: minio.ObjectNotFound{Bucket: "bucket", Object: "object"},
+			bucket:      "bucket",
+			object:      "object",
+		},
+
+		// Special test case for error value that is not of
+		// type (*Error)
+		{
+			inputErr:    fmt.Errorf("not a *Error"),
+			expectedErr: fmt.Errorf("not a *Error"),
+		},
+	}
+
+	for i, tc := range testCases {
+		actualErr := ossToObjectError(tc.inputErr, tc.bucket, tc.object)
+		if e, ok := actualErr.(*errors.Error); ok && e.Cause != tc.expectedErr {
+			t.Errorf("Test case %d: Expected error '%v' but received error '%v'", i+1, tc.expectedErr, e.Cause)
+		}
+	}
+}
+
+func TestS3MetaToOSSOptions(t *testing.T) {
+	var err error
+	var headers map[string]string
+
+	headers = map[string]string{
+		"x-amz-meta-invalid_meta": "value",
+	}
+	_, err = appendS3MetaToOSSOptions(nil, headers)
+	if err = errors.Cause(err); err != nil {
+		if _, ok := err.(minio.UnsupportedMetadata); !ok {
+			t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err)
+		}
+	}
+
+	headers = map[string]string{
+		"accept-encoding":          "gzip",
+		"content-encoding":         "gzip",
+		"X-Amz-Meta-Hdr":           "value",
+		"X-Amz-Meta-X-test-key":    "value",
+		"X-Amz-Meta-X--test--key":  "value",
+		"X-Amz-Meta-X-Amz-Key":     "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
+		"X-Amz-Meta-X-Amz-Matdesc": "{}",
+		"X-Amz-Meta-X-Amz-Iv":      "eWmyryl8kq+EVnnsE7jpOg==",
+	}
+	opts, err := appendS3MetaToOSSOptions(nil, headers)
+	if err != nil {
+		t.Fatalf("Test failed, with %s", err)
+	}
+	if len(opts) != len(headers) {
+		t.Fatalf("Test failed, S3 metadata is not fully transformed. expected: %d, actual: %d", len(headers), len(opts))
+	}
+}
+
+func TestOSSHeaderToS3Meta(t *testing.T) {
+	meta := map[string]string{
+		"x-oss-meta-first_name":       "myname",
+		"X-OSS-Meta-x_test_key":       "value",
+		"X-Oss-Meta-x_test__key":      "value",
+		"X-Oss-Meta-x__test__key":     "value",
+		"X-Oss-Meta-x____test____key": "value",
+		"X-Oss-Meta-x_amz_key":        "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
+		"X-Oss-Meta-x_amz_matdesc":    "{}",
+		"x-oss-meta-x_amz_iv":         "eWmyryl8kq+EVnnsE7jpOg==",
+	}
+	header := make(http.Header)
+	for k, v := range meta {
+		header.Set(k, v)
+	}
+
+	expectedMeta := map[string]string{
+		"X-Amz-Meta-First-Name":    "myname",
+		"X-Amz-Meta-X-Test-Key":    "value",
+		"X-Amz-Meta-X-Test_key":    "value",
+		"X-Amz-Meta-X_test_key":    "value",
+		"X-Amz-Meta-X__test__key":  "value",
+		"X-Amz-Meta-X-Amz-Key":     "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
+		"X-Amz-Meta-X-Amz-Matdesc": "{}",
+		"X-Amz-Meta-X-Amz-Iv":      "eWmyryl8kq+EVnnsE7jpOg==",
+	}
+	actualMeta := ossHeaderToS3Meta(header)
+	for k, v := range expectedMeta {
+		if v2, ok := actualMeta[k]; !ok {
+			t.Errorf("Test failed for key %s: missing key", k)
+		} else if v != v2 {
+			t.Errorf("Test failed for key %s, expected '%s', got '%s'", k, v, v2)
+		}
+	}
+}
+
+func TestOSSBuildListObjectPartsParams(t *testing.T) {
+	expected := map[string]interface{}{
+		"uploadId":           "test",
+		"part-number-marker": "123",
+		"max-parts":          "456",
+	}
+	actual := ossBuildListObjectPartsParams("test", 123, 456)
+	if !reflect.DeepEqual(expected, actual) {
+		t.Fatalf("Test failed, expected %v, got %v", expected, actual)
+	}
+}
diff --git a/docs/gateway/oss.md b/docs/gateway/oss.md
new file mode 100644
index 000000000..77dbcc998
--- /dev/null
+++ b/docs/gateway/oss.md
@@ -0,0 +1,58 @@
+# Minio OSS Gateway [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
+Minio Gateway adds Amazon S3 compatibility to Alibaba Cloud Object Storage Service (OSS).
+
+## Run Minio Gateway for OSS
+
+### Using Docker
+```
+docker run -p 9000:9000 --name oss-s3 \
+ -e "MINIO_ACCESS_KEY=ossaccesskey" \
+ -e "MINIO_SECRET_KEY=osssecretkey" \
+ minio/minio gateway oss
+```
+
+### Using Binary
+```
+export MINIO_ACCESS_KEY=ossaccesskey
+export MINIO_SECRET_KEY=osssecretkey
+minio gateway oss
+```
+
+## Test using Minio Browser
+Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to confirm that your server has started successfully.
+
+![Screenshot](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/minio-browser-gateway.png)
+
+## Test using Minio Client `mc`
+`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, and diff. It supports filesystems and Amazon S3 compatible cloud storage services.
+
+### Configure `mc`
+```
+mc config host add myoss http://gateway-ip:9000 ossaccesskey osssecretkey
+```
+
+### List buckets on OSS
+```
+mc ls myoss
+[2017-02-22 01:50:43 PST]     0B ferenginar/
+[2017-02-26 21:43:51 PST]     0B my-bucket/
+[2017-02-26 22:10:11 PST]     0B test-bucket1/
+```
+
+### Known limitations
+
+Gateway inherits the following OSS limitations:
+
+- Bucket names containing "." are not supported.
+- Custom metadata with "_" in the key is not supported.
+
+Other limitations:
+
+- Bucket notification APIs are not supported.
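+
+## Test using the Go SDK
+Since the gateway exposes the standard S3 API, S3-compatible SDKs work against it unchanged. Below is a minimal sketch using the [`minio-go`](https://docs.minio.io/docs/golang-client-quickstart-guide) SDK to list buckets through the gateway; the endpoint and credentials are placeholders for your own setup.
+
+```go
+package main
+
+import (
+	"log"
+
+	minio "github.com/minio/minio-go"
+)
+
+func main() {
+	// Placeholders: point this at your running gateway and its credentials.
+	client, err := minio.New("gateway-ip:9000", "ossaccesskey", "osssecretkey", false)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// List all buckets visible through the gateway, like `mc ls myoss`.
+	buckets, err := client.ListBuckets()
+	if err != nil {
+		log.Fatalln(err)
+	}
+	for _, bucket := range buckets {
+		log.Println(bucket.Name, bucket.CreationDate)
+	}
+}
+```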
+ +## Explore Further + +- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide) +- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio) +- [`minfs` filesystem interface](http://docs.minio.io/docs/minfs-quickstart-guide) +- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide) diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go new file mode 100644 index 000000000..5f9bd84f2 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go @@ -0,0 +1,97 @@ +package oss + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "hash" + "io" + "net/http" + "sort" + "strings" +) + +// 用于signHeader的字典排序存放容器。 +type headerSorter struct { + Keys []string + Vals []string +} + +// 生成签名方法(直接设置请求的Header)。 +func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) { + // Get the final Authorization' string + authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource) + + // Give the parameter "Authorization" value + req.Header.Set(HTTPHeaderAuthorization, authorizationStr) +} + +func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string { + // Find out the "x-oss-"'s address in this request'header + temp := make(map[string]string) + + for k, v := range req.Header { + if strings.HasPrefix(strings.ToLower(k), "x-oss-") { + temp[strings.ToLower(k)] = v[0] + } + } + hs := newHeaderSorter(temp) + + // Sort the temp by the Ascending Order + hs.Sort() + + // Get the CanonicalizedOSSHeaders + canonicalizedOSSHeaders := "" + for i := range hs.Keys { + canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n" + } + + // Give other parameters values + // when sign url, date is expires + date := req.Header.Get(HTTPHeaderDate) + contentType := req.Header.Get(HTTPHeaderContentType) + contentMd5 := req.Header.Get(HTTPHeaderContentMD5) + + signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret)) + io.WriteString(h, signStr) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + return signedStr +} + +// Additional function for function SignHeader. +func newHeaderSorter(m map[string]string) *headerSorter { + hs := &headerSorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]string, 0, len(m)), + } + + for k, v := range m { + hs.Keys = append(hs.Keys, k) + hs.Vals = append(hs.Vals, v) + } + return hs +} + +// Additional function for function SignHeader. +func (hs *headerSorter) Sort() { + sort.Sort(hs) +} + +// Additional function for function SignHeader. +func (hs *headerSorter) Len() int { + return len(hs.Vals) +} + +// Additional function for function SignHeader. +func (hs *headerSorter) Less(i, j int) bool { + return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0 +} + +// Additional function for function SignHeader. 
+func (hs *headerSorter) Swap(i, j int) { + hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i] + hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i] +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go new file mode 100644 index 000000000..ca6cb4bc5 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go @@ -0,0 +1,958 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/xml" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "time" +) + +// Bucket implements the operations of object. +type Bucket struct { + Client Client + BucketName string +} + +// +// PutObject 新建Object,如果Object已存在,覆盖原有Object。 +// +// objectKey 上传对象的名称,使用UTF-8编码、长度必须在1-1023字节之间、不能以“/”或者“\”字符开头。 +// reader io.Reader读取object的数据。 +// options 上传对象时可以指定对象的属性,可用选项有CacheControl、ContentDisposition、ContentEncoding、 +// Expires、ServerSideEncryption、ObjectACL、Meta,具体含义请参看 +// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html +// +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error { + opts := addContentType(options, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// +// PutObjectFromFile 新建Object,内容从本地文件中读取。 +// +// objectKey 上传对象的名称。 +// filePath 本地文件,上传对象的值为该文件内容。 +// options 上传对象时可以指定对象的属性。详见PutObject的options。 +// +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error { + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + opts := addContentType(options, filePath, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: fd, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// +// DoPutObject 上传文件。 +// +// request 上传请求。 +// options 上传选项。 +// +// Response 上传请求返回值。 +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) { + isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType) + if !isOptSet { + options = addContentType(options, request.ObjectKey) + } + + listener := getProgressListener(options) + + params := map[string]interface{}{} + resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener) + if err != nil { + return nil, err + } + + if bucket.getConfig().IsEnableCRC { + err = checkCRC(resp, "DoPutObject") + if err != nil { + return resp, err + } + } + + err = checkRespCode(resp.StatusCode, []int{http.StatusOK}) + + return resp, err +} + +// +// GetObject 下载文件。 +// +// objectKey 下载的文件名称。 +// options 对象的属性限制项,可选值有Range、IfModifiedSince、IfUnmodifiedSince、IfMatch、 +// IfNoneMatch、AcceptEncoding,详细请参考 +// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html +// +// io.ReadCloser reader,读取数据后需要close。error为nil时有效。 +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) { + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return nil, err + } + return result.Response.Body, nil +} + +// +// GetObjectToFile 下载文件。 +// +// 
objectKey 下载的文件名称。 +// filePath 下载对象的内容写到该本地文件。 +// options 对象的属性限制项。详见GetObject的options。 +// +// error 操作无错误时返回error为nil,非nil为错误说明。 +// +func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error { + tempFilePath := filePath + TempFileSuffix + + // 读取Object内容 + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return err + } + defer result.Response.Body.Close() + + // 如果文件不存在则创建,存在则清空 + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // 存储数据到文件 + _, err = io.Copy(fd, result.Response.Body) + fd.Close() + if err != nil { + return err + } + + // 比较CRC值 + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + if bucket.getConfig().IsEnableCRC && !hasRange { + result.Response.ClientCRC = result.ClientCRC.Sum64() + err = checkCRC(result.Response, "GetObjectToFile") + if err != nil { + os.Remove(tempFilePath) + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// +// DoGetObject 下载文件 +// +// request 下载请求 +// options 对象的属性限制项。详见GetObject的options。 +// +// GetObjectResult 下载请求返回值。 +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) { + params := map[string]interface{}{} + resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Response: resp, + } + + // crc + var crcCalc hash.Hash64 + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + if bucket.getConfig().IsEnableCRC && !hasRange { + crcCalc = crc64.New(crcTable()) + result.ServerCRC = resp.ServerCRC + result.ClientCRC = crcCalc + } + + // progress + listener := getProgressListener(options) + + contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64) + resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil)) + + return result, nil +} + +// +// CopyObject 同一个bucket内拷贝Object。 +// +// srcObjectKey Copy的源对象。 +// destObjectKey Copy的目标对象。 +// options Copy对象时,您可以指定源对象的限制条件,满足限制条件时copy,不满足时返回错误,您可以选择如下选项CopySourceIfMatch、 +// CopySourceIfNoneMatch、CopySourceIfModifiedSince、CopySourceIfUnmodifiedSince、MetadataDirective。 +// Copy对象时,您可以指定目标对象的属性,如CacheControl、ContentDisposition、ContentEncoding、Expires、 +// ServerSideEncryption、ObjectACL、Meta,选项的含义请参看 +// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html +// +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + params := map[string]interface{}{} + resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// CopyObjectTo bucket间拷贝object。 +// +// srcObjectKey 源Object名称。源Bucket名称为Bucket.BucketName。 +// destBucketName 目标Bucket名称。 +// destObjectKey 目标Object名称。 +// options Copy选项,详见CopyObject的options。 +// +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) { + return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) 
+} + +// +// CopyObjectFrom bucket间拷贝object。 +// +// srcBucketName 源Bucket名称。 +// srcObjectKey 源Object名称。 +// destObjectKey 目标Object名称。目标Bucket名称为Bucket.BucketName。 +// options Copy选项,详见CopyObject的options。 +// +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + destBucketName := bucket.BucketName + var out CopyObjectResult + srcBucket, err := bucket.Client.Bucket(srcBucketName) + if err != nil { + return out, err + } + + return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) +} + +func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + headers := make(map[string]string) + err := handleOptions(headers, options) + if err != nil { + return out, err + } + params := map[string]interface{}{} + resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// AppendObject 追加方式上传。 +// +// AppendObject参数必须包含position,其值指定从何处进行追加。首次追加操作的position必须为0, +// 后续追加操作的position是Object的当前长度。例如,第一次Append Object请求指定position值为0, +// content-length是65536;那么,第二次Append Object需要指定position为65536。 +// 每次操作成功后,响应头部x-oss-next-append-position也会标明下一次追加的position。 +// +// objectKey 需要追加的Object。 +// reader io.Reader,读取追的内容。 +// appendPosition object追加的起始位置。 +// destObjectProperties 第一次追加时指定新对象的属性,如CacheControl、ContentDisposition、ContentEncoding、 +// Expires、ServerSideEncryption、ObjectACL。 +// +// int64 下次追加的开始位置,error为nil空时有效。 +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) { + request := &AppendObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + Position: appendPosition, + } + + result, err := bucket.DoAppendObject(request, options) + if err != nil { + return appendPosition, err + } + + return result.NextPosition, err +} + +// +// DoAppendObject 追加上传。 +// +// request 追加上传请求。 +// options 追加上传选项。 +// +// AppendObjectResult 追加上传请求返回值。 +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) { + params := map[string]interface{}{} + params["append"] = nil + params["position"] = strconv.FormatInt(request.Position, 10) + headers := make(map[string]string) + + opts := addContentType(options, request.ObjectKey) + handleOptions(headers, opts) + + var initCRC uint64 + isCRCSet, initCRCOpt, _ := isOptionSet(options, initCRC64) + if isCRCSet { + initCRC = initCRCOpt.(uint64) + } + + listener := getProgressListener(options) + + handleOptions(headers, opts) + resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers, + request.Reader, initCRC, listener) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64) + result := &AppendObjectResult{ + NextPosition: nextPosition, + CRC: resp.ServerCRC, + } + + if bucket.getConfig().IsEnableCRC && isCRCSet { + err = checkCRC(resp, "AppendObject") + if err != nil { + return result, err + } + } + + return result, nil +} + +// +// DeleteObject 
删除Object。 +// +// objectKey 待删除Object。 +// +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) DeleteObject(objectKey string) error { + params := map[string]interface{}{} + resp, err := bucket.do("DELETE", objectKey, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// +// DeleteObjects 批量删除object。 +// +// objectKeys 待删除object类表。 +// options 删除选项,DeleteObjectsQuiet,是否是安静模式,默认不使用。 +// +// DeleteObjectsResult 非安静模式的的返回值。 +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) { + out := DeleteObjectsResult{} + dxml := deleteXML{} + for _, key := range objectKeys { + dxml.Objects = append(dxml.Objects, DeleteObject{Key: key}) + } + isQuiet, _ := findOption(options, deleteObjectsQuiet, false) + dxml.Quiet = isQuiet.(bool) + + bs, err := xml.Marshal(dxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + sum := md5.Sum(bs) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + options = append(options, ContentMD5(b64)) + + params := map[string]interface{}{} + params["delete"] = nil + params["encoding-type"] = "url" + + resp, err := bucket.do("POST", "", params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + if !dxml.Quiet { + if err = xmlUnmarshal(resp.Body, &out); err == nil { + err = decodeDeleteObjectsResult(&out) + } + } + return out, err +} + +// +// IsObjectExist object是否存在。 +// +// bool object是否存在,true存在,false不存在。error为nil时有效。 +// +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) { + _, err := bucket.GetObjectMeta(objectKey) + if err == nil { + return true, nil + } + + switch err.(type) { + case ServiceError: + if err.(ServiceError).StatusCode == 404 && err.(ServiceError).Code == "NoSuchKey" { + return false, nil + } + } + + return false, err +} + +// +// ListObjects 获得Bucket下筛选后所有的object的列表。 +// +// options ListObject的筛选行为。Prefix指定的前缀、MaxKeys最大数目、Marker第一个开始、Delimiter对Object名字进行分组的字符。 +// +// 您有如下8个object,my-object-1, my-object-11, my-object-2, my-object-21, +// my-object-22, my-object-3, my-object-31, my-object-32。如果您指定了Prefix为my-object-2, +// 则返回my-object-2, my-object-21, my-object-22三个object。如果您指定了Marker为my-object-22, +// 则返回my-object-3, my-object-31, my-object-32三个object。如果您指定MaxKeys则每次最多返回MaxKeys个, +// 最后一次可能不足。这三个参数可以组合使用,实现分页等功能。如果把prefix设为某个文件夹名,就可以罗列以此prefix开头的文件, +// 即该文件夹下递归的所有的文件和子文件夹。如果再把delimiter设置为"/"时,返回值就只罗列该文件夹下的文件,该文件夹下的子文件名 +// 返回在CommonPrefixes部分,子文件夹下递归的文件和文件夹不被显示。例如一个bucket存在三个object,fun/test.jpg、 +// fun/movie/001.avi、fun/movie/007.avi。若设定prefix为"fun/",则返回三个object;如果增加设定 +// delimiter为"/",则返回文件"fun/test.jpg"和前缀"fun/movie/",即实现了文件夹的逻辑。 +// +// 常用场景,请参数示例sample/list_object.go。 +// +// ListObjectsResponse 操作成功后的返回值,成员Objects为bucket中对象列表。error为nil时该返回值有效。 +// +func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) { + var out ListObjectsResult + + options = append(options, EncodingType("url")) + params, err := getRawParams(options) + if err != nil { + return out, err + } + + resp, err := bucket.do("GET", "", params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + + err = 
+//
+// SetObjectMeta sets the metadata of an object.
+//
+// objectKey   object name.
+// options     object properties; the available options are CacheControl,
+//             ContentDisposition, ContentEncoding, Expires, ServerSideEncryption
+//             and Meta.
+//
+// error       nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error {
+    options = append(options, MetadataDirective(MetaReplace))
+    _, err := bucket.CopyObject(objectKey, objectKey, options...)
+    return err
+}
+
+//
+// GetObjectDetailedMeta queries the object's header information.
+//
+// objectKey   object name.
+// options     constraints on the object; the request succeeds when they are met and
+//             fails otherwise. Available constraints are IfModifiedSince,
+//             IfUnmodifiedSince, IfMatch and IfNoneMatch. For details see
+//             https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html
+//
+// http.Header   the object's metadata; valid when error is nil.
+// error         nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
+    params := map[string]interface{}{}
+    resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+
+    return resp.Headers, nil
+}
+
+//
+// GetObjectMeta queries the object's header information.
+//
+// GetObjectMeta is lighter than GetObjectDetailedMeta: it returns only a few basic
+// metadata entries of the object, namely its ETag, Size (the object size, taken from
+// the Content-Length response header) and LastModified.
+//
+// objectKey   object name.
+//
+// http.Header   the object's metadata; valid when error is nil.
+// error         nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) GetObjectMeta(objectKey string) (http.Header, error) {
+    params := map[string]interface{}{}
+    params["objectMeta"] = nil
+    resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+
+    return resp.Headers, nil
+}
+
+//
+// SetObjectACL changes the ACL of an object.
+//
+// Only the bucket owner may call PutObjectACL to change an object's ACL. An object
+// ACL takes precedence over the bucket ACL: if the bucket ACL is private but the
+// object ACL is public-read-write, the object ACL is checked first, so every user may
+// access the object even though the bucket is private. If an object has never had an
+// ACL set, access follows the bucket ACL.
+//
+// Object read operations include GetObject, HeadObject, CopyObject and the read of
+// the source object in UploadPartCopy. Object write operations include PutObject,
+// PostObject, AppendObject, DeleteObject, DeleteMultipleObjects,
+// CompleteMultipartUpload, and the write of the new object in CopyObject.
+//
+// objectKey   object whose ACL is set.
+// objectACL   the object ACL: ACLPrivate (private read/write), ACLPublicRead (public
+//             read, private write) or ACLPublicReadWrite (public read/write).
+//
+// error       nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
+    options := []Option{ObjectACL(objectACL)}
+    params := map[string]interface{}{}
+    params["acl"] = nil
+    resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetObjectACL gets the ACL of an object.
+//
+// objectKey   object whose ACL is read.
+//
+// GetObjectACLResult   result of the operation; valid when error is nil. Its ACL
+//                      field is the object's ACL.
+// error                nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
+    var out GetObjectACLResult
+    params := map[string]interface{}{}
+    params["acl"] = nil
+    resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
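A minimal sketch of the ACL round trip, reusing `bucket` from the first sketch; the field name ACL on the result is an assumption about this SDK's type.go:

if err := bucket.SetObjectACL("public/banner.png", oss.ACLPublicRead); err != nil {
    log.Fatal(err)
}
aclRes, err := bucket.GetObjectACL("public/banner.png")
if err != nil {
    log.Fatal(err)
}
fmt.Println("object ACL:", aclRes.ACL) // expected: public-read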
+//
+// PutSymlink creates a symlink.
+//
+// The target of a symlink must not itself be a symlink. When the symlink is created,
+// no checks are made on whether the target exists, whether its type is valid, or
+// whether the caller may access it; all of those checks are deferred to APIs such as
+// GetObject that actually access the target. If the symlink object already exists
+// and is writable, the new object overwrites it. Parameters prefixed with
+// x-oss-meta- in a PutSymlink request are treated as user metadata.
+//
+// symObjectKey      symlink object to create.
+// targetObjectKey   target object.
+//
+// error             nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
+    options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
+    params := map[string]interface{}{}
+    params["symlink"] = nil
+    resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetSymlink reads the target object of a symlink.
+// Returns 404 if the symlink does not exist.
+//
+// objectKey   symlink object whose target is read.
+//
+// error   nil if the operation succeeds, otherwise the error. When error is nil, the
+//         returned headers carry the target object; otherwise they are invalid.
+//
+func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
+    params := map[string]interface{}{}
+    params["symlink"] = nil
+    resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+
+    targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
+    targetObjectKey, err = url.QueryUnescape(targetObjectKey)
+    if err != nil {
+        return resp.Headers, err
+    }
+    resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+    return resp.Headers, err
+}
+
+//
+// RestoreObject restores a frozen Archive object into a readable state.
+//
+// An Archive object starts out frozen.
+//
+// Calling restore on a frozen object succeeds and the service starts thawing it;
+// calling restore again while the thaw is in progress also succeeds and does not
+// extend the readable period. Once the service finishes the thaw, the object becomes
+// readable. The thawed state lasts one day by default; each further restore call
+// extends it by one day, up to seven days, after which the object returns to the
+// frozen state.
+//
+// objectKey   name of the object to restore.
+//
+// error       nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) RestoreObject(objectKey string) error {
+    params := map[string]interface{}{}
+    params["restore"] = nil
+    resp, err := bucket.do("POST", objectKey, params, nil, nil, nil)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+//
+// SignURL signs a URL.
+//
+// objectKey      object the URL grants access to.
+// method         HTTP method the URL is signed for.
+// expiredInSec   validity period in seconds.
+// options        signing options.
+//
+// string   the signed URL; valid when error is nil.
+// error    nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
+    if expiredInSec < 0 {
+        return "", fmt.Errorf("invalid expires: %d, expires must not be negative", expiredInSec)
+    }
+    expiration := time.Now().Unix() + expiredInSec
+
+    params, err := getRawParams(options)
+    if err != nil {
+        return "", err
+    }
+
+    headers := make(map[string]string)
+    err = handleOptions(headers, options)
+    if err != nil {
+        return "", err
+    }
+
+    return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
+}
+
+//
+// PutObjectWithURL creates an object through a signed URL, overwriting any existing
+// object of the same name. PutObjectWithURL does not derive a MIME type from the key.
+//
+// signedURL   signed URL.
+// reader      io.Reader supplying the object data.
+// options     object properties: CacheControl, ContentDisposition, ContentEncoding,
+//             Expires, ServerSideEncryption, ObjectACL and Meta. For details see
+//             https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error       nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
+    resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+
+    return err
+}
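A minimal sketch of uploading through a pre-signed URL, reusing `bucket` from the first sketch; the object key is a placeholder:

signedURL, err := bucket.SignURL("upload/report.txt", oss.HTTPPut, 600) // valid for 10 minutes
if err != nil {
    log.Fatal(err)
}
if err := bucket.PutObjectWithURL(signedURL, strings.NewReader("hello")); err != nil {
    log.Fatal(err)
}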
+//
+// PutObjectFromFileWithURL creates an object through a signed URL with its content
+// read from a local file. PutObjectFromFileWithURL does not derive a MIME type from
+// the key or filePath.
+//
+// signedURL   signed URL.
+// filePath    local file, e.g. dir/file.txt; its content becomes the object's value.
+// options     object properties; see the options of PutObject for details.
+//
+// error       nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
+    fd, err := os.Open(filePath)
+    if err != nil {
+        return err
+    }
+    defer fd.Close()
+
+    resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+
+    return err
+}
+
+//
+// DoPutObjectWithURL uploads an object through a signed URL.
+//
+// signedURL   signed URL.
+// reader      io.Reader supplying the object data.
+// options     upload options.
+//
+// Response   response of the upload request.
+// error      nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
+    listener := getProgressListener(options)
+
+    params := map[string]interface{}{}
+    resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
+    if err != nil {
+        return nil, err
+    }
+
+    if bucket.getConfig().IsEnableCRC {
+        err = checkCRC(resp, "DoPutObjectWithURL")
+        if err != nil {
+            return resp, err
+        }
+    }
+
+    err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
+
+    return resp, err
+}
+
+//
+// GetObjectWithURL downloads an object through a signed URL.
+//
+// signedURL   signed URL.
+// options     constraints on the object: Range, IfModifiedSince, IfUnmodifiedSince,
+//             IfMatch, IfNoneMatch and AcceptEncoding. For details see
+//             https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html
+//
+// io.ReadCloser   reader for the data; close it after reading. Valid when error is nil.
+// error           nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) {
+    result, err := bucket.DoGetObjectWithURL(signedURL, options)
+    if err != nil {
+        return nil, err
+    }
+    return result.Response.Body, nil
+}
+
+//
+// GetObjectToFileWithURL downloads an object through a signed URL into a local file.
+//
+// signedURL   signed URL.
+// filePath    local file the downloaded content is written to.
+// options     constraints on the object; see the options of GetObject for details.
+//
+// error       nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error {
+    tempFilePath := filePath + TempFileSuffix
+
+    // Read the object's content.
+    result, err := bucket.DoGetObjectWithURL(signedURL, options)
+    if err != nil {
+        return err
+    }
+    defer result.Response.Body.Close()
+
+    // Create the file if it does not exist; truncate it if it does.
+    fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode)
+    if err != nil {
+        return err
+    }
+
+    // Write the data to the file.
+    _, err = io.Copy(fd, result.Response.Body)
+    fd.Close()
+    if err != nil {
+        return err
+    }
+
+    // Compare the CRC values.
+    hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
+    if bucket.getConfig().IsEnableCRC && !hasRange {
+        result.Response.ClientCRC = result.ClientCRC.Sum64()
+        err = checkCRC(result.Response, "GetObjectToFileWithURL")
+        if err != nil {
+            os.Remove(tempFilePath)
+            return err
+        }
+    }
+
+    return os.Rename(tempFilePath, filePath)
+}
+//
+// DoGetObjectWithURL downloads an object through a signed URL.
+//
+// signedURL   signed URL.
+// options     constraints on the object; see the options of GetObject for details.
+//
+// GetObjectResult   result of the download request.
+// error             nil if the operation succeeds, otherwise the error.
+//
+func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) {
+    params := map[string]interface{}{}
+    resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    result := &GetObjectResult{
+        Response: resp,
+    }
+
+    // crc
+    var crcCalc hash.Hash64
+    hasRange, _, _ := isOptionSet(options, HTTPHeaderRange)
+    if bucket.getConfig().IsEnableCRC && !hasRange {
+        crcCalc = crc64.New(crcTable())
+        result.ServerCRC = resp.ServerCRC
+        result.ClientCRC = crcCalc
+    }
+
+    // progress
+    listener := getProgressListener(options)
+
+    contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64)
+    resp.Body = ioutil.NopCloser(TeeReader(resp.Body, crcCalc, contentLen, listener, nil))
+
+    return result, nil
+}
+
+// Private
+func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
+    data io.Reader, listener ProgressListener) (*Response, error) {
+    headers := make(map[string]string)
+    err := handleOptions(headers, options)
+    if err != nil {
+        return nil, err
+    }
+    return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
+        params, headers, data, 0, listener)
+}
+
+func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
+    data io.Reader, listener ProgressListener) (*Response, error) {
+    headers := make(map[string]string)
+    err := handleOptions(headers, options)
+    if err != nil {
+        return nil, err
+    }
+    return bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
+}
+
+// getConfig gets the bucket's client configuration.
+func (bucket Bucket) getConfig() *Config {
+    return bucket.Client.Config
+}
+
+// addContentType prepends a ContentType option inferred from the keys' file
+// extensions, falling back to application/octet-stream.
+func addContentType(options []Option, keys ...string) []Option {
+    typ := TypeByExtension("")
+    for _, key := range keys {
+        typ = TypeByExtension(key)
+        if typ != "" {
+            break
+        }
+    }
+
+    if typ == "" {
+        typ = "application/octet-stream"
+    }
+
+    opts := []Option{ContentType(typ)}
+    opts = append(opts, options...)
+
+    return opts
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
new file mode 100644
index 000000000..9f7dac9da
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
@@ -0,0 +1,800 @@
+// Package oss implements functions for accessing the OSS service.
+// It has two main structs, Client and Bucket.
+package oss
+
+import (
+    "bytes"
+    "encoding/xml"
+    "io"
+    "net/http"
+    "strings"
+    "time"
+)
+
+//
+// Client is the entry point of the SDK. Its methods perform bucket-level operations
+// such as creating or deleting buckets and getting or setting the ACL, lifecycle,
+// referer, logging and website configurations. Objects are uploaded and downloaded
+// through Bucket. Use oss.New to create a Client.
+//
+type (
+    // Client oss client
+    Client struct {
+        Config *Config // OSS client configuration
+        Conn   *Conn   // sends the HTTP requests
+    }
+
+    // ClientOption client option such as UseCname, Timeout, SecurityToken.
+    ClientOption func(*Client)
+)
+
+//
+// New creates a new Client.
+//
+// endpoint          access domain of the datacenter hosting the user's buckets, e.g.
+//                   http://oss-cn-hangzhou.aliyuncs.com.
+// accessKeyID       user identifier.
+// accessKeySecret   user secret.
+//
+// Client   the new Client; valid when error is nil.
+// error    nil if the operation succeeds, otherwise the error.
+//
+func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) {
+    // configuration
+    config := getDefaultOssConfig()
+    config.Endpoint = endpoint
+    config.AccessKeyID = accessKeyID
+    config.AccessKeySecret = accessKeySecret
+
+    // url parse
+    url := &urlMaker{}
+    url.Init(config.Endpoint, config.IsCname, config.IsUseProxy)
+
+    // http connect
+    conn := &Conn{config: config, url: url}
+
+    // oss client
+    client := &Client{
+        config,
+        conn,
+    }
+
+    // client options parse
+    for _, option := range options {
+        option(client)
+    }
+
+    // create http connect
+    err := conn.init(config, url)
+
+    return client, err
+}
+
+//
+// Bucket obtains an instance of the bucket.
+//
+// bucketName   bucket name.
+//
+// Bucket   the new Bucket; valid when error is nil.
+// error    nil if the operation succeeds, otherwise the error.
+//
+func (client Client) Bucket(bucketName string) (*Bucket, error) {
+    return &Bucket{
+        client,
+        bucketName,
+    }, nil
+}
+
+//
+// CreateBucket creates a bucket.
+//
+// bucketName   bucket name; globally unique across OSS and immutable. It may contain
+//              only lowercase letters, digits and dashes, must start with a lowercase
+//              letter or a digit, and must be 3-255 bytes long.
+// options      creation options. The ACL option sets the bucket's access permission:
+//              private read/write (ACLPrivate), public read with private write
+//              (ACLPublicRead) or public read/write (ACLPublicReadWrite); the default
+//              is private read/write. The StorageClass option sets the storage
+//              class; the supported classes are standard (StorageStandard),
+//              infrequent access (StorageIA) and archive (StorageArchive).
+//
+// error        nil if the operation succeeds, otherwise the error.
+//
+func (client Client) CreateBucket(bucketName string, options ...Option) error {
+    headers := make(map[string]string)
+    handleOptions(headers, options)
+
+    buffer := new(bytes.Buffer)
+
+    isOptSet, val, _ := isOptionSet(options, storageClass)
+    if isOptSet {
+        cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)}
+        bs, err := xml.Marshal(cbConfig)
+        if err != nil {
+            return err
+        }
+        buffer.Write(bs)
+
+        contentType := http.DetectContentType(buffer.Bytes())
+        headers[HTTPHeaderContentType] = contentType
+    }
+
+    params := map[string]interface{}{}
+    resp, err := client.do("PUT", bucketName, params, headers, buffer)
+    if err != nil {
+        return err
+    }
+
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// ListBuckets lists the buckets of the current user.
+//
+// options   filtering behavior via the Prefix, Marker and MaxKeys options. Prefix
+//           limits the name prefix, Marker starts the listing after the given name,
+//           and MaxKeys caps the number of entries per response (default 100).
+//           For common scenarios see the sample program list_bucket.go.
+//
+// ListBucketsResult   result of the operation; valid when error is nil.
+// error               nil if the operation succeeds, otherwise the error.
+//
+func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) {
+    var out ListBucketsResult
+
+    params, err := getRawParams(options)
+    if err != nil {
+        return out, err
+    }
+
+    resp, err := client.do("GET", "", params, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
+
+//
+// IsBucketExist checks whether a bucket exists.
+//
+// bucketName   bucket name.
+//
+// bool    whether the bucket exists; valid when error is nil.
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) IsBucketExist(bucketName string) (bool, error) {
+    listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1))
+    if err != nil {
+        return false, err
+    }
+
+    if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName {
+        return true, nil
+    }
+    return false, nil
+}
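A minimal sketch of creating a bucket with creation options, reusing `client` from the first sketch and assuming the ACL and StorageClass option constructors from this SDK's option.go; the bucket name is a placeholder:

err = client.CreateBucket("my-bucket",
    oss.ACL(oss.ACLPublicRead),            // public read, private write
    oss.StorageClass(oss.StorageStandard), // standard storage class
)
if err != nil {
    log.Fatal(err)
}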
+//
+// DeleteBucket deletes an empty bucket. If the bucket is not empty, remove its
+// objects and uploads first.
+//
+// bucketName   bucket name.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) DeleteBucket(bucketName string) error {
+    params := map[string]interface{}{}
+    resp, err := client.do("DELETE", bucketName, params, nil, nil)
+    if err != nil {
+        return err
+    }
+
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLocation gets the datacenter location of the bucket.
+//
+// For details on access domains and datacenters see
+// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html
+//
+// bucketName   bucket name.
+//
+// string   the bucket's datacenter location.
+// error    nil if the operation succeeds, otherwise the error.
+//
+func (client Client) GetBucketLocation(bucketName string) (string, error) {
+    params := map[string]interface{}{}
+    params["location"] = nil
+    resp, err := client.do("GET", bucketName, params, nil, nil)
+    if err != nil {
+        return "", err
+    }
+    defer resp.Body.Close()
+
+    var LocationConstraint string
+    err = xmlUnmarshal(resp.Body, &LocationConstraint)
+    return LocationConstraint, err
+}
+
+//
+// SetBucketACL changes the access permission of the bucket.
+//
+// bucketName   bucket name.
+// bucketACL    the bucket ACL: private read/write (ACLPrivate), public read with
+//              private write (ACLPublicRead) or public read/write
+//              (ACLPublicReadWrite).
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error {
+    headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)}
+    params := map[string]interface{}{}
+    resp, err := client.do("PUT", bucketName, params, headers, nil)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetBucketACL gets the access permission of the bucket.
+//
+// bucketName   bucket name.
+//
+// GetBucketACLResult   result of the operation; valid when error is nil.
+// error                nil if the operation succeeds, otherwise the error.
+//
+func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) {
+    var out GetBucketACLResult
+    params := map[string]interface{}{}
+    params["acl"] = nil
+    resp, err := client.do("GET", bucketName, params, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
+
+//
+// SetBucketLifecycle modifies the bucket's lifecycle configuration.
+//
+// OSS manages objects through lifecycle rules: the owner defines a lifecycle
+// configuration for a bucket to set rules for its objects, and once the lifecycle is
+// enabled, OSS periodically and automatically deletes the objects matching the
+// rules. The owner sets the configuration with SetBucketLifecycle. For more on
+// lifecycles see
+// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html
+//
+// bucketName   bucket name.
+// rules        list of lifecycle rules. A rule expires objects either at an absolute
+//              year/month/day date or after a relative number of days. For usage see
+//              the sample program sample/bucket_lifecycle.go.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error {
+    lxml := lifecycleXML{Rules: convLifecycleRule(rules)}
+    bs, err := xml.Marshal(lxml)
+    if err != nil {
+        return err
+    }
+    buffer := new(bytes.Buffer)
+    buffer.Write(bs)
+
+    contentType := http.DetectContentType(buffer.Bytes())
+    headers := map[string]string{}
+    headers[HTTPHeaderContentType] = contentType
+
+    params := map[string]interface{}{}
+    params["lifecycle"] = nil
+    resp, err := client.do("PUT", bucketName, params, headers, buffer)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
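A rough sketch of a relative-expiry lifecycle rule, reusing `client` from the first sketch and assuming the BuildLifecycleRuleByDays helper from this SDK's type.go; the rule ID, prefix and retention are placeholders:

// Expire everything under logs/ thirty days after creation (assumed helper).
rule := oss.BuildLifecycleRuleByDays("rule1", "logs/", true, 30)
if err := client.SetBucketLifecycle("my-bucket", []oss.LifecycleRule{rule}); err != nil {
    log.Fatal(err)
}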
+//
+// DeleteBucketLifecycle removes the bucket's lifecycle configuration.
+//
+// bucketName   bucket name.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) DeleteBucketLifecycle(bucketName string) error {
+    params := map[string]interface{}{}
+    params["lifecycle"] = nil
+    resp, err := client.do("DELETE", bucketName, params, nil, nil)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLifecycle gets the bucket's lifecycle configuration.
+//
+// bucketName   bucket name.
+//
+// GetBucketLifecycleResult   result of the operation; valid when error is nil. Rules
+//                            is the list of rules on the bucket.
+// error                      nil if the operation succeeds, otherwise the error.
+//
+func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) {
+    var out GetBucketLifecycleResult
+    params := map[string]interface{}{}
+    params["lifecycle"] = nil
+    resp, err := client.do("GET", bucketName, params, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
+
+//
+// SetBucketReferer sets the bucket's referer whitelist and whether requests with an
+// empty referer are allowed.
+//
+// To keep data on OSS from being leeched, OSS supports hotlink protection based on
+// the referer field of the HTTP header. Through the OSS console or the API you can
+// set a referer whitelist for a bucket and choose whether requests with an empty
+// referer may access it. For example, for a bucket named oss-example with the
+// whitelist set to http://www.aliyun.com, only requests whose referer is
+// http://www.aliyun.com may access the bucket's objects. For more information see
+// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html
+//
+// bucketName          bucket name.
+// referers            whitelist; a bucket supports multiple referer entries, and the
+//                     entries support the wildcards "*" and "?". For usage see the
+//                     sample sample/bucket_referer.go.
+// allowEmptyReferer   whether requests with an empty referer are allowed; defaults
+//                     to true.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error {
+    rxml := RefererXML{}
+    rxml.AllowEmptyReferer = allowEmptyReferer
+    if referers == nil {
+        rxml.RefererList = append(rxml.RefererList, "")
+    } else {
+        for _, referer := range referers {
+            rxml.RefererList = append(rxml.RefererList, referer)
+        }
+    }
+
+    bs, err := xml.Marshal(rxml)
+    if err != nil {
+        return err
+    }
+    buffer := new(bytes.Buffer)
+    buffer.Write(bs)
+
+    contentType := http.DetectContentType(buffer.Bytes())
+    headers := map[string]string{}
+    headers[HTTPHeaderContentType] = contentType
+
+    params := map[string]interface{}{}
+    params["referer"] = nil
+    resp, err := client.do("PUT", bucketName, params, headers, buffer)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// GetBucketReferer gets the bucket's referer whitelist.
+//
+// bucketName   bucket name.
+//
+// GetBucketRefererResult   result of the operation; valid when error is nil.
+// error                    nil if the operation succeeds, otherwise the error.
+//
+func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) {
+    var out GetBucketRefererResult
+    params := map[string]interface{}{}
+    params["referer"] = nil
+    resp, err := client.do("GET", bucketName, params, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
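A minimal sketch of a referer whitelist that also rejects empty referers, reusing `client` from the first sketch; the domains are placeholders:

err = client.SetBucketReferer("my-bucket",
    []string{"http://www.example.com", "http://*.example.org"}, // "*" and "?" wildcards are supported
    false, // reject requests with an empty Referer header
)
if err != nil {
    log.Fatal(err)
}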
+//
+// SetBucketLogging modifies the bucket's logging configuration.
+//
+// OSS can record access logs automatically. Once logging is enabled for a bucket,
+// OSS collects the access requests of that bucket hour by hour and, following a
+// fixed naming scheme, writes them as objects into a bucket of your choice. For more
+// see https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html
+//
+// bucketName     bucket whose accesses are logged.
+// targetBucket   bucket the access logs are written to.
+// targetPrefix   prefix of the log objects stored in targetBucket; when empty, the
+//                accesses of all objects are logged.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string,
+    isEnable bool) error {
+    var err error
+    var bs []byte
+    if isEnable {
+        lxml := LoggingXML{}
+        lxml.LoggingEnabled.TargetBucket = targetBucket
+        lxml.LoggingEnabled.TargetPrefix = targetPrefix
+        bs, err = xml.Marshal(lxml)
+    } else {
+        lxml := loggingXMLEmpty{}
+        bs, err = xml.Marshal(lxml)
+    }
+
+    if err != nil {
+        return err
+    }
+
+    buffer := new(bytes.Buffer)
+    buffer.Write(bs)
+
+    contentType := http.DetectContentType(buffer.Bytes())
+    headers := map[string]string{}
+    headers[HTTPHeaderContentType] = contentType
+
+    params := map[string]interface{}{}
+    params["logging"] = nil
+    resp, err := client.do("PUT", bucketName, params, headers, buffer)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketLogging removes the bucket's logging configuration.
+//
+// bucketName   bucket whose logging configuration is removed.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) DeleteBucketLogging(bucketName string) error {
+    params := map[string]interface{}{}
+    params["logging"] = nil
+    resp, err := client.do("DELETE", bucketName, params, nil, nil)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketLogging gets the bucket's logging configuration.
+//
+// bucketName   bucket whose logging configuration is read.
+//
+// GetBucketLoggingResult   result of the operation; valid when error is nil.
+// error                    nil if the operation succeeds, otherwise the error.
+//
+func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) {
+    var out GetBucketLoggingResult
+    params := map[string]interface{}{}
+    params["logging"] = nil
+    resp, err := client.do("GET", bucketName, params, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
+
+//
+// SetBucketWebsite sets or modifies the bucket's default index page and error page.
+//
+// OSS supports static website hosting: the Website operation switches a bucket into
+// static hosting mode. For more see
+// https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html
+//
+// bucketName      bucket to configure.
+// indexDocument   index document.
+// errorDocument   error document.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error {
+    wxml := WebsiteXML{}
+    wxml.IndexDocument.Suffix = indexDocument
+    wxml.ErrorDocument.Key = errorDocument
+
+    bs, err := xml.Marshal(wxml)
+    if err != nil {
+        return err
+    }
+    buffer := new(bytes.Buffer)
+    buffer.Write(bs)
+
+    contentType := http.DetectContentType(buffer.Bytes())
+    headers := make(map[string]string)
+    headers[HTTPHeaderContentType] = contentType
+
+    params := map[string]interface{}{}
+    params["website"] = nil
+    resp, err := client.do("PUT", bucketName, params, headers, buffer)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketWebsite removes the bucket's website configuration.
+//
+// bucketName   bucket whose website configuration is removed.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) DeleteBucketWebsite(bucketName string) error {
+    params := map[string]interface{}{}
+    params["website"] = nil
+    resp, err := client.do("DELETE", bucketName, params, nil, nil)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
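A minimal sketch of enabling static hosting with an index page and an error page, reusing `client` from the first sketch:

if err := client.SetBucketWebsite("my-bucket", "index.html", "error.html"); err != nil {
    log.Fatal(err)
}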
+//
+// GetBucketWebsite gets the bucket's default index page and error page.
+//
+// bucketName   bucket name.
+//
+// GetBucketWebsiteResult   result of the operation; valid when error is nil.
+// error                    nil if the operation succeeds, otherwise the error.
+//
+func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) {
+    var out GetBucketWebsiteResult
+    params := map[string]interface{}{}
+    params["website"] = nil
+    resp, err := client.do("GET", bucketName, params, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
+
+//
+// SetBucketCORS sets the bucket's cross-origin resource sharing (CORS) rules.
+//
+// For more on cross-origin access see
+// https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html
+//
+// bucketName   bucket to configure.
+// corsRules    CORS rules to set. For usage see the sample code sample/bucket_cors.go.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error {
+    corsxml := CORSXML{}
+    for _, v := range corsRules {
+        cr := CORSRule{}
+        cr.AllowedMethod = v.AllowedMethod
+        cr.AllowedOrigin = v.AllowedOrigin
+        cr.AllowedHeader = v.AllowedHeader
+        cr.ExposeHeader = v.ExposeHeader
+        cr.MaxAgeSeconds = v.MaxAgeSeconds
+        corsxml.CORSRules = append(corsxml.CORSRules, cr)
+    }
+
+    bs, err := xml.Marshal(corsxml)
+    if err != nil {
+        return err
+    }
+    buffer := new(bytes.Buffer)
+    buffer.Write(bs)
+
+    contentType := http.DetectContentType(buffer.Bytes())
+    headers := map[string]string{}
+    headers[HTTPHeaderContentType] = contentType
+
+    params := map[string]interface{}{}
+    params["cors"] = nil
+    resp, err := client.do("PUT", bucketName, params, headers, buffer)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+//
+// DeleteBucketCORS removes the bucket's CORS rules.
+//
+// bucketName   bucket whose CORS rules are removed.
+//
+// error   nil if the operation succeeds, otherwise the error.
+//
+func (client Client) DeleteBucketCORS(bucketName string) error {
+    params := map[string]interface{}{}
+    params["cors"] = nil
+    resp, err := client.do("DELETE", bucketName, params, nil, nil)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+    return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// GetBucketCORS gets the bucket's CORS rules.
+//
+// bucketName   bucket name.
+//
+// GetBucketCORSResult   result of the operation; valid when error is nil.
+// error                 nil if the operation succeeds, otherwise the error.
+//
+func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) {
+    var out GetBucketCORSResult
+    params := map[string]interface{}{}
+    params["cors"] = nil
+    resp, err := client.do("GET", bucketName, params, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
+
+//
+// GetBucketInfo gets the bucket's information.
+//
+// bucketName   bucket name.
+//
+// GetBucketInfoResult   result of the operation; valid when error is nil.
+// error                 nil if the operation succeeds, otherwise the error.
+//
+func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) {
+    var out GetBucketInfoResult
+    params := map[string]interface{}{}
+    params["bucketInfo"] = nil
+    resp, err := client.do("GET", bucketName, params, nil, nil)
+    if err != nil {
+        return out, err
+    }
+    defer resp.Body.Close()
+
+    err = xmlUnmarshal(resp.Body, &out)
+    return out, err
+}
+
+//
+// UseCname sets whether the endpoint is a CNAME; off by default.
+//
+// isUseCname   true if the endpoint is in CNAME format, false otherwise; defaults
+//              to false.
+//
+func UseCname(isUseCname bool) ClientOption {
+    return func(client *Client) {
+        client.Config.IsCname = isUseCname
+        client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+    }
+}
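A minimal sketch of a single CORS rule via SetBucketCORS above, reusing `client` from the first sketch; origins and headers are placeholders:

rule := oss.CORSRule{
    AllowedOrigin: []string{"https://www.example.com"},
    AllowedMethod: []string{"GET", "PUT"},
    AllowedHeader: []string{"Authorization"},
    ExposeHeader:  []string{"x-oss-request-id"},
    MaxAgeSeconds: 300, // preflight cache lifetime
}
if err := client.SetBucketCORS("my-bucket", []oss.CORSRule{rule}); err != nil {
    log.Fatal(err)
}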
+//
+// Timeout sets the HTTP timeouts.
+//
+// connectTimeoutSec   HTTP connect timeout in seconds (see getDefaultOssConfig for
+//                     the default); 0 means no timeout.
+// readWriteTimeout    HTTP send/receive timeout in seconds (see getDefaultOssConfig
+//                     for the default); 0 means no timeout.
+//
+func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption {
+    return func(client *Client) {
+        client.Config.HTTPTimeout.ConnectTimeout =
+            time.Second * time.Duration(connectTimeoutSec)
+        client.Config.HTTPTimeout.ReadWriteTimeout =
+            time.Second * time.Duration(readWriteTimeout)
+        client.Config.HTTPTimeout.HeaderTimeout =
+            time.Second * time.Duration(readWriteTimeout)
+        client.Config.HTTPTimeout.LongTimeout =
+            time.Second * time.Duration(readWriteTimeout*10)
+    }
+}
+
+//
+// SecurityToken sets the STS token for a temporary user.
+//
+// token   STS token.
+//
+func SecurityToken(token string) ClientOption {
+    return func(client *Client) {
+        client.Config.SecurityToken = strings.TrimSpace(token)
+    }
+}
+
+//
+// EnableMD5 toggles MD5 validation of uploads.
+//
+// isEnableMD5   true enables MD5 validation, false disables it; the default
+//               configuration (getDefaultOssConfig) disables it.
+//
+func EnableMD5(isEnableMD5 bool) ClientOption {
+    return func(client *Client) {
+        client.Config.IsEnableMD5 = isEnableMD5
+    }
+}
+
+//
+// MD5ThresholdCalcInMemory sets the upper bound for computing MD5 in memory;
+// defaults to 16MB.
+//
+// threshold   in bytes. Content smaller than threshold has its MD5 computed in
+//             memory; larger content is spooled to a temporary file first.
+//
+func MD5ThresholdCalcInMemory(threshold int64) ClientOption {
+    return func(client *Client) {
+        client.Config.MD5Threshold = threshold
+    }
+}
+
+//
+// EnableCRC toggles CRC validation of uploads; enabled by default.
+//
+// isEnableCRC   true enables CRC validation, false disables it.
+//
+func EnableCRC(isEnableCRC bool) ClientOption {
+    return func(client *Client) {
+        client.Config.IsEnableCRC = isEnableCRC
+    }
+}
+
+//
+// UserAgent sets the user agent; the default looks like
+// aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2).
+//
+// userAgent   user agent string.
+//
+func UserAgent(userAgent string) ClientOption {
+    return func(client *Client) {
+        client.Config.UserAgent = userAgent
+    }
+}
+
+//
+// Proxy sets a proxy server; no proxy is used by default.
+//
+// proxyHost   proxy address in the form host or host:port.
+//
+func Proxy(proxyHost string) ClientOption {
+    return func(client *Client) {
+        client.Config.IsUseProxy = true
+        client.Config.ProxyHost = proxyHost
+        client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+    }
+}
+
+//
+// AuthProxy sets a proxy server that requires authentication; no proxy is used by
+// default.
+//
+// proxyHost       proxy address in the form host or host:port.
+// proxyUser       username for proxy authentication.
+// proxyPassword   password for proxy authentication.
+//
+func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
+    return func(client *Client) {
+        client.Config.IsUseProxy = true
+        client.Config.ProxyHost = proxyHost
+        client.Config.IsAuthProxy = true
+        client.Config.ProxyUser = proxyUser
+        client.Config.ProxyPassword = proxyPassword
+        client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+    }
+}
+
+// Private
+func (client Client) do(method, bucketName string, params map[string]interface{},
+    headers map[string]string, data io.Reader) (*Response, error) {
+    return client.Conn.Do(method, bucketName, "", params,
+        headers, data, 0, nil)
+}
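A minimal sketch of wiring several of the ClientOptions above into New; endpoint, keys and proxy values are placeholders:

client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "ak", "sk",
    oss.Timeout(15, 60),         // 15s connect, 60s read/write
    oss.UserAgent("my-app/1.0"), // custom User-Agent
    oss.AuthProxy("proxy.example.com:3128", "user", "pass"),
)
if err != nil {
    log.Fatal(err)
}
_ = client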
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
new file mode 100644
index 000000000..e8bee299e
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
@@ -0,0 +1,67 @@
+package oss
+
+import (
+    "time"
+)
+
+// HTTPTimeout http timeout
+type HTTPTimeout struct {
+    ConnectTimeout   time.Duration
+    ReadWriteTimeout time.Duration
+    HeaderTimeout    time.Duration
+    LongTimeout      time.Duration
+}
+
+// Config oss configure
+type Config struct {
+    Endpoint        string      // OSS endpoint
+    AccessKeyID     string      // access key ID
+    AccessKeySecret string      // access key secret
+    RetryTimes      uint        // retry count after failure; defaults to 5
+    UserAgent       string      // SDK name/version/system information
+    IsDebug         bool        // whether debug mode is on; defaults to false
+    Timeout         uint        // timeout in seconds; defaults to 60
+    SecurityToken   string      // STS token
+    IsCname         bool        // whether the endpoint is a CNAME
+    HTTPTimeout     HTTPTimeout // HTTP timeout settings
+    IsUseProxy      bool        // whether a proxy is used
+    ProxyHost       string      // proxy address
+    IsAuthProxy     bool        // whether the proxy requires authentication
+    ProxyUser       string      // proxy username
+    ProxyPassword   string      // proxy password
+    IsEnableMD5     bool        // whether MD5 validation is enabled for uploads
+    MD5Threshold    int64       // upper bound in bytes for in-memory MD5; above it a temporary file is used
+    IsEnableCRC     bool        // whether CRC64 validation is enabled for uploads
+}
+
+// getDefaultOssConfig gets the default configuration.
+func getDefaultOssConfig() *Config {
+    config := Config{}
+
+    config.Endpoint = ""
+    config.AccessKeyID = ""
+    config.AccessKeySecret = ""
+    config.RetryTimes = 5
+    config.IsDebug = false
+    config.UserAgent = userAgent
+    config.Timeout = 60 // seconds
+    config.SecurityToken = ""
+    config.IsCname = false
+
+    config.HTTPTimeout.ConnectTimeout = time.Second * 30   // 30s
+    config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s
+    config.HTTPTimeout.HeaderTimeout = time.Second * 60    // 60s
+    config.HTTPTimeout.LongTimeout = time.Second * 300     // 300s
+
+    config.IsUseProxy = false
+    config.ProxyHost = ""
+    config.IsAuthProxy = false
+    config.ProxyUser = ""
+    config.ProxyPassword = ""
+
+    config.MD5Threshold = 16 * 1024 * 1024 // 16MB
+    config.IsEnableMD5 = false
+    config.IsEnableCRC = true
+
+    return &config
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
new file mode 100644
index 000000000..4dffa5ee8
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go
@@ -0,0 +1,610 @@
+package oss
+
+import (
+    "bytes"
+    "crypto/md5"
+    "encoding/base64"
+    "encoding/xml"
+    "fmt"
+    "hash"
+    "io"
+    "io/ioutil"
+    "net"
+    "net/http"
+    "net/url"
+    "os"
+    "sort"
+    "strconv"
+    "strings"
+    "time"
+)
+
+// Conn oss conn
+type Conn struct {
+    config *Config
+    url    *urlMaker
+    client *http.Client
+}
+
+var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore"}
+
+// init initializes the Conn.
+func (conn *Conn) init(config *Config, urlMaker *urlMaker) error {
+    httpTimeOut := conn.config.HTTPTimeout
+
+    // new Transport
+    transport := &http.Transport{
+        Dial: func(netw, addr string) (net.Conn, error) {
+            conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout)
+            if err != nil {
+                return nil, err
+            }
+            return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil
+        },
+        ResponseHeaderTimeout: httpTimeOut.HeaderTimeout,
+    }
+
+    // Proxy
+    if conn.config.IsUseProxy {
+        proxyURL, err := url.Parse(config.ProxyHost)
+        if err != nil {
+            return err
+        }
+        transport.Proxy = http.ProxyURL(proxyURL)
+    }
+
+    conn.config = config
+    conn.url = urlMaker
+    conn.client = &http.Client{Transport: transport}
+
+    return nil
+}
+// Do sends the request and returns the response.
+func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string,
+    data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
+    urlParams := conn.getURLParams(params)
+    subResource := conn.getSubResource(params)
+    uri := conn.url.getURL(bucketName, objectName, urlParams)
+    resource := conn.url.getResource(bucketName, objectName, subResource)
+    return conn.doRequest(method, uri, resource, headers, data, initCRC, listener)
+}
+
+// DoURL sends the request against a pre-signed URL and returns the response.
+func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string,
+    data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
+    // get the URI from the signed URL
+    uri, err := url.ParseRequestURI(signedURL)
+    if err != nil {
+        return nil, err
+    }
+
+    m := strings.ToUpper(string(method))
+    req := &http.Request{
+        Method:     m,
+        URL:        uri,
+        Proto:      "HTTP/1.1",
+        ProtoMajor: 1,
+        ProtoMinor: 1,
+        Header:     make(http.Header),
+        Host:       uri.Host,
+    }
+
+    tracker := &readerTracker{completedBytes: 0}
+    fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
+    if fd != nil {
+        defer func() {
+            fd.Close()
+            os.Remove(fd.Name())
+        }()
+    }
+
+    if conn.config.IsAuthProxy {
+        auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
+        basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
+        req.Header.Set("Proxy-Authorization", basic)
+    }
+
+    req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
+    req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
+
+    if headers != nil {
+        for k, v := range headers {
+            req.Header.Set(k, v)
+        }
+    }
+
+    // transfer started
+    event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
+    publishProgress(listener, event)
+
+    resp, err := conn.client.Do(req)
+    if err != nil {
+        // transfer failed
+        event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
+        publishProgress(listener, event)
+        return nil, err
+    }
+
+    // transfer completed
+    event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
+    publishProgress(listener, event)
+
+    return conn.handleResponse(resp, crc)
+}
+
+func (conn Conn) getURLParams(params map[string]interface{}) string {
+    // sort
+    keys := make([]string, 0, len(params))
+    for k := range params {
+        keys = append(keys, k)
+    }
+    sort.Strings(keys)
+
+    // serialize
+    var buf bytes.Buffer
+    for _, k := range keys {
+        if buf.Len() > 0 {
+            buf.WriteByte('&')
+        }
+        buf.WriteString(url.QueryEscape(k))
+        if params[k] != nil {
+            buf.WriteString("=" + url.QueryEscape(params[k].(string)))
+        }
+    }
+
+    return buf.String()
+}
+
+func (conn Conn) getSubResource(params map[string]interface{}) string {
+    // sort
+    keys := make([]string, 0, len(params))
+    for k := range params {
+        if conn.isParamSign(k) {
+            keys = append(keys, k)
+        }
+    }
+    sort.Strings(keys)
+
+    // serialize
+    var buf bytes.Buffer
+    for _, k := range keys {
+        if buf.Len() > 0 {
+            buf.WriteByte('&')
+        }
+        buf.WriteString(k)
+        if params[k] != nil {
+            buf.WriteString("=" + params[k].(string))
+        }
+    }
+
+    return buf.String()
+}
+
+func (conn Conn) isParamSign(paramKey string) bool {
+    for _, k := range signKeyList {
+        if paramKey == k {
+            return true
+        }
+    }
+    return false
+}
+func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string,
+    data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) {
+    method = strings.ToUpper(method)
+    req := &http.Request{
+        Method:     method,
+        URL:        uri,
+        Proto:      "HTTP/1.1",
+        ProtoMajor: 1,
+        ProtoMinor: 1,
+        Header:     make(http.Header),
+        Host:       uri.Host,
+    }
+
+    tracker := &readerTracker{completedBytes: 0}
+    fd, crc := conn.handleBody(req, data, initCRC, listener, tracker)
+    if fd != nil {
+        defer func() {
+            fd.Close()
+            os.Remove(fd.Name())
+        }()
+    }
+
+    if conn.config.IsAuthProxy {
+        auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
+        basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
+        req.Header.Set("Proxy-Authorization", basic)
+    }
+
+    date := time.Now().UTC().Format(http.TimeFormat)
+    req.Header.Set(HTTPHeaderDate, date)
+    req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
+    req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
+    if conn.config.SecurityToken != "" {
+        req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken)
+    }
+
+    if headers != nil {
+        for k, v := range headers {
+            req.Header.Set(k, v)
+        }
+    }
+
+    conn.signHeader(req, canonicalizedResource)
+
+    // transfer started
+    event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength)
+    publishProgress(listener, event)
+
+    resp, err := conn.client.Do(req)
+    if err != nil {
+        // transfer failed
+        event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength)
+        publishProgress(listener, event)
+        return nil, err
+    }
+
+    // transfer completed
+    event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength)
+    publishProgress(listener, event)
+
+    return conn.handleResponse(resp, crc)
+}
+
+func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string {
+    subResource := conn.getSubResource(params)
+    canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource)
+
+    m := strings.ToUpper(string(method))
+    req := &http.Request{
+        Method: m,
+        Header: make(http.Header),
+    }
+
+    if conn.config.IsAuthProxy {
+        auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword
+        basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
+        req.Header.Set("Proxy-Authorization", basic)
+    }
+
+    req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10))
+    req.Header.Set(HTTPHeaderHost, conn.config.Endpoint)
+    req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent)
+
+    if headers != nil {
+        for k, v := range headers {
+            req.Header.Set(k, v)
+        }
+    }
+
+    signedStr := conn.getSignedStr(req, canonicalizedResource)
+
+    params[HTTPParamExpires] = strconv.FormatInt(expiration, 10)
+    params[HTTPParamAccessKeyID] = conn.config.AccessKeyID
+    params[HTTPParamSignature] = signedStr
+    if conn.config.SecurityToken != "" {
+        params[HTTPParamSecurityToken] = conn.config.SecurityToken
+    }
+
+    urlParams := conn.getURLParams(params)
+    return conn.url.getSignURL(bucketName, objectName, urlParams)
+}
+// handle request body
+func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64,
+    listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) {
+    var file *os.File
+    var crc hash.Hash64
+    reader := body
+
+    // length
+    switch v := body.(type) {
+    case *bytes.Buffer:
+        req.ContentLength = int64(v.Len())
+    case *bytes.Reader:
+        req.ContentLength = int64(v.Len())
+    case *strings.Reader:
+        req.ContentLength = int64(v.Len())
+    case *os.File:
+        req.ContentLength = tryGetFileSize(v)
+    case *io.LimitedReader:
+        req.ContentLength = int64(v.N)
+    }
+    req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
+
+    // md5
+    if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" {
+        md5 := ""
+        reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold)
+        req.Header.Set(HTTPHeaderContentMD5, md5)
+    }
+
+    // crc
+    if reader != nil && conn.config.IsEnableCRC {
+        crc = NewCRC(crcTable(), initCRC)
+        reader = TeeReader(reader, crc, req.ContentLength, listener, tracker)
+    }
+
+    // http body
+    rc, ok := reader.(io.ReadCloser)
+    if !ok && reader != nil {
+        rc = ioutil.NopCloser(reader)
+    }
+    req.Body = rc
+
+    return file, crc
+}
+
+func tryGetFileSize(f *os.File) int64 {
+    fInfo, _ := f.Stat()
+    return fInfo.Size()
+}
+
+// handle response
+func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) {
+    var cliCRC uint64
+    var srvCRC uint64
+
+    statusCode := resp.StatusCode
+    if statusCode >= 400 && statusCode <= 505 {
+        // 4xx and 5xx indicate that an error occurred
+        var respBody []byte
+        respBody, err := readResponseBody(resp)
+        if err != nil {
+            return nil, err
+        }
+
+        if len(respBody) == 0 {
+            // no error in response body
+            err = fmt.Errorf("oss: service returned without a response body (%s)", resp.Status)
+        } else {
+            // response contains storage service error object, unmarshal
+            srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode,
+                resp.Header.Get(HTTPHeaderOssRequestID))
+            if errIn != nil { // error unmarshaling the error response
+                err = errIn
+            } else {
+                err = srvErr
+            }
+        }
+
+        return &Response{
+            StatusCode: resp.StatusCode,
+            Headers:    resp.Header,
+            Body:       ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body
+        }, err
+    } else if statusCode >= 300 && statusCode <= 307 {
+        // OSS uses 3xx, but the response has no body
+        err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status)
+        return &Response{
+            StatusCode: resp.StatusCode,
+            Headers:    resp.Header,
+            Body:       resp.Body,
+        }, err
+    }
+
+    if conn.config.IsEnableCRC && crc != nil {
+        cliCRC = crc.Sum64()
+    }
+    srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64)
+
+    // 2xx, successful
+    return &Response{
+        StatusCode: resp.StatusCode,
+        Headers:    resp.Header,
+        Body:       resp.Body,
+        ClientCRC:  cliCRC,
+        ServerCRC:  srvCRC,
+    }, nil
+}
+
+func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) {
+    if contentLen == 0 || contentLen > md5Threshold {
+        // huge body, use temporary file
+        tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix)
+        if tempFile != nil {
+            io.Copy(tempFile, body)
+            tempFile.Seek(0, os.SEEK_SET)
+            md5 := md5.New()
+            io.Copy(md5, tempFile)
+            sum := md5.Sum(nil)
+            b64 = base64.StdEncoding.EncodeToString(sum[:])
+            tempFile.Seek(0, os.SEEK_SET)
+            reader = tempFile
+        }
+    } else {
+        // small body, use memory
+        buf, _ := ioutil.ReadAll(body)
+        sum := md5.Sum(buf)
+        b64 = base64.StdEncoding.EncodeToString(sum[:])
+        reader = bytes.NewReader(buf)
+    }
+    return
+}
+
+func readResponseBody(resp *http.Response) ([]byte, error) {
+    defer resp.Body.Close()
+    out, err := ioutil.ReadAll(resp.Body)
+    if err == io.EOF {
+        err = nil
+    }
+    return out, err
+}
+
+func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) {
+    var storageErr ServiceError
+    if err := xml.Unmarshal(body, &storageErr); err != nil {
+        return storageErr, err
+    }
+    storageErr.StatusCode = statusCode
+    storageErr.RequestID = requestID
+    storageErr.RawMessage = string(body)
+    return storageErr, nil
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+    data, err := ioutil.ReadAll(body)
+    if err != nil {
+        return err
+    }
+    return xml.Unmarshal(data, v)
+}
+
+// Handle http timeout
+type timeoutConn struct {
+    conn        net.Conn
+    timeout     time.Duration
+    longTimeout time.Duration
+}
+func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn {
+    conn.SetReadDeadline(time.Now().Add(longTimeout))
+    return &timeoutConn{
+        conn:        conn,
+        timeout:     timeout,
+        longTimeout: longTimeout,
+    }
+}
+
+func (c *timeoutConn) Read(b []byte) (n int, err error) {
+    c.SetReadDeadline(time.Now().Add(c.timeout))
+    n, err = c.conn.Read(b)
+    c.SetReadDeadline(time.Now().Add(c.longTimeout))
+    return n, err
+}
+
+func (c *timeoutConn) Write(b []byte) (n int, err error) {
+    c.SetWriteDeadline(time.Now().Add(c.timeout))
+    n, err = c.conn.Write(b)
+    c.SetReadDeadline(time.Now().Add(c.longTimeout))
+    return n, err
+}
+
+func (c *timeoutConn) Close() error {
+    return c.conn.Close()
+}
+
+func (c *timeoutConn) LocalAddr() net.Addr {
+    return c.conn.LocalAddr()
+}
+
+func (c *timeoutConn) RemoteAddr() net.Addr {
+    return c.conn.RemoteAddr()
+}
+
+func (c *timeoutConn) SetDeadline(t time.Time) error {
+    return c.conn.SetDeadline(t)
+}
+
+func (c *timeoutConn) SetReadDeadline(t time.Time) error {
+    return c.conn.SetReadDeadline(t)
+}
+
+func (c *timeoutConn) SetWriteDeadline(t time.Time) error {
+    return c.conn.SetWriteDeadline(t)
+}
+
+// urlMaker builds the URL and the canonicalized resource.
+const (
+    urlTypeCname  = 1
+    urlTypeIP     = 2
+    urlTypeAliyun = 3
+)
+
+type urlMaker struct {
+    Scheme  string // http or https
+    NetLoc  string // host or ip
+    Type    int    // 1 CNAME 2 IP 3 ALIYUN
+    IsProxy bool   // proxy
+}
+
+// Parse endpoint
+func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) {
+    if strings.HasPrefix(endpoint, "http://") {
+        um.Scheme = "http"
+        um.NetLoc = endpoint[len("http://"):]
+    } else if strings.HasPrefix(endpoint, "https://") {
+        um.Scheme = "https"
+        um.NetLoc = endpoint[len("https://"):]
+    } else {
+        um.Scheme = "http"
+        um.NetLoc = endpoint
+    }
+
+    host, _, err := net.SplitHostPort(um.NetLoc)
+    if err != nil {
+        host = um.NetLoc
+    }
+    ip := net.ParseIP(host)
+    if ip != nil {
+        um.Type = urlTypeIP
+    } else if isCname {
+        um.Type = urlTypeCname
+    } else {
+        um.Type = urlTypeAliyun
+    }
+    um.IsProxy = isProxy
+}
+
+// Build URL
+func (um urlMaker) getURL(bucket, object, params string) *url.URL {
+    host, path := um.buildURL(bucket, object)
+    addr := ""
+    if params == "" {
+        addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
+    } else {
+        addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
+    }
+    uri, _ := url.ParseRequestURI(addr)
+    return uri
+}
+
+// Build Sign URL
+func (um urlMaker) getSignURL(bucket, object, params string) string {
+    host, path := um.buildURL(bucket, object)
+    return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
+}
+
+// Build URL
+func (um urlMaker) buildURL(bucket, object string) (string, string) {
+    var host = ""
+    var path = ""
+
+    object = url.QueryEscape(object)
+    object = strings.Replace(object, "+", "%20", -1)
+
+    if um.Type == urlTypeCname {
+        host = um.NetLoc
+        path = "/" + object
+    } else if um.Type == urlTypeIP {
+        if bucket == "" {
+            host = um.NetLoc
+            path = "/"
+        } else {
+            host = um.NetLoc
+            path = fmt.Sprintf("/%s/%s", bucket, object)
+        }
+    } else {
+        if bucket == "" {
+            host = um.NetLoc
+            path = "/"
+        } else {
+            host = bucket + "." + um.NetLoc
+            path = "/" + object
+        }
+    }
+
+    return host, path
+}
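The branches in buildURL are easier to see side by side; a sketch of the resulting URL shapes (urlMaker is unexported, so this is an illustration with placeholder hosts, not runnable client code):

// For bucket "b" and object "o", the three endpoint types yield:
//
//   urlTypeAliyun: http://b.oss-cn-hangzhou.aliyuncs.com/o   // bucket as a subdomain
//   urlTypeCname:  http://img.example.com/o                  // bucket implied by the CNAME
//   urlTypeIP:     http://203.0.113.10/b/o                   // bucket in the path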
+// Canonicalized Resource
+func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
+    if subResource != "" {
+        subResource = "?" + subResource
+    }
+    if bucketName == "" {
+        return fmt.Sprintf("/%s%s", bucketName, subResource)
+    }
+    return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
new file mode 100644
index 000000000..ba4bb5ed0
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
@@ -0,0 +1,132 @@
+package oss
+
+import "os"
+
+// ACLType access control of buckets and objects
+type ACLType string
+
+const (
+    // ACLPrivate private read/write
+    ACLPrivate ACLType = "private"
+
+    // ACLPublicRead public read, private write
+    ACLPublicRead ACLType = "public-read"
+
+    // ACLPublicReadWrite public read/write
+    ACLPublicReadWrite ACLType = "public-read-write"
+
+    // ACLDefault default permission of objects; buckets have no such permission
+    ACLDefault ACLType = "default"
+)
+
+// MetadataDirectiveType whether a copied object reuses the source object's metadata
+type MetadataDirectiveType string
+
+const (
+    // MetaCopy the destination object uses the source object's metadata
+    MetaCopy MetadataDirectiveType = "COPY"
+
+    // MetaReplace the destination object uses its own, user-specified metadata
+    MetaReplace MetadataDirectiveType = "REPLACE"
+)
+
+// StorageClassType storage class of a bucket
+type StorageClassType string
+
+const (
+    // StorageStandard standard storage class
+    StorageStandard StorageClassType = "Standard"
+
+    // StorageIA infrequent-access storage class
+    StorageIA StorageClassType = "IA"
+
+    // StorageArchive archive storage class
+    StorageArchive StorageClassType = "Archive"
+)
+
+// HTTPMethod HTTP request method
+type HTTPMethod string
+
+const (
+    // HTTPGet HTTP GET
+    HTTPGet HTTPMethod = "GET"
+
+    // HTTPPut HTTP PUT
+    HTTPPut HTTPMethod = "PUT"
+
+    // HTTPHead HTTP HEAD
+    HTTPHead HTTPMethod = "HEAD"
+
+    // HTTPPost HTTP POST
+    HTTPPost HTTPMethod = "POST"
+
+    // HTTPDelete HTTP DELETE
+    HTTPDelete HTTPMethod = "DELETE"
+)
+
+// HTTP header labels
+const (
+    HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
+    HTTPHeaderAuthorization             = "Authorization"
+    HTTPHeaderCacheControl              = "Cache-Control"
+    HTTPHeaderContentDisposition        = "Content-Disposition"
+    HTTPHeaderContentEncoding           = "Content-Encoding"
+    HTTPHeaderContentLength             = "Content-Length"
+    HTTPHeaderContentMD5                = "Content-MD5"
+    HTTPHeaderContentType               = "Content-Type"
+    HTTPHeaderContentLanguage           = "Content-Language"
+    HTTPHeaderDate                      = "Date"
+    HTTPHeaderEtag                      = "ETag"
+    HTTPHeaderExpires                   = "Expires"
+    HTTPHeaderHost                      = "Host"
+    HTTPHeaderLastModified              = "Last-Modified"
+    HTTPHeaderRange                     = "Range"
+    HTTPHeaderLocation                  = "Location"
+    HTTPHeaderOrigin                    = "Origin"
+    HTTPHeaderServer                    = "Server"
+    HTTPHeaderUserAgent                 = "User-Agent"
+    HTTPHeaderIfModifiedSince           = "If-Modified-Since"
+    HTTPHeaderIfUnmodifiedSince         = "If-Unmodified-Since"
+    HTTPHeaderIfMatch                   = "If-Match"
+    HTTPHeaderIfNoneMatch               = "If-None-Match"
+
+    HTTPHeaderOssACL                         = "X-Oss-Acl"
+    HTTPHeaderOssMetaPrefix                  = "X-Oss-Meta-"
+    HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
+    HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
+    HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
+    HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
+    HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
+    HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
+    HTTPHeaderOssCopySourceIfNoneMatch       = "X-Oss-Copy-Source-If-None-Match"
+    HTTPHeaderOssCopySourceIfModifiedSince   = "X-Oss-Copy-Source-If-Modified-Since"
+    HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
+    HTTPHeaderOssMetadataDirective           = "X-Oss-Metadata-Directive"
+    HTTPHeaderOssNextAppendPosition          = "X-Oss-Next-Append-Position"
+    HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
+    HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
+    HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target"
+)
+
+// HTTP params
+const (
+    HTTPParamExpires       = "Expires"
+    HTTPParamAccessKeyID   = "OSSAccessKeyId"
+    HTTPParamSignature     = "Signature"
+    HTTPParamSecurityToken = "security-token"
+)
+
+// Other constants
+const (
+    MaxPartSize = 5 * 1024 * 1024 * 1024 // maximum part size, 5GB
+    MinPartSize = 100 * 1024             // minimum part size, 100KB
+
+    FilePermMode = os.FileMode(0664) // default permission of newly created files
+
+    TempFilePrefix = "oss-go-temp-" // prefix of temporary files
+    TempFileSuffix = ".temp"        // suffix of temporary files
+
+    CheckpointFileSuffix = ".cp" // suffix of checkpoint files
+
+    Version = "1.7.0" // Go SDK version
+)
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
new file mode 100644
index 000000000..fb9eb1245
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go
@@ -0,0 +1,123 @@
+package oss
+
+import (
+    "hash"
+    "hash/crc64"
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+    crc uint64
+    tab *crc64.Table
+}
+
+// NewCRC creates a new hash.Hash64 computing the CRC-64 checksum
+// using the polynomial represented by the Table.
+func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} }
+
+// Size returns the number of bytes Sum will return.
+func (d *digest) Size() int { return crc64.Size }
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (d *digest) BlockSize() int { return 1 }
+
+// Reset resets the Hash to its initial state.
+func (d *digest) Reset() { d.crc = 0 }
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+func (d *digest) Write(p []byte) (n int, err error) {
+    d.crc = crc64.Update(d.crc, d.tab, p)
+    return len(p), nil
+}
+
+// Sum64 returns the crc64 value.
+func (d *digest) Sum64() uint64 { return d.crc }
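A minimal sketch of the incremental digest above: feeding two chunks to NewCRC should match a one-shot crc64.Checksum over the concatenation (snippet assumes imports of fmt, hash/crc64 and this oss package):

tab := crc64.MakeTable(crc64.ECMA)
d := oss.NewCRC(tab, 0)
d.Write([]byte("hello "))
d.Write([]byte("world"))
fmt.Println(d.Sum64() == crc64.Checksum([]byte("hello world"), tab)) // true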
+func (d *digest) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// gf2Dim dimension of GF(2) vectors (length of CRC) +const gf2Dim int = 64 + +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + for i := 0; vec != 0; i++ { + if vec&1 != 0 { + sum ^= mat[i] + } + + vec >>= 1 + } + return sum +} + +func gf2MatrixSquare(square []uint64, mat []uint64) { + for n := 0; n < gf2Dim; n++ { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// CRC64Combine combine crc64 +func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 { + var even [gf2Dim]uint64 // even-power-of-two zeros operator + var odd [gf2Dim]uint64 // odd-power-of-two zeros operator + + // Degenerate case + if len2 == 0 { + return crc1 + } + + // Put operator for one zero bit in odd + odd[0] = crc64.ECMA // CRC64 polynomial + var row uint64 = 1 + for n := 1; n < gf2Dim; n++ { + odd[n] = row + row <<= 1 + } + + // Put operator for two zero bits in even + gf2MatrixSquare(even[:], odd[:]) + + // Put operator for four zero bits in odd + gf2MatrixSquare(odd[:], even[:]) + + // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even + for { + // Apply zeros operator for this bit of len2 + gf2MatrixSquare(even[:], odd[:]) + + if len2&1 != 0 { + crc1 = gf2MatrixTimes(even[:], crc1) + } + + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + + // Another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd[:], even[:]) + if len2&1 != 0 { + crc1 = gf2MatrixTimes(odd[:], crc1) + } + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + } + + // Return combined crc + crc1 ^= crc2 + return crc1 +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go new file mode 100644 index 000000000..648a5a59e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go @@ -0,0 +1,551 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "os" + "strconv" +) + +// +// DownloadFile 分片下载文件 +// +// objectKey object key。 +// filePath 本地文件。objectKey下载到文件。 +// partSize 本次上传文件片的大小,字节数。比如100 * 1024为每片100KB。 +// options Object的属性限制项。详见GetObject。 +// +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < 1 { + return errors.New("oss: part size smaller than 1") + } + + cpConf, err := getCpConfig(options, filePath) + if err != nil { + return err + } + + uRange, err := getRangeConfig(options) + if err != nil { + return err + } + + routines := getRoutines(options) + + if cpConf.IsEnable { + return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines, uRange) + } + + return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange) +} + +// 获取下载范围 +func getRangeConfig(options []Option) (*unpackedRange, error) { + rangeOpt, err := findOption(options, HTTPHeaderRange, nil) + if err != nil || rangeOpt == nil { + return nil, err + } + return parseRange(rangeOpt.(string)) +} + +// ----- 并发无断点的下载 ----- + +// 工作协程参数 +type downloadWorkerArg struct { + bucket *Bucket + key string + filePath string + options []Option + hook downloadPartHook + enableCRC bool +} + +// Hook用于测试 +type 
downloadPartHook func(part downloadPart) error + +var downloadPartHooker downloadPartHook = defaultDownloadPartHook + +func defaultDownloadPartHook(part downloadPart) error { + return nil +} + +// 默认ProgressListener,屏蔽GetObject的Options中ProgressListener +type defaultDownloadProgressListener struct { +} + +// ProgressChanged 静默处理 +func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) { +} + +// 工作协程 +func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) { + for part := range jobs { + if err := arg.hook(part); err != nil { + failed <- err + break + } + + // resolve options + r := Range(part.Start, part.End) + p := Progress(&defaultDownloadProgressListener{}) + opts := make([]Option, len(arg.options)+2) + // append orderly, can not be reversed! + opts = append(opts, arg.options...) + opts = append(opts, r, p) + + rd, err := arg.bucket.GetObject(arg.key, opts...) + if err != nil { + failed <- err + break + } + defer rd.Close() + + var crcCalc hash.Hash64 + if arg.enableCRC { + crcCalc = crc64.New(crcTable()) + contentLen := part.End - part.Start + 1 + rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil)) + } + defer rd.Close() + + select { + case <-die: + return + default: + } + + fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode) + if err != nil { + failed <- err + break + } + + _, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET) + if err != nil { + fd.Close() + failed <- err + break + } + + _, err = io.Copy(fd, rd) + if err != nil { + fd.Close() + failed <- err + break + } + + if arg.enableCRC { + part.CRC64 = crcCalc.Sum64() + } + + fd.Close() + results <- part + } +} + +// 调度协程 +func downloadScheduler(jobs chan downloadPart, parts []downloadPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// 下载片 +type downloadPart struct { + Index int // 片序号,从0开始编号 + Start int64 // 片起始位置 + End int64 // 片结束位置 + Offset int64 // 文件中的偏移位置 + CRC64 uint64 // 片的校验值 +} + +// 文件分片 +func getDownloadParts(bucket *Bucket, objectKey string, partSize int64, uRange *unpackedRange) ([]downloadPart, bool, uint64, error) { + meta, err := bucket.GetObjectDetailedMeta(objectKey) + if err != nil { + return nil, false, 0, err + } + + parts := []downloadPart{} + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return nil, false, 0, err + } + + enableCRC := false + crcVal := (uint64)(0) + if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" { + if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) { + enableCRC = true + crcVal, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0) + } + } + + part := downloadPart{} + i := 0 + start, end := adjustRange(uRange, objectSize) + for offset := start; offset < end; offset += partSize { + part.Index = i + part.Start = offset + part.End = GetPartEnd(offset, end, partSize) + part.Offset = start + part.CRC64 = 0 + parts = append(parts, part) + i++ + } + return parts, enableCRC, crcVal, nil +} + +// 文件大小 +func getObjectBytes(parts []downloadPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// 计算连续分片总的CRC +func combineCRCInParts(dps []downloadPart) uint64 { + if dps == nil || len(dps) == 0 { + return 0 + } + + crc := dps[0].CRC64 + for i := 1; i < len(dps); i++ { + crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1)) + } + + return crc +} + +// 
并发无断点续传的下载 +func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := getProgressListener(options) + + // 如果文件不存在则创建,存在不清空,下载分片会重写文件内容 + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // 分割文件 + parts, enableCRC, expectedCRC, err := getDownloadParts(&bucket, objectKey, partSize, uRange) + if err != nil { + return err + } + + jobs := make(chan downloadPart, len(parts)) + results := make(chan downloadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + var completedBytes int64 + totalBytes := getObjectBytes(parts) + event := newProgressEvent(TransferStartedEvent, 0, totalBytes) + publishProgress(listener, event) + + // 启动工作协程 + arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC} + for w := 1; w <= routines; w++ { + go downloadWorker(w, arg, jobs, results, failed, die) + } + + // 并发上传分片 + go downloadScheduler(jobs, parts) + + // 等待分片下载完成 + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + completedBytes += (part.End - part.Start + 1) + parts[part.Index].CRC64 = part.CRC64 + event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes) + publishProgress(listener, event) + + if enableCRC { + actualCRC := combineCRCInParts(parts) + err = checkDownloadCRC(actualCRC, expectedCRC) + if err != nil { + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// ----- 并发有断点的下载 ----- + +const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3" + +type downloadCheckpoint struct { + Magic string // magic + MD5 string // cp内容的MD5 + FilePath string // 本地文件 + Object string // key + ObjStat objectStat // 文件状态 + Parts []downloadPart // 全部分片 + PartStat []bool // 分片下载是否完成 + Start int64 // 起点 + End int64 // 终点 + enableCRC bool // 是否有CRC校验 + CRC uint64 // CRC校验值 +} + +type objectStat struct { + Size int64 // 大小 + LastModified string // 最后修改时间 + Etag string // etag +} + +// CP数据是否有效,CP有效且Object没有更新时有效 +func (cp downloadCheckpoint) isValid(bucket *Bucket, objectKey string, uRange *unpackedRange) (bool, error) { + // 比较CP的Magic及MD5 + cpb := cp + cpb.MD5 = "" + js, _ := json.Marshal(cpb) + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + + if cp.Magic != downloadCpMagic || b64 != cp.MD5 { + return false, nil + } + + // 确认object没有更新 + meta, err := bucket.GetObjectDetailedMeta(objectKey) + if err != nil { + return false, err + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return false, err + } + + // 比较Object的大小/最后修改时间/etag + if cp.ObjStat.Size != objectSize || + cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) || + cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) { + return false, nil + } + + // 确认下载范围是否变化 + if uRange != nil { + start, end := adjustRange(uRange, objectSize) + if start != cp.Start || end != cp.End { + return false, nil + } + } + + return true, nil +} + +// 从文件中load +func (cp *downloadCheckpoint) load(filePath string) error { + 
contents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + err = json.Unmarshal(contents, cp) + return err +} + +// dump到文件 +func (cp *downloadCheckpoint) dump(filePath string) error { + bcp := *cp + + // 计算MD5 + bcp.MD5 = "" + js, err := json.Marshal(bcp) + if err != nil { + return err + } + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + bcp.MD5 = b64 + + // 序列化 + js, err = json.Marshal(bcp) + if err != nil { + return err + } + + // dump + return ioutil.WriteFile(filePath, js, FilePermMode) +} + +// 未完成的分片 +func (cp downloadCheckpoint) todoParts() []downloadPart { + dps := []downloadPart{} + for i, ps := range cp.PartStat { + if !ps { + dps = append(dps, cp.Parts[i]) + } + } + return dps +} + +// 完成的字节数 +func (cp downloadCheckpoint) getCompletedBytes() int64 { + var completedBytes int64 + for i, part := range cp.Parts { + if cp.PartStat[i] { + completedBytes += (part.End - part.Start + 1) + } + } + return completedBytes +} + +// 初始化下载任务 +func (cp *downloadCheckpoint) prepare(bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error { + // cp + cp.Magic = downloadCpMagic + cp.FilePath = filePath + cp.Object = objectKey + + // object + meta, err := bucket.GetObjectDetailedMeta(objectKey) + if err != nil { + return err + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return err + } + + cp.ObjStat.Size = objectSize + cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified) + cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag) + + // parts + cp.Parts, cp.enableCRC, cp.CRC, err = getDownloadParts(bucket, objectKey, partSize, uRange) + if err != nil { + return err + } + cp.PartStat = make([]bool, len(cp.Parts)) + for i := range cp.PartStat { + cp.PartStat[i] = false + } + + return nil +} + +func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error { + os.Remove(cpFilePath) + return os.Rename(downFilepath, cp.FilePath) +} + +// 并发带断点的下载 +func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := getProgressListener(options) + + // LOAD CP数据 + dcp := downloadCheckpoint{} + err := dcp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // LOAD出错或数据无效重新初始化下载 + valid, err := dcp.isValid(&bucket, objectKey, uRange) + if err != nil || !valid { + if err = dcp.prepare(&bucket, objectKey, filePath, partSize, uRange); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // 如果文件不存在则创建,存在不清空,下载分片会重写文件内容 + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // 未完成的分片 + parts := dcp.todoParts() + jobs := make(chan downloadPart, len(parts)) + results := make(chan downloadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := dcp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size) + publishProgress(listener, event) + + // 启动工作协程 + arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC} + for w := 1; w <= routines; w++ { + go downloadWorker(w, arg, jobs, results, failed, die) + } + + // 并发下载分片 + go downloadScheduler(jobs, parts) + + // 等待分片下载完成 + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + dcp.PartStat[part.Index] = 
true + dcp.Parts[part.Index].CRC64 = part.CRC64 + dcp.dump(cpFilePath) + completedBytes += (part.End - part.Start + 1) + event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size) + publishProgress(listener, event) + + if dcp.enableCRC { + actualCRC := combineCRCInParts(dcp.Parts) + err = checkDownloadCRC(actualCRC, dcp.CRC) + if err != nil { + return err + } + } + + return dcp.complete(cpFilePath, tempFilePath) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go new file mode 100644 index 000000000..a54b9d765 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go @@ -0,0 +1,89 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/http" + "strings" +) + +// ServiceError contains fields of the error response from Oss Service REST API. +type ServiceError struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` // OSS返回给用户的错误码 + Message string `xml:"Message"` // OSS给出的详细错误信息 + RequestID string `xml:"RequestId"` // 用于唯一标识该次请求的UUID + HostID string `xml:"HostId"` // 用于标识访问的OSS集群 + RawMessage string // OSS返回的原始消息内容 + StatusCode int // HTTP状态码 +} + +// Implement interface error +func (e ServiceError) Error() string { + return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", + e.StatusCode, e.Code, e.Message, e.RequestID) +} + +// UnexpectedStatusCodeError is returned when a storage service responds with neither an error +// nor with an HTTP status code indicating success. +type UnexpectedStatusCodeError struct { + allowed []int // 预期OSS返回HTTP状态码 + got int // OSS实际返回HTTP状态码 +} + +// Implement interface error +func (e UnexpectedStatusCodeError) Error() string { + s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } + + got := s(e.got) + expected := []string{} + for _, v := range e.allowed { + expected = append(expected, s(v)) + } + return fmt.Sprintf("oss: status code from service response is %s; was expecting %s", + got, strings.Join(expected, " or ")) +} + +// Got is the actual status code returned by oss. +func (e UnexpectedStatusCodeError) Got() int { + return e.got +} + +// checkRespCode returns UnexpectedStatusError if the given response code is not +// one of the allowed status codes; otherwise nil. 
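+// allowed enumerates every HTTP status the caller treats as success.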
+func checkRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} + +// CRCCheckError is returned when crc check is inconsistent between client and server +type CRCCheckError struct { + clientCRC uint64 // 客户端计算的CRC64值 + serverCRC uint64 // 服务端计算的CRC64值 + operation string // 上传操作,如PutObject/AppendObject/UploadPart等 + requestID string // 本次操作的RequestID +} + +// Implement interface error +func (e CRCCheckError) Error() string { + return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s", + e.operation, e.clientCRC, e.serverCRC, e.requestID) +} + +func checkDownloadCRC(clientCRC, serverCRC uint64) error { + if clientCRC == serverCRC { + return nil + } + return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""} +} + +func checkCRC(resp *Response, operation string) error { + if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC { + return nil + } + return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)} +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go new file mode 100644 index 000000000..e2ed9ce10 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go @@ -0,0 +1,245 @@ +package oss + +import ( + "mime" + "path" + "strings" +) + +var extToMimeType = map[string]string{ + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", + ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", + ".apk": "application/vnd.android.package-archive", + ".hqx": "application/mac-binhex40", + ".cpt": "application/mac-compactpro", + ".doc": "application/msword", + ".ogg": "application/ogg", + ".pdf": "application/pdf", + ".rtf": "text/rtf", + ".mif": "application/vnd.mif", + ".xls": "application/vnd.ms-excel", + ".ppt": "application/vnd.ms-powerpoint", + ".odc": "application/vnd.oasis.opendocument.chart", + ".odb": "application/vnd.oasis.opendocument.database", + ".odf": "application/vnd.oasis.opendocument.formula", + ".odg": "application/vnd.oasis.opendocument.graphics", + ".otg": "application/vnd.oasis.opendocument.graphics-template", + ".odi": "application/vnd.oasis.opendocument.image", + ".odp": "application/vnd.oasis.opendocument.presentation", + ".otp": "application/vnd.oasis.opendocument.presentation-template", + ".ods": "application/vnd.oasis.opendocument.spreadsheet", + ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", + ".odt": "application/vnd.oasis.opendocument.text", + ".odm": "application/vnd.oasis.opendocument.text-master", + ".ott": "application/vnd.oasis.opendocument.text-template", + ".oth": "application/vnd.oasis.opendocument.text-web", + ".sxw": 
"application/vnd.sun.xml.writer", + ".stw": "application/vnd.sun.xml.writer.template", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxg": "application/vnd.sun.xml.writer.global", + ".sxm": "application/vnd.sun.xml.math", + ".sis": "application/vnd.symbian.install", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".pgn": "application/x-chess-pgn", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".js": "application/x-javascript", + ".ksp": "application/x-kspread", + ".chrt": "application/x-kchart", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".swf": "application/x-shockwave-flash", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".man": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".zip": "application/zip", + ".m3u": "audio/x-mpegurl", + ".ra": "audio/x-pn-realaudio", + ".wav": "audio/x-wav", + ".wma": "audio/x-ms-wma", + ".wax": "audio/x-ms-wax", + ".pdb": "chemical/x-pdb", + ".xyz": "chemical/x-xyz", + ".bmp": "image/bmp", + ".gif": "image/gif", + ".ief": "image/ief", + ".png": "image/png", + ".wbmp": "image/vnd.wap.wbmp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".css": "text/css", + ".rtx": "text/richtext", + ".tsv": "text/tab-separated-values", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".etx": "text/x-setext", + ".mxu": "video/vnd.mpegurl", + ".flv": "video/x-flv", + ".wm": "video/x-ms-wm", + ".wmv": "video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".3gp": "video/3gpp", + ".ai": "application/postscript", + ".aif": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".asc": "text/plain", + ".atom": "application/atom+xml", + ".au": "audio/basic", + ".bin": "application/octet-stream", + ".cdf": "application/x-netcdf", + ".cgm": "image/cgm", + ".class": "application/octet-stream", + ".dcr": "application/x-director", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".djv": "image/vnd.djvu", + ".djvu": "image/vnd.djvu", + ".dll": "application/octet-stream", + ".dmg": "application/octet-stream", + ".dms": 
"application/octet-stream", + ".dtd": "application/xml-dtd", + ".dv": "video/x-dv", + ".dxr": "application/x-director", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".ez": "application/andrew-inset", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".gz": "application/x-gzip", + ".htm": "text/html", + ".html": "text/html", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".iges": "model/iges", + ".igs": "model/iges", + ".jp2": "image/jp2", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".kar": "audio/midi", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".m4a": "audio/mp4a-latm", + ".m4p": "audio/mp4a-latm", + ".m4u": "video/vnd.mpegurl", + ".m4v": "video/x-m4v", + ".mac": "image/x-macpaint", + ".mathml": "application/mathml+xml", + ".mesh": "model/mesh", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".mov": "video/quicktime", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".msh": "model/mesh", + ".nc": "application/x-netcdf", + ".oda": "application/oda", + ".ogv": "video/ogv", + ".pct": "image/pict", + ".pic": "image/pict", + ".pict": "image/pict", + ".pnt": "image/x-macpaint", + ".pntg": "image/x-macpaint", + ".ps": "application/postscript", + ".qt": "video/quicktime", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ram": "audio/x-pn-realaudio", + ".rdf": "application/rdf+xml", + ".rm": "application/vnd.rn-realmedia", + ".roff": "application/x-troff", + ".sgm": "text/sgml", + ".sgml": "text/sgml", + ".silo": "model/mesh", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".so": "application/octet-stream", + ".svg": "image/svg+xml", + ".t": "application/x-troff", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".txt": "text/plain", + ".vrml": "model/vrml", + ".vxml": "application/voicexml+xml", + ".webm": "video/webm", + ".wrl": "model/vrml", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xml": "application/xml", + ".xsl": "application/xml", + ".xslt": "application/xslt+xml", + ".xul": "application/vnd.mozilla.xul+xml", +} + +// TypeByExtension returns the MIME type associated with the file extension ext. 
+// Used when filling in the ContentType option.
+func TypeByExtension(filePath string) string {
+	typ := mime.TypeByExtension(path.Ext(filePath))
+	if typ == "" {
+		typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
+	}
+	return typ
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
new file mode 100644
index 000000000..7c71b0181
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
@@ -0,0 +1,60 @@
+package oss
+
+import (
+	"hash"
+	"io"
+	"net/http"
+)
+
+// Response HTTP response from OSS
+type Response struct {
+	StatusCode int
+	Headers    http.Header
+	Body       io.ReadCloser
+	ClientCRC  uint64
+	ServerCRC  uint64
+}
+
+// PutObjectRequest The request of DoPutObject
+type PutObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+}
+
+// GetObjectRequest The request of DoGetObject
+type GetObjectRequest struct {
+	ObjectKey string
+}
+
+// GetObjectResult The result of DoGetObject
+type GetObjectResult struct {
+	Response  *Response
+	ClientCRC hash.Hash64
+	ServerCRC uint64
+}
+
+// AppendObjectRequest The request of DoAppendObject
+type AppendObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+	Position  int64
+}
+
+// AppendObjectResult The result of DoAppendObject
+type AppendObjectResult struct {
+	NextPosition int64
+	CRC          uint64
+}
+
+// UploadPartRequest The request of DoUploadPart
+type UploadPartRequest struct {
+	InitResult *InitiateMultipartUploadResult
+	Reader     io.Reader
+	PartSize   int64
+	PartNumber int
+}
+
+// UploadPartResult The result of DoUploadPart
+type UploadPartResult struct {
+	Part UploadPart
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
new file mode 100644
index 000000000..a33b48870
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
@@ -0,0 +1,461 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+)
+
+//
+// CopyFile copies an object between buckets in parts (multipart copy).
+//
+// srcBucketName   source bucket name.
+// srcObjectKey    source object name.
+// destObjectKey   destination object name; the destination bucket is Bucket.BucketName.
+// partSize        size of each copied part, in bytes; e.g. 100 * 1024 copies in 100KB parts.
+// options         object constraints; see InitiateMultipartUpload for details.
+//
+// error           nil on success, otherwise the error information.
+//
+func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
+	destBucketName := bucket.BucketName
+	if partSize < MinPartSize || partSize > MaxPartSize {
+		return errors.New("oss: part size invalid range [100KB, 5GB]")
+	}
+
+	cpConf, err := getCpConfig(options, filepath.Base(destObjectKey))
+	if err != nil {
+		return err
+	}
+
+	routines := getRoutines(options)
+
+	if cpConf.IsEnable {
+		return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
+			partSize, options, cpConf.FilePath, routines)
+	}
+
+	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
+		partSize, options, routines)
+}
+
+// ----- concurrent copy without checkpoint -----
+
+// worker goroutine arguments
+type copyWorkerArg struct {
+	bucket        *Bucket
+	imur          InitiateMultipartUploadResult
+	srcBucketName string
+	srcObjectKey  string
+	options       []Option
+	hook          copyPartHook
+}
+
+// hook for testing
+type copyPartHook func(part copyPart) error
+
+var copyPartHooker copyPartHook = defaultCopyPartHook
+
+func defaultCopyPartHook(part copyPart) error {
+	return nil
+}
+
+// worker goroutine
+func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, 
results chan<- UploadPart, failed chan<- error, die <-chan bool) { + for chunk := range jobs { + if err := arg.hook(chunk); err != nil { + failed <- err + break + } + chunkSize := chunk.End - chunk.Start + 1 + part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey, + chunk.Start, chunkSize, chunk.Number, arg.options...) + if err != nil { + failed <- err + break + } + select { + case <-die: + return + default: + } + results <- part + } +} + +// 调度协程 +func copyScheduler(jobs chan copyPart, parts []copyPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// 分片 +type copyPart struct { + Number int // 片序号[1, 10000] + Start int64 // 片起始位置 + End int64 // 片结束位置 +} + +// 文件分片 +func getCopyParts(bucket *Bucket, objectKey string, partSize int64) ([]copyPart, error) { + meta, err := bucket.GetObjectDetailedMeta(objectKey) + if err != nil { + return nil, err + } + + parts := []copyPart{} + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return nil, err + } + + part := copyPart{} + i := 0 + for offset := int64(0); offset < objectSize; offset += partSize { + part.Number = i + 1 + part.Start = offset + part.End = GetPartEnd(offset, objectSize, partSize) + parts = append(parts, part) + i++ + } + return parts, nil +} + +// 获取源文件大小 +func getSrcObjectBytes(parts []copyPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// 并发无断点续传的下载 +func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, + partSize int64, options []Option, routines int) error { + descBucket, err := bucket.Client.Bucket(destBucketName) + srcBucket, err := bucket.Client.Bucket(srcBucketName) + listener := getProgressListener(options) + + // 分割文件 + parts, err := getCopyParts(srcBucket, srcObjectKey, partSize) + if err != nil { + return err + } + + // 初始化上传任务 + imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...) 
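+	// imur identifies the multipart upload; every subsequent UploadPartCopy and
+	// the final CompleteMultipartUpload reference its UploadID.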
+ if err != nil { + return err + } + + jobs := make(chan copyPart, len(parts)) + results := make(chan UploadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + var completedBytes int64 + totalBytes := getSrcObjectBytes(parts) + event := newProgressEvent(TransferStartedEvent, 0, totalBytes) + publishProgress(listener, event) + + // 启动工作协程 + arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker} + for w := 1; w <= routines; w++ { + go copyWorker(w, arg, jobs, results, failed, die) + } + + // 并发上传分片 + go copyScheduler(jobs, parts) + + // 等待分片下载完成 + completed := 0 + ups := make([]UploadPart, len(parts)) + for completed < len(parts) { + select { + case part := <-results: + completed++ + ups[part.PartNumber-1] = part + completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1) + event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + descBucket.AbortMultipartUpload(imur) + event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes) + publishProgress(listener, event) + + // 提交任务 + _, err = descBucket.CompleteMultipartUpload(imur, ups) + if err != nil { + bucket.AbortMultipartUpload(imur) + return err + } + return nil +} + +// ----- 并发有断点的下载 ----- + +const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A" + +type copyCheckpoint struct { + Magic string // magic + MD5 string // cp内容的MD5 + SrcBucketName string // 源Bucket + SrcObjectKey string // 源Object + DestBucketName string // 目标Bucket + DestObjectKey string // 目标Bucket + CopyID string // copy id + ObjStat objectStat // 文件状态 + Parts []copyPart // 全部分片 + CopyParts []UploadPart // 分片上传成功后的返回值 + PartStat []bool // 分片下载是否完成 +} + +// CP数据是否有效,CP有效且Object没有更新时有效 +func (cp copyCheckpoint) isValid(bucket *Bucket, objectKey string) (bool, error) { + // 比较CP的Magic及MD5 + cpb := cp + cpb.MD5 = "" + js, _ := json.Marshal(cpb) + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + + if cp.Magic != downloadCpMagic || b64 != cp.MD5 { + return false, nil + } + + // 确认object没有更新 + meta, err := bucket.GetObjectDetailedMeta(objectKey) + if err != nil { + return false, err + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return false, err + } + + // 比较Object的大小/最后修改时间/etag + if cp.ObjStat.Size != objectSize || + cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) || + cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) { + return false, nil + } + + return true, nil +} + +// 从文件中load +func (cp *copyCheckpoint) load(filePath string) error { + contents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + err = json.Unmarshal(contents, cp) + return err +} + +// 更新分片状态 +func (cp *copyCheckpoint) update(part UploadPart) { + cp.CopyParts[part.PartNumber-1] = part + cp.PartStat[part.PartNumber-1] = true +} + +// dump到文件 +func (cp *copyCheckpoint) dump(filePath string) error { + bcp := *cp + + // 计算MD5 + bcp.MD5 = "" + js, err := json.Marshal(bcp) + if err != nil { + return err + } + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + bcp.MD5 = b64 + + // 序列化 + js, err = json.Marshal(bcp) + if err != nil { + return err + } + + // dump + return ioutil.WriteFile(filePath, js, FilePermMode) +} + 
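For reference, a minimal usage sketch of the checkpointed copy path above; the endpoint, credentials, bucket and object names are illustrative placeholders, not values from this change:

	client, err := oss.New("https://oss.aliyuncs.com", "accessKey", "secretKey")
	if err != nil {
		log.Fatal(err)
	}
	bucket, err := client.Bucket("dest-bucket")
	if err != nil {
		log.Fatal(err)
	}
	// Copy src-bucket/src.bin into dest-bucket/dst.bin in 10MB parts, three
	// workers at a time; progress is checkpointed to copy.cp and resumed on retry.
	err = bucket.CopyFile("src-bucket", "src.bin", "dst.bin", 10*1024*1024,
		oss.Routines(3), oss.Checkpoint(true, "copy.cp"))
	if err != nil {
		log.Fatal(err)
	}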
+// 未完成的分片 +func (cp copyCheckpoint) todoParts() []copyPart { + dps := []copyPart{} + for i, ps := range cp.PartStat { + if !ps { + dps = append(dps, cp.Parts[i]) + } + } + return dps +} + +// 完成的字节数 +func (cp copyCheckpoint) getCompletedBytes() int64 { + var completedBytes int64 + for i, part := range cp.Parts { + if cp.PartStat[i] { + completedBytes += (part.End - part.Start + 1) + } + } + return completedBytes +} + +// 初始化下载任务 +func (cp *copyCheckpoint) prepare(srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string, + partSize int64, options []Option) error { + // cp + cp.Magic = copyCpMagic + cp.SrcBucketName = srcBucket.BucketName + cp.SrcObjectKey = srcObjectKey + cp.DestBucketName = destBucket.BucketName + cp.DestObjectKey = destObjectKey + + // object + meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey) + if err != nil { + return err + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return err + } + + cp.ObjStat.Size = objectSize + cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified) + cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag) + + // parts + cp.Parts, err = getCopyParts(srcBucket, srcObjectKey, partSize) + if err != nil { + return err + } + cp.PartStat = make([]bool, len(cp.Parts)) + for i := range cp.PartStat { + cp.PartStat[i] = false + } + cp.CopyParts = make([]UploadPart, len(cp.Parts)) + + // init copy + imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...) + if err != nil { + return err + } + cp.CopyID = imur.UploadID + + return nil +} + +func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string) error { + imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName, + Key: cp.DestObjectKey, UploadID: cp.CopyID} + _, err := bucket.CompleteMultipartUpload(imur, parts) + if err != nil { + return err + } + os.Remove(cpFilePath) + return err +} + +// 并发带断点的下载 +func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, + partSize int64, options []Option, cpFilePath string, routines int) error { + descBucket, err := bucket.Client.Bucket(destBucketName) + srcBucket, err := bucket.Client.Bucket(srcBucketName) + listener := getProgressListener(options) + + // LOAD CP数据 + ccp := copyCheckpoint{} + err = ccp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // LOAD出错或数据无效重新初始化下载 + valid, err := ccp.isValid(srcBucket, srcObjectKey) + if err != nil || !valid { + if err = ccp.prepare(srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // 未完成的分片 + parts := ccp.todoParts() + imur := InitiateMultipartUploadResult{ + Bucket: destBucketName, + Key: destObjectKey, + UploadID: ccp.CopyID} + + jobs := make(chan copyPart, len(parts)) + results := make(chan UploadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := ccp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size) + publishProgress(listener, event) + + // 启动工作协程 + arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker} + for w := 1; w <= routines; w++ { + go copyWorker(w, arg, jobs, results, failed, die) + } + + // 并发下载分片 + go copyScheduler(jobs, parts) + + // 等待分片下载完成 + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + ccp.update(part) + ccp.dump(cpFilePath) + completedBytes += 
(parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1) + event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size) + publishProgress(listener, event) + + return ccp.complete(descBucket, ccp.CopyParts, cpFilePath) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go new file mode 100644 index 000000000..de8ea8fde --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go @@ -0,0 +1,291 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "io" + "net/http" + "os" + "sort" + "strconv" +) + +// +// InitiateMultipartUpload 初始化分片上传任务。 +// +// objectKey Object名称。 +// options 上传时可以指定Object的属性,可选属性有CacheControl、ContentDisposition、ContentEncoding、Expires、 +// ServerSideEncryption、Meta,具体含义请参考 +// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html +// +// InitiateMultipartUploadResult 初始化后操作成功的返回值,用于后面的UploadPartFromFile、UploadPartCopy等操作。error为nil时有效。 +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) { + var imur InitiateMultipartUploadResult + opts := addContentType(options, objectKey) + params := map[string]interface{}{} + params["uploads"] = nil + resp, err := bucket.do("POST", objectKey, params, opts, nil, nil) + if err != nil { + return imur, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &imur) + return imur, err +} + +// +// UploadPart 上传分片。 +// +// 初始化一个Multipart Upload之后,可以根据指定的Object名和Upload ID来分片(Part)上传数据。 +// 每一个上传的Part都有一个标识它的号码(part number,范围是1~10000)。对于同一个Upload ID, +// 该号码不但唯一标识这一片数据,也标识了这片数据在整个文件内的相对位置。如果您用同一个part号码,上传了新的数据, +// 那么OSS上已有的这个号码的Part数据将被覆盖。除了最后一片Part以外,其他的part最小为100KB; +// 最后一片Part没有大小限制。 +// +// imur InitiateMultipartUpload成功后的返回值。 +// reader io.Reader 需要分片上传的reader。 +// size 本次上传片Part的大小。 +// partNumber 本次上传片(Part)的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。 +// +// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,即传入参数partNumber; +// ETag及上传数据的MD5。error为nil时有效。 +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader, + partSize int64, partNumber int, options ...Option) (UploadPart, error) { + request := &UploadPartRequest{ + InitResult: &imur, + Reader: reader, + PartSize: partSize, + PartNumber: partNumber, + } + + result, err := bucket.DoUploadPart(request, options) + + return result.Part, err +} + +// +// UploadPartFromFile 上传分片。 +// +// imur InitiateMultipartUpload成功后的返回值。 +// filePath 需要分片上传的本地文件。 +// startPosition 本次上传文件片的起始位置。 +// partSize 本次上传文件片的大小。 +// partNumber 本次上传文件片的编号,范围是1~10000。 +// +// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片编号,传入参数partNumber; +// ETag上传数据的MD5。error为nil时有效。 +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var part = UploadPart{} + fd, err := os.Open(filePath) + if err != nil { + return part, err + } + defer 
fd.Close() + fd.Seek(startPosition, os.SEEK_SET) + + request := &UploadPartRequest{ + InitResult: &imur, + Reader: fd, + PartSize: partSize, + PartNumber: partNumber, + } + + result, err := bucket.DoUploadPart(request, options) + + return result.Part, err +} + +// +// DoUploadPart 上传分片。 +// +// request 上传分片请求。 +// +// UploadPartResult 上传分片请求返回值。 +// error 操作无错误为nil,非nil为错误信息。 +// +func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) { + listener := getProgressListener(options) + opts := []Option{ContentLength(request.PartSize)} + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(request.PartNumber) + params["uploadId"] = request.InitResult.UploadID + resp, err := bucket.do("PUT", request.InitResult.Key, params, opts, + &io.LimitedReader{R: request.Reader, N: request.PartSize}, listener) + if err != nil { + return &UploadPartResult{}, err + } + defer resp.Body.Close() + + part := UploadPart{ + ETag: resp.Headers.Get(HTTPHeaderEtag), + PartNumber: request.PartNumber, + } + + if bucket.getConfig().IsEnableCRC { + err = checkCRC(resp, "DoUploadPart") + if err != nil { + return &UploadPartResult{part}, err + } + } + + return &UploadPartResult{part}, nil +} + +// +// UploadPartCopy 拷贝分片。 +// +// imur InitiateMultipartUpload成功后的返回值。 +// copySrc 源Object名称。 +// startPosition 本次拷贝片(Part)在源Object的起始位置。 +// partSize 本次拷贝片的大小。 +// partNumber 本次拷贝片的编号,范围是1~10000。如果超出范围,OSS将返回InvalidArgument错误。 +// options copy时源Object的限制条件,满足限制条件时copy,不满足时返回错误。可选条件有CopySourceIfMatch、 +// CopySourceIfNoneMatch、CopySourceIfModifiedSince CopySourceIfUnmodifiedSince,具体含义请参看 +// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html +// +// UploadPart 上传成功的返回值,两个成员PartNumber、ETag。PartNumber片(Part)编号,即传入参数partNumber; +// ETag及上传数据的MD5。error为nil时有效。 +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var out UploadPartCopyResult + var part UploadPart + + opts := []Option{CopySource(srcBucketName, srcObjectKey), + CopySourceRange(startPosition, partSize)} + opts = append(opts, options...) 
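+	// The copy source and range were added above as X-Oss-Copy-Source and
+	// X-Oss-Copy-Source-Range headers; the PUT request itself carries no body.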
+ params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(partNumber) + params["uploadId"] = imur.UploadID + resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil) + if err != nil { + return part, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return part, err + } + part.ETag = out.ETag + part.PartNumber = partNumber + + return part, nil +} + +// +// CompleteMultipartUpload 提交分片上传任务。 +// +// imur InitiateMultipartUpload的返回值。 +// parts UploadPart/UploadPartFromFile/UploadPartCopy返回值组成的数组。 +// +// CompleteMultipartUploadResponse 操作成功后的返回值。error为nil时有效。 +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult, + parts []UploadPart) (CompleteMultipartUploadResult, error) { + var out CompleteMultipartUploadResult + + sort.Sort(uploadParts(parts)) + cxml := completeMultipartUploadXML{} + cxml.Part = parts + bs, err := xml.Marshal(cxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("POST", imur.Key, params, nil, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// AbortMultipartUpload 取消分片上传任务。 +// +// imur InitiateMultipartUpload的返回值。 +// +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult) error { + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("DELETE", imur.Key, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// +// ListUploadedParts 列出指定上传任务已经上传的分片。 +// +// imur InitiateMultipartUpload的返回值。 +// +// ListUploadedPartsResponse 操作成功后的返回值,成员UploadedParts已经上传/拷贝的片。error为nil时该返回值有效。 +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult) (ListUploadedPartsResult, error) { + var out ListUploadedPartsResult + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// ListMultipartUploads 列出所有未上传完整的multipart任务列表。 +// +// options ListObject的筛选行为。Prefix返回object的前缀,KeyMarker返回object的起始位置,MaxUploads最大数目默认1000, +// Delimiter用于对Object名字进行分组的字符,所有名字包含指定的前缀且第一次出现delimiter字符之间的object。 +// +// ListMultipartUploadResponse 操作成功后的返回值,error为nil时该返回值有效。 +// error 操作成功error为nil,非nil为错误信息。 +// +func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) { + var out ListMultipartUploadResult + + options = append(options, EncodingType("url")) + params, err := getRawParams(options) + if err != nil { + return out, err + } + params["uploads"] = nil + + resp, err := bucket.do("GET", "", params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListMultipartUploadResult(&out) + return out, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go new file mode 100644 index 000000000..f0e613b8b --- /dev/null +++ 
b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go @@ -0,0 +1,386 @@ +package oss + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" +) + +type optionType string + +const ( + optionParam optionType = "HTTPParameter" // URL参数 + optionHTTP optionType = "HTTPHeader" // HTTP头 + optionArg optionType = "FuncArgument" // 函数参数 +) + +const ( + deleteObjectsQuiet = "delete-objects-quiet" + routineNum = "x-routine-num" + checkpointConfig = "x-cp-config" + initCRC64 = "init-crc64" + progressListener = "x-progress-listener" + storageClass = "storage-class" +) + +type ( + optionValue struct { + Value interface{} + Type optionType + } + + // Option http option + Option func(map[string]optionValue) error +) + +// ACL is an option to set X-Oss-Acl header +func ACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssACL, string(acl)) +} + +// ContentType is an option to set Content-Type header +func ContentType(value string) Option { + return setHeader(HTTPHeaderContentType, value) +} + +// ContentLength is an option to set Content-Length header +func ContentLength(length int64) Option { + return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10)) +} + +// CacheControl is an option to set Cache-Control header +func CacheControl(value string) Option { + return setHeader(HTTPHeaderCacheControl, value) +} + +// ContentDisposition is an option to set Content-Disposition header +func ContentDisposition(value string) Option { + return setHeader(HTTPHeaderContentDisposition, value) +} + +// ContentEncoding is an option to set Content-Encoding header +func ContentEncoding(value string) Option { + return setHeader(HTTPHeaderContentEncoding, value) +} + +// ContentMD5 is an option to set Content-MD5 header +func ContentMD5(value string) Option { + return setHeader(HTTPHeaderContentMD5, value) +} + +// Expires is an option to set Expires header +func Expires(t time.Time) Option { + return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat)) +} + +// Meta is an option to set Meta header +func Meta(key, value string) Option { + return setHeader(HTTPHeaderOssMetaPrefix+key, value) +} + +// Range is an option to set Range header, [start, end] +func Range(start, end int64) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end)) +} + +// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048 +func NormalizedRange(nr string) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr))) +} + +// AcceptEncoding is an option to set Accept-Encoding header +func AcceptEncoding(value string) Option { + return setHeader(HTTPHeaderAcceptEncoding, value) +} + +// IfModifiedSince is an option to set If-Modified-Since header +func IfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat)) +} + +// IfUnmodifiedSince is an option to set If-Unmodified-Since header +func IfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// IfMatch is an option to set If-Match header +func IfMatch(value string) Option { + return setHeader(HTTPHeaderIfMatch, value) +} + +// IfNoneMatch is an option to set IfNoneMatch header +func IfNoneMatch(value string) Option { + return setHeader(HTTPHeaderIfNoneMatch, value) +} + +// CopySource is an option to set X-Oss-Copy-Source header +func CopySource(sourceBucket, sourceObject string) Option { + return setHeader(HTTPHeaderOssCopySource, 
"/"+sourceBucket+"/"+sourceObject) +} + +// CopySourceRange is an option to set X-Oss-Copy-Source header +func CopySourceRange(startPosition, partSize int64) Option { + val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" + + strconv.FormatInt((startPosition+partSize-1), 10) + return setHeader(HTTPHeaderOssCopySourceRange, val) +} + +// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header +func CopySourceIfMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfMatch, value) +} + +// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header +func CopySourceIfNoneMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value) +} + +// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header +func CopySourceIfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat)) +} + +// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header +func CopySourceIfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// MetadataDirective is an option to set X-Oss-Metadata-Directive header +func MetadataDirective(directive MetadataDirectiveType) Option { + return setHeader(HTTPHeaderOssMetadataDirective, string(directive)) +} + +// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header +func ServerSideEncryption(value string) Option { + return setHeader(HTTPHeaderOssServerSideEncryption, value) +} + +// ObjectACL is an option to set X-Oss-Object-Acl header +func ObjectACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssObjectACL, string(acl)) +} + +// symlinkTarget is an option to set X-Oss-Symlink-Target +func symlinkTarget(targetObjectKey string) Option { + return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey) +} + +// Origin is an option to set Origin header +func Origin(value string) Option { + return setHeader(HTTPHeaderOrigin, value) +} + +// Delimiter is an option to set delimiler parameter +func Delimiter(value string) Option { + return addParam("delimiter", value) +} + +// Marker is an option to set marker parameter +func Marker(value string) Option { + return addParam("marker", value) +} + +// MaxKeys is an option to set maxkeys parameter +func MaxKeys(value int) Option { + return addParam("max-keys", strconv.Itoa(value)) +} + +// Prefix is an option to set prefix parameter +func Prefix(value string) Option { + return addParam("prefix", value) +} + +// EncodingType is an option to set encoding-type parameter +func EncodingType(value string) Option { + return addParam("encoding-type", value) +} + +// MaxUploads is an option to set max-uploads parameter +func MaxUploads(value int) Option { + return addParam("max-uploads", strconv.Itoa(value)) +} + +// KeyMarker is an option to set key-marker parameter +func KeyMarker(value string) Option { + return addParam("key-marker", value) +} + +// UploadIDMarker is an option to set upload-id-marker parameter +func UploadIDMarker(value string) Option { + return addParam("upload-id-marker", value) +} + +// DeleteObjectsQuiet DeleteObjects详细(verbose)模式或简单(quiet)模式,默认详细模式。 +func DeleteObjectsQuiet(isQuiet bool) Option { + return addArg(deleteObjectsQuiet, isQuiet) +} + +// StorageClass bucket的存储方式 +func StorageClass(value StorageClassType) Option { + return addArg(storageClass, value) +} + +// 断点续传配置,包括是否启用、cp文件 +type cpConfig 
struct { + IsEnable bool + FilePath string +} + +// Checkpoint DownloadFile/UploadFile是否开启checkpoint及checkpoint文件路径 +func Checkpoint(isEnable bool, filePath string) Option { + return addArg(checkpointConfig, &cpConfig{isEnable, filePath}) +} + +// Routines DownloadFile/UploadFile并发数 +func Routines(n int) Option { + return addArg(routineNum, n) +} + +// InitCRC AppendObject CRC的校验的初始值 +func InitCRC(initCRC uint64) Option { + return addArg(initCRC64, initCRC) +} + +// Progress set progress listener +func Progress(listener ProgressListener) Option { + return addArg(progressListener, listener) +} + +// ResponseContentType is an option to set response-content-type param +func ResponseContentType(value string) Option { + return addParam("response-content-type", value) +} + +// ResponseContentLanguage is an option to set response-content-language param +func ResponseContentLanguage(value string) Option { + return addParam("response-content-language", value) +} + +// ResponseExpires is an option to set response-expires param +func ResponseExpires(value string) Option { + return addParam("response-expires", value) +} + +// ResponseCacheControl is an option to set response-cache-control param +func ResponseCacheControl(value string) Option { + return addParam("response-cache-control", value) +} + +// ResponseContentDisposition is an option to set response-content-disposition param +func ResponseContentDisposition(value string) Option { + return addParam("response-content-disposition", value) +} + +// ResponseContentEncoding is an option to set response-content-encoding param +func ResponseContentEncoding(value string) Option { + return addParam("response-content-encoding", value) +} + +// Process is an option to set X-Oss-Process param +func Process(value string) Option { + return addParam("X-Oss-Process", value) +} +func setHeader(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionHTTP} + return nil + } +} + +func addParam(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionParam} + return nil + } +} + +func addArg(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionArg} + return nil + } +} + +func handleOptions(headers map[string]string, options []Option) error { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return err + } + } + } + + for k, v := range params { + if v.Type == optionHTTP { + headers[k] = v.Value.(string) + } + } + return nil +} + +func getRawParams(options []Option) (map[string]interface{}, error) { + // option + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + paramsm := map[string]interface{}{} + // serialize + for k, v := range params { + if v.Type == optionParam { + vs := params[k] + paramsm[k] = vs.Value.(string) + } + } + + return paramsm, nil +} + +func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + if val, ok := 
+
+func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) {
+	params := map[string]optionValue{}
+	for _, option := range options {
+		if option != nil {
+			if err := option(params); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	if val, ok := params[param]; ok {
+		return val.Value, nil
+	}
+	return defaultVal, nil
+}
+
+func isOptionSet(options []Option, option string) (bool, interface{}, error) {
+	params := map[string]optionValue{}
+	for _, option := range options {
+		if option != nil {
+			if err := option(params); err != nil {
+				return false, nil, err
+			}
+		}
+	}
+
+	if val, ok := params[option]; ok {
+		return true, val.Value, nil
+	}
+	return false, nil, nil
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
new file mode 100644
index 000000000..0ea897f03
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
@@ -0,0 +1,105 @@
+package oss
+
+import "io"
+
+// ProgressEventType is the type of a transfer progress event
+type ProgressEventType int
+
+const (
+	// TransferStartedEvent transfer started, TotalBytes is set
+	TransferStartedEvent ProgressEventType = 1 + iota
+	// TransferDataEvent data transferred, ConsumedBytes and TotalBytes are set
+	TransferDataEvent
+	// TransferCompletedEvent transfer completed
+	TransferCompletedEvent
+	// TransferFailedEvent transfer encountered an error
+	TransferFailedEvent
+)
+
+// ProgressEvent is a progress event
+type ProgressEvent struct {
+	ConsumedBytes int64
+	TotalBytes    int64
+	EventType     ProgressEventType
+}
+
+// ProgressListener listens for progress change events
+type ProgressListener interface {
+	ProgressChanged(event *ProgressEvent)
+}
+
+// -------------------- private --------------------
+
+func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
+	return &ProgressEvent{
+		ConsumedBytes: consumed,
+		TotalBytes:    total,
+		EventType:     eventType}
+}
+
+// publishProgress publishes the event to the listener, if any
+func publishProgress(listener ProgressListener, event *ProgressEvent) {
+	if listener != nil && event != nil {
+		listener.ProgressChanged(event)
+	}
+}
+
+type readerTracker struct {
+	completedBytes int64
+}
+
+type teeReader struct {
+	reader        io.Reader
+	writer        io.Writer
+	listener      ProgressListener
+	consumedBytes int64
+	totalBytes    int64
+	tracker       *readerTracker
+}
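ProgressListener is a one-method interface, so wiring up progress reporting takes only a few lines. A hedged sketch of a listener (percentLogger is an illustrative name, not part of the SDK):

    package main

    import (
        "fmt"

        "github.com/aliyun/aliyun-oss-go-sdk/oss"
    )

    // percentLogger prints transfer progress as a percentage.
    type percentLogger struct{}

    func (percentLogger) ProgressChanged(event *oss.ProgressEvent) {
        if event.EventType == oss.TransferDataEvent && event.TotalBytes > 0 {
            fmt.Printf("%d%%\n", event.ConsumedBytes*100/event.TotalBytes)
        }
    }

    func main() {
        // Compile-time check that percentLogger satisfies the interface.
        var _ oss.ProgressListener = percentLogger{}
    }

Such a listener would then be attached to upload/download calls with the Progress option defined in option.go above, e.g. oss.Progress(percentLogger{}).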
+
+// TeeReader returns a Reader that writes to w what it reads from r.
+// All reads from r performed through it are matched with
+// corresponding writes to w. There is no internal buffering -
+// the write must complete before the read completes.
+// Any error encountered while writing is reported as a read error.
+func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.Reader {
+	return &teeReader{
+		reader:        reader,
+		writer:        writer,
+		listener:      listener,
+		consumedBytes: 0,
+		totalBytes:    totalBytes,
+		tracker:       tracker,
+	}
+}
+
+func (t *teeReader) Read(p []byte) (n int, err error) {
+	n, err = t.reader.Read(p)
+
+	// The read encountered an error
+	if err != nil && err != io.EOF {
+		event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes)
+		publishProgress(t.listener, event)
+	}
+
+	if n > 0 {
+		t.consumedBytes += int64(n)
+		// CRC: feed the bytes read to the optional writer
+		if t.writer != nil {
+			if n, err := t.writer.Write(p[:n]); err != nil {
+				return n, err
+			}
+		}
+		// Progress: notify the listener
+		if t.listener != nil {
+			event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes)
+			publishProgress(t.listener, event)
+		}
+		// Track the completed bytes
+		if t.tracker != nil {
+			t.tracker.completedBytes = t.consumedBytes
+		}
+	}
+
+	return
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
new file mode 100644
index 000000000..8df36f7b2
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go
@@ -0,0 +1,450 @@
+package oss
+
+import (
+	"encoding/xml"
+	"net/url"
+	"time"
+)
+
+// ListBucketsResult is the result of a ListBuckets request
+type ListBucketsResult struct {
+	XMLName     xml.Name           `xml:"ListAllMyBucketsResult"`
+	Prefix      string             `xml:"Prefix"`         // Prefix the results were filtered by
+	Marker      string             `xml:"Marker"`         // Marker the query started from; present when the results are truncated
+	MaxKeys     int                `xml:"MaxKeys"`        // Maximum number of results returned; present when the results are truncated
+	IsTruncated bool               `xml:"IsTruncated"`    // Whether all results have been returned
+	NextMarker  string             `xml:"NextMarker"`     // Marker for the next query
+	Owner       Owner              `xml:"Owner"`          // Owner information
+	Buckets     []BucketProperties `xml:"Buckets>Bucket"` // List of buckets
+}
+
+// BucketProperties describes a bucket
+type BucketProperties struct {
+	XMLName      xml.Name  `xml:"Bucket"`
+	Name         string    `xml:"Name"`         // Bucket name
+	Location     string    `xml:"Location"`     // Data center the bucket is located in
+	CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
+	StorageClass string    `xml:"StorageClass"` // Bucket storage class
+}
+
+// GetBucketACLResult is the result of a GetBucketACL request
+type GetBucketACLResult struct {
+	XMLName xml.Name `xml:"AccessControlPolicy"`
+	ACL     string   `xml:"AccessControlList>Grant"` // Bucket ACL
+	Owner   Owner    `xml:"Owner"`                   // Bucket owner information
+}
+
+// LifecycleConfiguration is the bucket lifecycle configuration
+type LifecycleConfiguration struct {
+	XMLName xml.Name        `xml:"LifecycleConfiguration"`
+	Rules   []LifecycleRule `xml:"Rule"`
+}
+
+// LifecycleRule is a lifecycle rule
+type LifecycleRule struct {
+	XMLName    xml.Name            `xml:"Rule"`
+	ID         string              `xml:"ID"`         // Unique ID of the rule
+	Prefix     string              `xml:"Prefix"`     // Prefix of the objects the rule applies to
+	Status     string              `xml:"Status"`     // Whether the rule is enabled
+	Expiration LifecycleExpiration `xml:"Expiration"` // Expiration attributes of the rule
+}
+
+// LifecycleExpiration is the expiration attribute of a rule
+type LifecycleExpiration struct {
+	XMLName xml.Name  `xml:"Expiration"`
+	Days    int       `xml:"Days,omitempty"` // Days after the last modification before the rule applies
+	Date    time.Time `xml:"Date,omitempty"` // Date the rule applies from
+}
+
+type lifecycleXML struct {
+	XMLName xml.Name        `xml:"LifecycleConfiguration"`
+	Rules   []lifecycleRule `xml:"Rule"`
+}
+
+type lifecycleRule struct {
+	XMLName    xml.Name            `xml:"Rule"`
+	ID         string              `xml:"ID"`
+	Prefix     string              `xml:"Prefix"`
+	Status     string              `xml:"Status"`
+	Expiration lifecycleExpiration `xml:"Expiration"`
+}
+
+type lifecycleExpiration struct {
+	XMLName xml.Name `xml:"Expiration"`
+	Days    int      `xml:"Days,omitempty"`
+	Date    string   `xml:"Date,omitempty"`
+}
+
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"
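The unexported lifecycleXML/lifecycleRule types exist because the wire format wants Expiration>Date serialized as a formatted string rather than a time.Time; convLifecycleRule (next) performs that conversion before marshaling. A sketch of what encoding/xml produces for such a rule, assuming the wire format mirrors these struct tags (the local rule/expiration types are illustrative copies of the unexported ones):

    package main

    import (
        "encoding/xml"
        "fmt"
    )

    type expiration struct {
        XMLName xml.Name `xml:"Expiration"`
        Days    int      `xml:"Days,omitempty"`
        Date    string   `xml:"Date,omitempty"`
    }

    type rule struct {
        XMLName    xml.Name `xml:"Rule"`
        ID         string   `xml:"ID"`
        Prefix     string   `xml:"Prefix"`
        Status     string   `xml:"Status"`
        Expiration expiration
    }

    func main() {
        r := rule{ID: "r1", Prefix: "logs/", Status: "Enabled",
            Expiration: expiration{Days: 30}}
        out, _ := xml.MarshalIndent(r, "", "  ")
        fmt.Println(string(out))
        // <Rule> ... <Expiration><Days>30</Days></Expiration> </Rule>
        // Date is omitted because omitempty drops the empty string.
    }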
+
+// convLifecycleRule converts exported lifecycle rules into the wire format,
+// formatting Expiration.Date as a string when it is set
+func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
+	rs := []lifecycleRule{}
+	for _, rule := range rules {
+		r := lifecycleRule{}
+		r.ID = rule.ID
+		r.Prefix = rule.Prefix
+		r.Status = rule.Status
+		if rule.Expiration.Date.IsZero() {
+			r.Expiration.Days = rule.Expiration.Days
+		} else {
+			r.Expiration.Date = rule.Expiration.Date.Format(expirationDateFormat)
+		}
+		rs = append(rs, r)
+	}
+	return rs
+}
+
+// BuildLifecycleRuleByDays builds a lifecycle rule that expires objects a given number of days after their last modification
+func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: LifecycleExpiration{Days: days}}
+}
+
+// BuildLifecycleRuleByDate builds a lifecycle rule that expires objects on a given date
+func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: LifecycleExpiration{Date: date}}
+}
+
+// GetBucketLifecycleResult is the result of a GetBucketLifecycle request
+type GetBucketLifecycleResult LifecycleConfiguration
+
+// RefererXML is the referer configuration
+type RefererXML struct {
+	XMLName           xml.Name `xml:"RefererConfiguration"`
+	AllowEmptyReferer bool     `xml:"AllowEmptyReferer"`   // Whether requests with an empty Referer are allowed
+	RefererList       []string `xml:"RefererList>Referer"` // Referer access whitelist
+}
+
+// GetBucketRefererResult is the result of a GetBucketReferer request
+type GetBucketRefererResult RefererXML
+
+// LoggingXML is the logging configuration
+type LoggingXML struct {
+	XMLName        xml.Name       `xml:"BucketLoggingStatus"`
+	LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // Container for the access-log settings
+}
+
+type loggingXMLEmpty struct {
+	XMLName xml.Name `xml:"BucketLoggingStatus"`
+}
+
+// LoggingEnabled is the container for the access-log settings
+type LoggingEnabled struct {
+	XMLName      xml.Name `xml:"LoggingEnabled"`
+	TargetBucket string   `xml:"TargetBucket"` // Bucket the access logs are stored in
+	TargetPrefix string   `xml:"TargetPrefix"` // Prefix of the access-log files
+}
+
+// GetBucketLoggingResult is the result of a GetBucketLogging request
+type GetBucketLoggingResult LoggingXML
+
+// WebsiteXML is the static-website configuration
+type WebsiteXML struct {
+	XMLName       xml.Name      `xml:"WebsiteConfiguration"`
+	IndexDocument IndexDocument `xml:"IndexDocument"` // Index file served for directory URLs
+	ErrorDocument ErrorDocument `xml:"ErrorDocument"` // File served on 404 errors
+}
+
+// IndexDocument is the index file served for directory URLs
+type IndexDocument struct {
+	XMLName xml.Name `xml:"IndexDocument"`
+	Suffix  string   `xml:"Suffix"` // Name of the index file
+}
+
+// ErrorDocument is the file served on 404 errors
+type ErrorDocument struct {
+	XMLName xml.Name `xml:"ErrorDocument"`
+	Key     string   `xml:"Key"` // Name of the 404 file
+}
+
+// GetBucketWebsiteResult is the result of a GetBucketWebsite request
+type GetBucketWebsiteResult WebsiteXML
+
+// CORSXML is the CORS configuration
+type CORSXML struct {
+	XMLName   xml.Name   `xml:"CORSConfiguration"`
+	CORSRules []CORSRule `xml:"CORSRule"` // List of CORS rules
+}
+
+// CORSRule is a CORS rule
+type CORSRule struct {
+	XMLName       xml.Name `xml:"CORSRule"`
+	AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins; defaults to the wildcard "*"
+	AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods
+	AllowedHeader []string `xml:"AllowedHeader"` // Allowed request headers
+	ExposeHeader  []string `xml:"ExposeHeader"`  // Response headers exposed to the client
+	MaxAgeSeconds int      `xml:"MaxAgeSeconds"` // Maximum cache time, in seconds
+}
+
+// GetBucketCORSResult is the result of a GetBucketCORS request
+type GetBucketCORSResult CORSXML
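The `a>b` struct-tag syntax used throughout these types descends into nested XML elements, which is how GetBucketACLResult pulls Grant out of AccessControlList with a flat string field. A quick illustration with a hand-written response body (the XML here is a plausible example, not captured from a live OSS endpoint):

    package main

    import (
        "encoding/xml"
        "fmt"

        "github.com/aliyun/aliyun-oss-go-sdk/oss"
    )

    func main() {
        body := `<AccessControlPolicy>
          <Owner><ID>1234</ID><DisplayName>user</DisplayName></Owner>
          <AccessControlList><Grant>private</Grant></AccessControlList>
        </AccessControlPolicy>`

        var res oss.GetBucketACLResult
        if err := xml.Unmarshal([]byte(body), &res); err != nil {
            panic(err)
        }
        fmt.Println(res.ACL, res.Owner.ID) // private 1234
    }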
+
+// GetBucketInfoResult is the result of a GetBucketInfo request
+type GetBucketInfoResult struct {
+	XMLName    xml.Name   `xml:"BucketInfo"`
+	BucketInfo BucketInfo `xml:"Bucket"`
+}
+
+// BucketInfo describes a bucket
+type BucketInfo struct {
+	XMLName          xml.Name  `xml:"Bucket"`
+	Name             string    `xml:"Name"`                    // Bucket name
+	Location         string    `xml:"Location"`                // Data center the bucket is located in
+	CreationDate     time.Time `xml:"CreationDate"`            // Bucket creation time
+	ExtranetEndpoint string    `xml:"ExtranetEndpoint"`        // Public (Internet) endpoint of the bucket
+	IntranetEndpoint string    `xml:"IntranetEndpoint"`        // Internal (intranet) endpoint of the bucket
+	ACL              string    `xml:"AccessControlList>Grant"` // Bucket ACL
+	Owner            Owner     `xml:"Owner"`                   // Bucket owner information
+	StorageClass     string    `xml:"StorageClass"`            // Bucket storage class
+}
+
+// ListObjectsResult is the result of a ListObjects request
+type ListObjectsResult struct {
+	XMLName        xml.Name           `xml:"ListBucketResult"`
+	Prefix         string             `xml:"Prefix"`                // Prefix the results were filtered by
+	Marker         string             `xml:"Marker"`                // Marker the query started from
+	MaxKeys        int                `xml:"MaxKeys"`               // Maximum number of results requested
+	Delimiter      string             `xml:"Delimiter"`             // Character used to group object names
+	IsTruncated    bool               `xml:"IsTruncated"`           // Whether all results have been returned
+	NextMarker     string             `xml:"NextMarker"`            // Marker for the next query
+	Objects        []ObjectProperties `xml:"Contents"`              // The objects
+	CommonPrefixes []string           `xml:"CommonPrefixes>Prefix"` // Objects grouped by a shared prefix ending with the delimiter
+}
+
+// ObjectProperties describes an object
+type ObjectProperties struct {
+	XMLName      xml.Name  `xml:"Contents"`
+	Key          string    `xml:"Key"`          // Object key
+	Type         string    `xml:"Type"`         // Object type
+	Size         int64     `xml:"Size"`         // Object size in bytes
+	ETag         string    `xml:"ETag"`         // ETag identifying the object content
+	Owner        Owner     `xml:"Owner"`        // Container for the object owner information
+	LastModified time.Time `xml:"LastModified"` // Last modification time of the object
+	StorageClass string    `xml:"StorageClass"` // Object storage class
+}
+
+// Owner is the owner of a bucket or object
+type Owner struct {
+	XMLName     xml.Name `xml:"Owner"`
+	ID          string   `xml:"ID"`          // User ID
+	DisplayName string   `xml:"DisplayName"` // Owner name
+}
+
+// CopyObjectResult is the result of a CopyObject request
+type CopyObjectResult struct {
+	XMLName      xml.Name  `xml:"CopyObjectResult"`
+	LastModified time.Time `xml:"LastModified"` // Last modification time of the new object
+	ETag         string    `xml:"ETag"`         // ETag of the new object
+}
+
+// GetObjectACLResult is the result of a GetObjectACL request
+type GetObjectACLResult GetBucketACLResult
+
+type deleteXML struct {
+	XMLName xml.Name       `xml:"Delete"`
+	Objects []DeleteObject `xml:"Object"` // Objects to delete
+	Quiet   bool           `xml:"Quiet"`  // Quiet response mode
+}
+
+// DeleteObject is an object to delete
+type DeleteObject struct {
+	XMLName xml.Name `xml:"Object"`
+	Key     string   `xml:"Key"` // Object name
+}
+
+// DeleteObjectsResult is the result of a DeleteObjects request
+type DeleteObjectsResult struct {
+	XMLName        xml.Name `xml:"DeleteResult"`
+	DeletedObjects []string `xml:"Deleted>Key"` // List of deleted objects
+}
+
+// InitiateMultipartUploadResult is the result of an InitiateMultipartUpload request
+type InitiateMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	Key      string   `xml:"Key"`      // Name of the object being uploaded
+	UploadID string   `xml:"UploadId"` // Generated UploadId
+}
+
+// UploadPart is an uploaded or copied part
+type UploadPart struct {
+	XMLName    xml.Name `xml:"Part"`
+	PartNumber int      `xml:"PartNumber"` // Part number
+	ETag       string   `xml:"ETag"`       // ETag of the part
+}
+
+type uploadParts []UploadPart
+
+func (slice uploadParts) Len() int {
+	return len(slice)
+}
+
+func (slice uploadParts) Less(i, j int) bool {
+	return slice[i].PartNumber < slice[j].PartNumber
+}
+
+func (slice uploadParts) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+// UploadPartCopyResult is the result of an UploadPartCopy request
+type UploadPartCopyResult struct {
+	XMLName      xml.Name  `xml:"CopyPartResult"`
+	LastModified time.Time `xml:"LastModified"` // Last modification time
+	ETag         string    `xml:"ETag"`         // ETag
+}
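Completing a multipart upload requires parts ordered by part number, which is what the uploadParts sort.Interface above is for. The same pattern in a self-contained sketch (the part/byNumber names are illustrative):

    package main

    import (
        "fmt"
        "sort"
    )

    // part mirrors the SDK's UploadPart for illustration.
    type part struct {
        PartNumber int
        ETag       string
    }

    // byNumber implements sort.Interface the same way uploadParts does.
    type byNumber []part

    func (s byNumber) Len() int           { return len(s) }
    func (s byNumber) Less(i, j int) bool { return s[i].PartNumber < s[j].PartNumber }
    func (s byNumber) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

    func main() {
        ps := byNumber{{3, "c"}, {1, "a"}, {2, "b"}}
        sort.Sort(ps)
        fmt.Println(ps) // [{1 a} {2 b} {3 c}]
    }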
+
+type completeMultipartUploadXML struct {
+	XMLName xml.Name     `xml:"CompleteMultipartUpload"`
+	Part    []UploadPart `xml:"Part"`
+}
+
+// CompleteMultipartUploadResult is the result of a CompleteMultipartUpload request
+type CompleteMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"CompleteMultipartUploadResult"`
+	Location string   `xml:"Location"` // URL of the object
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	ETag     string   `xml:"ETag"`     // ETag of the object
+	Key      string   `xml:"Key"`      // Name of the object
+}
+
+// ListUploadedPartsResult is the result of a ListUploadedParts request
+type ListUploadedPartsResult struct {
+	XMLName              xml.Name       `xml:"ListPartsResult"`
+	Bucket               string         `xml:"Bucket"`               // Bucket name
+	Key                  string         `xml:"Key"`                  // Object name
+	UploadID             string         `xml:"UploadId"`             // Upload ID
+	NextPartNumberMarker string         `xml:"NextPartNumberMarker"` // Position of the next part
+	MaxParts             int            `xml:"MaxParts"`             // Maximum number of parts returned
+	IsTruncated          bool           `xml:"IsTruncated"`          // Whether all parts have been returned
+	UploadedParts        []UploadedPart `xml:"Part"`                 // Completed parts
+}
+
+// UploadedPart is a part that has already been uploaded for this task
+type UploadedPart struct {
+	XMLName      xml.Name  `xml:"Part"`
+	PartNumber   int       `xml:"PartNumber"`   // Part number
+	LastModified time.Time `xml:"LastModified"` // Last modification time
+	ETag         string    `xml:"ETag"`         // ETag of the part
+	Size         int       `xml:"Size"`         // Part size
+}
+
+// ListMultipartUploadResult is the result of a ListMultipartUpload request
+type ListMultipartUploadResult struct {
+	XMLName            xml.Name            `xml:"ListMultipartUploadsResult"`
+	Bucket             string              `xml:"Bucket"`                // Bucket name
+	Delimiter          string              `xml:"Delimiter"`             // Character used to group names
+	Prefix             string              `xml:"Prefix"`                // Prefix the results were filtered by
+	KeyMarker          string              `xml:"KeyMarker"`             // Starting object position
+	UploadIDMarker     string              `xml:"UploadIdMarker"`        // Starting UploadId position
+	NextKeyMarker      string              `xml:"NextKeyMarker"`         // KeyMarker to continue from when the results are truncated
+	NextUploadIDMarker string              `xml:"NextUploadIdMarker"`    // UploadId to continue from when the results are truncated
+	MaxUploads         int                 `xml:"MaxUploads"`            // Maximum number of uploads returned
+	IsTruncated        bool                `xml:"IsTruncated"`           // Whether all results have been returned
+	Uploads            []UncompletedUpload `xml:"Upload"`                // Unfinished multipart uploads
+	CommonPrefixes     []string            `xml:"CommonPrefixes>Prefix"` // Names grouped by a shared prefix up to the first delimiter
+}
+
+// UncompletedUpload is an unfinished upload task
+type UncompletedUpload struct {
+	XMLName   xml.Name  `xml:"Upload"`
+	Key       string    `xml:"Key"`       // Object name
+	UploadID  string    `xml:"UploadId"`  // Corresponding UploadId
+	Initiated time.Time `xml:"Initiated"` // Initiation time, e.g. 2012-02-23T04:18:23.000Z
+}
+
+// decodeDeleteObjectsResult decodes the URL-encoded keys in the result
+func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
+	var err error
+	for i := 0; i < len(result.DeletedObjects); i++ {
+		result.DeletedObjects[i], err = url.QueryUnescape(result.DeletedObjects[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// decodeListObjectsResult decodes the URL-encoded fields in the result
+func decodeListObjectsResult(result *ListObjectsResult) error {
+	var err error
+	result.Prefix, err = url.QueryUnescape(result.Prefix)
+	if err != nil {
+		return err
+	}
+	result.Marker, err = url.QueryUnescape(result.Marker)
+	if err != nil {
+		return err
+	}
+	result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+	if err != nil {
+		return err
+	}
+	result.NextMarker, err = url.QueryUnescape(result.NextMarker)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < len(result.Objects); i++ {
+		result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key)
+		if err != nil {
+			return err
+		}
+	}
+	for i := 0; i < len(result.CommonPrefixes); i++ {
+		result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
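These decode helpers exist because list responses can come back with percent-encoded names (typically when the request asks for URL encoding of keys), and url.QueryUnescape restores the original bytes. For example:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // A percent-encoded key as it might appear in a listing response.
        key, err := url.QueryUnescape("photos%2F2017%2F%E5%9B%BE%E7%89%87.jpg")
        if err != nil {
            panic(err)
        }
        fmt.Println(key) // photos/2017/图片.jpg
    }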
+
+// decodeListMultipartUploadResult decodes the URL-encoded fields in the result
+func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
+	var err error
+	result.Prefix, err = url.QueryUnescape(result.Prefix)
+	if err != nil {
+		return err
+	}
+	result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+	if err != nil {
+		return err
+	}
+	result.KeyMarker, err = url.QueryUnescape(result.KeyMarker)
+	if err != nil {
+		return err
+	}
+	result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < len(result.Uploads); i++ {
+		result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key)
+		if err != nil {
+			return err
+		}
+	}
+	for i := 0; i < len(result.CommonPrefixes); i++ {
+		result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// createBucketConfiguration is the configuration sent with a CreateBucket request
+type createBucketConfiguration struct {
+	XMLName      xml.Name         `xml:"CreateBucketConfiguration"`
+	StorageClass StorageClassType `xml:"StorageClass,omitempty"`
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
new file mode 100644
index 000000000..049ed82d9
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go
@@ -0,0 +1,485 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"os"
+	"time"
+)
+
+//
+// UploadFile uploads a file using multipart upload.
+//
+// objectKey    object name.
+// filePath     local file to upload.
+// partSize     size of each part in bytes, e.g. 100 * 1024 for 100KB parts.
+// options      object attributes to set on upload. See InitiateMultipartUpload.
+//
+// error        nil on success, otherwise the error.
+//
+func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error {
+	if partSize < MinPartSize || partSize > MaxPartSize {
+		return errors.New("oss: part size invalid range (1024KB, 5GB]")
+	}
+
+	cpConf, err := getCpConfig(options, filePath)
+	if err != nil {
+		return err
+	}
+
+	routines := getRoutines(options)
+
+	if cpConf.IsEnable {
+		return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpConf.FilePath, routines)
+	}
+
+	return bucket.uploadFile(objectKey, filePath, partSize, options, routines)
+}
+
+// ----- Concurrent upload without checkpoint -----
+
+// getCpConfig gets the checkpoint configuration
+func getCpConfig(options []Option, filePath string) (*cpConfig, error) {
+	cpc := &cpConfig{}
+	cpcOpt, err := findOption(options, checkpointConfig, nil)
+	if err != nil || cpcOpt == nil {
+		return cpc, err
+	}
+
+	cpc = cpcOpt.(*cpConfig)
+	if cpc.IsEnable && cpc.FilePath == "" {
+		cpc.FilePath = filePath + CheckpointFileSuffix
+	}
+
+	return cpc, nil
+}
+
+// getRoutines gets the concurrency level; the default is 1
+func getRoutines(options []Option) int {
+	rtnOpt, err := findOption(options, routineNum, nil)
+	if err != nil || rtnOpt == nil {
+		return 1
+	}
+
+	rs := rtnOpt.(int)
+	if rs < 1 {
+		rs = 1
+	} else if rs > 100 {
+		rs = 100
+	}
+
+	return rs
+}
+
+// getProgressListener gets the progress callback
+func getProgressListener(options []Option) ProgressListener {
+	isSet, listener, _ := isOptionSet(options, progressListener)
+	if !isSet {
+		return nil
+	}
+	return listener.(ProgressListener)
+}
+
+// For testing
+type uploadPartHook func(id int, chunk FileChunk) error
+
+var uploadPartHooker uploadPartHook = defaultUploadPart
+
+func defaultUploadPart(id int, chunk FileChunk) error {
+	return nil
+}
+
+// workerArg holds the arguments for the worker goroutines
+type workerArg struct {
+	bucket   *Bucket
+	filePath string
+	imur     InitiateMultipartUploadResult
+	hook     uploadPartHook
+}
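The worker function below follows a classic Go fan-out shape: a jobs channel feeds the workers, results and failed report back, and a closed die channel tells everyone to stop early. A stripped-down, self-contained sketch of the same shape (not the SDK code itself):

    package main

    import "fmt"

    // worker squares jobs until the channel closes or die is closed.
    func worker(jobs <-chan int, results chan<- int, die <-chan bool) {
        for j := range jobs {
            select {
            case <-die: // another worker failed; stop early
                return
            default:
            }
            results <- j * j
        }
    }

    func main() {
        jobs := make(chan int, 4)
        results := make(chan int, 4)
        die := make(chan bool)
        for w := 0; w < 2; w++ {
            go worker(jobs, results, die)
        }
        for i := 1; i <= 4; i++ {
            jobs <- i
        }
        close(jobs)
        for i := 0; i < 4; i++ {
            fmt.Println(<-results) // 1 4 9 16, in some order
        }
    }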
+
+// worker uploads chunks from the jobs channel until it is closed or an upload fails
+func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
+	for chunk := range jobs {
+		if err := arg.hook(id, chunk); err != nil {
+			failed <- err
+			break
+		}
+		part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number)
+		if err != nil {
+			failed <- err
+			break
+		}
+		select {
+		case <-die:
+			return
+		default:
+		}
+		results <- part
+	}
+}
+
+// scheduler dispatches the chunks to the jobs channel
+func scheduler(jobs chan FileChunk, chunks []FileChunk) {
+	for _, chunk := range chunks {
+		jobs <- chunk
+	}
+	close(jobs)
+}
+
+func getTotalBytes(chunks []FileChunk) int64 {
+	var tb int64
+	for _, chunk := range chunks {
+		tb += chunk.Size
+	}
+	return tb
+}
+
+// uploadFile is the concurrent upload without resumable checkpoints
+func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error {
+	listener := getProgressListener(options)
+
+	chunks, err := SplitFileByPartSize(filePath, partSize)
+	if err != nil {
+		return err
+	}
+
+	// Initiate the multipart upload
+	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+	if err != nil {
+		return err
+	}
+
+	jobs := make(chan FileChunk, len(chunks))
+	results := make(chan UploadPart, len(chunks))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	var completedBytes int64
+	totalBytes := getTotalBytes(chunks)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+	publishProgress(listener, event)
+
+	// Start the worker goroutines
+	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go worker(w, arg, jobs, results, failed, die)
+	}
+
+	// Dispatch the parts concurrently
+	go scheduler(jobs, chunks)
+
+	// Wait for the part uploads to finish
+	completed := 0
+	parts := make([]UploadPart, len(chunks))
+	for completed < len(chunks) {
+		select {
+		case part := <-results:
+			completed++
+			parts[part.PartNumber-1] = part
+			completedBytes += chunks[part.PartNumber-1].Size
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+			bucket.AbortMultipartUpload(imur)
+			return err
+		}
+
+		if completed >= len(chunks) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+	publishProgress(listener, event)
+
+	// Complete the upload
+	_, err = bucket.CompleteMultipartUpload(imur, parts)
+	if err != nil {
+		bucket.AbortMultipartUpload(imur)
+		return err
+	}
+	return nil
+}
+
+// ----- Concurrent upload with checkpoint (resumable) -----
+const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
+
+type uploadCheckpoint struct {
+	Magic     string   // Magic
+	MD5       string   // MD5 of the checkpoint content
+	FilePath  string   // Local file path
+	FileStat  cpStat   // File state
+	ObjectKey string   // Object key
+	UploadID  string   // Upload ID
+	Parts     []cpPart // All parts of the local file
+}
+
+type cpStat struct {
+	Size         int64     // File size
+	LastModified time.Time // Last modification time of the local file
+	MD5          string    // MD5 of the local file
+}
+
+type cpPart struct {
+	Chunk       FileChunk  // The chunk
+	Part        UploadPart // The uploaded part
+	IsCompleted bool       // Whether the part upload is completed
+}
+
+// isValid reports whether the checkpoint is usable: its own MD5 checks out
+// and the local file has not changed since it was written
+func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
+	// Compare the checkpoint magic and MD5
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	// Check whether the local file has been modified
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return false, err
+	}
+	defer fd.Close()
+
+	st, err := fd.Stat()
+	if err != nil {
+		return false, err
+	}
+
+	md, err := calcFileMD5(filePath)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare the file size, last modification time and MD5
+	if cp.FileStat.Size != st.Size() ||
+		cp.FileStat.LastModified != st.ModTime() ||
+		cp.FileStat.MD5 != md {
+		return false, nil
+	}
+
+	return true, nil
+}
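The integrity trick in isValid above — blank the MD5 field, re-serialize, hash, compare — is easy to reuse. A self-contained sketch (record/checksum are illustrative names):

    package main

    import (
        "crypto/md5"
        "encoding/base64"
        "encoding/json"
        "fmt"
    )

    type record struct {
        Magic string
        MD5   string
        Data  string
    }

    // checksum hashes the record with its MD5 field blanked, so the
    // stored MD5 covers every other field.
    func checksum(r record) string {
        r.MD5 = ""
        js, _ := json.Marshal(r)
        sum := md5.Sum(js)
        return base64.StdEncoding.EncodeToString(sum[:])
    }

    func main() {
        r := record{Magic: "magic", Data: "payload"}
        r.MD5 = checksum(r)
        fmt.Println(r.MD5 == checksum(r)) // true: the record is intact
    }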
+
+// load restores the checkpoint from the checkpoint file
+func (cp *uploadCheckpoint) load(filePath string) error {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(contents, cp)
+	return err
+}
+
+// dump writes the checkpoint to the checkpoint file
+func (cp *uploadCheckpoint) dump(filePath string) error {
+	bcp := *cp
+
+	// Compute the MD5 with the MD5 field blanked
+	bcp.MD5 = ""
+	js, err := json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	bcp.MD5 = b64
+
+	// Serialize
+	js, err = json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+
+	// Write to the file
+	return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// updatePart records a completed part
+func (cp *uploadCheckpoint) updatePart(part UploadPart) {
+	cp.Parts[part.PartNumber-1].Part = part
+	cp.Parts[part.PartNumber-1].IsCompleted = true
+}
+
+// todoParts returns the chunks that still need to be uploaded
+func (cp *uploadCheckpoint) todoParts() []FileChunk {
+	fcs := []FileChunk{}
+	for _, part := range cp.Parts {
+		if !part.IsCompleted {
+			fcs = append(fcs, part.Chunk)
+		}
+	}
+	return fcs
+}
+
+// allParts returns all parts
+func (cp *uploadCheckpoint) allParts() []UploadPart {
+	ps := []UploadPart{}
+	for _, part := range cp.Parts {
+		ps = append(ps, part.Part)
+	}
+	return ps
+}
+
+// getCompletedBytes returns the number of bytes uploaded so far
+func (cp *uploadCheckpoint) getCompletedBytes() int64 {
+	var completedBytes int64
+	for _, part := range cp.Parts {
+		if part.IsCompleted {
+			completedBytes += part.Chunk.Size
+		}
+	}
+	return completedBytes
+}
+
+// calcFileMD5 computes the MD5 of the file; currently a stub that returns ""
+func calcFileMD5(filePath string) (string, error) {
+	return "", nil
+}
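Before the remaining checkpoint plumbing (prepare, complete and the resumable upload loop below), here is a hedged end-to-end usage sketch of the exported entry point; the endpoint, bucket name, credentials and part size are placeholders:

    package main

    import "github.com/aliyun/aliyun-oss-go-sdk/oss"

    func main() {
        client, err := oss.New("https://oss.aliyuncs.com", "accessKey", "secretKey")
        if err != nil {
            panic(err)
        }
        bucket, err := client.Bucket("my-bucket")
        if err != nil {
            panic(err)
        }
        // 5MB parts, 3 concurrent part uploads, resumable via a checkpoint
        // file ("" lets the SDK derive the path from the source file).
        err = bucket.UploadFile("backup.tar", "/tmp/backup.tar", 5*1024*1024,
            oss.Routines(3), oss.Checkpoint(true, ""))
        if err != nil {
            panic(err)
        }
    }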
+
+// prepare initializes the multipart upload and the checkpoint state
+func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error {
+	// Checkpoint fields
+	cp.Magic = uploadCpMagic
+	cp.FilePath = filePath
+	cp.ObjectKey = objectKey
+
+	// Local file state
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	st, err := fd.Stat()
+	if err != nil {
+		return err
+	}
+	cp.FileStat.Size = st.Size()
+	cp.FileStat.LastModified = st.ModTime()
+	md, err := calcFileMD5(filePath)
+	if err != nil {
+		return err
+	}
+	cp.FileStat.MD5 = md
+
+	// Chunks
+	parts, err := SplitFileByPartSize(filePath, partSize)
+	if err != nil {
+		return err
+	}
+
+	cp.Parts = make([]cpPart, len(parts))
+	for i, part := range parts {
+		cp.Parts[i].Chunk = part
+		cp.Parts[i].IsCompleted = false
+	}
+
+	// Initiate the multipart upload
+	imur, err := bucket.InitiateMultipartUpload(objectKey, options...)
+	if err != nil {
+		return err
+	}
+	cp.UploadID = imur.UploadID
+
+	return nil
+}
+
+// complete commits the multipart upload and removes the checkpoint file
+func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string) error {
+	imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName,
+		Key: cp.ObjectKey, UploadID: cp.UploadID}
+	_, err := bucket.CompleteMultipartUpload(imur, parts)
+	if err != nil {
+		return err
+	}
+	os.Remove(cpFilePath)
+	return err
+}
+
+// uploadFileWithCp is the concurrent, resumable (checkpointed) upload
+func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
+	listener := getProgressListener(options)
+
+	// Load the checkpoint data
+	ucp := uploadCheckpoint{}
+	err := ucp.load(cpFilePath)
+	if err != nil {
+		os.Remove(cpFilePath)
+	}
+
+	// If loading failed or the data is invalid, reinitialize the upload
+	valid, err := ucp.isValid(filePath)
+	if err != nil || !valid {
+		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
+			return err
+		}
+		os.Remove(cpFilePath)
+	}
+
+	chunks := ucp.todoParts()
+	imur := InitiateMultipartUploadResult{
+		Bucket:   bucket.BucketName,
+		Key:      objectKey,
+		UploadID: ucp.UploadID}
+
+	jobs := make(chan FileChunk, len(chunks))
+	results := make(chan UploadPart, len(chunks))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	completedBytes := ucp.getCompletedBytes()
+	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size)
+	publishProgress(listener, event)
+
+	// Start the worker goroutines
+	arg := workerArg{&bucket, filePath, imur, uploadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go worker(w, arg, jobs, results, failed, die)
+	}
+
+	// Dispatch the parts concurrently
+	go scheduler(jobs, chunks)
+
+	// Wait for the part uploads to finish
+	completed := 0
+	for completed < len(chunks) {
+		select {
+		case part := <-results:
+			completed++
+			ucp.updatePart(part)
+			ucp.dump(cpFilePath)
+			completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
+			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(chunks) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size)
+	publishProgress(listener, event)
+
+	// Complete the multipart upload
+	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath)
+	return err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
new file mode 100644
index 000000000..646f6da58
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
@@ -0,0 +1,265 @@
+package oss
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash/crc64"
+	"net/http"
+	"os"
+	"os/exec"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// userAgent is the Go SDK user agent: SDK version, OS type and Go version
+var userAgent = func() string {
+	sys := getSysInfo()
+	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
+		sys.release, sys.machine, runtime.Version())
+}()
+
+type sysInfo struct {
+	name    string // OS name, e.g. windows/Linux
+	release string // OS release, e.g. 2.6.32-220.23.2.ali1089.el5.x86_64
+	machine string // Machine type, e.g. amd64/x86_64
+}
+
+// getSysInfo gets the OS name, release and machine type
+func getSysInfo() sysInfo {
+	name := runtime.GOOS
+	release := "-"
+	machine := runtime.GOARCH
+	if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
+		name = string(bytes.TrimSpace(out))
+	}
+	if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil {
+		release = string(bytes.TrimSpace(out))
+	}
+	if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil {
+		machine = string(bytes.TrimSpace(out))
+	}
+	return sysInfo{name: name, release: release, machine: machine}
+}
+
+// unpackedRange is a parsed HTTP range
+type unpackedRange struct {
+	hasStart bool  // Whether a start was specified
+	hasEnd   bool  // Whether an end was specified
+	start    int64 // Start of the range
+	end      int64 // End of the range
+}
+
+// invalidRangeError returns an error for an invalid range
+func invalidRangeError(r string) error {
+	return fmt.Errorf("InvalidRange %s", r)
+}
+
+// parseRange parses the various range styles, such as bytes=M-N
+func parseRange(normalizedRange string) (*unpackedRange, error) {
+	var err error
+	hasStart := false
+	hasEnd := false
+	var start int64
+	var end int64
+
+	// Must be of the form bytes=M-N
+	nrSlice := strings.Split(normalizedRange, "=")
+	if len(nrSlice) != 2 || nrSlice[0] != "bytes" {
+		return nil, invalidRangeError(normalizedRange)
+	}
+
+	// bytes=M-N,X-Y: only the first range is used
+	rSlice := strings.Split(nrSlice[1], ",")
+	rStr := rSlice[0]
+
+	if strings.HasSuffix(rStr, "-") { // M-
+		startStr := rStr[:len(rStr)-1]
+		start, err = strconv.ParseInt(startStr, 10, 64)
+		if err != nil {
+			return nil, invalidRangeError(normalizedRange)
+		}
+		hasStart = true
+	} else if strings.HasPrefix(rStr, "-") { // -N
+		endStr := rStr[1:]
+		end, err = strconv.ParseInt(endStr, 10, 64)
+		if err != nil {
+			return nil, invalidRangeError(normalizedRange)
+		}
+		if end == 0 { // -0
+			return nil, invalidRangeError(normalizedRange)
+		}
+		hasEnd = true
+	} else { // M-N
+		valSlice := strings.Split(rStr, "-")
+		if len(valSlice) != 2 {
+			return nil, invalidRangeError(normalizedRange)
+		}
+		start, err = strconv.ParseInt(valSlice[0], 10, 64)
+		if err != nil {
+			return nil, invalidRangeError(normalizedRange)
+		}
+		hasStart = true
+		end, err = strconv.ParseInt(valSlice[1], 10, 64)
+		if err != nil {
+			return nil, invalidRangeError(normalizedRange)
+		}
+		hasEnd = true
+	}
+
+	return &unpackedRange{hasStart, hasEnd, start, end}, nil
+}
+
+// adjustRange clamps the parsed range to the length of the file and
+// returns the half-open interval [start, end)
+func adjustRange(ur *unpackedRange, size int64) (start, end int64) {
+	if ur == nil {
+		return 0, size
+	}
+
+	if ur.hasStart && ur.hasEnd {
+		start = ur.start
+		end = ur.end + 1
+		if ur.start < 0 || ur.start >= size || ur.end > size || ur.start > ur.end {
+			start = 0
+			end = size
+		}
+	} else if ur.hasStart {
+		start = ur.start
+		end = size
+		if ur.start < 0 || ur.start >= size {
+			start = 0
+		}
+	} else if ur.hasEnd {
+		start = size - ur.end
+		end = size
+		if ur.end < 0 || ur.end > size {
+			start = 0
+			end = size
+		}
+	}
+	return
+}
+
+// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC
+func GetNowSec() int64 {
+	return time.Now().Unix()
+}
+
+// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC. The result is undefined if the Unix time
+// in nanoseconds cannot be represented by an int64. Note that this
+// means the result of calling UnixNano on the zero Time is undefined.
+func GetNowNanoSec() int64 {
+	return time.Now().UnixNano()
+}
+
+// GetNowGMT returns the current time in the HTTP time format, e.g. "Mon, 02 Jan 2006 15:04:05 GMT"
+func GetNowGMT() string {
+	return time.Now().UTC().Format(http.TimeFormat)
+}
+
+// FileChunk is a chunk of a file
+type FileChunk struct {
+	Number int   // Chunk number
+	Offset int64 // Offset of the chunk within the file
+	Size   int64 // Chunk size
+}
+
+// SplitFileByPartNum splits a big file into chunks by the given number of parts.
+// The returned FileChunk slice is valid only when error is nil.
+func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) {
+	if chunkNum <= 0 || chunkNum > 10000 {
+		return nil, errors.New("chunkNum invalid")
+	}
+
+	file, err := os.Open(fileName)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	if int64(chunkNum) > stat.Size() {
+		return nil, errors.New("oss: chunkNum invalid")
+	}
+
+	var chunks []FileChunk
+	var chunk = FileChunk{}
+	var chunkN = (int64)(chunkNum)
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * (stat.Size() / chunkN)
+		if i == chunkN-1 {
+			chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN
+		} else {
+			chunk.Size = stat.Size() / chunkN
+		}
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
+
+// SplitFileByPartSize splits a big file into chunks by the given part size.
+// The returned FileChunk slice is valid only when error is nil.
+func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) {
+	if chunkSize <= 0 {
+		return nil, errors.New("chunkSize invalid")
+	}
+
+	file, err := os.Open(fileName)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+	var chunkN = stat.Size() / chunkSize
+	if chunkN >= 10000 {
+		return nil, errors.New("Too many parts, please increase part size.")
+	}
+
+	var chunks []FileChunk
+	var chunk = FileChunk{}
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * chunkSize
+		chunk.Size = chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	if stat.Size()%chunkSize > 0 {
+		chunk.Number = len(chunks) + 1
+		chunk.Offset = int64(len(chunks)) * chunkSize
+		chunk.Size = stat.Size() % chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
+
+// GetPartEnd computes the inclusive end offset of a part that starts at
+// begin, within total bytes, with per bytes per part
+func GetPartEnd(begin int64, total int64, per int64) int64 {
+	if begin+per > total {
+		return total - 1
+	}
+	return begin + per - 1
+}
+
+// crcTable returns the Table constructed from the ECMA polynomial
+var crcTable = func() *crc64.Table {
+	return crc64.MakeTable(crc64.ECMA)
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index b4e436f9d..daa566252 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -85,6 +85,12 @@
 			"revision": "32055c351ea8b00b96d70f28db48d9840feaf0ec",
 			"revisionTime": "2016-07-12T20:17:32-04:00"
 		},
+		{
+			"checksumSHA1": "tX0Bq1gzqskL98nnB1X2rDqxH18=",
+			"path": "github.com/aliyun/aliyun-oss-go-sdk/oss",
+			"revision": "6fe16293d6b7af4f5c2450714c5b4825c8ad040c",
+			"revisionTime": "2017-09-25T03:23:15Z"
+		},
 		{
 			"path": "github.com/cheggaaa/pb",
 			"revision": "73ae1d68fe0bd482ab11913a9828634f795b987f",