Dataset columns:
code: string, lengths 11 to 335k
docstring: string, lengths 20 to 11.8k
func_name: string, lengths 1 to 100
language: string, 1 class
repo: string, 245 classes
path: string, lengths 4 to 144
url: string, lengths 43 to 214
license: string, 4 classes
func NewDriver(drv dialect.Driver, opts ...Option) *Driver { options := &Options{Hash: DefaultHash, Cache: NewLRU(0)} for _, opt := range opts { opt(options) } return &Driver{ Driver: drv, Options: options, } }
NewDriver returns a new Driver that wraps an existing driver and accepts optional configuration functions. For example: entcache.NewDriver( drv, entcache.TTL(time.Minute), entcache.Levels( NewLRU(256), NewRedis(redis.NewClient(&redis.Options{ Addr: ":6379", })), ) )
NewDriver
go
ariga/entcache
driver.go
https://github.com/ariga/entcache/blob/master/driver.go
Apache-2.0
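A minimal sketch of wiring the cached driver into an ent client, combining the TTL and Levels options documented in the rows below. The module path ariga.io/entcache, the go-redis version, the sqlite3 DSN, and the generated ent package are assumptions; only NewDriver, TTL, Levels, NewLRU and NewRedis come from this table.

package main

import (
	"log"
	"time"

	"ariga.io/entcache" // assumed module path for this repo
	entsql "entgo.io/ent/dialect/sql"
	"github.com/go-redis/redis/v8" // assumed go-redis version
	_ "github.com/mattn/go-sqlite3"

	"your/project/ent" // hypothetical generated ent package
)

func main() {
	// Open the underlying SQL driver (the DSN is a placeholder).
	drv, err := entsql.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
	if err != nil {
		log.Fatal(err)
	}
	// Wrap it with a caching driver: a 256-entry in-process LRU in front of a
	// remote Redis cache, with entries valid for one minute.
	cached := entcache.NewDriver(
		drv,
		entcache.TTL(time.Minute),
		entcache.Levels(
			entcache.NewLRU(256),
			entcache.NewRedis(redis.NewClient(&redis.Options{Addr: ":6379"})),
		),
	)
	// Hand the cached driver to the generated client (hypothetical package).
	client := ent.NewClient(ent.Driver(cached))
	defer client.Close()
}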
func TTL(ttl time.Duration) Option { return func(o *Options) { o.TTL = ttl } }
TTL configures the period of time that an Entry is valid in the cache.
TTL
go
ariga/entcache
driver.go
https://github.com/ariga/entcache/blob/master/driver.go
Apache-2.0
func Hash(hash func(query string, args []any) (Key, error)) Option { return func(o *Options) { o.Hash = hash } }
Hash configures an optional Hash function for converting a query and its arguments to a cache key.
Hash
go
ariga/entcache
driver.go
https://github.com/ariga/entcache/blob/master/driver.go
Apache-2.0
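A sketch of a custom Hash option using FNV-1a instead of the hashstructure-based DefaultHash; it assumes the module path ariga.io/entcache and that entcache.Key can hold a uint64, as DefaultHash returns one.

package main

import (
	"fmt"
	"hash/fnv"
	"io"

	"ariga.io/entcache" // assumed module path
)

// fnvHashOption builds a Hash option that keys entries by an FNV-1a digest of
// the query text and its printed arguments. Assumes entcache.Key accepts a
// uint64, as the default implementation produces one.
func fnvHashOption() entcache.Option {
	return entcache.Hash(func(query string, args []any) (entcache.Key, error) {
		h := fnv.New64a()
		io.WriteString(h, query)
		for _, arg := range args {
			fmt.Fprintf(h, "/%v", arg)
		}
		return h.Sum64(), nil
	})
}

The option would be passed alongside the others, e.g. entcache.NewDriver(drv, fnvHashOption()).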
func Levels(levels ...AddGetDeleter) Option { return func(o *Options) { if len(levels) == 1 { o.Cache = levels[0] } else { o.Cache = &multiLevel{levels: levels} } } }
Levels configures the Driver to work with the given cache levels. For example, an in-process LRU cache and a remote Redis cache.
Levels
go
ariga/entcache
driver.go
https://github.com/ariga/entcache/blob/master/driver.go
Apache-2.0
func ContextLevel() Option { return func(o *Options) { o.Cache = &contextLevel{} } }
ContextLevel configures the driver to work with a context/request-level cache. Users of this option should wrap the *http.Request context with the cache value as follows: ctx = entcache.NewContext(ctx) ctx = entcache.NewContext(ctx, entcache.NewLRU(128))
ContextLevel
go
ariga/entcache
driver.go
https://github.com/ariga/entcache/blob/master/driver.go
Apache-2.0
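A sketch of the request-scoped pattern this docstring describes: the driver is built with ContextLevel, and HTTP middleware attaches a fresh cache to each request with NewContext. The middleware name is illustrative; NewContext and NewLRU are the calls shown in this table, and the module path is an assumption.

package main

import (
	"net/http"

	"ariga.io/entcache" // assumed module path
)

// withRequestCache attaches a per-request cache to the request context, so a
// Driver configured with entcache.ContextLevel() caches queries only for the
// lifetime of that request.
func withRequestCache(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := entcache.NewContext(r.Context(), entcache.NewLRU(128))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}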
func DefaultHash(query string, args []any) (Key, error) { key, err := hashstructure.Hash(struct { Q string A []any }{ Q: query, A: args, }, hashstructure.FormatV2, nil) if err != nil { return nil, err } return key, nil }
DefaultHash provides the default implementation for converting a query and its arguments to a cache key.
DefaultHash
go
ariga/entcache
driver.go
https://github.com/ariga/entcache/blob/master/driver.go
Apache-2.0
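DefaultHash can also be called directly, for instance to precompute the key a query would be cached under and attach it with WithKey (documented further down); a small hedged helper, assuming the ariga.io/entcache module path.

package main

import (
	"context"

	"ariga.io/entcache" // assumed module path
)

// withPrecomputedKey hashes a raw query with DefaultHash and carries the
// resulting key on the context, so the next query is cached under it.
func withPrecomputedKey(ctx context.Context, query string, args []any) (context.Context, error) {
	key, err := entcache.DefaultHash(query, args)
	if err != nil {
		return ctx, err
	}
	return entcache.WithKey(ctx, key), nil
}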
func NewContext(ctx context.Context, levels ...AddGetDeleter) context.Context { var cache AddGetDeleter switch len(levels) { case 0: cache = NewLRU(0) case 1: cache = levels[0] default: cache = &multiLevel{levels: levels} } return context.WithValue(ctx, ctxKey{}, cache) }
NewContext returns a new Context that carries a cache.
NewContext
go
ariga/entcache
context.go
https://github.com/ariga/entcache/blob/master/context.go
Apache-2.0
func FromContext(ctx context.Context) (AddGetDeleter, bool) { c, ok := ctx.Value(ctxKey{}).(AddGetDeleter) return c, ok }
FromContext returns the cache value stored in ctx, if any.
FromContext
go
ariga/entcache
context.go
https://github.com/ariga/entcache/blob/master/context.go
Apache-2.0
func Skip(ctx context.Context) context.Context { c, ok := ctx.Value(ctxOptionsKey).(*ctxOptions) if !ok { return context.WithValue(ctx, ctxOptionsKey, &ctxOptions{skip: true}) } c.skip = true return ctx }
Skip returns a new Context that tells the Driver to skip the cache entry on Query. client.T.Query().All(entcache.Skip(ctx))
Skip
go
ariga/entcache
context.go
https://github.com/ariga/entcache/blob/master/context.go
Apache-2.0
func Evict(ctx context.Context) context.Context { c, ok := ctx.Value(ctxOptionsKey).(*ctxOptions) if !ok { return context.WithValue(ctx, ctxOptionsKey, &ctxOptions{skip: true, evict: true}) } c.skip = true c.evict = true return ctx }
Evict returns a new Context that tells the Driver to skip and invalidate the cache entry on Query. client.T.Query().All(entcache.Evict(ctx))
Evict
go
ariga/entcache
context.go
https://github.com/ariga/entcache/blob/master/context.go
Apache-2.0
func WithKey(ctx context.Context, key Key) context.Context { c, ok := ctx.Value(ctxOptionsKey).(*ctxOptions) if !ok { return context.WithValue(ctx, ctxOptionsKey, &ctxOptions{key: key}) } c.key = key return ctx }
WithKey returns a new Context that carries the Key for the cache entry. Note that this option should not be used if the ent.Client query involves more than one SQL query (e.g. eager loading). client.T.Query().All(entcache.WithKey(ctx, "key"))
WithKey
go
ariga/entcache
context.go
https://github.com/ariga/entcache/blob/master/context.go
Apache-2.0
func WithTTL(ctx context.Context, ttl time.Duration) context.Context { c, ok := ctx.Value(ctxOptionsKey).(*ctxOptions) if !ok { return context.WithValue(ctx, ctxOptionsKey, &ctxOptions{ttl: ttl}) } c.ttl = ttl return ctx }
WithTTL returns a new Context that carries the TTL for the cache entry. client.T.Query().All(entcache.WithTTL(ctx, time.Second))
WithTTL
go
ariga/entcache
context.go
https://github.com/ariga/entcache/blob/master/context.go
Apache-2.0
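The context helpers above compose per query; a hedged sketch of typical cache control on a generated client, where the ent package and its Todo entity are hypothetical and only the entcache calls come from this table.

package main

import (
	"context"
	"time"

	"ariga.io/entcache" // assumed module path

	"your/project/ent" // hypothetical generated ent package with a Todo entity
)

// cacheControl demonstrates per-query cache options on a cached driver.
func cacheControl(ctx context.Context, client *ent.Client) error {
	// Bypass the cache for one query.
	if _, err := client.Todo.Query().All(entcache.Skip(ctx)); err != nil {
		return err
	}
	// Override the driver-level TTL for this query only.
	if _, err := client.Todo.Query().All(entcache.WithTTL(ctx, 5*time.Second)); err != nil {
		return err
	}
	// After a mutation elsewhere, skip and invalidate the stale entry.
	if _, err := client.Todo.Query().All(entcache.Evict(ctx)); err != nil {
		return err
	}
	return nil
}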
func CopyObjectSample() { // Create a bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // Create an object err = bucket.PutObjectFromFile(objectKey, localFile) if err != nil { HandleError(err) } // Case 1: Copy an existing object var descObjectKey = "descobject" _, err = bucket.CopyObject(objectKey, descObjectKey) if err != nil { HandleError(err) } // Case 2: Copy an existing object to another existing object _, err = bucket.CopyObject(objectKey, descObjectKey) if err != nil { HandleError(err) } err = bucket.DeleteObject(descObjectKey) if err != nil { HandleError(err) } // Case 3: Copy file with constraints. When the constraints are met, the copy executes. otherwise the copy does not execute. // constraints are not met, copy does not execute _, err = bucket.CopyObject(objectKey, descObjectKey, oss.CopySourceIfModifiedSince(futureDate)) if err == nil { HandleError(err) } fmt.Println("CopyObjectError:", err) // Constraints are met, the copy executes _, err = bucket.CopyObject(objectKey, descObjectKey, oss.CopySourceIfUnmodifiedSince(futureDate)) if err != nil { HandleError(err) } // Case 4: Specify the properties when copying. The MetadataDirective needs to be MetaReplace options := []oss.Option{ oss.Expires(futureDate), oss.Meta("myprop", "mypropval"), oss.MetadataDirective(oss.MetaReplace)} _, err = bucket.CopyObject(objectKey, descObjectKey, options...) if err != nil { HandleError(err) } meta, err := bucket.GetObjectDetailedMeta(descObjectKey) if err != nil { HandleError(err) } fmt.Println("meta:", meta) // Case 5: When the source file is the same as the target file, the copy could be used to update metadata options = []oss.Option{ oss.Expires(futureDate), oss.Meta("myprop", "mypropval"), oss.MetadataDirective(oss.MetaReplace)} _, err = bucket.CopyObject(objectKey, objectKey, options...) if err != nil { HandleError(err) } fmt.Println("meta:", meta) // Case 6: Big file's multipart copy. It supports concurrent copy with resumable upload // copy file with multipart. The part size is 100K. By default one routine is used without resumable upload err = bucket.CopyFile(bucketName, objectKey, descObjectKey, 100*1024) if err != nil { HandleError(err) } // Part size is 100K and three coroutines for the concurrent copy err = bucket.CopyFile(bucketName, objectKey, descObjectKey, 100*1024, oss.Routines(3)) if err != nil { HandleError(err) } // Part size is 100K and three coroutines for the concurrent copy with resumable upload err = bucket.CopyFile(bucketName, objectKey, descObjectKey, 100*1024, oss.Routines(3), oss.Checkpoint(true, "")) if err != nil { HandleError(err) } // Specify the checkpoint file path. If the checkpoint file path is not specified, the current folder is used. err = bucket.CopyFile(bucketName, objectKey, descObjectKey, 100*1024, oss.Checkpoint(true, localFile+".cp")) if err != nil { HandleError(err) } // Case 7: Set the storage classes.OSS provides three storage classes: Standard, Infrequent Access, and Archive. // Copy a object in the same bucket, and set object's storage-class to Archive. 
_, err = bucket.CopyObject(objectKey, objectKey+"DestArchive", oss.ObjectStorageClass("Archive")) if err != nil { HandleError(err) } // Case 8: Copy object with tagging, the value of tagging directive is REPLACE tag1 := oss.Tag{ Key: "key1", Value: "value1", } tag2 := oss.Tag{ Key: "key2", Value: "value2", } tagging := oss.Tagging{ Tags: []oss.Tag{tag1, tag2}, } _, err = bucket.CopyObject(objectKey, objectKey+"WithTagging", oss.SetTagging(tagging), oss.TaggingDirective(oss.TaggingReplace)) if err != nil { HandleError(err) } // Delete object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("CopyObjectSample completed") }
CopyObjectSample shows how to copy objects
CopyObjectSample
go
aliyun/aliyun-oss-go-sdk
sample/copy_object.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/copy_object.go
MIT
func SignURLSample() { // Create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // Put object signedURL, err := bucket.SignURL(objectKey, oss.HTTPPut, 60) if err != nil { HandleError(err) } var val = "花间一壶酒,独酌无相亲。 举杯邀明月,对影成三人。" err = bucket.PutObjectWithURL(signedURL, strings.NewReader(val)) if err != nil { HandleError(err) } // Put object with option options := []oss.Option{ oss.Meta("myprop", "mypropval"), oss.ContentType("image/tiff"), } signedURL, err = bucket.SignURL(objectKey, oss.HTTPPut, 60, options...) if err != nil { HandleError(err) } err = bucket.PutObjectFromFileWithURL(signedURL, localFile, options...) if err != nil { HandleError(err) } // Get object signedURL, err = bucket.SignURL(objectKey, oss.HTTPGet, 60) if err != nil { HandleError(err) } body, err := bucket.GetObjectWithURL(signedURL) if err != nil { HandleError(err) } defer body.Close() // Read content data, err := ioutil.ReadAll(body) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } fmt.Println("data:", string(data)) err = bucket.GetObjectToFileWithURL(signedURL, "mynewfile-1.jpg") if err != nil { HandleError(err) } // Delete the object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("SignURLSample completed") }
SignURLSample shows how to generate and use signed URLs
SignURLSample
go
aliyun/aliyun-oss-go-sdk
sample/sign_url.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/sign_url.go
MIT
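A distilled version of the round trip in SignURLSample: sign a PUT URL, upload a string through it, then sign a GET URL and read the object back. Endpoint, credentials, bucket and object key are placeholders; every call appears in the sample above.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholders: endpoint, access credentials, bucket and object key.
	client, err := oss.New("yourEndpoint", "yourAccessKeyID", "yourAccessKeySecret")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("yourBucketName")
	if err != nil {
		panic(err)
	}
	// Sign a PUT URL valid for 60 seconds and upload through it.
	putURL, err := bucket.SignURL("yourObjectKey", oss.HTTPPut, 60)
	if err != nil {
		panic(err)
	}
	if err := bucket.PutObjectWithURL(putURL, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	// Sign a GET URL and read the object back.
	getURL, err := bucket.SignURL("yourObjectKey", oss.HTTPGet, 60)
	if err != nil {
		panic(err)
	}
	body, err := bucket.GetObjectWithURL(getURL)
	if err != nil {
		panic(err)
	}
	defer body.Close()
	data, err := ioutil.ReadAll(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}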
func BucketResourceGroupSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Get bucket's resource group. result, err := client.GetBucketResourceGroup(bucketName) if err != nil { HandleError(err) } fmt.Printf("Resource Group Id:%s\n", result.ResourceGroupId) // Set bucket's resource group. resourceGroup := oss.PutBucketResourceGroup{ ResourceGroupId: "rg-aek27tc********", } err = client.PutBucketResourceGroup(bucketName, resourceGroup) if err != nil { HandleError(err) } fmt.Println("Bucket Resource Group Set Success!") fmt.Println("BucketResourceGroupSample completed") }
BucketResourceGroupSample shows how to set and get the bucket's resource group.
BucketResourceGroupSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_resourcegroup.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_resourcegroup.go
MIT
func ObjectACLSample() { // Create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // Create object err = bucket.PutObject(objectKey, strings.NewReader("YoursObjectValue")) if err != nil { HandleError(err) } // Case 1: Set bucket ACL, valid ACLs are ACLPrivate、ACLPublicRead、ACLPublicReadWrite err = bucket.SetObjectACL(objectKey, oss.ACLPrivate) if err != nil { HandleError(err) } // Get object ACL, returns one of the three values: private、public-read、public-read-write goar, err := bucket.GetObjectACL(objectKey) if err != nil { HandleError(err) } fmt.Println("Object ACL:", goar.ACL) // Delete object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("ObjectACLSample completed") }
ObjectACLSample shows how to set and get object ACL
ObjectACLSample
go
aliyun/aliyun-oss-go-sdk
sample/object_acl.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/object_acl.go
MIT
func BucketStatSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Get bucket stat stat, err := client.GetBucketStat(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket Stat Storage:", stat.Storage) fmt.Println("Bucket Stat Object Count:", stat.ObjectCount) fmt.Println("Bucket Stat Multipart Upload Count:", stat.MultipartUploadCount) fmt.Println("Bucket Stat Live Channel Count:", stat.LiveChannelCount) fmt.Println("Bucket Stat Last Modified Time:", stat.LastModifiedTime) fmt.Println("Bucket Stat Standard Storage:", stat.StandardStorage) fmt.Println("Bucket Stat Standard Object Count:", stat.StandardObjectCount) fmt.Println("Bucket Stat Infrequent Access Storage:", stat.InfrequentAccessStorage) fmt.Println("Bucket Stat Infrequent Access Real Storage:", stat.InfrequentAccessRealStorage) fmt.Println("Bucket Stat Infrequent Access Object Count:", stat.InfrequentAccessObjectCount) fmt.Println("Bucket Stat Archive Storage:", stat.ArchiveStorage) fmt.Println("Bucket Stat Archive Real Storage:", stat.ArchiveRealStorage) fmt.Println("Bucket Stat Archive Object Count:", stat.ArchiveObjectCount) fmt.Println("Bucket Stat Cold Archive Storage:", stat.ColdArchiveStorage) fmt.Println("Bucket Stat Cold Archive Real Storage:", stat.ColdArchiveRealStorage) fmt.Println("Bucket Stat Cold Archive Object Count:", stat.ColdArchiveObjectCount) fmt.Println("BucketStatSample completed") }
BucketStatSample shows how to get the bucket stat.
BucketStatSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_stat.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_stat.go
MIT
func BucketStyleSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Get bucket's style. styleName := "image-style" style, err := client.GetBucketStyle(bucketName, styleName) if err != nil { HandleError(err) } fmt.Printf("Style Name:%s\n", style.Name) fmt.Printf("Style Name:%s\n", style.Content) fmt.Printf("Style Create Time:%s\n", style.CreateTime) fmt.Printf("Style Last Modify Time:%s\n", style.LastModifyTime) // Set bucket's style. styleContent := "image/resize,p_50" err = client.PutBucketStyle(bucketName, styleName, styleContent) if err != nil { HandleError(err) } fmt.Println("Put Bucket Style Success!") // List bucket's style list, err := client.ListBucketStyle(bucketName) if err != nil { HandleError(err) } for _, styleInfo := range list.Style { fmt.Printf("Style Name:%s\n", styleInfo.Name) fmt.Printf("Style Name:%s\n", styleInfo.Content) fmt.Printf("Style Create Time:%s\n", styleInfo.CreateTime) fmt.Printf("Style Last Modify Time:%s\n", styleInfo.LastModifyTime) } // Delete bucket's style err = client.DeleteBucketStyle(bucketName, styleName) if err != nil { HandleError(err) } fmt.Println("Bucket Style Delete Success!") fmt.Println("BucketStyleSample completed") }
BucketStyleSample shows how to set, get, list and delete the bucket's style.
BucketStyleSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_style.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_style.go
MIT
func NewBucketSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create bucket err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // New bucket bucket, err := client.Bucket(bucketName) if err != nil { HandleError(err) } // Put object, uploads an object var objectName = "myobject" err = bucket.PutObject(objectName, strings.NewReader("MyObjectValue")) if err != nil { HandleError(err) } // Delete object, deletes an object err = bucket.DeleteObject(objectName) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("NewBucketSample completed") }
NewBucketSample shows how to initialize a client and a bucket
NewBucketSample
go
aliyun/aliyun-oss-go-sdk
sample/new_bucket.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/new_bucket.go
MIT
func BucketInventorySample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // the inventory configuration,not use any encryption bl := true invConfig := oss.InventoryConfiguration{ Id: "report1", IsEnabled: &bl, Prefix: "filterPrefix/", OSSBucketDestination: oss.OSSBucketDestination{ Format: "CSV", AccountId: accountID, RoleArn: stsARN, Bucket: "acs:oss:::" + bucketName, Prefix: "prefix1", }, Frequency: "Daily", IncludedObjectVersions: "All", OptionalFields: oss.OptionalFields{ Field: []string{ "Size", "LastModifiedDate", "ETag", "StorageClass", "IsMultipartUploaded", "EncryptionStatus", }, }, } // case 1: Set inventory err = client.SetBucketInventory(bucketName, invConfig) if err != nil { HandleError(err) } // case 2: Get Bucket inventory ret, err := client.GetBucketInventory(bucketName, invConfig.Id) if err != nil { HandleError(err) } fmt.Println("Bucket inventory:", ret) // case 3: List Bucket inventory invConfig2 := oss.InventoryConfiguration{ Id: "report2", IsEnabled: &bl, Prefix: "filterPrefix/", OSSBucketDestination: oss.OSSBucketDestination{ Format: "CSV", AccountId: accountID, RoleArn: stsARN, Bucket: "acs:oss:::" + bucketName, Prefix: "prefix1", }, Frequency: "Daily", IncludedObjectVersions: "All", OptionalFields: oss.OptionalFields{ Field: []string{ "Size", "LastModifiedDate", "ETag", "StorageClass", "IsMultipartUploaded", "EncryptionStatus", }, }, } err = client.SetBucketInventory(bucketName, invConfig2) if err != nil { HandleError(err) } NextContinuationToken := "" listInvConf, err := client.ListBucketInventory(bucketName, NextContinuationToken) if err != nil { HandleError(err) } fmt.Println("Bucket inventory list:", listInvConf) // case 4: Delete Bucket inventory err = client.DeleteBucketInventory(bucketName, invConfig.Id) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketInventorySample completed") }
BucketInventorySample shows how to set, get, list and delete the bucket inventory configuration
BucketInventorySample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_inventory.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_inventory.go
MIT
func ObjectTaggingSample() { // Create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // Create object err = bucket.PutObject(objectKey, strings.NewReader("ObjectTaggingSample")) if err != nil { HandleError(err) } // Case 1: Set Tagging of object tag1 := oss.Tag{ Key: "key1", Value: "value1", } tag2 := oss.Tag{ Key: "key2", Value: "value2", } tagging := oss.Tagging{ Tags: []oss.Tag{tag1, tag2}, } err = bucket.PutObjectTagging(objectKey, tagging) if err != nil { HandleError(err) } // Case 2: Get Tagging of object taggingResult, err := bucket.GetObjectTagging(objectKey) if err != nil { HandleError(err) } fmt.Printf("Object Tagging: %v\n", taggingResult) tag3 := oss.Tag{ Key: "key3", Value: "value3", } // Case 3: Put object with tagging tagging = oss.Tagging{ Tags: []oss.Tag{tag1, tag2, tag3}, } err = bucket.PutObject(objectKey, strings.NewReader("ObjectTaggingSample"), oss.SetTagging(tagging)) if err != nil { HandleError(err) } // Case 4: Delete Tagging of object err = bucket.DeleteObjectTagging(objectKey) if err != nil { HandleError(err) } // Delete object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("ObjectACLSample completed") }
ObjectTaggingSample shows how to set and get object Tagging
ObjectTaggingSample
go
aliyun/aliyun-oss-go-sdk
sample/object_tagging.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/object_tagging.go
MIT
func DeleteObjectSample() { // Create a bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } var val = "抽刀断水水更流,举杯销愁愁更愁。 人生在世不称意,明朝散发弄扁舟。" // Case 1: Delete an object err = bucket.PutObject(objectKey, strings.NewReader(val)) if err != nil { HandleError(err) } err = bucket.DeleteObject(objectKey) if err != nil { HandleError(err) } // Case 2: Delete multiple Objects err = bucket.PutObject(objectKey+"1", strings.NewReader(val)) if err != nil { HandleError(err) } err = bucket.PutObject(objectKey+"2", strings.NewReader(val)) if err != nil { HandleError(err) } delRes, err := bucket.DeleteObjects([]string{objectKey + "1", objectKey + "2"}) if err != nil { HandleError(err) } fmt.Println("Del Res:", delRes) lsRes, err := bucket.ListObjects() if err != nil { HandleError(err) } fmt.Println("Objects:", getObjectsFormResponse(lsRes)) // Case 3: Delete multiple objects and it will return deleted objects in detail mode which is by default. err = bucket.PutObject(objectKey+"1", strings.NewReader(val)) if err != nil { HandleError(err) } err = bucket.PutObject(objectKey+"2", strings.NewReader(val)) if err != nil { HandleError(err) } delRes, err = bucket.DeleteObjects([]string{objectKey + "1", objectKey + "2"}, oss.DeleteObjectsQuiet(false)) if err != nil { HandleError(err) } fmt.Println("Detail Del Res:", delRes) lsRes, err = bucket.ListObjects() if err != nil { HandleError(err) } fmt.Println("Objects:", getObjectsFormResponse(lsRes)) // Case 4: Delete multiple objects and returns undeleted objects in quiet mode err = bucket.PutObject(objectKey+"1", strings.NewReader(val)) if err != nil { HandleError(err) } err = bucket.PutObject(objectKey+"2", strings.NewReader(val)) if err != nil { HandleError(err) } delRes, err = bucket.DeleteObjects([]string{objectKey + "1", objectKey + "2"}, oss.DeleteObjectsQuiet(true)) if err != nil { HandleError(err) } fmt.Println("Sample Del Res:", delRes) lsRes, err = bucket.ListObjects() if err != nil { HandleError(err) } fmt.Println("Objects:", getObjectsFormResponse(lsRes)) // Delete object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("DeleteObjectSample completed") }
DeleteObjectSample shows how to delete a single object or multiple objects
DeleteObjectSample
go
aliyun/aliyun-oss-go-sdk
sample/delete_object.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/delete_object.go
MIT
func ListObjectsSample() { var myObjects = []Object{ {"my-object-1", ""}, {"my-object-11", ""}, {"my-object-2", ""}, {"my-object-21", ""}, {"my-object-22", ""}, {"my-object-3", ""}, {"my-object-31", ""}, {"my-object-32", ""}} // Create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // Create objects err = CreateObjects(bucket, myObjects) if err != nil { HandleError(err) } // Case 1: Use default parameters lor, err := bucket.ListObjects() if err != nil { HandleError(err) } fmt.Println("my objects:", getObjectsFormResponse(lor)) // Case 2: Specify max keys lor, err = bucket.ListObjects(oss.MaxKeys(3)) if err != nil { HandleError(err) } fmt.Println("my objects max num:", getObjectsFormResponse(lor)) // Case 3: Specify prefix of objects lor, err = bucket.ListObjects(oss.Prefix("my-object-2")) if err != nil { HandleError(err) } fmt.Println("my objects prefix :", getObjectsFormResponse(lor)) // Case 4: Specify the marker lor, err = bucket.ListObjects(oss.Marker("my-object-22")) if err != nil { HandleError(err) } fmt.Println("my objects marker :", getObjectsFormResponse(lor)) // Case 5: List object with paging. each page has 3 objects marker := oss.Marker("") for { lor, err = bucket.ListObjects(oss.MaxKeys(3), marker) if err != nil { HandleError(err) } marker = oss.Marker(lor.NextMarker) fmt.Println("my objects page :", getObjectsFormResponse(lor)) if !lor.IsTruncated { break } } // Case 6: List object with paging , marker and max keys; return 3 items each time. marker = oss.Marker("my-object-22") for { lor, err = bucket.ListObjects(oss.MaxKeys(3), marker) if err != nil { HandleError(err) } marker = oss.Marker(lor.NextMarker) fmt.Println("my objects marker&page :", getObjectsFormResponse(lor)) if !lor.IsTruncated { break } } // Case 7: List object with paging , with prefix and max keys; return 2 items each time. pre := oss.Prefix("my-object-2") marker = oss.Marker("") for { lor, err = bucket.ListObjects(oss.MaxKeys(2), marker, pre) if err != nil { HandleError(err) } pre = oss.Prefix(lor.Prefix) marker = oss.Marker(lor.NextMarker) fmt.Println("my objects prefix&page :", getObjectsFormResponse(lor)) if !lor.IsTruncated { break } } err = DeleteObjects(bucket, myObjects) if err != nil { HandleError(err) } // Case 8: Combine the prefix and delimiter for grouping. ListObjectsResponse.Objects is the objects returned. // ListObjectsResponse.CommonPrefixes is the common prefixes returned. myObjects = []Object{ {"fun/test.txt", ""}, {"fun/test.jpg", ""}, {"fun/movie/001.avi", ""}, {"fun/movie/007.avi", ""}, {"fun/music/001.mp3", ""}, {"fun/music/001.mp3", ""}} // Create object err = CreateObjects(bucket, myObjects) if err != nil { HandleError(err) } lor, err = bucket.ListObjects(oss.Prefix("fun/"), oss.Delimiter("/")) if err != nil { HandleError(err) } fmt.Println("my objects prefix :", getObjectsFormResponse(lor), "common prefixes:", lor.CommonPrefixes) // Delete object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("ListObjectsSample completed") }
ListObjectsSample shows how to list objects, using default and specified parameters.
ListObjectsSample
go
aliyun/aliyun-oss-go-sdk
sample/list_objects.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/list_objects.go
MIT
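The heart of ListObjectsSample is marker-based paging; a compact helper that follows NextMarker until the listing is no longer truncated, using only the Marker, MaxKeys and Prefix options from the sample. The prefix and page size are placeholders.

package main

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// listAllWithPrefix pages through every object under a prefix, 100 keys per
// request, following NextMarker until IsTruncated is false.
func listAllWithPrefix(bucket *oss.Bucket, prefix string) ([]string, error) {
	var keys []string
	marker := oss.Marker("")
	for {
		lor, err := bucket.ListObjects(oss.MaxKeys(100), oss.Prefix(prefix), marker)
		if err != nil {
			return nil, err
		}
		for _, object := range lor.Objects {
			keys = append(keys, object.Key)
		}
		if !lor.IsTruncated {
			return keys, nil
		}
		marker = oss.Marker(lor.NextMarker)
	}
}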
func BucketACLSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create a bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Set bucket ACL. The valid ACLs are ACLPrivate、ACLPublicRead、ACLPublicReadWrite err = client.SetBucketACL(bucketName, oss.ACLPublicRead) if err != nil { HandleError(err) } // Get bucket ACL gbar, err := client.GetBucketACL(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket ACL:", gbar.ACL) // Delete the bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketACLSample completed") }
BucketACLSample shows how to get and set the bucket ACL
BucketACLSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_acl.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_acl.go
MIT
func BucketWebsiteSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } //Define bucket website indexWebsite or errorWebsite var indexWebsite = "myindex.html" var errorWebsite = "myerror.html" // Set bucket website indexWebsite or errorWebsite err = client.SetBucketWebsite(bucketName, indexWebsite, errorWebsite) if err != nil { HandleError(err) } bEnable := true bDisable := false // Define one website detail ruleOk := oss.RoutingRule{ RuleNumber: 1, Condition: oss.Condition{ KeyPrefixEquals: "abc", HTTPErrorCodeReturnedEquals: 404, IncludeHeader: []oss.IncludeHeader{ oss.IncludeHeader{ Key: "host", Equals: "test.oss-cn-beijing-internal.aliyuncs.com", }, }, }, Redirect: oss.Redirect{ RedirectType: "Mirror", PassQueryString: &bDisable, MirrorURL: "http://www.test.com/", MirrorPassQueryString: &bEnable, MirrorFollowRedirect: &bEnable, MirrorCheckMd5: &bDisable, MirrorHeaders: oss.MirrorHeaders{ PassAll: &bEnable, Pass: []string{"key1", "key2"}, Remove: []string{"remove1", "remove2"}, Set: []oss.MirrorHeaderSet{ oss.MirrorHeaderSet{ Key: "setKey1", Value: "setValue1", }, }, }, }, } wxmlDetail := oss.WebsiteXML{} wxmlDetail.RoutingRules = append(wxmlDetail.RoutingRules, ruleOk) // Get website res, err := client.GetBucketWebsite(bucketName) if err != nil { HandleError(err) } fmt.Println("Website IndexDocument:", res.IndexDocument.Suffix) // Set bucket website detail err = client.SetBucketWebsiteDetail(bucketName, wxmlDetail) if err != nil { HandleError(err) } // Get website Detail res, err = client.GetBucketWebsite(bucketName) if err != nil { HandleError(err) } fmt.Println("Website Redirect type:", res.RoutingRules[0].Redirect.RedirectType) // Delete Website err = client.DeleteBucketWebsite(bucketName) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketWebsiteSample completed") }
BucketWebsiteSample shows how to set, get and delete the bucket website.
BucketWebsiteSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_website.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_website.go
MIT
func CreateBucketSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } DeleteTestBucketAndObject(bucketName) // Case 1: Create a bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } // Case 2: Create the bucket with ACL err = client.CreateBucket(bucketName, oss.ACL(oss.ACLPublicRead)) if err != nil { HandleError(err) } // Case 3: Repeat the same bucket. OSS will not return error, but just no op. The ACL is not updated. err = client.CreateBucket(bucketName, oss.ACL(oss.ACLPublicReadWrite)) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("CreateBucketSample completed") }
CreateBucketSample shows how to create a bucket
CreateBucketSample
go
aliyun/aliyun-oss-go-sdk
sample/create_bucket.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/create_bucket.go
MIT
func CreateLiveChannelSample() { channelName := "create-livechannel" //create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // Case 1 - Create live-channel with Completely configure config := oss.LiveChannelConfiguration{ Description: "sample-for-livechannel", //description information, up to 128 bytes Status: "enabled", //enabled or disabled Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required FragDuration: 10, //the length of each ts object (in seconds), in the range [1,100], default: 5 FragCount: 4, //the number of ts objects in the m3u8 object, in the range of [1,100], default: 3 PlaylistName: "test-get-channel-status.m3u8", //the name of m3u8 object, which must end with ".m3u8" and the length range is [6,128],default: playlist.m3u8 }, } result, err := bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } playURL := result.PlayUrls[0] publishURL := result.PublishUrls[0] fmt.Printf("create livechannel:%s with config respones: playURL:%s, publishURL: %s\n", channelName, playURL, publishURL) // Case 2 - Create live-channel only specified type of target which is required simpleCfg := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", }, } result, err = bucket.CreateLiveChannel(channelName, simpleCfg) if err != nil { HandleError(err) } playURL = result.PlayUrls[0] publishURL = result.PublishUrls[0] fmt.Printf("create livechannel:%s with simple config respones: playURL:%s, publishURL: %s\n", channelName, playURL, publishURL) err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("PutObjectSample completed") }
CreateLiveChannelSample shows how to create a live channel
CreateLiveChannelSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func PutLiveChannelStatusSample() { channelName := "put-livechannel-status" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required }, } _, err = bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } // Case 1 - Set the status of live-channel to disabled err = bucket.PutLiveChannelStatus(channelName, "disabled") if err != nil { HandleError(err) } // Case 2 - Set the status of live-channel to enabled err = bucket.PutLiveChannelStatus(channelName, "enabled") if err != nil { HandleError(err) } err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("PutLiveChannelStatusSample completed") }
PutLiveChannelStatusSample shows how to set the status of a live channel to enabled or disabled
PutLiveChannelStatusSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func PostVodPlayListSample() { channelName := "post-vod-playlist" playlistName := "playlist.m3u8" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required PlaylistName: "playlist.m3u8", }, } _, err = bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } //This stage you can push live stream, and after that you could generator playlist endTime := time.Now().Add(-1 * time.Minute) startTime := endTime.Add(-60 * time.Minute) err = bucket.PostVodPlaylist(channelName, playlistName, startTime, endTime) if err != nil { HandleError(err) } err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("PostVodPlayListSampleSample completed") }
PostVodPlayListSample shows how to generate a VOD playlist
PostVodPlayListSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func GetVodPlayListSample() { channelName := "get-vod-playlist" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required PlaylistName: "playlist.m3u8", }, } _, err = bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } //This stage you can push live stream, and after that you could generator playlist endTime := time.Now().Add(-1 * time.Minute) startTime := endTime.Add(-60 * time.Minute) body, err := bucket.GetVodPlaylist(channelName, startTime, endTime) if err != nil { HandleError(err) } defer body.Close() data, err := ioutil.ReadAll(body) if err != nil { HandleError(err) } fmt.Printf("content of playlist is:%v\n", string(data)) err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("PostVodPlayListSampleSample completed") }
GetVodPlayListSample shows how to generate a VOD playlist and return its content
GetVodPlayListSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func GetLiveChannelStatSample() { channelName := "get-livechannel-stat" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required }, } _, err = bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } stat, err := bucket.GetLiveChannelStat(channelName) if err != nil { HandleError(err) } status := stat.Status connectedTime := stat.ConnectedTime remoteAddr := stat.RemoteAddr audioBW := stat.Audio.Bandwidth audioCodec := stat.Audio.Codec audioSampleRate := stat.Audio.SampleRate videoBW := stat.Video.Bandwidth videoFrameRate := stat.Video.FrameRate videoHeight := stat.Video.Height videoWidth := stat.Video.Width fmt.Printf("get channel stat:(%v, %v,%v, %v), audio(%v, %v, %v), video(%v, %v, %v, %v)\n", channelName, status, connectedTime, remoteAddr, audioBW, audioCodec, audioSampleRate, videoBW, videoFrameRate, videoHeight, videoWidth) err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("GetLiveChannelStatSample completed") }
GetLiveChannelStatSample shows how to get the state of a live channel
GetLiveChannelStatSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func GetLiveChannelInfoSample() { channelName := "get-livechannel-info" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required }, } _, err = bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } info, err := bucket.GetLiveChannelInfo(channelName) if err != nil { HandleError(err) } desc := info.Description status := info.Status fragCount := info.Target.FragCount fragDuation := info.Target.FragDuration playlistName := info.Target.PlaylistName targetType := info.Target.Type fmt.Printf("get channel stat:(%v,%v, %v), target(%v, %v, %v, %v)\n", channelName, desc, status, fragCount, fragDuation, playlistName, targetType) err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("GetLiveChannelInfoSample completed") }
GetLiveChannelInfoSample shows how to get the configuration information of a live channel
GetLiveChannelInfoSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func GetLiveChannelHistorySample() { channelName := "get-livechannel-info" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required }, } _, err = bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } //at most return up to lastest 10 push records history, err := bucket.GetLiveChannelHistory(channelName) for _, record := range history.Record { remoteAddr := record.RemoteAddr startTime := record.StartTime endTime := record.EndTime fmt.Printf("get channel:%s history:(%v, %v, %v)\n", channelName, remoteAddr, startTime, endTime) } err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("GetLiveChannelHistorySample completed") }
GetLiveChannelHistorySample shows how to get the push records of a live channel
GetLiveChannelHistorySample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func ListLiveChannelSample() { channelName := "list-livechannel" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required }, } _, err = bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } // Case 1: list all the live-channels marker := "" for { // Set the marker value, the first time is "", the value of NextMarker that returned should as the marker in the next time // At most return up to lastest 100 live-channels if "max-keys" is not specified result, err := bucket.ListLiveChannel(oss.Marker(marker)) if err != nil { HandleError(err) } for _, channel := range result.LiveChannel { fmt.Printf("list livechannel: (%v, %v, %v, %v, %v, %v)\n", channel.Name, channel.Status, channel.Description, channel.LastModified, channel.PlayUrls[0], channel.PublishUrls[0]) } if result.IsTruncated { marker = result.NextMarker } else { break } } // Case 2: Use the parameter "max-keys" to specify the maximum number of records returned, the value of max-keys cannot exceed 1000 // if "max-keys" the default value is 100 result, err := bucket.ListLiveChannel(oss.MaxKeys(10)) if err != nil { HandleError(err) } for _, channel := range result.LiveChannel { fmt.Printf("list livechannel: (%v, %v, %v, %v, %v, %v)\n", channel.Name, channel.Status, channel.Description, channel.LastModified, channel.PlayUrls[0], channel.PublishUrls[0]) } // Case 3: Only list the live-channels with the value of parameter "prefix" as prefix // max-keys, prefix, maker parameters can be combined result, err = bucket.ListLiveChannel(oss.MaxKeys(10), oss.Prefix("list-")) if err != nil { HandleError(err) } for _, channel := range result.LiveChannel { fmt.Printf("list livechannel: (%v, %v, %v, %v, %v, %v)\n", channel.Name, channel.Status, channel.Description, channel.LastModified, channel.PlayUrls[0], channel.PublishUrls[0]) } err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("ListLiveChannelSample completed") }
ListLiveChannelSample shows how to list the live channels of a specified bucket
ListLiveChannelSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func DeleteLiveChannelSample() { channelName := "delete-livechannel" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required }, } _, err = bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } err = bucket.DeleteLiveChannel(channelName) if err != nil { HandleError(err) } err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("DeleteLiveChannelSample completed") }
DeleteLiveChannelSample shows how to delete a live channel
DeleteLiveChannelSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func SignRtmpURLSample() { channelName := "sign-rtmp-url" playlistName := "playlist.m3u8" bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } config := oss.LiveChannelConfiguration{ Target: oss.LiveChannelTarget{ Type: "HLS", //the type of object, only supports HLS, required PlaylistName: "playlist.m3u8", }, } result, err := bucket.CreateLiveChannel(channelName, config) if err != nil { HandleError(err) } playURL := result.PlayUrls[0] publishURL := result.PublishUrls[0] fmt.Printf("livechannel:%s, playURL:%s, publishURL: %s\n", channelName, playURL, publishURL) signedRtmpURL, err := bucket.SignRtmpURL(channelName, playlistName, 3600) if err != nil { HandleError(err) } fmt.Printf("livechannel:%s, sinedRtmpURL: %s\n", channelName, signedRtmpURL) err = DeleteTestBucketAndLiveChannel(bucketName) if err != nil { HandleError(err) } fmt.Println("SignRtmpURLSample completed") }
SignRtmpURLSample shows how to generate a signed RTMP push-stream URL that a trusted user can use to push an RTMP stream to the live channel.
SignRtmpURLSample
go
aliyun/aliyun-oss-go-sdk
sample/livechannel.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/livechannel.go
MIT
func BucketReplicationSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create a bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Case 1:Put Bucket Replication // Case 1-1:Put Bucket Replication in xml format xmlData := `<?xml version="1.0" encoding="UTF-8"?> <ReplicationConfiguration> <Rule> <PrefixSet> <Prefix>source1</Prefix> <Prefix>video</Prefix> </PrefixSet> <Action>PUT</Action> <Destination> <Bucket>destBucketName</Bucket> <Location>oss-cn-hangzhou</Location> <TransferType>oss_acc</TransferType> </Destination> <HistoricalObjectReplication>enabled</HistoricalObjectReplication> <SyncRole>aliyunramrole</SyncRole> <SourceSelectionCriteria> <SseKmsEncryptedObjects> <Status>Enabled</Status> </SseKmsEncryptedObjects> </SourceSelectionCriteria> <EncryptionConfiguration> <ReplicaKmsKeyID>c4d49f85-ee30-426b-a5ed-95e9139d****</ReplicaKmsKeyID> </EncryptionConfiguration> </Rule> </ReplicationConfiguration>` err = client.PutBucketReplication(bucketName, xmlData) if err != nil { HandleError(err) } fmt.Println("Put Bucket Replication in xml format Success!") // Case 1-2:Put Bucket Replication in Struct destBucketName := "yp-re" prefix1 := "prefix_1" prefix2 := "prefix_2" keyId := "c4d49f85-ee30-426b-a5ed-95e9139d******" source := "Enabled" prefixSet := oss.ReplicationRulePrefix{Prefix: []*string{&prefix1, &prefix2}} reqReplication := oss.PutBucketReplication{ Rule: []oss.ReplicationRule{ { PrefixSet: &prefixSet, Action: "ALL", Destination: &oss.ReplicationRuleDestination{ Bucket: destBucketName, Location: "oss-cn-hangzhou", TransferType: "oss_acc", }, HistoricalObjectReplication: "disabled", SyncRole: "aliyunramrole", EncryptionConfiguration: &keyId, SourceSelectionCriteria: &source, }, }, } xmlBody, err := xml.Marshal(reqReplication) if err != nil { HandleError(err) } err = client.PutBucketReplication(bucketName, string(xmlBody)) if err != nil { HandleError(err) } fmt.Println("Put Bucket Replication Success!") // Case 2:Get Bucket Replication stringData, err := client.GetBucketReplication(bucketName) if err != nil { HandleError(err) } var repResult oss.GetBucketReplicationResult err = xml.Unmarshal([]byte(stringData),&repResult) if err != nil { HandleError(err) } for _, rule := range repResult.Rule { fmt.Printf("Rule Id:%s\n", rule.ID) if rule.RTC != nil { fmt.Printf("Rule RTC:%s\n", *rule.RTC) } if rule.PrefixSet != nil { for _, prefix := range rule.PrefixSet.Prefix { fmt.Printf("Rule Prefix:%s\n", *prefix) } } fmt.Printf("Rule Action:%s\n", rule.Action) fmt.Printf("Rule Destination Bucket:%s\n", rule.Destination.Bucket) fmt.Printf("Rule Destination Location:%s\n", rule.Destination.Location) fmt.Printf("Rule Destination TransferType:%s\n", rule.Destination.TransferType) fmt.Printf("Rule Status:%s\n", rule.Status) fmt.Printf("Rule Historical Object Replication:%s\n", rule.HistoricalObjectReplication) if rule.SyncRole != "" { fmt.Printf("Rule SyncRole:%s\n", rule.SyncRole) } } // Case 3:Put Bucket RTC enabled := "enabled" ruleId := "564df6de-7372-46dc-b4eb-10f******" rtc := oss.PutBucketRTC{ RTC: &enabled, ID: ruleId, } err = client.PutBucketRTC(bucketName, rtc) if err != nil { HandleError(err) } fmt.Println("Put Bucket RTC Success!") // Case 4:Get Bucket Replication Location stringData, err = client.GetBucketReplicationLocation(bucketName) if err != nil { HandleError(err) } var repLocation oss.GetBucketReplicationLocationResult err = 
xml.Unmarshal([]byte(stringData),&repLocation) if err != nil { HandleError(err) } for _, location := range repLocation.Location { fmt.Printf("Bucket Replication Location: %s\n", location) } for _, transferType := range repLocation.LocationTransferType { fmt.Printf("Bucket Replication Location Transfer Type Location: %s\n", transferType.Location) fmt.Printf("Bucket Replication Location Transfer Type Type: %s\n", transferType.TransferTypes) } for _, rtcLocation := range repLocation.RTCLocation { fmt.Printf("Bucket Replication Location RTC Location: %s\n", rtcLocation) } fmt.Println("Get Bucket Replication Location Success!") // Case 5:Get Bucket Replication Progress stringData, err = client.GetBucketReplicationProgress(bucketName, ruleId) if err != nil { HandleError(err) } var repProgress oss.GetBucketReplicationProgressResult err = xml.Unmarshal([]byte(stringData),&repProgress) if err != nil { HandleError(err) } for _, repProgressRule := range repProgress.Rule { fmt.Printf("Rule Id:%s\n", repProgressRule.ID) if repProgressRule.PrefixSet != nil { for _, prefix := range repProgressRule.PrefixSet.Prefix { fmt.Printf("Rule Prefix:%s\n", *prefix) } } fmt.Printf("Replication Progress Rule Action:%s\n", repProgressRule.Action) fmt.Printf("Replication Progress Rule Destination Bucket:%s\n", repProgressRule.Destination.Bucket) fmt.Printf("Replication Progress Rule Destination Location:%s\n", repProgressRule.Destination.Location) fmt.Printf("Replication Progress Rule Destination TransferType:%s\n", repProgressRule.Destination.TransferType) fmt.Printf("Replication Progress Rule Status:%s\n", repProgressRule.Status) fmt.Printf("Replication Progress Rule Historical Object Replication:%s\n", repProgressRule.HistoricalObjectReplication) if (*repProgressRule.Progress).HistoricalObject != "" { fmt.Printf("Replication Progress Rule Progress Historical Object:%s\n", (*repProgressRule.Progress).HistoricalObject) } fmt.Printf("Replication Progress Rule Progress NewObject:%s\n", (*repProgressRule.Progress).NewObject) } fmt.Println("Get Bucket Replication Progress Success!") // Case 6:Delete Bucket Replication err = client.DeleteBucketReplication(bucketName, ruleId) if err != nil { HandleError(err) } fmt.Println("Delete Bucket Replication Success!") fmt.Println("BucketReplicationSample completed") }
BucketReplicationSample shows how to set, get and delete the bucket replication configuration.
BucketReplicationSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_replication.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_replication.go
MIT
func GetObjectSample() { // Create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // Upload the object err = bucket.PutObjectFromFile(objectKey, localFile) if err != nil { HandleError(err) } // Case 1: Download the object into ReadCloser(). The body needs to be closed body, err := bucket.GetObject(objectKey) if err != nil { HandleError(err) } data, err := ioutil.ReadAll(body) body.Close() if err != nil { HandleError(err) } fmt.Println("size of data is: ", len(data)) // Case 2: Download in the range of object. body, err = bucket.GetObject(objectKey, oss.Range(15, 19)) if err != nil { HandleError(err) } data, err = ioutil.ReadAll(body) body.Close() fmt.Println("the range of data is: ", string(data)) // Case 3: Download the object to byte array. This is for small object. buf := new(bytes.Buffer) body, err = bucket.GetObject(objectKey) if err != nil { HandleError(err) } io.Copy(buf, body) body.Close() // Case 4: Download the object to local file. The file handle needs to be specified fd, err := os.OpenFile("mynewfile-1.jpg", os.O_WRONLY|os.O_CREATE, 0660) if err != nil { HandleError(err) } defer fd.Close() body, err = bucket.GetObject(objectKey) if err != nil { HandleError(err) } io.Copy(fd, body) body.Close() // Case 5: Download the object to local file with file name specified err = bucket.GetObjectToFile(objectKey, "mynewfile-2.jpg") if err != nil { HandleError(err) } // Case 6: Get the object with contraints. When contraints are met, download the file. Otherwise return precondition error // last modified time constraint is met, download the file body, err = bucket.GetObject(objectKey, oss.IfModifiedSince(pastDate)) if err != nil { HandleError(err) } body.Close() // Last modified time contraint is not met, do not download the file _, err = bucket.GetObject(objectKey, oss.IfUnmodifiedSince(pastDate)) if err == nil { HandleError(fmt.Errorf("This result is not the expected result")) } // body.Close() meta, err := bucket.GetObjectDetailedMeta(objectKey) if err != nil { HandleError(err) } etag := meta.Get(oss.HTTPHeaderEtag) // Check the content, etag contraint is met, download the file body, err = bucket.GetObject(objectKey, oss.IfMatch(etag)) if err != nil { HandleError(err) } body.Close() // Check the content, etag contraint is not met, do not download the file _, err = bucket.GetObject(objectKey, oss.IfNoneMatch(etag)) if err == nil { HandleError(fmt.Errorf("This result is not the expected result")) } // body.Close() // Case 7: Big file's multipart download, concurrent and resumable download is supported. // multipart download with part size 100KB. By default single coroutine is used and no checkpoint err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024) if err != nil { HandleError(err) } // Part size is 100K and 3 coroutines are used err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024, oss.Routines(3)) if err != nil { HandleError(err) } // Part size is 100K and 3 coroutines with checkpoint err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024, oss.Routines(3), oss.Checkpoint(true, "")) if err != nil { HandleError(err) } // Specify the checkpoint file path to record which parts have been downloaded. // This file path can be specified by the 2nd parameter of Checkpoint, it will be the download directory if the file path is empty. 
err = bucket.DownloadFile(objectKey, "mynewfile-3.jpg", 100*1024, oss.Checkpoint(true, "mynewfile.cp")) if err != nil { HandleError(err) } // Case 8: Use GZIP encoding for downloading the file, GetObject/GetObjectToFile are the same. err = bucket.PutObjectFromFile(objectKey, htmlLocalFile) if err != nil { HandleError(err) } err = bucket.GetObjectToFile(objectKey, "myhtml.gzip", oss.AcceptEncoding("gzip")) if err != nil { HandleError(err) } // Delete the object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("GetObjectSample completed") }
GetObjectSample shows the streaming download, range download and resumable download.
GetObjectSample
go
aliyun/aliyun-oss-go-sdk
sample/get_object.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/get_object.go
MIT
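A distilled form of the resumable download path in GetObjectSample: DownloadFile with a 100 KB part size, three concurrent routines, and a checkpoint file derived from the destination path. All options appear in the sample; the paths are placeholders.

package main

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// downloadResumable fetches an object with multipart download: 100 KB parts,
// three routines, and a checkpoint file so an interrupted download can resume.
func downloadResumable(bucket *oss.Bucket, objectKey, localPath string) error {
	return bucket.DownloadFile(objectKey, localPath, 100*1024,
		oss.Routines(3),
		oss.Checkpoint(true, localPath+".cp"),
	)
}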
func ObjectMetaSample() { // Create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // Delete object err = bucket.PutObject(objectKey, strings.NewReader("YoursObjectValue")) if err != nil { HandleError(err) } // Case 0: Set bucket meta. one or more properties could be set // Note: Meta is case insensitive options := []oss.Option{ oss.Expires(futureDate), oss.Meta("myprop", "mypropval")} err = bucket.SetObjectMeta(objectKey, options...) if err != nil { HandleError(err) } // Case 1: Get the object metadata. Only return basic meta information includes ETag, size and last modified. props, err := bucket.GetObjectMeta(objectKey) if err != nil { HandleError(err) } fmt.Println("Object Meta:", props) // Case 2: Get all the detailed object meta including custom meta props, err = bucket.GetObjectDetailedMeta(objectKey) if err != nil { HandleError(err) } fmt.Println("Expires:", props.Get("Expires")) // Case 3: Get the object's all metadata with contraints. When constraints are met, return the metadata. props, err = bucket.GetObjectDetailedMeta(objectKey, oss.IfUnmodifiedSince(futureDate)) if err != nil { HandleError(err) } fmt.Println("MyProp:", props.Get("X-Oss-Meta-Myprop")) _, err = bucket.GetObjectDetailedMeta(objectKey, oss.IfModifiedSince(futureDate)) if err == nil { HandleError(err) } goar, err := bucket.GetObjectACL(objectKey) if err != nil { HandleError(err) } fmt.Println("Object ACL:", goar.ACL) // Delete object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("ObjectMetaSample completed") }
ObjectMetaSample shows how to get and set the object metadata
ObjectMetaSample
go
aliyun/aliyun-oss-go-sdk
sample/object_meta.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/object_meta.go
MIT
func PutObjectSample() { // Create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } var val = "花间一壶酒,独酌无相亲。 举杯邀明月,对影成三人。" // Case 1: Upload an object from a string err = bucket.PutObject(objectKey, strings.NewReader(val)) if err != nil { HandleError(err) } // Case 2: Upload an object whose value is a byte[] err = bucket.PutObject(objectKey, bytes.NewReader([]byte(val))) if err != nil { HandleError(err) } // Case 3: Upload the local file with file handle, user should open the file at first. fd, err := os.Open(localFile) if err != nil { HandleError(err) } defer fd.Close() err = bucket.PutObject(objectKey, fd) if err != nil { HandleError(err) } // Case 4: Upload an object with local file name, user need not open the file. err = bucket.PutObjectFromFile(objectKey, localFile) if err != nil { HandleError(err) } // Case 5: Upload an object with specified properties, PutObject/PutObjectFromFile/UploadFile also support this feature. options := []oss.Option{ oss.Expires(futureDate), oss.ObjectACL(oss.ACLPublicRead), oss.Meta("myprop", "mypropval"), } err = bucket.PutObject(objectKey, strings.NewReader(val), options...) if err != nil { HandleError(err) } props, err := bucket.GetObjectDetailedMeta(objectKey) if err != nil { HandleError(err) } fmt.Println("Object Meta:", props) // Case 6: Upload an object with sever side encrpytion kms and kms id specified err = bucket.PutObject(objectKey, strings.NewReader(val), oss.ServerSideEncryption("KMS"), oss.ServerSideEncryptionKeyID(kmsID)) if err != nil { HandleError(err) } // Case 7: Upload an object with callback callbackMap := map[string]string{} callbackMap["callbackUrl"] = "http://oss-demo.aliyuncs.com:23450" callbackMap["callbackHost"] = "oss-cn-hangzhou.aliyuncs.com" callbackMap["callbackBody"] = "filename=${object}&size=${size}&mimeType=${mimeType}" callbackMap["callbackBodyType"] = "application/x-www-form-urlencoded" callbackBuffer := bytes.NewBuffer([]byte{}) callbackEncoder := json.NewEncoder(callbackBuffer) //do not encode '&' to "\u0026" callbackEncoder.SetEscapeHTML(false) err = callbackEncoder.Encode(callbackMap) if err != nil { HandleError(err) } callbackVal := base64.StdEncoding.EncodeToString(callbackBuffer.Bytes()) err = bucket.PutObject(objectKey, strings.NewReader(val), oss.Callback(callbackVal)) if err != nil { HandleError(err) } // Case 7-2: Upload an object with callback and get callback body callbackMap = map[string]string{} callbackMap["callbackUrl"] = "http://oss-demo.aliyuncs.com:23450" callbackMap["callbackHost"] = "oss-cn-hangzhou.aliyuncs.com" callbackMap["callbackBody"] = "filename=${object}&size=${size}&mimeType=${mimeType}" callbackMap["callbackBodyType"] = "application/x-www-form-urlencoded" callbackBuffer = bytes.NewBuffer([]byte{}) callbackEncoder = json.NewEncoder(callbackBuffer) //do not encode '&' to "\u0026" callbackEncoder.SetEscapeHTML(false) err = callbackEncoder.Encode(callbackMap) if err != nil { HandleError(err) } callbackVal = base64.StdEncoding.EncodeToString(callbackBuffer.Bytes()) var body []byte err = bucket.PutObject(objectKey, strings.NewReader(val), oss.Callback(callbackVal), oss.CallbackResult(&body)) if err != nil { e, ok := err.(oss.UnexpectedStatusCodeError) if !(ok && e.Got() == 203) { HandleError(err) } } fmt.Printf("callback body:%s\n", body) // Case 8: Big file's multipart upload. It supports concurrent upload with resumable upload. // multipart upload with 100K as part size. By default 1 coroutine is used and no checkpoint is used. 
err = bucket.UploadFile(objectKey, localFile, 100*1024) if err != nil { HandleError(err) } // Part size is 100K and 3 coroutines are used err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.Routines(3)) if err != nil { HandleError(err) } // Part size is 100K and 3 coroutines with checkpoint err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.Routines(3), oss.Checkpoint(true, "")) if err != nil { HandleError(err) } // Specify the local file path for checkpoint files. // the 2nd parameter of Checkpoint can specify the file path, when the file path is empty, it will upload the directory. err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.Checkpoint(true, localFile+".cp")) if err != nil { HandleError(err) } // Case 8-1:Big file's multipart upload. Set callback and get callback body //The local file is partitioned, and the number of partitions is specified as 3. chunks, err := oss.SplitFileByPartNum(localFile, 3) fd, err = os.Open(localFile) defer fd.Close() //Specify the expiration time. expires := time.Date(2049, time.January, 10, 23, 0, 0, 0, time.UTC) // If you need to set the request header when initializing fragmentation, please refer to the following example code. options = []oss.Option{ oss.MetadataDirective(oss.MetaReplace), oss.Expires(expires), // Specifies the web page caching behavior when the object is downloaded. // oss.CacheControl("no-cache"), // Specifies the name of the object when it is downloaded. // oss.ContentDisposition("attachment;filename=FileName.txt"), // Specifies the content encoding format of the object. // oss.ContentEncoding("gzip"), // Specifies to encode the returned key. Currently, URL encoding is supported. // oss.EncodingType("url"), // Specifies the storage type of the object. // oss.ObjectStorageClass(oss.StorageStandard), } // Step 1: initialize a fragment upload event and specify the storage type as standard storage imur, err := bucket.InitiateMultipartUpload(objectKey, options...) // Step 2: upload fragments. var parts []oss.UploadPart for _, chunk := range chunks { fd.Seek(chunk.Offset, os.SEEK_SET) // Call the uploadpart method to upload each fragment. part, err := bucket.UploadPart(imur, fd, chunk.Size, chunk.Number) if err != nil { HandleError(err) } parts = append(parts, part) } // Construct callback map callbackMap = map[string]string{} callbackMap["callbackUrl"] = "www.aliyuncs.com" callbackMap["callbackBody"] = "filename=demo.go&name=golang" callbackMap["callbackBodyType"] = "application/x-www-form-urlencoded" callbackBuffer = bytes.NewBuffer([]byte{}) callbackEncoder = json.NewEncoder(callbackBuffer) //do not encode '&' to "\u0026" callbackEncoder.SetEscapeHTML(false) err = callbackEncoder.Encode(callbackMap) if err != nil { HandleError(err) } callbackVal = base64.StdEncoding.EncodeToString(callbackBuffer.Bytes()) var pbody []byte // Step 3: complete fragment uploading _, err = bucket.CompleteMultipartUpload(imur, parts, oss.Callback(callbackVal), oss.CallbackResult(&pbody)) if err != nil { HandleError(err) } fmt.Printf("callback body:%s/n", pbody) // Case 9: Set the storage classes.OSS provides three storage classes: Standard, Infrequent Access, and Archive. // Supported APIs: PutObject, CopyObject, UploadFile, AppendObject... err = bucket.PutObject(objectKey, strings.NewReader(val), oss.ObjectStorageClass("IA")) if err != nil { HandleError(err) } // Upload a local file, and set the object's storage-class to 'Archive'. 
err = bucket.UploadFile(objectKey, localFile, 100*1024, oss.ObjectStorageClass("Archive")) if err != nil { HandleError(err) } // Delete object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("PutObjectSample completed") }
PutObjectSample illustrates two methods for uploading a file: simple upload and multipart upload.
PutObjectSample
go
aliyun/aliyun-oss-go-sdk
sample/put_object.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/put_object.go
MIT
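As a quick companion to the record above, here is a minimal, self-contained sketch of the simple-upload path only (not taken from the repository's sample files); the endpoint, credentials, bucket, and object names are placeholders you must replace, and the import path follows the repository path shown above.

package main

import (
	"fmt"
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint and credentials (assumptions, replace with real values).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	bucket, err := client.Bucket("<yourBucketName>")
	if err != nil {
		panic(err)
	}

	// Simple upload from a string, with an optional custom metadata header.
	err = bucket.PutObject("example-object.txt", strings.NewReader("hello oss"),
		oss.Meta("myprop", "mypropval"))
	if err != nil {
		panic(err)
	}

	// Read back the object metadata to confirm the upload and the custom header.
	props, err := bucket.GetObjectDetailedMeta("example-object.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println("x-oss-meta-myprop:", props.Get("x-oss-meta-myprop"))
}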
func BucketInfoSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Get bucket info res, err := client.GetBucketInfo(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket Info Name: ", res.BucketInfo.Name) fmt.Println("Bucket Info AccessMonitor: ", res.BucketInfo.AccessMonitor) fmt.Println("Bucket Info Location: ", res.BucketInfo.Location) fmt.Println("Bucket Info CreationDate: ", res.BucketInfo.CreationDate) fmt.Println("Bucket Info ACL: ", res.BucketInfo.ACL) fmt.Println("Bucket Info Owner Id: ", res.BucketInfo.Owner.ID) fmt.Println("Bucket Info Owner DisplayName: ", res.BucketInfo.Owner.DisplayName) fmt.Println("Bucket Info StorageClass: ", res.BucketInfo.StorageClass) fmt.Println("Bucket Info RedundancyType: ", res.BucketInfo.RedundancyType) fmt.Println("Bucket Info ExtranetEndpoint: ", res.BucketInfo.ExtranetEndpoint) fmt.Println("Bucket Info IntranetEndpoint: ", res.BucketInfo.IntranetEndpoint) fmt.Println("Bucket Info CrossRegionReplication: ", res.BucketInfo.CrossRegionReplication) if res.BucketInfo.Versioning != "" { fmt.Println("Bucket Info Versioning: ", res.BucketInfo.Versioning) } if res.BucketInfo.SseRule.KMSDataEncryption != "" { fmt.Println("Bucket Info SseRule KMSDataEncryption: ", res.BucketInfo.SseRule.KMSDataEncryption) } if res.BucketInfo.SseRule.KMSMasterKeyID != "" { fmt.Println("Bucket Info SseRule KMSMasterKeyID: ", res.BucketInfo.SseRule.KMSMasterKeyID) } if res.BucketInfo.SseRule.SSEAlgorithm != "" { fmt.Println("Bucket Info SseRule SSEAlgorithm: ", res.BucketInfo.SseRule.SSEAlgorithm) } }
BucketInfoSample shows how to get the bucket info.
BucketInfoSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_info.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_info.go
MIT
func ArchiveSample() { // Create archive bucket client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } err = client.CreateBucket(bucketName, oss.StorageClass(oss.StorageArchive)) if err != nil { HandleError(err) } archiveBucket, err := client.Bucket(bucketName) if err != nil { HandleError(err) } // Put archive object var val = "花间一壶酒,独酌无相亲。 举杯邀明月,对影成三人。" err = archiveBucket.PutObject(objectKey, strings.NewReader(val)) if err != nil { HandleError(err) } // Check whether the object is archive class meta, err := archiveBucket.GetObjectDetailedMeta(objectKey) if err != nil { HandleError(err) } if meta.Get("X-Oss-Storage-Class") == string(oss.StorageArchive) { // Restore object err = archiveBucket.RestoreObject(objectKey) if err != nil { HandleError(err) } // Wait for restore completed meta, err = archiveBucket.GetObjectDetailedMeta(objectKey) for meta.Get("X-Oss-Restore") == "ongoing-request=\"true\"" { fmt.Println("x-oss-restore:" + meta.Get("X-Oss-Restore")) time.Sleep(1000 * time.Second) meta, err = archiveBucket.GetObjectDetailedMeta(objectKey) } } // Get restored object err = archiveBucket.GetObjectToFile(objectKey, localFile) if err != nil { HandleError(err) } // Restore repeatedly err = archiveBucket.RestoreObject(objectKey) // Delete object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("ArchiveSample completed") }
ArchiveSample shows how to create an archive bucket, upload an object, restore it, and download the restored object.
ArchiveSample
go
aliyun/aliyun-oss-go-sdk
sample/archive.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/archive.go
MIT
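The restore flow in the sample above polls with a very long sleep; the following is a minimal sketch of the same restore-and-poll idea with a shorter interval (placeholder endpoint, credentials, bucket, and key are assumptions, not values from the sample).

package main

import (
	"fmt"
	"time"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket/key (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("<yourArchiveBucket>")
	if err != nil {
		panic(err)
	}

	key := "archived-object.txt"

	// Ask OSS to restore the archived object.
	if err := bucket.RestoreObject(key); err != nil {
		panic(err)
	}

	// Poll the x-oss-restore header until the restore finishes.
	for {
		meta, err := bucket.GetObjectDetailedMeta(key)
		if err != nil {
			panic(err)
		}
		if meta.Get("X-Oss-Restore") != `ongoing-request="true"` {
			break
		}
		time.Sleep(30 * time.Second) // shorter poll interval than the sample above
	}

	// The restored copy can now be downloaded.
	if err := bucket.GetObjectToFile(key, "restored-local-copy.txt"); err != nil {
		panic(err)
	}
	fmt.Println("restore completed")
}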
func BucketrRequestPaymentSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } reqPayConf := oss.RequestPaymentConfiguration{ Payer: string(oss.Requester), } // Case 1: Set bucket request payment. err = client.SetBucketRequestPayment(bucketName, reqPayConf) if err != nil { HandleError(err) } // Get bucket request payment configuration ret, err := client.GetBucketRequestPayment(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket request payer:", ret.Payer) if credentialUID == "" { fmt.Println("Please enter a credential User ID, if you want to test credential user.") clearData(client, bucketName) return } // Credential other User policyInfo := ` { "Version":"1", "Statement":[ { "Action":[ "oss:*" ], "Effect":"Allow", "Principal":["` + credentialUID + `"], "Resource":["acs:oss:*:*:` + bucketName + `", "acs:oss:*:*:` + bucketName + `/*"] } ] }` err = client.SetBucketPolicy(bucketName, policyInfo) if err != nil { HandleError(err) } // New a Credential client creClient, err := oss.New(endpoint, credentialAccessID, credentialAccessKey) if err != nil { HandleError(err) } // Get credential bucket creBucket, err := creClient.Bucket(bucketName) if err != nil { HandleError(err) } // Put object by credential User key := "testCredentialObject" objectValue := "this is a test string." // Put object err = creBucket.PutObject(key, strings.NewReader(objectValue), oss.RequestPayer(oss.Requester)) if err != nil { HandleError(err) } // Get object body, err := creBucket.GetObject(key, oss.RequestPayer(oss.Requester)) if err != nil { HandleError(err) } defer body.Close() data, err := ioutil.ReadAll(body) if err != nil { HandleError(err) } fmt.Println(string(data)) // Delete object err = creBucket.DeleteObject(key, oss.RequestPayer(oss.Requester)) if err != nil { HandleError(err) } clearData(client, bucketName) }
BucketrRequestPaymentSample shows how to set and get the bucket request payment configuration.
BucketrRequestPaymentSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_requestpayment.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_requestpayment.go
MIT
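The core of the record above is just two calls; a minimal sketch follows (bucket name, endpoint, and credentials are placeholders, not values from the sample).

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Make requesters pay for requests and traffic on this bucket.
	conf := oss.RequestPaymentConfiguration{Payer: string(oss.Requester)}
	if err := client.SetBucketRequestPayment("<yourBucketName>", conf); err != nil {
		panic(err)
	}

	// Read the configuration back.
	ret, err := client.GetBucketRequestPayment("<yourBucketName>")
	if err != nil {
		panic(err)
	}
	fmt.Println("payer:", ret.Payer)

	// Note: as in the sample above, requesters then pass oss.RequestPayer(oss.Requester)
	// as an option on their object operations.
}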
func BucketCnameSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create a bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // case1:Create the bucket cname token cname := "www.example.com" cbResult, err := client.CreateBucketCnameToken(bucketName, cname) if err != nil { HandleError(err) } fmt.Printf("Cname: %s\n", cbResult.Cname) fmt.Printf("Token: %s\n", cbResult.Token) fmt.Printf("ExpireTime: %s\n", cbResult.ExpireTime) // case2: Get the bucket cname token ctResult, err := client.GetBucketCnameToken(bucketName, cname) if err != nil { HandleError(err) } fmt.Printf("Cname: %s\n", ctResult.Cname) fmt.Printf("Token: %s\n", ctResult.Token) fmt.Printf("ExpireTime: %s\n", ctResult.ExpireTime) // case3: Add the bucket cname // case 3-1: Add bucket cname err = client.PutBucketCname(bucketName, cname) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } fmt.Println("Put Bucket Cname Success!") // case 3-2: Bind certificate var bindCnameConfig oss.PutBucketCname var bindCertificateConfig oss.CertificateConfiguration bindCnameConfig.Cname = "www.example.com" bindCertificate := "-----BEGIN CERTIFICATE-----MIIGeDCCBOCgAwIBAgIRAPj4FWpW5XN6kwgU7*******-----END CERTIFICATE-----" privateKey := "-----BEGIN CERTIFICATE-----MIIFBzCCA++gT2H2hT6Wb3nwxjpLIfXmSVcV*****-----END CERTIFICATE-----" bindCertificateConfig.CertId = "92******-cn-hangzhou" bindCertificateConfig.Certificate = bindCertificate bindCertificateConfig.PrivateKey = privateKey bindCertificateConfig.Force = true bindCnameConfig.CertificateConfiguration = &bindCertificateConfig err = client.PutBucketCnameWithCertificate(bucketName, bindCnameConfig) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } fmt.Println("Bind Certificate Success!") // case 3-3: Unbind certificate var putCnameConfig oss.PutBucketCname var CertificateConfig oss.CertificateConfiguration putCnameConfig.Cname = "www.example.com" CertificateConfig.DeleteCertificate = true putCnameConfig.CertificateConfiguration = &CertificateConfig err = client.PutBucketCnameWithCertificate(bucketName, putCnameConfig) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } fmt.Println("Unbind Certificate Success!") // case4: List the bucket cname cnResult, err := client.ListBucketCname(bucketName) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } var certificate oss.Certificate fmt.Printf("Bucket:%s\n", cnResult.Bucket) fmt.Printf("Owner:%s\n", cnResult.Owner) if len(cnResult.Cname) > 0 { for _, cnameInfo := range cnResult.Cname { fmt.Printf("Domain:%s\n", cnameInfo.Domain) fmt.Printf("LastModified:%s\n", cnameInfo.LastModified) fmt.Printf("Status:%s\n", cnameInfo.Status) if cnameInfo.Certificate != certificate { fmt.Printf("Type:%s\n", cnameInfo.Certificate.Type) fmt.Printf("CertId:%s\n", cnameInfo.Certificate.CertId) fmt.Printf("Status:%s\n", cnameInfo.Certificate.Status) fmt.Printf("CreationDate:%s\n", cnameInfo.Certificate.CreationDate) fmt.Printf("Fingerprint:%s\n", cnameInfo.Certificate.Fingerprint) fmt.Printf("ValidStartDate:%s\n", cnameInfo.Certificate.ValidStartDate) fmt.Printf("ValidEndDate:%s\n", cnameInfo.Certificate.ValidEndDate) } } } // case5: Delete the bucket cname err = client.DeleteBucketCname(bucketName, cname) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } fmt.Println("Delete Bucket Cname Success!") fmt.Println("BucketCnameSample completed") }
BucketCnameSample shows how to put, get, list, and delete the bucket cname.
BucketCnameSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_cname.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_cname.go
MIT
func BucketQoSInfoSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Initial QoS Configuration five := 5 four := 4 totalQps := 200 qosConf := oss.BucketQoSConfiguration{ TotalUploadBandwidth: &five, IntranetUploadBandwidth: &four, ExtranetUploadBandwidth: &four, TotalDownloadBandwidth: &four, IntranetDownloadBandwidth: &four, ExtranetDownloadBandwidth: &four, TotalQPS: &totalQps, IntranetQPS: &totalQps, ExtranetQPS: &totalQps, } // Set Qos Info err = client.SetBucketQoSInfo(bucketName, qosConf) if err != nil { HandleError(err) } // Get Qos Info ret, err := client.GetBucketQosInfo(bucketName) if err != nil { HandleError(err) } fmt.Printf("Bucket QoSInfo\n TotalUploadBandwidth: %d\n IntranetUploadBandwidth: %d\n ExtranetUploadBandwidth: %d\n", *ret.TotalUploadBandwidth, *ret.IntranetUploadBandwidth, *ret.ExtranetUploadBandwidth) // Delete QosInfo err = client.DeleteBucketQosInfo(bucketName) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketPolicySample completed") }
BucketQoSInfoSample shows how to set, get, and delete the bucket QoS configuration.
BucketQoSInfoSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_qosInfo.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_qosInfo.go
MIT
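Because the QoS configuration fields are int pointers, a sketch of the pointer plumbing may help; this mirrors the limits used in the sample above, with placeholder endpoint, credentials, and bucket name.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// The fields are *int, so the limits are passed as pointers (values as in the sample above).
	totalUp, sub, qps := 5, 4, 200
	conf := oss.BucketQoSConfiguration{
		TotalUploadBandwidth:      &totalUp,
		IntranetUploadBandwidth:   &sub,
		ExtranetUploadBandwidth:   &sub,
		TotalDownloadBandwidth:    &sub,
		IntranetDownloadBandwidth: &sub,
		ExtranetDownloadBandwidth: &sub,
		TotalQPS:                  &qps,
		IntranetQPS:               &qps,
		ExtranetQPS:               &qps,
	}
	if err := client.SetBucketQoSInfo("<yourBucketName>", conf); err != nil {
		panic(err)
	}

	ret, err := client.GetBucketQosInfo("<yourBucketName>")
	if err != nil {
		panic(err)
	}
	fmt.Println("total upload bandwidth:", *ret.TotalUploadBandwidth)

	// Remove the QoS configuration again.
	if err := client.DeleteBucketQosInfo("<yourBucketName>"); err != nil {
		panic(err)
	}
}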
func AppendObjectSample() { // Create bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } err = bucket.DeleteObject(objectKey) var str = "弃我去者,昨日之日不可留。 乱我心者,今日之日多烦忧!" var nextPos int64 // Case 1: Append a string to the object // The first append position is 0 and the return value is for the next append's position. nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(str), nextPos) if err != nil { HandleError(err) } // Second append nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(str), nextPos) if err != nil { HandleError(err) } // Download body, err := bucket.GetObject(objectKey) if err != nil { HandleError(err) } data, err := ioutil.ReadAll(body) body.Close() if err != nil { HandleError(err) } fmt.Println(objectKey, ":", string(data)) err = bucket.DeleteObject(objectKey) if err != nil { HandleError(err) } // Case 2: Append byte array to the object nextPos = 0 // The first append position is 0, and the return value is for the next append's position. nextPos, err = bucket.AppendObject(objectKey, bytes.NewReader([]byte(str)), nextPos) if err != nil { HandleError(err) } // Second append nextPos, err = bucket.AppendObject(objectKey, bytes.NewReader([]byte(str)), nextPos) if err != nil { HandleError(err) } // Download body, err = bucket.GetObject(objectKey) if err != nil { HandleError(err) } data, err = ioutil.ReadAll(body) body.Close() if err != nil { HandleError(err) } fmt.Println(objectKey, ":", string(data)) err = bucket.DeleteObject(objectKey) if err != nil { HandleError(err) } // Case 3: Append a local file to the object fd, err := os.Open(localFile) if err != nil { HandleError(err) } defer fd.Close() nextPos = 0 nextPos, err = bucket.AppendObject(objectKey, fd, nextPos) if err != nil { HandleError(err) } // Case 4: Get the next append position by GetObjectDetailedMeta props, err := bucket.GetObjectDetailedMeta(objectKey) nextPos, err = strconv.ParseInt(props.Get(oss.HTTPHeaderOssNextAppendPosition), 10, 64) if err != nil { HandleError(err) } nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(str), nextPos) if err != nil { HandleError(err) } err = bucket.DeleteObject(objectKey) if err != nil { HandleError(err) } // Case 5: Specify the object properties for the first append, including the "x-oss-meta"'s custom metadata. options := []oss.Option{ oss.Expires(futureDate), oss.ObjectACL(oss.ACLPublicRead), oss.Meta("myprop", "mypropval")} nextPos = 0 fd.Seek(0, os.SEEK_SET) nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(str), nextPos, options...) if err != nil { HandleError(err) } // Second append fd.Seek(0, os.SEEK_SET) nextPos, err = bucket.AppendObject(objectKey, strings.NewReader(str), nextPos) if err != nil { HandleError(err) } props, err = bucket.GetObjectDetailedMeta(objectKey) if err != nil { HandleError(err) } fmt.Println("myprop:", props.Get("x-oss-meta-myprop")) goar, err := bucket.GetObjectACL(objectKey) if err != nil { HandleError(err) } fmt.Println("Object ACL:", goar.ACL) // Case 6: Set the storage classes.OSS provides three storage classes: Standard, Infrequent Access, and Archive. // Upload a strings, and you can append some strings in the behind of object. but the object is 'Archive' storange class. // An object created with the AppendObject operation is an appendable object. set the object storange-class to 'Archive'. 
nextPos, err = bucket.AppendObject(appendObjectKey, strings.NewReader("昨夜雨疏风骤,浓睡不消残酒。试问卷帘人,"), nextPos, oss.ObjectStorageClass("Archive")) if err != nil { HandleError(err) } // Delete the object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("AppendObjectSample completed") }
AppendObjectSample shows how to use append upload.
AppendObjectSample
go
aliyun/aliyun-oss-go-sdk
sample/append_object.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/append_object.go
MIT
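The key detail in the record above is tracking the append position; a minimal sketch of that bookkeeping follows (endpoint, credentials, bucket, and key are placeholders).

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket/key (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	bucket, err := client.Bucket("<yourBucketName>")
	if err != nil {
		panic(err)
	}

	key := "append-demo.log"

	// The first append starts at position 0; each call returns the next position.
	var nextPos int64
	nextPos, err = bucket.AppendObject(key, strings.NewReader("line 1\n"), nextPos)
	if err != nil {
		panic(err)
	}
	nextPos, err = bucket.AppendObject(key, strings.NewReader("line 2\n"), nextPos)
	if err != nil {
		panic(err)
	}

	// If the position is lost, it can be recovered from the object metadata.
	props, err := bucket.GetObjectDetailedMeta(key)
	if err != nil {
		panic(err)
	}
	recovered, err := strconv.ParseInt(props.Get(oss.HTTPHeaderOssNextAppendPosition), 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println("next append position:", nextPos, "recovered from metadata:", recovered)
}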
func BucketRefererSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } var referers = []string{ "http://www.aliyun.com", "http://www.???.aliyuncs.com", "http://www.*.com", } // Case 1: Set referers. The referers are with wildcards ? and * which could represent one and zero to multiple characters err = client.SetBucketReferer(bucketName, referers, false) if err != nil { HandleError(err) } // Case 2: Clear referers referers = []string{} err = client.SetBucketReferer(bucketName, referers, true) if err != nil { HandleError(err) } // Case 3: Create Refer With SetBucketRefererV2 var setBucketReferer oss.RefererXML setBucketReferer.RefererList = []string{ "http://www.aliyun.com", "https://www.aliyun.com", "http://www.???.aliyuncs.com", "http://www.*.com", } referer1 := "http://www.refuse.com" referer2 := "https://*.hack.com" referer3 := "http://ban.*.com" referer4 := "https://www.?.deny.com" setBucketReferer.RefererBlacklist = &oss.RefererBlacklist{ []string{ referer1, referer2, referer3, referer4, }, } setBucketReferer.AllowEmptyReferer = false boolTrue := true setBucketReferer.AllowTruncateQueryString = &boolTrue err = client.SetBucketRefererV2(bucketName, setBucketReferer) if err != nil { HandleError(err) } fmt.Println("Set Bucket Referer Success") // Get bucket referer configuration refRes, err := client.GetBucketReferer(bucketName) if err != nil { HandleError(err) } fmt.Println("Allow Empty Referer: ", refRes.AllowEmptyReferer) if refRes.AllowTruncateQueryString != nil { fmt.Println("Allow Truncate QueryString: ", *refRes.AllowTruncateQueryString) } if len(refRes.RefererList) > 0 { for _, referer := range refRes.RefererList { fmt.Println("Referer List: ", referer) } } if refRes.RefererBlacklist != nil { for _, refererBlack := range refRes.RefererBlacklist.Referer { fmt.Println("Referer Black List: ", refererBlack) } } fmt.Println("Get Bucket Referer Success") // Delete bucket referer // Case 1:Delete Refer With SetBucketReferer err = client.SetBucketReferer(bucketName, []string{}, true) if err != nil { HandleError(err) } // Case 2:Delete Refer With SetBucketRefererV2 var delBucketReferer oss.RefererXML delBucketReferer.RefererList = []string{} delBucketReferer.AllowEmptyReferer = true err = client.SetBucketRefererV2(bucketName, delBucketReferer) if err != nil { HandleError(err) } fmt.Println("Delete Bucket Referer Success") // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketRefererSample completed") }
BucketRefererSample shows how to set, get and delete the bucket referer.
BucketRefererSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_referer.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_referer.go
MIT
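A minimal referer whitelist looks like the sketch below; the origins listed are illustrative only, and the endpoint, credentials, and bucket name are placeholders.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Whitelist referers; "?" matches one character and "*" matches zero or more characters.
	referers := []string{"http://www.aliyun.com", "http://www.*.com"}
	// The last argument controls whether requests with an empty Referer header are allowed.
	if err := client.SetBucketReferer("<yourBucketName>", referers, false); err != nil {
		panic(err)
	}

	res, err := client.GetBucketReferer("<yourBucketName>")
	if err != nil {
		panic(err)
	}
	fmt.Println("allow empty referer:", res.AllowEmptyReferer, "list:", res.RefererList)
}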
func BucketLoggingSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Create target bucket to store the logging files. var targetBucketName = "target-bucket" err = client.CreateBucket(targetBucketName) if err != nil { HandleError(err) } // Case 1: Set the logging for the object prefixed with "prefix-1" and save their access logs to the target bucket err = client.SetBucketLogging(bucketName, targetBucketName, "prefix-1", true) if err != nil { HandleError(err) } // Case 2: Set the logging for the object prefixed with "prefix-2" and save their logs to the same bucket // Note: the rule will overwrite other rules if they have same bucket and prefix err = client.SetBucketLogging(bucketName, bucketName, "prefix-2", true) if err != nil { HandleError(err) } // Delete the bucket's logging configuration err = client.DeleteBucketLogging(bucketName) if err != nil { HandleError(err) } // Case 3: Set the logging without enabling it err = client.SetBucketLogging(bucketName, targetBucketName, "prefix-3", false) if err != nil { HandleError(err) } // Get the bucket's logging configuration gbl, err := client.GetBucketLogging(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket Logging:", gbl.LoggingEnabled) err = client.SetBucketLogging(bucketName, bucketName, "prefix2", true) if err != nil { HandleError(err) } // Get the bucket's logging configuration gbl, err = client.GetBucketLogging(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket Logging:", gbl.LoggingEnabled) // Delete the bucket's logging configuration err = client.DeleteBucketLogging(bucketName) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } err = client.DeleteBucket(targetBucketName) if err != nil { HandleError(err) } fmt.Println("BucketLoggingSample completed") }
BucketLoggingSample shows how to set, get and delete the bucket logging configuration
BucketLoggingSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_logging.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_logging.go
MIT
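A minimal set/get/delete round trip for bucket logging is sketched below; the source bucket, target bucket, and log prefix are placeholders of my choosing, not values from the sample.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/buckets (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Write access logs for <sourceBucket> into <targetBucket>, prefixing log objects with "access-log/".
	if err := client.SetBucketLogging("<sourceBucket>", "<targetBucket>", "access-log/", true); err != nil {
		panic(err)
	}

	// Read the logging configuration back.
	gbl, err := client.GetBucketLogging("<sourceBucket>")
	if err != nil {
		panic(err)
	}
	fmt.Println("logging configuration:", gbl.LoggingEnabled)

	// Turn logging off again.
	if err := client.DeleteBucketLogging("<sourceBucket>"); err != nil {
		panic(err)
	}
}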
func BucketTaggingSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Set bucket tagging tag1 := oss.Tag{ Key: "key1", Value: "value1", } tag2 := oss.Tag{ Key: "key2", Value: "value2", } tag3 := oss.Tag{ Key: "key3", Value: "value2", } tagging := oss.Tagging{ Tags: []oss.Tag{tag1, tag2, tag3}, } err = client.SetBucketTagging(bucketName, tagging) if err != nil { HandleError(err) } //Get bucket tagging ret, err := client.GetBucketTagging(bucketName) if err != nil { HandleError(err) } fmt.Println("Tag length: ", len(ret.Tags)) for _, tag := range ret.Tags { fmt.Printf("Tag Key: %s\n", tag.Key) fmt.Printf("Tag Value: %s\n", tag.Value) } //Delete one tagging err = client.DeleteBucketTagging(bucketName, oss.AddParam("tagging", "key1")) if err != nil { HandleError(err) } // Delete many tagging err = client.DeleteBucketTagging(bucketName, oss.AddParam("tagging", "key1,key2")) if err != nil { HandleError(err) } // Delete all tagging err = client.DeleteBucketTagging(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketTaggingSample completed") }
BucketTaggingSample shows how to set, get, and delete the bucket tagging.
BucketTaggingSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_tagging.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_tagging.go
MIT
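The sketch below condenses the record above to one set/get/delete cycle; tag keys and values are illustrative, and the endpoint, credentials, and bucket name are placeholders.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Attach two illustrative tags to the bucket.
	tagging := oss.Tagging{
		Tags: []oss.Tag{
			{Key: "env", Value: "test"},
			{Key: "owner", Value: "team-a"},
		},
	}
	if err := client.SetBucketTagging("<yourBucketName>", tagging); err != nil {
		panic(err)
	}

	ret, err := client.GetBucketTagging("<yourBucketName>")
	if err != nil {
		panic(err)
	}
	for _, tag := range ret.Tags {
		fmt.Printf("%s=%s\n", tag.Key, tag.Value)
	}

	// Delete a single tag by key; calling DeleteBucketTagging without the parameter removes all tags.
	if err := client.DeleteBucketTagging("<yourBucketName>", oss.AddParam("tagging", "env")); err != nil {
		panic(err)
	}
}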
func HandleError(err error) { fmt.Println("occurred error:", err) os.Exit(-1) }
HandleError is the error handling method in the sample code
HandleError
go
aliyun/aliyun-oss-go-sdk
sample/comm.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/comm.go
MIT
func GetTestBucket(bucketName string) (*oss.Bucket, error) { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { return nil, err } // Create bucket err = client.CreateBucket(bucketName) if err != nil { return nil, err } // Get bucket bucket, err := client.Bucket(bucketName) if err != nil { return nil, err } return bucket, nil }
GetTestBucket creates the test bucket
GetTestBucket
go
aliyun/aliyun-oss-go-sdk
sample/comm.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/comm.go
MIT
func DeleteTestBucketAndLiveChannel(bucketName string) error { // New Client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { return err } // Get Bucket bucket, err := client.Bucket(bucketName) if err != nil { return err } marker := "" for { result, err := bucket.ListLiveChannel(oss.Marker(marker)) if err != nil { HandleError(err) } for _, channel := range result.LiveChannel { err := bucket.DeleteLiveChannel(channel.Name) if err != nil { HandleError(err) } } if result.IsTruncated { marker = result.NextMarker } else { break } } // Delete Bucket err = client.DeleteBucket(bucketName) if err != nil { return err } return nil }
DeleteTestBucketAndLiveChannel deletes the sample's live channels and bucket; the helper exists only to keep the sample code simple and clear.
DeleteTestBucketAndLiveChannel
go
aliyun/aliyun-oss-go-sdk
sample/comm.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/comm.go
MIT
func DeleteTestBucketAndObject(bucketName string) error { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { return err } // Get bucket bucket, err := client.Bucket(bucketName) if err != nil { return err } // Delete part keyMarker := oss.KeyMarker("") uploadIDMarker := oss.UploadIDMarker("") for { lmur, err := bucket.ListMultipartUploads(keyMarker, uploadIDMarker) if err != nil { return err } for _, upload := range lmur.Uploads { var imur = oss.InitiateMultipartUploadResult{Bucket: bucket.BucketName, Key: upload.Key, UploadID: upload.UploadID} err = bucket.AbortMultipartUpload(imur) if err != nil { return err } } keyMarker = oss.KeyMarker(lmur.NextKeyMarker) uploadIDMarker = oss.UploadIDMarker(lmur.NextUploadIDMarker) if !lmur.IsTruncated { break } } // Delete objects marker := oss.Marker("") for { lor, err := bucket.ListObjects(marker) if err != nil { return err } for _, object := range lor.Objects { err = bucket.DeleteObject(object.Key) if err != nil { return err } } marker = oss.Marker(lor.NextMarker) if !lor.IsTruncated { break } } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { return err } return nil }
DeleteTestBucketAndObject deletes the test bucket and its objects
DeleteTestBucketAndObject
go
aliyun/aliyun-oss-go-sdk
sample/comm.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/comm.go
MIT
func CreateObjects(bucket *oss.Bucket, objects []Object) error { for _, object := range objects { err := bucket.PutObject(object.Key, strings.NewReader(object.Value)) if err != nil { return err } } return nil }
CreateObjects creates some objects
CreateObjects
go
aliyun/aliyun-oss-go-sdk
sample/comm.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/comm.go
MIT
func DeleteObjects(bucket *oss.Bucket, objects []Object) error { for _, object := range objects { err := bucket.DeleteObject(object.Key) if err != nil { return err } } return nil }
DeleteObjects deletes some objects.
DeleteObjects
go
aliyun/aliyun-oss-go-sdk
sample/comm.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/comm.go
MIT
func BucketLifecycleSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Case 1: Set the lifecycle. The rule ID is rule1 and the applied objects' prefix is one and the last modified Date is before 2015/11/11 expiration := oss.LifecycleExpiration{ CreatedBeforeDate: "2015-11-11T00:00:00.000Z", } rule1 := oss.LifecycleRule{ ID: "rule1", Prefix: "one", Status: "Enabled", Expiration: &expiration, } var rules = []oss.LifecycleRule{rule1} err = client.SetBucketLifecycle(bucketName, rules) if err != nil { HandleError(err) } // Case 2: Get the bucket's lifecycle lc, err := client.GetBucketLifecycle(bucketName) if err != nil { HandleError(err) } fmt.Printf("Bucket Lifecycle:%v, %v\n", lc.Rules, *lc.Rules[0].Expiration) // Case 3: Set the lifecycle, The rule ID is rule2 and the applied objects' prefix is two. The object start with the prefix will be transited to IA storage Type 3 days latter, and to archive storage type 30 days latter transitionIA := oss.LifecycleTransition{ Days: 3, StorageClass: oss.StorageIA, } transitionArch := oss.LifecycleTransition{ Days: 30, StorageClass: oss.StorageArchive, } rule2 := oss.LifecycleRule{ ID: "rule2", Prefix: "two", Status: "Enabled", Transitions: []oss.LifecycleTransition{transitionIA, transitionArch}, } rules = []oss.LifecycleRule{rule2} err = client.SetBucketLifecycle(bucketName, rules) if err != nil { HandleError(err) } // Case 4: Set the lifecycle, The rule ID is rule3 and the applied objects' prefix is three. The object start with the prefix will be transited to IA storage Type 3 days latter, and to archive storage type 30 days latter, the uncompleted multipart upload will be abort 3 days latter. abortMPU := oss.LifecycleAbortMultipartUpload{ Days: 3, } rule3 := oss.LifecycleRule{ ID: "rule3", Prefix: "three", Status: "Enabled", AbortMultipartUpload: &abortMPU, } rules = append(lc.Rules, rule3) err = client.SetBucketLifecycle(bucketName, rules) if err != nil { HandleError(err) } // Case 5: Set the lifecycle. The rule ID is rule4 and the applied objects' has the tagging which prefix is four and the last modified Date is before 2015/11/11 expiration = oss.LifecycleExpiration{ CreatedBeforeDate: "2015-11-11T00:00:00.000Z", } tag1 := oss.Tag{ Key: "key1", Value: "value1", } tag2 := oss.Tag{ Key: "key2", Value: "value2", } rule4 := oss.LifecycleRule{ ID: "rule4", Prefix: "four", Status: "Enabled", Tags: []oss.Tag{tag1, tag2}, Expiration: &expiration, } rules = []oss.LifecycleRule{rule4} err = client.SetBucketLifecycle(bucketName, rules) if err != nil { HandleError(err) } // Case 6: Set the lifecycle. The rule ID is filter one and Include Not exclusion conditions expiration = oss.LifecycleExpiration{ CreatedBeforeDate: "2015-11-11T00:00:00.000Z", } tag := oss.Tag{ Key: "key1", Value: "value1", } greater := int64(500) less := int64(645000) filter := oss.LifecycleFilter{ ObjectSizeLessThan: &greater, ObjectSizeGreaterThan: &less, Not: []oss.LifecycleFilterNot{ { Prefix: "logs/log2", Tag: &tag, }, }, } filterRule := oss.LifecycleRule{ ID: "filter one", Prefix: "logs", Status: "Enabled", Expiration: &expiration, Transitions: []oss.LifecycleTransition{ { Days: 10, StorageClass: oss.StorageIA, }, }, Filter: &filter, } rules = []oss.LifecycleRule{filterRule} err = client.SetBucketLifecycle(bucketName, rules) if err != nil { HandleError(err) } // Case 7: Set the lifecycle. 
The rules with amtime and return to std when visit isTrue := true isFalse := false rule1 = oss.LifecycleRule{ ID: "mtime transition1", Prefix: "logs1", Status: "Enabled", Transitions: []oss.LifecycleTransition{ { Days: 30, StorageClass: oss.StorageIA, }, }, } rule2 = oss.LifecycleRule{ ID: "mtime transition2", Prefix: "logs2", Status: "Enabled", Transitions: []oss.LifecycleTransition{ { Days: 30, StorageClass: oss.StorageIA, IsAccessTime: &isFalse, }, }, } rule3 = oss.LifecycleRule{ ID: "amtime transition1", Prefix: "logs3", Status: "Enabled", Transitions: []oss.LifecycleTransition{ { Days: 30, StorageClass: oss.StorageIA, IsAccessTime: &isTrue, ReturnToStdWhenVisit: &isFalse, }, }, } rule4 = oss.LifecycleRule{ ID: "amtime transition2", Prefix: "logs4", Status: "Enabled", Transitions: []oss.LifecycleTransition{ { Days: 30, StorageClass: oss.StorageIA, IsAccessTime: &isTrue, ReturnToStdWhenVisit: &isTrue, AllowSmallFile: &isFalse, }, }, } rule5 := oss.LifecycleRule{ ID: "amtime transition3", Prefix: "logs5", Status: "Enabled", NonVersionTransitions: []oss.LifecycleVersionTransition{ { NoncurrentDays: 10, StorageClass: oss.StorageIA, IsAccessTime: &isTrue, ReturnToStdWhenVisit: &isFalse, AllowSmallFile: &isTrue, }, }, } rules = []oss.LifecycleRule{rule1, rule2, rule3, rule4, rule5} err = client.SetBucketLifecycle(bucketName, rules) if err != nil { HandleError(err) } // case 8: Set bucket's Lifecycle with xml xmlData := `<?xml version="1.0" encoding="UTF-8"?> <LifecycleConfiguration> <Rule> <ID>mtime transition1</ID> <Prefix>logs1/</Prefix> <Status>Enabled</Status> <Transition> <Days>30</Days> <StorageClass>IA</StorageClass> </Transition> </Rule> <Rule> <ID>mtime transition2</ID> <Prefix>logs2/</Prefix> <Status>Enabled</Status> <Transition> <Days>30</Days> <StorageClass>IA</StorageClass> <IsAccessTime>false</IsAccessTime> </Transition> </Rule> <Rule> <ID>atime transition1</ID> <Prefix>logs3/</Prefix> <Status>Enabled</Status> <Transition> <Days>30</Days> <StorageClass>IA</StorageClass> <IsAccessTime>true</IsAccessTime> <ReturnToStdWhenVisit>false</ReturnToStdWhenVisit> </Transition> </Rule> <Rule> <ID>atime transition2</ID> <Prefix>logs4/</Prefix> <Status>Enabled</Status> <Transition> <Days>30</Days> <StorageClass>IA</StorageClass> <IsAccessTime>true</IsAccessTime> <ReturnToStdWhenVisit>true</ReturnToStdWhenVisit> <AllowSmallFile>false</AllowSmallFile> </Transition> </Rule> <Rule> <ID>atime transition3</ID> <Prefix>logs5/</Prefix> <Status>Enabled</Status> <NoncurrentVersionTransition> <NoncurrentDays>10</NoncurrentDays> <StorageClass>IA</StorageClass> <IsAccessTime>true</IsAccessTime> <ReturnToStdWhenVisit>false</ReturnToStdWhenVisit> <AllowSmallFile>true</AllowSmallFile> </NoncurrentVersionTransition> </Rule> <Rule> <ID>r1</ID> <Prefix>abc/</Prefix> <Filter> <ObjectSizeGreaterThan>500</ObjectSizeGreaterThan> <ObjectSizeLessThan>64000</ObjectSizeLessThan> <Not> <Prefix>abc/not1/</Prefix> <Tag> <Key>notkey1</Key> <Value>notvalue1</Value> </Tag> </Not> <Not> <Prefix>abc/not2/</Prefix> <Tag> <Key>notkey2</Key> <Value>notvalue2</Value> </Tag> </Not> </Filter> </Rule> </LifecycleConfiguration> ` err = client.SetBucketLifecycleXml(bucketName, xmlData) if err != nil { HandleError(err) } // case 9: Get bucket's Lifecycle print info lcRes, err := client.GetBucketLifecycle(bucketName) if err != nil { HandleError(err) } for _, rule := range lcRes.Rules { fmt.Println("Lifecycle Rule Id:", rule.ID) fmt.Println("Lifecycle Rule Prefix:", rule.Prefix) fmt.Println("Lifecycle Rule Status:", rule.Status) if 
rule.Expiration != nil { fmt.Println("Lifecycle Rule Expiration Days:", rule.Expiration.Days) fmt.Println("Lifecycle Rule Expiration Date:", rule.Expiration.Date) fmt.Println("Lifecycle Rule Expiration Created Before Date:", rule.Expiration.CreatedBeforeDate) if rule.Expiration.ExpiredObjectDeleteMarker != nil { fmt.Println("Lifecycle Rule Expiration Expired Object DeleteMarker:", *rule.Expiration.ExpiredObjectDeleteMarker) } } for _, tag := range rule.Tags { fmt.Println("Lifecycle Rule Tag Key:", tag.Key) fmt.Println("Lifecycle Rule Tag Value:", tag.Value) } for _, transition := range rule.Transitions { fmt.Println("Lifecycle Rule Transition Days:", transition.Days) fmt.Println("Lifecycle Rule Transition Created Before Date:", transition.CreatedBeforeDate) fmt.Println("Lifecycle Rule Transition Storage Class:", transition.StorageClass) if transition.IsAccessTime != nil { fmt.Println("Lifecycle Rule Transition Is Access Time:", *transition.IsAccessTime) } if transition.ReturnToStdWhenVisit != nil { fmt.Println("Lifecycle Rule Transition Return To Std When Visit:", *transition.ReturnToStdWhenVisit) } if transition.AllowSmallFile != nil { fmt.Println("Lifecycle Rule Transition Allow Small File:", *transition.AllowSmallFile) } } if rule.AbortMultipartUpload != nil { fmt.Println("Lifecycle Rule Abort Multipart Upload Days:", rule.AbortMultipartUpload.Days) fmt.Println("Lifecycle Rule Abort Multipart Upload Created Before Date:", rule.AbortMultipartUpload.CreatedBeforeDate) } if rule.NonVersionExpiration != nil { fmt.Println("Lifecycle Non Version Expiration Non Current Days:", rule.NonVersionExpiration.NoncurrentDays) } for _, nonVersionTransition := range rule.NonVersionTransitions { fmt.Println("Lifecycle Rule Non Version Transitions Non current Days:", nonVersionTransition.NoncurrentDays) fmt.Println("Lifecycle Rule Non Version Transition Storage Class:", nonVersionTransition.StorageClass) if nonVersionTransition.IsAccessTime != nil { fmt.Println("Lifecycle Rule Non Version Transition Is Access Time:", *nonVersionTransition.IsAccessTime) } if nonVersionTransition.ReturnToStdWhenVisit != nil { fmt.Println("Lifecycle Rule Non Version Transition Return To Std When Visit:", *nonVersionTransition.ReturnToStdWhenVisit) } if nonVersionTransition.AllowSmallFile != nil { fmt.Println("Lifecycle Rule Non Version Allow Small File:", *nonVersionTransition.AllowSmallFile) } if rule.Filter != nil { if rule.Filter.ObjectSizeGreaterThan != nil { fmt.Println("Lifecycle Rule Filter Object Size Greater Than:", *rule.Filter.ObjectSizeGreaterThan) } if rule.Filter.ObjectSizeLessThan != nil { fmt.Println("Lifecycle Rule Filter Object Size Less Than:", *rule.Filter.ObjectSizeLessThan) } for _, filterNot := range rule.Filter.Not { fmt.Println("Lifecycle Rule Filter Not Prefix:", filterNot.Prefix) if filterNot.Tag != nil { fmt.Println("Lifecycle Rule Filter Not Tag Key:", filterNot.Tag.Key) fmt.Println("Lifecycle Rule Filter Not Tag Value:", filterNot.Tag.Value) } } } } } // Case 10: Delete bucket's Lifecycle err = client.DeleteBucketLifecycle(bucketName) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketLifecycleSample completed") }
BucketLifecycleSample shows how to set, get and delete bucket's lifecycle.
BucketLifecycleSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_lifecycle.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_lifecycle.go
MIT
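The record above covers many rule variants; a minimal tiering rule, which is the most common case, looks like the sketch below (rule ID, prefix, and day counts are illustrative, and the endpoint, credentials, and bucket name are placeholders).

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Objects under "logs/" move to IA after 30 days and to Archive after 90 days.
	rule := oss.LifecycleRule{
		ID:     "logs-tiering",
		Prefix: "logs/",
		Status: "Enabled",
		Transitions: []oss.LifecycleTransition{
			{Days: 30, StorageClass: oss.StorageIA},
			{Days: 90, StorageClass: oss.StorageArchive},
		},
	}
	if err := client.SetBucketLifecycle("<yourBucketName>", []oss.LifecycleRule{rule}); err != nil {
		panic(err)
	}

	// Inspect and then remove the configuration.
	lc, err := client.GetBucketLifecycle("<yourBucketName>")
	if err != nil {
		panic(err)
	}
	fmt.Println("lifecycle rules:", len(lc.Rules))

	if err := client.DeleteBucketLifecycle("<yourBucketName>"); err != nil {
		panic(err)
	}
}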
func ListBucketsSample() { var myBuckets = []string{ "my-bucket-1", "my-bucket-11", "my-bucket-2", "my-bucket-21", "my-bucket-22", "my-bucket-3", "my-bucket-31", "my-bucket-32"} // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Remove other bucket lbr, err := client.ListBuckets() if err != nil { HandleError(err) } for _, bucket := range lbr.Buckets { err = client.DeleteBucket(bucket.Name) if err != nil { //HandleError(err) } } // Create bucket for _, bucketName := range myBuckets { err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } } // Case 1: Use default parameter lbr, err = client.ListBuckets() if err != nil { HandleError(err) } fmt.Println("my buckets:", lbr.Buckets) // Case 2: Specify the max keys : 3 lbr, err = client.ListBuckets(oss.MaxKeys(3)) if err != nil { HandleError(err) } fmt.Println("my buckets max num:", lbr.Buckets) // Case 3: Specify the prefix of buckets. lbr, err = client.ListBuckets(oss.Prefix("my-bucket-2")) if err != nil { HandleError(err) } fmt.Println("my buckets prefix :", lbr.Buckets) // Case 4: Specify the marker to return from a certain one lbr, err = client.ListBuckets(oss.Marker("my-bucket-22")) if err != nil { HandleError(err) } fmt.Println("my buckets marker :", lbr.Buckets) // Case 5: Specify max key and list all buckets with paging, return 3 items each time. marker := oss.Marker("") for { lbr, err = client.ListBuckets(oss.MaxKeys(3), marker) if err != nil { HandleError(err) } marker = oss.Marker(lbr.NextMarker) fmt.Println("my buckets page :", lbr.Buckets) if !lbr.IsTruncated { break } } // Case 6: List bucket with marker and max key; return 3 items each time. marker = oss.Marker("my-bucket-22") for { lbr, err = client.ListBuckets(oss.MaxKeys(3), marker) if err != nil { HandleError(err) } marker = oss.Marker(lbr.NextMarker) fmt.Println("my buckets marker&page :", lbr.Buckets) if !lbr.IsTruncated { break } } // Case 7: List bucket with prefix and max key, return 3 items each time. pre := oss.Prefix("my-bucket-2") marker = oss.Marker("") for { lbr, err = client.ListBuckets(oss.MaxKeys(3), pre, marker) if err != nil { HandleError(err) } pre = oss.Prefix(lbr.Prefix) marker = oss.Marker(lbr.NextMarker) fmt.Println("my buckets prefix&page :", lbr.Buckets) if !lbr.IsTruncated { break } } // Delete bucket for _, bucketName := range myBuckets { err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } } fmt.Println("ListsBucketSample completed") }
ListBucketsSample shows how to list buckets, with both default and specified parameters.
ListBucketsSample
go
aliyun/aliyun-oss-go-sdk
sample/list_buckets.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/list_buckets.go
MIT
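The paging pattern used throughout the record above boils down to the sketch here (prefix and page size are illustrative; endpoint and credentials are placeholders).

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Page through all buckets whose names start with "my-bucket-", three per request.
	prefix := oss.Prefix("my-bucket-")
	marker := oss.Marker("")
	for {
		lbr, err := client.ListBuckets(oss.MaxKeys(3), prefix, marker)
		if err != nil {
			panic(err)
		}
		for _, b := range lbr.Buckets {
			fmt.Println(b.Name)
		}
		if !lbr.IsTruncated {
			break
		}
		marker = oss.Marker(lbr.NextMarker)
	}
}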
func DescribeRegionsSample() { // Create archive bucket client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Get describe regions regionEndpoint := "oss-cn-hangzhou" list, err := client.DescribeRegions(oss.AddParam("regions", regionEndpoint)) if err != nil { HandleError(err) } for _, region := range list.Regions { fmt.Printf("Region:%s\n", region.Region) fmt.Printf("Region Internet Endpoint:%s\n", region.InternetEndpoint) fmt.Printf("Region Internal Endpoint:%s\n", region.InternalEndpoint) fmt.Printf("Region Accelerate Endpoint:%s\n", region.AccelerateEndpoint) } fmt.Println("Get Describe Regions Success") // List describe regions list, err = client.DescribeRegions() if err != nil { HandleError(err) } for _, region := range list.Regions { fmt.Printf("Region:%s\n", region.Region) fmt.Printf("Region Internet Endpoint:%s\n", region.InternetEndpoint) fmt.Printf("Region Internal Endpoint:%s\n", region.InternalEndpoint) fmt.Printf("Region Accelerate Endpoint:%s\n", region.AccelerateEndpoint) } fmt.Println("List Describe Regions Success") fmt.Println("DescribeRegionsSample completed") }
DescribeRegionsSample shows how to query the description of a single region or list all regions.
DescribeRegionsSample
go
aliyun/aliyun-oss-go-sdk
sample/describe_regions.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/describe_regions.go
MIT
func BucketResponseHeaderSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Set bucket's response header. reqHeader := oss.PutBucketResponseHeader{ Rule: []oss.ResponseHeaderRule{ { Name: "name1", Filters: oss.ResponseHeaderRuleFilters{ []string{ "Put", "GetObject", }, }, HideHeaders: oss.ResponseHeaderRuleHeaders{ []string{ "Last-Modified", }, }, }, { Name: "name2", Filters: oss.ResponseHeaderRuleFilters{ []string{ "*", }, }, HideHeaders: oss.ResponseHeaderRuleHeaders{ []string{ "Last-Modified", }, }, }, }, } err = client.PutBucketResponseHeader(bucketName, reqHeader) if err != nil { HandleError(err) } fmt.Println("Bucket Response Header Set Success!") // Get bucket's response header. header, err := client.GetBucketResponseHeader(bucketName) if err != nil { HandleError(err) } for _, rule := range header.Rule { fmt.Printf("Rule Name:%#v\n", rule.Name) if len(rule.Filters.Operation) > 0 { for _, Operation := range rule.Filters.Operation { fmt.Printf("Rule Filter Operation:%s\n", Operation) } } if len(rule.HideHeaders.Header) > 0 { for _, head := range rule.HideHeaders.Header { fmt.Printf("Rule Hide Headers Header:%s\n", head) } } } // Delete bucket's response header. err = client.DeleteBucketResponseHeader(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket Response Header Delete Success!") fmt.Println("BucketResponseHeaderSample completed") }
BucketResponseHeaderSample shows how to set, get and delete the bucket's response header.
BucketResponseHeaderSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_responseheader.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_responseheader.go
MIT
func BucketEncryptionSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create a bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // SetBucketEncryption:AES256 ,"123" encryptionRule := oss.ServerEncryptionRule{} encryptionRule.SSEDefault.SSEAlgorithm = string(oss.AESAlgorithm) err = client.SetBucketEncryption(bucketName, encryptionRule) if err != nil { HandleError(err) } // Get bucket encryption encryptionResult, err := client.GetBucketEncryption(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket Encryption:", encryptionResult) // Delete the bucket err = client.DeleteBucketEncryption(bucketName) if err != nil { HandleError(err) } // Delete the object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketEncryptionSample completed") }
BucketEncryptionSample shows how to set, get, and delete the bucket encryption configuration.
BucketEncryptionSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_encryption.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_encryption.go
MIT
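A minimal default-encryption round trip, reduced from the record above, is sketched below; the endpoint, credentials, and bucket name are placeholders.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Default server-side encryption with AES-256 for new objects in the bucket.
	rule := oss.ServerEncryptionRule{}
	rule.SSEDefault.SSEAlgorithm = string(oss.AESAlgorithm)
	if err := client.SetBucketEncryption("<yourBucketName>", rule); err != nil {
		panic(err)
	}

	// Read the encryption configuration back.
	res, err := client.GetBucketEncryption("<yourBucketName>")
	if err != nil {
		panic(err)
	}
	fmt.Println("encryption rule:", res)

	// Remove the default encryption configuration.
	if err := client.DeleteBucketEncryption("<yourBucketName>"); err != nil {
		panic(err)
	}
}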
func BucketCORSSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } rule1 := oss.CORSRule{ AllowedOrigin: []string{"*"}, AllowedMethod: []string{"PUT", "GET", "POST"}, AllowedHeader: []string{}, ExposeHeader: []string{}, MaxAgeSeconds: 100, } rule2 := oss.CORSRule{ AllowedOrigin: []string{"http://www.a.com", "http://www.b.com"}, AllowedMethod: []string{"GET"}, AllowedHeader: []string{"Authorization"}, ExposeHeader: []string{"x-oss-test", "x-oss-test1"}, MaxAgeSeconds: 100, } // Case 1: Set the bucket CORS rules err = client.SetBucketCORS(bucketName, []oss.CORSRule{rule1}) if err != nil { HandleError(err) } // Case 2: Set the bucket CORS rules. if CORS rules exist, they will be overwritten. err = client.SetBucketCORS(bucketName, []oss.CORSRule{rule1, rule2}) if err != nil { HandleError(err) } // Case 3: Set the bucket CORS rules. if CORS rules exist, they will be overwritten. isTrue := true put := oss.PutBucketCORS{} put.CORSRules = []oss.CORSRule{rule1, rule2} put.ResponseVary = &isTrue err = client.SetBucketCORSV2(bucketName, put) if err != nil { HandleError(err) } // Get the bucket's CORS corsRes, err := client.GetBucketCORS(bucketName) if err != nil { HandleError(err) } for _, rule := range corsRes.CORSRules { fmt.Printf("Cors Rules Allowed Origin:%s\n", rule.AllowedOrigin) fmt.Printf("Cors Rules Allowed Method:%s\n", rule.AllowedMethod) fmt.Printf("Cors Rules Allowed Header:%s\n", rule.AllowedHeader) fmt.Printf("Cors Rules Expose Header:%s\n", rule.ExposeHeader) fmt.Printf("Cors Rules Max Age Seconds:%d\n", rule.MaxAgeSeconds) } if corsRes.ResponseVary != nil { fmt.Printf("Cors Rules Response Vary:%t\n", *corsRes.ResponseVary) } // Delete bucket's CORS err = client.DeleteBucketCORS(bucketName) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket CORS Sample completed") }
BucketCORSSample shows how to get or set the bucket CORS.
BucketCORSSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_cors.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_cors.go
MIT
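A single-rule CORS configuration, reduced from the record above, is sketched here; the origin and headers are illustrative values of my choosing, and the endpoint, credentials, and bucket name are placeholders.

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Placeholder endpoint/credentials/bucket (assumptions).
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Allow browser GET requests from one origin and expose a response header to the page.
	rule := oss.CORSRule{
		AllowedOrigin: []string{"http://www.example-origin.com"},
		AllowedMethod: []string{"GET"},
		AllowedHeader: []string{"Authorization"},
		ExposeHeader:  []string{"x-oss-request-id"},
		MaxAgeSeconds: 100,
	}
	if err := client.SetBucketCORS("<yourBucketName>", []oss.CORSRule{rule}); err != nil {
		panic(err)
	}

	// Read the CORS rules back.
	res, err := client.GetBucketCORS("<yourBucketName>")
	if err != nil {
		panic(err)
	}
	for _, r := range res.CORSRules {
		fmt.Println("origins:", r.AllowedOrigin, "methods:", r.AllowedMethod)
	}
}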
func BucketPolicySample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } // Create the bucket with default parameters err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // the policy string var policyInfo string policyInfo = ` { "Version":"1", "Statement":[ { "Action":[ "oss:GetObject", "oss:PutObject" ], "Effect":"Deny", "Principal":"[123456790]", "Resource":["acs:oss:*:1234567890:*/*"] } ] }` // Set policy err = client.SetBucketPolicy(bucketName, policyInfo) if err != nil { HandleError(err) } // Get Bucket policy ret, err := client.GetBucketPolicy(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket policy:", ret) // Delete Bucket policy err = client.DeleteBucketPolicy(bucketName) if err != nil { HandleError(err) } // Delete bucket err = client.DeleteBucket(bucketName) if err != nil { HandleError(err) } fmt.Println("BucketPolicySample completed") }
BucketPolicySample shows how to set, get and delete the bucket policy configuration
BucketPolicySample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_policy.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_policy.go
MIT
func CnameSample() { // New client client, err := oss.New(endpoint4Cname, accessID, accessKey, oss.UseCname(true)) if err != nil { HandleError(err) } // Create bucket err = client.CreateBucket(bucketName) if err != nil { HandleError(err) } // Set bucket ACL err = client.SetBucketACL(bucketName, oss.ACLPrivate) if err != nil { HandleError(err) } // Look up bucket ACL gbar, err := client.GetBucketACL(bucketName) if err != nil { HandleError(err) } fmt.Println("Bucket ACL:", gbar.ACL) // List buckets, the list operation could not be done by cname's endpoint _, err = client.ListBuckets() if err == nil { HandleError(err) } bucket, err := client.Bucket(bucketName) if err != nil { HandleError(err) } objectValue := "长忆观潮, 满郭人争江上望。来疑沧海尽成空, 万面鼓声中。弄潮儿向涛头立, 手把红旗旗不湿。别来几向梦中看, 梦觉尚心寒。" // Put object err = bucket.PutObject(objectKey, strings.NewReader(objectValue)) if err != nil { HandleError(err) } // Get object body, err := bucket.GetObject(objectKey) if err != nil { HandleError(err) } data, err := ioutil.ReadAll(body) body.Close() if err != nil { HandleError(err) } fmt.Println(objectKey, ":", string(data)) // Put object from file err = bucket.PutObjectFromFile(objectKey, localFile) if err != nil { HandleError(err) } // Get object to file err = bucket.GetObjectToFile(objectKey, localFile) if err != nil { HandleError(err) } // List objects lor, err := bucket.ListObjects() if err != nil { HandleError(err) } fmt.Println("objects:", lor.Objects) // Delete object err = bucket.DeleteObject(objectKey) if err != nil { HandleError(err) } fmt.Println("CnameSample completed") }
CnameSample shows how to use a CNAME (custom domain) endpoint with the client.
CnameSample
go
aliyun/aliyun-oss-go-sdk
sample/cname_sample.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/cname_sample.go
MIT
func BucketAccessMonitorSample() { // New client client, err := oss.New(endpoint, accessID, accessKey) if err != nil { HandleError(err) } access := oss.PutBucketAccessMonitor{ Status: "Enabled", } // put bucket access monitor err = client.PutBucketAccessMonitor(bucketName, access) if err != nil { HandleError(err) } fmt.Println("put bucket access monitor success!") // put bucket access monitor in xml format xml := `<?xml version="1.0" encoding="UTF-8"?> <AccessMonitorConfiguration> <Status>Enabled</Status> </AccessMonitorConfiguration> ` err = client.PutBucketAccessMonitorXml(bucketName, xml) if err != nil { HandleError(err) } fmt.Println("put bucket access monitor in xml format success!") // get bucket access monitor result, err := client.GetBucketAccessMonitor(bucketName) if err != nil { HandleError(err) } fmt.Printf("bucket access monitor config is:%s\n", result.Status) // get bucket access monitor in xml format xmlData, err := client.GetBucketAccessMonitorXml(bucketName) if err != nil { HandleError(err) } fmt.Printf("bucket access monitor config is:%s\n", xmlData) fmt.Println("BucketAccessMonitorSample completed") }
BucketAccessMonitorSample shows how to set and get the bucket access monitor configuration.
BucketAccessMonitorSample
go
aliyun/aliyun-oss-go-sdk
sample/bucket_accessmonitor.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/bucket_accessmonitor.go
MIT
func SelectObjectSample() { // Create a bucket bucket, err := GetTestBucket(bucketName) if err != nil { HandleError(err) } // // Create a Csv object // err = bucket.PutObjectFromFile(objectKey, localCsvFile) if err != nil { HandleError(err) } // Create Csv Meta csvMeta := oss.CsvMetaRequest{} ret, err := bucket.CreateSelectCsvObjectMeta(objectKey, csvMeta) if err != nil { HandleError(err) } fmt.Println("csv file meta:", ret) // case 1: Isn't NULL selReq := oss.SelectRequest{} selReq.Expression = "select Year, StateAbbr, CityName, PopulationCount from ossobject where CityName != ''" selReq.InputSerializationSelect.CsvBodyInput.FileHeaderInfo = "Use" body, err := bucket.SelectObject(objectKey, selReq) if err != nil { HandleError(err) } defer body.Close() databyte, err := ioutil.ReadAll(body) if err != nil { HandleError(err) } fmt.Println("some data in SelectCSVObject result:", string(databyte[:9])) // case 2: Like selReq = oss.SelectRequest{} selReq.Expression = "select Year, StateAbbr, CityName, Short_Question_Text from ossobject where Measure like '%blood pressure%Years'" selReq.InputSerializationSelect.CsvBodyInput.FileHeaderInfo = "Use" body, err = bucket.SelectObject(objectKey, selReq) if err != nil { HandleError(err) } defer body.Close() databyte, err = ioutil.ReadAll(body) if err != nil { HandleError(err) } fmt.Println("some data in SelectCSVObject result:", string(databyte[:9])) // delete object err = bucket.DeleteObject(objectKey) if err != nil { HandleError(err) } // // Create a LINES json object // err = bucket.PutObjectFromFile(objectKey, localJSONLinesFile) if err != nil { HandleError(err) } // Create LINES JSON Meta jsonMeta := oss.JsonMetaRequest{ InputSerialization: oss.InputSerialization { JSON: oss.JSON { JSONType:"LINES", }, }, } restSt, err := bucket.CreateSelectJsonObjectMeta(objectKey, jsonMeta) if err != nil { HandleError(err) } fmt.Println("csv json meta:", restSt) // case 1: sql where A=B selReq = oss.SelectRequest{} selReq.Expression = "select * from ossobject where party = 'Democrat'" selReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter = "," selReq.InputSerializationSelect.JsonBodyInput.JSONType = "LINES" body, err = bucket.SelectObject(objectKey, selReq) if err != nil { HandleError(err) } defer body.Close() databyte, err = ioutil.ReadAll(body) if err != nil { HandleError(err) } fmt.Println("some data in SelectJsonObject result:", string(databyte[:9])) // case 2: LIKE selReq = oss.SelectRequest{} selReq.Expression = "select person.firstname, person.lastname from ossobject where person.birthday like '1959%'" selReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter = "," selReq.InputSerializationSelect.JsonBodyInput.JSONType = "LINES" body, err = bucket.SelectObject(objectKey, selReq) if err != nil { HandleError(err) } defer body.Close() databyte, err = ioutil.ReadAll(body) if err != nil { HandleError(err) } fmt.Println("some data in SelectJsonObject result:", string(databyte[:9])) // delete object err = bucket.DeleteObject(objectKey) if err != nil { HandleError(err) } // // Create a Document json object // err = bucket.PutObjectFromFile(objectKey, localJSONFile) if err != nil { HandleError(err) } // case 1: int avg, max, min selReq = oss.SelectRequest{} selReq.Expression = ` select avg(cast(person.cspanid as int)), max(cast(person.cspanid as int)), min(cast(person.cspanid as int)) from ossobject.objects[*] where person.cspanid = 1011723 ` selReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter = "," 
selReq.InputSerializationSelect.JsonBodyInput.JSONType = "Document" body, err = bucket.SelectObject(objectKey, selReq) if err != nil { HandleError(err) } defer body.Close() databyte, err = ioutil.ReadAll(body) if err != nil { HandleError(err) } fmt.Println("data:", string(databyte)) // case 2: Concat selReq = oss.SelectRequest{} selReq.Expression = ` select person from ossobject.objects[*] where (person.firstname || person.lastname) = 'JohnKennedy' ` selReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter = "," selReq.InputSerializationSelect.JsonBodyInput.JSONType = "Document" body, err = bucket.SelectObject(objectKey, selReq) if err != nil { HandleError(err) } defer body.Close() databyte, err = ioutil.ReadAll(body) if err != nil { HandleError(err) } fmt.Println("some data in SelectJsonObject result:", string(databyte[:9])) // Delete the object and bucket err = DeleteTestBucketAndObject(bucketName) if err != nil { HandleError(err) } fmt.Println("SelectObjectSample completed") }
SelectObjectSample shows how to query data from CSV and JSON objects with SQL select expressions; a trimmed CSV-only variant follows this entry
SelectObjectSample
go
aliyun/aliyun-oss-go-sdk
sample/select_object.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample/select_object.go
MIT
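The sample above exercises several query shapes at once. For quick reference, a minimal CSV-only query might look like the sketch below; the function name selectCsvCitiesSample is hypothetical, and it assumes the bucket and CSV object already exist and reuses the sample's helpers and globals (GetTestBucket, HandleError, bucketName, objectKey).

func selectCsvCitiesSample() {
	// Open the existing test bucket.
	bucket, err := GetTestBucket(bucketName)
	if err != nil {
		HandleError(err)
	}
	// Select all non-empty city names from the CSV, using the header row for column names.
	selReq := oss.SelectRequest{}
	selReq.Expression = "select CityName from ossobject where CityName != ''"
	selReq.InputSerializationSelect.CsvBodyInput.FileHeaderInfo = "Use"
	body, err := bucket.SelectObject(objectKey, selReq)
	if err != nil {
		HandleError(err)
	}
	defer body.Close()
	data, err := ioutil.ReadAll(body)
	if err != nil {
		HandleError(err)
	}
	fmt.Println("select result:", string(data))
}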
func SampleMultipleMasterRsa() { // create oss client client, err := oss.New("<yourEndpoint>", "<yourAccessKeyId>", "<yourAccessKeySecret>") if err != nil { fmt.Println("Error:", err) os.Exit(-1) } // Create a description of the master key. Once created, it cannot be modified. The master key description and the master key are one-to-one correspondence. // If all objects use the same master key, the master key description can also be empty, but subsequent replacement of the master key is not supported. // Because if the description is empty, it is impossible to determine which master key is used when decrypting object. // It is strongly recommended that: configure the master key description(json string) for each master key, and the client should save the correspondence between them. // The server does not save their correspondence // Map converted by the master key description information (json string) materialDesc := make(map[string]string) materialDesc["desc"] = "<your master encrypt key material describe information>" // Create a master key object based on the master key description masterRsaCipher, err := osscrypto.CreateMasterRsa(materialDesc, "<your rsa public key>", "<your rsa private key>") if err != nil { fmt.Println("Error:", err) os.Exit(-1) } // Create an interface for encryption based on the master key object, encrypt using aec ctr mode contentProvider := osscrypto.CreateAesCtrCipher(masterRsaCipher) // If you need to decrypt objects encrypted by different ma keys, you need to provide this interface. var mockRsaManager MockRsaManager var options []osscrypto.CryptoBucketOption options = append(options, osscrypto.SetMasterCipherManager(&mockRsaManager)) // Get a storage space for client encryption, the bucket has to be created // Client-side encrypted buckets have similar usages to ordinary buckets. cryptoBucket, err := osscrypto.GetCryptoBucket(client, "<yourBucketName>", contentProvider, options...) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } // put object ,will be automatically encrypted err = cryptoBucket.PutObject("<yourObjectName>", bytes.NewReader([]byte("yourObjectValueByteArrary"))) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } // get object ,will be automatically decrypted body, err := cryptoBucket.GetObject("<otherObjectNameEncryptedWithOtherRsa>") if err != nil { fmt.Println("Error:", err) os.Exit(-1) } defer body.Close() data, err := ioutil.ReadAll(body) if err != nil { fmt.Println("Error:", err) os.Exit(-1) } fmt.Println("data:", string(data)) }
SampleMultipleMasterRsa shows how to decrypt objects that were encrypted with different master RSA keys
SampleMultipleMasterRsa
go
aliyun/aliyun-oss-go-sdk
sample_crypto/sample_crypto.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/sample_crypto/sample_crypto.go
MIT
func (s *OssBucketSuite) SetUpSuite(c *C) { time.Sleep(timeoutInOperation) if cloudboxControlEndpoint == "" { client, err := New(endpoint, accessID, accessKey) c.Assert(err, IsNil) s.client = client s.client.CreateBucket(bucketName) err = s.client.CreateBucket(archiveBucketName, StorageClass(StorageArchive)) c.Assert(err, IsNil) bucket, err := s.client.Bucket(bucketName) c.Assert(err, IsNil) s.bucket = bucket archiveBucket, err := s.client.Bucket(archiveBucketName) c.Assert(err, IsNil) s.archiveBucket = archiveBucket testLogger.Println("test bucket started") } else { client, err := New(endpoint, accessID, accessKey) s.client = client c.Assert(err, IsNil) controlClient, err := New(cloudboxControlEndpoint, accessID, accessKey) c.Assert(err, IsNil) s.cloudBoxControlClient = controlClient controlClient.CreateBucket(bucketName) //err = controlClient.CreateBucket(archiveBucketName, StorageClass(StorageArchive)) //c.Assert(err, IsNil) bucket, err := s.client.Bucket(bucketName) c.Assert(err, IsNil) s.bucket = bucket //archiveBucket, err := s.client.Bucket(archiveBucketName) //c.Assert(err, IsNil) //s.archiveBucket = archiveBucket testLogger.Println("test bucket started") } time.Sleep(timeoutInOperation) }
SetUpSuite runs once when the suite starts running.
SetUpSuite
go
aliyun/aliyun-oss-go-sdk
oss/bucket_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/bucket_test.go
MIT
func (s *OssBucketSuite) TearDownSuite(c *C) { time.Sleep(timeoutInOperation) for _, bucket := range []*Bucket{s.bucket, s.archiveBucket} { // Delete multipart keyMarker := KeyMarker("") uploadIDMarker := UploadIDMarker("") for { lmu, err := bucket.ListMultipartUploads(keyMarker, uploadIDMarker) c.Assert(err, IsNil) for _, upload := range lmu.Uploads { imur := InitiateMultipartUploadResult{Bucket: bucketName, Key: upload.Key, UploadID: upload.UploadID} err = bucket.AbortMultipartUpload(imur) c.Assert(err, IsNil) } keyMarker = KeyMarker(lmu.NextKeyMarker) uploadIDMarker = UploadIDMarker(lmu.NextUploadIDMarker) if !lmu.IsTruncated { break } } // Delete objects marker := Marker("") for { lor, err := bucket.ListObjects(marker) c.Assert(err, IsNil) for _, object := range lor.Objects { err = bucket.DeleteObject(object.Key) c.Assert(err, IsNil) } marker = Marker(lor.NextMarker) if !lor.IsTruncated { break } } // Delete bucket if s.cloudBoxControlClient != nil { err := s.cloudBoxControlClient.DeleteBucket(bucket.BucketName) c.Assert(err, IsNil) } else { err := s.client.DeleteBucket(bucket.BucketName) c.Assert(err, IsNil) } } time.Sleep(timeoutInOperation) testLogger.Println("test bucket completed") }
TearDownSuite runs once after all tests or benchmarks have finished running.
TearDownSuite
go
aliyun/aliyun-oss-go-sdk
oss/bucket_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/bucket_test.go
MIT
func (s *OssBucketSuite) SetUpTest(c *C) { err := removeTempFiles("../oss", ".jpg") c.Assert(err, IsNil) }
SetUpTest runs before each test or benchmark starts running.
SetUpTest
go
aliyun/aliyun-oss-go-sdk
oss/bucket_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/bucket_test.go
MIT
func (s *OssBucketSuite) TearDownTest(c *C) { err := removeTempFiles("../oss", ".jpg") c.Assert(err, IsNil) err = removeTempFiles("../oss", ".txt") c.Assert(err, IsNil) err = removeTempFiles("../oss", ".temp") c.Assert(err, IsNil) err = removeTempFiles("../oss", ".txt1") c.Assert(err, IsNil) err = removeTempFiles("../oss", ".txt2") c.Assert(err, IsNil) }
TearDownTest runs after each test or benchmark has finished running.
TearDownTest
go
aliyun/aliyun-oss-go-sdk
oss/bucket_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/bucket_test.go
MIT
func compareFiles(fileL string, fileR string) (bool, error) { finL, err := os.Open(fileL) if err != nil { return false, err } defer finL.Close() finR, err := os.Open(fileR) if err != nil { return false, err } defer finR.Close() statL, err := finL.Stat() if err != nil { return false, err } statR, err := finR.Stat() if err != nil { return false, err } if statL.Size() != statR.Size() { return false, nil } size := statL.Size() if size > 102400 { size = 102400 } bufL := make([]byte, size) bufR := make([]byte, size) for { n, _ := finL.Read(bufL) if 0 == n { break } n, _ = finR.Read(bufR) if 0 == n { break } if !bytes.Equal(bufL, bufR) { return false, nil } } return true, nil }
compareFiles compares the contents of fileL and fileR
compareFiles
go
aliyun/aliyun-oss-go-sdk
oss/bucket_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/bucket_test.go
MIT
func compareFileData(file string, data []byte) (bool, error) { fin, err := os.Open(file) if err != nil { return false, err } defer fin.Close() stat, err := fin.Stat() if err != nil { return false, err } if stat.Size() != (int64)(len(data)) { return false, nil } buf := make([]byte, stat.Size()) n, err := fin.Read(buf) if err != nil { return false, err } if stat.Size() != (int64)(n) { return false, errors.New("read error") } if !bytes.Equal(buf, data) { return false, nil } return true, nil }
compareFileData compares the content of the file with the given byte slice
compareFileData
go
aliyun/aliyun-oss-go-sdk
oss/bucket_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/bucket_test.go
MIT
func MyRangeBehavior(value string) Option { return SetHeader(HTTPHeaderOssRangeBehavior, value) }
MyRangeBehavior is an option that sets the Range behavior header value, such as "standard"; a usage sketch follows this entry
MyRangeBehavior
go
aliyun/aliyun-oss-go-sdk
oss/bucket_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/bucket_test.go
MIT
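MyRangeBehavior is a plain Option built on SetHeader, so it composes with other request options. A hedged sketch of how a test might pass it alongside a ranged read; the object key, byte range, and method name are illustrative only.

// rangeReadWithBehavior shows MyRangeBehavior composed with a ranged GetObject (illustrative).
func (s *OssBucketSuite) rangeReadWithBehavior(c *C) {
	body, err := s.bucket.GetObject("my-object-key",
		Range(0, 9),                 // read the first 10 bytes
		MyRangeBehavior("standard"), // sets the range-behavior header
	)
	c.Assert(err, IsNil)
	defer body.Close()
	data, err := ioutil.ReadAll(body)
	c.Assert(err, IsNil)
	c.Assert(len(data) <= 10, Equals, true)
}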
func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error { if partSize < MinPartSize || partSize > MaxPartSize { return errors.New("oss: part size invalid range (100KB, 5GB]") } cpConf := getCpConfig(options) routines := getRoutines(options) if cpConf != nil && cpConf.IsEnable { cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey) if cpFilePath != "" { return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines) } } return bucket.uploadFile(objectKey, filePath, partSize, options, routines) }
UploadFile performs a multipart file upload. objectKey is the object name, filePath is the local file path to upload, partSize is the part size in bytes, and options are the options for uploading the object. The returned error is nil if the operation succeeds; otherwise it describes the failure. A usage sketch follows this entry.
UploadFile
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
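A hedged usage sketch for UploadFile with concurrency and checkpointing enabled; the endpoint, credentials, bucket, object, and file names are placeholders, and Routines/Checkpoint are the options consumed by getRoutines and getCpConfig below.

// uploadLargeFile sketches a resumable multipart upload (placeholder names throughout).
func uploadLargeFile() error {
	client, err := oss.New("<yourEndpoint>", "<yourAccessKeyId>", "<yourAccessKeySecret>")
	if err != nil {
		return err
	}
	bucket, err := client.Bucket("<yourBucketName>")
	if err != nil {
		return err
	}
	// 1 MB parts, 3 concurrent routines, checkpoint file kept next to the source file.
	return bucket.UploadFile("<yourObjectName>", "<yourLocalFile>", 1024*1024,
		oss.Routines(3),
		oss.Checkpoint(true, "<yourLocalFile>.cp"),
	)
}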
func getCpConfig(options []Option) *cpConfig { cpcOpt, err := FindOption(options, checkpointConfig, nil) if err != nil || cpcOpt == nil { return nil } return cpcOpt.(*cpConfig) }
getCpConfig gets checkpoint configuration
getCpConfig
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func getCpFileName(src, dest, versionId string) string { md5Ctx := md5.New() md5Ctx.Write([]byte(src)) srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) md5Ctx.Reset() md5Ctx.Write([]byte(dest)) destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) if versionId == "" { return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum) } md5Ctx.Reset() md5Ctx.Write([]byte(versionId)) versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum) }
getCpFileName returns the name of the checkpoint file; a readable restatement of the naming scheme follows this entry
getCpFileName
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
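For illustration, the naming scheme can be restated standalone in multi-line form; this is a hypothetical snippet mirroring the md5(src)-md5(dest)[-md5(versionId)].cp pattern, not an SDK API.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// cpName mirrors getCpFileName's naming scheme for illustration only.
func cpName(src, dest, versionId string) string {
	sum := func(s string) string {
		h := md5.Sum([]byte(s))
		return hex.EncodeToString(h[:])
	}
	if versionId == "" {
		return fmt.Sprintf("%v-%v.cp", sum(src), sum(dest))
	}
	return fmt.Sprintf("%v-%v-%v.cp", sum(src), sum(dest), sum(versionId))
}

func main() {
	fmt.Println(cpName("/tmp/local.bin", "bucket/object", ""))
}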
func getRoutines(options []Option) int { rtnOpt, err := FindOption(options, routineNum, nil) if err != nil || rtnOpt == nil { return 1 } rs := rtnOpt.(int) if rs < 1 { rs = 1 } else if rs > 100 { rs = 100 } return rs }
getRoutines gets the routine count. By default it is 1, and values are clamped to the range [1, 100].
getRoutines
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func getPayer(options []Option) string { payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil) if err != nil || payerOpt == nil { return "" } return payerOpt.(string) }
getPayer returns the payer of the request
getPayer
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func GetProgressListener(options []Option) ProgressListener { isSet, listener, _ := IsOptionSet(options, progressListener) if !isSet { return nil } return listener.(ProgressListener) }
GetProgressListener gets the progress callback installed via the Progress option; a listener sketch follows this entry
GetProgressListener
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
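A hedged sketch of a custom ProgressListener and how it might be attached to an upload; the type name and the object/file names are illustrative. The event constants and fields used here are the same ones referenced by uploadFile below.

// percentListener logs transfer progress; a minimal ProgressListener implementation.
type percentListener struct{}

func (l *percentListener) ProgressChanged(event *oss.ProgressEvent) {
	switch event.EventType {
	case oss.TransferStartedEvent:
		fmt.Println("transfer started, total bytes:", event.TotalBytes)
	case oss.TransferDataEvent:
		fmt.Printf("transferred %d/%d bytes\n", event.ConsumedBytes, event.TotalBytes)
	case oss.TransferCompletedEvent:
		fmt.Println("transfer completed")
	case oss.TransferFailedEvent:
		fmt.Println("transfer failed")
	}
}

// Usage (placeholders for object and file names):
//   err := bucket.UploadFile("<yourObjectName>", "<yourLocalFile>", 1024*1024, oss.Progress(&percentListener{}))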
func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) { }
ProgressChanged is the no-op implementation used by the default upload progress listener.
ProgressChanged
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error { listener := GetProgressListener(options) chunks, err := SplitFileByPartSize(filePath, partSize) if err != nil { return err } partOptions := ChoiceTransferPartOption(options) completeOptions := ChoiceCompletePartOption(options) abortOptions := ChoiceAbortPartOption(options) // Initialize the multipart upload imur, err := bucket.InitiateMultipartUpload(objectKey, options...) if err != nil { return err } jobs := make(chan FileChunk, len(chunks)) results := make(chan UploadPart, len(chunks)) failed := make(chan error) die := make(chan bool) var completedBytes int64 totalBytes := getTotalBytes(chunks) event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0) publishProgress(listener, event) // Start the worker coroutine arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker} for w := 1; w <= routines; w++ { go worker(w, arg, jobs, results, failed, die) } // Schedule the jobs go scheduler(jobs, chunks) // Waiting for the upload finished completed := 0 parts := make([]UploadPart, len(chunks)) for completed < len(chunks) { select { case part := <-results: completed++ parts[part.PartNumber-1] = part completedBytes += chunks[part.PartNumber-1].Size // why RwBytes in ProgressEvent is 0 ? // because read or write event has been notified in teeReader.Read() event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size) publishProgress(listener, event) case err := <-failed: close(die) event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0) publishProgress(listener, event) bucket.AbortMultipartUpload(imur, abortOptions...) return err } if completed >= len(chunks) { break } } event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0) publishProgress(listener, event) // Complete the multpart upload _, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...) if err != nil { bucket.AbortMultipartUpload(imur, abortOptions...) return err } return nil }
uploadFile performs a concurrent multipart upload without a checkpoint; a standalone sketch of its worker-pool pattern follows this entry
uploadFile
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
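The jobs/results/failed/die channel layout in uploadFile is a standard bounded worker pool. The following self-contained sketch reproduces the same control flow without any SDK types, purely to make the shape easier to follow.

package main

import "fmt"

// workerPool mirrors the fan-out/fan-in shape used by uploadFile: a scheduler
// feeds jobs, N workers process them, and the first failure closes `die`
// so the remaining workers stop early.
func workerPool(jobs []int, routines int, do func(int) (int, error)) ([]int, error) {
	jobCh := make(chan int, len(jobs))
	results := make(chan int, len(jobs))
	failed := make(chan error)
	die := make(chan bool)

	for w := 0; w < routines; w++ {
		go func() {
			for j := range jobCh {
				r, err := do(j)
				if err != nil {
					select {
					case failed <- err:
					case <-die:
					}
					return
				}
				results <- r // buffered, never blocks
			}
		}()
	}
	go func() {
		for _, j := range jobs {
			jobCh <- j
		}
		close(jobCh)
	}()

	out := make([]int, 0, len(jobs))
	for len(out) < len(jobs) {
		select {
		case r := <-results:
			out = append(out, r)
		case err := <-failed:
			close(die)
			return nil, err
		}
	}
	return out, nil
}

func main() {
	squares, err := workerPool([]int{1, 2, 3, 4}, 2, func(n int) (int, error) { return n * n, nil })
	fmt.Println(squares, err) // order follows completion, like parts arriving out of order
}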
func (cp uploadCheckpoint) isValid(filePath string,options []Option) (bool, error) { callbackVal, _ := FindOption(options, HTTPHeaderOssCallback, "") if callbackVal != "" && cp.CallbackVal != callbackVal { return false, nil } callbackBody, _ := FindOption(options, responseBody, nil) if callbackBody != nil{ body, _ := json.Marshal(callbackBody) if bytes.Equal(*cp.CallbackBody, body) { return false, nil } } // Compare the CP's magic number and MD5. cpb := cp cpb.MD5 = "" js, _ := json.Marshal(cpb) sum := md5.Sum(js) b64 := base64.StdEncoding.EncodeToString(sum[:]) if cp.Magic != uploadCpMagic || b64 != cp.MD5 { return false, nil } // Make sure if the local file is updated. fd, err := os.Open(filePath) if err != nil { return false, err } defer fd.Close() st, err := fd.Stat() if err != nil { return false, err } md, err := calcFileMD5(filePath) if err != nil { return false, err } // Compare the file size, file's last modified time and file's MD5 if cp.FileStat.Size != st.Size() || !cp.FileStat.LastModified.Equal(st.ModTime()) || cp.FileStat.MD5 != md { return false, nil } return true, nil }
isValid checks whether the checkpoint can be reused: it is valid when the local file has not been modified and the checkpoint data itself passes its integrity check.
isValid
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func (cp *uploadCheckpoint) load(filePath string) error { contents, err := ioutil.ReadFile(filePath) if err != nil { return err } err = json.Unmarshal(contents, cp) return err }
load loads the checkpoint from the local file
load
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func (cp *uploadCheckpoint) dump(filePath string) error { bcp := *cp // Calculate MD5 bcp.MD5 = "" js, err := json.Marshal(bcp) if err != nil { return err } sum := md5.Sum(js) b64 := base64.StdEncoding.EncodeToString(sum[:]) bcp.MD5 = b64 // Serialization js, err = json.Marshal(bcp) if err != nil { return err } // Dump return ioutil.WriteFile(filePath, js, FilePermMode) }
dump serializes the checkpoint, with an embedded MD5, to the local file; a standalone sketch of the pattern follows this entry
dump
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
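load, dump, and isValid together implement a simple "JSON with embedded MD5" integrity check: the MD5 field is cleared, the remainder is hashed, and the base64 digest is written back before serialization. The following standalone sketch of that pattern uses a hypothetical record type, not the SDK's checkpoint struct.

package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// record is a hypothetical stand-in for the checkpoint struct.
type record struct {
	Magic string `json:"magic"`
	Data  string `json:"data"`
	MD5   string `json:"md5"`
}

// seal computes the digest over the record with MD5 cleared, as dump does.
func seal(r record) record {
	r.MD5 = ""
	js, _ := json.Marshal(r)
	sum := md5.Sum(js)
	r.MD5 = base64.StdEncoding.EncodeToString(sum[:])
	return r
}

// verify re-derives the digest and compares it, as isValid does.
func verify(r record) bool {
	want := r.MD5
	r.MD5 = ""
	js, _ := json.Marshal(r)
	sum := md5.Sum(js)
	return base64.StdEncoding.EncodeToString(sum[:]) == want
}

func main() {
	r := seal(record{Magic: "example-magic", Data: "parts..."})
	fmt.Println("valid:", verify(r)) // true
	r.Data = "tampered"
	fmt.Println("valid:", verify(r)) // false
}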
func (cp *uploadCheckpoint) updatePart(part UploadPart) { cp.Parts[part.PartNumber-1].Part = part cp.Parts[part.PartNumber-1].IsCompleted = true }
updatePart updates the part status
updatePart
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func (cp *uploadCheckpoint) allParts() []UploadPart { ps := []UploadPart{} for _, part := range cp.Parts { ps = append(ps, part.Part) } return ps }
allParts returns all parts
allParts
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func (cp *uploadCheckpoint) getCompletedBytes() int64 { var completedBytes int64 for _, part := range cp.Parts { if part.IsCompleted { completedBytes += part.Chunk.Size } } return completedBytes }
getCompletedBytes returns completed bytes count
getCompletedBytes
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func calcFileMD5(filePath string) (string, error) { return "", nil }
calcFileMD5 calculates the MD5 for the specified local file; as shipped it is a stub that returns an empty string. A possible real implementation is sketched after this entry.
calcFileMD5
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
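Because calcFileMD5 returns an empty string, the MD5 part of the checkpoint's file check is effectively a no-op. If a real digest were wanted, a streaming implementation might look like the sketch below (hypothetical name; requires crypto/md5, encoding/hex, io, and os).

// calcFileMD5Streaming is a sketch of a possible implementation; it is not the SDK's code.
func calcFileMD5Streaming(filePath string) (string, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}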
func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error { // CP cp.Magic = uploadCpMagic cp.FilePath = filePath cp.ObjectKey = objectKey // Local file fd, err := os.Open(filePath) if err != nil { return err } defer fd.Close() st, err := fd.Stat() if err != nil { return err } cp.FileStat.Size = st.Size() cp.FileStat.LastModified = st.ModTime() callbackVal, _ := FindOption(options, HTTPHeaderOssCallback, "") cp.CallbackVal = callbackVal.(string) callbackBody, _ := FindOption(options, responseBody, nil) if callbackBody != nil { body, _ := json.Marshal(callbackBody) cp.CallbackBody = &body } md, err := calcFileMD5(filePath) if err != nil { return err } cp.FileStat.MD5 = md // Chunks parts, err := SplitFileByPartSize(filePath, partSize) if err != nil { return err } cp.Parts = make([]cpPart, len(parts)) for i, part := range parts { cp.Parts[i].Chunk = part cp.Parts[i].IsCompleted = false } // Init load imur, err := bucket.InitiateMultipartUpload(objectKey, options...) if err != nil { return err } cp.UploadID = imur.UploadID return nil }
prepare initializes the multipart upload
prepare
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error { imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName, Key: cp.ObjectKey, UploadID: cp.UploadID} _, err := bucket.CompleteMultipartUpload(imur, parts, options...) if err != nil { if e, ok := err.(ServiceError);ok && (e.StatusCode == 203 || e.StatusCode == 404) { os.Remove(cpFilePath) } return err } os.Remove(cpFilePath) return err }
complete completes the multipart upload and deletes the local checkpoint file
complete
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error { listener := GetProgressListener(options) partOptions := ChoiceTransferPartOption(options) completeOptions := ChoiceCompletePartOption(options) // Load CP data ucp := uploadCheckpoint{} err := ucp.load(cpFilePath) if err != nil { os.Remove(cpFilePath) } // Load error or the CP data is invalid. valid, err := ucp.isValid(filePath,options) if err != nil || !valid { if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil { return err } os.Remove(cpFilePath) } chunks := ucp.todoParts() imur := InitiateMultipartUploadResult{ Bucket: bucket.BucketName, Key: objectKey, UploadID: ucp.UploadID} jobs := make(chan FileChunk, len(chunks)) results := make(chan UploadPart, len(chunks)) failed := make(chan error) die := make(chan bool) completedBytes := ucp.getCompletedBytes() // why RwBytes in ProgressEvent is 0 ? // because read or write event has been notified in teeReader.Read() event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0) publishProgress(listener, event) // Start the workers arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker} for w := 1; w <= routines; w++ { go worker(w, arg, jobs, results, failed, die) } // Schedule jobs go scheduler(jobs, chunks) // Waiting for the job finished completed := 0 for completed < len(chunks) { select { case part := <-results: completed++ ucp.updatePart(part) ucp.dump(cpFilePath) completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size) publishProgress(listener, event) case err := <-failed: close(die) event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0) publishProgress(listener, event) return err } if completed >= len(chunks) { break } } event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0) publishProgress(listener, event) // Complete the multipart upload err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions) return err }
uploadFileWithCp handles concurrent upload with checkpoint
uploadFileWithCp
go
aliyun/aliyun-oss-go-sdk
oss/upload.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/upload.go
MIT
func (s *OssBucketLiveChannelSuite) SetUpSuite(c *C) { bucketName := bucketNamePrefix + RandLowStr(6) if cloudboxControlEndpoint == "" { client, err := New(endpoint, accessID, accessKey) c.Assert(err, IsNil) s.client = client err = s.client.CreateBucket(bucketName) c.Assert(err, IsNil) time.Sleep(5 * time.Second) bucket, err := s.client.Bucket(bucketName) c.Assert(err, IsNil) s.bucket = bucket } else { client, err := New(cloudboxEndpoint, accessID, accessKey) c.Assert(err, IsNil) s.client = client controlClient, err := New(cloudboxControlEndpoint, accessID, accessKey) c.Assert(err, IsNil) s.cloudBoxControlClient = controlClient controlClient.CreateBucket(bucketName) bucket, err := s.client.Bucket(bucketName) c.Assert(err, IsNil) s.bucket = bucket } testLogger.Println("test livechannel started...") }
SetUpSuite runs once when the suite starts running
SetUpSuite
go
aliyun/aliyun-oss-go-sdk
oss/livechannel_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/livechannel_test.go
MIT
func (s *OssBucketLiveChannelSuite) TearDownSuite(c *C) { marker := "" for { result, err := s.bucket.ListLiveChannel(Marker(marker)) c.Assert(err, IsNil) for _, channel := range result.LiveChannel { err := s.bucket.DeleteLiveChannel(channel.Name) c.Assert(err, IsNil) } if result.IsTruncated { marker = result.NextMarker } else { break } } // Delete bucket if s.cloudBoxControlClient != nil { err := s.cloudBoxControlClient.DeleteBucket(s.bucket.BucketName) c.Assert(err, IsNil) } else { err := s.client.DeleteBucket(s.bucket.BucketName) c.Assert(err, IsNil) } testLogger.Println("test livechannel done...") }
TearDownSuite runs once after all tests or benchmarks have finished running
TearDownSuite
go
aliyun/aliyun-oss-go-sdk
oss/livechannel_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/livechannel_test.go
MIT
func (s *OssBucketLiveChannelSuite) SetUpTest(c *C) { }
SetUpTest runs before each test or benchmark starts running
SetUpTest
go
aliyun/aliyun-oss-go-sdk
oss/livechannel_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/livechannel_test.go
MIT
func (s *OssBucketLiveChannelSuite) TearDownTest(c *C) { }
TearDownTest runs after each test or benchmark has finished running.
TearDownTest
go
aliyun/aliyun-oss-go-sdk
oss/livechannel_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/livechannel_test.go
MIT
func getPlayURL(bucketName, channelName, playlistName string) string { host := "" useHTTPS := false if strings.Contains(endpoint, "https://") { host = endpoint[8:] useHTTPS = true } else if strings.Contains(endpoint, "http://") { host = endpoint[7:] } else { host = endpoint } var url string if useHTTPS { url = fmt.Sprintf("https://%s.%s/%s/%s", bucketName, host, channelName, playlistName) } else { url = fmt.Sprintf("http://%s.%s/%s/%s", bucketName, host, channelName, playlistName) } url = strings.Replace(url, "-internal.aliyuncs.com", ".aliyuncs.com", -1) return url }
getPlayURL gets the play URL of the live channel (unexported test helper)
getPlayURL
go
aliyun/aliyun-oss-go-sdk
oss/livechannel_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/livechannel_test.go
MIT
func getPublishURL(bucketName, channelName string) string { host := "" if strings.Contains(endpoint, "https://") { host = endpoint[8:] } else if strings.Contains(endpoint, "http://") { host = endpoint[7:] } else { host = endpoint } url := fmt.Sprintf("rtmp://%s.%s/live/%s", bucketName, host, channelName) url = strings.Replace(url, "-internal.aliyuncs.com", ".aliyuncs.com", -1) return url }
getPublishURL gets the publish (push) URL of the live channel
getPublishURL
go
aliyun/aliyun-oss-go-sdk
oss/livechannel_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/livechannel_test.go
MIT
func Test(t *testing.T) { TestingT(t) }
Test hooks up gocheck into the "go test" runner.
Test
go
aliyun/aliyun-oss-go-sdk
oss/client_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client_test.go
MIT
func (s *OssClientSuite) SetUpSuite(c *C) { client, err := New(endpoint, accessID, accessKey) c.Assert(err, IsNil) lbr, err := client.ListBuckets(Prefix(bucketNamePrefix), MaxKeys(1000)) c.Assert(err, IsNil) for _, bucket := range lbr.Buckets { ForceDeleteBucket(client, bucket.Name, c) } time.Sleep(timeoutInOperation) testLogger.Println("test client started") }
SetUpSuite runs once when the suite starts running
SetUpSuite
go
aliyun/aliyun-oss-go-sdk
oss/client_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client_test.go
MIT
func (s *OssClientSuite) TearDownSuite(c *C) { client, err := New(endpoint, accessID, accessKey) c.Assert(err, IsNil) lbr, err := client.ListBuckets(Prefix(bucketNamePrefix), MaxKeys(1000)) c.Assert(err, IsNil) for _, bucket := range lbr.Buckets { s.deleteBucket(client, bucket.Name, c) } time.Sleep(timeoutInOperation) testLogger.Println("test client completed") }
TearDownSuite runs once after all tests or benchmarks have finished running
TearDownSuite
go
aliyun/aliyun-oss-go-sdk
oss/client_test.go
https://github.com/aliyun/aliyun-oss-go-sdk/blob/master/oss/client_test.go
MIT