Update Go AWS SDK to the latest version

This commit is contained in:
Andrey Smirnov
2019-07-13 00:03:55 +03:00
committed by Andrey Smirnov
parent d08be990ef
commit 94a72b23ff
2183 changed files with 885887 additions and 228114 deletions
@@ -0,0 +1,66 @@
# AWS DynamoDB Transaction Error Aware Client for Go
The client provides a workaround for [this bug](https://github.com/aws/aws-sdk-go/issues/2318)
## How to use
This example shows how to use the client to read transaction error cancellation reasons.
```go
sess := session.Must(session.NewSession())
svc := NewTxErrorAwareDynamoDBClient(sess)
input := &dynamodb.TransactWriteItemsInput{
//...
}
if _, err := svc.TransactWriteItems(input); err != nil {
txErr := err.(TxRequestFailure)
fmt.Println(txErr.CancellationReasons())
}
```
Sample response of the Println statement
```
{com.amazonaws.dynamodb.v20120810#TransactionCanceledException Transaction cancelled, please refer cancellation reasons for specific reasons [ConditionalCheckFailed, None, None] [{
Code: "ConditionalCheckFailed",
Item: {
AlbumTitle: {
S: "========== 43"
},
Artist: {
S: "Acme Band 14"
},
Year: {
N: "2017"
},
SongTitle: {
S: "Happy Day 12"
}
},
Message: "The conditional request failed"
} {
Code: "None"
} {
Code: "None"
}]}
[{
Code: "ConditionalCheckFailed",
Item: {
AlbumTitle: {
S: "========== 43"
},
Artist: {
S: "Acme Band 14"
},
Year: {
N: "2017"
},
SongTitle: {
S: "Happy Day 12"
}
},
Message: "The conditional request failed"
} {
Code: "None"
} {
Code: "None"
}]
```
@@ -0,0 +1,121 @@
// +build example
package transaction
import (
"encoding/json"
"fmt"
"io"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
const TxAwareErrorUnmarshallerName = "awssdk.jsonrpc.TxAwareErrorUnmarshaller"
// NewTxErrorAwareDynamoDBClient creates a new instance of the DynamoDB client
// with a session. The client's behaviour is the same as what is returned by
// dynamodb.New(), except for richer error reasons: transaction cancellation
// reasons are parsed out of the error response body (see TxAwareUnmarshalError).
func NewTxErrorAwareDynamoDBClient(p client.ConfigProvider, cfgs ...*aws.Config) *dynamodb.DynamoDB {
	c := dynamodb.New(p, cfgs...)
	// Replace the stock JSON-RPC error unmarshaller with the transaction-aware one.
	// NOTE: Ignore if swap failed. Returning nil might fail app startup which is worse than inadequate error details.
	c.Handlers.UnmarshalError.Swap(jsonrpc.UnmarshalErrorHandler.Name, request.NamedHandler{
		Name: TxAwareErrorUnmarshallerName,
		Fn:   TxAwareUnmarshalError,
	})
	return c
}
// TxRequestFailure is an interface to extract request failure information from
// an Error, extending awserr.RequestFailure with the DynamoDB transaction
// cancellation reasons parsed from the error response.
type TxRequestFailure interface {
	awserr.RequestFailure
	CancellationReasons() []dynamodb.CancellationReason
}
// TxAwareUnmarshalError unmarshals an error response for a JSON RPC service.
// This is exactly same as jsonrpc.UnmarshalError, except for the attempt to
// parse CancellationReasons, which the stock unmarshaller drops.
// The result is stored in req.Error; on success it satisfies TxRequestFailure.
func TxAwareUnmarshalError(req *request.Request) {
	defer req.HTTPResponse.Body.Close()
	var jsonErr jsonTxErrorResponse
	err := json.NewDecoder(req.HTTPResponse.Body).Decode(&jsonErr)
	if err == io.EOF {
		// Empty body: surface the HTTP status text as the error message.
		req.Error = awserr.NewRequestFailure(
			awserr.New(request.ErrCodeSerialization, req.HTTPResponse.Status, nil),
			req.HTTPResponse.StatusCode,
			req.RequestID,
		)
		return
	} else if err != nil {
		// Malformed body: report a serialization failure wrapping the decode error.
		req.Error = awserr.NewRequestFailure(
			awserr.New(request.ErrCodeSerialization,
				"failed decoding JSON RPC error response", err),
			req.HTTPResponse.StatusCode,
			req.RequestID,
		)
		return
	}
	// The __type field looks like "namespace#ErrorCode"; keep only the code part
	// (SplitN leaves the whole string when no "#" is present).
	codes := strings.SplitN(jsonErr.Code, "#", 2)
	req.Error = newTxRequestError(
		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
		req.HTTPResponse.StatusCode,
		req.RequestID,
		jsonErr.CancellationReasons,
	)
}
// jsonTxErrorResponse mirrors the wire format of a DynamoDB JSON-RPC error
// response, including the CancellationReasons field that the SDK's standard
// error response type does not carry.
type jsonTxErrorResponse struct {
	Code                string                        `json:"__type"`
	Message             string                        `json:"message"`
	CancellationReasons []dynamodb.CancellationReason `json:"CancellationReasons"`
}
// awsError aliases awserr.Error so that the Error interface type can be
// included as an anonymous field in the txRequestError struct and not conflict
// with the error.Error() method.
type awsError awserr.Error
// txRequestError wraps a request or service error.
// txRequestError is awserr.requestError with an additional cancellationReasons field.
type txRequestError struct {
	awsError                                          // underlying AWS error: code, message, original error
	statusCode          int                           // HTTP status code of the failed response
	requestID           string                        // AWS request ID of the failed request
	cancellationReasons []dynamodb.CancellationReason // per-item transaction cancellation reasons (may be nil)
}
// newTxRequestError builds a TxRequestFailure that carries the wrapped AWS
// error, the HTTP status code, the request ID, and any transaction
// cancellation reasons parsed from the response.
func newTxRequestError(err awserr.Error, statusCode int, requestID string, cancellationReasons []dynamodb.CancellationReason) TxRequestFailure {
	failure := txRequestError{
		awsError:            err,
		statusCode:          statusCode,
		requestID:           requestID,
		cancellationReasons: cancellationReasons,
	}
	return &failure
}
// Error renders the failure as "code: message, status code: N, request id: S",
// delegating the formatting to awserr.SprintError.
func (r txRequestError) Error() string {
	return awserr.SprintError(r.Code(), r.Message(),
		fmt.Sprintf("status code: %d, request id: %s", r.statusCode, r.requestID),
		r.OrigErr())
}
// String implements fmt.Stringer by delegating to Error.
func (r txRequestError) String() string {
	return r.Error()
}

// StatusCode returns the HTTP status code of the failed request.
func (r txRequestError) StatusCode() int {
	return r.statusCode
}

// RequestID returns the AWS request ID of the failed request.
func (r txRequestError) RequestID() string {
	return r.requestID
}

// CancellationReasons returns the DynamoDB transaction cancellation reasons
// parsed from the error response; nil when the response carried none.
func (r txRequestError) CancellationReasons() []dynamodb.CancellationReason {
	return r.cancellationReasons
}
@@ -0,0 +1,171 @@
// +build example
package transaction
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting/unit"
)
// Fixed HTTP status code and request ID shared by all test fixtures below.
const errStatusCode = 400
const requestId = "requestId1"
// TestNewTxErrorAwareDynamoDBClient verifies that the constructor replaces the
// stock JSON-RPC error unmarshaller with the transaction-aware one.
func TestNewTxErrorAwareDynamoDBClient(t *testing.T) {
	sess := unit.Session
	svc := NewTxErrorAwareDynamoDBClient(sess)
	// The swap must replace (not add to) the single stock handler.
	if svc.Handlers.UnmarshalError.Len() != 1 {
		t.Errorf("expected 1 UnmarshallErrorHandler, got %v", svc.Handlers.UnmarshalError.Len())
	}
	// Swap returns true only when a handler with the given name was present.
	if svc.Handlers.UnmarshalError.Swap(TxAwareErrorUnmarshallerName, request.NamedHandler{}) == false {
		t.Errorf("expected to contain %s, got none", TxAwareErrorUnmarshallerName)
	}
}
// TestTxAwareUnmarshalError table-tests the custom unmarshaller against error
// responses with absent, empty, and populated CancellationReasons.
func TestTxAwareUnmarshalError(t *testing.T) {
	input := map[string]struct {
		enc interface{}      // value JSON-encoded as the fake HTTP response body
		err TxRequestFailure // expected unmarshalled failure
	}{
		// Non-transactional error shape: no CancellationReasons field at all.
		"Error response without CancellationReasons": {
			jsonErrorResponse{
				Code:    "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException",
				Message: "Requested resource not found",
			},
			txRequestError{
				awsError:            awserr.New("ResourceNotFoundException", "Requested resource not found", nil),
				statusCode:          errStatusCode,
				requestID:           requestId,
				cancellationReasons: nil,
			},
		},
		// Field present but empty; must round-trip as an empty (non-nil) slice.
		"Error response with empty CancellationReasons": {
			jsonTxErrorResponse{
				Code:                "com.amazonaws.dynamodb.v20120810#TransactionCanceledException",
				Message:             "Transaction cancelled, please refer cancellation reasons for specific reasons [ConditionalCheckFailed, None, None]",
				CancellationReasons: []dynamodb.CancellationReason{},
			},
			txRequestError{
				awsError:            awserr.New("TransactionCanceledException", "Transaction cancelled, please refer cancellation reasons for specific reasons [ConditionalCheckFailed, None, None]", nil),
				statusCode:          errStatusCode,
				requestID:           requestId,
				cancellationReasons: []dynamodb.CancellationReason{},
			},
		},
		// Populated reasons, including the failed item attributes, must be preserved.
		"Error response with non-empty CancellationReasons": {
			jsonTxErrorResponse{
				Code:    "com.amazonaws.dynamodb.v20120810#TransactionCanceledException",
				Message: "Transaction cancelled, please refer cancellation reasons for specific reasons [ConditionalCheckFailed, None, None]",
				CancellationReasons: []dynamodb.CancellationReason{
					{
						Code: aws.String("ConditionalCheckFailed"),
						Item: map[string]*dynamodb.AttributeValue{
							"hk":   {S: aws.String("hkVal1")},
							"attr": {S: aws.String("attrVal1")},
						},
						Message: aws.String("The conditional request failed"),
					},
				},
			},
			txRequestError{
				awsError:   awserr.New("TransactionCanceledException", "Transaction cancelled, please refer cancellation reasons for specific reasons [ConditionalCheckFailed, None, None]", nil),
				statusCode: errStatusCode,
				requestID:  requestId,
				cancellationReasons: []dynamodb.CancellationReason{
					{
						Code: aws.String("ConditionalCheckFailed"),
						Item: map[string]*dynamodb.AttributeValue{
							"hk":   {S: aws.String("hkVal1")},
							"attr": {S: aws.String("attrVal1")},
						},
						Message: aws.String("The conditional request failed"),
					},
				},
			},
		},
	}
	for name, in := range input {
		t.Run(name, func(t *testing.T) {
			if err := validateUnmarshallError(in.enc, in.err); err != nil {
				t.Errorf("%s: expected nil, got %v", name, err)
			}
		})
	}
}
// validateUnmarshallError feeds the JSON encoding of enc to
// TxAwareUnmarshalError via a fake *request.Request, then compares the
// resulting req.Error field-by-field with the expected failure err.
// It returns a descriptive error on the first mismatch, nil when all match.
func validateUnmarshallError(enc interface{}, err TxRequestFailure) error {
	req := &request.Request{
		HTTPResponse: &http.Response{
			StatusCode: errStatusCode,
			Body:       newBufferCloser(encode(enc)),
		},
		RequestID: requestId,
	}
	TxAwareUnmarshalError(req)
	// The unmarshaller must have produced a TxRequestFailure, not a plain error.
	if aerr, ok := req.Error.(TxRequestFailure); ok {
		if err.RequestID() != aerr.RequestID() {
			return fmt.Errorf("expected %v, got %v", err.RequestID(), aerr.RequestID())
		}
		if err.StatusCode() != aerr.StatusCode() {
			return fmt.Errorf("expected %v, got %v", err.StatusCode(), aerr.StatusCode())
		}
		if err.Message() != aerr.Message() {
			return fmt.Errorf("expected %v, got %v", err.Message(), aerr.Message())
		}
		if err.Code() != aerr.Code() {
			return fmt.Errorf("expected %v, got %v", err.Code(), aerr.Code())
		}
		if err.OrigErr() != aerr.OrigErr() {
			return fmt.Errorf("expected %v, got %v", err.OrigErr(), aerr.OrigErr())
		}
		if !reflect.DeepEqual(err.Error(), aerr.Error()) {
			return fmt.Errorf("expected %v, got %v", err.Error(), aerr.Error())
		}
		// DeepEqual distinguishes nil from empty slices, which is intentional here.
		if !reflect.DeepEqual(err.CancellationReasons(), aerr.CancellationReasons()) {
			return fmt.Errorf("expected %v, got %v", err.CancellationReasons(), aerr.CancellationReasons())
		}
	} else {
		return fmt.Errorf("expected type 'TxRequestFailure', got %T", req.Error)
	}
	return nil
}
// encode returns the JSON encoding of v (with a trailing newline, as produced
// by json.Encoder). It panics on failure so a malformed test fixture fails
// loudly instead of silently producing an empty response body; the original
// ignored the Encode error.
func encode(v interface{}) []byte {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&v); err != nil {
		panic(err)
	}
	return buf.Bytes()
}
// bufferCloser is an implementation of io.ReadCloser backed by bytes.Buffer,
// used as a stand-in for an HTTP response body in tests.
type bufferCloser struct {
	bytes.Buffer
}
// newBufferCloser returns a bufferCloser pre-filled with data.
func newBufferCloser(data []byte) *bufferCloser {
	bc := &bufferCloser{}
	// bytes.Buffer.Write is documented to always return a nil error.
	bc.Write(data)
	return bc
}
// Close implements io.Closer; it discards any unread buffered data.
func (b *bufferCloser) Close() error {
	b.Reset()
	return nil
}
// jsonErrorResponse defines an error response without the cancellation
// reasons field, matching the shape returned by non-transactional APIs.
type jsonErrorResponse struct {
	Code    string `json:"__type"`
	Message string `json:"message"`
}
@@ -5,10 +5,10 @@ This is an example using the AWS SDK for Go to list ec2 instances that match pro
# Usage
The example uses the bucket name provided, and lists all object keys in a bucket.
The example uses the tag name provided, and lists all matching ec2 instances.
```sh
go run -tags example filter_ec2_by_tag.go <name_filter>
go run filter_ec2_by_tag.go <name_filter>
```
Output:
@@ -1,7 +1,7 @@
# Example Fetch By region
This is an example using the AWS SDK for Go to list ec2 instances instance state By different region . By default it fetch all running and stopped instance
This is an example using the AWS SDK for Go to list ec2 instances instance state By different region . By default it fetch all running and stopped instance
# Usage
@@ -49,7 +49,7 @@ func main() {
ec2Svc := ec2.New(sess)
params := &ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
&ec2.Filter{
{
Name: aws.String("instance-state-name"),
Values: aws.StringSlice(states),
},
@@ -0,0 +1,17 @@
# Example
This example demonstrates how you can use the AWS Elemental MediaStore
API PutObject operation with a non-seekable io.Reader.
# Usage
The example will create an Elemental MediaStore container, and upload a
contrived non-seekable io.Reader to that container. Using the SDK's
[aws.ReadSeekCloser](https://docs.aws.amazon.com/sdk-for-go/api/aws/#ReadSeekCloser)
utility for wrapping the `io.Reader` in a value the
[mediastore#PutObjectInput](https://docs.aws.amazon.com/sdk-for-go/api/service/mediastoredata/#PutObjectInput).Body will accept.
The example will attempt to create the container if it does not already exist.
```sh
AWS_REGION=<region> go run -tags example main.go <containerName> <object-path>
@@ -0,0 +1,92 @@
// +build example
package main
import (
"fmt"
"io"
"log"
"math/rand"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/mediastore"
"github.com/aws/aws-sdk-go/service/mediastoredata"
)
// main uploads a 1MB pseudo-random, non-seekable stream to an AWS Elemental
// MediaStore container given as <containerName> <object-path> on the CLI.
func main() {
	// Validate CLI arguments before indexing into os.Args; the original
	// panicked with index-out-of-range when arguments were missing.
	if len(os.Args) < 3 {
		log.Fatal("USAGE ERROR: AWS_REGION=<region> go run -tags example main.go <containerName> <object-path>")
	}
	containerName := os.Args[1]
	objectPath := os.Args[2]
	// Create the SDK's session, and a AWS Elemental MediaStore Data client.
	sess := session.Must(session.NewSession())
	dataSvc, err := getMediaStoreDataClient(containerName, sess)
	if err != nil {
		log.Fatalf("failed to create client, %v", err)
	}
	// Create a random reader to simulate a unseekable reader, wrap the reader
	// in an io.LimitReader to prevent uploading forever.
	randReader := rand.New(rand.NewSource(0))
	reader := io.LimitReader(randReader, 1024*1024 /* 1MB */)
	// Wrap the unseekable reader with the SDK's ReadSeekCloser. This type will
	// allow the SDK's to use the nonseekable reader.
	body := aws.ReadSeekCloser(reader)
	// make the PutObject API call with the nonseekable reader, causing the SDK
	// to send the request body payload as chunked transfer encoding.
	_, err = dataSvc.PutObject(&mediastoredata.PutObjectInput{
		Path: &objectPath,
		Body: body,
	})
	if err != nil {
		log.Fatalf("failed to upload object, %v", err)
	}
	fmt.Println("object uploaded")
}
// getMediaStoreDataClient resolves the endpoint of the named container via the
// AWS Elemental MediaStore control-plane API, and returns an AWS Elemental
// MediaStore Data client configured with that endpoint. An error is returned
// when the endpoint cannot be resolved.
func getMediaStoreDataClient(containerName string, sess *session.Session) (*mediastoredata.MediaStoreData, error) {
	endpoint, err := containerEndpoint(containerName, sess)
	if err != nil {
		return nil, err
	}
	return mediastoredata.New(sess, &aws.Config{
		Endpoint: endpoint,
	}), nil
}
// containerEndpoint will attempt to get the endpoint for a container,
// returning error if the container doesn't exist, or is not active within a
// timeout (3 attempts, 10 seconds apart).
func containerEndpoint(name string, sess *session.Session) (*string, error) {
	// The control-plane client is loop-invariant: create it once instead of
	// on every retry iteration as the original did.
	ctrlSvc := mediastore.New(sess)
	for i := 0; i < 3; i++ {
		descResp, err := ctrlSvc.DescribeContainer(&mediastore.DescribeContainerInput{
			ContainerName: &name,
		})
		if err != nil {
			return nil, err
		}
		if status := aws.StringValue(descResp.Container.Status); status != "ACTIVE" {
			log.Println("waiting for container to be active, ", status)
			time.Sleep(10 * time.Second)
			continue
		}
		return descResp.Container.Endpoint, nil
	}
	return nil, fmt.Errorf("container is not active")
}
@@ -0,0 +1,22 @@
# Example
This is an example using the AWS SDK for Go to download an S3 object with a
progress bar.
# Usage
The example uses the bucket name provided, one key for object, and output the
progress to stdout.
```prompt
AWS_PROFILE=my-profile AWS_REGION=us-west-2 go run -tags example getObjectWithProgress.go cool-bucket my/object/prefix/cool_thing.zip
2019/02/22 13:04:52 File size is: 35.9 MB
2019/02/22 13:04:53 File size:35943530 downloaded:8580 percentage:0%
2019/02/22 13:04:53 File size:35943530 downloaded:17580 percentage:0%
2019/02/22 13:04:53 File size:35943530 downloaded:33940 percentage:0%
2019/02/22 13:04:53 File size:35943530 downloaded:34988 percentage:0%
2019/02/22 13:04:53 File size:35943530 downloaded:51348 percentage:0%
2019/02/22 13:04:53 File size:35943530 downloaded:52396 percentage:0%
...
```
@@ -0,0 +1,133 @@
// +build example
package main
import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"sync/atomic"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// progressWriter tracks the download progress of a file from S3 to a file
// as the writeAt method is called, the byte size is added to the written total,
// and then a log is printed of the written percentage from the total size
// it looks like this on the command line:
// 2019/02/22 12:59:15 File size:35943530 downloaded:16360 percentage:0%
// 2019/02/22 12:59:15 File size:35943530 downloaded:16988 percentage:0%
// 2019/02/22 12:59:15 File size:35943530 downloaded:33348 percentage:0%
type progressWriter struct {
	written int64       // bytes written so far; updated via sync/atomic in WriteAt
	writer  io.WriterAt // destination the downloaded bytes are forwarded to
	size    int64       // total object size, used to compute the percentage
}
// WriteAt forwards the chunk to the underlying writer while atomically
// accumulating the running byte total, then prints a progress line.
//
// s3manager's Download writes parts concurrently, so WriteAt may run from
// multiple goroutines. Use the value returned by atomic.AddInt64 rather than
// re-reading pw.written (as the original did), which is a data race and can
// report a stale total.
func (pw *progressWriter) WriteAt(p []byte, off int64) (int, error) {
	written := atomic.AddInt64(&pw.written, int64(len(p)))
	percentageDownloaded := float32(written*100) / float32(pw.size)
	fmt.Printf("File size:%d downloaded:%d percentage:%.2f%%\r", pw.size, written, percentageDownloaded)
	return pw.writer.WriteAt(p, off)
}
// byteCountDecimal formats a byte count using decimal (SI, base-1000) units,
// e.g. 500 -> "500 B", 35943530 -> "35.9 MB".
func byteCountDecimal(b int64) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	divisor, idx := int64(unit), 0
	for remaining := b / unit; remaining >= unit; remaining /= unit {
		divisor *= unit
		idx++
	}
	return fmt.Sprintf("%.1f %cB", float64(b)/float64(divisor), "kMGTPE"[idx])
}
// getFileSize returns the size in bytes of the object at bucket/prefix,
// determined with a HeadObject request; 0 and the error on failure.
func getFileSize(svc *s3.S3, bucket string, prefix string) (int64, error) {
	params := &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(prefix),
	}
	resp, err := svc.HeadObject(params)
	if err != nil {
		return 0, err
	}
	// aws.Int64Value is nil-safe; the original dereferenced resp.ContentLength
	// directly, which panics if the field is ever nil. (Also dropped the
	// shadowing `error error` named result, which is non-idiomatic.)
	return aws.Int64Value(resp.ContentLength), nil
}
// parseFilename returns the final "/"-separated segment of an S3 object key,
// i.e. the object's base file name (empty when the key ends in "/").
func parseFilename(keyString string) (filename string) {
	if idx := strings.LastIndex(keyString, "/"); idx >= 0 {
		return keyString[idx+1:]
	}
	return keyString
}
// main downloads s3://<bucket>/<key> into the current directory, printing a
// progress line as bytes arrive.
func main() {
	// Two positional arguments are required. The original checked
	// `len(os.Args) < 2` but indexes os.Args[2], panicking with
	// index-out-of-range whenever the object key was omitted.
	if len(os.Args) < 3 {
		log.Println("USAGE ERROR: AWS_REGION=us-east-1 go run getObjWithProgress.go bucket-name object-key")
		return
	}
	bucket := os.Args[1]
	key := os.Args[2]
	filename := parseFilename(key)
	sess, err := session.NewSession()
	if err != nil {
		panic(err)
	}
	s3Client := s3.New(sess)
	downloader := s3manager.NewDownloader(sess)
	// HEAD the object first so the progress writer knows the total size.
	size, err := getFileSize(s3Client, bucket, key)
	if err != nil {
		panic(err)
	}
	log.Println("Starting download, size:", byteCountDecimal(size))
	cwd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	// Download into a temp file and rename into place afterwards, so a failed
	// download never leaves a truncated file under the final name.
	temp, err := ioutil.TempFile(cwd, "getObjWithProgress-tmp-")
	if err != nil {
		panic(err)
	}
	tempfileName := temp.Name()
	writer := &progressWriter{writer: temp, size: size, written: 0}
	params := &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}
	if _, err := downloader.Download(writer, params); err != nil {
		log.Printf("Download failed! Deleting tempfile: %s", tempfileName)
		os.Remove(tempfileName)
		panic(err)
	}
	if err := temp.Close(); err != nil {
		panic(err)
	}
	if err := os.Rename(temp.Name(), filename); err != nil {
		panic(err)
	}
	fmt.Println()
	// Fixed "Avaliable" typo in the user-facing message.
	log.Println("File downloaded! Available at:", filename)
}
+6 -6
View File
@@ -24,11 +24,11 @@ AWS credentials. See the [`Configuring Credentials`](http://docs.aws.amazon.com/
section of the SDK's API Reference guide on how the SDK loads your AWS credentials.
The server requires the S3 `-b bucket` the presigned URLs will be generated for. A
`-r region` is only needed if the bucket is in AWS China or AWS Gov Cloud. For
`-r region` is only needed if the bucket is in AWS China or AWS Gov Cloud. For
buckets in AWS the server will use the [`s3manager.GetBucketRegion`](http://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion) utility to lookup the bucket's region.
You should run the service in the background or in a separate terminal tab before
moving onto the client.
moving onto the client.
```sh
@@ -43,7 +43,7 @@ defaults.
Use the client application to request a presigned URL from the server and use
that presigned URL to download the object from S3. Calling the client with the
`-get key` flag will do this. An optional `-f filename` flag can be provided as
`-get key` flag will do this. An optional `-f filename` flag can be provided as
well to write the object to. If no flag is provided the object will be written
to `stdout`
@@ -63,7 +63,7 @@ URL. The `method` value can be `GET` or `PUT` for the `GetObject` or `PutObject`
curl -v "http://127.0.0.1:8080/presign/my-object/key?method=GET"
```
The server will respond with a JSON value. The value contains three pieces of
The server will respond with a JSON value. The value contains three pieces of
information that the client will need to correctly make the request. First is
the presigned URL. This is the URL the client will make the request to. Second
is the HTTP method the request should be sent as. This is included to simplify
@@ -97,7 +97,7 @@ service
go run -tags example client/client.go -put "my-object/key" -f filename
```
Like the download case this will make a HTTP request to the server for the
Like the download case this will make a HTTP request to the server for the
presigned URL. The Server will respond with a presigned URL for S3's `PutObject`
API operation. In addition the `method` query parameter the client will also
include a `contentLength` this value instructs the server to generate the presigned
@@ -118,7 +118,7 @@ such as additional constraints the server puts on the presigned URLs like
`Content-Type`.
In addition to adding constraints to the presigned URLs the service could be
updated to obfuscate S3 object's key. Instead of the client knowing the object's
updated to obfuscate S3 object's key. Instead of the client knowing the object's
key, a lookup system could be used instead. This could be substitution based,
or lookup into an external data store such as DynamoDB.
@@ -19,10 +19,10 @@ putBucketAcl <params>
```
```sh
go run -tags example putObjectAcl.go
-bucket <bucket>
-key <key>
-owner-name <name>
go run -tags example putObjectAcl.go
-bucket <bucket>
-key <key>
-owner-name <name>
-owner-id <id>
-grantee-type <some type>
-user-id <user-id>
@@ -0,0 +1,14 @@
# Example
This is an example using the AWS SDK for Go to upload an object with progress.
We use CustomReader to implement it
# Usage
The example uses the bucket name provided, one key for object, and output the progress to stdout.
The object size should be larger than 5 MB or you will not see the progress.
```sh
AWS_REGION=<region> go run putObjWithProcess.go <credential> <bucket> <key for object> <local file name>
```
@@ -0,0 +1,102 @@
// +build example
package main
import (
"log"
"os"
"sync/atomic"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// CustomReader wraps an *os.File so upload progress can be reported: the
// s3manager uploader reads parts through ReadAt, where the bytes read are
// accumulated and logged.
type CustomReader struct {
	fp   *os.File // underlying file being uploaded
	size int64    // total file size, used to compute the percentage
	read int64    // bytes read so far; updated via sync/atomic in ReadAt
}
// Read implements io.Reader by delegating to the underlying file.
func (r *CustomReader) Read(p []byte) (int, error) {
	return r.fp.Read(p)
}
// ReadAt implements io.ReaderAt by delegating to the underlying file, and on
// each successful read accumulates the byte count and logs upload progress.
func (r *CustomReader) ReadAt(p []byte, off int64) (int, error) {
	n, err := r.fp.ReadAt(p, off)
	if err != nil {
		return n, err
	}
	// Got the length have read( or means has uploaded), and you can construct your message
	atomic.AddInt64(&r.read, int64(n))
	// I have no idea why the read length need to be div 2,
	// maybe the request read once when Sign and actually send call ReadAt again
	// It works for me
	// NOTE(review): presumably the SDK reads each part twice (once while
	// signing the request and once while sending it), which would explain the
	// division by 2 — confirm before relying on the reported percentage.
	log.Printf("total read:%d progress:%d%%\n", r.read/2, int(float32(r.read*100/2)/float32(r.size)))
	return n, err
}
// Seek implements io.Seeker by delegating to the underlying file.
func (r *CustomReader) Seek(offset int64, whence int) (int64, error) {
	return r.fp.Seek(offset, whence)
}
// main uploads a local file to s3://<bucket>/<key> via multipart upload,
// logging upload progress through CustomReader.
func main() {
	// Four positional arguments are required. The original checked
	// `len(os.Args) < 4` but indexes os.Args[4], panicking with
	// index-out-of-range whenever the local file name was omitted.
	if len(os.Args) < 5 {
		log.Println("USAGE ERROR: AWS_REGION=us-east-1 go run putObjWithProcess.go <credential> <bucket> <key for object> <local file name>")
		return
	}
	credential := os.Args[1]
	bucket := os.Args[2]
	key := os.Args[3]
	fileName := os.Args[4]
	// Load credentials from the shared credentials file at the given path.
	creds := credentials.NewSharedCredentials(credential, "default")
	if _, err := creds.Get(); err != nil {
		log.Println("ERROR:", err)
		return
	}
	// session.New is deprecated; NewSession surfaces configuration errors.
	sess, err := session.NewSession(&aws.Config{
		Credentials: creds,
	})
	if err != nil {
		log.Println("ERROR:", err)
		return
	}
	file, err := os.Open(fileName)
	if err != nil {
		log.Println("ERROR:", err)
		return
	}
	// The original leaked the file handle.
	defer file.Close()
	fileInfo, err := file.Stat()
	if err != nil {
		log.Println("ERROR:", err)
		return
	}
	reader := &CustomReader{
		fp:   file,
		size: fileInfo.Size(),
	}
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 5 * 1024 * 1024 // S3's minimum multipart part size
		u.LeavePartsOnError = true   // keep uploaded parts so a retry can resume
	})
	output, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Body:   reader,
	})
	if err != nil {
		log.Println("ERROR:", err)
		return
	}
	log.Println(output.Location)
}
+2 -2
View File
@@ -2,7 +2,7 @@
sync will upload a given directory to Amazon S3 using the upload iterator interface defined in the
s3manager package. This example uses a path that is specified during runtime to walk and build keys
to upload to Amazon S3. It will use the keys to upload the files/folders to Amazon S3.
to upload to Amazon S3. It will use the keys to upload the files/folders to Amazon S3.
# Usage
@@ -14,7 +14,7 @@ sync <params>
```
```sh
go run -tags example sync.go
go run -tags example sync.go
-region <region> // required
-bucket <bucket> // required
-path <path> // required
+15 -7
View File
@@ -5,6 +5,7 @@ package main
import (
"flag"
"fmt"
"mime"
"os"
"path/filepath"
"strings"
@@ -69,15 +70,22 @@ func (iter *SyncFolderIterator) UploadObject() s3manager.BatchUploadObject {
iter.err = err
}
extension := filepath.Ext(fi.key)
mimeType := mime.TypeByExtension(extension)
if mimeType == "" {
mimeType = "binary/octet-stream"
}
input := s3manager.UploadInput{
Bucket: &iter.bucket,
Key: &fi.key,
Body: body,
Bucket: &iter.bucket,
Key: &fi.key,
Body: body,
ContentType: &mimeType,
}
return s3manager.BatchUploadObject{
&input,
nil,
Object: &input,
}
}
@@ -101,11 +109,11 @@ func main() {
iter := NewSyncFolderIterator(*pathPtr, *bucketPtr)
if err := uploader.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
fmt.Fprintf(os.Stderr, "unexpected error has occured: %v", err)
fmt.Fprintf(os.Stderr, "unexpected error has occurred: %v", err)
}
if err := iter.Err(); err != nil {
fmt.Fprintf(os.Stderr, "unexpected error occured during file walking: %v", err)
fmt.Fprintf(os.Stderr, "unexpected error occurred during file walking: %v", err)
}
fmt.Println("Success")