Update vendored deps, including AWS SDK, openpgp, ftp, ...

This commit is contained in:
Andrey Smirnov
2018-04-05 17:46:45 +03:00
parent cef4fefc40
commit 0e6ee35942
1497 changed files with 450721 additions and 68034 deletions
@@ -68,7 +68,7 @@ func main() {
ddbSvc.ListTables(&dynamodb.ListTablesInput{})
// Setting Config's Endpoint will override the EndpointResolver. Forcing
// the service clien to make all operation to the endpoint specified
// the service client to make all operation to the endpoint specified
// the in the config.
ddbSvcLocal := dynamodb.New(sess, &aws.Config{
Endpoint: aws.String("http://localhost:8088"),
@@ -0,0 +1,11 @@
# Using Custom Retry Strategies with the SDK
This example highlights how you can define a custom retry strategy for the SDK to use. The example wraps the SDK's DefaultRetryer with a set of custom rules to not retry HTTP 5xx status codes. In all other cases the custom retry strategy falls back to the SDK's DefaultRetryer functionality.
## Usage
This example will attempt to make an Amazon CloudWatch Logs DescribeLogGroups API call. This example expects to retrieve credentials from the `~/.aws/credentials` file.
```sh
go run ./custom_retryer.go
```
@@ -0,0 +1,75 @@
// +build example
package main
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)
// main builds a session configured with a CustomRetryer and an explicit
// shared-credentials provider, registers an AfterRetry handler that expires
// credentials on a specific error code, then makes a single CloudWatch Logs
// DescribeLogGroups call and prints the result.
func main() {
	sess := session.Must(
		session.NewSession(&aws.Config{
			// Use a custom retryer to provide custom retry rules.
			Retryer: CustomRetryer{DefaultRetryer: client.DefaultRetryer{NumMaxRetries: 3}},

			// Use the SDK's SharedCredentialsProvider directly instead of the
			// SDK's default credential chain. This ensures that the
			// application can call Config.Credentials.Expire. This is counter
			// to the SDK's default credentials chain, which will never reread
			// the shared credentials file.
			Credentials: credentials.NewCredentials(&credentials.SharedCredentialsProvider{
				Filename: defaults.SharedCredentialsFilename(),
				Profile:  "default",
			}),
			Region: aws.String(endpoints.UsWest2RegionID),
		}),
	)

	// Add a request handler to the AfterRetry handler stack that is used by
	// the SDK to be executed after the SDK has determined if it will retry.
	// This handler forces the SDK's Credentials to be expired; the next call
	// to Credentials.Get will then attempt to refresh the credentials.
	sess.Handlers.AfterRetry.PushBack(func(req *request.Request) {
		if aerr, ok := req.Error.(awserr.RequestFailure); ok && aerr != nil {
			if aerr.Code() == "InvalidClaimException" {
				// Force the credentials to expire based on error code. Next
				// call to Credentials.Get will attempt to refresh credentials.
				req.Config.Credentials.Expire()
			}
		}
	})

	svc := cloudwatchlogs.New(sess)

	resp, err := svc.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{})
	if err != nil {
		// Check the error instead of printing a possibly-nil response jumbled
		// together with it.
		fmt.Println("DescribeLogGroups failed:", err)
		return
	}
	fmt.Println(resp)
}
// CustomRetryer wraps the SDK's built-in client.DefaultRetryer. The embedded
// DefaultRetryer supplies all retry behavior except what is overridden here:
// the ShouldRetry method below disables retries for HTTP 5xx status codes.
// (Credential refresh on specific error codes is handled separately by the
// AfterRetry handler registered in main, not by this type.)
type CustomRetryer struct {
	client.DefaultRetryer
}
// ShouldRetry overrides the SDK's built-in DefaultRetryer, adding a
// customization to never retry requests that failed with an HTTP 5xx status
// code. All other cases fall back to the embedded DefaultRetryer's rules.
//
// HTTPResponse is guarded against nil: a request that fails before any HTTP
// response is received (e.g. a connection error) may have no response at all,
// and dereferencing it unconditionally would panic.
func (r CustomRetryer) ShouldRetry(req *request.Request) bool {
	if req.HTTPResponse != nil && req.HTTPResponse.StatusCode >= 500 {
		// Don't retry any 5xx status codes.
		return false
	}

	// Fall back to the SDK's built-in retry rules.
	return r.DefaultRetryer.ShouldRetry(req)
}
@@ -4,7 +4,7 @@ Uploads a file to S3 given a bucket and object key. Also takes a duration
value to terminate the update if it doesn't complete within that time.
The AWS Region needs to be provided in the AWS shared config or on the
environment variable as `AWS_REGION`. Credentials also must be provided
environment variable as `AWS_REGION`. Credentials also must be provided.
Will default to shared config file, but can load from environment if provided.
## Usage:
@@ -8,10 +8,10 @@ This is an example using the AWS SDK for Go to list ec2 instances instance state
```sh
# To fetch the stopped instance of all region use below:
# To fetch the stopped and running instances of all region use below:
./filter_ec2_by_region --state running --state stopped
# To fetch the stopped and running instance for region us-west-1 and eu-west-1 use below:
# To fetch the stopped and running instances for region us-west-1 and eu-west-1 use below:
./filter_ec2_by_region --state running --state stopped --region us-west-1 --region=eu-west-1
```
@@ -60,9 +60,9 @@ func main() {
if err != nil {
fmt.Println("Error", err)
} else {
fmt.Printf("\n\n\nFetching instace details for region: %s with criteria: %s**\n ", region, instanceCriteria)
fmt.Printf("\n\n\nFetching instance details for region: %s with criteria: %s**\n ", region, instanceCriteria)
if len(result.Reservations) == 0 {
fmt.Printf("There is no instance for the for region %s with the matching Criteria:%s \n", region, instanceCriteria)
fmt.Printf("There is no instance for the region: %s with the matching criteria:%s \n", region, instanceCriteria)
}
for _, reservation := range result.Reservations {
+26
View File
@@ -0,0 +1,26 @@
# Example
sync will upload a given directory to Amazon S3 using the upload iterator interface defined in the
s3manager package. This example uses a path that is specified during runtime to walk and build keys
to upload to Amazon S3. It will use the keys to upload the files/folders to Amazon S3.
# Usage
```sh
sync <params>
-region <region> // required
-bucket <bucket> // required
-path <path> // required
```
```sh
go run -tags example sync.go
-region <region> // required
-bucket <bucket> // required
-path <path> // required
```
Output:
```
success
```
+112
View File
@@ -0,0 +1,112 @@
// +build example
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// SyncFolderIterator is used to upload a given folder to Amazon S3. It is
// consumed by s3manager's UploadWithIterator (see main), which repeatedly
// calls Next and UploadObject to drain the pending file list, then Err.
type SyncFolderIterator struct {
	bucket    string     // destination S3 bucket name
	fileInfos []fileInfo // files still to be uploaded; consumed from the front
	err       error      // most recent error recorded (set when os.Open fails in UploadObject)
}
// fileInfo pairs the S3 object key for an upload with the local filesystem
// path of the file it was derived from.
type fileInfo struct {
	key      string // S3 object key: the local path with the walk root trimmed off
	fullpath string // local path, used to open the file at upload time
}
// NewSyncFolderIterator will walk the path, and store the key and full path
// of each regular file to be uploaded. It returns a new SyncFolderIterator
// with the data gathered from walking the path. The object key for each file
// is its path with the walk root trimmed off.
//
// Any error from filepath.Walk is recorded on the iterator and surfaced via
// Err. Previously the WalkFunc ignored its err argument, which would cause a
// nil-pointer dereference on info whenever an entry could not be visited, and
// the error returned by Walk itself was silently discarded.
func NewSyncFolderIterator(path, bucket string) *SyncFolderIterator {
	metadata := []fileInfo{}
	err := filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			// Propagate walk errors (unreadable directory, stat failure);
			// info is nil in this case and must not be dereferenced.
			return err
		}
		if !info.IsDir() {
			key := strings.TrimPrefix(p, path)
			metadata = append(metadata, fileInfo{key, p})
		}
		return nil
	})
	return &SyncFolderIterator{
		bucket:    bucket,
		fileInfos: metadata,
		err:       err,
	}
}
// Next reports whether there are any files remaining to be uploaded. It is
// part of the iterator contract consumed by UploadWithIterator.
func (iter *SyncFolderIterator) Next() bool {
	remaining := len(iter.fileInfos)
	return remaining != 0
}
// Err returns the error recorded on the iterator, if any. An error is stored
// when os.Open fails in UploadObject (or, if fixed upstream, when walking the
// folder); callers should check Err once iteration has finished (see main).
func (iter *SyncFolderIterator) Err() error {
	return iter.err
}
// UploadObject preps the next upload by opening the file at the front of the
// pending queue and constructing a new s3manager.UploadInput for it.
//
// The queue entry is consumed even when opening fails: the error is recorded
// (retrievable via Err) but a BatchUploadObject holding a nil Body is still
// returned. NOTE(review): this hands the uploader an upload with a nil body —
// confirm the SDK's batch uploader tolerates that, or skip the entry instead.
func (iter *SyncFolderIterator) UploadObject() s3manager.BatchUploadObject {
	// Pop the first pending file.
	fi := iter.fileInfos[0]
	iter.fileInfos = iter.fileInfos[1:]

	body, err := os.Open(fi.fullpath)
	if err != nil {
		// Record the failure; note this overwrites any earlier error.
		iter.err = err
	}

	input := s3manager.UploadInput{
		Bucket: &iter.bucket,
		Key:    &fi.key,
		Body:   body,
	}

	// Positional literal: first field is the upload input, second the
	// post-upload callback (none here).
	return s3manager.BatchUploadObject{
		&input,
		nil,
	}
}
// Upload a directory to a given bucket
//
// Usage:
// sync <params>
//	-region <region> // required
//	-bucket <bucket> // required
//	-path  <path> // required
func main() {
	bucketPtr := flag.String("bucket", "", "bucket to upload to")
	regionPtr := flag.String("region", "", "region to be used when making requests")
	pathPtr := flag.String("path", "", "path of directory to be synced")
	flag.Parse()

	// All three parameters are required; fail fast with usage instead of
	// attempting doomed API calls.
	if *bucketPtr == "" || *regionPtr == "" || *pathPtr == "" {
		flag.Usage()
		os.Exit(1)
	}

	// session.Must(session.NewSession(...)) surfaces session construction
	// errors; the previously used session.New is deprecated and swallows
	// them. This also matches the other examples in this commit.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: regionPtr,
	}))
	uploader := s3manager.NewUploader(sess)

	iter := NewSyncFolderIterator(*pathPtr, *bucketPtr)
	if err := uploader.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
		fmt.Fprintf(os.Stderr, "unexpected error has occurred: %v\n", err)
		os.Exit(1)
	}

	// Surface any error the iterator recorded while opening files; do not
	// report success if one occurred.
	if err := iter.Err(); err != nil {
		fmt.Fprintf(os.Stderr, "unexpected error occurred during file walking: %v\n", err)
		os.Exit(1)
	}

	fmt.Println("Success")
}