mirror of
https://github.com/aptly-dev/aptly.git
synced 2026-05-06 22:18:28 +00:00
Update vendored deps, including AWS SDK, openpgp, ftp, ...
This commit is contained in:
+2
-2
@@ -8,10 +8,10 @@ This is an example using the AWS SDK for Go to list ec2 instances instance state
|
||||
|
||||
|
||||
```sh
|
||||
# To fetch the stopped instance of all region use below:
|
||||
# To fetch the stopped and running instances of all region use below:
|
||||
./filter_ec2_by_region --state running --state stopped
|
||||
|
||||
# To fetch the stopped and running instance for region us-west-1 and eu-west-1 use below:
|
||||
# To fetch the stopped and running instances for region us-west-1 and eu-west-1 use below:
|
||||
./filter_ec2_by_region --state running --state stopped --region us-west-1 --region=eu-west-1
|
||||
```
|
||||
|
||||
|
||||
Generated
Vendored
+2
-2
@@ -60,9 +60,9 @@ func main() {
|
||||
if err != nil {
|
||||
fmt.Println("Error", err)
|
||||
} else {
|
||||
fmt.Printf("\n\n\nFetching instace details for region: %s with criteria: %s**\n ", region, instanceCriteria)
|
||||
fmt.Printf("\n\n\nFetching instance details for region: %s with criteria: %s**\n ", region, instanceCriteria)
|
||||
if len(result.Reservations) == 0 {
|
||||
fmt.Printf("There is no instance for the for region %s with the matching Criteria:%s \n", region, instanceCriteria)
|
||||
fmt.Printf("There is no instance for the region: %s with the matching criteria:%s \n", region, instanceCriteria)
|
||||
}
|
||||
for _, reservation := range result.Reservations {
|
||||
|
||||
|
||||
+26
@@ -0,0 +1,26 @@
|
||||
# Example
|
||||
|
||||
sync will upload a given directory to Amazon S3 using the upload iterator interface defined in the
|
||||
s3manager package. This example uses a path that is specified during runtime to walk and build keys
|
||||
to upload to Amazon S3. It will use the keys to upload the files/folders to Amazon S3.
|
||||
|
||||
# Usage
|
||||
|
||||
```sh
|
||||
sync <params>
|
||||
-region <region> // required
|
||||
-bucket <bucket> // required
|
||||
-path <path> // required
|
||||
```
|
||||
|
||||
```sh
|
||||
go run -tags example sync.go
|
||||
-region <region> // required
|
||||
-bucket <bucket> // required
|
||||
-path <path> // required
|
||||
```
|
||||
|
||||
Output:
|
||||
```
|
||||
success
|
||||
```
|
||||
+112
@@ -0,0 +1,112 @@
|
||||
// +build example
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
)
|
||||
|
||||
// SyncFolderIterator is used to upload a given folder
// to Amazon S3.
//
// It is consumed by s3manager's batch uploader via UploadWithIterator
// (see main), using the Next/Err/UploadObject methods defined below.
type SyncFolderIterator struct {
	bucket    string     // destination S3 bucket name
	fileInfos []fileInfo // files still pending upload; consumed front-to-back by UploadObject
	err       error      // last error recorded while preparing an upload (see Err)
}
|
||||
|
||||
// fileInfo pairs the S3 object key with the local path of a file found
// while walking the sync directory.
type fileInfo struct {
	key      string // S3 object key (local path with the walked root trimmed off)
	fullpath string // local filesystem path, used to open the file for upload
}
|
||||
|
||||
// NewSyncFolderIterator will walk the path, and store the key and full path
|
||||
// of the object to be uploaded. This will return a new SyncFolderIterator
|
||||
// with the data provided from walking the path.
|
||||
func NewSyncFolderIterator(path, bucket string) *SyncFolderIterator {
|
||||
metadata := []fileInfo{}
|
||||
filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
|
||||
if !info.IsDir() {
|
||||
key := strings.TrimPrefix(p, path)
|
||||
metadata = append(metadata, fileInfo{key, p})
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return &SyncFolderIterator{
|
||||
bucket,
|
||||
metadata,
|
||||
nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Next will determine whether or not there is any remaining files to
|
||||
// be uploaded.
|
||||
func (iter *SyncFolderIterator) Next() bool {
|
||||
return len(iter.fileInfos) > 0
|
||||
}
|
||||
|
||||
// Err returns any error when os.Open is called.
//
// A non-nil value indicates at least one file could not be prepared for
// upload; callers should check it after the batch upload completes.
func (iter *SyncFolderIterator) Err() error {
	return iter.err
}
|
||||
|
||||
// UploadObject will prep the new upload object by open that file and constructing a new
|
||||
// s3manager.UploadInput.
|
||||
func (iter *SyncFolderIterator) UploadObject() s3manager.BatchUploadObject {
|
||||
fi := iter.fileInfos[0]
|
||||
iter.fileInfos = iter.fileInfos[1:]
|
||||
body, err := os.Open(fi.fullpath)
|
||||
if err != nil {
|
||||
iter.err = err
|
||||
}
|
||||
|
||||
input := s3manager.UploadInput{
|
||||
Bucket: &iter.bucket,
|
||||
Key: &fi.key,
|
||||
Body: body,
|
||||
}
|
||||
|
||||
return s3manager.BatchUploadObject{
|
||||
&input,
|
||||
nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Upload a directory to a given bucket
|
||||
//
|
||||
// Usage:
|
||||
// sync <params>
|
||||
// -region <region> // required
|
||||
// -bucket <bucket> // required
|
||||
// -path <path> // required
|
||||
func main() {
|
||||
bucketPtr := flag.String("bucket", "", "bucket to upload to")
|
||||
regionPtr := flag.String("region", "", "region to be used when making requests")
|
||||
pathPtr := flag.String("path", "", "path of directory to be synced")
|
||||
flag.Parse()
|
||||
|
||||
sess := session.New(&aws.Config{
|
||||
Region: regionPtr,
|
||||
})
|
||||
uploader := s3manager.NewUploader(sess)
|
||||
|
||||
iter := NewSyncFolderIterator(*pathPtr, *bucketPtr)
|
||||
if err := uploader.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "unexpected error has occured: %v", err)
|
||||
}
|
||||
|
||||
if err := iter.Err(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "unexpected error occured during file walking: %v", err)
|
||||
}
|
||||
|
||||
fmt.Println("Success")
|
||||
}
|
||||
Reference in New Issue
Block a user