First version of PublishedStorage for S3. #15

Andrey Smirnov
2014-07-17 18:05:38 +04:00
parent b260b0010a
commit c54406e29f
6 changed files with 327 additions and 2 deletions

Gomfile

@@ -10,6 +10,7 @@ gom 'github.com/syndtr/goleveldb/leveldb', :commit => '9888007'
 gom 'github.com/ugorji/go/codec', :commit => '71c2886f5a673a35f909803f38ece5810165097b'
 gom 'github.com/wsxiaoys/terminal/color', :commit => '5668e431776a7957528361f90ce828266c69ed08'
 gom 'code.google.com/p/mxk/go1/flowcontrol', :commit => '5ff2502e2556'
+gom 'github.com/mitchellh/goamz/s3', :commit => '3c2b519fc7544563e5a2031cc8bb04b567b1b9bc'
 group :test do
 gom 'launchpad.net/gocheck'

Makefile

@@ -1,6 +1,6 @@
 GOVERSION=$(shell go version | awk '{print $$3;}')
-PACKAGES=database deb files http query utils
-ALL_PACKAGES=aptly cmd console database deb files http query utils
+PACKAGES=database deb files http query s3 utils
+ALL_PACKAGES=aptly cmd console database deb files http query s3 utils
 BINPATH=$(abspath ./_vendor/bin)
 GOM_ENVIRONMENT=-test
 PYTHON?=python

s3/public.go (new file, +195 lines)

@@ -0,0 +1,195 @@
package s3

import (
"fmt"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/files"
"os"
"path/filepath"
"strings"
)

// PublishedStorage abstracts a file system with published files (actually hosted on S3)
type PublishedStorage struct {
s3 *s3.S3
bucket *s3.Bucket
acl s3.ACL
prefix string
}

// Check interface
var (
_ aptly.PublishedStorage = (*PublishedStorage)(nil)
)
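
// NewPublishedStorageRaw creates a PublishedStorage from pre-built aws.Auth and
// aws.Region values; an empty defaultACL falls back to "private"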
func NewPublishedStorageRaw(auth aws.Auth, region aws.Region, bucket, defaultACL, prefix string) (*PublishedStorage, error) {
if defaultACL == "" {
defaultACL = "private"
}
result := &PublishedStorage{s3: s3.New(auth, region), acl: s3.ACL(defaultACL), prefix: prefix}
result.bucket = result.s3.Bucket(bucket)
return result, nil
}

// NewPublishedStorage creates a new instance of PublishedStorage with the given S3
// access keys, region and bucket name
func NewPublishedStorage(accessKey, secretKey, region, bucket, defaultACL, prefix string) (*PublishedStorage, error) {
auth, err := aws.GetAuth(accessKey, secretKey)
if err != nil {
return nil, err
}
awsRegion, ok := aws.Regions[region]
if !ok {
return nil, fmt.Errorf("unknown region: %#v", region)
}
return NewPublishedStorageRaw(auth, awsRegion, bucket, defaultACL, prefix)
}

// PublicPath returns the root of the public part of storage
func (storage *PublishedStorage) PublicPath() string {
panic("never would be implemented")
}

// MkDir creates directory recursively under public path
func (storage *PublishedStorage) MkDir(path string) error {
// no op for S3
return nil
}

// PutFile puts file into published storage at specified path
func (storage *PublishedStorage) PutFile(path string, sourceFilename string) error {
var (
source *os.File
err error
fi os.FileInfo
)
source, err = os.Open(sourceFilename)
if err != nil {
return err
}
defer source.Close()
fi, err = source.Stat()
if err != nil {
return err
}
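// PutReader streams the file to S3 without reading it fully into memory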
return storage.bucket.PutReader(filepath.Join(storage.prefix, path), source, fi.Size(), "binary/octet-stream", storage.acl)
}

// Remove removes single file under public path
func (storage *PublishedStorage) Remove(path string) error {
return storage.bucket.Del(filepath.Join(storage.prefix, path))
}

// RemoveDirs removes directory structure under public path
func (storage *PublishedStorage) RemoveDirs(path string, progress aptly.Progress) error {
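// S3 multi-object delete accepts at most 1000 keys per request, so delete in batches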
const page = 1000
filelist, err := storage.Filelist(path)
if err != nil {
return err
}
numParts := (len(filelist) + page - 1) / page
for i := 0; i < numParts; i++ {
var part []string
if i == numParts-1 {
part = filelist[i*page:]
} else {
part = filelist[i*page : (i+1)*page]
}
paths := make([]string, len(part))
for j := range part {
paths[j] = filepath.Join(storage.prefix, path, part[j])
}
err = storage.bucket.MultiDel(paths)
if err != nil {
return err
}
}
return nil
}

// LinkFromPool links package file from pool to dist's pool location
//
// publishedDirectory is desired location in pool (like prefix/pool/component/liba/libav/)
// sourcePool is instance of aptly.PackagePool
// sourcePath is filepath to package file in package pool
//
// For S3 this means uploading the file, unless it is already published with the same MD5
func (storage *PublishedStorage) LinkFromPool(publishedDirectory string, sourcePool aptly.PackagePool, sourcePath, sourceMD5 string) error {
// verify that package pool is local pool in filesystem
_ = sourcePool.(*files.PackagePool)
baseName := filepath.Base(sourcePath)
relPath := filepath.Join(publishedDirectory, baseName)
poolPath := filepath.Join(storage.prefix, relPath)
var (
dstKey *s3.Key
err error
)
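// Look up the destination key's metadata; a missing object surfaces as *s3.Error with StatusCode 404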
dstKey, err = storage.bucket.GetKey(poolPath)
if err != nil {
if s3err, ok := err.(*s3.Error); !ok || s3err.StatusCode != 404 {
return err
}
} else {
// S3 returns the MD5 ETag wrapped in double quotes, so strip them before comparing
if strings.Trim(dstKey.ETag, "\"") == sourceMD5 {
return nil
}
}
return storage.PutFile(relPath, sourcePath)
}

// Filelist returns list of files under prefix
func (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {
result := []string{}
marker := ""
prefix = filepath.Join(storage.prefix, prefix)
if prefix != "" {
prefix += "/"
}
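// List returns at most 1000 keys per call, so page through the results using the marker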
for {
contents, err := storage.bucket.List(prefix, "", marker, 1000)
if err != nil {
return nil, err
}
lastKey := ""
for _, key := range contents.Contents {
if prefix == "" {
result = append(result, key.Key)
} else {
result = append(result, key.Key[len(prefix):])
}
lastKey = key.Key
}
if contents.IsTruncated {
marker = contents.NextMarker
if marker == "" {
// From the S3 docs: if the response is truncated and does not include
// NextMarker, use the value of the last Key in the response as the
// marker in the subsequent request to get the next set of object keys.
marker = lastKey
}
} else {
break
}
}
return result, nil
}

// RenameFile renames (moves) file
func (storage *PublishedStorage) RenameFile(oldName, newName string) error {
panic("not implemented yet")
}
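
For illustration only, a minimal sketch of how the new storage could be wired up;
the credentials, region, bucket name, and prefix below are hypothetical
placeholders, not values from this commit:

package main

import (
	"log"

	"github.com/smira/aptly/s3"
)

func main() {
	// All parameters here are example values.
	storage, err := s3.NewPublishedStorage("ACCESS-KEY", "SECRET-KEY",
		"us-east-1", "my-repo-bucket", "public-read", "debian")
	if err != nil {
		log.Fatal(err)
	}

	// Files land under the prefix, e.g. s3://my-repo-bucket/debian/dists/wheezy/Release
	if err := storage.PutFile("dists/wheezy/Release", "/tmp/Release"); err != nil {
		log.Fatal(err)
	}
}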

s3/public_test.go (new file, +116 lines)

@@ -0,0 +1,116 @@
package s3

import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3/s3test"
"io/ioutil"
. "launchpad.net/gocheck"
"path/filepath"
)

type PublishedStorageSuite struct {
srv *s3test.Server
storage, prefixedStorage *PublishedStorage
}

var _ = Suite(&PublishedStorageSuite{})

func (s *PublishedStorageSuite) SetUpTest(c *C) {
var err error
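// s3test runs an in-process fake S3 server, so the tests need no real AWS credentials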
s.srv, err = s3test.NewServer(&s3test.Config{})
c.Assert(err, IsNil)
c.Assert(s.srv, NotNil)
auth, _ := aws.GetAuth("aa", "bb")
s.storage, err = NewPublishedStorageRaw(auth, aws.Region{Name: "test-1", S3Endpoint: s.srv.URL(), S3LocationConstraint: true}, "test", "", "")
c.Assert(err, IsNil)
s.prefixedStorage, err = NewPublishedStorageRaw(auth, aws.Region{Name: "test-1", S3Endpoint: s.srv.URL(), S3LocationConstraint: true}, "test", "", "lala")
c.Assert(err, IsNil)
err = s.storage.s3.Bucket("test").PutBucket("private")
c.Assert(err, IsNil)
}

func (s *PublishedStorageSuite) TearDownTest(c *C) {
s.srv.Quit()
}

func (s *PublishedStorageSuite) TestNewPublishedStorage(c *C) {
stor, err := NewPublishedStorage("aa", "bbb", "", "", "", "")
c.Check(stor, IsNil)
c.Check(err, ErrorMatches, "unknown region: .*")
}

func (s *PublishedStorageSuite) TestPutFile(c *C) {
dir := c.MkDir()
err := ioutil.WriteFile(filepath.Join(dir, "a"), []byte("welcome to s3!"), 0644)
c.Assert(err, IsNil)
err = s.storage.PutFile("a/b.txt", filepath.Join(dir, "a"))
c.Check(err, IsNil)
data, err := s.storage.bucket.Get("a/b.txt")
c.Check(err, IsNil)
c.Check(data, DeepEquals, []byte("welcome to s3!"))
err = s.prefixedStorage.PutFile("a/b.txt", filepath.Join(dir, "a"))
c.Check(err, IsNil)
data, err = s.storage.bucket.Get("lala/a/b.txt")
c.Check(err, IsNil)
c.Check(data, DeepEquals, []byte("welcome to s3!"))
}

func (s *PublishedStorageSuite) TestFilelist(c *C) {
paths := []string{"a", "b", "c", "testa", "test/a", "test/b", "lala/a", "lala/b", "lala/c"}
for _, path := range paths {
err := s.storage.bucket.Put(path, []byte("test"), "binary/octet-stream", "private")
c.Check(err, IsNil)
}
list, err := s.storage.Filelist("")
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{"a", "b", "c", "lala/a", "lala/b", "lala/c", "test/a", "test/b", "testa"})
list, err = s.storage.Filelist("test")
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{"a", "b"})
list, err = s.storage.Filelist("test2")
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{})
list, err = s.prefixedStorage.Filelist("")
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{"a", "b", "c"})
}

func (s *PublishedStorageSuite) TestRemove(c *C) {
err := s.storage.bucket.Put("a/b", []byte("test"), "binary/octet-stream", "private")
c.Check(err, IsNil)
err = s.storage.Remove("a/b")
c.Check(err, IsNil)
_, err = s.storage.bucket.Get("a/b")
c.Check(err, ErrorMatches, "The specified key does not exist.")
}

func (s *PublishedStorageSuite) TestRemoveDirs(c *C) {
c.Skip("multiple-delete not available in s3test")
paths := []string{"a", "b", "c", "testa", "test/a", "test/b", "lala/a", "lala/b", "lala/c"}
for _, path := range paths {
err := s.storage.bucket.Put(path, []byte("test"), "binary/octet-stream", "private")
c.Check(err, IsNil)
}
err := s.storage.RemoveDirs("test", nil)
c.Check(err, IsNil)
list, err := s.storage.Filelist("")
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{"a", "b", "c", "lala/a", "lala/b", "lala/c", "test/a", "test/b", "testa"})
}

s3/s3.go (new file, +2 lines)

@@ -0,0 +1,2 @@
// Package s3 handles publishing to S3
package s3

s3/s3_test.go (new file, +11 lines)

@@ -0,0 +1,11 @@
package s3

import (
. "launchpad.net/gocheck"
"testing"
)

// Launch gocheck tests
func Test(t *testing.T) {
TestingT(t)
}