Add support for Azure package pools

This adds support for storing packages directly on Azure, with no truly
"local" (on-disk) repo used. The existing Azure PublishedStorage
implementation was refactored to move the shared code to a separate
context struct, which can then be re-used by the new PackagePool. In
addition, the files package's mockChecksumStorage was made public so
that it could be used in the Azure PackagePool tests as well.

Signed-off-by: Ryan Gonzalez <ryan.gonzalez@collabora.com>
Ryan Gonzalez authored 2022-05-17 08:52:59 -05:00; committed by André Roth
parent 810df17009, commit f9325fbc91
16 changed files with 820 additions and 148 deletions
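As an illustration of the new API, a minimal usage sketch follows; "myaccount", "mykey", "packages", and /tmp/foo.deb are hypothetical placeholders, and the signatures all come from the diff below:

package main

import (
	"log"

	"github.com/aptly-dev/aptly/azure"
	"github.com/aptly-dev/aptly/files"
	"github.com/aptly-dev/aptly/utils"
)

func main() {
	// An empty endpoint defaults to https://<account>.blob.core.windows.net.
	pool, err := azure.NewPackagePool("myaccount", "mykey", "packages", "", "")
	if err != nil {
		log.Fatal(err)
	}

	// The mock checksum storage (exported by this commit) stands in for
	// aptly's database-backed implementation.
	cs := files.NewMockChecksumStorage()
	checksums := &utils.ChecksumInfo{}

	// Import uploads the .deb under its checksum-derived pool path and
	// fills in the full checksums.
	path, err := pool.Import("/tmp/foo.deb", "foo.deb", checksums, false, cs)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored as %s (sha256 %s)", path, checksums.SHA256)
}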

@@ -1,2 +1,128 @@
// Package azure handles publishing to Azure Storage
package azure
import (
"context"
"encoding/hex"
"fmt"
"io"
"net/url"
"path/filepath"
"time"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/aptly-dev/aptly/aptly"
)
func isBlobNotFound(err error) bool {
storageError, ok := err.(azblob.StorageError)
return ok && storageError.ServiceCode() == azblob.ServiceCodeBlobNotFound
}
type azContext struct {
container azblob.ContainerURL
prefix string
}
func newAzContext(accountName, accountKey, container, prefix, endpoint string) (*azContext, error) {
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
return nil, err
}
if endpoint == "" {
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
}
url, err := url.Parse(fmt.Sprintf("%s/%s", endpoint, container))
if err != nil {
return nil, err
}
containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
result := &azContext{
container: containerURL,
prefix: prefix,
}
return result, nil
}
func (az *azContext) blobPath(path string) string {
return filepath.Join(az.prefix, path)
}
func (az *azContext) blobURL(path string) azblob.BlobURL {
return az.container.NewBlobURL(az.blobPath(path))
}
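// internalFilelist lists every blob under the given prefix, returning paths
// relative to that prefix along with their hex-encoded MD5 sums; it pages
// through the results and ticks the progress bar once per page.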
func (az *azContext) internalFilelist(prefix string, progress aptly.Progress) (paths []string, md5s []string, err error) {
const delimiter = "/"
paths = make([]string, 0, 1024)
md5s = make([]string, 0, 1024)
prefix = filepath.Join(az.prefix, prefix)
if prefix != "" {
prefix += delimiter
}
for marker := (azblob.Marker{}); marker.NotDone(); {
listBlob, err := az.container.ListBlobsFlatSegment(
context.Background(), marker, azblob.ListBlobsSegmentOptions{
Prefix: prefix,
MaxResults: 1000,
Details: azblob.BlobListingDetails{Metadata: true}})
if err != nil {
return nil, nil, fmt.Errorf("error listing under prefix %s in %s: %s", prefix, az, err)
}
marker = listBlob.NextMarker
for _, blob := range listBlob.Segment.BlobItems {
if prefix == "" {
paths = append(paths, blob.Name)
} else {
paths = append(paths, blob.Name[len(prefix):])
}
md5s = append(md5s, fmt.Sprintf("%x", blob.Properties.ContentMD5))
}
if progress != nil {
progress.AddBar(1)
}
}
return paths, md5s, nil
}
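// putFile streams source into the given blob as a block blob; when a hex
// MD5 is supplied, it is recorded as the blob's Content-MD5 header.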
func (az *azContext) putFile(blob azblob.BlobURL, source io.Reader, sourceMD5 string) error {
uploadOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: 4 * 1024 * 1024,
MaxBuffers: 8,
}
if len(sourceMD5) > 0 {
decodedMD5, err := hex.DecodeString(sourceMD5)
if err != nil {
return err
}
uploadOptions.BlobHTTPHeaders = azblob.BlobHTTPHeaders{
ContentMD5: decodedMD5,
}
}
_, err := azblob.UploadStreamToBlockBlob(
context.Background(),
source,
blob.ToBlockBlobURL(),
uploadOptions,
)
return err
}
// String
func (az *azContext) String() string {
return fmt.Sprintf("Azure: %s/%s", az.container, az.prefix)
}

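Because newAzContext takes an explicit endpoint, the same context can be pointed at a local emulator during development. A sketch from inside the azure package (newAzContext is unexported), assuming Azurite's published development account and key; newLocalContext and the container name are hypothetical:

func newLocalContext() (*azContext, error) {
	// Azurite's well-known development account and key; the container name
	// is hypothetical. An empty endpoint would target real Azure instead.
	return newAzContext(
		"devstoreaccount1",
		"Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
		"aptly-test",
		"",
		"http://127.0.0.1:10000/devstoreaccount1",
	)
}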
azure/package_pool.go

@@ -0,0 +1,219 @@
package azure
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/aptly-dev/aptly/aptly"
"github.com/aptly-dev/aptly/utils"
"github.com/pkg/errors"
)
type PackagePool struct {
az *azContext
}
// Check interface
var (
_ aptly.PackagePool = (*PackagePool)(nil)
)
// NewPackagePool creates a package pool from Azure storage credentials
func NewPackagePool(accountName, accountKey, container, prefix, endpoint string) (*PackagePool, error) {
azctx, err := newAzContext(accountName, accountKey, container, prefix, endpoint)
if err != nil {
return nil, err
}
return &PackagePool{az: azctx}, nil
}
// String
func (pool *PackagePool) String() string {
return pool.az.String()
}
func (pool *PackagePool) buildPoolPath(filename string, checksums *utils.ChecksumInfo) string {
hash := checksums.SHA256
// Use the same path as the file pool, for compat reasons.
return filepath.Join(hash[0:2], hash[2:4], hash[4:32]+"_"+filename)
}
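// ensureChecksums returns the stored checksums for poolPath, computing them
// from the blob's contents and caching them in checksumStorage when missing;
// it returns nil without an error if the blob does not exist.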
func (pool *PackagePool) ensureChecksums(
poolPath string,
checksumStorage aptly.ChecksumStorage,
) (*utils.ChecksumInfo, error) {
targetChecksums, err := checksumStorage.Get(poolPath)
if err != nil {
return nil, err
}
if targetChecksums == nil {
// we don't have checksums stored yet for this file
blob := pool.az.blobURL(poolPath)
download, err := blob.Download(context.Background(), 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
if err != nil {
if isBlobNotFound(err) {
return nil, nil
}
return nil, errors.Wrapf(err, "error downloading blob at %s", poolPath)
}
targetChecksums = &utils.ChecksumInfo{}
*targetChecksums, err = utils.ChecksumsForReader(download.Body(azblob.RetryReaderOptions{}))
if err != nil {
return nil, errors.Wrapf(err, "error checksumming blob at %s", poolPath)
}
err = checksumStorage.Update(poolPath, targetChecksums)
if err != nil {
return nil, err
}
}
return targetChecksums, nil
}
func (pool *PackagePool) FilepathList(progress aptly.Progress) ([]string, error) {
if progress != nil {
progress.InitBar(0, false, aptly.BarGeneralBuildFileList)
defer progress.ShutdownBar()
}
paths, _, err := pool.az.internalFilelist("", progress)
return paths, err
}
func (pool *PackagePool) LegacyPath(filename string, checksums *utils.ChecksumInfo) (string, error) {
return "", errors.New("Azure package pool does not support legacy paths")
}
func (pool *PackagePool) Size(path string) (int64, error) {
blob := pool.az.blobURL(path)
props, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
if err != nil {
return 0, errors.Wrapf(err, "error examining %s from %s", path, pool)
}
return props.ContentLength(), nil
}
func (pool *PackagePool) Open(path string) (aptly.ReadSeekerCloser, error) {
blob := pool.az.blobURL(path)
temp, err := ioutil.TempFile("", "blob-download")
if err != nil {
return nil, errors.Wrap(err, "error creating temporary file for blob download")
}
defer os.Remove(temp.Name())
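// The temp file is unlinked as soon as Open returns; the open handle keeps
// the downloaded contents readable until the caller closes it (POSIX semantics).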
err = azblob.DownloadBlobToFile(context.Background(), blob, 0, 0, temp, azblob.DownloadFromBlobOptions{})
if err != nil {
return nil, errors.Wrapf(err, "error downloading blob at %s", path)
}
return temp, nil
}
func (pool *PackagePool) Remove(path string) (int64, error) {
blob := pool.az.blobURL(path)
props, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
if err != nil {
return 0, errors.Wrapf(err, "error getting props of %s from %s", path, pool)
}
_, err = blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
if err != nil {
return 0, errors.Wrapf(err, "error deleting %s from %s", path, pool)
}
return props.ContentLength(), nil
}
func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.ChecksumInfo, move bool, checksumStorage aptly.ChecksumStorage) (string, error) {
if checksums.MD5 == "" || checksums.SHA256 == "" || checksums.SHA512 == "" {
// need to update checksums; MD5, SHA256 and SHA512 should always be defined
var err error
*checksums, err = utils.ChecksumsForFile(srcPath)
if err != nil {
return "", err
}
}
path := pool.buildPoolPath(basename, checksums)
blob := pool.az.blobURL(path)
targetChecksums, err := pool.ensureChecksums(path, checksumStorage)
if err != nil {
return "", err
} else if targetChecksums != nil {
// target already exists
*checksums = *targetChecksums
return path, nil
}
source, err := os.Open(srcPath)
if err != nil {
return "", err
}
defer source.Close()
err = pool.az.putFile(blob, source, checksums.MD5)
if err != nil {
return "", err
}
if !checksums.Complete() {
// need full checksums here
*checksums, err = utils.ChecksumsForFile(srcPath)
if err != nil {
return "", err
}
}
err = checksumStorage.Update(path, checksums)
if err != nil {
return "", err
}
return path, nil
}
func (pool *PackagePool) Verify(poolPath, basename string, checksums *utils.ChecksumInfo, checksumStorage aptly.ChecksumStorage) (string, bool, error) {
if poolPath == "" {
if checksums.SHA256 != "" {
poolPath = pool.buildPoolPath(basename, checksums)
} else {
// No checksums or pool path, so no idea what file to look for.
return "", false, nil
}
}
size, err := pool.Size(poolPath)
if err != nil {
return "", false, err
} else if size != checksums.Size {
return "", false, nil
}
targetChecksums, err := pool.ensureChecksums(poolPath, checksumStorage)
if err != nil {
return "", false, err
} else if targetChecksums == nil {
return "", false, nil
}
if checksums.MD5 != "" && targetChecksums.MD5 != checksums.MD5 ||
checksums.SHA256 != "" && targetChecksums.SHA256 != checksums.SHA256 {
// wrong file?
return "", false, nil
}
// fill back checksums
*checksums = *targetChecksums
return poolPath, true, nil
}

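The pool layout deliberately matches aptly's local file pool: the SHA256 is split into 2/2/28 hex characters and joined with the basename. A worked sketch of what buildPoolPath computes, using the fixture checksum from the tests below:

hash := "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12"
// buildPoolPath slices the hash as hash[0:2]/hash[2:4]/hash[4:32]_basename:
path := filepath.Join(hash[0:2], hash[2:4], hash[4:32]+"_"+"a.deb")
// path == "c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb"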
azure/package_pool_test.go

@@ -0,0 +1,255 @@
package azure
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/aptly-dev/aptly/aptly"
"github.com/aptly-dev/aptly/files"
"github.com/aptly-dev/aptly/utils"
. "gopkg.in/check.v1"
)
type PackagePoolSuite struct {
accountName, accountKey, endpoint string
pool, prefixedPool *PackagePool
debFile string
cs aptly.ChecksumStorage
}
var _ = Suite(&PackagePoolSuite{})
func (s *PackagePoolSuite) SetUpSuite(c *C) {
s.accountName = os.Getenv("AZURE_STORAGE_ACCOUNT")
if s.accountName == "" {
println("Please set the the following two environment variables to run the Azure storage tests.")
println(" 1. AZURE_STORAGE_ACCOUNT")
println(" 2. AZURE_STORAGE_ACCESS_KEY")
c.Skip("AZURE_STORAGE_ACCOUNT not set.")
}
s.accountKey = os.Getenv("AZURE_STORAGE_ACCESS_KEY")
if s.accountKey == "" {
println("Please set the the following two environment variables to run the Azure storage tests.")
println(" 1. AZURE_STORAGE_ACCOUNT")
println(" 2. AZURE_STORAGE_ACCESS_KEY")
c.Skip("AZURE_STORAGE_ACCESS_KEY not set.")
}
s.endpoint = os.Getenv("AZURE_STORAGE_ENDPOINT")
}
func (s *PackagePoolSuite) SetUpTest(c *C) {
container := randContainer()
prefix := "lala"
var err error
s.pool, err = NewPackagePool(s.accountName, s.accountKey, container, "", s.endpoint)
c.Assert(err, IsNil)
cnt := s.pool.az.container
_, err = cnt.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer)
c.Assert(err, IsNil)
s.prefixedPool, err = NewPackagePool(s.accountName, s.accountKey, container, prefix, s.endpoint)
c.Assert(err, IsNil)
_, _File, _, _ := runtime.Caller(0)
s.debFile = filepath.Join(filepath.Dir(_File), "../system/files/libboost-program-options-dev_1.49.0.1_i386.deb")
s.cs = files.NewMockChecksumStorage()
}
func (s *PackagePoolSuite) TestFilepathList(c *C) {
list, err := s.pool.FilepathList(nil)
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{})
s.pool.Import(s.debFile, "a.deb", &utils.ChecksumInfo{}, false, s.cs)
s.pool.Import(s.debFile, "b.deb", &utils.ChecksumInfo{}, false, s.cs)
list, err = s.pool.FilepathList(nil)
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{
"c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb",
"c7/6b/4bd12fd92e4dfe1b55b18a67a669_b.deb",
})
}
func (s *PackagePoolSuite) TestRemove(c *C) {
s.pool.Import(s.debFile, "a.deb", &utils.ChecksumInfo{}, false, s.cs)
s.pool.Import(s.debFile, "b.deb", &utils.ChecksumInfo{}, false, s.cs)
size, err := s.pool.Remove("c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb")
c.Check(err, IsNil)
c.Check(size, Equals, int64(2738))
_, err = s.pool.Remove("c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb")
c.Check(err, ErrorMatches, "(.|\n)*BlobNotFound(.|\n)*")
list, err := s.pool.FilepathList(nil)
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{"c7/6b/4bd12fd92e4dfe1b55b18a67a669_b.deb"})
}
func (s *PackagePoolSuite) TestImportOk(c *C) {
var checksum utils.ChecksumInfo
path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
// SHA256 should be automatically calculated
c.Check(checksum.SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
// checksum storage is filled with new checksum
c.Check(s.cs.(*files.MockChecksumStorage).Store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
size, err := s.pool.Size(path)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(2738))
// import as different name
checksum = utils.ChecksumInfo{}
path, err = s.pool.Import(s.debFile, "some.deb", &checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_some.deb")
// checksum storage is filled with new checksum
c.Check(s.cs.(*files.MockChecksumStorage).Store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
// double import, should be ok
checksum = utils.ChecksumInfo{}
path, err = s.pool.Import(s.debFile, filepath.Base(s.debFile), &checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
// checksum is filled back based on checksum storage
c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// clear checksum storage, and do double-import
delete(s.cs.(*files.MockChecksumStorage).Store, path)
checksum = utils.ChecksumInfo{}
path, err = s.pool.Import(s.debFile, filepath.Base(s.debFile), &checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
// checksum is filled back based on re-calculation of file in the pool
c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// import under new name, but with path-relevant checksums already filled in
checksum = utils.ChecksumInfo{SHA256: checksum.SHA256}
path, err = s.pool.Import(s.debFile, "other.deb", &checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_other.deb")
// checksum is filled back based on re-calculation of source file
c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
}
func (s *PackagePoolSuite) TestVerify(c *C) {
// file doesn't exist yet
ppath, exists, err := s.pool.Verify("", filepath.Base(s.debFile), &utils.ChecksumInfo{}, s.cs)
c.Check(ppath, Equals, "")
c.Check(err, IsNil)
c.Check(exists, Equals, false)
// import file
checksum := utils.ChecksumInfo{}
path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
// check existence
ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &checksum, s.cs)
c.Check(ppath, Equals, path)
c.Check(err, IsNil)
c.Check(exists, Equals, true)
c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// check existence with fixed path
checksum = utils.ChecksumInfo{Size: checksum.Size}
ppath, exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &checksum, s.cs)
c.Check(ppath, Equals, path)
c.Check(err, IsNil)
c.Check(exists, Equals, true)
c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// check existence, but with checksums missing (that aren't needed to find the path)
checksum.SHA512 = ""
ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &checksum, s.cs)
c.Check(ppath, Equals, path)
c.Check(err, IsNil)
c.Check(exists, Equals, true)
// checksum is filled back based on checksum storage
c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// check existence, with missing checksum info but correct path and size available
checksum = utils.ChecksumInfo{Size: checksum.Size}
ppath, exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &checksum, s.cs)
c.Check(ppath, Equals, path)
c.Check(err, IsNil)
c.Check(exists, Equals, true)
// checksum is filled back based on checksum storage
c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// check existence, with wrong checksum info but correct path and size available
ppath, exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &utils.ChecksumInfo{
SHA256: "abc",
Size: checksum.Size,
}, s.cs)
c.Check(ppath, Equals, "")
c.Check(err, IsNil)
c.Check(exists, Equals, false)
// check existence, with missing checksums (that aren't needed to find the path)
// and no info in checksum storage
delete(s.cs.(*files.MockChecksumStorage).Store, path)
checksum.SHA512 = ""
ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &checksum, s.cs)
c.Check(ppath, Equals, path)
c.Check(err, IsNil)
c.Check(exists, Equals, true)
// checksum is filled back based on re-calculation
c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// check existence, with wrong size
checksum = utils.ChecksumInfo{Size: 13455}
ppath, exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &checksum, s.cs)
c.Check(ppath, Equals, "")
c.Check(err, IsNil)
c.Check(exists, Equals, false)
// check existence, with empty checksum info
ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &utils.ChecksumInfo{}, s.cs)
c.Check(ppath, Equals, "")
c.Check(err, IsNil)
c.Check(exists, Equals, false)
}
func (s *PackagePoolSuite) TestImportNotExist(c *C) {
_, err := s.pool.Import("no-such-file", "a.deb", &utils.ChecksumInfo{}, false, s.cs)
c.Check(err, ErrorMatches, ".*no such file or directory")
}
func (s *PackagePoolSuite) TestSize(c *C) {
path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &utils.ChecksumInfo{}, false, s.cs)
c.Check(err, IsNil)
size, err := s.pool.Size(path)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(2738))
_, err = s.pool.Size("do/es/ntexist")
c.Check(err, ErrorMatches, "(.|\n)*BlobNotFound(.|\n)*")
}
func (s *PackagePoolSuite) TestOpen(c *C) {
path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &utils.ChecksumInfo{}, false, s.cs)
c.Check(err, IsNil)
f, err := s.pool.Open(path)
c.Assert(err, IsNil)
contents, err := ioutil.ReadAll(f)
c.Assert(err, IsNil)
c.Check(len(contents), Equals, 2738)
c.Check(f.Close(), IsNil)
_, err = s.pool.Open("do/es/ntexist")
c.Check(err, ErrorMatches, "(.|\n)*BlobNotFound(.|\n)*")
}

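Finally, since this commit exports the files package's mock checksum storage, other packages' tests can drive it directly. A short sketch of the pattern the tests above rely on (the pool path and checksum are the test fixture's values):

var cs aptly.ChecksumStorage = files.NewMockChecksumStorage()

// Seed an entry through the normal interface...
poolPath := "c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb"
if err := cs.Update(poolPath, &utils.ChecksumInfo{
	SHA256: "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12",
}); err != nil {
	panic(err)
}

// ...and reach into the exported Store map to simulate a missing entry,
// as TestVerify does above.
delete(cs.(*files.MockChecksumStorage).Store, poolPath)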
@@ -2,11 +2,8 @@ package azure
import (
"context"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"time"
@@ -21,6 +18,7 @@ import (
type PublishedStorage struct {
container azblob.ContainerURL
prefix string
az *azContext
pathCache map[string]map[string]string
}
@@ -31,33 +29,17 @@ var (
// NewPublishedStorage creates published storage from Azure storage credentials
func NewPublishedStorage(accountName, accountKey, container, prefix, endpoint string) (*PublishedStorage, error) {
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
azctx, err := newAzContext(accountName, accountKey, container, prefix, endpoint)
if err != nil {
return nil, err
}
if endpoint == "" {
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
}
url, err := url.Parse(fmt.Sprintf("%s/%s", endpoint, container))
if err != nil {
return nil, err
}
containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
result := &PublishedStorage{
container: containerURL,
prefix: prefix,
}
return result, nil
return &PublishedStorage{az: azctx}, nil
}
// String
func (storage *PublishedStorage) String() string {
return fmt.Sprintf("Azure: %s/%s", storage.container, storage.prefix)
return storage.az.String()
}
// MkDir creates directory recursively under public path
@@ -84,7 +66,7 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
}
defer source.Close()
err = storage.putFile(path, source, sourceMD5)
err = storage.az.putFile(storage.az.blobURL(path), source, sourceMD5)
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s", sourceFilename, storage))
}
@@ -92,36 +74,6 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
return err
}
// putFile uploads file-like object to
func (storage *PublishedStorage) putFile(path string, source io.Reader, sourceMD5 string) error {
path = filepath.Join(storage.prefix, path)
blob := storage.container.NewBlockBlobURL(path)
uploadOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: 4 * 1024 * 1024,
MaxBuffers: 8,
}
if len(sourceMD5) > 0 {
decodedMD5, err := hex.DecodeString(sourceMD5)
if err != nil {
return err
}
uploadOptions.BlobHTTPHeaders = azblob.BlobHTTPHeaders{
ContentMD5: decodedMD5,
}
}
_, err := azblob.UploadStreamToBlockBlob(
context.Background(),
source,
blob,
uploadOptions,
)
return err
}
// RemoveDirs removes directory structure under public path
func (storage *PublishedStorage) RemoveDirs(path string, _ aptly.Progress) error {
filelist, err := storage.Filelist(path)
@@ -130,7 +82,7 @@ func (storage *PublishedStorage) RemoveDirs(path string, _ aptly.Progress) error
}
for _, filename := range filelist {
blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path, filename))
blob := storage.az.blobURL(filepath.Join(path, filename))
_, err := blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
if err != nil {
return fmt.Errorf("error deleting path %s from %s: %s", filename, storage, err)
@@ -142,7 +94,7 @@ func (storage *PublishedStorage) RemoveDirs(path string, _ aptly.Progress) error
// Remove removes single file under public path
func (storage *PublishedStorage) Remove(path string) error {
blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
blob := storage.az.blobURL(path)
_, err := blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("error deleting %s from %s: %s", path, storage, err))
@@ -163,14 +115,14 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
relFilePath := filepath.Join(publishedRelPath, fileName)
prefixRelFilePath := filepath.Join(publishedPrefix, relFilePath)
poolPath := filepath.Join(storage.prefix, prefixRelFilePath)
poolPath := storage.az.blobPath(prefixRelFilePath)
if storage.pathCache == nil {
storage.pathCache = make(map[string]map[string]string)
}
pathCache := storage.pathCache[publishedPrefix]
if pathCache == nil {
paths, md5s, err := storage.internalFilelist(publishedPrefix)
paths, md5s, err := storage.az.internalFilelist(publishedPrefix, nil)
if err != nil {
return fmt.Errorf("error caching paths under prefix: %s", err)
}
@@ -206,7 +158,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
}
defer source.Close()
err = storage.putFile(prefixRelFilePath, source, sourceMD5)
err = storage.az.putFile(storage.az.blobURL(prefixRelFilePath), source, sourceMD5)
if err == nil {
pathCache[relFilePath] = sourceMD5
} else {
@@ -216,43 +168,9 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
return err
}
func (storage *PublishedStorage) internalFilelist(prefix string) (paths []string, md5s []string, err error) {
const delimiter = "/"
paths = make([]string, 0, 1024)
md5s = make([]string, 0, 1024)
prefix = filepath.Join(storage.prefix, prefix)
if prefix != "" {
prefix += delimiter
}
for marker := (azblob.Marker{}); marker.NotDone(); {
listBlob, err := storage.container.ListBlobsFlatSegment(
context.Background(), marker, azblob.ListBlobsSegmentOptions{
Prefix: prefix,
MaxResults: 1000,
Details: azblob.BlobListingDetails{Metadata: true}})
if err != nil {
return nil, nil, fmt.Errorf("error listing under prefix %s in %s: %s", prefix, storage, err)
}
marker = listBlob.NextMarker
for _, blob := range listBlob.Segment.BlobItems {
if prefix == "" {
paths = append(paths, blob.Name)
} else {
paths = append(paths, blob.Name[len(prefix):])
}
md5s = append(md5s, fmt.Sprintf("%x", blob.Properties.ContentMD5))
}
}
return paths, md5s, nil
}
// Filelist returns list of files under prefix
func (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {
paths, _, err := storage.internalFilelist(prefix)
paths, _, err := storage.az.internalFilelist(prefix, nil)
return paths, err
}
@@ -260,8 +178,8 @@ func (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {
func (storage *PublishedStorage) internalCopyOrMoveBlob(src, dst string, metadata azblob.Metadata, move bool) error {
const leaseDuration = 30
dstBlobURL := storage.container.NewBlobURL(filepath.Join(storage.prefix, dst))
srcBlobURL := storage.container.NewBlobURL(filepath.Join(storage.prefix, src))
dstBlobURL := storage.az.blobURL(dst)
srcBlobURL := storage.az.blobURL(src)
leaseResp, err := srcBlobURL.AcquireLease(context.Background(), "", leaseDuration, azblob.ModifiedAccessConditions{})
if err != nil || leaseResp.StatusCode() != http.StatusCreated {
return fmt.Errorf("error acquiring lease on source blob %s", srcBlobURL)
@@ -332,11 +250,10 @@ func (storage *PublishedStorage) HardLink(src string, dst string) error {
// FileExists returns true if path exists
func (storage *PublishedStorage) FileExists(path string) (bool, error) {
blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
blob := storage.az.blobURL(path)
resp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
if err != nil {
storageError, ok := err.(azblob.StorageError)
if ok && string(storageError.ServiceCode()) == string(azblob.StorageErrorCodeBlobNotFound) {
if isBlobNotFound(err) {
return false, nil
}
return false, err
@@ -349,7 +266,7 @@ func (storage *PublishedStorage) FileExists(path string) (bool, error) {
// ReadLink returns the symbolic link pointed to by path.
// This simply reads text file created with SymLink
func (storage *PublishedStorage) ReadLink(path string) (string, error) {
blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
blob := storage.az.blobURL(path)
resp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
if err != nil {
return "", err

@@ -66,7 +66,7 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
s.storage, err = NewPublishedStorage(s.accountName, s.accountKey, container, "", s.endpoint)
c.Assert(err, IsNil)
cnt := s.storage.container
cnt := s.storage.az.container
_, err = cnt.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer)
c.Assert(err, IsNil)
@@ -75,13 +75,13 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
}
func (s *PublishedStorageSuite) TearDownTest(c *C) {
cnt := s.storage.container
cnt := s.storage.az.container
_, err := cnt.Delete(context.Background(), azblob.ContainerAccessConditions{})
c.Assert(err, IsNil)
}
func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
blob := s.storage.container.NewBlobURL(path)
blob := s.storage.az.container.NewBlobURL(path)
resp, err := blob.Download(context.Background(), 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
c.Assert(err, IsNil)
body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
@@ -91,7 +91,7 @@ func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
}
func (s *PublishedStorageSuite) AssertNoFile(c *C, path string) {
_, err := s.storage.container.NewBlobURL(path).GetProperties(
_, err := s.storage.az.container.NewBlobURL(path).GetProperties(
context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
c.Assert(err, NotNil)
storageError, ok := err.(azblob.StorageError)
@@ -104,7 +104,7 @@ func (s *PublishedStorageSuite) PutFile(c *C, path string, data []byte) {
_, err := azblob.UploadBufferToBlockBlob(
context.Background(),
data,
s.storage.container.NewBlockBlobURL(path),
s.storage.az.container.NewBlockBlobURL(path),
azblob.UploadToBlockBlobOptions{
BlobHTTPHeaders: azblob.BlobHTTPHeaders{
ContentMD5: hash[:],
@@ -129,7 +129,7 @@ func (s *PublishedStorageSuite) TestPutFile(c *C) {
err = s.prefixedStorage.PutFile(filename, filepath.Join(dir, "a"))
c.Check(err, IsNil)
c.Check(s.GetFile(c, filepath.Join(s.prefixedStorage.prefix, filename)), DeepEquals, content)
c.Check(s.GetFile(c, filepath.Join(s.prefixedStorage.az.prefix, filename)), DeepEquals, content)
}
func (s *PublishedStorageSuite) TestPutFilePlus(c *C) {