Add support for Azure package pools

This adds support for storing packages directly on Azure, with no truly
"local" (on-disk) repo used. The existing Azure PublishedStorage
implementation was refactored to move the shared code to a separate
context struct, which can then be re-used by the new PackagePool. In
addition, the files package's mockChecksumStorage was made public so
that it could be used in the Azure PackagePool tests as well.

Signed-off-by: Ryan Gonzalez <ryan.gonzalez@collabora.com>
Author: Ryan Gonzalez <ryan.gonzalez@collabora.com>
Date: 2022-05-17 08:52:59 -05:00
Committed by: André Roth
Commit: f9325fbc91 (parent: 810df17009)
16 changed files with 820 additions and 148 deletions


@@ -1,2 +1,128 @@
// Package azure handles publishing to Azure Storage
package azure

import (
	"context"
	"encoding/hex"
	"fmt"
	"io"
	"net/url"
	"path/filepath"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"

	"github.com/aptly-dev/aptly/aptly"
)

func isBlobNotFound(err error) bool {
	storageError, ok := err.(azblob.StorageError)
	return ok && storageError.ServiceCode() == azblob.ServiceCodeBlobNotFound
}

type azContext struct {
	container azblob.ContainerURL
	prefix    string
}

func newAzContext(accountName, accountKey, container, prefix, endpoint string) (*azContext, error) {
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return nil, err
	}

	if endpoint == "" {
		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
	}

	url, err := url.Parse(fmt.Sprintf("%s/%s", endpoint, container))
	if err != nil {
		return nil, err
	}

	containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
	result := &azContext{
		container: containerURL,
		prefix:    prefix,
	}

	return result, nil
}

func (az *azContext) blobPath(path string) string {
	return filepath.Join(az.prefix, path)
}

func (az *azContext) blobURL(path string) azblob.BlobURL {
	return az.container.NewBlobURL(az.blobPath(path))
}

func (az *azContext) internalFilelist(prefix string, progress aptly.Progress) (paths []string, md5s []string, err error) {
	const delimiter = "/"
	paths = make([]string, 0, 1024)
	md5s = make([]string, 0, 1024)
	prefix = filepath.Join(az.prefix, prefix)
	if prefix != "" {
		prefix += delimiter
	}

	for marker := (azblob.Marker{}); marker.NotDone(); {
		listBlob, err := az.container.ListBlobsFlatSegment(
			context.Background(), marker, azblob.ListBlobsSegmentOptions{
				Prefix:     prefix,
				MaxResults: 1,
				Details:    azblob.BlobListingDetails{Metadata: true}})
		if err != nil {
			return nil, nil, fmt.Errorf("error listing under prefix %s in %s: %s", prefix, az, err)
		}

		marker = listBlob.NextMarker

		for _, blob := range listBlob.Segment.BlobItems {
			if prefix == "" {
				paths = append(paths, blob.Name)
			} else {
				paths = append(paths, blob.Name[len(prefix):])
			}
			md5s = append(md5s, fmt.Sprintf("%x", blob.Properties.ContentMD5))
		}

		if progress != nil {
			time.Sleep(time.Duration(500) * time.Millisecond)
			progress.AddBar(1)
		}
	}

	return paths, md5s, nil
}

func (az *azContext) putFile(blob azblob.BlobURL, source io.Reader, sourceMD5 string) error {
	uploadOptions := azblob.UploadStreamToBlockBlobOptions{
		BufferSize: 4 * 1024 * 1024,
		MaxBuffers: 8,
	}
	if len(sourceMD5) > 0 {
		decodedMD5, err := hex.DecodeString(sourceMD5)
		if err != nil {
			return err
		}
		uploadOptions.BlobHTTPHeaders = azblob.BlobHTTPHeaders{
			ContentMD5: decodedMD5,
		}
	}

	_, err := azblob.UploadStreamToBlockBlob(
		context.Background(),
		source,
		blob.ToBlockBlobURL(),
		uploadOptions,
	)

	return err
}

// String
func (az *azContext) String() string {
	return fmt.Sprintf("Azure: %s/%s", az.container, az.prefix)
}
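
Worth noting in blobPath and internalFilelist above: the prefix handling leans on filepath.Join dropping empty path components. A minimal standalone sketch (the prefix and blob name here are hypothetical):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// with a prefix configured, blob names are nested under it
	fmt.Println(filepath.Join("lala", "c7/6b/abc_a.deb")) // lala/c7/6b/abc_a.deb

	// an empty prefix is simply dropped, so no stray separator appears
	fmt.Println(filepath.Join("", "c7/6b/abc_a.deb")) // c7/6b/abc_a.deb
}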

azure/package_pool.go (new file, 219 lines)

@@ -0,0 +1,219 @@
package azure

import (
	"context"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/Azure/azure-storage-blob-go/azblob"

	"github.com/aptly-dev/aptly/aptly"
	"github.com/aptly-dev/aptly/utils"

	"github.com/pkg/errors"
)

// PackagePool is an Azure Storage-backed implementation of aptly.PackagePool
type PackagePool struct {
	az *azContext
}

// Check interface
var (
	_ aptly.PackagePool = (*PackagePool)(nil)
)

// NewPackagePool creates a package pool from Azure storage credentials
func NewPackagePool(accountName, accountKey, container, prefix, endpoint string) (*PackagePool, error) {
	azctx, err := newAzContext(accountName, accountKey, container, prefix, endpoint)
	if err != nil {
		return nil, err
	}

	return &PackagePool{az: azctx}, nil
}

// String
func (pool *PackagePool) String() string {
	return pool.az.String()
}

func (pool *PackagePool) buildPoolPath(filename string, checksums *utils.ChecksumInfo) string {
	hash := checksums.SHA256
	// Use the same path as the file pool, for compat reasons.
	return filepath.Join(hash[0:2], hash[2:4], hash[4:32]+"_"+filename)
}

func (pool *PackagePool) ensureChecksums(
	poolPath string,
	checksumStorage aptly.ChecksumStorage,
) (*utils.ChecksumInfo, error) {
	targetChecksums, err := checksumStorage.Get(poolPath)
	if err != nil {
		return nil, err
	}

	if targetChecksums == nil {
		// we don't have checksums stored yet for this file
		blob := pool.az.blobURL(poolPath)
		download, err := blob.Download(context.Background(), 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
		if err != nil {
			if isBlobNotFound(err) {
				return nil, nil
			}

			return nil, errors.Wrapf(err, "error downloading blob at %s", poolPath)
		}

		targetChecksums = &utils.ChecksumInfo{}
		*targetChecksums, err = utils.ChecksumsForReader(download.Body(azblob.RetryReaderOptions{}))
		if err != nil {
			return nil, errors.Wrapf(err, "error checksumming blob at %s", poolPath)
		}

		err = checksumStorage.Update(poolPath, targetChecksums)
		if err != nil {
			return nil, err
		}
	}

	return targetChecksums, nil
}

// FilepathList returns the list of paths in the pool
func (pool *PackagePool) FilepathList(progress aptly.Progress) ([]string, error) {
	if progress != nil {
		progress.InitBar(0, false, aptly.BarGeneralBuildFileList)
		defer progress.ShutdownBar()
	}

	paths, _, err := pool.az.internalFilelist("", progress)
	return paths, err
}

// LegacyPath returns an error, as Azure package pools have no legacy paths
func (pool *PackagePool) LegacyPath(filename string, checksums *utils.ChecksumInfo) (string, error) {
	return "", errors.New("Azure package pool does not support legacy paths")
}

// Size returns the size of the file in the pool
func (pool *PackagePool) Size(path string) (int64, error) {
	blob := pool.az.blobURL(path)
	props, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
	if err != nil {
		return 0, errors.Wrapf(err, "error examining %s from %s", path, pool)
	}

	return props.ContentLength(), nil
}

// Open returns a reader for the pool file, backed by a local temporary file
func (pool *PackagePool) Open(path string) (aptly.ReadSeekerCloser, error) {
	blob := pool.az.blobURL(path)

	temp, err := ioutil.TempFile("", "blob-download")
	if err != nil {
		return nil, errors.Wrap(err, "error creating temporary file for blob download")
	}
	defer os.Remove(temp.Name())

	err = azblob.DownloadBlobToFile(context.Background(), blob, 0, 0, temp, azblob.DownloadFromBlobOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "error downloading blob at %s", path)
	}

	return temp, nil
}

// Remove deletes the file from the pool, returning its size
func (pool *PackagePool) Remove(path string) (int64, error) {
	blob := pool.az.blobURL(path)

	props, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
	if err != nil {
		return 0, errors.Wrapf(err, "error getting props of %s from %s", path, pool)
	}

	_, err = blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
	if err != nil {
		return 0, errors.Wrapf(err, "error deleting %s from %s", path, pool)
	}

	return props.ContentLength(), nil
}

// Import copies the file into the pool, deduplicating against existing blobs
func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.ChecksumInfo, move bool, checksumStorage aptly.ChecksumStorage) (string, error) {
	if checksums.MD5 == "" || checksums.SHA256 == "" || checksums.SHA512 == "" {
		// need to update checksums, MD5 and SHA256 should be always defined
		var err error
		*checksums, err = utils.ChecksumsForFile(srcPath)
		if err != nil {
			return "", err
		}
	}

	path := pool.buildPoolPath(basename, checksums)
	blob := pool.az.blobURL(path)

	targetChecksums, err := pool.ensureChecksums(path, checksumStorage)
	if err != nil {
		return "", err
	} else if targetChecksums != nil {
		// target already exists
		*checksums = *targetChecksums
		return path, nil
	}

	source, err := os.Open(srcPath)
	if err != nil {
		return "", err
	}
	defer source.Close()

	err = pool.az.putFile(blob, source, checksums.MD5)
	if err != nil {
		return "", err
	}

	if !checksums.Complete() {
		// need full checksums here
		*checksums, err = utils.ChecksumsForFile(srcPath)
		if err != nil {
			return "", err
		}
	}

	err = checksumStorage.Update(path, checksums)
	if err != nil {
		return "", err
	}

	return path, nil
}

// Verify checks whether the file exists in the pool and its checksums match
func (pool *PackagePool) Verify(poolPath, basename string, checksums *utils.ChecksumInfo, checksumStorage aptly.ChecksumStorage) (string, bool, error) {
	if poolPath == "" {
		if checksums.SHA256 != "" {
			poolPath = pool.buildPoolPath(basename, checksums)
		} else {
			// No checksums or pool path, so no idea what file to look for.
			return "", false, nil
		}
	}

	size, err := pool.Size(poolPath)
	if err != nil {
		return "", false, err
	} else if size != checksums.Size {
		return "", false, nil
	}

	targetChecksums, err := pool.ensureChecksums(poolPath, checksumStorage)
	if err != nil {
		return "", false, err
	} else if targetChecksums == nil {
		return "", false, nil
	}

	if checksums.MD5 != "" && targetChecksums.MD5 != checksums.MD5 ||
		checksums.SHA256 != "" && targetChecksums.SHA256 != checksums.SHA256 {
		// wrong file?
		return "", false, nil
	}

	// fill back checksums
	*checksums = *targetChecksums

	return poolPath, true, nil
}
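
The pool layout produced by buildPoolPath matches the on-disk file pool, which is what the test expectations below rely on. A standalone sketch of the derivation (the helper name is illustrative; the checksum value is taken from the tests):

package main

import (
	"fmt"
	"path/filepath"
)

// poolPath mirrors buildPoolPath above: two levels of fan-out from the
// SHA256, then a 28-hex-char stem joined to the basename.
func poolPath(filename, sha256 string) string {
	return filepath.Join(sha256[0:2], sha256[2:4], sha256[4:32]+"_"+filename)
}

func main() {
	// SHA256 of the .deb used throughout the tests below
	sum := "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12"
	fmt.Println(poolPath("some.deb", sum))
	// Output: c7/6b/4bd12fd92e4dfe1b55b18a67a669_some.deb
}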

azure/package_pool_test.go (new file, 255 lines)

@@ -0,0 +1,255 @@
package azure

import (
	"context"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"

	"github.com/Azure/azure-storage-blob-go/azblob"

	"github.com/aptly-dev/aptly/aptly"
	"github.com/aptly-dev/aptly/files"
	"github.com/aptly-dev/aptly/utils"

	. "gopkg.in/check.v1"
)

type PackagePoolSuite struct {
	accountName, accountKey, endpoint string
	pool, prefixedPool                *PackagePool
	debFile                           string
	cs                                aptly.ChecksumStorage
}

var _ = Suite(&PackagePoolSuite{})

func (s *PackagePoolSuite) SetUpSuite(c *C) {
	s.accountName = os.Getenv("AZURE_STORAGE_ACCOUNT")
	if s.accountName == "" {
		println("Please set the following two environment variables to run the Azure storage tests.")
		println("  1. AZURE_STORAGE_ACCOUNT")
		println("  2. AZURE_STORAGE_ACCESS_KEY")
		c.Skip("AZURE_STORAGE_ACCOUNT not set.")
	}

	s.accountKey = os.Getenv("AZURE_STORAGE_ACCESS_KEY")
	if s.accountKey == "" {
		println("Please set the following two environment variables to run the Azure storage tests.")
		println("  1. AZURE_STORAGE_ACCOUNT")
		println("  2. AZURE_STORAGE_ACCESS_KEY")
		c.Skip("AZURE_STORAGE_ACCESS_KEY not set.")
	}

	s.endpoint = os.Getenv("AZURE_STORAGE_ENDPOINT")
}

func (s *PackagePoolSuite) SetUpTest(c *C) {
	container := randContainer()
	prefix := "lala"

	var err error

	s.pool, err = NewPackagePool(s.accountName, s.accountKey, container, "", s.endpoint)
	c.Assert(err, IsNil)

	cnt := s.pool.az.container
	_, err = cnt.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer)
	c.Assert(err, IsNil)

	s.prefixedPool, err = NewPackagePool(s.accountName, s.accountKey, container, prefix, s.endpoint)
	c.Assert(err, IsNil)

	_, _File, _, _ := runtime.Caller(0)
	s.debFile = filepath.Join(filepath.Dir(_File), "../system/files/libboost-program-options-dev_1.49.0.1_i386.deb")
	s.cs = files.NewMockChecksumStorage()
}

func (s *PackagePoolSuite) TestFilepathList(c *C) {
	list, err := s.pool.FilepathList(nil)
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{})

	s.pool.Import(s.debFile, "a.deb", &utils.ChecksumInfo{}, false, s.cs)
	s.pool.Import(s.debFile, "b.deb", &utils.ChecksumInfo{}, false, s.cs)

	list, err = s.pool.FilepathList(nil)
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{
		"c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb",
		"c7/6b/4bd12fd92e4dfe1b55b18a67a669_b.deb",
	})
}

func (s *PackagePoolSuite) TestRemove(c *C) {
	s.pool.Import(s.debFile, "a.deb", &utils.ChecksumInfo{}, false, s.cs)
	s.pool.Import(s.debFile, "b.deb", &utils.ChecksumInfo{}, false, s.cs)

	size, err := s.pool.Remove("c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb")
	c.Check(err, IsNil)
	c.Check(size, Equals, int64(2738))

	_, err = s.pool.Remove("c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb")
	c.Check(err, ErrorMatches, "(.|\n)*BlobNotFound(.|\n)*")

	list, err := s.pool.FilepathList(nil)
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"c7/6b/4bd12fd92e4dfe1b55b18a67a669_b.deb"})
}

func (s *PackagePoolSuite) TestImportOk(c *C) {
	var checksum utils.ChecksumInfo
	path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &checksum, false, s.cs)
	c.Check(err, IsNil)
	c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
	// SHA256 should be automatically calculated
	c.Check(checksum.SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
	// checksum storage is filled with new checksum
	c.Check(s.cs.(*files.MockChecksumStorage).Store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")

	size, err := s.pool.Size(path)
	c.Assert(err, IsNil)
	c.Check(size, Equals, int64(2738))

	// import as different name
	checksum = utils.ChecksumInfo{}
	path, err = s.pool.Import(s.debFile, "some.deb", &checksum, false, s.cs)
	c.Check(err, IsNil)
	c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_some.deb")
	// checksum storage is filled with new checksum
	c.Check(s.cs.(*files.MockChecksumStorage).Store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")

	// double import, should be ok
	checksum = utils.ChecksumInfo{}
	path, err = s.pool.Import(s.debFile, filepath.Base(s.debFile), &checksum, false, s.cs)
	c.Check(err, IsNil)
	c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
	// checksum is filled back based on checksum storage
	c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")

	// clear checksum storage, and do double-import
	delete(s.cs.(*files.MockChecksumStorage).Store, path)
	checksum = utils.ChecksumInfo{}
	path, err = s.pool.Import(s.debFile, filepath.Base(s.debFile), &checksum, false, s.cs)
	c.Check(err, IsNil)
	c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
	// checksum is filled back based on re-calculation of file in the pool
	c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")

	// import under new name, but with path-relevant checksums already filled in
	checksum = utils.ChecksumInfo{SHA256: checksum.SHA256}
	path, err = s.pool.Import(s.debFile, "other.deb", &checksum, false, s.cs)
	c.Check(err, IsNil)
	c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_other.deb")
	// checksum is filled back based on re-calculation of source file
	c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
}

func (s *PackagePoolSuite) TestVerify(c *C) {
	// file doesn't exist yet
	ppath, exists, err := s.pool.Verify("", filepath.Base(s.debFile), &utils.ChecksumInfo{}, s.cs)
	c.Check(ppath, Equals, "")
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)

	// import file
	checksum := utils.ChecksumInfo{}
	path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &checksum, false, s.cs)
	c.Check(err, IsNil)
	c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")

	// check existence
	ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &checksum, s.cs)
	c.Check(ppath, Equals, path)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")

	// check existence with fixed path
	checksum = utils.ChecksumInfo{Size: checksum.Size}
	ppath, exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &checksum, s.cs)
	c.Check(ppath, Equals, path)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")

	// check existence, but with checksums missing (that aren't needed to find the path)
	checksum.SHA512 = ""
	ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &checksum, s.cs)
	c.Check(ppath, Equals, path)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	// checksum is filled back based on checksum storage
	c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")

	// check existence, with missing checksum info but correct path and size available
	checksum = utils.ChecksumInfo{Size: checksum.Size}
	ppath, exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &checksum, s.cs)
	c.Check(ppath, Equals, path)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	// checksum is filled back based on checksum storage
	c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")

	// check existence, with wrong checksum info but correct path and size available
	ppath, exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &utils.ChecksumInfo{
		SHA256: "abc",
		Size:   checksum.Size,
	}, s.cs)
	c.Check(ppath, Equals, "")
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)

	// check existence, with missing checksums (that aren't needed to find the path)
	// and no info in checksum storage
	delete(s.cs.(*files.MockChecksumStorage).Store, path)
	checksum.SHA512 = ""
	ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &checksum, s.cs)
	c.Check(ppath, Equals, path)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	// checksum is filled back based on re-calculation
	c.Check(checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")

	// check existence, with wrong size
	checksum = utils.ChecksumInfo{Size: 13455}
	ppath, exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &checksum, s.cs)
	c.Check(ppath, Equals, "")
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)

	// check existence, with empty checksum info
	ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &utils.ChecksumInfo{}, s.cs)
	c.Check(ppath, Equals, "")
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)
}

func (s *PackagePoolSuite) TestImportNotExist(c *C) {
	_, err := s.pool.Import("no-such-file", "a.deb", &utils.ChecksumInfo{}, false, s.cs)
	c.Check(err, ErrorMatches, ".*no such file or directory")
}

func (s *PackagePoolSuite) TestSize(c *C) {
	path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &utils.ChecksumInfo{}, false, s.cs)
	c.Check(err, IsNil)

	size, err := s.pool.Size(path)
	c.Assert(err, IsNil)
	c.Check(size, Equals, int64(2738))

	_, err = s.pool.Size("do/es/ntexist")
	c.Check(err, ErrorMatches, "(.|\n)*BlobNotFound(.|\n)*")
}

func (s *PackagePoolSuite) TestOpen(c *C) {
	path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &utils.ChecksumInfo{}, false, s.cs)
	c.Check(err, IsNil)

	f, err := s.pool.Open(path)
	c.Assert(err, IsNil)
	contents, err := ioutil.ReadAll(f)
	c.Assert(err, IsNil)
	c.Check(len(contents), Equals, 2738)
	c.Check(f.Close(), IsNil)

	_, err = s.pool.Open("do/es/ntexist")
	c.Check(err, ErrorMatches, "(.|\n)*BlobNotFound(.|\n)*")
}


@@ -2,11 +2,8 @@ package azure
 import (
 	"context"
-	"encoding/hex"
 	"fmt"
-	"io"
 	"net/http"
-	"net/url"
 	"os"
 	"path/filepath"
 	"time"
@@ -21,6 +18,7 @@ import (
 type PublishedStorage struct {
 	container azblob.ContainerURL
 	prefix    string
+	az        *azContext
 	pathCache map[string]map[string]string
 }
@@ -31,33 +29,17 @@ var (
 // NewPublishedStorage creates published storage from Azure storage credentials
 func NewPublishedStorage(accountName, accountKey, container, prefix, endpoint string) (*PublishedStorage, error) {
-	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
+	azctx, err := newAzContext(accountName, accountKey, container, prefix, endpoint)
 	if err != nil {
 		return nil, err
 	}
-	if endpoint == "" {
-		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
-	}
-	url, err := url.Parse(fmt.Sprintf("%s/%s", endpoint, container))
-	if err != nil {
-		return nil, err
-	}
-	containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
-	result := &PublishedStorage{
-		container: containerURL,
-		prefix:    prefix,
-	}
-	return result, nil
+	return &PublishedStorage{az: azctx}, nil
 }

 // String
 func (storage *PublishedStorage) String() string {
-	return fmt.Sprintf("Azure: %s/%s", storage.container, storage.prefix)
+	return storage.az.String()
 }

 // MkDir creates directory recursively under public path
@@ -84,7 +66,7 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
 	}
 	defer source.Close()

-	err = storage.putFile(path, source, sourceMD5)
+	err = storage.az.putFile(storage.az.blobURL(path), source, sourceMD5)
 	if err != nil {
 		err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s", sourceFilename, storage))
 	}
@@ -92,36 +74,6 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
 	return err
 }

-// putFile uploads file-like object to
-func (storage *PublishedStorage) putFile(path string, source io.Reader, sourceMD5 string) error {
-	path = filepath.Join(storage.prefix, path)
-	blob := storage.container.NewBlockBlobURL(path)
-	uploadOptions := azblob.UploadStreamToBlockBlobOptions{
-		BufferSize: 4 * 1024 * 1024,
-		MaxBuffers: 8,
-	}
-	if len(sourceMD5) > 0 {
-		decodedMD5, err := hex.DecodeString(sourceMD5)
-		if err != nil {
-			return err
-		}
-		uploadOptions.BlobHTTPHeaders = azblob.BlobHTTPHeaders{
-			ContentMD5: decodedMD5,
-		}
-	}
-	_, err := azblob.UploadStreamToBlockBlob(
-		context.Background(),
-		source,
-		blob,
-		uploadOptions,
-	)
-	return err
-}

 // RemoveDirs removes directory structure under public path
 func (storage *PublishedStorage) RemoveDirs(path string, _ aptly.Progress) error {
 	filelist, err := storage.Filelist(path)
@@ -130,7 +82,7 @@ func (storage *PublishedStorage) RemoveDirs(path string, _ aptly.Progress) error
 	}
 	for _, filename := range filelist {
-		blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path, filename))
+		blob := storage.az.blobURL(filepath.Join(path, filename))
 		_, err := blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
 		if err != nil {
 			return fmt.Errorf("error deleting path %s from %s: %s", filename, storage, err)
@@ -142,7 +94,7 @@ func (storage *PublishedStorage) RemoveDirs(path string, _ aptly.Progress) error
 // Remove removes single file under public path
 func (storage *PublishedStorage) Remove(path string) error {
-	blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
+	blob := storage.az.blobURL(path)
 	_, err := blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
 	if err != nil {
 		err = errors.Wrap(err, fmt.Sprintf("error deleting %s from %s: %s", path, storage, err))
@@ -163,14 +115,14 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
 	relFilePath := filepath.Join(publishedRelPath, fileName)
 	prefixRelFilePath := filepath.Join(publishedPrefix, relFilePath)
-	poolPath := filepath.Join(storage.prefix, prefixRelFilePath)
+	poolPath := storage.az.blobPath(fileName)
 	if storage.pathCache == nil {
 		storage.pathCache = make(map[string]map[string]string)
 	}
 	pathCache := storage.pathCache[publishedPrefix]
 	if pathCache == nil {
-		paths, md5s, err := storage.internalFilelist(publishedPrefix)
+		paths, md5s, err := storage.az.internalFilelist(publishedPrefix, nil)
 		if err != nil {
 			return fmt.Errorf("error caching paths under prefix: %s", err)
 		}
@@ -206,7 +158,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
 	}
 	defer source.Close()

-	err = storage.putFile(prefixRelFilePath, source, sourceMD5)
+	err = storage.az.putFile(storage.az.blobURL(relPath), source, sourceMD5)
 	if err == nil {
 		pathCache[relFilePath] = sourceMD5
 	} else {
@@ -216,43 +168,9 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
 	return err
 }

-func (storage *PublishedStorage) internalFilelist(prefix string) (paths []string, md5s []string, err error) {
-	const delimiter = "/"
-	paths = make([]string, 0, 1024)
-	md5s = make([]string, 0, 1024)
-	prefix = filepath.Join(storage.prefix, prefix)
-	if prefix != "" {
-		prefix += delimiter
-	}
-	for marker := (azblob.Marker{}); marker.NotDone(); {
-		listBlob, err := storage.container.ListBlobsFlatSegment(
-			context.Background(), marker, azblob.ListBlobsSegmentOptions{
-				Prefix:     prefix,
-				MaxResults: 1000,
-				Details:    azblob.BlobListingDetails{Metadata: true}})
-		if err != nil {
-			return nil, nil, fmt.Errorf("error listing under prefix %s in %s: %s", prefix, storage, err)
-		}
-		marker = listBlob.NextMarker
-		for _, blob := range listBlob.Segment.BlobItems {
-			if prefix == "" {
-				paths = append(paths, blob.Name)
-			} else {
-				paths = append(paths, blob.Name[len(prefix):])
-			}
-			md5s = append(md5s, fmt.Sprintf("%x", blob.Properties.ContentMD5))
-		}
-	}
-	return paths, md5s, nil
-}

 // Filelist returns list of files under prefix
 func (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {
-	paths, _, err := storage.internalFilelist(prefix)
+	paths, _, err := storage.az.internalFilelist(prefix, nil)
 	return paths, err
 }
@@ -260,8 +178,8 @@ func (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {
 func (storage *PublishedStorage) internalCopyOrMoveBlob(src, dst string, metadata azblob.Metadata, move bool) error {
 	const leaseDuration = 30

-	dstBlobURL := storage.container.NewBlobURL(filepath.Join(storage.prefix, dst))
-	srcBlobURL := storage.container.NewBlobURL(filepath.Join(storage.prefix, src))
+	dstBlobURL := storage.az.blobURL(dst)
+	srcBlobURL := storage.az.blobURL(src)
 	leaseResp, err := srcBlobURL.AcquireLease(context.Background(), "", leaseDuration, azblob.ModifiedAccessConditions{})
 	if err != nil || leaseResp.StatusCode() != http.StatusCreated {
 		return fmt.Errorf("error acquiring lease on source blob %s", srcBlobURL)
@@ -332,11 +250,10 @@ func (storage *PublishedStorage) HardLink(src string, dst string) error {
 // FileExists returns true if path exists
 func (storage *PublishedStorage) FileExists(path string) (bool, error) {
-	blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
+	blob := storage.az.blobURL(path)
 	resp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
 	if err != nil {
-		storageError, ok := err.(azblob.StorageError)
-		if ok && string(storageError.ServiceCode()) == string(azblob.StorageErrorCodeBlobNotFound) {
+		if isBlobNotFound(err) {
 			return false, nil
 		}
 		return false, err
@@ -349,7 +266,7 @@ func (storage *PublishedStorage) FileExists(path string) (bool, error) {
 // ReadLink returns the symbolic link pointed to by path.
 // This simply reads text file created with SymLink
 func (storage *PublishedStorage) ReadLink(path string) (string, error) {
-	blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
+	blob := storage.az.blobURL(path)
 	resp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
 	if err != nil {
 		return "", err


@@ -66,7 +66,7 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
 	s.storage, err = NewPublishedStorage(s.accountName, s.accountKey, container, "", s.endpoint)
 	c.Assert(err, IsNil)

-	cnt := s.storage.container
+	cnt := s.storage.az.container
 	_, err = cnt.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer)
 	c.Assert(err, IsNil)
@@ -75,13 +75,13 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
 }

 func (s *PublishedStorageSuite) TearDownTest(c *C) {
-	cnt := s.storage.container
+	cnt := s.storage.az.container
 	_, err := cnt.Delete(context.Background(), azblob.ContainerAccessConditions{})
 	c.Assert(err, IsNil)
 }

 func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
-	blob := s.storage.container.NewBlobURL(path)
+	blob := s.storage.az.container.NewBlobURL(path)
 	resp, err := blob.Download(context.Background(), 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
 	c.Assert(err, IsNil)

 	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
@@ -91,7 +91,7 @@ func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
 }

 func (s *PublishedStorageSuite) AssertNoFile(c *C, path string) {
-	_, err := s.storage.container.NewBlobURL(path).GetProperties(
+	_, err := s.storage.az.container.NewBlobURL(path).GetProperties(
 		context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
 	c.Assert(err, NotNil)

 	storageError, ok := err.(azblob.StorageError)
@@ -104,7 +104,7 @@ func (s *PublishedStorageSuite) PutFile(c *C, path string, data []byte) {
 	_, err := azblob.UploadBufferToBlockBlob(
 		context.Background(),
 		data,
-		s.storage.container.NewBlockBlobURL(path),
+		s.storage.az.container.NewBlockBlobURL(path),
 		azblob.UploadToBlockBlobOptions{
 			BlobHTTPHeaders: azblob.BlobHTTPHeaders{
 				ContentMD5: hash[:],
@@ -129,7 +129,7 @@ func (s *PublishedStorageSuite) TestPutFile(c *C) {
 	err = s.prefixedStorage.PutFile(filename, filepath.Join(dir, "a"))
 	c.Check(err, IsNil)
-	c.Check(s.GetFile(c, filepath.Join(s.prefixedStorage.prefix, filename)), DeepEquals, content)
+	c.Check(s.GetFile(c, filepath.Join(s.prefixedStorage.az.prefix, filename)), DeepEquals, content)
 }

 func (s *PublishedStorageSuite) TestPutFilePlus(c *C) {
func (s *PublishedStorageSuite) TestPutFilePlus(c *C) { func (s *PublishedStorageSuite) TestPutFilePlus(c *C) {


@@ -361,12 +361,26 @@ func (context *AptlyContext) PackagePool() aptly.PackagePool {
 	defer context.Unlock()

 	if context.packagePool == nil {
-		poolRoot := context.config().PackagePoolStorage.Path
-		if poolRoot == "" {
-			poolRoot = filepath.Join(context.config().RootDir, "pool")
-		}
+		storageConfig := context.config().PackagePoolStorage
+		if storageConfig.Azure != nil {
+			var err error
+			context.packagePool, err = azure.NewPackagePool(
+				storageConfig.Azure.AccountName,
+				storageConfig.Azure.AccountKey,
+				storageConfig.Azure.Container,
+				storageConfig.Azure.Prefix,
+				storageConfig.Azure.Endpoint)
+			if err != nil {
+				Fatal(err)
+			}
+		} else {
+			poolRoot := context.config().PackagePoolStorage.Local.Path
+			if poolRoot == "" {
+				poolRoot = filepath.Join(context.config().RootDir, "pool")
+			}
 			context.packagePool = files.NewPackagePool(poolRoot, !context.config().SkipLegacyPool)
+		}
 	}

 	return context.packagePool


@@ -5,19 +5,19 @@ import (
 	"github.com/aptly-dev/aptly/utils"
 )

-type mockChecksumStorage struct {
-	store map[string]utils.ChecksumInfo
+type MockChecksumStorage struct {
+	Store map[string]utils.ChecksumInfo
 }

 // NewMockChecksumStorage creates aptly.ChecksumStorage for tests
 func NewMockChecksumStorage() aptly.ChecksumStorage {
-	return &mockChecksumStorage{
-		store: make(map[string]utils.ChecksumInfo),
+	return &MockChecksumStorage{
+		Store: make(map[string]utils.ChecksumInfo),
 	}
 }

-func (st *mockChecksumStorage) Get(path string) (*utils.ChecksumInfo, error) {
-	c, ok := st.store[path]
+func (st *MockChecksumStorage) Get(path string) (*utils.ChecksumInfo, error) {
+	c, ok := st.Store[path]
 	if !ok {
 		return nil, nil
 	}
@@ -25,12 +25,12 @@ func (st *mockChecksumStorage) Get(path string) (*utils.ChecksumInfo, error) {
 	return &c, nil
 }

-func (st *mockChecksumStorage) Update(path string, c *utils.ChecksumInfo) error {
-	st.store[path] = *c
+func (st *MockChecksumStorage) Update(path string, c *utils.ChecksumInfo) error {
+	st.Store[path] = *c
 	return nil
 }

 // Check interface
 var (
-	_ aptly.ChecksumStorage = &mockChecksumStorage{}
+	_ aptly.ChecksumStorage = &MockChecksumStorage{}
 )
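
With the type and its Store field exported, tests outside the files package can seed and inspect checksums directly, as the Azure pool tests above do. A minimal sketch (the pool path and checksum values are placeholders):

package main

import (
	"fmt"

	"github.com/aptly-dev/aptly/files"
	"github.com/aptly-dev/aptly/utils"
)

func main() {
	cs := files.NewMockChecksumStorage()

	// seed a fixture through the aptly.ChecksumStorage interface...
	_ = cs.Update("c7/6b/abc_a.deb", &utils.ChecksumInfo{SHA256: "abc"})

	// ...then reach into the exported Store map directly, as the Azure
	// pool tests do with s.cs.(*files.MockChecksumStorage).Store
	fmt.Println(cs.(*files.MockChecksumStorage).Store["c7/6b/abc_a.deb"].SHA256) // abc
}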


@@ -111,7 +111,7 @@ func (s *PackagePoolSuite) TestImportOk(c *C) {
 	// SHA256 should be automatically calculated
 	c.Check(s.checksum.SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
 	// checksum storage is filled with new checksum
-	c.Check(s.cs.(*mockChecksumStorage).store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
+	c.Check(s.cs.(*MockChecksumStorage).Store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")

 	info, err := s.pool.Stat(path)
 	c.Assert(err, IsNil)
@@ -128,7 +128,7 @@ func (s *PackagePoolSuite) TestImportOk(c *C) {
 	c.Check(err, IsNil)
 	c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_some.deb")
 	// checksum storage is filled with new checksum
-	c.Check(s.cs.(*mockChecksumStorage).store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
+	c.Check(s.cs.(*MockChecksumStorage).Store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")

 	// double import, should be ok
 	s.checksum.SHA512 = "" // clear checksum
@@ -139,7 +139,7 @@ func (s *PackagePoolSuite) TestImportOk(c *C) {
 	c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")

 	// clear checksum storage, and do double-import
-	delete(s.cs.(*mockChecksumStorage).store, path)
+	delete(s.cs.(*MockChecksumStorage).Store, path)
 	s.checksum.SHA512 = "" // clear checksum
 	path, err = s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
 	c.Check(err, IsNil)
@@ -244,7 +244,7 @@ func (s *PackagePoolSuite) TestVerify(c *C) {
 	c.Check(exists, Equals, false)

 	// check existence, with missing checksum and no info in checksum storage
-	delete(s.cs.(*mockChecksumStorage).store, path)
+	delete(s.cs.(*MockChecksumStorage).Store, path)
 	s.checksum.SHA512 = ""
 	ppath, exists, err = s.pool.Verify("", filepath.Base(s.debFile), &s.checksum, s.cs)
 	c.Check(ppath, Equals, path)


@@ -44,7 +44,14 @@ Configuration file is stored in JSON format (default values shown below):
   "gpgProvider": "gpg",
   "downloadSourcePackages": false,
   "packagePoolStorage": {
-    "path": "$ROOTDIR/pool"
+    "path": "$ROOTDIR/pool",
+    "azure": {
+      "accountName": "",
+      "accountKey": "",
+      "container": "repo",
+      "prefix": "",
+      "endpoint": ""
+    }
   },
   "skipLegacyPool": true,
   "ppaDistributorID": "ubuntu",
@@ -163,8 +170,12 @@ Options:
   this setting could be controlled on per-mirror basis with `-with-sources` flag

 * `packagePoolStorage`:
-  is the directory to store downloaded packages into; defaults to the value of
-  `rootDir` followed by `/pool`
+  configures the location to store downloaded packages (defaults to the
+  path `$ROOTDIR/pool`), by setting the value of the `type`:
+  * `path`: store the packages in the given path
+  * `azure`: store the packages in the given Azure Blob Storage container
+    (see the section on Azure publishing below for information on the
+    configuration)

 * `skipLegacyPool`:
   in aptly up to version 1.0.0, package files were stored in internal package pool
@@ -189,6 +200,19 @@ Options:
 * `AzurePublishEndpoints`:
   configuration of Azure publishing endpoints (see below)

+## CUSTOM PACKAGE POOLS
+
+aptly defaults to storing downloaded packages at `rootDir`/pool. In order to
+change this, you can set the `type` key within `packagePoolStorage` to one of
+two values:
+
+* `local`: Store the package pool locally (the default). In order to change
+  the path, additionally set the `path` key within `packagePoolStorage` to
+  the desired location.
+* `azure`: Store the package pool in an Azure Blob Storage container. Any
+  keys in the below section on Azure publishing may be set on the
+  `packagePoolStorage` object in order to configure the Azure connection.
+
 ## FILESYSTEM PUBLISHING ENDPOINTS

 aptly defaults to publish to a single publish directory under `rootDir`/public. For


@@ -60,7 +60,8 @@ class AzureTest(BaseTest):
         }

         if self.use_azure_pool:
             self.configOverride['packagePoolStorage'] = {
-                'azure': self.azure_endpoint,
+                'type': 'azure',
+                **self.azure_endpoint,
             }

         super(AzureTest, self).prepare()
@@ -78,7 +79,7 @@ class AzureTest(BaseTest):
         ]

         if path.startswith('public/'):
-            path = path[7:]
+            path = path.removeprefix('public/')

         if path in self.container_contents:
             return True
@@ -96,6 +97,10 @@ class AzureTest(BaseTest):
         if not self.check_path(path):
             raise Exception("path %s doesn't exist" % (path,))

+    def check_exists_azure_only(self, path):
+        self.check_exists(path)
+        BaseTest.check_not_exists(self, path)
+
     def check_not_exists(self, path):
         if self.check_path(path):
             raise Exception('path %s exists' % (path,))
@@ -104,7 +109,7 @@ class AzureTest(BaseTest):
         assert not mode

         if path.startswith('public/'):
-            path = path[7:]
+            path = path.removeprefix('public/')

         blob = self.container.download_blob(path)
         return blob.readall().decode('utf-8')


@@ -0,0 +1,5 @@
Loading packages...
[+] libboost-program-options-dev_1.49.0.1_i386 added
[+] libboost-program-options-dev_1.62.0.1_i386 added
[+] pyspi_0.6.1-1.4_source added
[+] pyspi_0.6.1-1.3_source added


@@ -0,0 +1,10 @@
Name: repo
Comment: Repo
Default Distribution: squeeze
Default Component: main
Number of packages: 4
Packages:
libboost-program-options-dev_1.62.0.1_i386
libboost-program-options-dev_1.49.0.1_i386
pyspi_0.6.1-1.4_source
pyspi_0.6.1-1.3_source

system/t09_repo/azure.py (new file, 42 lines)

@@ -0,0 +1,42 @@
from azure_lib import AzureTest


class AzureRepoTest(AzureTest):
    """
    Azure: add directory to repo
    """
    fixtureCmds = [
        'aptly repo create -comment=Repo -distribution=squeeze repo',
    ]
    runCmd = 'aptly repo add repo ${files}'
    use_azure_pool = True

    def prepare(self):
        super(AzureRepoTest, self).prepare()

        self.configOverride['packagePoolStorage'] = {
            'azure': self.azure_endpoint,
        }

    def check(self):
        self.check_output()
        self.check_cmd_output('aptly repo show -with-packages repo', 'repo_show')

        # check pool
        self.check_exists_azure_only(
            'c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb'
        )
        self.check_exists_azure_only(
            '2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz'
        )
        self.check_exists_azure_only(
            'd4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc'
        )
        self.check_exists_azure_only(
            '64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz'
        )
        self.check_exists_azure_only(
            '28/9d/3aefa970876e9c43686ce2b02f47_pyspi-0.6.1-1.3.stripped.dsc'
        )


@@ -42,7 +42,21 @@ func (cksum *ChecksumInfo) Complete() bool {
 	return cksum.MD5 != "" && cksum.SHA1 != "" && cksum.SHA256 != "" && cksum.SHA512 != ""
 }

-// ChecksumsForFile generates size, MD5, SHA1 & SHA256 checksums for given file
+// ChecksumsForReader generates size, MD5, SHA1 & SHA256 checksums for the given
+// io.Reader
+func ChecksumsForReader(rd io.Reader) (ChecksumInfo, error) {
+	w := NewChecksumWriter()
+
+	_, err := io.Copy(w, rd)
+	if err != nil {
+		return ChecksumInfo{}, err
+	}
+
+	return w.Sum(), nil
+}
+
+// ChecksumsForFile generates size, MD5, SHA1 & SHA256 checksums for the file at
+// the given path
 func ChecksumsForFile(path string) (ChecksumInfo, error) {
 	file, err := os.Open(path)
 	if err != nil {
@@ -50,14 +64,7 @@ func ChecksumsForFile(path string) (ChecksumInfo, error) {
 	}
 	defer file.Close()

-	w := NewChecksumWriter()
-
-	_, err = io.Copy(w, file)
-	if err != nil {
-		return ChecksumInfo{}, err
-	}
-
-	return w.Sum(), nil
+	return ChecksumsForReader(file)
 }

 // ChecksumWriter is a writer that does checksum calculation on the fly passing data
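
A quick usage sketch of the new helper, assuming only the exported API shown above (the input string is arbitrary):

package main

import (
	"fmt"
	"strings"

	"github.com/aptly-dev/aptly/utils"
)

func main() {
	// any io.Reader works now, not just a file on disk; this is what
	// the Azure pool uses to checksum a blob's download stream
	info, err := utils.ChecksumsForReader(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Size, info.MD5)
}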


@@ -2,6 +2,7 @@ package utils
 import (
 	"encoding/json"
+	"fmt"
 	"os"
 	"path/filepath"
 )
@@ -24,7 +25,7 @@ type ConfigStructure struct { // nolint: maligned
 	GpgDisableVerify       bool               `json:"gpgDisableVerify"`
 	GpgProvider            string             `json:"gpgProvider"`
 	DownloadSourcePackages bool               `json:"downloadSourcePackages"`
-	PackagePoolStorage     PackagePool        `json:"packagePoolStorage"`
+	PackagePoolStorage     PackagePoolStorage `json:"packagePoolStorage"`
 	SkipLegacyPool         bool               `json:"skipLegacyPool"`
 	PpaDistributorID       string             `json:"ppaDistributorID"`
 	PpaCodename            string             `json:"ppaCodename"`
@@ -33,7 +34,7 @@ type ConfigStructure struct { // nolint: maligned
 	FileSystemPublishRoots map[string]FileSystemPublishRoot `json:"FileSystemPublishEndpoints"`
 	S3PublishRoots         map[string]S3PublishRoot         `json:"S3PublishEndpoints"`
 	SwiftPublishRoots      map[string]SwiftPublishRoot      `json:"SwiftPublishEndpoints"`
-	AzurePublishRoots      map[string]AzurePublishRoot      `json:"AzurePublishEndpoints"`
+	AzurePublishRoots      map[string]AzureEndpoint         `json:"AzurePublishEndpoints"`
 	AsyncAPI               bool                             `json:"AsyncAPI"`
 	EnableMetricsEndpoint  bool                             `json:"enableMetricsEndpoint"`
 	LogLevel               string                           `json:"logLevel"`
@@ -41,8 +42,52 @@ type ConfigStructure struct { // nolint: maligned
 	ServeInAPIMode         bool                             `json:"serveInAPIMode"`
 }

-type PackagePool struct {
-	Path string `json:"path"`
+type LocalPoolStorage struct {
+	Path string `json:"path,omitempty"`
+}
+
+type PackagePoolStorage struct {
+	Local *LocalPoolStorage
+	Azure *AzureEndpoint
+}
+
+func (pool *PackagePoolStorage) UnmarshalJSON(data []byte) error {
+	var discriminator struct {
+		Type string `json:"type"`
+	}
+	if err := json.Unmarshal(data, &discriminator); err != nil {
+		return err
+	}
+
+	switch discriminator.Type {
+	case "azure":
+		pool.Azure = &AzureEndpoint{}
+		return json.Unmarshal(data, &pool.Azure)
+	case "local", "":
+		pool.Local = &LocalPoolStorage{}
+		return json.Unmarshal(data, &pool.Local)
+	default:
+		return fmt.Errorf("unknown pool storage type: %s", discriminator.Type)
+	}
+}
+
+func (pool *PackagePoolStorage) MarshalJSON() ([]byte, error) {
+	var wrapper struct {
+		Type string `json:"type,omitempty"`
+		*LocalPoolStorage
+		*AzureEndpoint
+	}
+
+	if pool.Azure != nil {
+		wrapper.Type = "azure"
+		wrapper.AzureEndpoint = pool.Azure
+	} else if pool.Local.Path != "" {
+		wrapper.Type = "local"
+		wrapper.LocalPoolStorage = pool.Local
+	}
+
+	return json.Marshal(wrapper)
 }

 // FileSystemPublishRoot describes single filesystem publishing entry point
@@ -86,8 +131,8 @@ type SwiftPublishRoot struct {
 	Container string `json:"container"`
 }

-// AzurePublishRoot describes single Azure publishing entry point
-type AzurePublishRoot struct {
+// AzureEndpoint describes single Azure publishing entry point
+type AzureEndpoint struct {
 	AccountName string `json:"accountName"`
 	AccountKey  string `json:"accountKey"`
 	Container   string `json:"container"`
@@ -111,14 +156,16 @@ var Config = ConfigStructure{
 	GpgDisableSign:         false,
 	GpgDisableVerify:       false,
 	DownloadSourcePackages: false,
-	PackagePoolStorage:     PackagePool{Path: ""},
+	PackagePoolStorage: PackagePoolStorage{
+		Local: &LocalPoolStorage{Path: ""},
+	},
 	SkipLegacyPool:         false,
 	PpaDistributorID:       "ubuntu",
 	PpaCodename:            "",
 	FileSystemPublishRoots: map[string]FileSystemPublishRoot{},
 	S3PublishRoots:         map[string]S3PublishRoot{},
 	SwiftPublishRoots:      map[string]SwiftPublishRoot{},
-	AzurePublishRoots:      map[string]AzurePublishRoot{},
+	AzurePublishRoots:      map[string]AzureEndpoint{},
 	AsyncAPI:               false,
 	EnableMetricsEndpoint:  false,
 	LogLevel:               "debug",
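
A small sketch of how the `type` discriminator selects a branch during unmarshalling, assuming the exported names above (the path and container values are placeholders):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/aptly-dev/aptly/utils"
)

func main() {
	// "local" (or a missing "type") populates the Local branch
	var local utils.PackagePoolStorage
	if err := json.Unmarshal([]byte(`{"type": "local", "path": "/tmp/aptly-pool"}`), &local); err != nil {
		panic(err)
	}
	fmt.Println(local.Local.Path) // /tmp/aptly-pool

	// "azure" populates the AzureEndpoint branch instead
	var azure utils.PackagePoolStorage
	if err := json.Unmarshal([]byte(`{"type": "azure", "container": "repo"}`), &azure); err != nil {
		panic(err)
	}
	fmt.Println(azure.Azure.Container) // repo
}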


@@ -34,7 +34,7 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
 	s.config.DatabaseOpenAttempts = 5
 	s.config.GpgProvider = "gpg"

-	s.config.PackagePoolStorage.Path = "/tmp/aptly-pool"
+	s.config.PackagePoolStorage.Local = &LocalPoolStorage{"/tmp/aptly-pool"}

 	s.config.FileSystemPublishRoots = map[string]FileSystemPublishRoot{"test": {
 		RootDir: "/opt/aptly-publish"}}
@@ -46,7 +46,7 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
 	s.config.SwiftPublishRoots = map[string]SwiftPublishRoot{"test": {
 		Container: "repo"}}

-	s.config.AzurePublishRoots = map[string]AzurePublishRoot{"test": {
+	s.config.AzurePublishRoots = map[string]AzureEndpoint{"test": {
 		Container: "repo"}}

 	s.config.LogLevel = "info"
@@ -81,6 +81,7 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
 		"  \"gpgProvider\": \"gpg\",\n"+
 		"  \"downloadSourcePackages\": false,\n"+
 		"  \"packagePoolStorage\": {\n"+
+		"    \"type\": \"local\",\n"+
 		"    \"path\": \"/tmp/aptly-pool\"\n"+
 		"  },\n"+
 		"  \"skipLegacyPool\": false,\n"+