use new azure-sdk

André Roth
2024-11-11 15:03:02 +01:00
parent d96ef0f178
commit e2cbd637b8
9 changed files with 187 additions and 184 deletions

View File

@@ -5,28 +5,35 @@ package azure
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"time"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/aptly-dev/aptly/aptly"
)
func isBlobNotFound(err error) bool {
storageError, ok := err.(azblob.StorageError)
return ok && storageError.ServiceCode() == azblob.ServiceCodeBlobNotFound
var respErr *azcore.ResponseError
if errors.As(err, &respErr) {
return respErr.StatusCode == 404 // BlobNotFound
}
return false
}
type azContext struct {
container azblob.ContainerURL
client *azblob.Client
container string
prefix string
}
func newAzContext(accountName, accountKey, container, prefix, endpoint string) (*azContext, error) {
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
return nil, err
}
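
The removed check matched azblob.ServiceCodeBlobNotFound; the replacement unwraps an *azcore.ResponseError and compares the HTTP status. For reference, the new module also ships a bloberror helper package with typed error codes; a minimal alternative sketch (not what this commit uses), with placeholder names:

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

// blobExists reports whether a blob exists, treating BlobNotFound as a clean
// "no" rather than an error. Client, container and name are placeholders.
func blobExists(ctx context.Context, client *azblob.Client, container, name string) (bool, error) {
	blobClient := client.ServiceClient().NewContainerClient(container).NewBlobClient(name)
	if _, err := blobClient.GetProperties(ctx, nil); err != nil {
		if bloberror.HasCode(err, bloberror.BlobNotFound) {
			return false, nil
		}
		return false, fmt.Errorf("checking %s/%s: %w", container, name, err)
	}
	return true, nil
}
```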
@@ -35,15 +42,14 @@ func newAzContext(accountName, accountKey, container, prefix, endpoint string) (
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
}
url, err := url.Parse(fmt.Sprintf("%s/%s", endpoint, container))
serviceClient, err := azblob.NewClientWithSharedKeyCredential(endpoint, cred, nil)
if err != nil {
return nil, err
}
containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
result := &azContext{
container: containerURL,
client: serviceClient,
container: container,
prefix: prefix,
}
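
Where the old SDK built an azblob.ContainerURL from a parsed URL and pipeline, the new azblob.Client is scoped to the account's blob endpoint and takes the container name on every call, which is why azContext now stores the container as a plain string. A minimal construction sketch under those assumptions (placeholder parameters, default client options):

```go
import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// newServiceClient builds a client bound to the account's blob endpoint;
// container names are passed on each call. Parameters are placeholders, and
// a custom endpoint (e.g. Azurite) could be substituted for the default URL.
func newServiceClient(accountName, accountKey string) (*azblob.Client, error) {
	cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return nil, err
	}
	endpoint := fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
	return azblob.NewClientWithSharedKeyCredential(endpoint, cred, nil)
}
```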
@@ -54,10 +60,6 @@ func (az *azContext) blobPath(path string) string {
return filepath.Join(az.prefix, path)
}
func (az *azContext) blobURL(path string) azblob.BlobURL {
return az.container.NewBlobURL(az.blobPath(path))
}
func (az *azContext) internalFilelist(prefix string, progress aptly.Progress) (paths []string, md5s []string, err error) {
const delimiter = "/"
paths = make([]string, 0, 1024)
@@ -67,27 +69,33 @@ func (az *azContext) internalFilelist(prefix string, progress aptly.Progress) (p
prefix += delimiter
}
for marker := (azblob.Marker{}); marker.NotDone(); {
listBlob, err := az.container.ListBlobsFlatSegment(
context.Background(), marker, azblob.ListBlobsSegmentOptions{
Prefix: prefix,
MaxResults: 1,
Details: azblob.BlobListingDetails{Metadata: true}})
ctx := context.Background()
maxResults := int32(1)
pager := az.client.NewListBlobsFlatPager(az.container, &azblob.ListBlobsFlatOptions{
Prefix: &prefix,
MaxResults: &maxResults,
Include: azblob.ListBlobsInclude{Metadata: true},
})
// Iterate over each page
for pager.More() {
page, err := pager.NextPage(ctx)
if err != nil {
return nil, nil, fmt.Errorf("error listing under prefix %s in %s: %s", prefix, az, err)
}
marker = listBlob.NextMarker
for _, blob := range listBlob.Segment.BlobItems {
for _, blob := range page.Segment.BlobItems {
if prefix == "" {
paths = append(paths, blob.Name)
paths = append(paths, *blob.Name)
} else {
paths = append(paths, blob.Name[len(prefix):])
name := *blob.Name
paths = append(paths, name[len(prefix):])
}
md5s = append(md5s, fmt.Sprintf("%x", blob.Properties.ContentMD5))
}
b := *blob
md5 := b.Properties.ContentMD5
md5s = append(md5s, fmt.Sprintf("%x", md5))
}
if progress != nil {
time.Sleep(time.Duration(500) * time.Millisecond)
progress.AddBar(1)
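
Marker-based segment listing is replaced by the pager shown above. A self-contained sketch of the same pattern, collecting only blob names (placeholder names, simplified error handling; note that listing fields such as Name are pointers in the new SDK):

```go
import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// listNames collects blob names under a prefix with the pager API.
func listNames(ctx context.Context, client *azblob.Client, container, prefix string) ([]string, error) {
	var names []string
	pager := client.NewListBlobsFlatPager(container, &azblob.ListBlobsFlatOptions{
		Prefix: &prefix,
	})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, item := range page.Segment.BlobItems {
			names = append(names, *item.Name) // listing fields are pointers
		}
	}
	return names, nil
}
```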
@@ -97,28 +105,27 @@ func (az *azContext) internalFilelist(prefix string, progress aptly.Progress) (p
return paths, md5s, nil
}
func (az *azContext) putFile(blob azblob.BlobURL, source io.Reader, sourceMD5 string) error {
uploadOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: 4 * 1024 * 1024,
MaxBuffers: 8,
func (az *azContext) putFile(blobName string, source io.Reader, sourceMD5 string) error {
uploadOptions := &azblob.UploadFileOptions{
BlockSize: 4 * 1024 * 1024,
Concurrency: 8,
}
path := az.blobPath(blobName)
if len(sourceMD5) > 0 {
decodedMD5, err := hex.DecodeString(sourceMD5)
if err != nil {
return err
}
uploadOptions.BlobHTTPHeaders = azblob.BlobHTTPHeaders{
ContentMD5: decodedMD5,
uploadOptions.HTTPHeaders = &blob.HTTPHeaders{
BlobContentMD5: decodedMD5,
}
}
_, err := azblob.UploadStreamToBlockBlob(
context.Background(),
source,
blob.ToBlockBlobURL(),
uploadOptions,
)
var err error
if file, ok := source.(*os.File); ok {
_, err = az.client.UploadFile(context.TODO(), az.container, path, file, uploadOptions)
}
return err
}
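
putFile now goes through the client's UploadFile helper, which requires an *os.File (hence the type assertion above). For arbitrary io.Reader sources the SDK also provides UploadStream; a minimal sketch of that variant, not part of this commit, with placeholder names:

```go
import (
	"context"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// uploadReader uploads content from any io.Reader, optionally attaching a
// precomputed MD5 as the blob's Content-MD5 header.
func uploadReader(ctx context.Context, client *azblob.Client, container, name string, src io.Reader, contentMD5 []byte) error {
	opts := &azblob.UploadStreamOptions{
		BlockSize:   4 * 1024 * 1024,
		Concurrency: 8,
	}
	if len(contentMD5) > 0 {
		opts.HTTPHeaders = &blob.HTTPHeaders{BlobContentMD5: contentMD5}
	}
	_, err := client.UploadStream(ctx, container, name, src, opts)
	return err
}
```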

View File

@@ -5,7 +5,6 @@ import (
"os"
"path/filepath"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/aptly-dev/aptly/aptly"
"github.com/aptly-dev/aptly/utils"
"github.com/pkg/errors"
@@ -41,10 +40,7 @@ func (pool *PackagePool) buildPoolPath(filename string, checksums *utils.Checksu
return filepath.Join(hash[0:2], hash[2:4], hash[4:32]+"_"+filename)
}
func (pool *PackagePool) ensureChecksums(
poolPath string,
checksumStorage aptly.ChecksumStorage,
) (*utils.ChecksumInfo, error) {
func (pool *PackagePool) ensureChecksums(poolPath string, checksumStorage aptly.ChecksumStorage) (*utils.ChecksumInfo, error) {
targetChecksums, err := checksumStorage.Get(poolPath)
if err != nil {
return nil, err
@@ -52,8 +48,7 @@ func (pool *PackagePool) ensureChecksums(
if targetChecksums == nil {
// we don't have checksums stored yet for this file
blob := pool.az.blobURL(poolPath)
download, err := blob.Download(context.Background(), 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
download, err := pool.az.client.DownloadStream(context.Background(), pool.az.container, poolPath, nil)
if err != nil {
if isBlobNotFound(err) {
return nil, nil
@@ -63,7 +58,7 @@ func (pool *PackagePool) ensureChecksums(
}
targetChecksums = &utils.ChecksumInfo{}
*targetChecksums, err = utils.ChecksumsForReader(download.Body(azblob.RetryReaderOptions{}))
*targetChecksums, err = utils.ChecksumsForReader(download.Body)
if err != nil {
return nil, errors.Wrapf(err, "error checksumming blob at %s", poolPath)
}
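
DownloadStream replaces the old blob.Download call; its response carries a plain io.ReadCloser in Body, and a retry-wrapped reader is still available via the response's NewRetryReader helper if needed. A small sketch of reading a blob fully (placeholder names, simplified error handling):

```go
import (
	"context"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// readBlob streams a blob into memory. The caller of DownloadStream owns
// closing the response body.
func readBlob(ctx context.Context, client *azblob.Client, container, name string) ([]byte, error) {
	resp, err := client.DownloadStream(ctx, container, name, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}
```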
@@ -92,46 +87,49 @@ func (pool *PackagePool) LegacyPath(_ string, _ *utils.ChecksumInfo) (string, er
}
func (pool *PackagePool) Size(path string) (int64, error) {
blob := pool.az.blobURL(path)
props, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
serviceClient := pool.az.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(pool.az.container)
blobClient := containerClient.NewBlobClient(path)
props, err := blobClient.GetProperties(context.TODO(), nil)
if err != nil {
return 0, errors.Wrapf(err, "error examining %s from %s", path, pool)
}
return props.ContentLength(), nil
return *props.ContentLength, nil
}
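
Response fields such as ContentLength are pointers in the new SDK, so the dereference above relies on the service returning the header. A defensive variant as a sketch (placeholder names, not aptly code):

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// blobSize returns a blob's size, guarding the pointer-typed response field.
func blobSize(ctx context.Context, client *azblob.Client, container, name string) (int64, error) {
	props, err := client.ServiceClient().NewContainerClient(container).NewBlobClient(name).GetProperties(ctx, nil)
	if err != nil {
		return 0, err
	}
	if props.ContentLength == nil {
		return 0, fmt.Errorf("no ContentLength returned for %s", name)
	}
	return *props.ContentLength, nil
}
```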
func (pool *PackagePool) Open(path string) (aptly.ReadSeekerCloser, error) {
blob := pool.az.blobURL(path)
temp, err := os.CreateTemp("", "blob-download")
if err != nil {
return nil, errors.Wrap(err, "error creating temporary file for blob download")
return nil, errors.Wrapf(err, "error creating tempfile for %s", path)
}
defer os.Remove(temp.Name())
err = azblob.DownloadBlobToFile(context.Background(), blob, 0, 0, temp, azblob.DownloadFromBlobOptions{})
_, err = pool.az.client.DownloadFile(context.TODO(), pool.az.container, path, temp, nil)
if err != nil {
return nil, errors.Wrapf(err, "error downloading blob at %s", path)
return nil, errors.Wrapf(err, "error downloading blob %s", path)
}
return temp, nil
}
func (pool *PackagePool) Remove(path string) (int64, error) {
blob := pool.az.blobURL(path)
props, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
serviceClient := pool.az.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(pool.az.container)
blobClient := containerClient.NewBlobClient(path)
props, err := blobClient.GetProperties(context.TODO(), nil)
if err != nil {
return 0, errors.Wrapf(err, "error getting props of %s from %s", path, pool)
return 0, errors.Wrapf(err, "error examining %s from %s", path, pool)
}
_, err = blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
_, err = pool.az.client.DeleteBlob(context.Background(), pool.az.container, path, nil)
if err != nil {
return 0, errors.Wrapf(err, "error deleting %s from %s", path, pool)
}
return props.ContentLength(), nil
return *props.ContentLength, nil
}
func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.ChecksumInfo, _ bool, checksumStorage aptly.ChecksumStorage) (string, error) {
@@ -145,7 +143,6 @@ func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.Check
}
path := pool.buildPoolPath(basename, checksums)
blob := pool.az.blobURL(path)
targetChecksums, err := pool.ensureChecksums(path, checksumStorage)
if err != nil {
return "", err
@@ -161,7 +158,7 @@ func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.Check
}
defer source.Close()
err = pool.az.putFile(blob, source, checksums.MD5)
err = pool.az.putFile(path, source, checksums.MD5)
if err != nil {
return "", err
}

View File

@@ -7,7 +7,7 @@ import (
"path/filepath"
"runtime"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/aptly-dev/aptly/aptly"
"github.com/aptly-dev/aptly/files"
"github.com/aptly-dev/aptly/utils"
@@ -50,8 +50,10 @@ func (s *PackagePoolSuite) SetUpTest(c *C) {
s.pool, err = NewPackagePool(s.accountName, s.accountKey, container, "", s.endpoint)
c.Assert(err, IsNil)
cnt := s.pool.az.container
_, err = cnt.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer)
publicAccessType := azblob.PublicAccessTypeContainer
_, err = s.pool.az.client.CreateContainer(context.TODO(), s.pool.az.container, &azblob.CreateContainerOptions{
Access: &publicAccessType,
})
c.Assert(err, IsNil)
s.prefixedPool, err = NewPackagePool(s.accountName, s.accountKey, container, prefix, s.endpoint)
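
Container management moved onto the service-scoped client, as the test setup above shows. When running the suite repeatedly against the same Azurite instance, it can help to tolerate an existing container; a hedged sketch using the bloberror codes (not what the test does, it asserts a clean create):

```go
import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

// ensureContainer creates a container with container-level public access and
// tolerates one that already exists. Placeholder names.
func ensureContainer(ctx context.Context, client *azblob.Client, name string) error {
	access := azblob.PublicAccessTypeContainer
	_, err := client.CreateContainer(ctx, name, &azblob.CreateContainerOptions{Access: &access})
	if bloberror.HasCode(err, bloberror.ContainerAlreadyExists) {
		return nil
	}
	return err
}
```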

View File

@@ -3,20 +3,21 @@ package azure
import (
"context"
"fmt"
"net/http"
"os"
"path/filepath"
"time"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease"
"github.com/aptly-dev/aptly/aptly"
"github.com/aptly-dev/aptly/utils"
"github.com/pborman/uuid"
"github.com/pkg/errors"
)
// PublishedStorage abstract file system with published files (actually hosted on Azure)
type PublishedStorage struct {
container azblob.ContainerURL
prefix string
az *azContext
pathCache map[string]map[string]string
@@ -66,7 +67,7 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
}
defer source.Close()
err = storage.az.putFile(storage.az.blobURL(path), source, sourceMD5)
err = storage.az.putFile(path, source, sourceMD5)
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s", sourceFilename, storage))
}
@@ -76,14 +77,15 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
// RemoveDirs removes directory structure under public path
func (storage *PublishedStorage) RemoveDirs(path string, _ aptly.Progress) error {
path = storage.az.blobPath(path)
filelist, err := storage.Filelist(path)
if err != nil {
return err
}
for _, filename := range filelist {
blob := storage.az.blobURL(filepath.Join(path, filename))
_, err := blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
blob := filepath.Join(path, filename)
_, err := storage.az.client.DeleteBlob(context.Background(), storage.az.container, blob, nil)
if err != nil {
return fmt.Errorf("error deleting path %s from %s: %s", filename, storage, err)
}
@@ -94,8 +96,8 @@ func (storage *PublishedStorage) RemoveDirs(path string, _ aptly.Progress) error
// Remove removes single file under public path
func (storage *PublishedStorage) Remove(path string) error {
blob := storage.az.blobURL(path)
_, err := blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
path = storage.az.blobPath(path)
_, err := storage.az.client.DeleteBlob(context.Background(), storage.az.container, path, nil)
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("error deleting %s from %s: %s", path, storage, err))
}
@@ -114,9 +116,8 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
sourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error {
relFilePath := filepath.Join(publishedRelPath, fileName)
// prefixRelFilePath := filepath.Join(publishedPrefix, relFilePath)
// FIXME: check how to integrate publishedPrefix:
poolPath := storage.az.blobPath(fileName)
prefixRelFilePath := filepath.Join(publishedPrefix, relFilePath)
poolPath := storage.az.blobPath(prefixRelFilePath)
if storage.pathCache == nil {
storage.pathCache = make(map[string]map[string]string)
@@ -159,7 +160,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
}
defer source.Close()
err = storage.az.putFile(storage.az.blobURL(relFilePath), source, sourceMD5)
err = storage.az.putFile(relFilePath, source, sourceMD5)
if err == nil {
pathCache[relFilePath] = sourceMD5
} else {
@@ -176,57 +177,58 @@ func (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {
}
// Internal copy or move implementation
func (storage *PublishedStorage) internalCopyOrMoveBlob(src, dst string, metadata azblob.Metadata, move bool) error {
func (storage *PublishedStorage) internalCopyOrMoveBlob(src, dst string, metadata map[string]*string, move bool) error {
const leaseDuration = 30
leaseID := uuid.NewRandom().String()
dstBlobURL := storage.az.blobURL(dst)
srcBlobURL := storage.az.blobURL(src)
leaseResp, err := srcBlobURL.AcquireLease(context.Background(), "", leaseDuration, azblob.ModifiedAccessConditions{})
if err != nil || leaseResp.StatusCode() != http.StatusCreated {
return fmt.Errorf("error acquiring lease on source blob %s", srcBlobURL)
serviceClient := storage.az.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(storage.az.container)
srcBlobClient := containerClient.NewBlobClient(src)
blobLeaseClient, err := lease.NewBlobClient(srcBlobClient, &lease.BlobClientOptions{LeaseID: to.Ptr(leaseID)})
if err != nil {
return fmt.Errorf("error acquiring lease on source blob %s", src)
}
defer srcBlobURL.BreakLease(context.Background(), azblob.LeaseBreakNaturally, azblob.ModifiedAccessConditions{})
srcBlobLeaseID := leaseResp.LeaseID()
copyResp, err := dstBlobURL.StartCopyFromURL(
context.Background(),
srcBlobURL.URL(),
metadata,
azblob.ModifiedAccessConditions{},
azblob.BlobAccessConditions{},
azblob.DefaultAccessTier,
nil)
_, err = blobLeaseClient.AcquireLease(context.Background(), leaseDuration, nil)
if err != nil {
return fmt.Errorf("error acquiring lease on source blob %s", src)
}
defer blobLeaseClient.BreakLease(context.Background(), &lease.BlobBreakOptions{BreakPeriod: to.Ptr(int32(60))})
dstBlobClient := containerClient.NewBlobClient(dst)
copyResp, err := dstBlobClient.StartCopyFromURL(context.Background(), srcBlobClient.URL(), &blob.StartCopyFromURLOptions{
Metadata: metadata,
})
if err != nil {
return fmt.Errorf("error copying %s -> %s in %s: %s", src, dst, storage, err)
}
copyStatus := copyResp.CopyStatus()
copyStatus := *copyResp.CopyStatus
for {
if copyStatus == azblob.CopyStatusSuccess {
if copyStatus == blob.CopyStatusTypeSuccess {
if move {
_, err = srcBlobURL.Delete(
context.Background(),
azblob.DeleteSnapshotsOptionNone,
azblob.BlobAccessConditions{
LeaseAccessConditions: azblob.LeaseAccessConditions{LeaseID: srcBlobLeaseID},
})
_, err := storage.az.client.DeleteBlob(context.Background(), storage.az.container, src, &blob.DeleteOptions{
AccessConditions: &blob.AccessConditions{
LeaseAccessConditions: &blob.LeaseAccessConditions{
LeaseID: &leaseID,
},
},
})
return err
}
return nil
} else if copyStatus == azblob.CopyStatusPending {
} else if copyStatus == blob.CopyStatusTypePending {
time.Sleep(1 * time.Second)
blobPropsResp, err := dstBlobURL.GetProperties(
context.Background(),
azblob.BlobAccessConditions{LeaseAccessConditions: azblob.LeaseAccessConditions{LeaseID: srcBlobLeaseID}},
azblob.ClientProvidedKeyOptions{})
getMetadata, err := dstBlobClient.GetProperties(context.TODO(), nil)
if err != nil {
return fmt.Errorf("error getting destination blob properties %s", dstBlobURL)
return fmt.Errorf("error getting copy progress %s", dst)
}
copyStatus = blobPropsResp.CopyStatus()
copyStatus = *getMetadata.CopyStatus
_, err = srcBlobURL.RenewLease(context.Background(), srcBlobLeaseID, azblob.ModifiedAccessConditions{})
_, err = blobLeaseClient.RenewLease(context.Background(), nil)
if err != nil {
return fmt.Errorf("error renewing source blob lease %s", srcBlobURL)
return fmt.Errorf("error renewing source blob lease %s", src)
}
} else {
return fmt.Errorf("error copying %s -> %s in %s: %s", dst, src, storage, copyStatus)
@@ -241,7 +243,9 @@ func (storage *PublishedStorage) RenameFile(oldName, newName string) error {
// SymLink creates a copy of src file and adds link information as meta data
func (storage *PublishedStorage) SymLink(src string, dst string) error {
return storage.internalCopyOrMoveBlob(src, dst, azblob.Metadata{"SymLink": src}, false /* move */)
metadata := make(map[string]*string)
metadata["SymLink"] = &src
return storage.internalCopyOrMoveBlob(src, dst, metadata, false /* do not remove src */)
}
// HardLink using symlink functionality as hard links do not exist
@@ -251,28 +255,33 @@ func (storage *PublishedStorage) HardLink(src string, dst string) error {
// FileExists returns true if path exists
func (storage *PublishedStorage) FileExists(path string) (bool, error) {
blob := storage.az.blobURL(path)
resp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
serviceClient := storage.az.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(storage.az.container)
blobClient := containerClient.NewBlobClient(path)
_, err := blobClient.GetProperties(context.Background(), nil)
if err != nil {
if isBlobNotFound(err) {
return false, nil
}
return false, err
} else if resp.StatusCode() == http.StatusOK {
return true, nil
return false, fmt.Errorf("error checking if blob %s exists: %v", path, err)
}
return false, fmt.Errorf("error checking if blob %s exists %d", blob, resp.StatusCode())
return true, nil
}
// ReadLink returns the symbolic link pointed to by path.
// This simply reads text file created with SymLink
func (storage *PublishedStorage) ReadLink(path string) (string, error) {
blob := storage.az.blobURL(path)
resp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
serviceClient := storage.az.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(storage.az.container)
blobClient := containerClient.NewBlobClient(path)
props, err := blobClient.GetProperties(context.Background(), nil)
if err != nil {
return "", err
} else if resp.StatusCode() != http.StatusOK {
return "", fmt.Errorf("error checking if blob %s exists %d", blob, resp.StatusCode())
return "", fmt.Errorf("failed to get blob properties: %v", err)
}
return resp.NewMetadata()["SymLink"], nil
metadata := props.Metadata
if originalBlob, exists := metadata["original_blob"]; exists {
return *originalBlob, nil
}
return "", fmt.Errorf("error reading link %s: %v", path, err)
}
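
Blob metadata now comes back from GetProperties as map[string]*string. A small sketch of reading a single key defensively; the helper and key name are placeholders, not aptly code:

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// readMetadataKey fetches one metadata value from a blob's properties,
// checking both presence and a non-nil pointer value.
func readMetadataKey(ctx context.Context, client *azblob.Client, container, name, key string) (string, error) {
	props, err := client.ServiceClient().NewContainerClient(container).NewBlobClient(name).GetProperties(ctx, nil)
	if err != nil {
		return "", err
	}
	if value, ok := props.Metadata[key]; ok && value != nil {
		return *value, nil
	}
	return "", fmt.Errorf("metadata key %q not set on %s", key, name)
}
```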

View File

@@ -7,8 +7,11 @@ import (
"io/ioutil"
"os"
"path/filepath"
"bytes"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/aptly-dev/aptly/files"
"github.com/aptly-dev/aptly/utils"
. "gopkg.in/check.v1"
@@ -66,8 +69,10 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
s.storage, err = NewPublishedStorage(s.accountName, s.accountKey, container, "", s.endpoint)
c.Assert(err, IsNil)
cnt := s.storage.az.container
_, err = cnt.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer)
publicAccessType := azblob.PublicAccessTypeContainer
_, err = s.storage.az.client.CreateContainer(context.Background(), s.storage.az.container, &azblob.CreateContainerOptions{
Access: &publicAccessType,
})
c.Assert(err, IsNil)
s.prefixedStorage, err = NewPublishedStorage(s.accountName, s.accountKey, container, prefix, s.endpoint)
@@ -75,41 +80,39 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
}
func (s *PublishedStorageSuite) TearDownTest(c *C) {
cnt := s.storage.az.container
_, err := cnt.Delete(context.Background(), azblob.ContainerAccessConditions{})
_, err := s.storage.az.client.DeleteContainer(context.Background(), s.storage.az.container, nil)
c.Assert(err, IsNil)
}
func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
blob := s.storage.az.container.NewBlobURL(path)
resp, err := blob.Download(context.Background(), 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
resp, err := s.storage.az.client.DownloadStream(context.Background(), s.storage.az.container, path, nil)
c.Assert(err, IsNil)
body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
data, err := ioutil.ReadAll(body)
data, err := ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
return data
}
func (s *PublishedStorageSuite) AssertNoFile(c *C, path string) {
_, err := s.storage.az.container.NewBlobURL(path).GetProperties(
context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
serviceClient := s.storage.az.client.ServiceClient()
containerClient := serviceClient.NewContainerClient(s.storage.az.container)
blobClient := containerClient.NewBlobClient(path)
_, err := blobClient.GetProperties(context.Background(), nil)
c.Assert(err, NotNil)
storageError, ok := err.(azblob.StorageError)
storageError, ok := err.(*azcore.ResponseError)
c.Assert(ok, Equals, true)
c.Assert(string(storageError.ServiceCode()), Equals, string(string(azblob.StorageErrorCodeBlobNotFound)))
c.Assert(storageError.StatusCode, Equals, 404)
}
func (s *PublishedStorageSuite) PutFile(c *C, path string, data []byte) {
hash := md5.Sum(data)
_, err := azblob.UploadBufferToBlockBlob(
context.Background(),
data,
s.storage.az.container.NewBlockBlobURL(path),
azblob.UploadToBlockBlobOptions{
BlobHTTPHeaders: azblob.BlobHTTPHeaders{
ContentMD5: hash[:],
},
})
uploadOptions := &azblob.UploadStreamOptions{
HTTPHeaders: &blob.HTTPHeaders{
BlobContentMD5: hash[:],
},
}
reader := bytes.NewReader(data)
_, err := s.storage.az.client.UploadStream(context.Background(), s.storage.az.container, path, reader, uploadOptions)
c.Assert(err, IsNil)
}
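
For in-memory payloads like this helper's data slice, the SDK also offers UploadBuffer, which skips the bytes.Reader wrapper; a sketch (not used by this commit), with placeholder names:

```go
import (
	"context"
	"crypto/md5"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// putBytes uploads an in-memory payload with its MD5 attached as the blob's
// Content-MD5 header. The test helper above uses UploadStream instead.
func putBytes(ctx context.Context, client *azblob.Client, container, name string, data []byte) error {
	sum := md5.Sum(data)
	_, err := client.UploadBuffer(ctx, container, name, data, &azblob.UploadBufferOptions{
		HTTPHeaders: &blob.HTTPHeaders{BlobContentMD5: sum[:]},
	})
	return err
}
```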
@@ -330,7 +333,7 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
// 2nd link from pool, providing wrong path for source file
//
// this test should check that file already exists in S3 and skip upload (which would fail if not skipped)
// this test should check that file already exists in Azure and skip upload (which would fail if not skipped)
s.prefixedStorage.pathCache = nil
err = s.prefixedStorage.LinkFromPool("", filepath.Join("pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, "wrong-looks-like-pathcache-doesnt-work", cksum1, false)
c.Check(err, IsNil)