mirror of
https://github.com/aptly-dev/aptly.git
synced 2026-01-12 03:21:33 +00:00
Add support for Azure storage as a publishing backend
This adds a new configuration setting: AzurePublishEndpoints, similar to the existing S3PublishEndpoints and SwiftPublishEndpoints. For each endpoint, the following has to be defined: - accountName - accountKey - container - prefix Azure tests require the following environment variables to be set: - AZURE_STORAGE_ACCOUNT - AZURE_STORAGE_ACCESS_KEY With either of these not set, Azure-specific tests are skipped.
This commit is contained in:
2
azure/azure.go
Normal file
2
azure/azure.go
Normal file
@@ -0,0 +1,2 @@
|
||||
// Package azure handles publishing to Azure Storage
|
||||
package azure
|
||||
12
azure/azure_test.go
Normal file
12
azure/azure_test.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// Launch gocheck tests: Test hooks the gocheck suites registered in this
// package into the standard "go test" runner.
func Test(t *testing.T) {
	TestingT(t)
}
|
||||
347
azure/public.go
Normal file
347
azure/public.go
Normal file
@@ -0,0 +1,347 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/aptly-dev/aptly/aptly"
|
||||
"github.com/aptly-dev/aptly/utils"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// PublishedStorage abstract file system with published files (actually hosted on Azure)
type PublishedStorage struct {
	container azblob.ContainerURL // target container (URL plus authenticated pipeline)
	prefix    string              // path prefix prepended to every published path
	// pathCache maps a published path (relative to prefix) to its MD5 hex
	// checksum; lazily populated by LinkFromPool on first use.
	pathCache map[string]string
}

// Check interface
var (
	_ aptly.PublishedStorage = (*PublishedStorage)(nil)
)
|
||||
|
||||
// NewPublishedStorage creates published storage from Azure storage credentials.
// accountName/accountKey authenticate against the given container; prefix is
// prepended to all published paths (may be empty).
func NewPublishedStorage(accountName, accountKey, container, prefix string) (*PublishedStorage, error) {
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return nil, err
	}

	// Standard Azure blob endpoint layout: https://<account>.blob.core.windows.net/<container>
	containerURL, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s", accountName, container))
	if err != nil {
		return nil, err
	}

	result := &PublishedStorage{
		container: azblob.NewContainerURL(*containerURL, azblob.NewPipeline(credential, azblob.PipelineOptions{})),
		prefix:    prefix,
	}

	return result, nil
}
|
||||
|
||||
// String
|
||||
func (storage *PublishedStorage) String() string {
|
||||
return fmt.Sprintf("Azure :%s/%s", storage.container, storage.prefix)
|
||||
}
|
||||
|
||||
// MkDir creates directory recursively under public path.
// Azure blob storage has no real directories, so this is a no-op kept only
// to satisfy the aptly.PublishedStorage interface.
func (storage *PublishedStorage) MkDir(path string) error {
	// no op for Azure
	return nil
}
|
||||
|
||||
// PutFile puts file into published storage at specified path
|
||||
func (storage *PublishedStorage) PutFile(path string, sourceFilename string) error {
|
||||
var (
|
||||
source *os.File
|
||||
err error
|
||||
)
|
||||
|
||||
sourceMD5, err := utils.MD5ChecksumForFile(sourceFilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
source, err = os.Open(sourceFilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer source.Close()
|
||||
|
||||
err = storage.putFile(path, source, sourceMD5)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s", sourceFilename, storage))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// putFile uploads a file-like object to the given path under the storage
// prefix. sourceMD5 is a hex-encoded MD5 of the content; when non-empty it
// is attached as the blob's Content-MD5 header.
func (storage *PublishedStorage) putFile(path string, source io.Reader, sourceMD5 string) error {
	path = filepath.Join(storage.prefix, path)

	blob := storage.container.NewBlockBlobURL(path)

	// Stream the upload in 4 MiB chunks with up to 8 buffers in flight.
	uploadOptions := azblob.UploadStreamToBlockBlobOptions{
		BufferSize: 4 * 1024 * 1024,
		MaxBuffers: 8,
	}
	if len(sourceMD5) > 0 {
		decodedMD5, err := hex.DecodeString(sourceMD5)
		if err != nil {
			return err
		}
		uploadOptions.BlobHTTPHeaders = azblob.BlobHTTPHeaders{
			ContentMD5: decodedMD5,
		}
	}

	_, err := azblob.UploadStreamToBlockBlob(
		context.Background(),
		source,
		blob,
		uploadOptions,
	)

	return err
}
|
||||
|
||||
// RemoveDirs removes directory structure under public path.
// Blob storage has no directories, so the path is expanded via Filelist and
// each blob is deleted individually. progress is currently unused.
func (storage *PublishedStorage) RemoveDirs(path string, progress aptly.Progress) error {
	filelist, err := storage.Filelist(path)
	if err != nil {
		return err
	}

	for _, filename := range filelist {
		blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path, filename))
		_, err := blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
		if err != nil {
			return fmt.Errorf("error deleting path %s from %s: %s", filename, storage, err)
		}
	}

	return nil
}
|
||||
|
||||
// Remove removes single file under public path
|
||||
func (storage *PublishedStorage) Remove(path string) error {
|
||||
blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
|
||||
_, err := blob.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, fmt.Sprintf("error deleting %s from %s: %s", path, storage, err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// LinkFromPool links package file from pool to dist's pool location
|
||||
//
|
||||
// publishedDirectory is desired location in pool (like prefix/pool/component/liba/libav/)
|
||||
// sourcePool is instance of aptly.PackagePool
|
||||
// sourcePath is filepath to package file in package pool
|
||||
//
|
||||
// LinkFromPool returns relative path for the published file to be included in package index
|
||||
func (storage *PublishedStorage) LinkFromPool(publishedDirectory, fileName string, sourcePool aptly.PackagePool,
|
||||
sourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error {
|
||||
|
||||
relPath := filepath.Join(publishedDirectory, fileName)
|
||||
poolPath := filepath.Join(storage.prefix, relPath)
|
||||
|
||||
if storage.pathCache == nil {
|
||||
paths, md5s, err := storage.internalFilelist("")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error caching paths under prefix: %s", err)
|
||||
}
|
||||
|
||||
storage.pathCache = make(map[string]string, len(paths))
|
||||
|
||||
for i := range paths {
|
||||
storage.pathCache[paths[i]] = md5s[i]
|
||||
}
|
||||
}
|
||||
|
||||
destinationMD5, exists := storage.pathCache[relPath]
|
||||
sourceMD5 := sourceChecksums.MD5
|
||||
|
||||
if exists {
|
||||
if sourceMD5 == "" {
|
||||
return fmt.Errorf("unable to compare object, MD5 checksum missing")
|
||||
}
|
||||
|
||||
if destinationMD5 == sourceMD5 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !force && destinationMD5 != sourceMD5 {
|
||||
return fmt.Errorf("error putting file to %s: file already exists and is different: %s", poolPath, storage)
|
||||
}
|
||||
}
|
||||
|
||||
source, err := sourcePool.Open(sourcePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer source.Close()
|
||||
|
||||
err = storage.putFile(relPath, source, sourceMD5)
|
||||
if err == nil {
|
||||
storage.pathCache[relPath] = sourceMD5
|
||||
} else {
|
||||
err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s: %s", sourcePath, storage, poolPath))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// internalFilelist returns the paths (relative to the combined prefix) and
// hex MD5 checksums of all blobs under prefix, paging through the flat
// listing until the continuation marker is exhausted.
func (storage *PublishedStorage) internalFilelist(prefix string) (paths []string, md5s []string, err error) {
	const delimiter = "/"
	paths = make([]string, 0, 1024)
	md5s = make([]string, 0, 1024)
	// Combine the storage-wide prefix with the requested one; the trailing
	// delimiter prevents e.g. "test" from also matching "testa".
	prefix = filepath.Join(storage.prefix, prefix)
	if prefix != "" {
		prefix += delimiter
	}

	for marker := (azblob.Marker{}); marker.NotDone(); {
		listBlob, err := storage.container.ListBlobsFlatSegment(
			context.Background(), marker, azblob.ListBlobsSegmentOptions{
				Prefix:     prefix,
				MaxResults: 1000,
				Details:    azblob.BlobListingDetails{Metadata: true}})
		if err != nil {
			return nil, nil, fmt.Errorf("error listing under prefix %s in %s: %s", prefix, storage, err)
		}

		marker = listBlob.NextMarker

		for _, blob := range listBlob.Segment.BlobItems {
			// Strip the prefix so callers get prefix-relative paths.
			if prefix == "" {
				paths = append(paths, blob.Name)
			} else {
				paths = append(paths, blob.Name[len(prefix):])
			}
			md5s = append(md5s, fmt.Sprintf("%x", blob.Properties.ContentMD5))
		}
	}

	return paths, md5s, nil
}
|
||||
|
||||
// Filelist returns list of files under prefix
|
||||
func (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {
|
||||
paths, _, err := storage.internalFilelist(prefix)
|
||||
return paths, err
|
||||
}
|
||||
|
||||
// Internal copy or move implementation
|
||||
func (storage *PublishedStorage) internalCopyOrMoveBlob(src, dst string, metadata azblob.Metadata, move bool) error {
|
||||
const leaseDuration = 30
|
||||
|
||||
dstBlobURL := storage.container.NewBlobURL(filepath.Join(storage.prefix, dst))
|
||||
srcBlobURL := storage.container.NewBlobURL(filepath.Join(storage.prefix, src))
|
||||
leaseResp, err := srcBlobURL.AcquireLease(context.Background(), "", leaseDuration, azblob.ModifiedAccessConditions{})
|
||||
if err != nil || leaseResp.StatusCode() != http.StatusCreated {
|
||||
return fmt.Errorf("error acquiring lease on source blob %s", srcBlobURL)
|
||||
}
|
||||
defer srcBlobURL.BreakLease(context.Background(), azblob.LeaseBreakNaturally, azblob.ModifiedAccessConditions{})
|
||||
srcBlobLeaseID := leaseResp.LeaseID()
|
||||
|
||||
copyResp, err := dstBlobURL.StartCopyFromURL(
|
||||
context.Background(),
|
||||
srcBlobURL.URL(),
|
||||
metadata,
|
||||
azblob.ModifiedAccessConditions{},
|
||||
azblob.BlobAccessConditions{},
|
||||
azblob.DefaultAccessTier,
|
||||
nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error copying %s -> %s in %s: %s", src, dst, storage, err)
|
||||
}
|
||||
|
||||
copyStatus := copyResp.CopyStatus()
|
||||
for {
|
||||
if copyStatus == azblob.CopyStatusSuccess {
|
||||
if move {
|
||||
_, err = srcBlobURL.Delete(
|
||||
context.Background(),
|
||||
azblob.DeleteSnapshotsOptionNone,
|
||||
azblob.BlobAccessConditions{
|
||||
LeaseAccessConditions: azblob.LeaseAccessConditions{LeaseID: srcBlobLeaseID},
|
||||
})
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
} else if copyStatus == azblob.CopyStatusPending {
|
||||
time.Sleep(1 * time.Second)
|
||||
blobPropsResp, err := dstBlobURL.GetProperties(
|
||||
context.Background(),
|
||||
azblob.BlobAccessConditions{LeaseAccessConditions: azblob.LeaseAccessConditions{LeaseID: srcBlobLeaseID}},
|
||||
azblob.ClientProvidedKeyOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting destination blob properties %s", dstBlobURL)
|
||||
}
|
||||
copyStatus = blobPropsResp.CopyStatus()
|
||||
|
||||
_, err = srcBlobURL.RenewLease(context.Background(), srcBlobLeaseID, azblob.ModifiedAccessConditions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error renewing source blob lease %s", srcBlobURL)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("error copying %s -> %s in %s: %s", dst, src, storage, copyStatus)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RenameFile renames (moves) file
|
||||
func (storage *PublishedStorage) RenameFile(oldName, newName string) error {
|
||||
return storage.internalCopyOrMoveBlob(oldName, newName, nil, true /* move */)
|
||||
}
|
||||
|
||||
// SymLink creates a copy of src file and adds link information as meta data.
// The link target is stored in the blob's "SymLink" metadata entry, which
// ReadLink later retrieves.
func (storage *PublishedStorage) SymLink(src string, dst string) error {
	return storage.internalCopyOrMoveBlob(src, dst, azblob.Metadata{"SymLink": src}, false /* move */)
}
|
||||
|
||||
// HardLink using symlink functionality as hard links do not exist
|
||||
func (storage *PublishedStorage) HardLink(src string, dst string) error {
|
||||
return storage.SymLink(src, dst)
|
||||
}
|
||||
|
||||
// FileExists returns true if path exists
|
||||
func (storage *PublishedStorage) FileExists(path string) (bool, error) {
|
||||
blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
|
||||
resp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
|
||||
if err != nil {
|
||||
storageError, ok := err.(azblob.StorageError)
|
||||
if ok && string(storageError.ServiceCode()) == string(azblob.StorageErrorCodeBlobNotFound) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
} else if resp.StatusCode() == http.StatusOK {
|
||||
return true, nil
|
||||
}
|
||||
return false, fmt.Errorf("error checking if blob %s exists %d", blob, resp.StatusCode())
|
||||
}
|
||||
|
||||
// ReadLink returns the symbolic link pointed to by path.
|
||||
// This simply reads text file created with SymLink
|
||||
func (storage *PublishedStorage) ReadLink(path string) (string, error) {
|
||||
blob := storage.container.NewBlobURL(filepath.Join(storage.prefix, path))
|
||||
resp, err := blob.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if resp.StatusCode() != http.StatusOK {
|
||||
return "", fmt.Errorf("error checking if blob %s exists %d", blob, resp.StatusCode())
|
||||
}
|
||||
return resp.NewMetadata()["SymLink"], nil
|
||||
}
|
||||
370
azure/public_test.go
Normal file
370
azure/public_test.go
Normal file
@@ -0,0 +1,370 @@
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||
"github.com/aptly-dev/aptly/files"
|
||||
"github.com/aptly-dev/aptly/utils"
|
||||
. "gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
// PublishedStorageSuite is a gocheck suite exercising PublishedStorage
// against a real Azure storage account (see SetUpSuite for the required
// environment variables).
type PublishedStorageSuite struct {
	accountName, accountKey  string            // credentials read from the environment
	storage, prefixedStorage *PublishedStorage // same container, without and with a "lala" prefix
}

var _ = Suite(&PublishedStorageSuite{})

// testContainerPrefix marks containers created by this suite.
const testContainerPrefix = "aptlytest-"
|
||||
|
||||
func randContainer() string {
|
||||
return testContainerPrefix + randString(32-len(testContainerPrefix))
|
||||
}
|
||||
|
||||
// randString returns a random string of length n drawn from lowercase
// alphanumerics. It panics when n is not positive or when the system
// randomness source fails.
func randString(n int) string {
	if n <= 0 {
		panic("negative number")
	}
	const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz"
	// Renamed from "bytes" to avoid shadowing the stdlib package name.
	buf := make([]byte, n)
	// The original ignored this error; a failing crypto/rand source makes
	// the tests meaningless, so fail loudly instead.
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	for i, b := range buf {
		buf[i] = alphanum[b%byte(len(alphanum))]
	}
	return string(buf)
}
|
||||
|
||||
func (s *PublishedStorageSuite) SetUpSuite(c *C) {
|
||||
s.accountName = os.Getenv("AZURE_STORAGE_ACCOUNT")
|
||||
if s.accountName == "" {
|
||||
println("Please set the the following two environment variables to run the Azure storage tests.")
|
||||
println(" 1. AZURE_STORAGE_ACCOUNT")
|
||||
println(" 2. AZURE_STORAGE_ACCESS_KEY")
|
||||
c.Skip("AZURE_STORAGE_ACCOUNT not set.")
|
||||
}
|
||||
s.accountKey = os.Getenv("AZURE_STORAGE_ACCESS_KEY")
|
||||
if s.accountKey == "" {
|
||||
println("Please set the the following two environment variables to run the Azure storage tests.")
|
||||
println(" 1. AZURE_STORAGE_ACCOUNT")
|
||||
println(" 2. AZURE_STORAGE_ACCESS_KEY")
|
||||
c.Skip("AZURE_STORAGE_ACCESS_KEY not set.")
|
||||
}
|
||||
}
|
||||
|
||||
// SetUpTest creates a fresh randomly-named container and two storage views
// of it: s.storage at the container root and s.prefixedStorage under "lala".
func (s *PublishedStorageSuite) SetUpTest(c *C) {
	container := randContainer()
	prefix := "lala"

	var err error

	s.storage, err = NewPublishedStorage(s.accountName, s.accountKey, container, "")
	c.Assert(err, IsNil)
	// The container itself must be created before it can be used.
	cnt := s.storage.container
	_, err = cnt.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer)
	c.Assert(err, IsNil)

	s.prefixedStorage, err = NewPublishedStorage(s.accountName, s.accountKey, container, prefix)
	c.Assert(err, IsNil)
}
|
||||
|
||||
func (s *PublishedStorageSuite) TearDownTest(c *C) {
|
||||
cnt := s.storage.container
|
||||
_, err := cnt.Delete(context.Background(), azblob.ContainerAccessConditions{})
|
||||
c.Assert(err, IsNil)
|
||||
}
|
||||
|
||||
// GetFile downloads the blob at path (absolute within the container,
// bypassing any storage prefix) and returns its contents, failing the test
// on any error.
func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
	blob := s.storage.container.NewBlobURL(path)
	resp, err := blob.Download(context.Background(), 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
	c.Assert(err, IsNil)
	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	data, err := ioutil.ReadAll(body)
	c.Assert(err, IsNil)
	return data
}
|
||||
|
||||
func (s *PublishedStorageSuite) AssertNoFile(c *C, path string) {
|
||||
_, err := s.storage.container.NewBlobURL(path).GetProperties(
|
||||
context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
|
||||
c.Assert(err, NotNil)
|
||||
storageError, ok := err.(azblob.StorageError)
|
||||
c.Assert(ok, Equals, true)
|
||||
c.Assert(string(storageError.ServiceCode()), Equals, string(string(azblob.StorageErrorCodeBlobNotFound)))
|
||||
}
|
||||
|
||||
// PutFile uploads data to path directly via the SDK (bypassing the storage
// under test), attaching the content MD5 so subsequent listings report a
// correct checksum.
func (s *PublishedStorageSuite) PutFile(c *C, path string, data []byte) {
	hash := md5.Sum(data)
	_, err := azblob.UploadBufferToBlockBlob(
		context.Background(),
		data,
		s.storage.container.NewBlockBlobURL(path),
		azblob.UploadToBlockBlobOptions{
			BlobHTTPHeaders: azblob.BlobHTTPHeaders{
				ContentMD5: hash[:],
			},
		})
	c.Assert(err, IsNil)
}
|
||||
|
||||
// TestPutFile uploads a local file via PutFile and verifies the stored
// contents, both at the container root and under the prefixed storage.
func (s *PublishedStorageSuite) TestPutFile(c *C) {
	content := []byte("Welcome to Azure!")
	filename := "a/b.txt"

	dir := c.MkDir()
	err := ioutil.WriteFile(filepath.Join(dir, "a"), content, 0644)
	c.Assert(err, IsNil)

	err = s.storage.PutFile(filename, filepath.Join(dir, "a"))
	c.Check(err, IsNil)

	c.Check(s.GetFile(c, filename), DeepEquals, content)

	err = s.prefixedStorage.PutFile(filename, filepath.Join(dir, "a"))
	c.Check(err, IsNil)

	// The prefixed storage must store the blob under its prefix.
	c.Check(s.GetFile(c, filepath.Join(s.prefixedStorage.prefix, filename)), DeepEquals, content)
}
|
||||
|
||||
// TestPutFilePlus verifies that a "+" in the path is stored literally and
// not URL-decoded into a space.
func (s *PublishedStorageSuite) TestPutFilePlus(c *C) {
	content := []byte("Welcome to Azure!")
	filename := "a/b+c.txt"

	dir := c.MkDir()
	err := ioutil.WriteFile(filepath.Join(dir, "a"), content, 0644)
	c.Assert(err, IsNil)

	err = s.storage.PutFile(filename, filepath.Join(dir, "a"))
	c.Check(err, IsNil)

	c.Check(s.GetFile(c, filename), DeepEquals, content)
	// No blob should exist under the space-decoded variant of the name.
	s.AssertNoFile(c, "a/b c.txt")
}
|
||||
|
||||
// TestFilelist checks recursive listing at the root, under a sub-prefix,
// under a missing prefix, and through the prefixed storage.
func (s *PublishedStorageSuite) TestFilelist(c *C) {
	paths := []string{"a", "b", "c", "testa", "test/a", "test/b", "lala/a", "lala/b", "lala/c"}
	for _, path := range paths {
		s.PutFile(c, path, []byte("test"))
	}

	// Results come back in lexicographic order; "testa" must not be
	// swallowed by the "test" prefix below.
	list, err := s.storage.Filelist("")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"a", "b", "c", "lala/a", "lala/b", "lala/c", "test/a", "test/b", "testa"})

	list, err = s.storage.Filelist("test")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"a", "b"})

	list, err = s.storage.Filelist("test2")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{})

	list, err = s.prefixedStorage.Filelist("")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"a", "b", "c"})
}
|
||||
|
||||
// TestFilelistPlus is TestFilelist with "+" and space characters mixed into
// the names, verifying neither is mangled by listing.
func (s *PublishedStorageSuite) TestFilelistPlus(c *C) {
	paths := []string{"a", "b", "c", "testa", "test/a+1", "test/a 1", "lala/a+b", "lala/a b", "lala/c"}
	for _, path := range paths {
		s.PutFile(c, path, []byte("test"))
	}

	list, err := s.storage.Filelist("")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"a", "b", "c", "lala/a b", "lala/a+b", "lala/c", "test/a 1", "test/a+1", "testa"})

	list, err = s.storage.Filelist("test")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"a 1", "a+1"})

	list, err = s.storage.Filelist("test2")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{})

	list, err = s.prefixedStorage.Filelist("")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"a b", "a+b", "c"})
}
|
||||
|
||||
// TestRemove deletes a blob via the root storage and via the prefixed
// storage (which must resolve the path under its prefix).
func (s *PublishedStorageSuite) TestRemove(c *C) {
	s.PutFile(c, "a/b", []byte("test"))

	err := s.storage.Remove("a/b")
	c.Check(err, IsNil)

	s.AssertNoFile(c, "a/b")

	s.PutFile(c, "lala/xyz", []byte("test"))

	err = s.prefixedStorage.Remove("xyz")
	c.Check(err, IsNil)

	s.AssertNoFile(c, "lala/xyz")
}
|
||||
|
||||
// TestRemovePlus removes a blob whose name contains "+" and checks the
// space-decoded sibling was never created nor touched.
func (s *PublishedStorageSuite) TestRemovePlus(c *C) {
	s.PutFile(c, "a/b+c", []byte("test"))
	s.PutFile(c, "a/b", []byte("test"))

	err := s.storage.Remove("a/b+c")
	c.Check(err, IsNil)

	s.AssertNoFile(c, "a/b+c")
	s.AssertNoFile(c, "a/b c")

	err = s.storage.Remove("a/b")
	c.Check(err, IsNil)

	s.AssertNoFile(c, "a/b")
}
|
||||
|
||||
// TestRemoveDirs removes the "test" pseudo-directory and verifies siblings
// (including prefix-sharing "testa") survive.
func (s *PublishedStorageSuite) TestRemoveDirs(c *C) {
	paths := []string{"a", "b", "c", "testa", "test/a", "test/b", "lala/a", "lala/b", "lala/c"}
	for _, path := range paths {
		s.PutFile(c, path, []byte("test"))
	}

	err := s.storage.RemoveDirs("test", nil)
	c.Check(err, IsNil)

	list, err := s.storage.Filelist("")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"a", "b", "c", "lala/a", "lala/b", "lala/c", "testa"})
}
|
||||
|
||||
// TestRemoveDirsPlus is TestRemoveDirs with "+" and space characters in the
// names under the removed directory.
func (s *PublishedStorageSuite) TestRemoveDirsPlus(c *C) {
	paths := []string{"a", "b", "c", "testa", "test/a+1", "test/a 1", "lala/a+b", "lala/a b", "lala/c"}
	for _, path := range paths {
		s.PutFile(c, path, []byte("test"))
	}

	err := s.storage.RemoveDirs("test", nil)
	c.Check(err, IsNil)

	list, err := s.storage.Filelist("")
	c.Check(err, IsNil)
	c.Check(list, DeepEquals, []string{"a", "b", "c", "lala/a b", "lala/a+b", "lala/c", "testa"})
}
|
||||
|
||||
// TestRenameFile renames a blob and verifies the destination holds the
// content while the source is gone.
func (s *PublishedStorageSuite) TestRenameFile(c *C) {
	dir := c.MkDir()
	err := ioutil.WriteFile(filepath.Join(dir, "a"), []byte("Welcome to Azure!"), 0644)
	c.Assert(err, IsNil)

	err = s.storage.PutFile("source.txt", filepath.Join(dir, "a"))
	c.Check(err, IsNil)

	err = s.storage.RenameFile("source.txt", "dest.txt")
	c.Check(err, IsNil)

	c.Check(s.GetFile(c, "dest.txt"), DeepEquals, []byte("Welcome to Azure!"))

	exists, err := s.storage.FileExists("source.txt")
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)
}
|
||||
|
||||
// TestLinkFromPool exercises LinkFromPool end to end: first link, duplicate
// link, conflicting content with and without force, the path cache on
// prefixed storage, and nested destination file names.
func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
	root := c.MkDir()
	pool := files.NewPackagePool(root, false)
	cs := files.NewMockChecksumStorage()

	tmpFile1 := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
	err := ioutil.WriteFile(tmpFile1, []byte("Contents"), 0644)
	c.Assert(err, IsNil)
	cksum1 := utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}

	// Same file name, different content — used to trigger the conflict path.
	tmpFile2 := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
	err = ioutil.WriteFile(tmpFile2, []byte("Spam"), 0644)
	c.Assert(err, IsNil)
	cksum2 := utils.ChecksumInfo{MD5: "e9dfd31cc505d51fc26975250750deab"}

	tmpFile3 := filepath.Join(c.MkDir(), "netboot/boot.img.gz")
	os.MkdirAll(filepath.Dir(tmpFile3), 0777)
	err = ioutil.WriteFile(tmpFile3, []byte("Contents"), 0644)
	c.Assert(err, IsNil)
	cksum3 := utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}

	src1, err := pool.Import(tmpFile1, "mars-invaders_1.03.deb", &cksum1, true, cs)
	c.Assert(err, IsNil)
	src2, err := pool.Import(tmpFile2, "mars-invaders_1.03.deb", &cksum2, true, cs)
	c.Assert(err, IsNil)
	src3, err := pool.Import(tmpFile3, "netboot/boot.img.gz", &cksum3, true, cs)
	c.Assert(err, IsNil)

	// first link from pool
	err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src1, cksum1, false)
	c.Check(err, IsNil)

	c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents"))

	// duplicate link from pool
	err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src1, cksum1, false)
	c.Check(err, IsNil)

	c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents"))

	// link from pool with conflict
	err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src2, cksum2, false)
	c.Check(err, ErrorMatches, ".*file already exists and is different.*")

	c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents"))

	// link from pool with conflict and force
	err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src2, cksum2, true)
	c.Check(err, IsNil)

	c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Spam"))

	// for prefixed storage:
	// first link from pool
	err = s.prefixedStorage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src1, cksum1, false)
	c.Check(err, IsNil)

	// 2nd link from pool, providing wrong path for source file
	//
	// this test should check that file already exists in S3 and skip upload (which would fail if not skipped)
	s.prefixedStorage.pathCache = nil
	err = s.prefixedStorage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, "wrong-looks-like-pathcache-doesnt-work", cksum1, false)
	c.Check(err, IsNil)

	c.Check(s.GetFile(c, "lala/pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents"))

	// link from pool with nested file name
	err = s.storage.LinkFromPool("dists/jessie/non-free/installer-i386/current/images", "netboot/boot.img.gz", pool, src3, cksum3, false)
	c.Check(err, IsNil)

	c.Check(s.GetFile(c, "dists/jessie/non-free/installer-i386/current/images/netboot/boot.img.gz"), DeepEquals, []byte("Contents"))
}
|
||||
|
||||
// TestSymLink verifies that SymLink copies the blob and records the link
// target in metadata readable via ReadLink.
func (s *PublishedStorageSuite) TestSymLink(c *C) {
	s.PutFile(c, "a/b", []byte("test"))

	err := s.storage.SymLink("a/b", "a/b.link")
	c.Check(err, IsNil)

	var link string
	link, err = s.storage.ReadLink("a/b.link")
	c.Check(err, IsNil)
	c.Check(link, Equals, "a/b")

	// NOTE(review): this Skip runs after every check has already executed
	// and its message refers to "s3test" — it looks like a leftover from
	// the S3 test suite; confirm whether it should be removed.
	c.Skip("copy not available in s3test")
}
|
||||
|
||||
func (s *PublishedStorageSuite) TestFileExists(c *C) {
|
||||
s.PutFile(c, "a/b", []byte("test"))
|
||||
|
||||
exists, err := s.storage.FileExists("a/b")
|
||||
c.Check(err, IsNil)
|
||||
c.Check(exists, Equals, true)
|
||||
|
||||
exists, _ = s.storage.FileExists("a/b.invalid")
|
||||
c.Check(err, IsNil)
|
||||
c.Check(exists, Equals, false)
|
||||
}
|
||||
Reference in New Issue
Block a user