Merge branch 'master' into tautological_condition

Authored by JupiterRider on 2025-08-30 18:51:27 +02:00, committed via GitHub.
22 changed files with 524 additions and 214 deletions

View File

@@ -71,3 +71,4 @@ List of contributors, in chronological order:
* Silke Hofstra (https://github.com/silkeh)
* Itay Porezky (https://github.com/itayporezky)
* JupiterRider (https://github.com/JupiterRider)
+* Agustin Henze (https://github.com/agustinhenze)

View File

@@ -63,7 +63,7 @@ Define Release APT sources in ``/etc/apt/sources.list.d/aptly.list``::
deb [signed-by=/etc/apt/keyrings/aptly.asc] http://repo.aptly.info/release DIST main
-Where DIST is one of: ``buster``, ``bullseye``, ``bookworm``, ``focal``, ``jammy``, ``noble``
+Where DIST is one of: ``bullseye``, ``bookworm``, ``trixie``, ``focal``, ``jammy``, ``noble``
Install aptly packages::
@@ -80,7 +80,7 @@ Define CI APT sources in ``/etc/apt/sources.list.d/aptly-ci.list``::
deb [signed-by=/etc/apt/keyrings/aptly.asc] http://repo.aptly.info/ci DIST main
-Where DIST is one of: ``buster``, ``bullseye``, ``bookworm``, ``focal``, ``jammy``, ``noble``
+Where DIST is one of: ``bullseye``, ``bookworm``, ``trixie``, ``focal``, ``jammy``, ``noble``
Note: same gpg key is used as for the Upstream Debian Packages.

View File

@@ -70,7 +70,7 @@ func apiReady(isReady *atomic.Value) func(*gin.Context) {
return
}
-status := aptlyStatus{Status: "Aptly is ready"}
+status := aptlyStatus{Status: "Aptly is ready"}
c.JSON(200, status)
}
}
@@ -178,7 +178,7 @@ func truthy(value interface{}) bool {
if value == nil {
return false
}
-switch v := value.(type) {
+switch v := value.(type) {
case string:
switch strings.ToLower(v) {
case "n", "no", "f", "false", "0", "off":

View File

@@ -901,10 +901,10 @@ func apiReposIncludePackageFromDir(c *gin.Context) {
out.Printf("Failed files: %s\n", strings.Join(failedFiles, ", "))
}
-ret := reposIncludePackageFromDirResponse{
+ret := reposIncludePackageFromDirResponse{
Report: reporter,
FailedFiles: failedFiles,
-}
+}
return &task.ProcessReturnValue{Code: http.StatusOK, Value: ret}, nil
})
}

View File

@@ -104,7 +104,7 @@ func (pool *PackagePool) Open(path string) (aptly.ReadSeekerCloser, error) {
if err != nil {
return nil, errors.Wrapf(err, "error creating tempfile for %s", path)
}
-defer func () { _ = os.Remove(temp.Name()) }()
+defer func() { _ = os.Remove(temp.Name()) }()
_, err = pool.az.client.DownloadFile(context.TODO(), pool.az.container, path, temp, nil)
if err != nil {
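
The hunk above relies on a common tempfile idiom: create the file, defer its removal, and hand the still-open handle back to the caller. A sketch of that idiom, with fillTemp as a hypothetical stand-in for the Azure DownloadFile call:

package main

import (
	"fmt"
	"io"
	"os"
)

// fillTemp is a hypothetical stand-in for pool.az.client.DownloadFile.
func fillTemp(w io.Writer) error {
	_, err := w.Write([]byte("blob contents"))
	return err
}

func openViaTemp() (io.ReadSeeker, error) {
	temp, err := os.CreateTemp("", "pool")
	if err != nil {
		return nil, err
	}
	// Unlink immediately; on POSIX systems the open handle keeps the data readable.
	defer func() { _ = os.Remove(temp.Name()) }()
	if err := fillTemp(temp); err != nil {
		_ = temp.Close()
		return nil, err
	}
	_, err = temp.Seek(0, io.SeekStart)
	return temp, err
}

func main() {
	r, err := openViaTemp()
	if err != nil {
		panic(err)
	}
	data, _ := io.ReadAll(r)
	fmt.Printf("%s\n", data)
}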

View File

@@ -7,7 +7,7 @@ import (
"path/filepath"
"runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/aptly-dev/aptly/aptly"
"github.com/aptly-dev/aptly/files"
"github.com/aptly-dev/aptly/utils"
@@ -50,10 +50,10 @@ func (s *PackagePoolSuite) SetUpTest(c *C) {
s.pool, err = NewPackagePool(s.accountName, s.accountKey, container, "", s.endpoint)
c.Assert(err, IsNil)
-publicAccessType := azblob.PublicAccessTypeContainer
-_, err = s.pool.az.client.CreateContainer(context.TODO(), s.pool.az.container, &azblob.CreateContainerOptions{
-Access: &publicAccessType,
-})
+publicAccessType := azblob.PublicAccessTypeContainer
+_, err = s.pool.az.client.CreateContainer(context.TODO(), s.pool.az.container, &azblob.CreateContainerOptions{
+Access: &publicAccessType,
+})
c.Assert(err, IsNil)
s.prefixedPool, err = NewPackagePool(s.accountName, s.accountKey, container, prefix, s.endpoint)

View File

@@ -1,17 +1,17 @@
package azure
import (
"bytes"
"context"
"crypto/md5"
"crypto/rand"
"io"
"os"
"path/filepath"
"bytes"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/aptly-dev/aptly/files"
"github.com/aptly-dev/aptly/utils"
. "gopkg.in/check.v1"
@@ -69,10 +69,10 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
s.storage, err = NewPublishedStorage(s.accountName, s.accountKey, container, "", s.endpoint)
c.Assert(err, IsNil)
-publicAccessType := azblob.PublicAccessTypeContainer
-_, err = s.storage.az.client.CreateContainer(context.Background(), s.storage.az.container, &azblob.CreateContainerOptions{
-Access: &publicAccessType,
-})
+publicAccessType := azblob.PublicAccessTypeContainer
+_, err = s.storage.az.client.CreateContainer(context.Background(), s.storage.az.container, &azblob.CreateContainerOptions{
+Access: &publicAccessType,
+})
c.Assert(err, IsNil)
s.prefixedStorage, err = NewPublishedStorage(s.accountName, s.accountKey, container, prefix, s.endpoint)
@@ -80,12 +80,12 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
}
func (s *PublishedStorageSuite) TearDownTest(c *C) {
-_, err := s.storage.az.client.DeleteContainer(context.Background(), s.storage.az.container, nil)
+_, err := s.storage.az.client.DeleteContainer(context.Background(), s.storage.az.container, nil)
c.Assert(err, IsNil)
}
func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
-resp, err := s.storage.az.client.DownloadStream(context.Background(), s.storage.az.container, path, nil)
+resp, err := s.storage.az.client.DownloadStream(context.Background(), s.storage.az.container, path, nil)
c.Assert(err, IsNil)
data, err := io.ReadAll(resp.Body)
c.Assert(err, IsNil)
@@ -93,26 +93,26 @@ func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
}
func (s *PublishedStorageSuite) AssertNoFile(c *C, path string) {
-serviceClient := s.storage.az.client.ServiceClient()
-containerClient := serviceClient.NewContainerClient(s.storage.az.container)
-blobClient := containerClient.NewBlobClient(path)
-_, err := blobClient.GetProperties(context.Background(), nil)
+serviceClient := s.storage.az.client.ServiceClient()
+containerClient := serviceClient.NewContainerClient(s.storage.az.container)
+blobClient := containerClient.NewBlobClient(path)
+_, err := blobClient.GetProperties(context.Background(), nil)
c.Assert(err, NotNil)
-storageError, ok := err.(*azcore.ResponseError)
+storageError, ok := err.(*azcore.ResponseError)
c.Assert(ok, Equals, true)
c.Assert(storageError.StatusCode, Equals, 404)
}
func (s *PublishedStorageSuite) PutFile(c *C, path string, data []byte) {
hash := md5.Sum(data)
-uploadOptions := &azblob.UploadStreamOptions{
-HTTPHeaders: &blob.HTTPHeaders{
-BlobContentMD5: hash[:],
-},
-}
-reader := bytes.NewReader(data)
-_, err := s.storage.az.client.UploadStream(context.Background(), s.storage.az.container, path, reader, uploadOptions)
+uploadOptions := &azblob.UploadStreamOptions{
+HTTPHeaders: &blob.HTTPHeaders{
+BlobContentMD5: hash[:],
+},
+}
+reader := bytes.NewReader(data)
+_, err := s.storage.az.client.UploadStream(context.Background(), s.storage.az.container, path, reader, uploadOptions)
c.Assert(err, IsNil)
}
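
The PutFile helper reduces to one SDK call plus an MD5 header. Here it is as a standalone sketch using only the calls visible in the diff; client construction and the surrounding test wiring are elided:

package example

import (
	"bytes"
	"context"
	"crypto/md5"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// putBlob uploads a byte slice with its MD5 attached, so the service can
// verify the payload end to end (mirrors the test helper above).
func putBlob(client *azblob.Client, container, path string, data []byte) error {
	hash := md5.Sum(data)
	uploadOptions := &azblob.UploadStreamOptions{
		HTTPHeaders: &blob.HTTPHeaders{BlobContentMD5: hash[:]},
	}
	_, err := client.UploadStream(context.Background(), container, path,
		bytes.NewReader(data), uploadOptions)
	return err
}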

View File

@@ -11,7 +11,7 @@ func Test(t *testing.T) {
TestingT(t)
}
-type ProgressSuite struct {}
+type ProgressSuite struct{}
var _ = Suite(&ProgressSuite{})

View File

@@ -14,7 +14,7 @@ func Test(t *testing.T) {
}
type EtcDDBSuite struct {
-db database.Storage
+db database.Storage
}
var _ = Suite(&EtcDDBSuite{})
@@ -133,7 +133,7 @@ func (s *EtcDDBSuite) TestTransactionCommit(c *C) {
v, err := s.db.Get(key)
c.Assert(err, IsNil)
c.Check(v, DeepEquals, value)
-err = transaction.Delete(key)
+err = transaction.Delete(key)
c.Assert(err, IsNil)
_, err = transaction.Get(key2)
@@ -156,4 +156,3 @@ func (s *EtcDDBSuite) TestTransactionCommit(c *C) {
_, err = transaction.Get(key)
c.Assert(err, NotNil)
}

View File

@@ -65,7 +65,7 @@ func (s *PackageRefListSuite) TestNewPackageListFromRefList(c *C) {
list, err := NewPackageListFromRefList(reflist, coll, nil)
c.Assert(err, IsNil)
c.Check(list.Len(), Equals, 4)
-c.Check(list.Add(s.p4), ErrorMatches, "package already exists and is different: .*")
+c.Check(list.Add(s.p4), ErrorMatches, "package already exists and is different: .*")
list, err = NewPackageListFromRefList(nil, coll, nil)
c.Assert(err, IsNil)

View File

@@ -31,8 +31,7 @@ func BenchmarkSnapshotCollectionForEach(b *testing.B) {
for i := 0; i < b.N; i++ {
collection = NewSnapshotCollection(db)
-_ = collection.ForEach(func(s *Snapshot) error {
+_ = collection.ForEach(func(s *Snapshot) error {
return nil
})
}

View File

@@ -50,7 +50,7 @@ func compareLexicographic(s1, s2 string) int {
i := 0
l1, l2 := len(s1), len(s2)
-for !(i == l1 && i == l2) { // break if s1 equal to s2
+for !(i == l1 && i == l2) { // break if s1 equal to s2
if i == l2 {
// s1 is longer than s2

View File

@@ -0,0 +1,283 @@
package files
import (
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/aptly-dev/aptly/aptly"
"github.com/aptly-dev/aptly/utils"
. "gopkg.in/check.v1"
)
type LinkFromPoolConcurrencySuite struct {
root string
poolDir string
storage *PublishedStorage
pool *PackagePool
cs aptly.ChecksumStorage
testFile string
testContent []byte
testChecksums utils.ChecksumInfo
srcPoolPath string
}
var _ = Suite(&LinkFromPoolConcurrencySuite{})
func (s *LinkFromPoolConcurrencySuite) SetUpTest(c *C) {
s.root = c.MkDir()
s.poolDir = filepath.Join(s.root, "pool")
publishDir := filepath.Join(s.root, "public")
// Create package pool and published storage
s.pool = NewPackagePool(s.poolDir, true)
s.storage = NewPublishedStorage(publishDir, "copy", "checksum")
s.cs = NewMockChecksumStorage()
// Create test file content
s.testContent = []byte("test package content for concurrency testing")
s.testFile = filepath.Join(s.root, "test-package.deb")
err := os.WriteFile(s.testFile, s.testContent, 0644)
c.Assert(err, IsNil)
// Calculate checksums
md5sum, err := utils.MD5ChecksumForFile(s.testFile)
c.Assert(err, IsNil)
s.testChecksums = utils.ChecksumInfo{
Size: int64(len(s.testContent)),
MD5: md5sum,
}
// Import the test file into the pool
s.srcPoolPath, err = s.pool.Import(s.testFile, "test-package.deb", &s.testChecksums, false, s.cs)
c.Assert(err, IsNil)
}
func (s *LinkFromPoolConcurrencySuite) TestLinkFromPoolConcurrency(c *C) {
// Test concurrent LinkFromPool operations to ensure no race conditions
concurrency := 5000
iterations := 10
for iter := 0; iter < iterations; iter++ {
c.Logf("Iteration %d: Testing concurrent LinkFromPool with %d goroutines", iter+1, concurrency)
destPath := fmt.Sprintf("main/t/test%d", iter)
var wg sync.WaitGroup
errors := make(chan error, concurrency)
successes := make(chan struct{}, concurrency)
start := time.Now()
// Launch concurrent LinkFromPool operations
for i := 0; i < concurrency; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
// Use force=true to test the most vulnerable code path (remove-then-create)
err := s.storage.LinkFromPool(
"", // publishedPrefix
destPath, // publishedRelPath
"test-package.deb", // fileName
s.pool, // sourcePool
s.srcPoolPath, // sourcePath
s.testChecksums, // sourceChecksums
true, // force - this triggers vulnerable remove-then-create pattern
)
if err != nil {
errors <- fmt.Errorf("goroutine %d failed: %v", id, err)
} else {
successes <- struct{}{}
}
}(i)
}
// Wait for completion
wg.Wait()
duration := time.Since(start)
close(errors)
close(successes)
// Count results
errorCount := 0
successCount := 0
var firstError error
for err := range errors {
errorCount++
if firstError == nil {
firstError = err
}
c.Logf("Race condition error: %v", err)
}
for range successes {
successCount++
}
c.Logf("Results: %d successes, %d errors, took %v", successCount, errorCount, duration)
// Assert no race conditions occurred
if errorCount > 0 {
c.Fatalf("Race condition detected in iteration %d! "+
"Errors: %d out of %d operations (%.1f%% failure rate). "+
"First error: %v. "+
"This indicates the fix is not working properly.",
iter+1, errorCount, concurrency,
float64(errorCount)/float64(concurrency)*100, firstError)
}
// Verify the final file exists and has correct content
finalFile := filepath.Join(s.storage.rootPath, destPath, "test-package.deb")
_, err := os.Stat(finalFile)
c.Assert(err, IsNil, Commentf("Final file should exist after concurrent operations"))
content, err := os.ReadFile(finalFile)
c.Assert(err, IsNil, Commentf("Should be able to read final file"))
c.Assert(content, DeepEquals, s.testContent, Commentf("File content should be intact after concurrent operations"))
c.Logf("✓ Iteration %d: No race conditions detected", iter+1)
}
c.Logf("SUCCESS: Handled %d total concurrent operations across %d iterations with no race conditions",
concurrency*iterations, iterations)
}
func (s *LinkFromPoolConcurrencySuite) TestLinkFromPoolConcurrencyDifferentFiles(c *C) {
// Test concurrent operations on different files to ensure no blocking
concurrency := 10
var wg sync.WaitGroup
errors := make(chan error, concurrency)
start := time.Now()
// Launch concurrent operations on different destination files
for i := 0; i < concurrency; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
destPath := fmt.Sprintf("main/t/test-file-%d", id)
err := s.storage.LinkFromPool(
"", // publishedPrefix
destPath, // publishedRelPath
"test-package.deb", // fileName
s.pool, // sourcePool
s.srcPoolPath, // sourcePath
s.testChecksums, // sourceChecksums
false, // force
)
if err != nil {
errors <- fmt.Errorf("goroutine %d failed: %v", id, err)
}
}(i)
}
// Wait for completion
wg.Wait()
duration := time.Since(start)
close(errors)
// Count errors
errorCount := 0
for err := range errors {
errorCount++
c.Logf("Error: %v", err)
}
c.Assert(errorCount, Equals, 0, Commentf("No errors should occur when linking to different files"))
c.Logf("SUCCESS: %d concurrent operations on different files completed in %v", concurrency, duration)
// Verify all files were created correctly
for i := 0; i < concurrency; i++ {
finalFile := filepath.Join(s.storage.rootPath, fmt.Sprintf("main/t/test-file-%d", i), "test-package.deb")
_, err := os.Stat(finalFile)
c.Assert(err, IsNil, Commentf("File %d should exist", i))
content, err := os.ReadFile(finalFile)
c.Assert(err, IsNil, Commentf("Should be able to read file %d", i))
c.Assert(content, DeepEquals, s.testContent, Commentf("File %d content should be correct", i))
}
}
func (s *LinkFromPoolConcurrencySuite) TestLinkFromPoolWithoutForceNoConcurrencyIssues(c *C) {
// Test that when force=false, concurrent operations fail gracefully without corruption
concurrency := 20
destPath := "main/t/single-dest"
var wg sync.WaitGroup
errors := make(chan error, concurrency)
successes := make(chan struct{}, concurrency)
// First, create the file so subsequent operations will conflict
err := s.storage.LinkFromPool("", destPath, "test-package.deb", s.pool, s.srcPoolPath, s.testChecksums, false)
c.Assert(err, IsNil)
start := time.Now()
// Launch concurrent operations that should mostly fail
for i := 0; i < concurrency; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
err := s.storage.LinkFromPool(
"", // publishedPrefix
destPath, // publishedRelPath
"test-package.deb", // fileName
s.pool, // sourcePool
s.srcPoolPath, // sourcePath
s.testChecksums, // sourceChecksums
false, // force=false - should fail if file exists and is same
)
if err != nil {
errors <- err
} else {
successes <- struct{}{}
}
}(i)
}
// Wait for completion
wg.Wait()
duration := time.Since(start)
close(errors)
close(successes)
errorCount := 0
successCount := 0
for range errors {
errorCount++
}
for range successes {
successCount++
}
c.Logf("Results with force=false: %d successes, %d errors, took %v", successCount, errorCount, duration)
// With force=false and identical files, operations should succeed (file already exists with same content)
// No race conditions should cause crashes or corruption
c.Assert(errorCount, Equals, 0, Commentf("With identical files and force=false, operations should succeed"))
// Verify the file still exists and has correct content
finalFile := filepath.Join(s.storage.rootPath, destPath, "test-package.deb")
content, err := os.ReadFile(finalFile)
c.Assert(err, IsNil)
c.Assert(content, DeepEquals, s.testContent, Commentf("File should not be corrupted by concurrent access"))
}
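
All three tests share one fan-out/collect skeleton: spawn N goroutines, report failures over a buffered channel, close the channels after wg.Wait, then drain and assert. The skeleton in isolation, with doWork as a hypothetical stand-in for LinkFromPool:

package main

import (
	"fmt"
	"sync"
)

// doWork is a hypothetical stand-in for storage.LinkFromPool.
func doWork(id int) error { return nil }

func main() {
	concurrency := 8
	var wg sync.WaitGroup
	errs := make(chan error, concurrency) // buffered: senders never block

	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if err := doWork(id); err != nil {
				errs <- err
			}
		}(i)
	}

	wg.Wait()   // every goroutine is done before the channel is closed
	close(errs) // safe: no further sends can happen

	for err := range errs {
		fmt.Println("unexpected:", err)
	}
	fmt.Println("all operations completed")
}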

View File

@@ -241,7 +241,7 @@ func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.Check
return "", err
}
defer func() {
-_ = source.Close()
+_ = source.Close()
}()
sourceInfo, err := source.Stat()

View File

@@ -22,6 +22,26 @@ type PublishedStorage struct {
verifyMethod uint
}
+// Global mutex map to prevent concurrent access to the same destinationPath in LinkFromPool
+var (
+fileLockMutex sync.Mutex
+fileLocks = make(map[string]*sync.Mutex)
+)
+// getFileLock returns a mutex for a specific file path to prevent concurrent modifications
+func getFileLock(filePath string) *sync.Mutex {
+fileLockMutex.Lock()
+defer fileLockMutex.Unlock()
+if mutex, exists := fileLocks[filePath]; exists {
+return mutex
+}
+mutex := &sync.Mutex{}
+fileLocks[filePath] = mutex
+return mutex
+}
// Check interfaces
var (
_ aptly.PublishedStorage = (*PublishedStorage)(nil)
@@ -136,6 +156,12 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
baseName := filepath.Base(fileName)
poolPath := filepath.Join(storage.rootPath, publishedPrefix, publishedRelPath, filepath.Dir(fileName))
destinationPath := filepath.Join(poolPath, baseName)
+// Acquire file-specific lock to prevent concurrent access to the same file
+fileLock := getFileLock(destinationPath)
+fileLock.Lock()
+defer fileLock.Unlock()
var localSourcePool aptly.LocalPackagePool
if storage.linkMethod != LinkMethodCopy {
@@ -154,7 +180,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
var dstStat os.FileInfo
-dstStat, err = os.Stat(filepath.Join(poolPath, baseName))
+dstStat, err = os.Stat(destinationPath)
if err == nil {
// already exists, check source file
@@ -173,7 +199,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
} else {
// if source and destination have the same checksums, no need to copy
var dstMD5 string
-dstMD5, err = utils.MD5ChecksumForFile(filepath.Join(poolPath, baseName))
+dstMD5, err = utils.MD5ChecksumForFile(destinationPath)
if err != nil {
return err
@@ -204,11 +230,11 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
// source and destination have different inodes, if !forced, this is fatal error
if !force {
-return fmt.Errorf("error linking file to %s: file already exists and is different", filepath.Join(poolPath, baseName))
+return fmt.Errorf("error linking file to %s: file already exists and is different", destinationPath)
}
// forced, so remove destination
-err = os.Remove(filepath.Join(poolPath, baseName))
+err = os.Remove(destinationPath)
if err != nil {
return err
}
@@ -223,7 +249,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
}
var dst *os.File
-dst, err = os.Create(filepath.Join(poolPath, baseName))
+dst, err = os.Create(destinationPath)
if err != nil {
_ = r.Close()
return err
@@ -244,9 +270,9 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
err = dst.Close()
} else if storage.linkMethod == LinkMethodSymLink {
-err = localSourcePool.Symlink(sourcePath, filepath.Join(poolPath, baseName))
+err = localSourcePool.Symlink(sourcePath, destinationPath)
} else {
-err = localSourcePool.Link(sourcePath, filepath.Join(poolPath, baseName))
+err = localSourcePool.Link(sourcePath, destinationPath)
}
return err
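
Pulled out of the diff, the fix is a lazily grown map of per-path mutexes behind one global mutex. A minimal, self-contained sketch (the demo main is hypothetical):

package main

import (
	"fmt"
	"sync"
)

var (
	fileLockMutex sync.Mutex
	fileLocks     = make(map[string]*sync.Mutex)
)

// getFileLock returns the mutex for filePath, creating it on first use.
// fileLockMutex only guards the map lookup, never the file work itself.
func getFileLock(filePath string) *sync.Mutex {
	fileLockMutex.Lock()
	defer fileLockMutex.Unlock()
	if mutex, exists := fileLocks[filePath]; exists {
		return mutex
	}
	mutex := &sync.Mutex{}
	fileLocks[filePath] = mutex
	return mutex
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			lock := getFileLock("public/main/t/test-package.deb")
			lock.Lock() // serializes the remove-then-create window per path
			defer lock.Unlock()
			fmt.Println("goroutine", id, "has exclusive access")
		}(i)
	}
	wg.Wait()
}

One design note: the map only ever grows; entries are never removed, which trades a small amount of memory per distinct destination path for simplicity.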

View File

@@ -240,7 +240,7 @@ func (downloader *downloaderImpl) download(req *http.Request, url, destination s
}
if resp.Body != nil {
defer func() {
-_ = resp.Body.Close()
+_ = resp.Body.Close()
}()
}

View File

@@ -1,3 +1,4 @@
+//go:build !go1.7
// +build !go1.7
package http

View File

@@ -49,9 +49,9 @@ func (d *GrabDownloader) Download(ctx context.Context, url string, destination s
func (d *GrabDownloader) DownloadWithChecksum(ctx context.Context, url string, destination string, expected *utils.ChecksumInfo, ignoreMismatch bool) error {
maxTries := d.maxTries
-// FIXME: const delayMax = time.Duration(5 * time.Minute)
+// FIXME: const delayMax = time.Duration(5 * time.Minute)
delay := time.Duration(1 * time.Second)
-// FIXME: const delayMultiplier = 2
+// FIXME: const delayMultiplier = 2
err := fmt.Errorf("no tries available")
for maxTries > 0 {
err = d.download(ctx, url, destination, expected, ignoreMismatch)
@@ -133,17 +133,17 @@ func (d *GrabDownloader) download(_ context.Context, url string, destination str
resp := d.client.Do(req)
-<-resp.Done
+<-resp.Done
// download is complete
-// Loop:
-// for {
-// select {
-// case <-resp.Done:
-// // download is complete
-// break Loop
-// }
-// }
+// Loop:
+// for {
+// select {
+// case <-resp.Done:
+// // download is complete
+// break Loop
+// }
+// }
err = resp.Err()
if err != nil && err == grab.ErrBadChecksum && ignoreMismatch {
fmt.Printf("Ignoring checksum mismatch for %s\n", url)
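
Read together with its FIXME comments, the retry loop amounts to capped exponential backoff. A sketch under that reading; delayMax and delayMultiplier are taken from the FIXMEs, not from merged code:

package main

import (
	"errors"
	"fmt"
	"time"
)

const (
	delayMax        = 5 * time.Minute // assumption: from the FIXME comment
	delayMultiplier = 2               // assumption: from the FIXME comment
)

// retry runs try up to maxTries times, doubling the delay between attempts.
func retry(maxTries int, try func() error) error {
	delay := time.Second
	err := errors.New("no tries available")
	for maxTries > 0 {
		if err = try(); err == nil {
			return nil // success, stop retrying
		}
		time.Sleep(delay)
		delay *= delayMultiplier
		if delay > delayMax {
			delay = delayMax
		}
		maxTries--
	}
	return err
}

func main() {
	attempts := 0
	err := retry(3, func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("attempt %d failed", attempts)
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}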

View File

@@ -112,9 +112,11 @@ func NewServer(config *Config) (*Server, error) {
buckets: make(map[string]*bucket),
config: config,
}
-go func() { _ = http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-srv.serveHTTP(w, req)
-})) }()
+go func() {
+_ = http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+srv.serveHTTP(w, req)
+}))
+}()
return srv, nil
}
@@ -527,14 +529,13 @@ func (bucketResource) post(a *action) interface{} {
// and dashes (-). You can use uppercase letters for buckets only in the
// US Standard region.
//
-// Must start with a number or letter
+// # Must start with a number or letter
//
-// Must be between 3 and 255 characters long
+// # Must be between 3 and 255 characters long
//
// There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4)
// but the real S3 server does not seem to check that rule, so we will not
// check it either.
-//
func validBucketName(name string) bool {
if len(name) < 3 || len(name) > 255 {
return false
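
The doc comment spells out the naming rules, but only the length check is visible in the hunk, so the character loop below is an assumption sketched from the comment text:

package main

import "fmt"

// validBucketName loosely follows the commented rules: 3 to 255 characters,
// letters, numbers, periods, and dashes, starting with a number or letter.
// The IP-address rule is deliberately not checked, matching the comment.
func validBucketName(name string) bool {
	if len(name) < 3 || len(name) > 255 {
		return false
	}
	for i, r := range name {
		switch {
		case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r >= '0' && r <= '9':
		case (r == '-' || r == '.') && i > 0:
		default:
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validBucketName("my-repo"), validBucketName("-bad")) // true false
}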

View File

@@ -50,5 +50,5 @@ func (s *ListSuite) TestList(c *check.C) {
c.Check(detail, check.Equals, "Details")
_, deleteErr := list.DeleteTaskByID(task.ID)
c.Check(deleteErr, check.IsNil)
-list.Stop()
+list.Stop()
}

View File

@@ -19,8 +19,8 @@ func (s *ConfigSuite) TestLoadConfig(c *C) {
_, _ = f.WriteString(configFile)
_ = f.Close()
-// start with empty config
-s.config = ConfigStructure{}
+// start with empty config
+s.config = ConfigStructure{}
err := LoadConfig(configname, &s.config)
c.Assert(err, IsNil)
@@ -32,8 +32,8 @@ func (s *ConfigSuite) TestLoadConfig(c *C) {
func (s *ConfigSuite) TestSaveConfig(c *C) {
configname := filepath.Join(c.MkDir(), "aptly.json2")
-// start with empty config
-s.config = ConfigStructure{}
+// start with empty config
+s.config = ConfigStructure{}
s.config.RootDir = "/tmp/aptly"
s.config.DownloadConcurrency = 5
@@ -71,93 +71,93 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
_, _ = f.Read(buf)
c.Check(string(buf), Equals, ""+
"{\n" +
" \"rootDir\": \"/tmp/aptly\",\n" +
" \"logLevel\": \"info\",\n" +
" \"logFormat\": \"json\",\n" +
" \"databaseOpenAttempts\": 5,\n" +
" \"architectures\": null,\n" +
" \"skipLegacyPool\": false,\n" +
" \"dependencyFollowSuggests\": false,\n" +
" \"dependencyFollowRecommends\": false,\n" +
" \"dependencyFollowAllVariants\": false,\n" +
" \"dependencyFollowSource\": false,\n" +
" \"dependencyVerboseResolve\": false,\n" +
" \"ppaDistributorID\": \"\",\n" +
" \"ppaCodename\": \"\",\n" +
" \"serveInAPIMode\": false,\n" +
" \"enableMetricsEndpoint\": false,\n" +
" \"enableSwaggerEndpoint\": false,\n" +
" \"AsyncAPI\": false,\n" +
" \"databaseBackend\": {\n" +
" \"type\": \"\",\n" +
" \"dbPath\": \"\",\n" +
" \"url\": \"\"\n" +
" },\n" +
" \"downloader\": \"\",\n" +
" \"downloadConcurrency\": 5,\n" +
" \"downloadSpeedLimit\": 0,\n" +
" \"downloadRetries\": 0,\n" +
" \"downloadSourcePackages\": false,\n" +
" \"gpgProvider\": \"gpg\",\n" +
" \"gpgDisableSign\": false,\n" +
" \"gpgDisableVerify\": false,\n" +
" \"skipContentsPublishing\": false,\n" +
" \"skipBz2Publishing\": false,\n" +
" \"FileSystemPublishEndpoints\": {\n" +
" \"test\": {\n" +
" \"rootDir\": \"/opt/aptly-publish\",\n" +
" \"linkMethod\": \"\",\n" +
" \"verifyMethod\": \"\"\n" +
" }\n" +
" },\n" +
" \"S3PublishEndpoints\": {\n" +
" \"test\": {\n" +
" \"region\": \"us-east-1\",\n" +
" \"bucket\": \"repo\",\n" +
" \"prefix\": \"\",\n" +
" \"acl\": \"\",\n" +
" \"awsAccessKeyID\": \"\",\n" +
" \"awsSecretAccessKey\": \"\",\n" +
" \"awsSessionToken\": \"\",\n" +
" \"endpoint\": \"\",\n" +
" \"storageClass\": \"\",\n" +
" \"encryptionMethod\": \"\",\n" +
" \"plusWorkaround\": false,\n" +
" \"disableMultiDel\": false,\n" +
" \"forceSigV2\": false,\n" +
" \"forceVirtualHostedStyle\": false,\n" +
" \"debug\": false\n" +
" }\n" +
" },\n" +
" \"SwiftPublishEndpoints\": {\n" +
" \"test\": {\n" +
" \"container\": \"repo\",\n" +
" \"prefix\": \"\",\n" +
" \"osname\": \"\",\n" +
" \"password\": \"\",\n" +
" \"tenant\": \"\",\n" +
" \"tenantid\": \"\",\n" +
" \"domain\": \"\",\n" +
" \"domainid\": \"\",\n" +
" \"tenantdomain\": \"\",\n" +
" \"tenantdomainid\": \"\",\n" +
" \"authurl\": \"\"\n" +
" }\n" +
" },\n" +
" \"AzurePublishEndpoints\": {\n" +
" \"test\": {\n" +
" \"container\": \"repo\",\n" +
" \"prefix\": \"\",\n" +
" \"accountName\": \"\",\n" +
" \"accountKey\": \"\",\n" +
" \"endpoint\": \"\"\n" +
" }\n" +
" },\n" +
" \"packagePoolStorage\": {\n" +
" \"type\": \"local\",\n" +
" \"path\": \"/tmp/aptly-pool\"\n" +
" }\n" +
"{\n"+
" \"rootDir\": \"/tmp/aptly\",\n"+
" \"logLevel\": \"info\",\n"+
" \"logFormat\": \"json\",\n"+
" \"databaseOpenAttempts\": 5,\n"+
" \"architectures\": null,\n"+
" \"skipLegacyPool\": false,\n"+
" \"dependencyFollowSuggests\": false,\n"+
" \"dependencyFollowRecommends\": false,\n"+
" \"dependencyFollowAllVariants\": false,\n"+
" \"dependencyFollowSource\": false,\n"+
" \"dependencyVerboseResolve\": false,\n"+
" \"ppaDistributorID\": \"\",\n"+
" \"ppaCodename\": \"\",\n"+
" \"serveInAPIMode\": false,\n"+
" \"enableMetricsEndpoint\": false,\n"+
" \"enableSwaggerEndpoint\": false,\n"+
" \"AsyncAPI\": false,\n"+
" \"databaseBackend\": {\n"+
" \"type\": \"\",\n"+
" \"dbPath\": \"\",\n"+
" \"url\": \"\"\n"+
" },\n"+
" \"downloader\": \"\",\n"+
" \"downloadConcurrency\": 5,\n"+
" \"downloadSpeedLimit\": 0,\n"+
" \"downloadRetries\": 0,\n"+
" \"downloadSourcePackages\": false,\n"+
" \"gpgProvider\": \"gpg\",\n"+
" \"gpgDisableSign\": false,\n"+
" \"gpgDisableVerify\": false,\n"+
" \"skipContentsPublishing\": false,\n"+
" \"skipBz2Publishing\": false,\n"+
" \"FileSystemPublishEndpoints\": {\n"+
" \"test\": {\n"+
" \"rootDir\": \"/opt/aptly-publish\",\n"+
" \"linkMethod\": \"\",\n"+
" \"verifyMethod\": \"\"\n"+
" }\n"+
" },\n"+
" \"S3PublishEndpoints\": {\n"+
" \"test\": {\n"+
" \"region\": \"us-east-1\",\n"+
" \"bucket\": \"repo\",\n"+
" \"prefix\": \"\",\n"+
" \"acl\": \"\",\n"+
" \"awsAccessKeyID\": \"\",\n"+
" \"awsSecretAccessKey\": \"\",\n"+
" \"awsSessionToken\": \"\",\n"+
" \"endpoint\": \"\",\n"+
" \"storageClass\": \"\",\n"+
" \"encryptionMethod\": \"\",\n"+
" \"plusWorkaround\": false,\n"+
" \"disableMultiDel\": false,\n"+
" \"forceSigV2\": false,\n"+
" \"forceVirtualHostedStyle\": false,\n"+
" \"debug\": false\n"+
" }\n"+
" },\n"+
" \"SwiftPublishEndpoints\": {\n"+
" \"test\": {\n"+
" \"container\": \"repo\",\n"+
" \"prefix\": \"\",\n"+
" \"osname\": \"\",\n"+
" \"password\": \"\",\n"+
" \"tenant\": \"\",\n"+
" \"tenantid\": \"\",\n"+
" \"domain\": \"\",\n"+
" \"domainid\": \"\",\n"+
" \"tenantdomain\": \"\",\n"+
" \"tenantdomainid\": \"\",\n"+
" \"authurl\": \"\"\n"+
" }\n"+
" },\n"+
" \"AzurePublishEndpoints\": {\n"+
" \"test\": {\n"+
" \"container\": \"repo\",\n"+
" \"prefix\": \"\",\n"+
" \"accountName\": \"\",\n"+
" \"accountKey\": \"\",\n"+
" \"endpoint\": \"\"\n"+
" }\n"+
" },\n"+
" \"packagePoolStorage\": {\n"+
" \"type\": \"local\",\n"+
" \"path\": \"/tmp/aptly-pool\"\n"+
" }\n"+
"}")
}
@@ -167,8 +167,8 @@ func (s *ConfigSuite) TestLoadYAMLConfig(c *C) {
_, _ = f.WriteString(configFileYAML)
_ = f.Close()
-// start with empty config
-s.config = ConfigStructure{}
+// start with empty config
+s.config = ConfigStructure{}
err := LoadConfig(configname, &s.config)
c.Assert(err, IsNil)
@@ -183,8 +183,8 @@ func (s *ConfigSuite) TestLoadYAMLErrorConfig(c *C) {
_, _ = f.WriteString(configFileYAMLError)
_ = f.Close()
-// start with empty config
-s.config = ConfigStructure{}
+// start with empty config
+s.config = ConfigStructure{}
err := LoadConfig(configname, &s.config)
c.Assert(err.Error(), Equals, "invalid yaml (unknown pool storage type: invalid) or json (invalid character 'p' looking for beginning of value)")
@@ -196,13 +196,13 @@ func (s *ConfigSuite) TestSaveYAMLConfig(c *C) {
_, _ = f.WriteString(configFileYAML)
_ = f.Close()
-// start with empty config
-s.config = ConfigStructure{}
+// start with empty config
+s.config = ConfigStructure{}
err := LoadConfig(configname, &s.config)
c.Assert(err, IsNil)
-err = SaveConfigYAML(configname, &s.config)
+err = SaveConfigYAML(configname, &s.config)
c.Assert(err, IsNil)
f, _ = os.Open(configname)
@@ -218,17 +218,17 @@ func (s *ConfigSuite) TestSaveYAMLConfig(c *C) {
}
func (s *ConfigSuite) TestSaveYAML2Config(c *C) {
-// start with empty config
-s.config = ConfigStructure{}
+// start with empty config
+s.config = ConfigStructure{}
s.config.PackagePoolStorage.Local = &LocalPoolStorage{"/tmp/aptly-pool"}
-s.config.PackagePoolStorage.Azure = nil
+s.config.PackagePoolStorage.Azure = nil
configname := filepath.Join(c.MkDir(), "aptly.yaml4")
-err := SaveConfigYAML(configname, &s.config)
+err := SaveConfigYAML(configname, &s.config)
c.Assert(err, IsNil)
-f, _ := os.Open(configname)
+f, _ := os.Open(configname)
defer func() {
_ = f.Close()
}()
@@ -237,44 +237,44 @@ func (s *ConfigSuite) TestSaveYAML2Config(c *C) {
buf := make([]byte, st.Size())
_, _ = f.Read(buf)
-c.Check(string(buf), Equals, "" +
-"root_dir: \"\"\n" +
-"log_level: \"\"\n" +
-"log_format: \"\"\n" +
-"database_open_attempts: 0\n" +
-"architectures: []\n" +
-"skip_legacy_pool: false\n" +
-"dep_follow_suggests: false\n" +
-"dep_follow_recommends: false\n" +
-"dep_follow_all_variants: false\n" +
-"dep_follow_source: false\n" +
-"dep_verboseresolve: false\n" +
-"ppa_distributor_id: \"\"\n" +
-"ppa_codename: \"\"\n" +
-"serve_in_api_mode: false\n" +
-"enable_metrics_endpoint: false\n" +
-"enable_swagger_endpoint: false\n" +
-"async_api: false\n" +
-"database_backend:\n" +
-" type: \"\"\n" +
-" db_path: \"\"\n" +
-" url: \"\"\n" +
-"downloader: \"\"\n" +
-"download_concurrency: 0\n" +
-"download_limit: 0\n" +
-"download_retries: 0\n" +
-"download_sourcepackages: false\n" +
-"gpg_provider: \"\"\n" +
-"gpg_disable_sign: false\n" +
-"gpg_disable_verify: false\n" +
-"skip_contents_publishing: false\n" +
-"skip_bz2_publishing: false\n" +
-"filesystem_publish_endpoints: {}\n" +
-"s3_publish_endpoints: {}\n" +
-"swift_publish_endpoints: {}\n" +
-"azure_publish_endpoints: {}\n" +
-"packagepool_storage:\n" +
-" type: local\n" +
+c.Check(string(buf), Equals, ""+
+"root_dir: \"\"\n"+
+"log_level: \"\"\n"+
+"log_format: \"\"\n"+
+"database_open_attempts: 0\n"+
+"architectures: []\n"+
+"skip_legacy_pool: false\n"+
+"dep_follow_suggests: false\n"+
+"dep_follow_recommends: false\n"+
+"dep_follow_all_variants: false\n"+
+"dep_follow_source: false\n"+
+"dep_verboseresolve: false\n"+
+"ppa_distributor_id: \"\"\n"+
+"ppa_codename: \"\"\n"+
+"serve_in_api_mode: false\n"+
+"enable_metrics_endpoint: false\n"+
+"enable_swagger_endpoint: false\n"+
+"async_api: false\n"+
+"database_backend:\n"+
+" type: \"\"\n"+
+" db_path: \"\"\n"+
+" url: \"\"\n"+
+"downloader: \"\"\n"+
+"download_concurrency: 0\n"+
+"download_limit: 0\n"+
+"download_retries: 0\n"+
+"download_sourcepackages: false\n"+
+"gpg_provider: \"\"\n"+
+"gpg_disable_sign: false\n"+
+"gpg_disable_verify: false\n"+
+"skip_contents_publishing: false\n"+
+"skip_bz2_publishing: false\n"+
+"filesystem_publish_endpoints: {}\n"+
+"s3_publish_endpoints: {}\n"+
+"swift_publish_endpoints: {}\n"+
+"azure_publish_endpoints: {}\n"+
+"packagepool_storage:\n"+
+" type: local\n"+
" path: /tmp/aptly-pool\n")
}
@@ -283,8 +283,8 @@ func (s *ConfigSuite) TestLoadEmptyConfig(c *C) {
f, _ := os.Create(configname)
_ = f.Close()
-// start with empty config
-s.config = ConfigStructure{}
+// start with empty config
+s.config = ConfigStructure{}
err := LoadConfig(configname, &s.config)
c.Assert(err.Error(), Equals, "invalid yaml (EOF) or json (EOF)")

View File

@@ -34,7 +34,7 @@ func (s *UtilsSuite) TestDirIsAccessibleNotExist(c *C) {
func (s *UtilsSuite) TestDirIsAccessibleNotAccessible(c *C) {
accessible := DirIsAccessible(s.tempfile.Name())
if accessible == nil {
-c.Fatalf("Test dir should not be accessible: %s", s.tempfile.Name())
-}
+c.Fatalf("Test dir should not be accessible: %s", s.tempfile.Name())
+}
c.Check(accessible.Error(), Equals, fmt.Errorf("'%s' is inaccessible, check access rights", s.tempfile.Name()).Error())
}