Mirror of https://github.com/aptly-dev/aptly.git
Synced 2026-01-11 03:11:50 +00:00
Merge branch 'master' into s3-reupload-fix
.github/workflows/ci.yml (vendored, 8 changes)

@@ -109,10 +109,10 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        name: ["Debian 13/testing", "Debian 12/bookworm", "Debian 11/bullseye", "Debian 10/buster", "Ubuntu 24.04", "Ubuntu 22.04", "Ubuntu 20.04"]
+        name: ["Debian 13/trixie", "Debian 12/bookworm", "Debian 11/bullseye", "Ubuntu 24.04", "Ubuntu 22.04", "Ubuntu 20.04"]
         arch: ["amd64", "i386" , "arm64" , "armhf"]
         include:
-          - name: "Debian 13/testing"
+          - name: "Debian 13/trixie"
            suite: trixie
            image: debian:trixie-slim
          - name: "Debian 12/bookworm"
@@ -121,9 +121,6 @@ jobs:
           - name: "Debian 11/bullseye"
            suite: bullseye
            image: debian:bullseye-slim
-          - name: "Debian 10/buster"
-           suite: buster
-           image: debian:buster-slim
          - name: "Ubuntu 24.04"
            suite: noble
            image: ubuntu:24.04
@@ -135,6 +132,7 @@ jobs:
             image: ubuntu:20.04
     container:
       image: ${{ matrix.image }}
+      options: --user root
     env:
       APT_LISTCHANGES_FRONTEND: none
       DEBIAN_FRONTEND: noninteractive

AUTHORS (3 changes)

@@ -71,3 +71,6 @@ List of contributors, in chronological order:
 * Silke Hofstra (https://github.com/silkeh)
 * Itay Porezky (https://github.com/itayporezky)
 * Alejandro Guijarro Monerris (https://github.com/alguimodd)
+* JupiterRider (https://github.com/JupiterRider)
+* Agustin Henze (https://github.com/agustinhenze)
+* Tobias Assarsson (https://github.com/daedaluz)

Makefile (36 changes)

@@ -7,11 +7,23 @@ COVERAGE_DIR?=$(shell mktemp -d)
 GOOS=$(shell go env GOHOSTOS)
 GOARCH=$(shell go env GOHOSTARCH)
 
 export PODMAN_USERNS = keep-id
+DOCKER_RUN = docker run --security-opt label=disable -it --user 0:0 --rm -v ${PWD}:/work/src
 
 # Setting TZ for certificates
 export TZ=UTC
 # Unit tests and some system tests rely on expired certificates, turn back the time
 export TEST_FAKETIME := 2025-01-02 03:04:05
 
+# run with 'COVERAGE_SKIP=1' to skip coverage checks during system tests
+ifeq ($(COVERAGE_SKIP),1)
+COVERAGE_ARG_BUILD :=
+COVERAGE_ARG_TEST := --coverage-skip
+else
+COVERAGE_ARG_BUILD := -coverpkg="./..."
+COVERAGE_ARG_TEST := --coverage-dir $(COVERAGE_DIR)
+endif
+
 # export CAPTURE=1 for regenerating test gold files
 ifeq ($(CAPTURE),1)
 CAPTURE_ARG := --capture
@@ -103,13 +115,13 @@ test: prepare swagger etcd-install ## Run unit tests (add TEST=regex to specify
 
 system-test: prepare swagger etcd-install ## Run system tests
 	# build coverage binary
-	go test -v -coverpkg="./..." -c -tags testruncli
+	go test -v $(COVERAGE_ARG_BUILD) -c -tags testruncli
 	# Download fixture-db, fixture-pool, etcd.db
 	if [ ! -e ~/aptly-fixture-db ]; then git clone https://github.com/aptly-dev/aptly-fixture-db.git ~/aptly-fixture-db/; fi
 	if [ ! -e ~/aptly-fixture-pool ]; then git clone https://github.com/aptly-dev/aptly-fixture-pool.git ~/aptly-fixture-pool/; fi
 	test -f ~/etcd.db || (curl -o ~/etcd.db.xz http://repo.aptly.info/system-tests/etcd.db.xz && xz -d ~/etcd.db.xz)
 	# Run system tests
-	PATH=$(BINPATH)/:$(PATH) FORCE_COLOR=1 $(PYTHON) system/run.py --long --coverage-dir $(COVERAGE_DIR) $(CAPTURE_ARG) $(TEST)
+	PATH=$(BINPATH)/:$(PATH) FORCE_COLOR=1 $(PYTHON) system/run.py --long $(COVERAGE_ARG_TEST) $(CAPTURE_ARG) $(TEST)
 
 bench:
 	@echo "\e[33m\e[1mRunning benchmark ...\e[0m"
@@ -173,16 +185,16 @@ docker-image-no-cache: ## Build aptly-dev docker image (no cache)
 	@docker build --no-cache -f system/Dockerfile . -t aptly-dev
 
 docker-build: ## Build aptly in docker container
-	@docker run -it --rm -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper build
+	@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper build
 
 docker-shell: ## Run aptly and other commands in docker container
-	@docker run -it --rm -p 3142:3142 -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper || true
+	@$(DOCKER_RUN) -p 3142:3142 aptly-dev /work/src/system/docker-wrapper || true
 
 docker-deb: ## Build debian packages in docker container
-	@docker run -it --rm -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper dpkg DEBARCH=amd64
+	@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper dpkg DEBARCH=amd64
 
 docker-unit-test: ## Run unit tests in docker container (add TEST=regex to specify which tests to run)
-	@docker run -it --rm -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper \
+	@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper \
 		azurite-start \
 		AZURE_STORAGE_ENDPOINT=http://127.0.0.1:10000/devstoreaccount1 \
 		AZURE_STORAGE_ACCOUNT=devstoreaccount1 \
@@ -191,27 +203,27 @@ docker-unit-test: ## Run unit tests in docker container (add TEST=regex to spec
 		azurite-stop
 
 docker-system-test: ## Run system tests in docker container (add TEST=t04_mirror or TEST=UpdateMirror26Test to run only specific tests)
-	@docker run -it --rm -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper \
+	@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper \
 		azurite-start \
 		AZURE_STORAGE_ENDPOINT=http://127.0.0.1:10000/devstoreaccount1 \
 		AZURE_STORAGE_ACCOUNT=devstoreaccount1 \
 		AZURE_STORAGE_ACCESS_KEY="Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" \
 		AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) \
 		AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) \
-		system-test TEST=$(TEST) \
+		system-test TEST=$(TEST) CAPTURE=$(CAPTURE) COVERAGE_SKIP=$(COVERAGE_SKIP) \
 		azurite-stop
 
 docker-serve: ## Run development server (auto recompiling) on http://localhost:3142
-	@docker run -it --rm -p 3142:3142 -v ${PWD}:/work/src -v /tmp/cache-go-aptly:/var/lib/aptly/.cache/go-build aptly-dev /work/src/system/docker-wrapper serve || true
+	@$(DOCKER_RUN) -p 3142:3142 -v /tmp/cache-go-aptly:/var/lib/aptly/.cache/go-build aptly-dev /work/src/system/docker-wrapper serve || true
 
 docker-lint: ## Run golangci-lint in docker container
-	@docker run -it --rm -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper lint
+	@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper lint
 
 docker-binaries: ## Build binary releases (FreeBSD, macOS, Linux generic) in docker container
-	@docker run -it --rm -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper binaries
+	@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper binaries
 
 docker-man: ## Create man page in docker container
-	@docker run -it --rm -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper man
+	@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper man
 
 mem.png: mem.dat mem.gp
 	gnuplot mem.gp

@@ -63,7 +63,7 @@ Define Release APT sources in ``/etc/apt/sources.list.d/aptly.list``::
 
     deb [signed-by=/etc/apt/keyrings/aptly.asc] http://repo.aptly.info/release DIST main
 
-Where DIST is one of: ``buster``, ``bullseye``, ``bookworm``, ``focal``, ``jammy``, ``noble``
+Where DIST is one of: ``bullseye``, ``bookworm``, ``trixie``, ``focal``, ``jammy``, ``noble``
 
 Install aptly packages::
 
@@ -80,7 +80,7 @@ Define CI APT sources in ``/etc/apt/sources.list.d/aptly-ci.list``::
 
     deb [signed-by=/etc/apt/keyrings/aptly.asc] http://repo.aptly.info/ci DIST main
 
-Where DIST is one of: ``buster``, ``bullseye``, ``bookworm``, ``focal``, ``jammy``, ``noble``
+Where DIST is one of: ``bullseye``, ``bookworm``, ``trixie``, ``focal``, ``jammy``, ``noble``
 
 Note: same gpg key is used as for the Upstream Debian Packages.

@@ -13,5 +13,6 @@ git push origin v$version master
 - run swagger locally (`make docker-serve`)
 - copy generated docs/swagger.json to https://github.com/aptly-dev/www.aptly.info/tree/master/static/swagger/aptly_1.x.y.json
 - add new version to select tag in content/doc/api/swagger.md line 48
 - update version in content/download.md
 - push commit to master
 - create release announcement on https://github.com/aptly-dev/aptly/discussions

@@ -70,7 +70,7 @@ func apiReady(isReady *atomic.Value) func(*gin.Context) {
             return
         }
 
-        status := aptlyStatus{Status: "Aptly is ready"}
+        status := aptlyStatus{Status: "Aptly is ready"}
         c.JSON(200, status)
     }
 }
@@ -178,7 +178,7 @@ func truthy(value interface{}) bool {
     if value == nil {
         return false
     }
-    switch v := value.(type) {
+    switch v := value.(type) {
     case string:
         switch strings.ToLower(v) {
         case "n", "no", "f", "false", "0", "off":
@@ -393,7 +393,6 @@ type publishedRepoUpdateSwitchParams struct {
-// @Description
 // @Description See also: `aptly publish update` / `aptly publish switch`
 // @Tags Publish
 // @Produce json
 // @Param prefix path string true "publishing prefix"
 // @Param distribution path string true "distribution name"
 // @Param _async query bool false "Run in background and return task object"

api/repos.go (13 changes)

@@ -195,17 +195,18 @@ func apiReposEdit(c *gin.Context) {
     collectionFactory := context.NewCollectionFactory()
     collection := collectionFactory.LocalRepoCollection()
 
-    repo, err := collection.ByName(c.Params.ByName("name"))
+    name := c.Params.ByName("name")
+    repo, err := collection.ByName(name)
     if err != nil {
         AbortWithJSONError(c, 404, err)
         return
     }
 
-    if b.Name != nil {
+    if b.Name != nil && *b.Name != name {
         _, err := collection.ByName(*b.Name)
         if err == nil {
             // already exists
-            AbortWithJSONError(c, 404, err)
+            AbortWithJSONError(c, 404, fmt.Errorf("local repo with name %q already exists", *b.Name))
             return
         }
         repo.Name = *b.Name
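
The tightened guard above treats a rename to the repo's current name as a no-op instead of reporting a spurious conflict, and only rejects names that belong to another existing repo. A runnable sketch of just that check (simplified stand-in types, not the actual aptly API):

package main

import "fmt"

// repoStore is a hypothetical stand-in for collection.ByName lookups.
type repoStore map[string]bool

func rename(repos repoStore, current, proposed string) error {
    // Skip the conflict check when the "rename" keeps the same name.
    if proposed != current {
        if repos[proposed] {
            return fmt.Errorf("local repo with name %q already exists", proposed)
        }
        delete(repos, current)
        repos[proposed] = true
    }
    return nil
}

func main() {
    repos := repoStore{"main": true, "staging": true}
    fmt.Println(rename(repos, "main", "main"))    // <nil>: same-name rename is a no-op
    fmt.Println(rename(repos, "main", "staging")) // error: staging already exists
}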
@@ -455,7 +456,7 @@ func apiReposPackagesDelete(c *gin.Context) {
 // @Tags Repos
 // @Param name path string true "Repository name"
 // @Param dir path string true "Directory of packages"
-// @Param file path string false "Filename (optional)"
+// @Param file path string true "Filename"
 // @Param _async query bool false "Run in background and return task object"
 // @Produce json
 // @Success 200 {string} string "OK"
@@ -901,10 +902,10 @@ func apiReposIncludePackageFromDir(c *gin.Context) {
             out.Printf("Failed files: %s\n", strings.Join(failedFiles, ", "))
         }
 
-        ret := reposIncludePackageFromDirResponse{
+        ret := reposIncludePackageFromDirResponse{
             Report: reporter,
             FailedFiles: failedFiles,
-        }
+        }
         return &task.ProcessReturnValue{Code: http.StatusOK, Value: ret}, nil
     })
 }

@@ -217,10 +217,9 @@ type snapshotsCreateFromRepositoryParams struct {
 // @Summary Snapshot Repository
 // @Description **Create a snapshot of a repository by name**
 // @Tags Snapshots
-// @Param name path string true "Repository name"
 // @Consume json
 // @Param request body snapshotsCreateFromRepositoryParams true "Parameters"
-// @Param name path string true "Name of the snapshot"
+// @Param name path string true "Repository name"
 // @Param _async query bool false "Run in background and return task object"
 // @Produce json
 // @Success 201 {object} deb.Snapshot "Created snapshot object"

@@ -104,7 +104,7 @@ func (pool *PackagePool) Open(path string) (aptly.ReadSeekerCloser, error) {
     if err != nil {
         return nil, errors.Wrapf(err, "error creating tempfile for %s", path)
     }
-    defer func () { _ = os.Remove(temp.Name()) }()
+    defer func() { _ = os.Remove(temp.Name()) }()
 
     _, err = pool.az.client.DownloadFile(context.TODO(), pool.az.container, path, temp, nil)
     if err != nil {

@@ -7,7 +7,7 @@ import (
     "path/filepath"
     "runtime"
 
-    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
     "github.com/aptly-dev/aptly/aptly"
     "github.com/aptly-dev/aptly/files"
     "github.com/aptly-dev/aptly/utils"
@@ -50,10 +50,10 @@ func (s *PackagePoolSuite) SetUpTest(c *C) {
 
     s.pool, err = NewPackagePool(s.accountName, s.accountKey, container, "", s.endpoint)
     c.Assert(err, IsNil)
-    publicAccessType := azblob.PublicAccessTypeContainer
-    _, err = s.pool.az.client.CreateContainer(context.TODO(), s.pool.az.container, &azblob.CreateContainerOptions{
-        Access: &publicAccessType,
-    })
+    publicAccessType := azblob.PublicAccessTypeContainer
+    _, err = s.pool.az.client.CreateContainer(context.TODO(), s.pool.az.container, &azblob.CreateContainerOptions{
+        Access: &publicAccessType,
+    })
     c.Assert(err, IsNil)
 
     s.prefixedPool, err = NewPackagePool(s.accountName, s.accountKey, container, prefix, s.endpoint)

@@ -1,17 +1,17 @@
 package azure
 
 import (
+    "bytes"
     "context"
     "crypto/md5"
     "crypto/rand"
     "io"
     "os"
     "path/filepath"
-    "bytes"
 
-    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
-    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
-    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
     "github.com/aptly-dev/aptly/files"
     "github.com/aptly-dev/aptly/utils"
     . "gopkg.in/check.v1"
@@ -69,10 +69,10 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
 
     s.storage, err = NewPublishedStorage(s.accountName, s.accountKey, container, "", s.endpoint)
     c.Assert(err, IsNil)
-    publicAccessType := azblob.PublicAccessTypeContainer
-    _, err = s.storage.az.client.CreateContainer(context.Background(), s.storage.az.container, &azblob.CreateContainerOptions{
-        Access: &publicAccessType,
-    })
+    publicAccessType := azblob.PublicAccessTypeContainer
+    _, err = s.storage.az.client.CreateContainer(context.Background(), s.storage.az.container, &azblob.CreateContainerOptions{
+        Access: &publicAccessType,
+    })
     c.Assert(err, IsNil)
 
     s.prefixedStorage, err = NewPublishedStorage(s.accountName, s.accountKey, container, prefix, s.endpoint)
@@ -80,12 +80,12 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
 }
 
 func (s *PublishedStorageSuite) TearDownTest(c *C) {
-    _, err := s.storage.az.client.DeleteContainer(context.Background(), s.storage.az.container, nil)
+    _, err := s.storage.az.client.DeleteContainer(context.Background(), s.storage.az.container, nil)
     c.Assert(err, IsNil)
 }
 
 func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
-    resp, err := s.storage.az.client.DownloadStream(context.Background(), s.storage.az.container, path, nil)
+    resp, err := s.storage.az.client.DownloadStream(context.Background(), s.storage.az.container, path, nil)
     c.Assert(err, IsNil)
     data, err := io.ReadAll(resp.Body)
     c.Assert(err, IsNil)
@@ -93,26 +93,26 @@ func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
 }
 
 func (s *PublishedStorageSuite) AssertNoFile(c *C, path string) {
-    serviceClient := s.storage.az.client.ServiceClient()
-    containerClient := serviceClient.NewContainerClient(s.storage.az.container)
-    blobClient := containerClient.NewBlobClient(path)
-    _, err := blobClient.GetProperties(context.Background(), nil)
+    serviceClient := s.storage.az.client.ServiceClient()
+    containerClient := serviceClient.NewContainerClient(s.storage.az.container)
+    blobClient := containerClient.NewBlobClient(path)
+    _, err := blobClient.GetProperties(context.Background(), nil)
     c.Assert(err, NotNil)
 
-    storageError, ok := err.(*azcore.ResponseError)
+    storageError, ok := err.(*azcore.ResponseError)
     c.Assert(ok, Equals, true)
     c.Assert(storageError.StatusCode, Equals, 404)
 }
 
 func (s *PublishedStorageSuite) PutFile(c *C, path string, data []byte) {
     hash := md5.Sum(data)
-    uploadOptions := &azblob.UploadStreamOptions{
-        HTTPHeaders: &blob.HTTPHeaders{
-            BlobContentMD5: hash[:],
-        },
-    }
-    reader := bytes.NewReader(data)
-    _, err := s.storage.az.client.UploadStream(context.Background(), s.storage.az.container, path, reader, uploadOptions)
+    uploadOptions := &azblob.UploadStreamOptions{
+        HTTPHeaders: &blob.HTTPHeaders{
+            BlobContentMD5: hash[:],
+        },
+    }
+    reader := bytes.NewReader(data)
+    _, err := s.storage.az.client.UploadStream(context.Background(), s.storage.az.container, path, reader, uploadOptions)
     c.Assert(err, IsNil)
 }

@@ -11,7 +11,7 @@ func Test(t *testing.T) {
     TestingT(t)
 }
 
-type ProgressSuite struct {}
+type ProgressSuite struct{}
 
 var _ = Suite(&ProgressSuite{})

@@ -14,7 +14,7 @@ func Test(t *testing.T) {
 }
 
 type EtcDDBSuite struct {
-    db database.Storage
+    db database.Storage
 }
 
 var _ = Suite(&EtcDDBSuite{})
@@ -133,7 +133,7 @@ func (s *EtcDDBSuite) TestTransactionCommit(c *C) {
     v, err := s.db.Get(key)
     c.Assert(err, IsNil)
     c.Check(v, DeepEquals, value)
-    err = transaction.Delete(key)
+    err = transaction.Delete(key)
     c.Assert(err, IsNil)
 
     _, err = transaction.Get(key2)
@@ -156,4 +156,3 @@ func (s *EtcDDBSuite) TestTransactionCommit(c *C) {
     _, err = transaction.Get(key)
     c.Assert(err, NotNil)
 }
-

@@ -65,7 +65,7 @@ func (s *PackageRefListSuite) TestNewPackageListFromRefList(c *C) {
     list, err := NewPackageListFromRefList(reflist, coll, nil)
     c.Assert(err, IsNil)
     c.Check(list.Len(), Equals, 4)
-    c.Check(list.Add(s.p4), ErrorMatches, "package already exists and is different: .*")
+    c.Check(list.Add(s.p4), ErrorMatches, "package already exists and is different: .*")
 
     list, err = NewPackageListFromRefList(nil, coll, nil)
     c.Assert(err, IsNil)
@@ -574,7 +574,7 @@ func (repo *RemoteRepo) DownloadPackageIndexes(progress aptly.Progress, d aptly.
             if progress != nil {
                 progress.ColoredPrintf("@y[!]@| @!skipping package %s: duplicate in packages index@|", p)
             }
-        } else if err != nil {
+        } else {
             return err
         }
     }
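
If the surrounding loop reaches this branch only when err is already non-nil (which the hunk suggests but does not show), the old `else if err != nil` was a redundant re-check and the plain `else` now always propagates the failure. A self-contained sketch of that skip-duplicates-or-fail shape (the sentinel error and helper are hypothetical, not aptly code):

package main

import (
    "errors"
    "fmt"
)

// errDuplicate is a hypothetical sentinel for "duplicate in packages index".
var errDuplicate = errors.New("duplicate in packages index")

func addAll(items []string, add func(string) error) error {
    for _, p := range items {
        if err := add(p); err != nil {
            if errors.Is(err, errDuplicate) {
                fmt.Printf("[!] skipping package %s: duplicate in packages index\n", p)
            } else {
                // err is already known non-nil here, so a plain else suffices.
                return err
            }
        }
    }
    return nil
}

func main() {
    seen := map[string]bool{}
    add := func(p string) error {
        if seen[p] {
            return errDuplicate
        }
        seen[p] = true
        return nil
    }
    if err := addAll([]string{"a", "b", "a"}, add); err != nil {
        fmt.Println("fatal:", err)
    }
}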
@@ -31,8 +31,7 @@ func BenchmarkSnapshotCollectionForEach(b *testing.B) {
     for i := 0; i < b.N; i++ {
         collection = NewSnapshotCollection(db)
 
-
-        _ = collection.ForEach(func(s *Snapshot) error {
+        _ = collection.ForEach(func(s *Snapshot) error {
             return nil
         })
     }

@@ -50,7 +50,7 @@ func compareLexicographic(s1, s2 string) int {
     i := 0
     l1, l2 := len(s1), len(s2)
 
-    for !(i == l1 && i == l2) { // break if s1 equal to s2
+    for !(i == l1 && i == l2) { // break if s1 equal to s2
 
         if i == l2 {
             // s1 is longer than s2

files/linkfrompool_concurrency_test.go (new file, 283 lines)

@@ -0,0 +1,283 @@
package files

import (
    "fmt"
    "os"
    "path/filepath"
    "sync"
    "time"

    "github.com/aptly-dev/aptly/aptly"
    "github.com/aptly-dev/aptly/utils"

    . "gopkg.in/check.v1"
)

type LinkFromPoolConcurrencySuite struct {
    root          string
    poolDir       string
    storage       *PublishedStorage
    pool          *PackagePool
    cs            aptly.ChecksumStorage
    testFile      string
    testContent   []byte
    testChecksums utils.ChecksumInfo
    srcPoolPath   string
}

var _ = Suite(&LinkFromPoolConcurrencySuite{})

func (s *LinkFromPoolConcurrencySuite) SetUpTest(c *C) {
    s.root = c.MkDir()
    s.poolDir = filepath.Join(s.root, "pool")
    publishDir := filepath.Join(s.root, "public")

    // Create package pool and published storage
    s.pool = NewPackagePool(s.poolDir, true)
    s.storage = NewPublishedStorage(publishDir, "copy", "checksum")
    s.cs = NewMockChecksumStorage()

    // Create test file content
    s.testContent = []byte("test package content for concurrency testing")
    s.testFile = filepath.Join(s.root, "test-package.deb")

    err := os.WriteFile(s.testFile, s.testContent, 0644)
    c.Assert(err, IsNil)

    // Calculate checksums
    md5sum, err := utils.MD5ChecksumForFile(s.testFile)
    c.Assert(err, IsNil)

    s.testChecksums = utils.ChecksumInfo{
        Size: int64(len(s.testContent)),
        MD5:  md5sum,
    }

    // Import the test file into the pool
    s.srcPoolPath, err = s.pool.Import(s.testFile, "test-package.deb", &s.testChecksums, false, s.cs)
    c.Assert(err, IsNil)
}

func (s *LinkFromPoolConcurrencySuite) TestLinkFromPoolConcurrency(c *C) {
    // Test concurrent LinkFromPool operations to ensure no race conditions
    concurrency := 5000
    iterations := 10

    for iter := 0; iter < iterations; iter++ {
        c.Logf("Iteration %d: Testing concurrent LinkFromPool with %d goroutines", iter+1, concurrency)

        destPath := fmt.Sprintf("main/t/test%d", iter)

        var wg sync.WaitGroup
        errors := make(chan error, concurrency)
        successes := make(chan struct{}, concurrency)

        start := time.Now()

        // Launch concurrent LinkFromPool operations
        for i := 0; i < concurrency; i++ {
            wg.Add(1)
            go func(id int) {
                defer wg.Done()

                // Use force=true to test the most vulnerable code path (remove-then-create)
                err := s.storage.LinkFromPool(
                    "",                 // publishedPrefix
                    destPath,           // publishedRelPath
                    "test-package.deb", // fileName
                    s.pool,             // sourcePool
                    s.srcPoolPath,      // sourcePath
                    s.testChecksums,    // sourceChecksums
                    true,               // force - this triggers vulnerable remove-then-create pattern
                )

                if err != nil {
                    errors <- fmt.Errorf("goroutine %d failed: %v", id, err)
                } else {
                    successes <- struct{}{}
                }
            }(i)
        }

        // Wait for completion
        wg.Wait()
        duration := time.Since(start)

        close(errors)
        close(successes)

        // Count results
        errorCount := 0
        successCount := 0
        var firstError error

        for err := range errors {
            errorCount++
            if firstError == nil {
                firstError = err
            }
            c.Logf("Race condition error: %v", err)
        }

        for range successes {
            successCount++
        }

        c.Logf("Results: %d successes, %d errors, took %v", successCount, errorCount, duration)

        // Assert no race conditions occurred
        if errorCount > 0 {
            c.Fatalf("Race condition detected in iteration %d! "+
                "Errors: %d out of %d operations (%.1f%% failure rate). "+
                "First error: %v. "+
                "This indicates the fix is not working properly.",
                iter+1, errorCount, concurrency,
                float64(errorCount)/float64(concurrency)*100, firstError)
        }

        // Verify the final file exists and has correct content
        finalFile := filepath.Join(s.storage.rootPath, destPath, "test-package.deb")
        _, err := os.Stat(finalFile)
        c.Assert(err, IsNil, Commentf("Final file should exist after concurrent operations"))

        content, err := os.ReadFile(finalFile)
        c.Assert(err, IsNil, Commentf("Should be able to read final file"))
        c.Assert(content, DeepEquals, s.testContent, Commentf("File content should be intact after concurrent operations"))

        c.Logf("✓ Iteration %d: No race conditions detected", iter+1)
    }

    c.Logf("SUCCESS: Handled %d total concurrent operations across %d iterations with no race conditions",
        concurrency*iterations, iterations)
}

func (s *LinkFromPoolConcurrencySuite) TestLinkFromPoolConcurrencyDifferentFiles(c *C) {
    // Test concurrent operations on different files to ensure no blocking
    concurrency := 10

    var wg sync.WaitGroup
    errors := make(chan error, concurrency)

    start := time.Now()

    // Launch concurrent operations on different destination files
    for i := 0; i < concurrency; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()

            destPath := fmt.Sprintf("main/t/test-file-%d", id)

            err := s.storage.LinkFromPool(
                "",                 // publishedPrefix
                destPath,           // publishedRelPath
                "test-package.deb", // fileName
                s.pool,             // sourcePool
                s.srcPoolPath,      // sourcePath
                s.testChecksums,    // sourceChecksums
                false,              // force
            )

            if err != nil {
                errors <- fmt.Errorf("goroutine %d failed: %v", id, err)
            }
        }(i)
    }

    // Wait for completion
    wg.Wait()
    duration := time.Since(start)

    close(errors)

    // Count errors
    errorCount := 0
    for err := range errors {
        errorCount++
        c.Logf("Error: %v", err)
    }

    c.Assert(errorCount, Equals, 0, Commentf("No errors should occur when linking to different files"))
    c.Logf("SUCCESS: %d concurrent operations on different files completed in %v", concurrency, duration)

    // Verify all files were created correctly
    for i := 0; i < concurrency; i++ {
        finalFile := filepath.Join(s.storage.rootPath, fmt.Sprintf("main/t/test-file-%d", i), "test-package.deb")
        _, err := os.Stat(finalFile)
        c.Assert(err, IsNil, Commentf("File %d should exist", i))

        content, err := os.ReadFile(finalFile)
        c.Assert(err, IsNil, Commentf("Should be able to read file %d", i))
        c.Assert(content, DeepEquals, s.testContent, Commentf("File %d content should be correct", i))
    }
}

func (s *LinkFromPoolConcurrencySuite) TestLinkFromPoolWithoutForceNoConcurrencyIssues(c *C) {
    // Test that when force=false, concurrent operations on an identical
    // existing file succeed gracefully without corruption
    concurrency := 20
    destPath := "main/t/single-dest"

    var wg sync.WaitGroup
    errors := make(chan error, concurrency)
    successes := make(chan struct{}, concurrency)

    // First, create the file so subsequent operations will conflict
    err := s.storage.LinkFromPool("", destPath, "test-package.deb", s.pool, s.srcPoolPath, s.testChecksums, false)
    c.Assert(err, IsNil)

    start := time.Now()

    // Launch concurrent operations against the already-existing file
    for i := 0; i < concurrency; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()

            err := s.storage.LinkFromPool(
                "",                 // publishedPrefix
                destPath,           // publishedRelPath
                "test-package.deb", // fileName
                s.pool,             // sourcePool
                s.srcPoolPath,      // sourcePath
                s.testChecksums,    // sourceChecksums
                false,              // force=false - identical existing file should succeed
            )

            if err != nil {
                errors <- err
            } else {
                successes <- struct{}{}
            }
        }(i)
    }

    // Wait for completion
    wg.Wait()
    duration := time.Since(start)

    close(errors)
    close(successes)

    errorCount := 0
    successCount := 0

    for range errors {
        errorCount++
    }

    for range successes {
        successCount++
    }

    c.Logf("Results with force=false: %d successes, %d errors, took %v", successCount, errorCount, duration)

    // With force=false and identical files, operations should succeed (file already exists with same content)
    // No race conditions should cause crashes or corruption
    c.Assert(errorCount, Equals, 0, Commentf("With identical files and force=false, operations should succeed"))

    // Verify the file still exists and has correct content
    finalFile := filepath.Join(s.storage.rootPath, destPath, "test-package.deb")
    content, err := os.ReadFile(finalFile)
    c.Assert(err, IsNil)
    c.Assert(content, DeepEquals, s.testContent, Commentf("File should not be corrupted by concurrent access"))
}

@@ -241,7 +241,7 @@ func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.Check
         return "", err
     }
     defer func() {
-        _ = source.Close()
+        _ = source.Close()
     }()
 
     sourceInfo, err := source.Stat()

@@ -22,6 +22,26 @@ type PublishedStorage struct {
     verifyMethod uint
 }
 
+// Global mutex map to prevent concurrent access to the same destinationPath in LinkFromPool
+var (
+    fileLockMutex sync.Mutex
+    fileLocks     = make(map[string]*sync.Mutex)
+)
+
+// getFileLock returns a mutex for a specific file path to prevent concurrent modifications
+func getFileLock(filePath string) *sync.Mutex {
+    fileLockMutex.Lock()
+    defer fileLockMutex.Unlock()
+
+    if mutex, exists := fileLocks[filePath]; exists {
+        return mutex
+    }
+
+    mutex := &sync.Mutex{}
+    fileLocks[filePath] = mutex
+    return mutex
+}
+
 // Check interfaces
 var (
     _ aptly.PublishedStorage = (*PublishedStorage)(nil)
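
One note on the approach: entries in fileLocks are never deleted, so the map grows with the set of distinct destination paths touched during the process lifetime; that reads as an accepted trade-off here rather than a leak in practice. A runnable, self-contained sketch of the same per-path locking pattern (demo code, not part of the commit), serializing a remove-then-create on a single destination:

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "sync"
)

var (
    fileLockMutex sync.Mutex
    fileLocks     = make(map[string]*sync.Mutex)
)

// getFileLock hands out one mutex per path, creating it on first use.
func getFileLock(path string) *sync.Mutex {
    fileLockMutex.Lock()
    defer fileLockMutex.Unlock()
    if m, ok := fileLocks[path]; ok {
        return m
    }
    m := &sync.Mutex{}
    fileLocks[path] = m
    return m
}

func main() {
    dest := filepath.Join(os.TempDir(), "demo.deb")
    var wg sync.WaitGroup
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            l := getFileLock(dest)
            l.Lock()
            defer l.Unlock()
            // The remove-then-create sequence is only safe while the lock is held.
            _ = os.Remove(dest)
            if err := os.WriteFile(dest, []byte("payload"), 0644); err != nil {
                fmt.Println("write failed:", err)
            }
        }()
    }
    wg.Wait()
    fmt.Println("done without races on", dest)
}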
@@ -136,6 +156,12 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
 
     baseName := filepath.Base(fileName)
     poolPath := filepath.Join(storage.rootPath, publishedPrefix, publishedRelPath, filepath.Dir(fileName))
+    destinationPath := filepath.Join(poolPath, baseName)
+
+    // Acquire file-specific lock to prevent concurrent access to the same file
+    fileLock := getFileLock(destinationPath)
+    fileLock.Lock()
+    defer fileLock.Unlock()
 
     var localSourcePool aptly.LocalPackagePool
     if storage.linkMethod != LinkMethodCopy {
@@ -154,7 +180,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
 
     var dstStat os.FileInfo
 
-    dstStat, err = os.Stat(filepath.Join(poolPath, baseName))
+    dstStat, err = os.Stat(destinationPath)
     if err == nil {
         // already exists, check source file
 
@@ -173,7 +199,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
     } else {
         // if source and destination have the same checksums, no need to copy
         var dstMD5 string
-        dstMD5, err = utils.MD5ChecksumForFile(filepath.Join(poolPath, baseName))
+        dstMD5, err = utils.MD5ChecksumForFile(destinationPath)
 
         if err != nil {
             return err
@@ -204,11 +230,11 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
 
         // source and destination have different inodes, if !forced, this is fatal error
         if !force {
-            return fmt.Errorf("error linking file to %s: file already exists and is different", filepath.Join(poolPath, baseName))
+            return fmt.Errorf("error linking file to %s: file already exists and is different", destinationPath)
         }
 
         // forced, so remove destination
-        err = os.Remove(filepath.Join(poolPath, baseName))
+        err = os.Remove(destinationPath)
         if err != nil {
             return err
         }
@@ -223,7 +249,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
     }
 
     var dst *os.File
-    dst, err = os.Create(filepath.Join(poolPath, baseName))
+    dst, err = os.Create(destinationPath)
     if err != nil {
         _ = r.Close()
         return err
@@ -244,9 +270,9 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
 
         err = dst.Close()
     } else if storage.linkMethod == LinkMethodSymLink {
-        err = localSourcePool.Symlink(sourcePath, filepath.Join(poolPath, baseName))
+        err = localSourcePool.Symlink(sourcePath, destinationPath)
     } else {
-        err = localSourcePool.Link(sourcePath, filepath.Join(poolPath, baseName))
+        err = localSourcePool.Link(sourcePath, destinationPath)
     }
 
     return err

@@ -240,7 +240,7 @@ func (downloader *downloaderImpl) download(req *http.Request, url, destination s
     }
     if resp.Body != nil {
         defer func() {
-            _ = resp.Body.Close()
+            _ = resp.Body.Close()
         }()
     }

@@ -1,3 +1,4 @@
+//go:build !go1.7
 // +build !go1.7
 
 package http
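
For context, Go 1.17 introduced the `//go:build` directive, and gofmt keeps it paired and in sync with the legacy `// +build` line during the transition, which is exactly what this hunk adds. A minimal illustration (generic example, not from this repository):

//go:build !go1.7
// +build !go1.7

// Both lines express the same constraint; on Go 1.17+ gofmt adds the
// //go:build form automatically and keeps the two in sync.
package http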
http/grab.go (22 changes)

@@ -49,9 +49,9 @@ func (d *GrabDownloader) Download(ctx context.Context, url string, destination s
 
 func (d *GrabDownloader) DownloadWithChecksum(ctx context.Context, url string, destination string, expected *utils.ChecksumInfo, ignoreMismatch bool) error {
     maxTries := d.maxTries
-    // FIXME: const delayMax = time.Duration(5 * time.Minute)
+    // FIXME: const delayMax = time.Duration(5 * time.Minute)
     delay := time.Duration(1 * time.Second)
-    // FIXME: const delayMultiplier = 2
+    // FIXME: const delayMultiplier = 2
     err := fmt.Errorf("no tries available")
     for maxTries > 0 {
         err = d.download(ctx, url, destination, expected, ignoreMismatch)
@@ -133,17 +133,17 @@ func (d *GrabDownloader) download(_ context.Context, url string, destination str
 
     resp := d.client.Do(req)
 
-    <-resp.Done
+    <-resp.Done
     // download is complete
 
-    // Loop:
-    // for {
-    // 	select {
-    // 	case <-resp.Done:
-    // 		// download is complete
-    // 		break Loop
-    // 	}
-    // }
+    // Loop:
+    // for {
+    // 	select {
+    // 	case <-resp.Done:
+    // 		// download is complete
+    // 		break Loop
+    // 	}
+    // }
     err = resp.Err()
     if err != nil && err == grab.ErrBadChecksum && ignoreMismatch {
         fmt.Printf("Ignoring checksum mismatch for %s\n", url)

@@ -112,9 +112,11 @@ func NewServer(config *Config) (*Server, error) {
         buckets: make(map[string]*bucket),
         config: config,
     }
-    go func() { _ = http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-        srv.serveHTTP(w, req)
-    })) }()
+    go func() {
+        _ = http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+            srv.serveHTTP(w, req)
+        }))
+    }()
     return srv, nil
 }
@@ -527,14 +529,13 @@ func (bucketResource) post(a *action) interface{} {
 // and dashes (-). You can use uppercase letters for buckets only in the
 // US Standard region.
 //
-// Must start with a number or letter
+// # Must start with a number or letter
 //
-// Must be between 3 and 255 characters long
+// # Must be between 3 and 255 characters long
 //
 // There's one extra rule (Must not be formatted as an IP address (e.g., 192.168.5.4)
 // but the real S3 server does not seem to check that rule, so we will not
 // check it either.
-//
 func validBucketName(name string) bool {
     if len(name) < 3 || len(name) > 255 {
         return false

@@ -15,4 +15,4 @@ else
 fi
 
 cd /work/src
-sudo -u aptly PATH=$PATH:/work/src/build GOPATH=/work/src/.go $cmd
+sudo -u aptly PATH=$PATH:/work/src/build GOPATH=/work/src/.go GOCACHE=/work/src/.go/cache $cmd

@@ -310,7 +310,9 @@ class BaseTest(object):
 
         if command[0] == "aptly":
             aptly_testing_bin = Path(__file__).parent / ".." / "aptly.test"
-            command = [str(aptly_testing_bin), f"-test.coverprofile={Path(self.coverage_dir) / self.__class__.__name__}-{uuid4()}.out", *command[1:]]
+            command = [str(aptly_testing_bin), *command[1:]]
+            if self.coverage_dir is not None:
+                command.insert(1, f"-test.coverprofile={Path(self.coverage_dir) / self.__class__.__name__}-{uuid4()}.out")
 
         if self.faketime:
             command = ["faketime", os.environ.get("TEST_FAKETIME", "2025-01-02 03:04:05")] + command
@@ -337,7 +339,7 @@ class BaseTest(object):
         if is_aptly_command:
             # remove the last two rows as go tests always print PASS/FAIL and coverage in those
             # two lines. This would otherwise fail the tests as they would not match gold
-            matches = re.findall(r"((.|\n)*)EXIT: (\d)\n.*\ncoverage: .*", raw_output)
+            matches = re.findall(r"((.|\n)*)EXIT: (\d)\n.*(?:\ncoverage: .*|$)", raw_output)
             if not matches:
                 raise Exception("no matches found in command output '%s'" % raw_output)
@@ -517,7 +519,7 @@ class BaseTest(object):
         if gold != output:
             diff = "".join(difflib.unified_diff(
                 [l + "\n" for l in gold.split("\n")], [l + "\n" for l in output.split("\n")]))
-            raise Exception("content doesn't match:\n" + diff + "\n\nOutput:\n" + orig + "\n")
+            raise Exception(f"content doesn't match:\n{diff}\n\nOutput:\n{orig}\n")
 
         check = check_output

@@ -36,7 +36,7 @@ def natural_key(string_):
     return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
 
 
-def run(include_long_tests=False, capture_results=False, tests=None, filters=None, coverage_dir=None):
+def run(include_long_tests=False, capture_results=False, tests=None, filters=None, coverage_dir=None, coverage_skip=False):
     """
     Run system test.
     """
@@ -47,7 +47,7 @@ def run(include_long_tests=False, capture_results=False, tests=None, filters=Non
     fails = []
     numTests = numFailed = numSkipped = 0
     lastBase = None
-    if not coverage_dir:
+    if not coverage_dir and not coverage_skip:
         coverage_dir = mkdtemp(suffix="aptly-coverage")
 
     failed = False
@@ -213,6 +213,7 @@ if __name__ == "__main__":
     include_long_tests = False
     capture_results = False
     coverage_dir = None
+    coverage_skip = False
     tests = None
     args = sys.argv[1:]
@@ -224,6 +225,8 @@ if __name__ == "__main__":
         elif args[0] == "--coverage-dir":
             coverage_dir = args[1]
             args = args[1:]
+        elif args[0] == "--coverage-skip":
+            coverage_skip = True
 
         args = args[1:]
@@ -236,4 +239,4 @@ if __name__ == "__main__":
     else:
         filters.append(arg)
 
-    run(include_long_tests, capture_results, tests, filters, coverage_dir)
+    run(include_long_tests, capture_results, tests, filters, coverage_dir, coverage_skip)

@@ -50,5 +50,5 @@ func (s *ListSuite) TestList(c *check.C) {
     c.Check(detail, check.Equals, "Details")
     _, deleteErr := list.DeleteTaskByID(task.ID)
     c.Check(deleteErr, check.IsNil)
-    list.Stop()
+    list.Stop()
 }

@@ -19,8 +19,8 @@ func (s *ConfigSuite) TestLoadConfig(c *C) {
     _, _ = f.WriteString(configFile)
     _ = f.Close()
 
-    // start with empty config
-    s.config = ConfigStructure{}
+    // start with empty config
+    s.config = ConfigStructure{}
 
     err := LoadConfig(configname, &s.config)
     c.Assert(err, IsNil)
@@ -32,8 +32,8 @@ func (s *ConfigSuite) TestLoadConfig(c *C) {
 func (s *ConfigSuite) TestSaveConfig(c *C) {
     configname := filepath.Join(c.MkDir(), "aptly.json2")
 
-    // start with empty config
-    s.config = ConfigStructure{}
+    // start with empty config
+    s.config = ConfigStructure{}
 
     s.config.RootDir = "/tmp/aptly"
     s.config.DownloadConcurrency = 5
@@ -71,93 +71,93 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
     _, _ = f.Read(buf)
 
     c.Check(string(buf), Equals, ""+
-        "{\n" +
-        " \"rootDir\": \"/tmp/aptly\",\n" +
-        " \"logLevel\": \"info\",\n" +
-        " \"logFormat\": \"json\",\n" +
-        " \"databaseOpenAttempts\": 5,\n" +
-        " \"architectures\": null,\n" +
-        " \"skipLegacyPool\": false,\n" +
-        " \"dependencyFollowSuggests\": false,\n" +
-        " \"dependencyFollowRecommends\": false,\n" +
-        " \"dependencyFollowAllVariants\": false,\n" +
-        " \"dependencyFollowSource\": false,\n" +
-        " \"dependencyVerboseResolve\": false,\n" +
-        " \"ppaDistributorID\": \"\",\n" +
-        " \"ppaCodename\": \"\",\n" +
-        " \"serveInAPIMode\": false,\n" +
-        " \"enableMetricsEndpoint\": false,\n" +
-        " \"enableSwaggerEndpoint\": false,\n" +
-        " \"AsyncAPI\": false,\n" +
-        " \"databaseBackend\": {\n" +
-        " \"type\": \"\",\n" +
-        " \"dbPath\": \"\",\n" +
-        " \"url\": \"\"\n" +
-        " },\n" +
-        " \"downloader\": \"\",\n" +
-        " \"downloadConcurrency\": 5,\n" +
-        " \"downloadSpeedLimit\": 0,\n" +
-        " \"downloadRetries\": 0,\n" +
-        " \"downloadSourcePackages\": false,\n" +
-        " \"gpgProvider\": \"gpg\",\n" +
-        " \"gpgDisableSign\": false,\n" +
-        " \"gpgDisableVerify\": false,\n" +
-        " \"skipContentsPublishing\": false,\n" +
-        " \"skipBz2Publishing\": false,\n" +
-        " \"FileSystemPublishEndpoints\": {\n" +
-        " \"test\": {\n" +
-        " \"rootDir\": \"/opt/aptly-publish\",\n" +
-        " \"linkMethod\": \"\",\n" +
-        " \"verifyMethod\": \"\"\n" +
-        " }\n" +
-        " },\n" +
-        " \"S3PublishEndpoints\": {\n" +
-        " \"test\": {\n" +
-        " \"region\": \"us-east-1\",\n" +
-        " \"bucket\": \"repo\",\n" +
-        " \"prefix\": \"\",\n" +
-        " \"acl\": \"\",\n" +
-        " \"awsAccessKeyID\": \"\",\n" +
-        " \"awsSecretAccessKey\": \"\",\n" +
-        " \"awsSessionToken\": \"\",\n" +
-        " \"endpoint\": \"\",\n" +
-        " \"storageClass\": \"\",\n" +
-        " \"encryptionMethod\": \"\",\n" +
-        " \"plusWorkaround\": false,\n" +
-        " \"disableMultiDel\": false,\n" +
-        " \"forceSigV2\": false,\n" +
-        " \"forceVirtualHostedStyle\": false,\n" +
-        " \"debug\": false\n" +
-        " }\n" +
-        " },\n" +
-        " \"SwiftPublishEndpoints\": {\n" +
-        " \"test\": {\n" +
-        " \"container\": \"repo\",\n" +
-        " \"prefix\": \"\",\n" +
-        " \"osname\": \"\",\n" +
-        " \"password\": \"\",\n" +
-        " \"tenant\": \"\",\n" +
-        " \"tenantid\": \"\",\n" +
-        " \"domain\": \"\",\n" +
-        " \"domainid\": \"\",\n" +
-        " \"tenantdomain\": \"\",\n" +
-        " \"tenantdomainid\": \"\",\n" +
-        " \"authurl\": \"\"\n" +
-        " }\n" +
-        " },\n" +
-        " \"AzurePublishEndpoints\": {\n" +
-        " \"test\": {\n" +
-        " \"container\": \"repo\",\n" +
-        " \"prefix\": \"\",\n" +
-        " \"accountName\": \"\",\n" +
-        " \"accountKey\": \"\",\n" +
-        " \"endpoint\": \"\"\n" +
-        " }\n" +
-        " },\n" +
-        " \"packagePoolStorage\": {\n" +
-        " \"type\": \"local\",\n" +
-        " \"path\": \"/tmp/aptly-pool\"\n" +
-        " }\n" +
+        "{\n"+
+        " \"rootDir\": \"/tmp/aptly\",\n"+
+        " \"logLevel\": \"info\",\n"+
+        " \"logFormat\": \"json\",\n"+
+        " \"databaseOpenAttempts\": 5,\n"+
+        " \"architectures\": null,\n"+
+        " \"skipLegacyPool\": false,\n"+
+        " \"dependencyFollowSuggests\": false,\n"+
+        " \"dependencyFollowRecommends\": false,\n"+
+        " \"dependencyFollowAllVariants\": false,\n"+
+        " \"dependencyFollowSource\": false,\n"+
+        " \"dependencyVerboseResolve\": false,\n"+
+        " \"ppaDistributorID\": \"\",\n"+
+        " \"ppaCodename\": \"\",\n"+
+        " \"serveInAPIMode\": false,\n"+
+        " \"enableMetricsEndpoint\": false,\n"+
+        " \"enableSwaggerEndpoint\": false,\n"+
+        " \"AsyncAPI\": false,\n"+
+        " \"databaseBackend\": {\n"+
+        " \"type\": \"\",\n"+
+        " \"dbPath\": \"\",\n"+
+        " \"url\": \"\"\n"+
+        " },\n"+
+        " \"downloader\": \"\",\n"+
+        " \"downloadConcurrency\": 5,\n"+
+        " \"downloadSpeedLimit\": 0,\n"+
+        " \"downloadRetries\": 0,\n"+
+        " \"downloadSourcePackages\": false,\n"+
+        " \"gpgProvider\": \"gpg\",\n"+
+        " \"gpgDisableSign\": false,\n"+
+        " \"gpgDisableVerify\": false,\n"+
+        " \"skipContentsPublishing\": false,\n"+
+        " \"skipBz2Publishing\": false,\n"+
+        " \"FileSystemPublishEndpoints\": {\n"+
+        " \"test\": {\n"+
+        " \"rootDir\": \"/opt/aptly-publish\",\n"+
+        " \"linkMethod\": \"\",\n"+
+        " \"verifyMethod\": \"\"\n"+
+        " }\n"+
+        " },\n"+
+        " \"S3PublishEndpoints\": {\n"+
+        " \"test\": {\n"+
+        " \"region\": \"us-east-1\",\n"+
+        " \"bucket\": \"repo\",\n"+
+        " \"prefix\": \"\",\n"+
+        " \"acl\": \"\",\n"+
+        " \"awsAccessKeyID\": \"\",\n"+
+        " \"awsSecretAccessKey\": \"\",\n"+
+        " \"awsSessionToken\": \"\",\n"+
+        " \"endpoint\": \"\",\n"+
+        " \"storageClass\": \"\",\n"+
+        " \"encryptionMethod\": \"\",\n"+
+        " \"plusWorkaround\": false,\n"+
+        " \"disableMultiDel\": false,\n"+
+        " \"forceSigV2\": false,\n"+
+        " \"forceVirtualHostedStyle\": false,\n"+
+        " \"debug\": false\n"+
+        " }\n"+
+        " },\n"+
+        " \"SwiftPublishEndpoints\": {\n"+
+        " \"test\": {\n"+
+        " \"container\": \"repo\",\n"+
+        " \"prefix\": \"\",\n"+
+        " \"osname\": \"\",\n"+
+        " \"password\": \"\",\n"+
+        " \"tenant\": \"\",\n"+
+        " \"tenantid\": \"\",\n"+
+        " \"domain\": \"\",\n"+
+        " \"domainid\": \"\",\n"+
+        " \"tenantdomain\": \"\",\n"+
+        " \"tenantdomainid\": \"\",\n"+
+        " \"authurl\": \"\"\n"+
+        " }\n"+
+        " },\n"+
+        " \"AzurePublishEndpoints\": {\n"+
+        " \"test\": {\n"+
+        " \"container\": \"repo\",\n"+
+        " \"prefix\": \"\",\n"+
+        " \"accountName\": \"\",\n"+
+        " \"accountKey\": \"\",\n"+
+        " \"endpoint\": \"\"\n"+
+        " }\n"+
+        " },\n"+
+        " \"packagePoolStorage\": {\n"+
+        " \"type\": \"local\",\n"+
+        " \"path\": \"/tmp/aptly-pool\"\n"+
+        " }\n"+
         "}")
 }
@@ -167,8 +167,8 @@ func (s *ConfigSuite) TestLoadYAMLConfig(c *C) {
     _, _ = f.WriteString(configFileYAML)
     _ = f.Close()
 
-    // start with empty config
-    s.config = ConfigStructure{}
+    // start with empty config
+    s.config = ConfigStructure{}
 
     err := LoadConfig(configname, &s.config)
     c.Assert(err, IsNil)
@@ -183,8 +183,8 @@ func (s *ConfigSuite) TestLoadYAMLErrorConfig(c *C) {
     _, _ = f.WriteString(configFileYAMLError)
     _ = f.Close()
 
-    // start with empty config
-    s.config = ConfigStructure{}
+    // start with empty config
+    s.config = ConfigStructure{}
 
     err := LoadConfig(configname, &s.config)
     c.Assert(err.Error(), Equals, "invalid yaml (unknown pool storage type: invalid) or json (invalid character 'p' looking for beginning of value)")
@@ -196,13 +196,13 @@ func (s *ConfigSuite) TestSaveYAMLConfig(c *C) {
     _, _ = f.WriteString(configFileYAML)
     _ = f.Close()
 
-    // start with empty config
-    s.config = ConfigStructure{}
+    // start with empty config
+    s.config = ConfigStructure{}
 
     err := LoadConfig(configname, &s.config)
     c.Assert(err, IsNil)
 
-    err = SaveConfigYAML(configname, &s.config)
+    err = SaveConfigYAML(configname, &s.config)
     c.Assert(err, IsNil)
 
     f, _ = os.Open(configname)
@@ -218,17 +218,17 @@ func (s *ConfigSuite) TestSaveYAMLConfig(c *C) {
 }
 
 func (s *ConfigSuite) TestSaveYAML2Config(c *C) {
-    // start with empty config
-    s.config = ConfigStructure{}
+    // start with empty config
+    s.config = ConfigStructure{}
 
     s.config.PackagePoolStorage.Local = &LocalPoolStorage{"/tmp/aptly-pool"}
-    s.config.PackagePoolStorage.Azure = nil
+    s.config.PackagePoolStorage.Azure = nil
 
     configname := filepath.Join(c.MkDir(), "aptly.yaml4")
-    err := SaveConfigYAML(configname, &s.config)
+    err := SaveConfigYAML(configname, &s.config)
     c.Assert(err, IsNil)
 
-    f, _ := os.Open(configname)
+    f, _ := os.Open(configname)
     defer func() {
         _ = f.Close()
     }()
@@ -237,44 +237,44 @@ func (s *ConfigSuite) TestSaveYAML2Config(c *C) {
     buf := make([]byte, st.Size())
     _, _ = f.Read(buf)
 
-    c.Check(string(buf), Equals, "" +
-        "root_dir: \"\"\n" +
-        "log_level: \"\"\n" +
-        "log_format: \"\"\n" +
-        "database_open_attempts: 0\n" +
-        "architectures: []\n" +
-        "skip_legacy_pool: false\n" +
-        "dep_follow_suggests: false\n" +
-        "dep_follow_recommends: false\n" +
-        "dep_follow_all_variants: false\n" +
-        "dep_follow_source: false\n" +
-        "dep_verboseresolve: false\n" +
-        "ppa_distributor_id: \"\"\n" +
-        "ppa_codename: \"\"\n" +
-        "serve_in_api_mode: false\n" +
-        "enable_metrics_endpoint: false\n" +
-        "enable_swagger_endpoint: false\n" +
-        "async_api: false\n" +
-        "database_backend:\n" +
-        " type: \"\"\n" +
-        " db_path: \"\"\n" +
-        " url: \"\"\n" +
-        "downloader: \"\"\n" +
-        "download_concurrency: 0\n" +
-        "download_limit: 0\n" +
-        "download_retries: 0\n" +
-        "download_sourcepackages: false\n" +
-        "gpg_provider: \"\"\n" +
-        "gpg_disable_sign: false\n" +
-        "gpg_disable_verify: false\n" +
-        "skip_contents_publishing: false\n" +
-        "skip_bz2_publishing: false\n" +
-        "filesystem_publish_endpoints: {}\n" +
-        "s3_publish_endpoints: {}\n" +
-        "swift_publish_endpoints: {}\n" +
-        "azure_publish_endpoints: {}\n" +
-        "packagepool_storage:\n" +
-        " type: local\n" +
+    c.Check(string(buf), Equals, ""+
+        "root_dir: \"\"\n"+
+        "log_level: \"\"\n"+
+        "log_format: \"\"\n"+
+        "database_open_attempts: 0\n"+
+        "architectures: []\n"+
+        "skip_legacy_pool: false\n"+
+        "dep_follow_suggests: false\n"+
+        "dep_follow_recommends: false\n"+
+        "dep_follow_all_variants: false\n"+
+        "dep_follow_source: false\n"+
+        "dep_verboseresolve: false\n"+
+        "ppa_distributor_id: \"\"\n"+
+        "ppa_codename: \"\"\n"+
+        "serve_in_api_mode: false\n"+
+        "enable_metrics_endpoint: false\n"+
+        "enable_swagger_endpoint: false\n"+
+        "async_api: false\n"+
+        "database_backend:\n"+
+        " type: \"\"\n"+
+        " db_path: \"\"\n"+
+        " url: \"\"\n"+
+        "downloader: \"\"\n"+
+        "download_concurrency: 0\n"+
+        "download_limit: 0\n"+
+        "download_retries: 0\n"+
+        "download_sourcepackages: false\n"+
+        "gpg_provider: \"\"\n"+
+        "gpg_disable_sign: false\n"+
+        "gpg_disable_verify: false\n"+
+        "skip_contents_publishing: false\n"+
+        "skip_bz2_publishing: false\n"+
+        "filesystem_publish_endpoints: {}\n"+
+        "s3_publish_endpoints: {}\n"+
+        "swift_publish_endpoints: {}\n"+
+        "azure_publish_endpoints: {}\n"+
+        "packagepool_storage:\n"+
+        " type: local\n"+
         " path: /tmp/aptly-pool\n")
 }
@@ -283,8 +283,8 @@ func (s *ConfigSuite) TestLoadEmptyConfig(c *C) {
     f, _ := os.Create(configname)
     _ = f.Close()
 
-    // start with empty config
-    s.config = ConfigStructure{}
+    // start with empty config
+    s.config = ConfigStructure{}
 
     err := LoadConfig(configname, &s.config)
     c.Assert(err.Error(), Equals, "invalid yaml (EOF) or json (EOF)")

@@ -34,7 +34,7 @@ func (s *UtilsSuite) TestDirIsAccessibleNotExist(c *C) {
 func (s *UtilsSuite) TestDirIsAccessibleNotAccessible(c *C) {
     accessible := DirIsAccessible(s.tempfile.Name())
     if accessible == nil {
-        c.Fatalf("Test dir should not be accessible: %s", s.tempfile.Name())
+        c.Fatalf("Test dir should not be accessible: %s", s.tempfile.Name())
     }
     c.Check(accessible.Error(), Equals, fmt.Errorf("'%s' is inaccessible, check access rights", s.tempfile.Name()).Error())
 }