Merge pull request #1504 from bwitt/error_outofspace

Return error on out of space
This commit is contained in:
André Roth
2026-01-16 14:49:20 +01:00
committed by GitHub
7 changed files with 1003 additions and 35 deletions
+64 -19
View File
@@ -17,8 +17,32 @@ env:
DEBIAN_FRONTEND: noninteractive DEBIAN_FRONTEND: noninteractive
jobs: jobs:
unit-test:
name: "Unit Tests"
runs-on: ubuntu-22.04
continue-on-error: false
timeout-minutes: 30
steps:
- name: "Checkout Repository"
uses: actions/checkout@v4
with:
# fetch the whole repo for `git describe` to work
fetch-depth: 0
- name: "Docker Image"
run: |
make docker-image
- name: "Unit Tests"
run: |
make docker-unit-tests
mkdir -p out/coverage
mv unit.out out/coverage/
- uses: actions/upload-artifact@v4
with:
name: unit-tests-coverage
path: out/
test: test:
name: "Test (Ubuntu 22.04)" name: "System Test"
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
continue-on-error: false continue-on-error: false
timeout-minutes: 30 timeout-minutes: 30
@@ -63,21 +87,10 @@ jobs:
with: with:
directory: ${{ runner.temp }} directory: ${{ runner.temp }}
- name: "Run Unit Tests"
env:
RUN_LONG_TESTS: 'yes'
AZURE_STORAGE_ENDPOINT: "http://127.0.0.1:10000/devstoreaccount1"
AZURE_STORAGE_ACCOUNT: "devstoreaccount1"
AZURE_STORAGE_ACCESS_KEY: "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
run: |
sudo mkdir -p /srv ; sudo chown runner /srv
COVERAGE_DIR=${{ runner.temp }} make test
- name: "Run Benchmark" - name: "Run Benchmark"
run: | run: |
COVERAGE_DIR=${{ runner.temp }} make bench mkdir -p out/coverage
COVERAGE_DIR=$PWD/out/coverage make bench
- name: "Run System Tests" - name: "Run System Tests"
env: env:
@@ -89,22 +102,53 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
run: | run: |
sudo mkdir -p /srv ; sudo chown runner /srv sudo mkdir -p /srv ; sudo chown runner /srv
COVERAGE_DIR=${{ runner.temp }} make system-test mkdir -p out/coverage
COVERAGE_DIR=$PWD/out/coverage make system-test
- uses: actions/upload-artifact@v4
with:
name: system-tests-coverage
path: out/
coverage:
name: "Upload Coverage"
runs-on: ubuntu-22.04
continue-on-error: false
timeout-minutes: 30
needs:
- unit-test
- test
steps:
- name: "Checkout Repository"
uses: actions/checkout@v4
- name: "Download Unit Test Coverage"
uses: actions/download-artifact@v4
with:
name: unit-tests-coverage
- name: "Download System Test Coverage"
uses: actions/download-artifact@v4
with:
name: system-tests-coverage
- name: "Merge Code Coverage" - name: "Merge Code Coverage"
run: | run: |
go install github.com/wadey/gocovmerge@v0.0.0-20160331181800-b5bfa59ec0ad go install github.com/wadey/gocovmerge@v0.0.0-20160331181800-b5bfa59ec0ad
~/go/bin/gocovmerge unit.out ${{ runner.temp }}/*.out > coverage.txt ~/go/bin/gocovmerge coverage/*.out > coverage.txt
- name: "Upload Code Coverage" - name: "Upload Code Coverage"
uses: codecov/codecov-action@v2 uses: codecov/codecov-action@v5
with: with:
token: ${{ secrets.CODECOV_TOKEN }} token: ${{ secrets.CODECOV_TOKEN }}
files: coverage.txt files: coverage.txt
fail_ci_if_error: true
ci-debian-build: ci-debian-build:
name: "Build" name: "Build"
needs: test needs:
- coverage
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
fail-fast: false fail-fast: false
@@ -224,7 +268,8 @@ jobs:
ci-binary-build: ci-binary-build:
name: "Build" name: "Build"
needs: test needs:
- coverage
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
+1
View File
@@ -78,3 +78,4 @@ List of contributors, in chronological order:
* Juan Calderon-Perez (https://github.com/gaby) * Juan Calderon-Perez (https://github.com/gaby)
* Ato Araki (https://github.com/atotto) * Ato Araki (https://github.com/atotto)
* Roman Lebedev (https://github.com/LebedevRI) * Roman Lebedev (https://github.com/LebedevRI)
* Brian Witt (https://github.com/bwitt)
+12 -12
View File
@@ -8,7 +8,7 @@ GOOS=$(shell go env GOHOSTOS)
GOARCH=$(shell go env GOHOSTARCH) GOARCH=$(shell go env GOHOSTARCH)
export PODMAN_USERNS = keep-id export PODMAN_USERNS = keep-id
DOCKER_RUN = docker run --security-opt label=disable -it --user 0:0 --rm -v ${PWD}:/work/src DOCKER_RUN = docker run --security-opt label=disable --user 0:0 --rm -v ${PWD}:/work/src
# Setting TZ for certificates # Setting TZ for certificates
export TZ=UTC export TZ=UTC
@@ -185,16 +185,16 @@ docker-image-no-cache: ## Build aptly-dev docker image (no cache)
@docker build --no-cache -f system/Dockerfile . -t aptly-dev @docker build --no-cache -f system/Dockerfile . -t aptly-dev
docker-build: ## Build aptly in docker container docker-build: ## Build aptly in docker container
@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper build @$(DOCKER_RUN) -t aptly-dev /work/src/system/docker-wrapper build
docker-shell: ## Run aptly and other commands in docker container docker-shell: ## Run aptly and other commands in docker container
@$(DOCKER_RUN) -p 3142:3142 aptly-dev /work/src/system/docker-wrapper || true @$(DOCKER_RUN) -it -p 3142:3142 aptly-dev /work/src/system/docker-wrapper || true
docker-deb: ## Build debian packages in docker container docker-deb: ## Build debian packages in docker container
@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper dpkg DEBARCH=amd64 @$(DOCKER_RUN) -t aptly-dev /work/src/system/docker-wrapper dpkg DEBARCH=amd64
docker-unit-test: ## Run unit tests in docker container (add TEST=regex to specify which tests to run) docker-unit-tests: ## Run unit tests in docker container (add TEST=regex to specify which tests to run)
@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper \ $(DOCKER_RUN) -t --tmpfs /smallfs:rw,size=1m aptly-dev /work/src/system/docker-wrapper \
azurite-start \ azurite-start \
AZURE_STORAGE_ENDPOINT=http://127.0.0.1:10000/devstoreaccount1 \ AZURE_STORAGE_ENDPOINT=http://127.0.0.1:10000/devstoreaccount1 \
AZURE_STORAGE_ACCOUNT=devstoreaccount1 \ AZURE_STORAGE_ACCOUNT=devstoreaccount1 \
@@ -203,7 +203,7 @@ docker-unit-test: ## Run unit tests in docker container (add TEST=regex to spec
azurite-stop azurite-stop
docker-system-test: ## Run system tests in docker container (add TEST=t04_mirror or TEST=UpdateMirror26Test to run only specific tests) docker-system-test: ## Run system tests in docker container (add TEST=t04_mirror or TEST=UpdateMirror26Test to run only specific tests)
@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper \ @$(DOCKER_RUN) -t aptly-dev /work/src/system/docker-wrapper \
azurite-start \ azurite-start \
AZURE_STORAGE_ENDPOINT=http://127.0.0.1:10000/devstoreaccount1 \ AZURE_STORAGE_ENDPOINT=http://127.0.0.1:10000/devstoreaccount1 \
AZURE_STORAGE_ACCOUNT=devstoreaccount1 \ AZURE_STORAGE_ACCOUNT=devstoreaccount1 \
@@ -214,16 +214,16 @@ docker-system-test: ## Run system tests in docker container (add TEST=t04_mirro
azurite-stop azurite-stop
docker-serve: ## Run development server (auto recompiling) on http://localhost:3142 docker-serve: ## Run development server (auto recompiling) on http://localhost:3142
@$(DOCKER_RUN) -p 3142:3142 -v /tmp/cache-go-aptly:/var/lib/aptly/.cache/go-build aptly-dev /work/src/system/docker-wrapper serve || true @$(DOCKER_RUN) -it -p 3142:3142 -v /tmp/cache-go-aptly:/var/lib/aptly/.cache/go-build aptly-dev /work/src/system/docker-wrapper serve || true
docker-lint: ## Run golangci-lint in docker container docker-lint: ## Run golangci-lint in docker container
@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper lint @$(DOCKER_RUN) -t aptly-dev /work/src/system/docker-wrapper lint
docker-binaries: ## Build binary releases (FreeBSD, macOS, Linux generic) in docker container docker-binaries: ## Build binary releases (FreeBSD, macOS, Linux generic) in docker container
@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper binaries @$(DOCKER_RUN) -t aptly-dev /work/src/system/docker-wrapper binaries
docker-man: ## Create man page in docker container docker-man: ## Create man page in docker container
@$(DOCKER_RUN) aptly-dev /work/src/system/docker-wrapper man @$(DOCKER_RUN) -t aptly-dev /work/src/system/docker-wrapper man
mem.png: mem.dat mem.gp mem.png: mem.dat mem.gp
gnuplot mem.gp gnuplot mem.gp
@@ -240,4 +240,4 @@ clean: ## remove local build and module cache
rm -f unit.out aptly.test VERSION docs/docs.go docs/swagger.json docs/swagger.yaml docs/swagger.conf rm -f unit.out aptly.test VERSION docs/docs.go docs/swagger.json docs/swagger.yaml docs/swagger.conf
find system/ -type d -name __pycache__ -exec rm -rf {} \; 2>/dev/null || true find system/ -type d -name __pycache__ -exec rm -rf {} \; 2>/dev/null || true
.PHONY: help man prepare swagger version binaries build docker-release docker-system-test docker-unit-test docker-lint docker-build docker-image docker-man docker-shell docker-serve clean releasetype dpkg serve flake8 .PHONY: help man prepare swagger version binaries build docker-release docker-system-tests docker-unit-test docker-lint docker-build docker-image docker-man docker-shell docker-serve clean releasetype dpkg serve flake8
+41 -2
View File
@@ -13,6 +13,10 @@ import (
"github.com/saracen/walker" "github.com/saracen/walker"
) )
// syncFile is a seam to allow tests to force fsync failures (e.g. ENOSPC).
// In production it calls (*os.File).Sync().
var syncFile = func(f *os.File) error { return f.Sync() }
func verifyPath(path string) bool { func verifyPath(path string) bool {
path = filepath.Clean(path) path = filepath.Clean(path)
for _, part := range strings.Split(path, string(filepath.Separator)) { for _, part := range strings.Split(path, string(filepath.Separator)) {
@@ -114,34 +118,69 @@ func apiFilesUpload(c *gin.Context) {
} }
stored := []string{} stored := []string{}
openFiles := []*os.File{}
// Write all files first
for _, files := range c.Request.MultipartForm.File { for _, files := range c.Request.MultipartForm.File {
for _, file := range files { for _, file := range files {
src, err := file.Open() src, err := file.Open()
if err != nil { if err != nil {
// Close any files we've opened
for _, f := range openFiles {
_ = f.Close()
}
AbortWithJSONError(c, 500, err) AbortWithJSONError(c, 500, err)
return return
} }
defer func() { _ = src.Close() }()
destPath := filepath.Join(path, filepath.Base(file.Filename)) destPath := filepath.Join(path, filepath.Base(file.Filename))
dst, err := os.Create(destPath) dst, err := os.Create(destPath)
if err != nil { if err != nil {
_ = src.Close()
// Close any files we've opened
for _, f := range openFiles {
_ = f.Close()
}
AbortWithJSONError(c, 500, err) AbortWithJSONError(c, 500, err)
return return
} }
defer func() { _ = dst.Close() }()
_, err = io.Copy(dst, src) _, err = io.Copy(dst, src)
_ = src.Close()
if err != nil { if err != nil {
_ = dst.Close()
// Close any files we've opened
for _, f := range openFiles {
_ = f.Close()
}
AbortWithJSONError(c, 500, err) AbortWithJSONError(c, 500, err)
return return
} }
// Keep file open for batch sync
openFiles = append(openFiles, dst)
stored = append(stored, filepath.Join(c.Params.ByName("dir"), filepath.Base(file.Filename))) stored = append(stored, filepath.Join(c.Params.ByName("dir"), filepath.Base(file.Filename)))
} }
} }
// Sync all files at once to catch ENOSPC errors
for i, dst := range openFiles {
err := syncFile(dst)
if err != nil {
// Close all files
for _, f := range openFiles {
_ = f.Close()
}
AbortWithJSONError(c, 500, fmt.Errorf("error syncing file %s: %s", stored[i], err))
return
}
}
// Close all files
for _, dst := range openFiles {
_ = dst.Close()
}
apiFilesUploadedCounter.WithLabelValues(c.Params.ByName("dir")).Inc() apiFilesUploadedCounter.WithLabelValues(c.Params.ByName("dir")).Inc()
c.JSON(200, stored) c.JSON(200, stored)
} }
+476
View File
@@ -0,0 +1,476 @@
package api
import (
"bytes"
"encoding/json"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/aptly-dev/aptly/aptly"
ctx "github.com/aptly-dev/aptly/context"
"github.com/gin-gonic/gin"
"github.com/smira/flag"
. "gopkg.in/check.v1"
)
// FilesUploadDiskFullSuite exercises the /api/files endpoints, in
// particular the upload path that fsyncs written files so that
// out-of-space (ENOSPC) conditions surface as API errors.
type FilesUploadDiskFullSuite struct {
	aptlyContext *ctx.AptlyContext // per-test context backed by a fresh rootDir
	flags        *flag.FlagSet     // fake CLI flags feeding ctx.NewContext
	configFile   *os.File          // temp config file, removed in TearDownTest
	router       http.Handler      // API router under test
}

var _ = Suite(&FilesUploadDiskFullSuite{})
// SetUpTest builds a fresh aptly context and API router for every test,
// backed by a temporary root directory and a temporary config file.
func (s *FilesUploadDiskFullSuite) SetUpTest(c *C) {
	aptly.Version = "testVersion"

	cfg, err := json.Marshal(gin.H{
		"architectures": []string{},
		"rootDir":       c.MkDir(),
	})
	c.Assert(err, IsNil)

	tmp, err := os.CreateTemp("", "aptly")
	c.Assert(err, IsNil)
	s.configFile = tmp
	_, err = tmp.Write(cfg)
	c.Assert(err, IsNil)
	_ = tmp.Close()

	// The context reads its settings from fake command-line flags.
	fs := flag.NewFlagSet("fakeFlags", flag.ContinueOnError)
	fs.Bool("no-lock", false, "dummy")
	fs.Int("db-open-attempts", 3, "dummy")
	fs.String("config", s.configFile.Name(), "dummy")
	fs.String("architectures", "", "dummy")
	s.flags = fs

	appCtx, err := ctx.NewContext(s.flags)
	c.Assert(err, IsNil)
	s.aptlyContext = appCtx
	s.router = Router(appCtx)
	// handlers read the package-level context variable
	context = appCtx
}
// TearDownTest releases per-test resources created by SetUpTest: the
// temporary config file and the aptly context.
func (s *FilesUploadDiskFullSuite) TearDownTest(c *C) {
	if f := s.configFile; f != nil {
		_ = os.Remove(f.Name())
	}
	if actx := s.aptlyContext; actx != nil {
		actx.Shutdown()
	}
}
// TestUploadSuccessWithSync uploads a single small file and verifies the
// API answers 200 and the file lands on disk with identical content.
func (s *FilesUploadDiskFullSuite) TestUploadSuccessWithSync(c *C) {
	payload := []byte("test file content for upload")

	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	fw, err := mw.CreateFormFile("file", "testfile.txt")
	c.Assert(err, IsNil)
	_, err = fw.Write(payload)
	c.Assert(err, IsNil)
	c.Assert(mw.Close(), IsNil)

	req, err := http.NewRequest("POST", "/api/files/testdir", &buf)
	c.Assert(err, IsNil)
	req.Header.Set("Content-Type", mw.FormDataContentType())

	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 200)

	// the file must end up under <rootDir>/upload/<dir>/ byte-for-byte
	onDisk := filepath.Join(s.aptlyContext.Config().GetRootDir(), "upload", "testdir", "testfile.txt")
	got, err := os.ReadFile(onDisk)
	c.Assert(err, IsNil)
	c.Check(got, DeepEquals, payload)
}
// TestUploadVerifiesFileIntegrity uploads a 10 kB payload and checks
// both the length and the exact content of the stored file.
func (s *FilesUploadDiskFullSuite) TestUploadVerifiesFileIntegrity(c *C) {
	payload := bytes.Repeat([]byte("A"), 10000)

	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	fw, err := mw.CreateFormFile("file", "largefile.bin")
	c.Assert(err, IsNil)
	_, err = io.Copy(fw, bytes.NewReader(payload))
	c.Assert(err, IsNil)
	c.Assert(mw.Close(), IsNil)

	req, err := http.NewRequest("POST", "/api/files/testdir2", &buf)
	c.Assert(err, IsNil)
	req.Header.Set("Content-Type", mw.FormDataContentType())

	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 200)

	onDisk := filepath.Join(s.aptlyContext.Config().GetRootDir(), "upload", "testdir2", "largefile.bin")
	got, err := os.ReadFile(onDisk)
	c.Assert(err, IsNil)
	c.Check(len(got), Equals, len(payload))
	c.Check(got, DeepEquals, payload)
}
// TestUploadMultipleFilesWithBatchSync sends several files in one
// multipart request (exercising the batch fsync path) and verifies
// every file is stored byte-for-byte.
func (s *FilesUploadDiskFullSuite) TestUploadMultipleFilesWithBatchSync(c *C) {
	want := map[string][]byte{
		"file1.txt": []byte("content of file 1"),
		"file2.txt": bytes.Repeat([]byte("B"), 5000),
		"file3.deb": []byte("debian package content"),
	}

	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	for name, data := range want {
		fw, err := mw.CreateFormFile("file", name)
		c.Assert(err, IsNil)
		_, err = fw.Write(data)
		c.Assert(err, IsNil)
	}
	c.Assert(mw.Close(), IsNil)

	req, err := http.NewRequest("POST", "/api/files/multitest", &buf)
	c.Assert(err, IsNil)
	req.Header.Set("Content-Type", mw.FormDataContentType())

	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 200)

	dir := filepath.Join(s.aptlyContext.Config().GetRootDir(), "upload", "multitest")
	for name, data := range want {
		got, err := os.ReadFile(filepath.Join(dir, name))
		c.Assert(err, IsNil, Commentf("Failed to read %s", name))
		c.Check(got, DeepEquals, data, Commentf("Content mismatch for %s", name))
	}
}
// TestUploadReturnsErrorOnSyncFailure stubs the syncFile seam so fsync
// on one of the uploaded files reports ENOSPC, and expects the API to
// answer 500 with a descriptive "error syncing file" message.
func (s *FilesUploadDiskFullSuite) TestUploadReturnsErrorOnSyncFailure(c *C) {
	realSync := syncFile
	defer func() { syncFile = realSync }()
	syncFile = func(f *os.File) error {
		// fail only the second file so the batch-sync loop is exercised
		if filepath.Base(f.Name()) == "syncfail.txt" {
			return syscall.ENOSPC
		}
		return nil
	}

	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	for _, p := range []struct{ name, data string }{
		{"ok.txt", "ok"},
		{"syncfail.txt", "will fail on sync"},
	} {
		fw, err := mw.CreateFormFile("file", p.name)
		c.Assert(err, IsNil)
		_, err = fw.Write([]byte(p.data))
		c.Assert(err, IsNil)
	}
	c.Assert(mw.Close(), IsNil)

	req, err := http.NewRequest("POST", "/api/files/syncfaildir", &buf)
	c.Assert(err, IsNil)
	req.Header.Set("Content-Type", mw.FormDataContentType())

	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 500)
	c.Check(strings.Contains(rec.Body.String(), "error syncing file"), Equals, true)
}
// TestVerifyPath checks the path sanitizer: relative traversal ("..")
// and the bare current directory are rejected, plain paths pass.
func (s *FilesUploadDiskFullSuite) TestVerifyPath(c *C) {
	cases := []struct {
		path string
		ok   bool
	}{
		{"a/b/c", true},
		{"../x", false},
		{"./x", true},
		{"..", false},
		{".", false},
	}
	for _, tc := range cases {
		c.Check(verifyPath(tc.path), Equals, tc.ok, Commentf("path %q", tc.path))
	}
}
// TestListDirsEmptyWhenUploadMissing expects an empty JSON array (not
// an error) when the upload root does not exist.
func (s *FilesUploadDiskFullSuite) TestListDirsEmptyWhenUploadMissing(c *C) {
	_ = os.RemoveAll(s.aptlyContext.UploadPath())

	req, err := http.NewRequest("GET", "/api/files", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)

	c.Assert(rec.Code, Equals, 200)
	c.Check(strings.TrimSpace(rec.Body.String()), Equals, "[]")
}
// TestListDirsReturnsDirectories verifies GET /api/files lists upload
// subdirectories while tolerating stray files at the top level.
func (s *FilesUploadDiskFullSuite) TestListDirsReturnsDirectories(c *C) {
	root := s.aptlyContext.UploadPath()
	for _, d := range []string{"d1", "d2"} {
		c.Assert(os.MkdirAll(filepath.Join(root, d), 0777), IsNil)
	}
	// a plain file at the top level must not break the listing
	c.Assert(os.WriteFile(filepath.Join(root, "rootfile"), []byte("x"), 0644), IsNil)

	req, err := http.NewRequest("GET", "/api/files", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 200)

	listing := rec.Body.String()
	c.Check(strings.Contains(listing, "d1"), Equals, true)
	c.Check(strings.Contains(listing, "d2"), Equals, true)
}
// TestListFilesNotFound expects 404 when listing a directory that was
// never created.
func (s *FilesUploadDiskFullSuite) TestListFilesNotFound(c *C) {
	rec := httptest.NewRecorder()
	req, err := http.NewRequest("GET", "/api/files/does-not-exist", nil)
	c.Assert(err, IsNil)
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 404)
}
// TestListFilesReturnsFiles verifies GET /api/files/<dir> reports the
// files present in that upload directory.
func (s *FilesUploadDiskFullSuite) TestListFilesReturnsFiles(c *C) {
	dir := filepath.Join(s.aptlyContext.UploadPath(), "dir")
	c.Assert(os.MkdirAll(dir, 0777), IsNil)
	for name, data := range map[string]string{"a.txt": "a", "b.txt": "b"} {
		c.Assert(os.WriteFile(filepath.Join(dir, name), []byte(data), 0644), IsNil)
	}

	req, err := http.NewRequest("GET", "/api/files/dir", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 200)

	listing := rec.Body.String()
	c.Check(strings.Contains(listing, "a.txt"), Equals, true)
	c.Check(strings.Contains(listing, "b.txt"), Equals, true)
}
// TestDeleteDirRemovesDirectory checks DELETE /api/files/<dir> removes
// the directory together with its contents.
func (s *FilesUploadDiskFullSuite) TestDeleteDirRemovesDirectory(c *C) {
	target := filepath.Join(s.aptlyContext.UploadPath(), "todel")
	c.Assert(os.MkdirAll(target, 0777), IsNil)
	c.Assert(os.WriteFile(filepath.Join(target, "a.txt"), []byte("a"), 0644), IsNil)

	req, err := http.NewRequest("DELETE", "/api/files/todel", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 200)

	_, err = os.Stat(target)
	c.Check(os.IsNotExist(err), Equals, true)
}
// TestDeleteFileRemovesFile checks DELETE /api/files/<dir>/<file>
// removes exactly that file.
func (s *FilesUploadDiskFullSuite) TestDeleteFileRemovesFile(c *C) {
	dir := filepath.Join(s.aptlyContext.UploadPath(), "todel2")
	c.Assert(os.MkdirAll(dir, 0777), IsNil)
	victim := filepath.Join(dir, "a.txt")
	c.Assert(os.WriteFile(victim, []byte("a"), 0644), IsNil)

	req, err := http.NewRequest("DELETE", "/api/files/todel2/a.txt", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 200)

	_, err = os.Stat(victim)
	c.Check(os.IsNotExist(err), Equals, true)
}
// TestDeleteFileNotFoundStillOk documents that deleting a missing file
// is treated as success (idempotent delete).
func (s *FilesUploadDiskFullSuite) TestDeleteFileNotFoundStillOk(c *C) {
	c.Assert(os.MkdirAll(filepath.Join(s.aptlyContext.UploadPath(), "todel3"), 0777), IsNil)

	req, err := http.NewRequest("DELETE", "/api/files/todel3/nope.txt", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 200)
}
// TestRejectsInvalidDir expects 400 when the directory segment is a
// traversal component.
func (s *FilesUploadDiskFullSuite) TestRejectsInvalidDir(c *C) {
	rec := httptest.NewRecorder()
	req, err := http.NewRequest("DELETE", "/api/files/..", nil)
	c.Assert(err, IsNil)
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 400)
}
// TestRejectsInvalidFileName expects 400 when the file segment is a
// traversal component, even for an existing directory.
func (s *FilesUploadDiskFullSuite) TestRejectsInvalidFileName(c *C) {
	c.Assert(os.MkdirAll(filepath.Join(s.aptlyContext.UploadPath(), "dirx"), 0777), IsNil)

	req, err := http.NewRequest("DELETE", "/api/files/dirx/..", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 400)
}
// TestListDirsEmptyIfUploadPathIsNotDir expects an empty listing when
// the upload root exists but is a regular file.
func (s *FilesUploadDiskFullSuite) TestListDirsEmptyIfUploadPathIsNotDir(c *C) {
	up := s.aptlyContext.UploadPath()
	_ = os.RemoveAll(up)
	c.Assert(os.WriteFile(up, []byte("not a dir"), 0644), IsNil)

	req, err := http.NewRequest("GET", "/api/files", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)

	c.Assert(rec.Code, Equals, 200)
	c.Check(strings.TrimSpace(rec.Body.String()), Equals, "[]")
}
// TestListFilesReturns500OnPermissionError verifies that listing an
// unreadable upload directory surfaces a 500 instead of an empty list.
func (s *FilesUploadDiskFullSuite) TestListFilesReturns500OnPermissionError(c *C) {
	// chmod 0 does not restrict root (the project's docker targets run
	// with --user 0:0), so the handler would succeed and this test would
	// fail spuriously — skip instead.
	if os.Geteuid() == 0 {
		c.Skip("permission checks are bypassed when running as root")
	}
	base := filepath.Join(s.aptlyContext.UploadPath(), "noperms")
	c.Assert(os.MkdirAll(base, 0777), IsNil)
	c.Assert(os.WriteFile(filepath.Join(base, "a.txt"), []byte("a"), 0644), IsNil)
	c.Assert(os.Chmod(base, 0), IsNil)
	// restore permissions so TearDownTest can clean up the tree
	defer func() { _ = os.Chmod(base, 0777) }()

	req, err := http.NewRequest("GET", "/api/files/noperms", nil)
	c.Assert(err, IsNil)
	w := httptest.NewRecorder()
	s.router.ServeHTTP(w, req)
	c.Assert(w.Code, Equals, 500)
}
// TestDeleteFileReturns500OnNonNotExistError targets the file-delete
// endpoint at a non-empty directory: os.Remove then fails with an error
// other than "not exist", which must map to a 500.
func (s *FilesUploadDiskFullSuite) TestDeleteFileReturns500OnNonNotExistError(c *C) {
	sub := filepath.Join(s.aptlyContext.UploadPath(), "dirisfile", "subdir")
	c.Assert(os.MkdirAll(sub, 0777), IsNil)
	c.Assert(os.WriteFile(filepath.Join(sub, "x"), []byte("x"), 0644), IsNil)

	req, err := http.NewRequest("DELETE", "/api/files/dirisfile/subdir", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 500)
}
// TestUploadBadMultipartReturns400 sends a body that does not match its
// declared multipart boundary and expects a 400.
func (s *FilesUploadDiskFullSuite) TestUploadBadMultipartReturns400(c *C) {
	req, err := http.NewRequest("POST", "/api/files/badmultipart", bytes.NewBufferString("not multipart"))
	c.Assert(err, IsNil)
	req.Header.Set("Content-Type", "multipart/form-data; boundary=missing")

	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 400)
}
// TestUploadRejectsInvalidDir expects 400 when uploading into a
// traversal directory name.
func (s *FilesUploadDiskFullSuite) TestUploadRejectsInvalidDir(c *C) {
	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	fw, err := mw.CreateFormFile("file", "a.txt")
	c.Assert(err, IsNil)
	_, err = fw.Write([]byte("x"))
	c.Assert(err, IsNil)
	c.Assert(mw.Close(), IsNil)

	req, err := http.NewRequest("POST", "/api/files/..", &buf)
	c.Assert(err, IsNil)
	req.Header.Set("Content-Type", mw.FormDataContentType())

	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 400)
}
// TestUploadReturns500IfUploadRootIsNotDir replaces the upload root with
// a regular file so directory creation fails and the API must answer 500.
func (s *FilesUploadDiskFullSuite) TestUploadReturns500IfUploadRootIsNotDir(c *C) {
	up := s.aptlyContext.UploadPath()
	_ = os.RemoveAll(up)
	c.Assert(os.WriteFile(up, []byte("not a dir"), 0644), IsNil)

	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	fw, err := mw.CreateFormFile("file", "a.txt")
	c.Assert(err, IsNil)
	_, err = fw.Write([]byte("x"))
	c.Assert(err, IsNil)
	c.Assert(mw.Close(), IsNil)

	req, err := http.NewRequest("POST", "/api/files/testdir", &buf)
	c.Assert(err, IsNil)
	req.Header.Set("Content-Type", mw.FormDataContentType())

	rec := httptest.NewRecorder()
	s.router.ServeHTTP(rec, req)
	c.Assert(rec.Code, Equals, 500)
}
// TestUploadReturns500OnFileOpenFailure covers the src.Open() error
// branch of the upload handler by injecting a *multipart.FileHeader
// with neither in-memory content nor a backing temp file, so Open()
// must fail.
// NOTE(review): this relies on the handler reading the pre-populated
// req.MultipartForm rather than re-parsing the (nil) request body —
// confirm against apiFilesUpload if the handler changes.
func (s *FilesUploadDiskFullSuite) TestUploadReturns500OnFileOpenFailure(c *C) {
	// Pre-populate MultipartForm to inject a FileHeader that fails on Open().
	form := &multipart.Form{
		File: map[string][]*multipart.FileHeader{
			"file": {{Filename: "broken.bin"}},
		},
	}
	req, err := http.NewRequest("POST", "/api/files/openfaildir", nil)
	c.Assert(err, IsNil)
	req.MultipartForm = form
	w := httptest.NewRecorder()
	s.router.ServeHTTP(w, req)
	c.Assert(w.Code, Equals, 500)
}
// TestUploadReturns500OnCreateFailure makes the destination directory
// read-only so os.Create fails inside the upload handler.
func (s *FilesUploadDiskFullSuite) TestUploadReturns500OnCreateFailure(c *C) {
	// Root ignores the 0555 mode (the project's docker targets run with
	// --user 0:0), so os.Create would succeed and this test would fail
	// spuriously — skip instead.
	if os.Geteuid() == 0 {
		c.Skip("permission checks are bypassed when running as root")
	}
	base := filepath.Join(s.aptlyContext.UploadPath(), "readonly")
	c.Assert(os.MkdirAll(base, 0777), IsNil)
	c.Assert(os.Chmod(base, 0555), IsNil)
	// restore permissions so TearDownTest can clean up the tree
	defer func() { _ = os.Chmod(base, 0777) }()

	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	part, err := writer.CreateFormFile("file", "a.txt")
	c.Assert(err, IsNil)
	_, err = part.Write([]byte("x"))
	c.Assert(err, IsNil)
	c.Assert(writer.Close(), IsNil)

	req, err := http.NewRequest("POST", "/api/files/readonly", body)
	c.Assert(err, IsNil)
	req.Header.Set("Content-Type", writer.FormDataContentType())
	w := httptest.NewRecorder()
	s.router.ServeHTTP(w, req)
	c.Assert(w.Code, Equals, 500)
}
// TestDeleteDirReturns500OnRemoveFailure makes the parent of the target
// directory read-only so removal fails and the API must answer 500.
func (s *FilesUploadDiskFullSuite) TestDeleteDirReturns500OnRemoveFailure(c *C) {
	// Root ignores the 0555 mode (the project's docker targets run with
	// --user 0:0), so the removal would succeed and this test would fail
	// spuriously — skip instead.
	if os.Geteuid() == 0 {
		c.Skip("permission checks are bypassed when running as root")
	}
	parent := s.aptlyContext.UploadPath()
	base := filepath.Join(parent, "cantremove")
	c.Assert(os.MkdirAll(base, 0777), IsNil)
	c.Assert(os.WriteFile(filepath.Join(base, "a.txt"), []byte("a"), 0644), IsNil)
	c.Assert(os.Chmod(parent, 0555), IsNil)
	// restore permissions so TearDownTest can clean up the tree
	defer func() { _ = os.Chmod(parent, 0777) }()

	req, err := http.NewRequest("DELETE", "/api/files/cantremove", nil)
	c.Assert(err, IsNil)
	w := httptest.NewRecorder()
	s.router.ServeHTTP(w, req)
	c.Assert(w.Code, Equals, 500)
}
+23 -2
View File
@@ -15,6 +15,10 @@ import (
"github.com/saracen/walker" "github.com/saracen/walker"
) )
// syncFile is a seam to allow tests to force fsync failures (e.g. ENOSPC).
// In production it calls (*os.File).Sync().
var syncFile = func(f *os.File) error { return f.Sync() }
// PublishedStorage abstract file system with public dirs (published repos) // PublishedStorage abstract file system with public dirs (published repos)
type PublishedStorage struct { type PublishedStorage struct {
rootPath string rootPath string
@@ -25,7 +29,7 @@ type PublishedStorage struct {
// Global mutex map to prevent concurrent access to the same destinationPath in LinkFromPool // Global mutex map to prevent concurrent access to the same destinationPath in LinkFromPool
var ( var (
fileLockMutex sync.Mutex fileLockMutex sync.Mutex
fileLocks = make(map[string]*sync.Mutex) fileLocks = make(map[string]*sync.Mutex)
) )
// getFileLock returns a mutex for a specific file path to prevent concurrent modifications // getFileLock returns a mutex for a specific file path to prevent concurrent modifications
@@ -119,7 +123,17 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
}() }()
_, err = io.Copy(f, source) _, err = io.Copy(f, source)
return err if err != nil {
return err
}
// Sync to ensure all data is written to disk and catch ENOSPC errors
err = syncFile(f)
if err != nil {
return fmt.Errorf("error syncing file %s: %s", path, err)
}
return nil
} }
// Remove removes single file under public path // Remove removes single file under public path
@@ -268,6 +282,13 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
return err return err
} }
// Sync to ensure all data is written to disk and catch ENOSPC errors
err = syncFile(dst)
if err != nil {
_ = dst.Close()
return fmt.Errorf("error syncing file %s: %s", destinationPath, err)
}
err = dst.Close() err = dst.Close()
} else if storage.linkMethod == LinkMethodSymLink { } else if storage.linkMethod == LinkMethodSymLink {
err = localSourcePool.Symlink(sourcePath, destinationPath) err = localSourcePool.Symlink(sourcePath, destinationPath)
+386
View File
@@ -1,8 +1,14 @@
package files package files
import ( import (
"bytes"
"errors"
"io"
"os" "os"
"os/exec"
"path/filepath" "path/filepath"
"runtime"
"strings"
"syscall" "syscall"
"github.com/aptly-dev/aptly/aptly" "github.com/aptly-dev/aptly/aptly"
@@ -11,6 +17,77 @@ import (
. "gopkg.in/check.v1" . "gopkg.in/check.v1"
) )
// fakeProgress is a no-op progress reporter for tests; the embedded
// bytes.Buffer satisfies the writer side and collects anything written.
type fakeProgress struct{ bytes.Buffer }

func (p *fakeProgress) Start()    {}
func (p *fakeProgress) Shutdown() {}
func (p *fakeProgress) Flush()    {}
func (p *fakeProgress) InitBar(count int64, isBytes bool, barType aptly.BarType) {
}
func (p *fakeProgress) ShutdownBar()     {}
func (p *fakeProgress) AddBar(count int) {}
func (p *fakeProgress) SetBar(count int) {}
func (p *fakeProgress) Printf(msg string, a ...interface{}) {
}
func (p *fakeProgress) ColoredPrintf(msg string, a ...interface{}) {
}
func (p *fakeProgress) PrintfStdErr(msg string, a ...interface{}) {
}
// fakeRSC wraps a bytes.Reader as a read-seek-closer whose Close can be
// made to fail via closeErr (nil by default).
type fakeRSC struct {
	*bytes.Reader
	closeErr error
}

// Close reports the configured closeErr.
func (r *fakeRSC) Close() error { return r.closeErr }
// fakePool is a test stub for the package-pool interface used by
// PublishedStorage (presumably aptly.PackagePool — confirm against the
// aptly package). sizeErr and openFn let tests force Size/Open failures.
type fakePool struct {
	sizeErr error
	openFn  func(string) (aptly.ReadSeekerCloser, error)
}

// fakeLocalPool extends fakePool with the local-pool methods; statErr
// lets tests force Stat to fail.
type fakeLocalPool struct {
	fakePool
	statErr error
}

// Stat always returns a nil FileInfo plus statErr; callers are expected
// to check the error before using the FileInfo.
func (p *fakeLocalPool) Stat(path string) (os.FileInfo, error) { return nil, p.statErr }

func (p *fakeLocalPool) GenerateTempPath(filename string) (string, error) {
	return "", nil
}

func (p *fakeLocalPool) Link(path, dstPath string) error    { return nil }
func (p *fakeLocalPool) Symlink(path, dstPath string) error { return nil }

// FullPath is the identity function in this fake.
func (p *fakeLocalPool) FullPath(path string) string { return path }

func (p *fakePool) Verify(poolPath, basename string, checksums *utils.ChecksumInfo, checksumStorage aptly.ChecksumStorage) (string, bool, error) {
	return "", false, nil
}

func (p *fakePool) Import(srcPath, basename string, checksums *utils.ChecksumInfo, move bool, storage aptly.ChecksumStorage) (string, error) {
	return "", nil
}

func (p *fakePool) LegacyPath(filename string, checksums *utils.ChecksumInfo) (string, error) {
	return "", nil
}

// Size returns a deterministic fake size (the path length) unless
// sizeErr is configured.
func (p *fakePool) Size(path string) (int64, error) {
	if p.sizeErr != nil {
		return 0, p.sizeErr
	}
	return int64(len(path)), nil
}

// Open delegates to openFn when set; otherwise it fails with io.EOF.
func (p *fakePool) Open(path string) (aptly.ReadSeekerCloser, error) {
	if p.openFn != nil {
		return p.openFn(path)
	}
	return nil, io.EOF
}

func (p *fakePool) FilepathList(progress aptly.Progress) ([]string, error) { return nil, nil }
func (p *fakePool) Remove(path string) (int64, error)                      { return 0, nil }
type PublishedStorageSuite struct { type PublishedStorageSuite struct {
root string root string
storage *PublishedStorage storage *PublishedStorage
@@ -69,6 +146,14 @@ func (s *PublishedStorageSuite) TestPutFile(c *C) {
c.Assert(err, IsNil) c.Assert(err, IsNil)
} }
// TestPutFileReturnsErrorIfSourceMissing checks that PutFile propagates
// the error when the local source file cannot be opened.
func (s *PublishedStorageSuite) TestPutFileReturnsErrorIfSourceMissing(c *C) {
	c.Assert(s.storage.MkDir("ppa/dists/squeeze/"), IsNil)

	missing := filepath.Join(s.root, "no-such-file")
	c.Assert(s.storage.PutFile("ppa/dists/squeeze/Release", missing), NotNil)
}
func (s *PublishedStorageSuite) TestFilelist(c *C) { func (s *PublishedStorageSuite) TestFilelist(c *C) {
err := s.storage.MkDir("ppa/pool/main/a/ab/") err := s.storage.MkDir("ppa/pool/main/a/ab/")
c.Assert(err, IsNil) c.Assert(err, IsNil)
@@ -134,6 +219,11 @@ func (s *PublishedStorageSuite) TestSymLink(c *C) {
c.Assert(linkTarget, Equals, "ppa/dists/squeeze/Release") c.Assert(linkTarget, Equals, "ppa/dists/squeeze/Release")
} }
// TestReadLinkReturnsErrorOnMissingPath checks ReadLink fails for a
// path that does not exist.
func (s *PublishedStorageSuite) TestReadLinkReturnsErrorOnMissingPath(c *C) {
	var err error
	_, err = s.storage.ReadLink("does/not/exist")
	c.Assert(err, NotNil)
}
func (s *PublishedStorageSuite) TestHardLink(c *C) { func (s *PublishedStorageSuite) TestHardLink(c *C) {
err := s.storage.MkDir("ppa/dists/squeeze/") err := s.storage.MkDir("ppa/dists/squeeze/")
c.Assert(err, IsNil) c.Assert(err, IsNil)
@@ -163,6 +253,18 @@ func (s *PublishedStorageSuite) TestRemoveDirs(c *C) {
c.Assert(os.IsNotExist(err), Equals, true) c.Assert(os.IsNotExist(err), Equals, true)
} }
// RemoveDirs must accept a progress reporter without failing.
func (s *PublishedStorageSuite) TestRemoveDirsWithProgress(c *C) {
	c.Assert(s.storage.MkDir("ppa/dists/squeeze/"), IsNil)
	c.Assert(s.storage.PutFile("ppa/dists/squeeze/Release", "/dev/null"), IsNil)

	progress := &fakeProgress{}
	c.Assert(s.storage.RemoveDirs("ppa/dists/", progress), IsNil)
}
func (s *PublishedStorageSuite) TestRemove(c *C) { func (s *PublishedStorageSuite) TestRemove(c *C) {
err := s.storage.MkDir("ppa/dists/squeeze/") err := s.storage.MkDir("ppa/dists/squeeze/")
c.Assert(err, IsNil) c.Assert(err, IsNil)
@@ -337,3 +439,287 @@ func (s *PublishedStorageSuite) TestRootRemove(c *C) {
dirStorage := NewPublishedStorage(pwd, "", "") dirStorage := NewPublishedStorage(pwd, "", "")
c.Assert(func() { _ = dirStorage.RemoveDirs("", nil) }, PanicMatches, "trying to remove the root directory") c.Assert(func() { _ = dirStorage.RemoveDirs("", nil) }, PanicMatches, "trying to remove the root directory")
} }
// DiskFullSuite exercises out-of-space behaviour on a tiny loopback
// filesystem; the loop mount requires Linux, and setting it up inside
// the test requires root (otherwise a pre-mounted /smallfs is used).
type DiskFullSuite struct {
	root string // per-test scratch directory
}

var _ = Suite(&DiskFullSuite{})
// SetUpTest skips the whole suite on non-Linux hosts (loop mounts are
// Linux-only) and allocates a fresh scratch directory.
func (s *DiskFullSuite) SetUpTest(c *C) {
	if runtime.GOOS != "linux" {
		c.Skip("disk full tests only run on Linux")
	}
	s.root = c.MkDir()
}
// TestPutFileOutOfSpace writes a file larger than the filesystem it is
// published to and expects PutFile to surface the ENOSPC/sync error.
// As root it builds a 1 MiB loopback ext4 mount itself; otherwise it
// relies on a pre-mounted /smallfs and skips when that is absent,
// instead of failing with an unrelated error.
func (s *DiskFullSuite) TestPutFileOutOfSpace(c *C) {
	mountPoint := "/smallfs"
	if os.Geteuid() == 0 {
		// With root, create and mount a 1 MiB loopback ext4 filesystem.
		mountPoint = filepath.Join(s.root, "smallfs")
		c.Assert(os.MkdirAll(mountPoint, 0777), IsNil)

		fsImage := filepath.Join(s.root, "small.img")
		c.Assert(exec.Command("dd", "if=/dev/zero", "of="+fsImage, "bs=1M", "count=1").Run(), IsNil)
		c.Assert(exec.Command("mkfs.ext4", "-F", fsImage).Run(), IsNil)
		c.Assert(exec.Command("mount", "-o", "loop", fsImage, mountPoint).Run(), IsNil)
		defer func() {
			_ = exec.Command("umount", mountPoint).Run()
		}()
	} else if _, err := os.Stat(mountPoint); err != nil {
		// Without root we depend on CI providing the small filesystem.
		c.Skip("not running as root and " + mountPoint + " is not available")
	}

	storage := NewPublishedStorage(mountPoint, "", "")

	// 2 MiB source: guaranteed not to fit on the 1 MiB filesystem.
	largeFile := filepath.Join(s.root, "largefile")
	c.Assert(exec.Command("dd", "if=/dev/zero", "of="+largeFile, "bs=1M", "count=2").Run(), IsNil)

	err := storage.PutFile("testfile", largeFile)
	c.Assert(err, NotNil)
	c.Check(strings.Contains(err.Error(), "no space left on device") ||
		strings.Contains(err.Error(), "sync"), Equals, true,
		Commentf("Expected disk full error, got: %v", err))
}
// TestLinkFromPoolCopyOutOfSpace publishes a pool package (copy mode)
// onto a filesystem too small to hold it and expects LinkFromPool to
// surface the ENOSPC/sync error. As root it builds a 1 MiB loopback
// ext4 mount itself; otherwise it relies on a pre-mounted /smallfs and
// skips when that is absent, instead of failing with an unrelated error.
func (s *DiskFullSuite) TestLinkFromPoolCopyOutOfSpace(c *C) {
	mountPoint := "/smallfs"
	if os.Geteuid() == 0 {
		// With root, create and mount a 1 MiB loopback ext4 filesystem.
		mountPoint = filepath.Join(s.root, "smallfs")
		c.Assert(os.MkdirAll(mountPoint, 0777), IsNil)

		fsImage := filepath.Join(s.root, "small.img")
		c.Assert(exec.Command("dd", "if=/dev/zero", "of="+fsImage, "bs=1M", "count=1").Run(), IsNil)
		c.Assert(exec.Command("mkfs.ext4", "-F", fsImage).Run(), IsNil)
		c.Assert(exec.Command("mount", "-o", "loop", fsImage, mountPoint).Run(), IsNil)
		defer func() {
			_ = exec.Command("umount", mountPoint).Run()
		}()
	} else if _, err := os.Stat(mountPoint); err != nil {
		// Without root we depend on CI providing the small filesystem.
		c.Skip("not running as root and " + mountPoint + " is not available")
	}

	storage := NewPublishedStorage(mountPoint, "copy", "")
	pool := NewPackagePool(filepath.Join(s.root, "pool"), false)
	cs := NewMockChecksumStorage()

	// 2 MiB package: guaranteed not to fit on the 1 MiB filesystem.
	largeFile := filepath.Join(s.root, "package.deb")
	c.Assert(exec.Command("dd", "if=/dev/zero", "of="+largeFile, "bs=1M", "count=2").Run(), IsNil)

	sourceChecksum, err := utils.ChecksumsForFile(largeFile)
	c.Assert(err, IsNil)

	srcPoolPath, err := pool.Import(largeFile, "package.deb",
		&utils.ChecksumInfo{MD5: "d41d8cd98f00b204e9800998ecf8427e"}, false, cs)
	c.Assert(err, IsNil)

	err = storage.LinkFromPool("", "pool/main/p/package", "package.deb",
		pool, srcPoolPath, sourceChecksum, false)
	c.Assert(err, NotNil)
	c.Check(strings.Contains(err.Error(), "no space left on device") ||
		strings.Contains(err.Error(), "sync"), Equals, true,
		Commentf("Expected disk full error, got: %v", err))
}
// DiskFullNoRootSuite covers sync-related behaviour that does not need
// a real full filesystem (the package-level syncFile hook is stubbed
// instead), so these tests run without root.
type DiskFullNoRootSuite struct {
	root string // per-test scratch directory
}

var _ = Suite(&DiskFullNoRootSuite{})
// SetUpTest allocates a fresh scratch directory for each test.
func (s *DiskFullNoRootSuite) SetUpTest(c *C) {
	s.root = c.MkDir()
}
// TestSyncIsCalled verifies that PutFile copies the content to the
// destination AND actually invokes syncFile on it — the invocation is
// counted through the package-level syncFile hook.
func (s *DiskFullNoRootSuite) TestSyncIsCalled(c *C) {
	// Wrap syncFile so we can observe it being called while keeping
	// its real behaviour.
	syncCalls := 0
	oldSyncFile := syncFile
	syncFile = func(f *os.File) error {
		syncCalls++
		return oldSyncFile(f)
	}
	defer func() { syncFile = oldSyncFile }()

	storage := NewPublishedStorage(s.root, "", "")

	sourceFile := filepath.Join(s.root, "source.txt")
	c.Assert(os.WriteFile(sourceFile, []byte("test content"), 0644), IsNil)

	c.Assert(storage.PutFile("dest.txt", sourceFile), IsNil)

	content, err := os.ReadFile(filepath.Join(s.root, "dest.txt"))
	c.Assert(err, IsNil)
	c.Check(string(content), Equals, "test content")
	c.Check(syncCalls > 0, Equals, true, Commentf("syncFile was never invoked by PutFile"))
}
// TestLinkFromPoolCopySyncIsCalled verifies that LinkFromPool in copy
// mode copies the package content AND actually invokes syncFile on the
// published file — the invocation is counted through the package-level
// syncFile hook.
func (s *DiskFullNoRootSuite) TestLinkFromPoolCopySyncIsCalled(c *C) {
	// Wrap syncFile so we can observe it being called while keeping
	// its real behaviour.
	syncCalls := 0
	oldSyncFile := syncFile
	syncFile = func(f *os.File) error {
		syncCalls++
		return oldSyncFile(f)
	}
	defer func() { syncFile = oldSyncFile }()

	storage := NewPublishedStorage(s.root, "copy", "")
	pool := NewPackagePool(filepath.Join(s.root, "pool"), false)
	cs := NewMockChecksumStorage()

	pkgFile := filepath.Join(s.root, "package.deb")
	c.Assert(os.WriteFile(pkgFile, []byte("package content"), 0644), IsNil)

	sourceChecksum, err := utils.ChecksumsForFile(pkgFile)
	c.Assert(err, IsNil)

	srcPoolPath, err := pool.Import(pkgFile, "package.deb",
		&utils.ChecksumInfo{MD5: "d41d8cd98f00b204e9800998ecf8427e"}, false, cs)
	c.Assert(err, IsNil)

	// Only count syncs performed by LinkFromPool itself, not by the
	// pool import above.
	syncCalls = 0
	c.Assert(storage.LinkFromPool("", "pool/main/p/package", "package.deb",
		pool, srcPoolPath, sourceChecksum, false), IsNil)

	content, err := os.ReadFile(filepath.Join(s.root, "pool/main/p/package/package.deb"))
	c.Assert(err, IsNil)
	c.Check(string(content), Equals, "package content")
	c.Check(syncCalls > 0, Equals, true, Commentf("syncFile was never invoked by LinkFromPool"))
}
// A failure from syncFile during PutFile must surface to the caller.
func (s *DiskFullNoRootSuite) TestPutFileSyncErrorIsReturned(c *C) {
	storage := NewPublishedStorage(s.root, "", "")

	sourceFile := filepath.Join(s.root, "source-syncfail.txt")
	c.Assert(os.WriteFile(sourceFile, []byte("test content"), 0644), IsNil)

	// Stub syncFile to simulate a full disk at fsync time.
	savedSync := syncFile
	defer func() { syncFile = savedSync }()
	syncFile = func(_ *os.File) error { return syscall.ENOSPC }

	err := storage.PutFile("dest-syncfail.txt", sourceFile)
	c.Assert(err, NotNil)
	c.Check(strings.Contains(err.Error(), "error syncing file"), Equals, true)
}
// A failure from syncFile during copy-mode LinkFromPool must surface
// to the caller.
func (s *DiskFullNoRootSuite) TestLinkFromPoolCopySyncErrorIsReturned(c *C) {
	storage := NewPublishedStorage(s.root, "copy", "")
	pool := NewPackagePool(filepath.Join(s.root, "pool"), false)
	cs := NewMockChecksumStorage()

	pkgFile := filepath.Join(s.root, "package-syncfail.deb")
	c.Assert(os.WriteFile(pkgFile, []byte("package content"), 0644), IsNil)

	sourceChecksum, err := utils.ChecksumsForFile(pkgFile)
	c.Assert(err, IsNil)

	srcPoolPath, err := pool.Import(pkgFile, "package-syncfail.deb",
		&utils.ChecksumInfo{MD5: "d41d8cd98f00b204e9800998ecf8427e"}, false, cs)
	c.Assert(err, IsNil)

	// Stub syncFile only after the import, to simulate a full disk at
	// fsync time during publishing.
	savedSync := syncFile
	defer func() { syncFile = savedSync }()
	syncFile = func(_ *os.File) error { return syscall.ENOSPC }

	err = storage.LinkFromPool("", "pool/main/p/package", "package-syncfail.deb",
		pool, srcPoolPath, sourceChecksum, false)
	c.Assert(err, NotNil)
	c.Check(strings.Contains(err.Error(), "error syncing file"), Equals, true)
}
// getFileLock must hand back the same mutex for the same path and
// distinct mutexes for distinct paths.
func (s *DiskFullNoRootSuite) TestGetFileLockReusesMutex(c *C) {
	path := filepath.Join(s.root, "a")
	first, second := getFileLock(path), getFileLock(path)
	c.Check(first == second, Equals, true)

	other1 := getFileLock(filepath.Join(s.root, "c1"))
	other2 := getFileLock(filepath.Join(s.root, "c2"))
	c.Check(other1 == other2, Equals, false)
}
// PutFile must fail when the destination's parent directory is absent.
func (s *DiskFullNoRootSuite) TestPutFileFailsIfDestinationDirMissing(c *C) {
	storage := NewPublishedStorage(s.root, "", "")

	src := filepath.Join(s.root, "src.txt")
	c.Assert(os.WriteFile(src, []byte("x"), 0644), IsNil)

	c.Assert(storage.PutFile("missingdir/dest.txt", src), NotNil)
}
// Hardlink (default) mode cannot link out of a non-local pool; the
// error must say so.
func (s *DiskFullNoRootSuite) TestLinkFromPoolRejectsNonLocalPoolForHardlink(c *C) {
	storage := NewPublishedStorage(s.root, "", "")

	err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb",
		&fakePool{}, "x", utils.ChecksumInfo{MD5: "x"}, false)
	c.Assert(err, NotNil)
	c.Check(strings.Contains(err.Error(), "cannot link"), Equals, true)
}
// In copy mode, a failure opening the pool file must be propagated.
func (s *DiskFullNoRootSuite) TestLinkFromPoolCopyReturnsErrorIfOpenFails(c *C) {
	storage := NewPublishedStorage(s.root, "copy", "")

	failingPool := &fakePool{openFn: func(string) (aptly.ReadSeekerCloser, error) {
		return nil, io.ErrUnexpectedEOF
	}}
	c.Assert(storage.LinkFromPool("", "pool/main/p/pkg", "x.deb",
		failingPool, "x", utils.ChecksumInfo{MD5: "x"}, false), NotNil)
}
// In copy mode, an error closing the pool reader must be returned as-is.
func (s *DiskFullNoRootSuite) TestLinkFromPoolCopyReturnsErrorIfReaderCloseFails(c *C) {
	storage := NewPublishedStorage(s.root, "copy", "")

	badClose := &fakePool{openFn: func(string) (aptly.ReadSeekerCloser, error) {
		return &fakeRSC{Reader: bytes.NewReader([]byte("data")), closeErr: io.ErrClosedPipe}, nil
	}}
	err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb",
		badClose, "x", utils.ChecksumInfo{MD5: "x"}, false)
	c.Assert(err, NotNil)
	c.Check(err, Equals, io.ErrClosedPipe)
}
// With the "size" verify method and an already-existing destination, a
// failure from the pool's Size must be propagated unchanged.
func (s *DiskFullNoRootSuite) TestLinkFromPoolCopyReturnsErrorIfSizeFailsWhenDestExists(c *C) {
	storage := NewPublishedStorage(s.root, "copy", "size")

	brokenSize := &fakePool{sizeErr: io.ErrUnexpectedEOF, openFn: func(string) (aptly.ReadSeekerCloser, error) {
		return &fakeRSC{Reader: bytes.NewReader([]byte("data")), closeErr: nil}, nil
	}}

	// Pre-create the destination so the size-comparison path is taken.
	destDir := filepath.Join(s.root, "pool/main/p/pkg")
	c.Assert(os.MkdirAll(destDir, 0777), IsNil)
	c.Assert(os.WriteFile(filepath.Join(destDir, "x.deb"), []byte("old"), 0644), IsNil)

	err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb",
		brokenSize, "x", utils.ChecksumInfo{MD5: "x"}, false)
	c.Assert(err, NotNil)
	c.Check(err, Equals, io.ErrUnexpectedEOF)
}
// With the default verify method, a failing MD5 computation on the
// destination must make LinkFromPool fail.
func (s *DiskFullNoRootSuite) TestLinkFromPoolCopyChecksumReturnsErrorIfDstMD5Fails(c *C) {
	storage := NewPublishedStorage(s.root, "copy", "")

	okPool := &fakePool{openFn: func(string) (aptly.ReadSeekerCloser, error) {
		return &fakeRSC{Reader: bytes.NewReader([]byte("data")), closeErr: nil}, nil
	}}

	// Make destinationPath a directory so MD5ChecksumForFile fails.
	destDir := filepath.Join(s.root, "pool/main/p/pkg")
	c.Assert(os.MkdirAll(destDir, 0777), IsNil)
	c.Assert(os.MkdirAll(filepath.Join(destDir, "x.deb"), 0777), IsNil)

	c.Assert(storage.LinkFromPool("", "pool/main/p/pkg", "x.deb",
		okPool, "x", utils.ChecksumInfo{MD5: "x"}, false), NotNil)
}
// In hardlink mode with an already-existing destination, a failure
// from the pool's Stat must be propagated.
func (s *DiskFullNoRootSuite) TestLinkFromPoolHardlinkReturnsErrorIfStatFailsWhenDestExists(c *C) {
	storage := NewPublishedStorage(c.MkDir(), "hardlink", "")
	brokenStat := &fakeLocalPool{statErr: errors.New("stat failed")}

	destDir := filepath.Join(storage.rootPath, "pool", "main", "p", "pkg")
	c.Assert(os.MkdirAll(destDir, 0777), IsNil)
	c.Assert(os.WriteFile(filepath.Join(destDir, "x.deb"), []byte("x"), 0644), IsNil)

	c.Assert(storage.LinkFromPool("", "pool/main/p/pkg", "x.deb",
		brokenStat, "x", utils.ChecksumInfo{MD5: "x"}, false), NotNil)
}