Merge pull request #1439 from aptly-dev/feature/go-1.24

go: use version 1.24
This commit is contained in:
André Roth
2025-05-01 10:14:27 +02:00
committed by GitHub
119 changed files with 808 additions and 744 deletions

View File

@@ -1,16 +1,11 @@
run:
tests: false
version: "2"
linters:
disable-all: true
enable:
- goconst
- gofmt
- goimports
- govet
- ineffassign
- misspell
- revive
- staticcheck
- vetshadow
settings:
staticcheck:
checks:
- "all"
- "-QF1004" # could use strings.ReplaceAll instead
- "-QF1012" # Use fmt.Fprintf(...) instead of WriteString(fmt.Sprintf(...))
- "-QF1003" # could use tagged switch
- "-ST1000" # at least one file in a package should have a package comment
- "-QF1001" # could apply De Morgan's law

View File

@@ -2,7 +2,7 @@ GOPATH=$(shell go env GOPATH)
VERSION=$(shell make -s version)
PYTHON?=python3
BINPATH?=$(GOPATH)/bin
GOLANGCI_LINT_VERSION=v1.64.5 # version supporting go 1.23
GOLANGCI_LINT_VERSION=v2.0.2 # version supporting go 1.24
COVERAGE_DIR?=$(shell mktemp -d)
GOOS=$(shell go env GOHOSTOS)
GOARCH=$(shell go env GOHOSTARCH)
@@ -71,9 +71,9 @@ flake8: ## run flake8 on system test python files
lint: prepare
# Install golangci-lint
@test -f $(BINPATH)/golangci-lint || go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
@test -f $(BINPATH)/golangci-lint || go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
# Running lint
@PATH=$(BINPATH)/:$(PATH) golangci-lint run --max-issues-per-linter 0 --max-same-issues 0
@NO_COLOR=true PATH=$(BINPATH)/:$(PATH) golangci-lint run --max-issues-per-linter=0 --max-same-issues=0
build: prepare swagger ## Build aptly
@@ -164,6 +164,9 @@ binaries: prepare swagger ## Build binary releases (FreeBSD, macOS, Linux gener
docker-image: ## Build aptly-dev docker image
@docker build -f system/Dockerfile . -t aptly-dev
docker-image-no-cache: ## Build aptly-dev docker image (no cache)
@docker build --no-cache -f system/Dockerfile . -t aptly-dev
docker-build: ## Build aptly in docker container
@docker run -it --rm -v ${PWD}:/work/src aptly-dev /work/src/system/docker-wrapper build

View File

@@ -41,7 +41,10 @@ type aptlyVersion struct {
// @Success 200 {object} aptlyVersion
// @Router /api/version [get]
func apiVersion(c *gin.Context) {
c.JSON(200, gin.H{"Version": aptly.Version})
version := aptlyVersion{
Version: aptly.Version,
}
c.JSON(200, version)
}
type aptlyStatus struct {
@@ -67,7 +70,8 @@ func apiReady(isReady *atomic.Value) func(*gin.Context) {
return
}
c.JSON(200, gin.H{"Status": "Aptly is ready"})
status := aptlyStatus{Status: "Aptly is ready"}
c.JSON(200, status)
}
}
@@ -165,7 +169,7 @@ func runTaskInBackground(name string, resources []string, proc task.Process) (ta
return nil, err
}
defer releaseDatabaseConnection()
defer func() { _ = releaseDatabaseConnection() }()
return proc(out, detail)
})
}
@@ -174,18 +178,18 @@ func truthy(value interface{}) bool {
if value == nil {
return false
}
switch value.(type) {
switch v := value.(type) {
case string:
switch strings.ToLower(value.(string)) {
switch strings.ToLower(v) {
case "n", "no", "f", "false", "0", "off":
return false
default:
return true
}
case int:
return !(value.(int) == 0)
return v != 0
case bool:
return value.(bool)
return v
}
return true
}
@@ -210,11 +214,11 @@ func maybeRunTaskInBackground(c *gin.Context, name string, resources []string, p
}
// wait for task to finish
context.TaskList().WaitForTaskByID(task.ID)
_, _ = context.TaskList().WaitForTaskByID(task.ID)
retValue, _ := context.TaskList().GetTaskReturnValueByID(task.ID)
err, _ := context.TaskList().GetTaskErrorByID(task.ID)
context.TaskList().DeleteTaskByID(task.ID)
_, _ = context.TaskList().DeleteTaskByID(task.ID)
if err != nil {
AbortWithJSONError(c, retValue.Code, err)
return
@@ -282,11 +286,11 @@ func showPackages(c *gin.Context, reflist *deb.PackageRefList, collectionFactory
// filter packages by version
if c.Request.URL.Query().Get("maximumVersion") == "1" {
list.PrepareIndex()
list.ForEach(func(p *deb.Package) error {
_ = list.ForEach(func(p *deb.Package) error {
versionQ, err := query.Parse(fmt.Sprintf("Name (%s), $Version (<= %s)", p.Name, p.Version))
if err != nil {
fmt.Println("filter packages by version, query string parse err: ", err)
c.AbortWithError(500, fmt.Errorf("unable to parse %s maximum version query string: %s", p.Name, err))
_ = c.AbortWithError(500, fmt.Errorf("unable to parse %s maximum version query string: %s", p.Name, err))
} else {
tmpList, err := list.Filter(deb.FilterOptions{
Queries: []deb.PackageQuery{versionQ},
@@ -294,15 +298,15 @@ func showPackages(c *gin.Context, reflist *deb.PackageRefList, collectionFactory
if err == nil {
if tmpList.Len() > 0 {
tmpList.ForEach(func(tp *deb.Package) error {
_ = tmpList.ForEach(func(tp *deb.Package) error {
list.Remove(tp)
return nil
})
list.Add(p)
_ = list.Add(p)
}
} else {
fmt.Println("filter packages by version, filter err: ", err)
c.AbortWithError(500, fmt.Errorf("unable to get %s maximum version: %s", p.Name, err))
_ = c.AbortWithError(500, fmt.Errorf("unable to get %s maximum version: %s", p.Name, err))
}
}
@@ -311,7 +315,7 @@ func showPackages(c *gin.Context, reflist *deb.PackageRefList, collectionFactory
}
if c.Request.URL.Query().Get("format") == "details" {
list.ForEach(func(p *deb.Package) error {
_ = list.ForEach(func(p *deb.Package) error {
result = append(result, p)
return nil
})
@@ -322,7 +326,7 @@ func showPackages(c *gin.Context, reflist *deb.PackageRefList, collectionFactory
}
}
func AbortWithJSONError(c *gin.Context, code int, err error) *gin.Error {
func AbortWithJSONError(c *gin.Context, code int, err error) {
c.Writer.Header().Set("Content-Type", "application/json; charset=utf-8")
return c.AbortWithError(code, err)
_ = c.AbortWithError(code, err)
}

View File

@@ -24,14 +24,14 @@ func Test(t *testing.T) {
TestingT(t)
}
type ApiSuite struct {
type APISuite struct {
context *ctx.AptlyContext
flags *flag.FlagSet
configFile *os.File
router http.Handler
}
var _ = Suite(&ApiSuite{})
var _ = Suite(&APISuite{})
func createTestConfig() *os.File {
file, err := os.CreateTemp("", "aptly")
@@ -45,11 +45,11 @@ func createTestConfig() *os.File {
if err != nil {
return nil
}
file.Write(jsonString)
_, _ = file.Write(jsonString)
return file
}
func (s *ApiSuite) setupContext() error {
func (s *APISuite) setupContext() error {
aptly.Version = "testVersion"
file := createTestConfig()
if nil == file {
@@ -75,23 +75,23 @@ func (s *ApiSuite) setupContext() error {
return nil
}
func (s *ApiSuite) SetUpSuite(c *C) {
func (s *APISuite) SetUpSuite(c *C) {
err := s.setupContext()
c.Assert(err, IsNil)
}
func (s *ApiSuite) TearDownSuite(c *C) {
os.Remove(s.configFile.Name())
func (s *APISuite) TearDownSuite(c *C) {
_ = os.Remove(s.configFile.Name())
s.context.Shutdown()
}
func (s *ApiSuite) SetUpTest(c *C) {
func (s *APISuite) SetUpTest(c *C) {
}
func (s *ApiSuite) TearDownTest(c *C) {
func (s *APISuite) TearDownTest(c *C) {
}
func (s *ApiSuite) HTTPRequest(method string, url string, body io.Reader) (*httptest.ResponseRecorder, error) {
func (s *APISuite) HTTPRequest(method string, url string, body io.Reader) (*httptest.ResponseRecorder, error) {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url, body)
if err != nil {
@@ -102,32 +102,32 @@ func (s *ApiSuite) HTTPRequest(method string, url string, body io.Reader) (*http
return w, nil
}
func (s *ApiSuite) TestGinRunsInReleaseMode(c *C) {
func (s *APISuite) TestGinRunsInReleaseMode(c *C) {
c.Check(gin.Mode(), Equals, gin.ReleaseMode)
}
func (s *ApiSuite) TestGetVersion(c *C) {
func (s *APISuite) TestGetVersion(c *C) {
response, err := s.HTTPRequest("GET", "/api/version", nil)
c.Assert(err, IsNil)
c.Check(response.Code, Equals, 200)
c.Check(response.Body.String(), Matches, "{\"Version\":\""+aptly.Version+"\"}")
}
func (s *ApiSuite) TestGetReadiness(c *C) {
func (s *APISuite) TestGetReadiness(c *C) {
response, err := s.HTTPRequest("GET", "/api/ready", nil)
c.Assert(err, IsNil)
c.Check(response.Code, Equals, 200)
c.Check(response.Body.String(), Matches, "{\"Status\":\"Aptly is ready\"}")
}
func (s *ApiSuite) TestGetHealthiness(c *C) {
func (s *APISuite) TestGetHealthiness(c *C) {
response, err := s.HTTPRequest("GET", "/api/healthy", nil)
c.Assert(err, IsNil)
c.Check(response.Code, Equals, 200)
c.Check(response.Body.String(), Matches, "{\"Status\":\"Aptly is healthy\"}")
}
func (s *ApiSuite) TestGetMetrics(c *C) {
func (s *APISuite) TestGetMetrics(c *C) {
response, err := s.HTTPRequest("GET", "/api/metrics", nil)
c.Assert(err, IsNil)
c.Check(response.Code, Equals, 200)
@@ -141,7 +141,7 @@ func (s *ApiSuite) TestGetMetrics(c *C) {
c.Check(b, Matches, ".*aptly_build_info.*version=\"testVersion\".*")
}
func (s *ApiSuite) TestRepoCreate(c *C) {
func (s *APISuite) TestRepoCreate(c *C) {
body, err := json.Marshal(gin.H{
"Name": "dummy",
})
@@ -150,7 +150,7 @@ func (s *ApiSuite) TestRepoCreate(c *C) {
c.Assert(err, IsNil)
}
func (s *ApiSuite) TestTruthy(c *C) {
func (s *APISuite) TestTruthy(c *C) {
c.Check(truthy("no"), Equals, false)
c.Check(truthy("n"), Equals, false)
c.Check(truthy("off"), Equals, false)

View File

@@ -21,7 +21,7 @@ import (
// @Success 200 {object} string "Output"
// @Failure 404 {object} Error "Not Found"
// @Router /api/db/cleanup [post]
func apiDbCleanup(c *gin.Context) {
func apiDBCleanup(c *gin.Context) {
resources := []string{string(task.AllResourcesKey)}
maybeRunTaskInBackground(c, "Clean up db", resources, func(out aptly.Progress, detail *task.Detail) (*task.ProcessReturnValue, error) {
var err error
@@ -109,8 +109,8 @@ func apiDbCleanup(c *gin.Context) {
if toDelete.Len() > 0 {
batch := db.CreateBatch()
toDelete.ForEach(func(ref []byte) error {
collectionFactory.PackageCollection().DeleteByKey(ref, batch)
_ = toDelete.ForEach(func(ref []byte) error {
_ = collectionFactory.PackageCollection().DeleteByKey(ref, batch)
return nil
})

View File

@@ -122,7 +122,7 @@ func apiFilesUpload(c *gin.Context) {
AbortWithJSONError(c, 500, err)
return
}
defer src.Close()
defer func() { _ = src.Close() }()
destPath := filepath.Join(path, filepath.Base(file.Filename))
dst, err := os.Create(destPath)
@@ -130,7 +130,7 @@ func apiFilesUpload(c *gin.Context) {
AbortWithJSONError(c, 500, err)
return
}
defer dst.Close()
defer func() { _ = dst.Close() }()
_, err = io.Copy(dst, src)
if err != nil {

View File

@@ -60,7 +60,7 @@ func apiGPGAddKey(c *gin.Context) {
AbortWithJSONError(c, 400, err)
return
}
defer os.RemoveAll(tempdir)
defer func() { _ = os.RemoveAll(tempdir) }()
keypath := filepath.Join(tempdir, "key")
keyfile, e := os.Create(keypath)

View File

@@ -67,17 +67,17 @@ func (s *MiddlewareSuite) TestJSONMiddleware4xx(c *C) {
outC := make(chan string)
go func() {
var buf bytes.Buffer
io.Copy(&buf, s.logReader)
_, _ = io.Copy(&buf, s.logReader)
fmt.Println(buf.String())
outC <- buf.String()
}()
s.HTTPRequest(http.MethodGet, "/", nil)
s.logWriter.Close()
_ = s.logWriter.Close()
capturedOutput := <-outC
var jsonMap map[string]interface{}
json.Unmarshal([]byte(capturedOutput), &jsonMap)
_ = json.Unmarshal([]byte(capturedOutput), &jsonMap)
if val, ok := jsonMap["level"]; ok {
c.Check(val, Equals, "warn")
@@ -130,17 +130,17 @@ func (s *MiddlewareSuite) TestJSONMiddleware2xx(c *C) {
outC := make(chan string)
go func() {
var buf bytes.Buffer
io.Copy(&buf, s.logReader)
_, _ = io.Copy(&buf, s.logReader)
fmt.Println(buf.String())
outC <- buf.String()
}()
s.HTTPRequest(http.MethodGet, "/api/healthy", nil)
s.logWriter.Close()
_ = s.logWriter.Close()
capturedOutput := <-outC
var jsonMap map[string]interface{}
json.Unmarshal([]byte(capturedOutput), &jsonMap)
_ = json.Unmarshal([]byte(capturedOutput), &jsonMap)
if val, ok := jsonMap["level"]; ok {
c.Check(val, Equals, "info")
@@ -153,17 +153,17 @@ func (s *MiddlewareSuite) TestJSONMiddleware5xx(c *C) {
outC := make(chan string)
go func() {
var buf bytes.Buffer
io.Copy(&buf, s.logReader)
_, _ = io.Copy(&buf, s.logReader)
fmt.Println(buf.String())
outC <- buf.String()
}()
s.HTTPRequest(http.MethodGet, "/api/ready", nil)
s.logWriter.Close()
_ = s.logWriter.Close()
capturedOutput := <-outC
var jsonMap map[string]interface{}
json.Unmarshal([]byte(capturedOutput), &jsonMap)
_ = json.Unmarshal([]byte(capturedOutput), &jsonMap)
if val, ok := jsonMap["level"]; ok {
c.Check(val, Equals, "error")
@@ -176,17 +176,17 @@ func (s *MiddlewareSuite) TestJSONMiddlewareRaw(c *C) {
outC := make(chan string)
go func() {
var buf bytes.Buffer
io.Copy(&buf, s.logReader)
_, _ = io.Copy(&buf, s.logReader)
fmt.Println(buf.String())
outC <- buf.String()
}()
s.HTTPRequest(http.MethodGet, "/api/healthy?test=raw", nil)
s.logWriter.Close()
_ = s.logWriter.Close()
capturedOutput := <-outC
var jsonMap map[string]interface{}
json.Unmarshal([]byte(capturedOutput), &jsonMap)
_ = json.Unmarshal([]byte(capturedOutput), &jsonMap)
fmt.Println(capturedOutput)

View File

@@ -43,7 +43,7 @@ func apiMirrorsList(c *gin.Context) {
collection := collectionFactory.RemoteRepoCollection()
result := []*deb.RemoteRepo{}
collection.ForEach(func(repo *deb.RemoteRepo) error {
_ = collection.ForEach(func(repo *deb.RemoteRepo) error {
result = append(result, repo)
return nil
})
@@ -319,7 +319,7 @@ func apiMirrorsPackages(c *gin.Context) {
}
if c.Request.URL.Query().Get("format") == "details" {
list.ForEach(func(p *deb.Package) error {
_ = list.ForEach(func(p *deb.Package) error {
result = append(result, p)
return nil
})
@@ -491,7 +491,7 @@ func apiMirrorsUpdate(c *gin.Context) {
e := context.ReOpenDatabase()
if e == nil {
remote.MarkAsIdle()
collection.Update(remote)
_ = collection.Update(remote)
}
}()
@@ -579,7 +579,7 @@ func apiMirrorsUpdate(c *gin.Context) {
file, e = os.CreateTemp("", task.File.Filename)
if e == nil {
task.TempDownPath = file.Name()
file.Close()
_ = file.Close()
}
}
if e != nil {
@@ -653,7 +653,7 @@ func apiMirrorsUpdate(c *gin.Context) {
}
log.Info().Msgf("%s: Finalizing download...", b.Name)
remote.FinalizeDownload(collectionFactory, out)
_ = remote.FinalizeDownload(collectionFactory, out)
err = collectionFactory.RemoteRepoCollection().Update(remote)
if err != nil {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("unable to update: %s", err)

View File

@@ -9,7 +9,7 @@ import (
)
type MirrorSuite struct {
ApiSuite
APISuite
}
var _ = Suite(&MirrorSuite{})

View File

@@ -5,7 +5,7 @@ import (
)
type PackagesSuite struct {
ApiSuite
APISuite
}
var _ = Suite(&PackagesSuite{})

View File

@@ -343,7 +343,7 @@ func apiPublishRepoOrSnapshot(c *gin.Context) {
duplicate := collection.CheckDuplicate(published)
if duplicate != nil {
collectionFactory.PublishedRepoCollection().LoadComplete(duplicate, collectionFactory)
_ = collectionFactory.PublishedRepoCollection().LoadComplete(duplicate, collectionFactory)
return &task.ProcessReturnValue{Code: http.StatusBadRequest, Value: nil}, fmt.Errorf("prefix/distribution already used by another published repo: %s", duplicate)
}
@@ -471,7 +471,7 @@ func apiPublishUpdateSwitch(c *gin.Context) {
maybeRunTaskInBackground(c, taskName, resources, func(out aptly.Progress, _ *task.Detail) (*task.ProcessReturnValue, error) {
err = collection.LoadComplete(published, collectionFactory)
if err != nil {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("Unable to update: %s", err)
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("unable to update: %s", err)
}
revision := published.ObtainRevision()
@@ -487,12 +487,12 @@ func apiPublishUpdateSwitch(c *gin.Context) {
result, err := published.Update(collectionFactory, out)
if err != nil {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("Unable to update: %s", err)
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("unable to update: %s", err)
}
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, out, b.ForceOverwrite, context.SkelPath())
if err != nil {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("Unable to update: %s", err)
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("unable to update: %s", err)
}
err = collection.Update(published)

View File

@@ -29,14 +29,14 @@ func reposListInAPIMode(localRepos map[string]utils.FileSystemPublishRoot) gin.H
return func(c *gin.Context) {
c.Writer.Header().Set("Content-Type", "text/html; charset=utf-8")
c.Writer.Flush()
c.Writer.WriteString("<pre>\n")
_, _ = c.Writer.WriteString("<pre>\n")
if len(localRepos) == 0 {
c.Writer.WriteString("<a href=\"-/\">default</a>\n")
_, _ = c.Writer.WriteString("<a href=\"-/\">default</a>\n")
}
for publishPrefix := range localRepos {
c.Writer.WriteString(fmt.Sprintf("<a href=\"%[1]s/\">%[1]s</a>\n", publishPrefix))
_, _ = c.Writer.WriteString(fmt.Sprintf("<a href=\"%[1]s/\">%[1]s</a>\n", publishPrefix))
}
c.Writer.WriteString("</pre>")
_, _ = c.Writer.WriteString("</pre>")
c.Writer.Flush()
}
}
@@ -76,7 +76,7 @@ func apiReposList(c *gin.Context) {
collectionFactory := context.NewCollectionFactory()
collection := collectionFactory.LocalRepoCollection()
collection.ForEach(func(r *deb.LocalRepo) error {
_ = collection.ForEach(func(r *deb.LocalRepo) error {
result = append(result, r)
return nil
})
@@ -570,7 +570,7 @@ func apiReposPackageFromDir(c *gin.Context) {
}
// attempt to remove dir, if it fails, that's fine: probably it's not empty
os.Remove(filepath.Join(context.UploadPath(), dirParam))
_ = os.Remove(filepath.Join(context.UploadPath(), dirParam))
}
if failedFiles == nil {
@@ -776,14 +776,8 @@ func apiReposIncludePackageFromFile(c *gin.Context) {
apiReposIncludePackageFromDir(c)
}
type reposIncludePackageFromDirReport struct {
Warnings []string
Added []string
Deleted []string
}
type reposIncludePackageFromDirResponse struct {
Report reposIncludePackageFromDirReport
Report *aptly.RecordingResultReporter
FailedFiles []string
}
@@ -836,7 +830,7 @@ func apiReposIncludePackageFromDir(c *gin.Context) {
}
var resources []string
if len(repoTemplate.Tree.Root.Nodes) > 1 {
if len(repoTemplate.Root.Nodes) > 1 {
resources = append(resources, task.AllLocalReposResourcesKey)
} else {
// repo template string is simple text so only use resource key of specific repository
@@ -876,7 +870,7 @@ func apiReposIncludePackageFromDir(c *gin.Context) {
if !noRemoveFiles {
// attempt to remove dir, if it fails, that's fine: probably it's not empty
os.Remove(filepath.Join(context.UploadPath(), dirParam))
_ = os.Remove(filepath.Join(context.UploadPath(), dirParam))
}
if failedFiles == nil {
@@ -896,9 +890,10 @@ func apiReposIncludePackageFromDir(c *gin.Context) {
out.Printf("Failed files: %s\n", strings.Join(failedFiles, ", "))
}
return &task.ProcessReturnValue{Code: http.StatusOK, Value: gin.H{
"Report": reporter,
"FailedFiles": failedFiles,
}}, nil
ret := reposIncludePackageFromDirResponse{
Report: reporter,
FailedFiles: failedFiles,
}
return &task.ProcessReturnValue{Code: http.StatusOK, Value: ret}, nil
})
}

View File

@@ -215,7 +215,7 @@ func Router(c *ctx.AptlyContext) http.Handler {
api.GET("/graph.:ext", apiGraph)
}
{
api.POST("/db/cleanup", apiDbCleanup)
api.POST("/db/cleanup", apiDBCleanup)
}
{
api.GET("/tasks", apiTasksList)

View File

@@ -33,7 +33,7 @@ func apiSnapshotsList(c *gin.Context) {
}
result := []*deb.Snapshot{}
collection.ForEachSorted(SortMethodString, func(snapshot *deb.Snapshot) error {
_ = collection.ForEachSorted(SortMethodString, func(snapshot *deb.Snapshot) error {
result = append(result, snapshot)
return nil
})
@@ -555,7 +555,7 @@ func apiSnapshotsMerge(c *gin.Context) {
}
if len(body.Sources) < 1 {
AbortWithJSONError(c, http.StatusBadRequest, fmt.Errorf("At least one source snapshot is required"))
AbortWithJSONError(c, http.StatusBadRequest, fmt.Errorf("at least one source snapshot is required"))
return
}
@@ -765,7 +765,7 @@ func apiSnapshotsPull(c *gin.Context) {
addedPackages := []string{}
alreadySeen := map[string]bool{}
destinationPackageList.ForEachIndexed(func(pkg *deb.Package) error {
_ = destinationPackageList.ForEachIndexed(func(pkg *deb.Package) error {
key := pkg.Architecture + "_" + pkg.Name
_, seen := alreadySeen[key]
@@ -781,7 +781,7 @@ func apiSnapshotsPull(c *gin.Context) {
// If !allMatches, add only first matching name-arch package
if !seen || allMatches {
toPackageList.Add(pkg)
_ = toPackageList.Add(pkg)
addedPackages = append(addedPackages, pkg.String())
}

View File

@@ -1,4 +1,4 @@
package aptly
// Default aptly.conf (filled in at link time)
// AptlyConf holds the default aptly.conf (filled in at link time)
var AptlyConf []byte

View File

@@ -29,7 +29,7 @@ func NewPackagePool(accountName, accountKey, container, prefix, endpoint string)
return &PackagePool{az: azctx}, nil
}
// String
// String returns a string representation of the package pool.
func (pool *PackagePool) String() string {
return pool.az.String()
}
@@ -104,7 +104,7 @@ func (pool *PackagePool) Open(path string) (aptly.ReadSeekerCloser, error) {
if err != nil {
return nil, errors.Wrapf(err, "error creating tempfile for %s", path)
}
defer os.Remove(temp.Name())
defer func() { _ = os.Remove(temp.Name()) }()
_, err = pool.az.client.DownloadFile(context.TODO(), pool.az.container, path, temp, nil)
if err != nil {
@@ -156,7 +156,7 @@ func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.Check
if err != nil {
return "", err
}
defer source.Close()
defer func() { _ = source.Close() }()
err = pool.az.putFile(path, source, checksums.MD5)
if err != nil {

View File

@@ -2,7 +2,7 @@ package azure
import (
"context"
"io/ioutil"
"io"
"os"
"path/filepath"
"runtime"
@@ -69,8 +69,8 @@ func (s *PackagePoolSuite) TestFilepathList(c *C) {
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{})
s.pool.Import(s.debFile, "a.deb", &utils.ChecksumInfo{}, false, s.cs)
s.pool.Import(s.debFile, "b.deb", &utils.ChecksumInfo{}, false, s.cs)
_, _ = s.pool.Import(s.debFile, "a.deb", &utils.ChecksumInfo{}, false, s.cs)
_, _ = s.pool.Import(s.debFile, "b.deb", &utils.ChecksumInfo{}, false, s.cs)
list, err = s.pool.FilepathList(nil)
c.Check(err, IsNil)
@@ -81,8 +81,8 @@ func (s *PackagePoolSuite) TestFilepathList(c *C) {
}
func (s *PackagePoolSuite) TestRemove(c *C) {
s.pool.Import(s.debFile, "a.deb", &utils.ChecksumInfo{}, false, s.cs)
s.pool.Import(s.debFile, "b.deb", &utils.ChecksumInfo{}, false, s.cs)
_, _ = s.pool.Import(s.debFile, "a.deb", &utils.ChecksumInfo{}, false, s.cs)
_, _ = s.pool.Import(s.debFile, "b.deb", &utils.ChecksumInfo{}, false, s.cs)
size, err := s.pool.Remove("c7/6b/4bd12fd92e4dfe1b55b18a67a669_a.deb")
c.Check(err, IsNil)
@@ -247,7 +247,7 @@ func (s *PackagePoolSuite) TestOpen(c *C) {
f, err := s.pool.Open(path)
c.Assert(err, IsNil)
contents, err := ioutil.ReadAll(f)
contents, err := io.ReadAll(f)
c.Assert(err, IsNil)
c.Check(len(contents), Equals, 2738)
c.Check(f.Close(), IsNil)

View File

@@ -18,7 +18,7 @@ import (
// PublishedStorage abstract file system with published files (actually hosted on Azure)
type PublishedStorage struct {
prefix string
// FIXME: the prefix field appears to be unused — confirm and either remove it or wire it up. prefix string
az *azContext
pathCache map[string]map[string]string
}
@@ -38,7 +38,7 @@ func NewPublishedStorage(accountName, accountKey, container, prefix, endpoint st
return &PublishedStorage{az: azctx}, nil
}
// String
// String returns a string representation of the published storage.
func (storage *PublishedStorage) String() string {
return storage.az.String()
}
@@ -65,7 +65,7 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
if err != nil {
return err
}
defer source.Close()
defer func() { _ = source.Close() }()
err = storage.az.putFile(path, source, sourceMD5)
if err != nil {
@@ -158,7 +158,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
if err != nil {
return err
}
defer source.Close()
defer func() { _ = source.Close() }()
err = storage.az.putFile(relFilePath, source, sourceMD5)
if err == nil {
@@ -193,7 +193,9 @@ func (storage *PublishedStorage) internalCopyOrMoveBlob(src, dst string, metadat
if err != nil {
return fmt.Errorf("error acquiring lease on source blob %s", src)
}
defer blobLeaseClient.BreakLease(context.Background(), &lease.BlobBreakOptions{BreakPeriod: to.Ptr(int32(60))})
defer func() {
_, _ = blobLeaseClient.BreakLease(context.Background(), &lease.BlobBreakOptions{BreakPeriod: to.Ptr(int32(60))})
}()
dstBlobClient := containerClient.NewBlobClient(dst)
copyResp, err := dstBlobClient.StartCopyFromURL(context.Background(), srcBlobClient.URL(), &blob.StartCopyFromURLOptions{

View File

@@ -4,7 +4,7 @@ import (
"context"
"crypto/md5"
"crypto/rand"
"io/ioutil"
"io"
"os"
"path/filepath"
"bytes"
@@ -36,7 +36,7 @@ func randString(n int) string {
}
const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, n)
rand.Read(bytes)
_, _ = rand.Read(bytes)
for i, b := range bytes {
bytes[i] = alphanum[b%byte(len(alphanum))]
}
@@ -87,7 +87,7 @@ func (s *PublishedStorageSuite) TearDownTest(c *C) {
func (s *PublishedStorageSuite) GetFile(c *C, path string) []byte {
resp, err := s.storage.az.client.DownloadStream(context.Background(), s.storage.az.container, path, nil)
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(resp.Body)
data, err := io.ReadAll(resp.Body)
c.Assert(err, IsNil)
return data
}
@@ -121,7 +121,7 @@ func (s *PublishedStorageSuite) TestPutFile(c *C) {
filename := "a/b.txt"
dir := c.MkDir()
err := ioutil.WriteFile(filepath.Join(dir, "a"), content, 0644)
err := os.WriteFile(filepath.Join(dir, "a"), content, 0644)
c.Assert(err, IsNil)
err = s.storage.PutFile(filename, filepath.Join(dir, "a"))
@@ -140,7 +140,7 @@ func (s *PublishedStorageSuite) TestPutFilePlus(c *C) {
filename := "a/b+c.txt"
dir := c.MkDir()
err := ioutil.WriteFile(filepath.Join(dir, "a"), content, 0644)
err := os.WriteFile(filepath.Join(dir, "a"), content, 0644)
c.Assert(err, IsNil)
err = s.storage.PutFile(filename, filepath.Join(dir, "a"))
@@ -258,7 +258,7 @@ func (s *PublishedStorageSuite) TestRemoveDirsPlus(c *C) {
func (s *PublishedStorageSuite) TestRenameFile(c *C) {
dir := c.MkDir()
err := ioutil.WriteFile(filepath.Join(dir, "a"), []byte("Welcome to Azure!"), 0644)
err := os.WriteFile(filepath.Join(dir, "a"), []byte("Welcome to Azure!"), 0644)
c.Assert(err, IsNil)
err = s.storage.PutFile("source.txt", filepath.Join(dir, "a"))
@@ -280,18 +280,18 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
cs := files.NewMockChecksumStorage()
tmpFile1 := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
err := ioutil.WriteFile(tmpFile1, []byte("Contents"), 0644)
err := os.WriteFile(tmpFile1, []byte("Contents"), 0644)
c.Assert(err, IsNil)
cksum1 := utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}
tmpFile2 := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
err = ioutil.WriteFile(tmpFile2, []byte("Spam"), 0644)
err = os.WriteFile(tmpFile2, []byte("Spam"), 0644)
c.Assert(err, IsNil)
cksum2 := utils.ChecksumInfo{MD5: "e9dfd31cc505d51fc26975250750deab"}
tmpFile3 := filepath.Join(c.MkDir(), "netboot/boot.img.gz")
os.MkdirAll(filepath.Dir(tmpFile3), 0777)
err = ioutil.WriteFile(tmpFile3, []byte("Contents"), 0644)
_ = os.MkdirAll(filepath.Dir(tmpFile3), 0777)
err = os.WriteFile(tmpFile3, []byte("Contents"), 0644)
c.Assert(err, IsNil)
cksum3 := utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}

View File

@@ -46,7 +46,7 @@ func aptlyAPIServe(cmd *commander.Command, args []string) error {
}
if err == nil && len(listeners) == 1 {
listener := listeners[0]
defer listener.Close()
defer func() { _ = listener.Close() }()
fmt.Printf("\nTaking over web server at: %s (press Ctrl+C to quit)...\n", listener.Addr().String())
err = http.Serve(listener, api.Router(context))
if err != nil {
@@ -67,7 +67,7 @@ func aptlyAPIServe(cmd *commander.Command, args []string) error {
if _, ok := <-sigchan; ok {
fmt.Printf("\nShutdown signal received, waiting for background tasks...\n")
context.TaskList().Wait()
server.Shutdown(stdcontext.Background())
_ = server.Shutdown(stdcontext.Background())
}
})()
defer close(sigchan)
@@ -75,14 +75,14 @@ func aptlyAPIServe(cmd *commander.Command, args []string) error {
listenURL, err := url.Parse(listen)
if err == nil && listenURL.Scheme == "unix" {
file := listenURL.Path
os.Remove(file)
_ = os.Remove(file)
var listener net.Listener
listener, err = net.Listen("unix", file)
if err != nil {
return fmt.Errorf("failed to listen on: %s\n%s", file, err)
}
defer listener.Close()
defer func() { _ = listener.Close() }()
err = server.Serve(listener)
} else {

View File

@@ -97,7 +97,7 @@ package environment to new version.`,
Flag: *flag.NewFlagSet("aptly", flag.ExitOnError),
Subcommands: []*commander.Command{
makeCmdConfig(),
makeCmdDb(),
makeCmdDB(),
makeCmdGraph(),
makeCmdMirror(),
makeCmdRepo(),

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/smira/commander"
"gopkg.in/yaml.v3"
yaml "gopkg.in/yaml.v3"
)
func aptlyConfigShow(_ *commander.Command, _ []string) error {

View File

@@ -4,13 +4,13 @@ import (
"github.com/smira/commander"
)
func makeCmdDb() *commander.Command {
func makeCmdDB() *commander.Command {
return &commander.Command{
UsageLine: "db",
Short: "manage aptly's internal database and package pool",
Subcommands: []*commander.Command{
makeCmdDbCleanup(),
makeCmdDbRecover(),
makeCmdDBCleanup(),
makeCmdDBRecover(),
},
}
}

View File

@@ -12,7 +12,7 @@ import (
)
// aptly db cleanup
func aptlyDbCleanup(cmd *commander.Command, args []string) error {
func aptlyDBCleanup(cmd *commander.Command, args []string) error {
var err error
if len(args) != 0 {
@@ -48,7 +48,7 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
if verbose {
description := fmt.Sprintf("mirror %s", repo.Name)
repo.RefList().ForEach(func(key []byte) error {
_ = repo.RefList().ForEach(func(key []byte) error {
packageRefSources[string(key)] = append(packageRefSources[string(key)], description)
return nil
})
@@ -81,7 +81,7 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
if verbose {
description := fmt.Sprintf("local repo %s", repo.Name)
repo.RefList().ForEach(func(key []byte) error {
_ = repo.RefList().ForEach(func(key []byte) error {
packageRefSources[string(key)] = append(packageRefSources[string(key)], description)
return nil
})
@@ -113,7 +113,7 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
if verbose {
description := fmt.Sprintf("snapshot %s", snapshot.Name)
snapshot.RefList().ForEach(func(key []byte) error {
_ = snapshot.RefList().ForEach(func(key []byte) error {
packageRefSources[string(key)] = append(packageRefSources[string(key)], description)
return nil
})
@@ -146,7 +146,7 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
if verbose {
description := fmt.Sprintf("published repository %s:%s/%s component %s",
published.Storage, published.Prefix, published.Distribution, component)
published.RefList(component).ForEach(func(key []byte) error {
_ = published.RefList(component).ForEach(func(key []byte) error {
packageRefSources[string(key)] = append(packageRefSources[string(key)], description)
return nil
})
@@ -291,9 +291,9 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
return err
}
func makeCmdDbCleanup() *commander.Command {
func makeCmdDBCleanup() *commander.Command {
cmd := &commander.Command{
Run: aptlyDbCleanup,
Run: aptlyDBCleanup,
UsageLine: "cleanup",
Short: "cleanup DB and package pool",
Long: `

View File

@@ -7,7 +7,7 @@ import (
)
// aptly db recover
func aptlyDbRecover(cmd *commander.Command, args []string) error {
func aptlyDBRecover(cmd *commander.Command, args []string) error {
var err error
if len(args) != 0 {
@@ -21,9 +21,9 @@ func aptlyDbRecover(cmd *commander.Command, args []string) error {
return err
}
func makeCmdDbRecover() *commander.Command {
func makeCmdDBRecover() *commander.Command {
cmd := &commander.Command{
Run: aptlyDbRecover,
Run: aptlyDBRecover,
UsageLine: "recover",
Short: "recover DB after crash",
Long: `

View File

@@ -38,8 +38,8 @@ func aptlyGraph(cmd *commander.Command, args []string) error {
if err != nil {
return err
}
tempfile.Close()
os.Remove(tempfile.Name())
_ = tempfile.Close()
_ = os.Remove(tempfile.Name())
format := context.Flags().Lookup("format").Value.String()
output := context.Flags().Lookup("output").Value.String()

View File

@@ -20,7 +20,7 @@ func getVerifier(flags *flag.FlagSet) (pgp.Verifier, error) {
verifier.AddKeyring(keyRing)
}
err := verifier.InitKeyring(ignoreSignatures == false) // be verbose only if verifying signatures is requested
err := verifier.InitKeyring(!ignoreSignatures) // be verbose only if verifying signatures is requested
if err != nil {
return nil, err
}

View File

@@ -32,7 +32,7 @@ func aptlyMirrorListTxt(cmd *commander.Command, _ []string) error {
repos := make([]string, collectionFactory.RemoteRepoCollection().Len())
i := 0
collectionFactory.RemoteRepoCollection().ForEach(func(repo *deb.RemoteRepo) error {
_ = collectionFactory.RemoteRepoCollection().ForEach(func(repo *deb.RemoteRepo) error {
if raw {
repos[i] = repo.Name
} else {
@@ -42,7 +42,7 @@ func aptlyMirrorListTxt(cmd *commander.Command, _ []string) error {
return nil
})
context.CloseDatabase()
_ = context.CloseDatabase()
sort.Strings(repos)
@@ -70,13 +70,13 @@ func aptlyMirrorListJSON(_ *commander.Command, _ []string) error {
repos := make([]*deb.RemoteRepo, context.NewCollectionFactory().RemoteRepoCollection().Len())
i := 0
context.NewCollectionFactory().RemoteRepoCollection().ForEach(func(repo *deb.RemoteRepo) error {
_ = context.NewCollectionFactory().RemoteRepoCollection().ForEach(func(repo *deb.RemoteRepo) error {
repos[i] = repo
i++
return nil
})
context.CloseDatabase()
_ = context.CloseDatabase()
sort.Slice(repos, func(i, j int) bool {
return repos[i].Name < repos[j].Name

View File

@@ -86,7 +86,7 @@ func aptlyMirrorShowTxt(_ *commander.Command, args []string) error {
if repo.LastDownloadDate.IsZero() {
fmt.Printf("Unable to show package list, mirror hasn't been downloaded yet.\n")
} else {
ListPackagesRefList(repo.RefList(), collectionFactory)
_ = ListPackagesRefList(repo.RefList(), collectionFactory)
}
}
@@ -119,7 +119,7 @@ func aptlyMirrorShowJSON(_ *commander.Command, args []string) error {
}
list.PrepareIndex()
list.ForEachIndexed(func(p *deb.Package) error {
_ = list.ForEachIndexed(func(p *deb.Package) error {
repo.Packages = append(repo.Packages, p.GetFullName())
return nil
})

View File

@@ -101,7 +101,7 @@ func aptlyMirrorUpdate(cmd *commander.Command, args []string) error {
err = context.ReOpenDatabase()
if err == nil {
repo.MarkAsIdle()
collectionFactory.RemoteRepoCollection().Update(repo)
_ = collectionFactory.RemoteRepoCollection().Update(repo)
}
}()
@@ -173,7 +173,7 @@ func aptlyMirrorUpdate(cmd *commander.Command, args []string) error {
file, e = os.CreateTemp("", task.File.Filename)
if e == nil {
task.TempDownPath = file.Name()
file.Close()
_ = file.Close()
}
}
if e != nil {
@@ -261,7 +261,7 @@ func aptlyMirrorUpdate(cmd *commander.Command, args []string) error {
return fmt.Errorf("unable to update: download errors:\n %s", strings.Join(errors, "\n "))
}
repo.FinalizeDownload(collectionFactory, context.Progress())
_ = repo.FinalizeDownload(collectionFactory, context.Progress())
err = collectionFactory.RemoteRepoCollection().Update(repo)
if err != nil {
return fmt.Errorf("unable to update: %s", err)

View File

@@ -40,7 +40,7 @@ func aptlyPackageSearch(cmd *commander.Command, args []string) error {
}
format := context.Flags().Lookup("format").Value.String()
PrintPackageList(result, format, "")
_ = PrintPackageList(result, format, "")
return err
}

View File

@@ -84,8 +84,8 @@ func aptlyPackageShow(cmd *commander.Command, args []string) error {
result := q.Query(collectionFactory.PackageCollection())
err = result.ForEach(func(p *deb.Package) error {
p.Stanza().WriteTo(w, p.IsSource, false, false)
w.Flush()
_ = p.Stanza().WriteTo(w, p.IsSource, false, false)
_ = w.Flush()
fmt.Printf("\n")
if withFiles {
@@ -109,7 +109,7 @@ func aptlyPackageShow(cmd *commander.Command, args []string) error {
if withReferences {
fmt.Printf("References to package:\n")
printReferencesTo(p, collectionFactory)
_ = printReferencesTo(p, collectionFactory)
fmt.Printf("\n")
}

View File

@@ -53,7 +53,7 @@ func aptlyPublishListTxt(cmd *commander.Command, _ []string) error {
return fmt.Errorf("unable to load list of repos: %s", err)
}
context.CloseDatabase()
_ = context.CloseDatabase()
sort.Strings(published)
@@ -99,7 +99,7 @@ func aptlyPublishListJSON(_ *commander.Command, _ []string) error {
return fmt.Errorf("unable to load list of repos: %s", err)
}
context.CloseDatabase()
_ = context.CloseDatabase()
sort.Slice(repos, func(i, j int) bool {
return repos[i].GetPath() < repos[j].GetPath()

View File

@@ -156,7 +156,7 @@ func aptlyPublishSnapshotOrRepo(cmd *commander.Command, args []string) error {
duplicate := collectionFactory.PublishedRepoCollection().CheckDuplicate(published)
if duplicate != nil {
collectionFactory.PublishedRepoCollection().LoadComplete(duplicate, collectionFactory)
_ = collectionFactory.PublishedRepoCollection().LoadComplete(duplicate, collectionFactory)
return fmt.Errorf("prefix/distribution already used by another published repo: %s", duplicate)
}

View File

@@ -32,7 +32,7 @@ func aptlyRepoListTxt(cmd *commander.Command, _ []string) error {
collectionFactory := context.NewCollectionFactory()
repos := make([]string, collectionFactory.LocalRepoCollection().Len())
i := 0
collectionFactory.LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {
_ = collectionFactory.LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {
if raw {
repos[i] = repo.Name
} else {
@@ -47,7 +47,7 @@ func aptlyRepoListTxt(cmd *commander.Command, _ []string) error {
return nil
})
context.CloseDatabase()
_ = context.CloseDatabase()
sort.Strings(repos)
@@ -76,7 +76,7 @@ func aptlyRepoListJSON(_ *commander.Command, _ []string) error {
repos := make([]*deb.LocalRepo, context.NewCollectionFactory().LocalRepoCollection().Len())
i := 0
context.NewCollectionFactory().LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {
_ = context.NewCollectionFactory().LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {
e := context.NewCollectionFactory().LocalRepoCollection().LoadComplete(repo)
if e != nil {
return e
@@ -87,7 +87,7 @@ func aptlyRepoListJSON(_ *commander.Command, _ []string) error {
return nil
})
context.CloseDatabase()
_ = context.CloseDatabase()
sort.Slice(repos, func(i, j int) bool {
return repos[i].Name < repos[j].Name

View File

@@ -54,7 +54,7 @@ func aptlyRepoRemove(cmd *commander.Command, args []string) error {
return fmt.Errorf("unable to remove: %s", err)
}
toRemove.ForEach(func(p *deb.Package) error {
_ = toRemove.ForEach(func(p *deb.Package) error {
list.Remove(p)
context.Progress().ColoredPrintf("@r[-]@| %s removed", p)
return nil

View File

@@ -52,7 +52,7 @@ func aptlyRepoShowTxt(_ *commander.Command, args []string) error {
withPackages := context.Flags().Lookup("with-packages").Value.Get().(bool)
if withPackages {
ListPackagesRefList(repo.RefList(), collectionFactory)
_ = ListPackagesRefList(repo.RefList(), collectionFactory)
}
return err

View File

@@ -33,7 +33,7 @@ func aptlySnapshotListTxt(cmd *commander.Command, _ []string) error {
collection := collectionFactory.SnapshotCollection()
if raw {
collection.ForEachSorted(sortMethodString, func(snapshot *deb.Snapshot) error {
_ = collection.ForEachSorted(sortMethodString, func(snapshot *deb.Snapshot) error {
fmt.Printf("%s\n", snapshot.Name)
return nil
})
@@ -68,7 +68,7 @@ func aptlySnapshotListJSON(cmd *commander.Command, _ []string) error {
jsonSnapshots := make([]*deb.Snapshot, collection.Len())
i := 0
collection.ForEachSorted(sortMethodString, func(snapshot *deb.Snapshot) error {
_ = collection.ForEachSorted(sortMethodString, func(snapshot *deb.Snapshot) error {
jsonSnapshots[i] = snapshot
i++
return nil

View File

@@ -116,7 +116,7 @@ func aptlySnapshotPull(cmd *commander.Command, args []string) error {
alreadySeen := map[string]bool{}
result.ForEachIndexed(func(pkg *deb.Package) error {
_ = result.ForEachIndexed(func(pkg *deb.Package) error {
key := pkg.Architecture + "_" + pkg.Name
_, seen := alreadySeen[key]
@@ -132,7 +132,7 @@ func aptlySnapshotPull(cmd *commander.Command, args []string) error {
// If !allMatches, add only first matching name-arch package
if !seen || allMatches {
packageList.Add(pkg)
_ = packageList.Add(pkg)
context.Progress().ColoredPrintf("@g[+]@| %s added", pkg)
}

View File

@@ -123,7 +123,7 @@ func aptlySnapshotMirrorRepoSearch(cmd *commander.Command, args []string) error
}
format := context.Flags().Lookup("format").Value.String()
PrintPackageList(result, format, "")
_ = PrintPackageList(result, format, "")
return err
}

View File

@@ -79,7 +79,7 @@ func aptlySnapshotShowTxt(_ *commander.Command, args []string) error {
withPackages := context.Flags().Lookup("with-packages").Value.Get().(bool)
if withPackages {
ListPackagesRefList(snapshot.RefList(), collectionFactory)
_ = ListPackagesRefList(snapshot.RefList(), collectionFactory)
}
return err
@@ -139,7 +139,7 @@ func aptlySnapshotShowJSON(_ *commander.Command, args []string) error {
}
list.PrepareIndex()
list.ForEachIndexed(func(p *deb.Package) error {
_ = list.ForEachIndexed(func(p *deb.Package) error {
snapshot.Packages = append(snapshot.Packages, p.GetFullName())
return nil
})

View File

@@ -6,7 +6,7 @@ import (
"os"
"strings"
"github.com/mattn/go-shellwords"
shellwords "github.com/mattn/go-shellwords"
"github.com/smira/commander"
)
@@ -31,7 +31,7 @@ func aptlyTaskRun(cmd *commander.Command, args []string) error {
if err != nil {
return err
}
defer file.Close()
defer func() { _ = file.Close() }()
scanner := bufio.NewScanner(file)

View File

@@ -115,7 +115,7 @@ func (context *AptlyContext) config() *utils.ConfigStructure {
if err != nil {
fmt.Fprintf(os.Stderr, "Config file not found, creating default config at %s\n\n", homeLocation)
utils.SaveConfigRaw(homeLocation, aptly.AptlyConf)
_ = utils.SaveConfigRaw(homeLocation, aptly.AptlyConf)
err = utils.LoadConfig(homeLocation, &utils.Config)
if err != nil {
Fatal(fmt.Errorf("error loading config file %s: %s", homeLocation, err))
@@ -241,7 +241,7 @@ func (context *AptlyContext) newDownloader(progress aptly.Progress) aptly.Downlo
// If flag is defined prefer it to global setting
maxTries = maxTriesFlag.Value.Get().(int)
}
var downloader string = context.config().Downloader
var downloader = context.config().Downloader
downloaderFlag := context.flags.Lookup("downloader")
if downloaderFlag != nil {
downloader = downloaderFlag.Value.String()
@@ -303,8 +303,8 @@ func (context *AptlyContext) _database() (database.Storage, error) {
switch context.config().DatabaseBackend.Type {
case "leveldb":
dbPath := filepath.Join(context.config().GetRootDir(), "db")
if len(context.config().DatabaseBackend.DbPath) != 0 {
dbPath = context.config().DatabaseBackend.DbPath
if len(context.config().DatabaseBackend.DBPath) != 0 {
dbPath = context.config().DatabaseBackend.DBPath
}
context.database, err = goleveldb.NewDB(dbPath)
case "etcd":
@@ -452,7 +452,7 @@ func (context *AptlyContext) GetPublishedStorage(name string) aptly.PublishedSto
} else if strings.HasPrefix(name, "azure:") {
params, ok := context.config().AzurePublishRoots[name[6:]]
if !ok {
Fatal(fmt.Errorf("Published Azure storage %v not configured", name[6:]))
Fatal(fmt.Errorf("published Azure storage %v not configured", name[6:]))
}
var err error
@@ -597,17 +597,17 @@ func (context *AptlyContext) Shutdown() {
if aptly.EnableDebug {
if context.fileMemProfile != nil {
pprof.WriteHeapProfile(context.fileMemProfile)
context.fileMemProfile.Close()
_ = pprof.WriteHeapProfile(context.fileMemProfile)
_ = context.fileMemProfile.Close()
context.fileMemProfile = nil
}
if context.fileCPUProfile != nil {
pprof.StopCPUProfile()
context.fileCPUProfile.Close()
_ = context.fileCPUProfile.Close()
context.fileCPUProfile = nil
}
if context.fileMemProfile != nil {
context.fileMemProfile.Close()
_ = context.fileMemProfile.Close()
context.fileMemProfile = nil
}
}
@@ -615,7 +615,7 @@ func (context *AptlyContext) Shutdown() {
context.taskList.Stop()
}
if context.database != nil {
context.database.Close()
_ = context.database.Close()
context.database = nil
}
if context.downloader != nil {
@@ -660,7 +660,7 @@ func NewContext(flags *flag.FlagSet) (*AptlyContext, error) {
if err != nil {
return nil, err
}
pprof.StartCPUProfile(context.fileCPUProfile)
_ = pprof.StartCPUProfile(context.fileCPUProfile)
}
memprofile := flags.Lookup("memprofile").Value.String()
@@ -680,7 +680,7 @@ func NewContext(flags *flag.FlagSet) (*AptlyContext, error) {
return nil, err
}
context.fileMemStats.WriteString("# Time\tHeapSys\tHeapAlloc\tHeapIdle\tHeapReleased\n")
_, _ = context.fileMemStats.WriteString("# Time\tHeapSys\tHeapAlloc\tHeapIdle\tHeapReleased\n")
go func() {
var stats runtime.MemStats
@@ -690,7 +690,7 @@ func NewContext(flags *flag.FlagSet) (*AptlyContext, error) {
for {
runtime.ReadMemStats(&stats)
if context.fileMemStats != nil {
context.fileMemStats.WriteString(fmt.Sprintf("%d\t%d\t%d\t%d\t%d\n",
_, _ = context.fileMemStats.WriteString(fmt.Sprintf("%d\t%d\t%d\t%d\t%d\n",
(time.Now().UnixNano()-start)/1000000, stats.HeapSys, stats.HeapAlloc, stats.HeapIdle, stats.HeapReleased))
time.Sleep(interval)
} else {

View File

@@ -14,7 +14,6 @@ func Test(t *testing.T) {
}
type EtcDDBSuite struct {
url string
db database.Storage
}
@@ -67,17 +66,17 @@ func (s *EtcDDBSuite) TestDelete(c *C) {
func (s *EtcDDBSuite) TestByPrefix(c *C) {
//c.Check(s.db.FetchByPrefix([]byte{0x80}), DeepEquals, [][]byte{})
s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
s.db.Put([]byte{0x80, 0x03}, []byte{0x03})
s.db.Put([]byte{0x80, 0x02}, []byte{0x02})
_ = s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
_ = s.db.Put([]byte{0x80, 0x03}, []byte{0x03})
_ = s.db.Put([]byte{0x80, 0x02}, []byte{0x02})
c.Check(s.db.FetchByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x01}, {0x02}, {0x03}})
c.Check(s.db.KeysByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x80, 0x01}, {0x80, 0x02}, {0x80, 0x03}})
s.db.Put([]byte{0x90, 0x01}, []byte{0x04})
_ = s.db.Put([]byte{0x90, 0x01}, []byte{0x04})
c.Check(s.db.FetchByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x01}, {0x02}, {0x03}})
c.Check(s.db.KeysByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x80, 0x01}, {0x80, 0x02}, {0x80, 0x03}})
s.db.Put([]byte{0x00, 0x01}, []byte{0x05})
_ = s.db.Put([]byte{0x00, 0x01}, []byte{0x05})
c.Check(s.db.FetchByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x01}, {0x02}, {0x03}})
c.Check(s.db.KeysByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x80, 0x01}, {0x80, 0x02}, {0x80, 0x03}})
@@ -109,7 +108,7 @@ func (s *EtcDDBSuite) TestHasPrefix(c *C) {
//c.Check(s.db.HasPrefix([]byte(nil)), Equals, false)
//c.Check(s.db.HasPrefix([]byte{0x80}), Equals, false)
s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
_ = s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
c.Check(s.db.HasPrefix([]byte(nil)), Equals, true)
c.Check(s.db.HasPrefix([]byte{0x80}), Equals, true)
@@ -124,13 +123,15 @@ func (s *EtcDDBSuite) TestTransactionCommit(c *C) {
value2 = []byte("value2")
)
transaction, err := s.db.OpenTransaction()
c.Assert(err, IsNil)
err = s.db.Put(key, value)
c.Assert(err, IsNil)
c.Assert(err, IsNil)
transaction.Put(key2, value2)
_ = transaction.Put(key2, value2)
v, err := s.db.Get(key)
c.Assert(err, IsNil)
c.Check(v, DeepEquals, value)
err = transaction.Delete(key)
c.Assert(err, IsNil)

View File

@@ -145,7 +145,7 @@ func (s *EtcDStorage) Close() error {
return err
}
// Reopen tries to open (re-open) the database
// Open returns the database
func (s *EtcDStorage) Open() error {
if s.db != nil {
return nil

View File

@@ -67,8 +67,7 @@ func (t *transaction) Commit() (err error) {
// Discard is safe to call after Commit(), it would be no-op
func (t *transaction) Discard() {
t.ops = []clientv3.Op{}
t.tmpdb.Drop()
return
_ = t.tmpdb.Drop()
}
// transaction should implement database.Transaction

View File

@@ -51,8 +51,8 @@ func RecoverDB(path string) error {
return err
}
db.Close()
stor.Close()
_ = db.Close()
_ = stor.Close()
return nil
}

View File

@@ -119,17 +119,17 @@ func (s *LevelDBSuite) TestDelete(c *C) {
func (s *LevelDBSuite) TestByPrefix(c *C) {
c.Check(s.db.FetchByPrefix([]byte{0x80}), DeepEquals, [][]byte{})
s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
s.db.Put([]byte{0x80, 0x03}, []byte{0x03})
s.db.Put([]byte{0x80, 0x02}, []byte{0x02})
_ = s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
_ = s.db.Put([]byte{0x80, 0x03}, []byte{0x03})
_ = s.db.Put([]byte{0x80, 0x02}, []byte{0x02})
c.Check(s.db.FetchByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x01}, {0x02}, {0x03}})
c.Check(s.db.KeysByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x80, 0x01}, {0x80, 0x02}, {0x80, 0x03}})
s.db.Put([]byte{0x90, 0x01}, []byte{0x04})
_ = s.db.Put([]byte{0x90, 0x01}, []byte{0x04})
c.Check(s.db.FetchByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x01}, {0x02}, {0x03}})
c.Check(s.db.KeysByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x80, 0x01}, {0x80, 0x02}, {0x80, 0x03}})
s.db.Put([]byte{0x00, 0x01}, []byte{0x05})
_ = s.db.Put([]byte{0x00, 0x01}, []byte{0x05})
c.Check(s.db.FetchByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x01}, {0x02}, {0x03}})
c.Check(s.db.KeysByPrefix([]byte{0x80}), DeepEquals, [][]byte{{0x80, 0x01}, {0x80, 0x02}, {0x80, 0x03}})
@@ -161,7 +161,7 @@ func (s *LevelDBSuite) TestHasPrefix(c *C) {
c.Check(s.db.HasPrefix([]byte(nil)), Equals, false)
c.Check(s.db.HasPrefix([]byte{0x80}), Equals, false)
s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
_ = s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
c.Check(s.db.HasPrefix([]byte(nil)), Equals, true)
c.Check(s.db.HasPrefix([]byte{0x80}), Equals, true)
@@ -180,8 +180,8 @@ func (s *LevelDBSuite) TestBatch(c *C) {
c.Assert(err, IsNil)
batch := s.db.CreateBatch()
batch.Put(key2, value2)
batch.Delete(key)
_ = batch.Put(key2, value2)
_ = batch.Delete(key)
v, err := s.db.Get(key)
c.Check(err, IsNil)
@@ -202,9 +202,9 @@ func (s *LevelDBSuite) TestBatch(c *C) {
}
func (s *LevelDBSuite) TestCompactDB(c *C) {
s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
s.db.Put([]byte{0x80, 0x03}, []byte{0x03})
s.db.Put([]byte{0x80, 0x02}, []byte{0x02})
_ = s.db.Put([]byte{0x80, 0x01}, []byte{0x01})
_ = s.db.Put([]byte{0x80, 0x03}, []byte{0x03})
_ = s.db.Put([]byte{0x80, 0x02}, []byte{0x02})
c.Check(s.db.CompactDB(), IsNil)
}

View File

@@ -60,14 +60,14 @@ func (c *Changes) VerifyAndParse(acceptUnsigned, ignoreSignature bool, verifier
if err != nil {
return err
}
defer input.Close()
defer func() { _ = input.Close() }()
isClearSigned, err := verifier.IsClearSigned(input)
if err != nil {
return err
}
input.Seek(0, 0)
_, _ = input.Seek(0, 0)
if !isClearSigned && !acceptUnsigned {
return fmt.Errorf(".changes file is not signed and unsigned processing hasn't been enabled")
@@ -79,7 +79,7 @@ func (c *Changes) VerifyAndParse(acceptUnsigned, ignoreSignature bool, verifier
if err != nil {
return err
}
input.Seek(0, 0)
_, _ = input.Seek(0, 0)
c.SignatureKeys = keyInfo.GoodKeys
}
@@ -91,7 +91,7 @@ func (c *Changes) VerifyAndParse(acceptUnsigned, ignoreSignature bool, verifier
if err != nil {
return err
}
defer text.Close()
defer func() { _ = text.Close() }()
} else {
text = input
}
@@ -307,7 +307,7 @@ func ImportChangesFiles(changesFiles []string, reporter aptly.ResultReporter, ac
if err != nil {
failedFiles = append(failedFiles, path)
reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
changes.Cleanup()
_ = changes.Cleanup()
continue
}
@@ -315,7 +315,7 @@ func ImportChangesFiles(changesFiles []string, reporter aptly.ResultReporter, ac
if err != nil {
failedFiles = append(failedFiles, path)
reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
changes.Cleanup()
_ = changes.Cleanup()
continue
}
@@ -334,7 +334,7 @@ func ImportChangesFiles(changesFiles []string, reporter aptly.ResultReporter, ac
if err != nil {
failedFiles = append(failedFiles, path)
reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
changes.Cleanup()
_ = changes.Cleanup()
continue
}
@@ -354,7 +354,7 @@ func ImportChangesFiles(changesFiles []string, reporter aptly.ResultReporter, ac
failedFiles = append(failedFiles, path)
reporter.Warning("changes file skipped due to uploaders config: %s, keys %#v: %s",
changes.ChangesName, changes.SignatureKeys, err)
changes.Cleanup()
_ = changes.Cleanup()
continue
}
}

View File

@@ -51,7 +51,7 @@ func (s *ChangesSuite) SetUpTest(c *C) {
func (s *ChangesSuite) TearDownTest(c *C) {
s.progress.Shutdown()
s.db.Close()
_ = s.db.Close()
}
func (s *ChangesSuite) TestParseAndVerify(c *C) {
@@ -108,13 +108,13 @@ func (s *ChangesSuite) TestImportChangesFiles(c *C) {
for _, path := range origFailedFiles {
filename := filepath.Join(s.Dir, filepath.Base(path))
utils.CopyFile(path, filename)
_ = utils.CopyFile(path, filename)
expectedFailedFiles = append(expectedFailedFiles, filename)
}
for _, path := range origProcessedFiles {
filename := filepath.Join(s.Dir, filepath.Base(path))
utils.CopyFile(path, filename)
_ = utils.CopyFile(path, filename)
expectedProcessedFiles = append(expectedProcessedFiles, filename)
}

View File

@@ -28,7 +28,7 @@ func (s *ChecksumCollectionSuite) SetUpTest(c *C) {
}
func (s *ChecksumCollectionSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *ChecksumCollectionSuite) TestFlow(c *C) {

View File

@@ -17,7 +17,7 @@ import (
"github.com/aptly-dev/aptly/pgp"
"github.com/kjk/lzma"
"github.com/klauspost/compress/zstd"
"github.com/smira/go-xz"
xz "github.com/smira/go-xz"
)
// Source kinds
@@ -35,7 +35,7 @@ func GetControlFileFromDeb(packageFile string) (Stanza, error) {
if err != nil {
return nil, err
}
defer file.Close()
defer func() { _ = file.Close() }()
library := ar.NewReader(file)
for {
@@ -66,14 +66,14 @@ func GetControlFileFromDeb(packageFile string) (Stanza, error) {
if err != nil {
return nil, errors.Wrapf(err, "unable to ungzip %s from %s", header.Name, packageFile)
}
defer ungzip.Close()
defer func() { _ = ungzip.Close() }()
tarInput = ungzip
case "control.tar.xz":
unxz, err := xz.NewReader(bufReader)
if err != nil {
return nil, errors.Wrapf(err, "unable to unxz %s from %s", header.Name, packageFile)
}
defer unxz.Close()
defer func() { _ = unxz.Close() }()
tarInput = unxz
case "control.tar.zst":
unzstd, err := zstd.NewReader(bufReader)
@@ -116,10 +116,10 @@ func GetControlFileFromDsc(dscFile string, verifier pgp.Verifier) (Stanza, error
if err != nil {
return nil, err
}
defer file.Close()
defer func() { _ = file.Close() }()
isClearSigned, err := verifier.IsClearSigned(file)
file.Seek(0, 0)
_, _ = file.Seek(0, 0)
if err != nil {
return nil, err
@@ -132,7 +132,7 @@ func GetControlFileFromDsc(dscFile string, verifier pgp.Verifier) (Stanza, error
if err != nil {
return nil, err
}
defer text.Close()
defer func() { _ = text.Close() }()
} else {
text = file
}
@@ -181,7 +181,7 @@ func GetContentsFromDeb(file io.Reader, packageFile string) ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "unable to ungzip data.tar.gz from %s", packageFile)
}
defer ungzip.Close()
defer func() { _ = ungzip.Close() }()
tarInput = ungzip
}
case "data.tar.bz2":
@@ -191,11 +191,11 @@ func GetContentsFromDeb(file io.Reader, packageFile string) ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "unable to unxz data.tar.xz from %s", packageFile)
}
defer unxz.Close()
defer func() { _ = unxz.Close() }()
tarInput = unxz
case "data.tar.lzma":
unlzma := lzma.NewReader(bufReader)
defer unlzma.Close()
defer func() { _ = unlzma.Close() }()
tarInput = unlzma
case "data.tar.zst":
unzstd, err := zstd.NewReader(bufReader)

View File

@@ -163,7 +163,7 @@ func (s *ControlFileSuite) TestCanonicalCase(c *C) {
func (s *ControlFileSuite) TestLongFields(c *C) {
f, err := os.Open("long.stanza")
c.Assert(err, IsNil)
defer f.Close()
defer func() { _ = f.Close() }()
r := NewControlFileReader(f, false, false)
stanza, e := r.ReadStanza()

View File

@@ -12,15 +12,15 @@ func BuildGraph(collectionFactory *CollectionFactory, layout string) (gographviz
var err error
graph := gographviz.NewEscape()
graph.SetDir(true)
graph.SetName("aptly")
_ = graph.SetDir(true)
_ = graph.SetName("aptly")
var labelStart string
var labelEnd string
switch layout {
case "vertical":
graph.AddAttr("aptly", "rankdir", "LR")
_ = graph.AddAttr("aptly", "rankdir", "LR")
labelStart = ""
labelEnd = ""
case "horizontal":
@@ -38,7 +38,7 @@ func BuildGraph(collectionFactory *CollectionFactory, layout string) (gographviz
return e
}
graph.AddNode("aptly", repo.UUID, map[string]string{
_ = graph.AddNode("aptly", repo.UUID, map[string]string{
"shape": "Mrecord",
"style": "filled",
"fillcolor": "darkgoldenrod1",
@@ -60,7 +60,7 @@ func BuildGraph(collectionFactory *CollectionFactory, layout string) (gographviz
return e
}
graph.AddNode("aptly", repo.UUID, map[string]string{
_ = graph.AddNode("aptly", repo.UUID, map[string]string{
"shape": "Mrecord",
"style": "filled",
"fillcolor": "mediumseagreen",
@@ -75,7 +75,7 @@ func BuildGraph(collectionFactory *CollectionFactory, layout string) (gographviz
return nil, err
}
collectionFactory.SnapshotCollection().ForEach(func(snapshot *Snapshot) error {
_ = collectionFactory.SnapshotCollection().ForEach(func(snapshot *Snapshot) error {
existingNodes[snapshot.UUID] = true
return nil
})
@@ -91,7 +91,7 @@ func BuildGraph(collectionFactory *CollectionFactory, layout string) (gographviz
description = "Snapshot from repo"
}
graph.AddNode("aptly", snapshot.UUID, map[string]string{
_ = graph.AddNode("aptly", snapshot.UUID, map[string]string{
"shape": "Mrecord",
"style": "filled",
"fillcolor": "cadetblue1",
@@ -103,7 +103,7 @@ func BuildGraph(collectionFactory *CollectionFactory, layout string) (gographviz
for _, uuid := range snapshot.SourceIDs {
_, exists := existingNodes[uuid]
if exists {
graph.AddEdge(uuid, snapshot.UUID, true, nil)
_ = graph.AddEdge(uuid, snapshot.UUID, true, nil)
}
}
}
@@ -114,8 +114,8 @@ func BuildGraph(collectionFactory *CollectionFactory, layout string) (gographviz
return nil, err
}
collectionFactory.PublishedRepoCollection().ForEach(func(repo *PublishedRepo) error {
graph.AddNode("aptly", repo.UUID, map[string]string{
_ = collectionFactory.PublishedRepoCollection().ForEach(func(repo *PublishedRepo) error {
_ = graph.AddNode("aptly", repo.UUID, map[string]string{
"shape": "Mrecord",
"style": "filled",
"fillcolor": "darkolivegreen1",
@@ -127,7 +127,7 @@ func BuildGraph(collectionFactory *CollectionFactory, layout string) (gographviz
for _, uuid := range repo.Sources {
_, exists := existingNodes[uuid]
if exists {
graph.AddEdge(uuid, repo.UUID, true, nil)
_ = graph.AddEdge(uuid, repo.UUID, true, nil)
}
}

View File

@@ -59,24 +59,24 @@ func (file *indexFile) Finalize(signer pgp.Signer) error {
if file.discardable {
return nil
}
file.BufWriter()
_, _ = file.BufWriter()
}
err := file.w.Flush()
if err != nil {
file.tempFile.Close()
_ = file.tempFile.Close()
return fmt.Errorf("unable to write to index file: %s", err)
}
if file.compressable {
err = utils.CompressFile(file.tempFile, file.onlyGzip || file.parent.skipBz2)
if err != nil {
file.tempFile.Close()
_ = file.tempFile.Close()
return fmt.Errorf("unable to compress index file: %s", err)
}
}
file.tempFile.Close()
_ = file.tempFile.Close()
exts := []string{""}
cksumExts := exts
@@ -220,11 +220,11 @@ func packageIndexByHash(file *indexFile, ext string, hash string, sum string) er
// If we managed to resolve the link target: delete it. This is the
// oldest physical index file we no longer need. Once we drop our
// old symlink we'll essentially forget about it existing at all.
file.parent.publishedStorage.Remove(linkTarget)
_ = file.parent.publishedStorage.Remove(linkTarget)
}
file.parent.publishedStorage.Remove(oldIndexPath)
_ = file.parent.publishedStorage.Remove(oldIndexPath)
}
file.parent.publishedStorage.RenameFile(indexPath, oldIndexPath)
_ = file.parent.publishedStorage.RenameFile(indexPath, oldIndexPath)
}
// create symlink

View File

@@ -438,7 +438,7 @@ func (l *PackageList) Scan(q PackageQuery) (result *PackageList) {
result = NewPackageListWithDuplicates(l.duplicatesAllowed, 0)
for _, pkg := range l.packages {
if q.Matches(pkg) {
result.Add(pkg)
_ = result.Add(pkg)
}
}
@@ -456,7 +456,7 @@ func (l *PackageList) SearchByKey(arch, name, version string) (result *PackageLi
pkg := l.packages["P"+arch+" "+name+" "+version]
if pkg != nil {
result.Add(pkg)
_ = result.Add(pkg)
}
return

View File

@@ -96,7 +96,7 @@ func (s *PackageListSuite) SetUpTest(c *C) {
{Name: "dpkg", Version: "1.7", Architecture: "source", SourceArchitecture: "any", IsSource: true, deps: &PackageDependencies{}},
}
for _, p := range s.packages {
s.il.Add(p)
_ = s.il.Add(p)
}
s.il.PrepareIndex()
@@ -110,7 +110,7 @@ func (s *PackageListSuite) SetUpTest(c *C) {
{Name: "app", Version: "3.0", Architecture: "amd64", deps: &PackageDependencies{PreDepends: []string{"dpkg >= 1.6)"}, Depends: []string{"lib (>> 0.9)", "data (>= 1.0)"}}},
}
for _, p := range s.packages2 {
s.il2.Add(p)
_ = s.il2.Add(p)
}
s.il2.PrepareIndex()
@@ -202,8 +202,8 @@ func (s *PackageListSuite) TestRemoveWhenIndexed(c *C) {
}
func (s *PackageListSuite) TestForeach(c *C) {
s.list.Add(s.p1)
s.list.Add(s.p3)
_ = s.list.Add(s.p1)
_ = s.list.Add(s.p3)
Len := 0
err := s.list.ForEach(func(*Package) error {
@@ -232,21 +232,21 @@ func (s *PackageListSuite) TestIndex(c *C) {
}
func (s *PackageListSuite) TestAppend(c *C) {
s.list.Add(s.p1)
s.list.Add(s.p3)
_ = s.list.Add(s.p1)
_ = s.list.Add(s.p3)
err := s.list.Append(s.il)
c.Check(err, IsNil)
c.Check(s.list.Len(), Equals, 16)
list := NewPackageList()
list.Add(s.p4)
_ = list.Add(s.p4)
err = s.list.Append(list)
c.Check(err, ErrorMatches, "package already exists and is different: .*")
s.list.PrepareIndex()
c.Check(func() { s.list.Append(s.il) }, Panics, "Append not supported when indexed")
c.Check(func() { _ = s.list.Append(s.il) }, Panics, "Append not supported when indexed")
}
func (s *PackageListSuite) TestSearch(c *C) {
@@ -312,7 +312,7 @@ func (s *PackageListSuite) TestSearch(c *C) {
func (s *PackageListSuite) TestFilter(c *C) {
c.Check(func() {
s.list.Filter(FilterOptions{
_, _ = s.list.Filter(FilterOptions{
Queries: []PackageQuery{&PkgQuery{"abcd", "0.3", "i386"}},
})
}, Panics, "list not indexed, can't filter")
@@ -479,7 +479,7 @@ func (s *PackageListSuite) TestVerifyDependencies(c *C) {
{Pkg: "mail-agent", Relation: VersionDontCare, Version: "", Architecture: "arm"}})
for _, p := range s.sourcePackages {
s.il.Add(p)
_ = s.il.Add(p)
}
missing, err = s.il.VerifyDependencies(DepFollowSource, []string{"i386", "amd64"}, s.il, nil)

View File

@@ -69,7 +69,7 @@ func (repo *LocalRepo) Encode() []byte {
var buf bytes.Buffer
encoder := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
encoder.Encode(repo)
_ = encoder.Encode(repo)
return buf.Bytes()
}
@@ -116,7 +116,7 @@ func (collection *LocalRepoCollection) search(filter func(*LocalRepo) bool, uniq
return result
}
collection.db.ProcessByPrefix([]byte("L"), func(_, blob []byte) error {
_ = collection.db.ProcessByPrefix([]byte("L"), func(_, blob []byte) error {
r := &LocalRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding local repo: %s\n", err)
@@ -159,9 +159,9 @@ func (collection *LocalRepoCollection) Add(repo *LocalRepo) error {
// Update stores updated information about repo in DB
func (collection *LocalRepoCollection) Update(repo *LocalRepo) error {
batch := collection.db.CreateBatch()
batch.Put(repo.Key(), repo.Encode())
_ = batch.Put(repo.Key(), repo.Encode())
if repo.packageRefs != nil {
batch.Put(repo.RefKey(), repo.packageRefs.Encode())
_ = batch.Put(repo.RefKey(), repo.packageRefs.Encode())
}
return batch.Write()
}
@@ -247,7 +247,7 @@ func (collection *LocalRepoCollection) Drop(repo *LocalRepo) error {
delete(collection.cache, repo.UUID)
batch := collection.db.CreateBatch()
batch.Delete(repo.Key())
batch.Delete(repo.RefKey())
_ = batch.Delete(repo.Key())
_ = batch.Delete(repo.RefKey())
return batch.Write()
}

View File

@@ -21,8 +21,8 @@ var _ = Suite(&LocalRepoSuite{})
func (s *LocalRepoSuite) SetUpTest(c *C) {
s.db, _ = goleveldb.NewOpenDB(c.MkDir())
s.list = NewPackageList()
s.list.Add(&Package{Name: "lib", Version: "1.7", Architecture: "i386"})
s.list.Add(&Package{Name: "app", Version: "1.9", Architecture: "amd64"})
_ = s.list.Add(&Package{Name: "lib", Version: "1.7", Architecture: "i386"})
_ = s.list.Add(&Package{Name: "app", Version: "1.9", Architecture: "amd64"})
s.reflist = NewPackageRefListFromPackageList(s.list)
@@ -31,7 +31,7 @@ func (s *LocalRepoSuite) SetUpTest(c *C) {
}
func (s *LocalRepoSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *LocalRepoSuite) TestString(c *C) {
@@ -88,14 +88,14 @@ func (s *LocalRepoCollectionSuite) SetUpTest(c *C) {
s.collection = NewLocalRepoCollection(s.db)
s.list = NewPackageList()
s.list.Add(&Package{Name: "lib", Version: "1.7", Architecture: "i386"})
s.list.Add(&Package{Name: "app", Version: "1.9", Architecture: "amd64"})
_ = s.list.Add(&Package{Name: "lib", Version: "1.7", Architecture: "i386"})
_ = s.list.Add(&Package{Name: "app", Version: "1.9", Architecture: "amd64"})
s.reflist = NewPackageRefListFromPackageList(s.list)
}
func (s *LocalRepoCollectionSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *LocalRepoCollectionSuite) TestAddByName(c *C) {
@@ -156,7 +156,7 @@ func (s *LocalRepoCollectionSuite) TestUpdateLoadComplete(c *C) {
func (s *LocalRepoCollectionSuite) TestForEachAndLen(c *C) {
repo := NewLocalRepo("local1", "Comment 1")
s.collection.Add(repo)
_ = s.collection.Add(repo)
count := 0
err := s.collection.ForEach(func(*LocalRepo) error {
@@ -178,10 +178,10 @@ func (s *LocalRepoCollectionSuite) TestForEachAndLen(c *C) {
func (s *LocalRepoCollectionSuite) TestDrop(c *C) {
repo1 := NewLocalRepo("local1", "Comment 1")
s.collection.Add(repo1)
_ = s.collection.Add(repo1)
repo2 := NewLocalRepo("local2", "Comment 2")
s.collection.Add(repo2)
_ = s.collection.Add(repo2)
r1, _ := s.collection.ByUUID(repo1.UUID)
c.Check(r1, Equals, repo1)
@@ -208,6 +208,6 @@ func (s *LocalRepoCollectionSuite) TestDropNonExisting(c *C) {
_, err := s.collection.ByUUID(repo.UUID)
c.Check(err, ErrorMatches, "local repo .* not found")
err = s.collection.Drop(repo)
_ = s.collection.Drop(repo)
c.Check(s.collection.Drop(repo), ErrorMatches, "local repo not found")
}

View File

@@ -565,7 +565,7 @@ func (p *Package) CalculateContents(packagePool aptly.PackagePool, progress aptl
}
return nil, err
}
defer reader.Close()
defer func() { _ = reader.Close() }()
contents, err := GetContentsFromDeb(reader, file.Filename)
if err != nil {

View File

@@ -309,7 +309,7 @@ func (collection *PackageCollection) Scan(q PackageQuery) (result *PackageList)
}
if q.Matches(pkg) {
result.Add(pkg)
_ = result.Add(pkg)
}
}
@@ -337,7 +337,7 @@ func (collection *PackageCollection) SearchByKey(arch, name, version string) (re
}
if pkg.Architecture == arch && pkg.Name == name && pkg.Version == version {
result.Add(pkg)
_ = result.Add(pkg)
}
}

View File

@@ -23,7 +23,7 @@ func (s *PackageCollectionSuite) SetUpTest(c *C) {
}
func (s *PackageCollectionSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *PackageCollectionSuite) TestUpdate(c *C) {
@@ -67,7 +67,7 @@ func (s *PackageCollectionSuite) TestByKey(c *C) {
func (s *PackageCollectionSuite) TestByKeyOld0_3(c *C) {
key := []byte("Pi386 vmware-view-open-client 4.5.0-297975+dfsg-4+b1")
s.db.Put(key, old0_3Package)
_ = s.db.Put(key, old0_3Package)
p, err := s.collection.ByKey(key)
c.Check(err, IsNil)

View File

@@ -64,7 +64,7 @@ func (files PackageFiles) Hash() uint64 {
for _, f := range files {
h.Write([]byte(f.Filename))
binary.Write(h, binary.BigEndian, f.Checksums.Size)
_ = binary.Write(h, binary.BigEndian, f.Checksums.Size)
h.Write([]byte(f.Checksums.MD5))
h.Write([]byte(f.Checksums.SHA1))
h.Write([]byte(f.Checksums.SHA256))

View File

@@ -1,7 +1,7 @@
package deb
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/aptly-dev/aptly/aptly"
@@ -39,7 +39,7 @@ func (s *PackageFilesSuite) TestVerify(c *C) {
c.Check(result, Equals, false)
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
c.Assert(os.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
s.files[0].PoolPath, _ = packagePool.Import(tmpFilepath, s.files[0].Filename, &s.files[0].Checksums, false, s.cs)

View File

@@ -2,7 +2,7 @@ package deb
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"regexp"
@@ -395,7 +395,7 @@ func (s *PackageSuite) TestLinkFromPool(c *C) {
p := NewPackageFromControlFile(s.stanza)
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, nil, 0777), IsNil)
c.Assert(os.WriteFile(tmpFilepath, nil, 0777), IsNil)
p.Files()[0].PoolPath, _ = packagePool.Import(tmpFilepath, p.Files()[0].Filename, &p.Files()[0].Checksums, false, cs)
@@ -434,7 +434,7 @@ func (s *PackageSuite) TestDownloadList(c *C) {
})
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
c.Assert(os.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
p.Files()[0].PoolPath, _ = packagePool.Import(tmpFilepath, p.Files()[0].Filename, &p.Files()[0].Checksums, false, cs)
list, err = p.DownloadList(packagePool, cs)
@@ -449,7 +449,7 @@ func (s *PackageSuite) TestVerifyFiles(c *C) {
cs := files.NewMockChecksumStorage()
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
c.Assert(os.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
p.Files()[0].PoolPath, _ = packagePool.Import(tmpFilepath, p.Files()[0].Filename, &p.Files()[0].Checksums, false, cs)

View File

@@ -631,7 +631,7 @@ func (p *PublishedRepo) Components() []string {
return result
}
// Components returns sorted list of published repo source names
// SourceNames returns sorted list of published repo source names
func (p *PublishedRepo) SourceNames() []string {
var sources = []string{}
@@ -702,7 +702,7 @@ func (p *PublishedRepo) Encode() []byte {
var buf bytes.Buffer
encoder := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
encoder.Encode(p)
_ = encoder.Encode(p)
return buf.Bytes()
}
@@ -884,7 +884,7 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
if err != nil {
return err
}
defer os.RemoveAll(tempDir)
defer func() { _ = os.RemoveAll(tempDir) }()
indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix, p.AcquireByHash, p.SkipBz2)
@@ -970,7 +970,7 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
contentIndexesMap[key] = contentIndex
}
contentIndex.Push(qualifiedName, contents, batch)
_ = contentIndex.Push(qualifiedName, contents, batch)
}
}
@@ -1275,11 +1275,11 @@ func (collection *PublishedRepoCollection) CheckDuplicate(repo *PublishedRepo) *
// Update stores updated information about repo in DB
func (collection *PublishedRepoCollection) Update(repo *PublishedRepo) error {
batch := collection.db.CreateBatch()
batch.Put(repo.Key(), repo.Encode())
_ = batch.Put(repo.Key(), repo.Encode())
if repo.SourceKind == SourceLocalRepo {
for component, item := range repo.sourceItems {
batch.Put(repo.RefKey(component), item.packageRefs.Encode())
_ = batch.Put(repo.RefKey(component), item.packageRefs.Encode())
}
}
return batch.Write()
@@ -1324,7 +1324,7 @@ func (collection *PublishedRepoCollection) LoadShallow(repo *PublishedRepo, coll
// LoadComplete loads complete information on the sources of the repo *and* their packages
func (collection *PublishedRepoCollection) LoadComplete(repo *PublishedRepo, collectionFactory *CollectionFactory) (err error) {
collection.LoadShallow(repo, collectionFactory)
_ = collection.LoadShallow(repo, collectionFactory)
if repo.SourceKind == SourceSnapshot {
for _, item := range repo.sourceItems {
@@ -1502,7 +1502,7 @@ func (collection *PublishedRepoCollection) listReferencedFilesByComponent(prefix
return nil, err
}
packageList.ForEach(func(p *Package) error {
_ = packageList.ForEach(func(p *Package) error {
poolDir, err := p.PoolDirectory()
if err != nil {
return err
@@ -1575,7 +1575,7 @@ func (collection *PublishedRepoCollection) CleanupPrefixComponentFiles(published
return err
}
packageList.ForEach(func(p *Package) error {
_ = packageList.ForEach(func(p *Package) error {
poolDir, err := p.PoolDirectory()
if err != nil {
return err
@@ -1709,10 +1709,10 @@ func (collection *PublishedRepoCollection) Remove(publishedStorageProvider aptly
}
batch := collection.db.CreateBatch()
batch.Delete(repo.Key())
_ = batch.Delete(repo.Key())
for _, component := range repo.Components() {
batch.Delete(repo.RefKey(component))
_ = batch.Delete(repo.RefKey(component))
}
return batch.Write()

View File

@@ -19,13 +19,13 @@ func BenchmarkListReferencedFiles(b *testing.B) {
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll(tmpDir)
defer func() { _ = os.RemoveAll(tmpDir) }()
db, err := goleveldb.NewOpenDB(tmpDir)
if err != nil {
b.Fatal(err)
}
defer db.Close()
defer func() { _ = db.Close() }()
factory := NewCollectionFactory(db)
packageCollection := factory.PackageCollection()
@@ -49,7 +49,7 @@ func BenchmarkListReferencedFiles(b *testing.B) {
Filename: fmt.Sprintf("pkg-shared_%d.deb", pkgIndex),
}})
packageCollection.UpdateInTransaction(p, transaction)
_ = packageCollection.UpdateInTransaction(p, transaction)
sharedRefs.Refs = append(sharedRefs.Refs, p.Key(""))
}
@@ -78,7 +78,7 @@ func BenchmarkListReferencedFiles(b *testing.B) {
Filename: fmt.Sprintf("pkg%d_%d.deb", repoIndex, pkgIndex),
}})
packageCollection.UpdateInTransaction(p, transaction)
_ = packageCollection.UpdateInTransaction(p, transaction)
refs.Refs = append(refs.Refs, p.Key(""))
}
@@ -92,16 +92,16 @@ func BenchmarkListReferencedFiles(b *testing.B) {
repo.DefaultDistribution = fmt.Sprintf("dist%d", repoIndex)
repo.DefaultComponent = defaultComponent
repo.UpdateRefList(refs.Merge(sharedRefs, false, true))
repoCollection.Add(repo)
_ = repoCollection.Add(repo)
publish, err := NewPublishedRepo("", "test", "", nil, []string{defaultComponent}, []interface{}{repo}, factory, false)
if err != nil {
b.Fatal(err)
}
publishCollection.Add(publish)
_ = publishCollection.Add(publish)
}
db.CompactDB()
_ = db.CompactDB()
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -51,11 +50,11 @@ func (n *NullSigner) SetPassphrase(passphrase, passphraseFile string) {
}
func (n *NullSigner) DetachedSign(source string, destination string) error {
return ioutil.WriteFile(destination, []byte{}, 0644)
return os.WriteFile(destination, []byte{}, 0644)
}
func (n *NullSigner) ClearSign(source string, destination string) error {
return ioutil.WriteFile(destination, []byte{}, 0644)
return os.WriteFile(destination, []byte{}, 0644)
}
type FakeStorageProvider struct {
@@ -104,7 +103,7 @@ func (s *PublishedRepoSuite) SetUpTest(c *C) {
s.cs = files.NewMockChecksumStorage()
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, nil, 0777), IsNil)
c.Assert(os.WriteFile(tmpFilepath, nil, 0777), IsNil)
var err error
s.p1.Files()[0].PoolPath, err = s.packagePool.Import(tmpFilepath, s.p1.Files()[0].Filename, &s.p1.Files()[0].Checksums, false, s.cs)
@@ -118,22 +117,22 @@ func (s *PublishedRepoSuite) SetUpTest(c *C) {
repo, _ := NewRemoteRepo("yandex", "http://mirror.yandex.ru/debian/", "squeeze", []string{"main"}, []string{}, false, false, false)
repo.packageRefs = s.reflist
s.factory.RemoteRepoCollection().Add(repo)
_ = s.factory.RemoteRepoCollection().Add(repo)
s.localRepo = NewLocalRepo("local1", "comment1")
s.localRepo.packageRefs = s.reflist
s.factory.LocalRepoCollection().Add(s.localRepo)
_ = s.factory.LocalRepoCollection().Add(s.localRepo)
s.snapshot, _ = NewSnapshotFromRepository("snap", repo)
s.factory.SnapshotCollection().Add(s.snapshot)
_ = s.factory.SnapshotCollection().Add(s.snapshot)
s.snapshot2, _ = NewSnapshotFromRepository("snap", repo)
s.factory.SnapshotCollection().Add(s.snapshot2)
_ = s.factory.SnapshotCollection().Add(s.snapshot2)
s.packageCollection = s.factory.PackageCollection()
s.packageCollection.Update(s.p1)
s.packageCollection.Update(s.p2)
s.packageCollection.Update(s.p3)
_ = s.packageCollection.Update(s.p1)
_ = s.packageCollection.Update(s.p2)
_ = s.packageCollection.Update(s.p3)
s.repo, _ = NewPublishedRepo("", "ppa", "squeeze", nil, []string{"main"}, []interface{}{s.snapshot}, s.factory, false)
s.repo.SkipContents = true
@@ -152,7 +151,7 @@ func (s *PublishedRepoSuite) SetUpTest(c *C) {
}
func (s *PublishedRepoSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *PublishedRepoSuite) TestNewPublishedRepo(c *C) {
@@ -179,12 +178,12 @@ func (s *PublishedRepoSuite) TestNewPublishedRepo(c *C) {
c.Check(s.repo3.RefList("main").Len(), Equals, 3)
c.Check(s.repo3.RefList("contrib").Len(), Equals, 3)
c.Check(func() { NewPublishedRepo("", ".", "a", nil, nil, nil, s.factory, false) }, PanicMatches, "publish with empty sources")
c.Check(func() { _, _ = NewPublishedRepo("", ".", "a", nil, nil, nil, s.factory, false) }, PanicMatches, "publish with empty sources")
c.Check(func() {
NewPublishedRepo("", ".", "a", nil, []string{"main"}, []interface{}{s.snapshot, s.snapshot2}, s.factory, false)
_, _ = NewPublishedRepo("", ".", "a", nil, []string{"main"}, []interface{}{s.snapshot, s.snapshot2}, s.factory, false)
}, PanicMatches, "sources and components should be equal in size")
c.Check(func() {
NewPublishedRepo("", ".", "a", nil, []string{"main", "contrib"}, []interface{}{s.localRepo, s.snapshot2}, s.factory, false)
_, _ = NewPublishedRepo("", ".", "a", nil, []string{"main", "contrib"}, []interface{}{s.localRepo, s.snapshot2}, s.factory, false)
}, PanicMatches, "interface conversion:.*")
_, err := NewPublishedRepo("", ".", "a", nil, []string{"main", "main"}, []interface{}{s.snapshot, s.snapshot2}, s.factory, false)
@@ -337,7 +336,7 @@ func (s *PublishedRepoSuite) TestDistributionComponentGuessing(c *C) {
s.localRepo.DefaultDistribution = "precise"
s.localRepo.DefaultComponent = "contrib"
s.factory.LocalRepoCollection().Update(s.localRepo)
_ = s.factory.LocalRepoCollection().Update(s.localRepo)
repo, err = NewPublishedRepo("", "ppa", "", nil, []string{""}, []interface{}{s.localRepo}, s.factory, false)
c.Check(err, IsNil)
@@ -526,8 +525,8 @@ func (s *PublishedRepoSuite) TestPublishedRepoRevision(c *C) {
bytes, err := json.Marshal(revision)
c.Assert(err, IsNil)
json_expected := `{"Sources":[{"Component":"main","Name":"local1"},{"Component":"test1","Name":"snap1"},{"Component":"test2","Name":"snap2"}]}`
c.Assert(string(bytes), Equals, json_expected)
jsonExpected := `{"Sources":[{"Component":"main","Name":"local1"},{"Component":"test1","Name":"snap1"},{"Component":"test2","Name":"snap2"}]}`
c.Assert(string(bytes), Equals, jsonExpected)
c.Assert(s.repo2.DropRevision(), DeepEquals, revision)
c.Assert(s.repo2.Revision, IsNil)
@@ -564,11 +563,11 @@ func (s *PublishedRepoCollectionSuite) SetUpTest(c *C) {
sort.Sort(snap2Refs)
s.snap2 = NewSnapshotFromRefList("snap2", []*Snapshot{}, snap2Refs, "desc2")
s.snapshotCollection.Add(s.snap1)
s.snapshotCollection.Add(s.snap2)
_ = s.snapshotCollection.Add(s.snap1)
_ = s.snapshotCollection.Add(s.snap2)
s.localRepo = NewLocalRepo("local1", "comment1")
s.factory.LocalRepoCollection().Add(s.localRepo)
_ = s.factory.LocalRepoCollection().Add(s.localRepo)
s.repo1, _ = NewPublishedRepo("", "ppa", "anaconda", []string{}, []string{"main"}, []interface{}{s.snap1}, s.factory, false)
s.repo2, _ = NewPublishedRepo("", "", "anaconda", []string{}, []string{"main", "contrib"}, []interface{}{s.snap2, s.snap1}, s.factory, false)
@@ -580,7 +579,7 @@ func (s *PublishedRepoCollectionSuite) SetUpTest(c *C) {
}
func (s *PublishedRepoCollectionSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *PublishedRepoCollectionSuite) TestAddByStoragePrefixDistribution(c *C) {
@@ -677,7 +676,7 @@ func (s *PublishedRepoCollectionSuite) TestLoadPre0_6(c *C) {
var buf bytes.Buffer
encoder := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
encoder.Encode(&old)
_ = encoder.Encode(&old)
c.Assert(s.db.Put(s.repo1.Key(), buf.Bytes()), IsNil)
c.Assert(s.db.Put(s.repo1.RefKey(""), s.localRepo.RefList().Encode()), IsNil)
@@ -695,7 +694,7 @@ func (s *PublishedRepoCollectionSuite) TestLoadPre0_6(c *C) {
}
func (s *PublishedRepoCollectionSuite) TestForEachAndLen(c *C) {
s.collection.Add(s.repo1)
_ = s.collection.Add(s.repo1)
count := 0
err := s.collection.ForEach(func(*PublishedRepo) error {
@@ -755,7 +754,7 @@ func (s *PublishedRepoCollectionSuite) TestListReferencedFiles(c *C) {
})
snap3 := NewSnapshotFromRefList("snap3", []*Snapshot{}, s.snap2.RefList(), "desc3")
s.snapshotCollection.Add(snap3)
_ = s.snapshotCollection.Add(snap3)
// Ensure that adding a second publish point with matching files doesn't give duplicate results.
repo3, err := NewPublishedRepo("", "", "anaconda-2", []string{}, []string{"main"}, []interface{}{snap3}, s.factory, false)
@@ -799,7 +798,7 @@ func (s *PublishedRepoRemoveSuite) SetUpTest(c *C) {
s.snap1 = NewSnapshotFromPackageList("snap1", []*Snapshot{}, NewPackageList(), "desc1")
s.snapshotCollection.Add(s.snap1)
_ = s.snapshotCollection.Add(s.snap1)
s.repo1, _ = NewPublishedRepo("", "ppa", "anaconda", []string{}, []string{"main"}, []interface{}{s.snap1}, s.factory, false)
s.repo2, _ = NewPublishedRepo("", "", "anaconda", []string{}, []string{"main"}, []interface{}{s.snap1}, s.factory, false)
@@ -808,26 +807,26 @@ func (s *PublishedRepoRemoveSuite) SetUpTest(c *C) {
s.repo5, _ = NewPublishedRepo("files:other", "ppa", "osminog", []string{}, []string{"contrib"}, []interface{}{s.snap1}, s.factory, false)
s.collection = s.factory.PublishedRepoCollection()
s.collection.Add(s.repo1)
s.collection.Add(s.repo2)
s.collection.Add(s.repo3)
s.collection.Add(s.repo4)
s.collection.Add(s.repo5)
_ = s.collection.Add(s.repo1)
_ = s.collection.Add(s.repo2)
_ = s.collection.Add(s.repo3)
_ = s.collection.Add(s.repo4)
_ = s.collection.Add(s.repo5)
s.root = c.MkDir()
s.publishedStorage = files.NewPublishedStorage(s.root, "", "")
s.publishedStorage.MkDir("ppa/dists/anaconda")
s.publishedStorage.MkDir("ppa/dists/meduza")
s.publishedStorage.MkDir("ppa/dists/osminog")
s.publishedStorage.MkDir("ppa/pool/main")
s.publishedStorage.MkDir("ppa/pool/contrib")
s.publishedStorage.MkDir("dists/anaconda")
s.publishedStorage.MkDir("pool/main")
_ = s.publishedStorage.MkDir("ppa/dists/anaconda")
_ = s.publishedStorage.MkDir("ppa/dists/meduza")
_ = s.publishedStorage.MkDir("ppa/dists/osminog")
_ = s.publishedStorage.MkDir("ppa/pool/main")
_ = s.publishedStorage.MkDir("ppa/pool/contrib")
_ = s.publishedStorage.MkDir("dists/anaconda")
_ = s.publishedStorage.MkDir("pool/main")
s.root2 = c.MkDir()
s.publishedStorage2 = files.NewPublishedStorage(s.root2, "", "")
s.publishedStorage2.MkDir("ppa/dists/osminog")
s.publishedStorage2.MkDir("ppa/pool/contrib")
_ = s.publishedStorage2.MkDir("ppa/dists/osminog")
_ = s.publishedStorage2.MkDir("ppa/pool/contrib")
s.provider = &FakeStorageProvider{map[string]aptly.PublishedStorage{
"": s.publishedStorage,
@@ -835,11 +834,11 @@ func (s *PublishedRepoRemoveSuite) SetUpTest(c *C) {
}
func (s *PublishedRepoRemoveSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *PublishedRepoRemoveSuite) TestRemoveFilesOnlyDist(c *C) {
s.repo1.RemoveFiles(s.provider, false, []string{}, nil)
_ = s.repo1.RemoveFiles(s.provider, false, []string{}, nil)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/anaconda"), Not(PathExists))
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/meduza"), PathExists)
@@ -853,7 +852,7 @@ func (s *PublishedRepoRemoveSuite) TestRemoveFilesOnlyDist(c *C) {
}
func (s *PublishedRepoRemoveSuite) TestRemoveFilesWithPool(c *C) {
s.repo1.RemoveFiles(s.provider, false, []string{"main"}, nil)
_ = s.repo1.RemoveFiles(s.provider, false, []string{"main"}, nil)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/anaconda"), Not(PathExists))
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/meduza"), PathExists)
@@ -867,7 +866,7 @@ func (s *PublishedRepoRemoveSuite) TestRemoveFilesWithPool(c *C) {
}
func (s *PublishedRepoRemoveSuite) TestRemoveFilesWithTwoPools(c *C) {
s.repo1.RemoveFiles(s.provider, false, []string{"main", "contrib"}, nil)
_ = s.repo1.RemoveFiles(s.provider, false, []string{"main", "contrib"}, nil)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/anaconda"), Not(PathExists))
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/meduza"), PathExists)
@@ -881,7 +880,7 @@ func (s *PublishedRepoRemoveSuite) TestRemoveFilesWithTwoPools(c *C) {
}
func (s *PublishedRepoRemoveSuite) TestRemoveFilesWithPrefix(c *C) {
s.repo1.RemoveFiles(s.provider, true, []string{"main"}, nil)
_ = s.repo1.RemoveFiles(s.provider, true, []string{"main"}, nil)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/anaconda"), Not(PathExists))
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/meduza"), Not(PathExists))
@@ -895,7 +894,7 @@ func (s *PublishedRepoRemoveSuite) TestRemoveFilesWithPrefix(c *C) {
}
func (s *PublishedRepoRemoveSuite) TestRemoveFilesWithPrefixRoot(c *C) {
s.repo2.RemoveFiles(s.provider, true, []string{"main"}, nil)
_ = s.repo2.RemoveFiles(s.provider, true, []string{"main"}, nil)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/anaconda"), PathExists)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/meduza"), PathExists)

View File

@@ -89,7 +89,7 @@ func (q *OrQuery) Fast(list PackageCatalog) bool {
func (q *OrQuery) Query(list PackageCatalog) (result *PackageList) {
if q.Fast(list) {
result = q.L.Query(list)
result.Append(q.R.Query(list))
_ = result.Append(q.R.Query(list))
} else {
result = list.Scan(q)
}
@@ -245,7 +245,7 @@ func (q *DependencyQuery) Query(list PackageCatalog) (result *PackageList) {
if q.Fast(list) {
result = NewPackageList()
for _, pkg := range list.Search(q.Dep, true, true) {
result.Add(pkg)
_ = result.Add(pkg)
}
} else {
result = list.Scan(q)

View File

@@ -54,7 +54,7 @@ func (l *PackageRefList) Swap(i, j int) {
l.Refs[i], l.Refs[j] = l.Refs[j], l.Refs[i]
}
// Compare compares two refs in lexicographical order
// Less compares two refs in lexicographical order
func (l *PackageRefList) Less(i, j int) bool {
return bytes.Compare(l.Refs[i], l.Refs[j]) < 0
}
@@ -64,7 +64,7 @@ func (l *PackageRefList) Encode() []byte {
var buf bytes.Buffer
encoder := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
encoder.Encode(l)
_ = encoder.Encode(l)
return buf.Bytes()
}

View File

@@ -42,6 +42,6 @@ func BenchmarkReflistDecode(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
(&PackageRefList{}).Decode(data)
_ = (&PackageRefList{}).Decode(data)
}
}

View File

@@ -46,21 +46,21 @@ func (s *PackageRefListSuite) SetUpTest(c *C) {
func (s *PackageRefListSuite) TestNewPackageListFromRefList(c *C) {
db, _ := goleveldb.NewOpenDB(c.MkDir())
coll := NewPackageCollection(db)
coll.Update(s.p1)
coll.Update(s.p3)
_ = coll.Update(s.p1)
_ = coll.Update(s.p3)
s.list.Add(s.p1)
s.list.Add(s.p3)
s.list.Add(s.p5)
s.list.Add(s.p6)
_ = s.list.Add(s.p1)
_ = s.list.Add(s.p3)
_ = s.list.Add(s.p5)
_ = s.list.Add(s.p6)
reflist := NewPackageRefListFromPackageList(s.list)
_, err := NewPackageListFromRefList(reflist, coll, nil)
c.Assert(err, ErrorMatches, "unable to load package with key.*")
coll.Update(s.p5)
coll.Update(s.p6)
_ = coll.Update(s.p5)
_ = coll.Update(s.p6)
list, err := NewPackageListFromRefList(reflist, coll, nil)
c.Assert(err, IsNil)
@@ -73,10 +73,10 @@ func (s *PackageRefListSuite) TestNewPackageListFromRefList(c *C) {
}
func (s *PackageRefListSuite) TestNewPackageRefList(c *C) {
s.list.Add(s.p1)
s.list.Add(s.p3)
s.list.Add(s.p5)
s.list.Add(s.p6)
_ = s.list.Add(s.p1)
_ = s.list.Add(s.p3)
_ = s.list.Add(s.p5)
_ = s.list.Add(s.p6)
reflist := NewPackageRefListFromPackageList(s.list)
c.Assert(reflist.Len(), Equals, 4)
@@ -90,10 +90,10 @@ func (s *PackageRefListSuite) TestNewPackageRefList(c *C) {
}
func (s *PackageRefListSuite) TestPackageRefListEncodeDecode(c *C) {
s.list.Add(s.p1)
s.list.Add(s.p3)
s.list.Add(s.p5)
s.list.Add(s.p6)
_ = s.list.Add(s.p1)
_ = s.list.Add(s.p3)
_ = s.list.Add(s.p5)
_ = s.list.Add(s.p6)
reflist := NewPackageRefListFromPackageList(s.list)
@@ -105,10 +105,10 @@ func (s *PackageRefListSuite) TestPackageRefListEncodeDecode(c *C) {
}
func (s *PackageRefListSuite) TestPackageRefListForeach(c *C) {
s.list.Add(s.p1)
s.list.Add(s.p3)
s.list.Add(s.p5)
s.list.Add(s.p6)
_ = s.list.Add(s.p1)
_ = s.list.Add(s.p3)
_ = s.list.Add(s.p5)
_ = s.list.Add(s.p6)
reflist := NewPackageRefListFromPackageList(s.list)
@@ -131,9 +131,9 @@ func (s *PackageRefListSuite) TestPackageRefListForeach(c *C) {
}
func (s *PackageRefListSuite) TestHas(c *C) {
s.list.Add(s.p1)
s.list.Add(s.p3)
s.list.Add(s.p5)
_ = s.list.Add(s.p1)
_ = s.list.Add(s.p3)
_ = s.list.Add(s.p5)
reflist := NewPackageRefListFromPackageList(s.list)
c.Check(reflist.Has(s.p1), Equals, true)
@@ -180,21 +180,21 @@ func (s *PackageRefListSuite) TestDiff(c *C) {
}
for _, p := range packages {
coll.Update(p)
_ = coll.Update(p)
}
listA := NewPackageList()
listA.Add(packages[0])
listA.Add(packages[1])
listA.Add(packages[2])
listA.Add(packages[3])
listA.Add(packages[6])
_ = listA.Add(packages[0])
_ = listA.Add(packages[1])
_ = listA.Add(packages[2])
_ = listA.Add(packages[3])
_ = listA.Add(packages[6])
listB := NewPackageList()
listB.Add(packages[0])
listB.Add(packages[2])
listB.Add(packages[4])
listB.Add(packages[5])
_ = listB.Add(packages[0])
_ = listB.Add(packages[2])
_ = listB.Add(packages[4])
_ = listB.Add(packages[5])
reflistA := NewPackageRefListFromPackageList(listA)
reflistB := NewPackageRefListFromPackageList(listB)
@@ -248,15 +248,15 @@ func (s *PackageRefListSuite) TestDiffCompactsAtEnd(c *C) {
}
for _, p := range packages {
coll.Update(p)
_ = coll.Update(p)
}
listA := NewPackageList()
listA.Add(packages[0])
_ = listA.Add(packages[0])
listB := NewPackageList()
listB.Add(packages[1])
listB.Add(packages[2])
_ = listB.Add(packages[1])
_ = listB.Add(packages[2])
reflistA := NewPackageRefListFromPackageList(listA)
reflistB := NewPackageRefListFromPackageList(listB)
@@ -291,27 +291,27 @@ func (s *PackageRefListSuite) TestMerge(c *C) {
for _, p := range packages {
p.V06Plus = true
coll.Update(p)
_ = coll.Update(p)
}
listA := NewPackageList()
listA.Add(packages[0])
listA.Add(packages[1])
listA.Add(packages[2])
listA.Add(packages[3])
listA.Add(packages[7])
_ = listA.Add(packages[0])
_ = listA.Add(packages[1])
_ = listA.Add(packages[2])
_ = listA.Add(packages[3])
_ = listA.Add(packages[7])
listB := NewPackageList()
listB.Add(packages[0])
listB.Add(packages[2])
listB.Add(packages[4])
listB.Add(packages[5])
listB.Add(packages[6])
_ = listB.Add(packages[0])
_ = listB.Add(packages[2])
_ = listB.Add(packages[4])
_ = listB.Add(packages[5])
_ = listB.Add(packages[6])
listC := NewPackageList()
listC.Add(packages[0])
listC.Add(packages[8])
listC.Add(packages[9])
_ = listC.Add(packages[0])
_ = listC.Add(packages[8])
_ = listC.Add(packages[9])
reflistA := NewPackageRefListFromPackageList(listA)
reflistB := NewPackageRefListFromPackageList(listB)
@@ -372,14 +372,14 @@ func (s *PackageRefListSuite) TestFilterLatestRefs(c *C) {
}
rl := NewPackageList()
rl.Add(packages[0])
rl.Add(packages[1])
rl.Add(packages[2])
rl.Add(packages[3])
rl.Add(packages[4])
rl.Add(packages[5])
rl.Add(packages[6])
rl.Add(packages[7])
_ = rl.Add(packages[0])
_ = rl.Add(packages[1])
_ = rl.Add(packages[2])
_ = rl.Add(packages[3])
_ = rl.Add(packages[4])
_ = rl.Add(packages[5])
_ = rl.Add(packages[6])
_ = rl.Add(packages[7])
result := NewPackageRefListFromPackageList(rl)
result.FilterLatestRefs()

View File

@@ -120,7 +120,7 @@ func NewRemoteRepo(name string, archiveRoot string, distribution string, compone
// SetArchiveRoot of remote repo
func (repo *RemoteRepo) SetArchiveRoot(archiveRoot string) {
repo.ArchiveRoot = archiveRoot
repo.prepare()
_ = repo.prepare()
}
func (repo *RemoteRepo) prepare() error {
@@ -302,14 +302,14 @@ func (repo *RemoteRepo) Fetch(d aptly.Downloader, verifier pgp.Verifier, ignoreS
if err != nil {
goto splitsignature
}
defer inrelease.Close()
defer func() { _ = inrelease.Close() }()
_, err = verifier.VerifyClearsigned(inrelease, true)
if err != nil {
goto splitsignature
}
inrelease.Seek(0, 0)
_, _ = inrelease.Seek(0, 0)
release, err = verifier.ExtractClearsigned(inrelease)
if err != nil {
@@ -342,7 +342,7 @@ func (repo *RemoteRepo) Fetch(d aptly.Downloader, verifier pgp.Verifier, ignoreS
}
ok:
defer release.Close()
defer func() { _ = release.Close() }()
sreader := NewControlFileReader(release, true, false)
stanza, err := sreader.ReadStanza()
@@ -528,7 +528,7 @@ func (repo *RemoteRepo) DownloadPackageIndexes(progress aptly.Progress, d aptly.
return err
}
}
defer packagesFile.Close()
defer func() { _ = packagesFile.Close() }()
if progress != nil {
stat, _ := packagesFile.Stat()
@@ -705,7 +705,7 @@ func (repo *RemoteRepo) Encode() []byte {
var buf bytes.Buffer
encoder := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
encoder.Encode(repo)
_ = encoder.Encode(repo)
return buf.Bytes()
}
@@ -804,7 +804,7 @@ func (collection *RemoteRepoCollection) search(filter func(*RemoteRepo) bool, un
return result
}
collection.db.ProcessByPrefix([]byte("R"), func(_, blob []byte) error {
_ = collection.db.ProcessByPrefix([]byte("R"), func(_, blob []byte) error {
r := &RemoteRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding remote repo: %s\n", err)
@@ -848,9 +848,9 @@ func (collection *RemoteRepoCollection) Add(repo *RemoteRepo) error {
func (collection *RemoteRepoCollection) Update(repo *RemoteRepo) error {
batch := collection.db.CreateBatch()
batch.Put(repo.Key(), repo.Encode())
_ = batch.Put(repo.Key(), repo.Encode())
if repo.packageRefs != nil {
batch.Put(repo.RefKey(), repo.packageRefs.Encode())
_ = batch.Put(repo.RefKey(), repo.packageRefs.Encode())
}
return batch.Write()
}
@@ -936,7 +936,7 @@ func (collection *RemoteRepoCollection) Drop(repo *RemoteRepo) error {
delete(collection.cache, repo.UUID)
batch := collection.db.CreateBatch()
batch.Delete(repo.Key())
batch.Delete(repo.RefKey())
_ = batch.Delete(repo.Key())
_ = batch.Delete(repo.RefKey())
return batch.Write()
}

View File

@@ -38,9 +38,9 @@ func (n *NullVerifier) VerifyClearsigned(clearsigned io.Reader, hint bool) (*pgp
func (n *NullVerifier) ExtractClearsigned(clearsigned io.Reader) (text *os.File, err error) {
text, _ = os.CreateTemp("", "aptly-test")
io.Copy(text, clearsigned)
text.Seek(0, 0)
os.Remove(text.Name())
_, _ = io.Copy(text, clearsigned)
_, _ = text.Seek(0, 0)
_ = os.Remove(text.Name())
return
}
@@ -68,9 +68,9 @@ func (s *PackageListMixinSuite) SetUpPackages() {
stanza["Filename"] = "pool/contrib/l/lonely-strangers/lonely-strangers_7.40-2_i386.deb"
s.p3 = NewPackageFromControlFile(stanza)
s.list.Add(s.p1)
s.list.Add(s.p2)
s.list.Add(s.p3)
_ = s.list.Add(s.p1)
_ = s.list.Add(s.p2)
_ = s.list.Add(s.p3)
s.reflist = NewPackageRefListFromPackageList(s.list)
}
@@ -104,7 +104,7 @@ func (s *RemoteRepoSuite) SetUpTest(c *C) {
func (s *RemoteRepoSuite) TearDownTest(c *C) {
s.progress.Shutdown()
s.db.Close()
_ = s.db.Close()
}
func (s *RemoteRepoSuite) TestInvalidURL(c *C) {
@@ -287,7 +287,7 @@ func (s *RemoteRepoSuite) TestDownload(c *C) {
c.Check(queue, HasLen, 1)
c.Check(queue[0].File.DownloadURL(), Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.repo.FinalizeDownload(s.collectionFactory, nil)
_ = s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.repo.packageRefs.Refs[0])
@@ -313,7 +313,7 @@ func (s *RemoteRepoSuite) TestDownload(c *C) {
c.Check(size, Equals, int64(0))
c.Check(queue, HasLen, 0)
s.repo.FinalizeDownload(s.collectionFactory, nil)
_ = s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
// Next call must return the download list without option "skip-existing-packages"
@@ -335,7 +335,7 @@ func (s *RemoteRepoSuite) TestDownload(c *C) {
c.Check(queue, HasLen, 1)
c.Check(queue[0].File.DownloadURL(), Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.repo.FinalizeDownload(s.collectionFactory, nil)
_ = s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
}
@@ -369,7 +369,7 @@ func (s *RemoteRepoSuite) TestDownloadWithInstaller(c *C) {
c.Check(q[0], Equals, "dists/squeeze/main/installer-i386/current/images/MANIFEST")
c.Check(q[1], Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.repo.FinalizeDownload(s.collectionFactory, nil)
_ = s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.repo.packageRefs.Refs[0])
@@ -415,7 +415,7 @@ func (s *RemoteRepoSuite) TestDownloadWithSources(c *C) {
c.Check(q[2], Equals, "pool/main/a/access-modifier-checker/access-modifier-checker_1.0.orig.tar.gz")
c.Check(q[0], Equals, "pool/main/a/access-modifier-checker/access-modifier-checker_1.0-4.debian.tar.gz")
s.repo.FinalizeDownload(s.collectionFactory, nil)
_ = s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.repo.packageRefs.Refs[0])
@@ -449,7 +449,7 @@ func (s *RemoteRepoSuite) TestDownloadWithSources(c *C) {
c.Check(size, Equals, int64(0))
c.Check(queue, HasLen, 0)
s.repo.FinalizeDownload(s.collectionFactory, nil)
_ = s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
// Next call must return the download list without option "skip-existing-packages"
@@ -474,7 +474,7 @@ func (s *RemoteRepoSuite) TestDownloadWithSources(c *C) {
c.Check(size, Equals, int64(15))
c.Check(queue, HasLen, 4)
s.repo.FinalizeDownload(s.collectionFactory, nil)
_ = s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
}
@@ -499,7 +499,7 @@ func (s *RemoteRepoSuite) TestDownloadFlat(c *C) {
c.Check(queue, HasLen, 1)
c.Check(queue[0].File.DownloadURL(), Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.flat.FinalizeDownload(s.collectionFactory, nil)
_ = s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.flat.packageRefs.Refs[0])
@@ -526,7 +526,7 @@ func (s *RemoteRepoSuite) TestDownloadFlat(c *C) {
c.Check(size, Equals, int64(0))
c.Check(queue, HasLen, 0)
s.flat.FinalizeDownload(s.collectionFactory, nil)
_ = s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
// Next call must return the download list without option "skip-existing-packages"
@@ -549,7 +549,7 @@ func (s *RemoteRepoSuite) TestDownloadFlat(c *C) {
c.Check(queue, HasLen, 1)
c.Check(queue[0].File.DownloadURL(), Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.flat.FinalizeDownload(s.collectionFactory, nil)
_ = s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
}
@@ -589,7 +589,7 @@ func (s *RemoteRepoSuite) TestDownloadWithSourcesFlat(c *C) {
c.Check(q[2], Equals, "pool/main/a/access-modifier-checker/access-modifier-checker_1.0.orig.tar.gz")
c.Check(q[0], Equals, "pool/main/a/access-modifier-checker/access-modifier-checker_1.0-4.debian.tar.gz")
s.flat.FinalizeDownload(s.collectionFactory, nil)
_ = s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.flat.packageRefs.Refs[0])
@@ -625,7 +625,7 @@ func (s *RemoteRepoSuite) TestDownloadWithSourcesFlat(c *C) {
c.Check(size, Equals, int64(0))
c.Check(queue, HasLen, 0)
s.flat.FinalizeDownload(s.collectionFactory, nil)
_ = s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
// Next call must return the download list without option "skip-existing-packages"
@@ -651,7 +651,7 @@ func (s *RemoteRepoSuite) TestDownloadWithSourcesFlat(c *C) {
c.Check(size, Equals, int64(15))
c.Check(queue, HasLen, 4)
s.flat.FinalizeDownload(s.collectionFactory, nil)
_ = s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
}
@@ -670,7 +670,7 @@ func (s *RemoteRepoCollectionSuite) SetUpTest(c *C) {
}
func (s *RemoteRepoCollectionSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *RemoteRepoCollectionSuite) TestAddByName(c *C) {
@@ -731,7 +731,7 @@ func (s *RemoteRepoCollectionSuite) TestUpdateLoadComplete(c *C) {
func (s *RemoteRepoCollectionSuite) TestForEachAndLen(c *C) {
repo, _ := NewRemoteRepo("yandex", "http://mirror.yandex.ru/debian/", "squeeze", []string{"main"}, []string{}, false, false, false)
s.collection.Add(repo)
_ = s.collection.Add(repo)
count := 0
err := s.collection.ForEach(func(*RemoteRepo) error {
@@ -753,10 +753,10 @@ func (s *RemoteRepoCollectionSuite) TestForEachAndLen(c *C) {
func (s *RemoteRepoCollectionSuite) TestDrop(c *C) {
repo1, _ := NewRemoteRepo("yandex", "http://mirror.yandex.ru/debian/", "squeeze", []string{"main"}, []string{}, false, false, false)
s.collection.Add(repo1)
_ = s.collection.Add(repo1)
repo2, _ := NewRemoteRepo("tyndex", "http://mirror.yandex.ru/debian/", "wheezy", []string{"main"}, []string{}, false, false, false)
s.collection.Add(repo2)
_ = s.collection.Add(repo2)
r1, _ := s.collection.ByUUID(repo1.UUID)
c.Check(r1, Equals, repo1)

View File

@@ -142,7 +142,7 @@ func (s *Snapshot) Encode() []byte {
var buf bytes.Buffer
encoder := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
encoder.Encode(s)
_ = encoder.Encode(s)
return buf.Bytes()
}
@@ -228,9 +228,9 @@ func (collection *SnapshotCollection) Add(snapshot *Snapshot) error {
func (collection *SnapshotCollection) Update(snapshot *Snapshot) error {
batch := collection.db.CreateBatch()
batch.Put(snapshot.Key(), snapshot.Encode())
_ = batch.Put(snapshot.Key(), snapshot.Encode())
if snapshot.packageRefs != nil {
batch.Put(snapshot.RefKey(), snapshot.packageRefs.Encode())
_ = batch.Put(snapshot.RefKey(), snapshot.packageRefs.Encode())
}
return batch.Write()
@@ -259,7 +259,7 @@ func (collection *SnapshotCollection) search(filter func(*Snapshot) bool, unique
return result
}
collection.db.ProcessByPrefix([]byte("S"), func(_, blob []byte) error {
_ = collection.db.ProcessByPrefix([]byte("S"), func(_, blob []byte) error {
s := &Snapshot{}
if err := s.Decode(blob); err != nil {
log.Printf("Error decoding snapshot: %s\n", err)
@@ -400,8 +400,8 @@ func (collection *SnapshotCollection) Drop(snapshot *Snapshot) error {
delete(collection.cache, snapshot.UUID)
batch := collection.db.CreateBatch()
batch.Delete(snapshot.Key())
batch.Delete(snapshot.RefKey())
_ = batch.Delete(snapshot.Key())
_ = batch.Delete(snapshot.RefKey())
return batch.Write()
}

View File

@@ -12,10 +12,10 @@ func BenchmarkSnapshotCollectionForEach(b *testing.B) {
const count = 1024
tmpDir := os.TempDir()
defer os.RemoveAll(tmpDir)
defer func() { _ = os.RemoveAll(tmpDir) }()
db, _ := goleveldb.NewOpenDB(tmpDir)
defer db.Close()
defer func() { _ = db.Close() }()
collection := NewSnapshotCollection(db)
@@ -31,7 +31,8 @@ func BenchmarkSnapshotCollectionForEach(b *testing.B) {
for i := 0; i < b.N; i++ {
collection = NewSnapshotCollection(db)
collection.ForEach(func(s *Snapshot) error {
_ = collection.ForEach(func(s *Snapshot) error {
return nil
})
}
@@ -41,10 +42,10 @@ func BenchmarkSnapshotCollectionByUUID(b *testing.B) {
const count = 1024
tmpDir := os.TempDir()
defer os.RemoveAll(tmpDir)
defer func() { _ = os.RemoveAll(tmpDir) }()
db, _ := goleveldb.NewOpenDB(tmpDir)
defer db.Close()
defer func() { _ = db.Close() }()
collection := NewSnapshotCollection(db)
@@ -72,10 +73,10 @@ func BenchmarkSnapshotCollectionByName(b *testing.B) {
const count = 1024
tmpDir := os.TempDir()
defer os.RemoveAll(tmpDir)
defer func() { _ = os.RemoveAll(tmpDir) }()
db, _ := goleveldb.NewOpenDB(tmpDir)
defer db.Close()
defer func() { _ = db.Close() }()
collection := NewSnapshotCollection(db)

View File

@@ -136,7 +136,7 @@ func (s *SnapshotCollectionSuite) SetUpTest(c *C) {
}
func (s *SnapshotCollectionSuite) TearDownTest(c *C) {
s.db.Close()
_ = s.db.Close()
}
func (s *SnapshotCollectionSuite) TestAddByNameByUUID(c *C) {
@@ -179,8 +179,8 @@ func (s *SnapshotCollectionSuite) TestUpdateLoadComplete(c *C) {
}
func (s *SnapshotCollectionSuite) TestForEachAndLen(c *C) {
s.collection.Add(s.snapshot1)
s.collection.Add(s.snapshot2)
_ = s.collection.Add(s.snapshot1)
_ = s.collection.Add(s.snapshot2)
count := 0
err := s.collection.ForEach(func(*Snapshot) error {
@@ -200,10 +200,10 @@ func (s *SnapshotCollectionSuite) TestForEachAndLen(c *C) {
}
func (s *SnapshotCollectionSuite) TestForEachSorted(c *C) {
s.collection.Add(s.snapshot2)
s.collection.Add(s.snapshot1)
s.collection.Add(s.snapshot4)
s.collection.Add(s.snapshot3)
_ = s.collection.Add(s.snapshot2)
_ = s.collection.Add(s.snapshot1)
_ = s.collection.Add(s.snapshot4)
_ = s.collection.Add(s.snapshot3)
names := []string{}
@@ -263,8 +263,8 @@ func (s *SnapshotCollectionSuite) TestFindSnapshotSource(c *C) {
}
func (s *SnapshotCollectionSuite) TestDrop(c *C) {
s.collection.Add(s.snapshot1)
s.collection.Add(s.snapshot2)
_ = s.collection.Add(s.snapshot1)
_ = s.collection.Add(s.snapshot2)
snap, _ := s.collection.ByUUID(s.snapshot1.UUID)
c.Check(snap, Equals, s.snapshot1)

View File

@@ -41,7 +41,7 @@ func NewUploadersFromFile(path string) (*Uploaders, error) {
if err != nil {
return nil, fmt.Errorf("error loading uploaders file: %s", err)
}
defer f.Close()
defer func() { _ = f.Close() }()
err = json.NewDecoder(JsonConfigReader.New(f)).Decode(&uploaders)
if err != nil {

View File

@@ -50,11 +50,7 @@ func compareLexicographic(s1, s2 string) int {
i := 0
l1, l2 := len(s1), len(s2)
for {
if i == l1 && i == l2 {
// s1 equal to s2
break
}
for !(i == l1 && i == l2) { // break if s1 equal to s2
if i == l2 {
// s1 is longer than s2

View File

@@ -240,7 +240,9 @@ func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.Check
if err != nil {
return "", err
}
defer source.Close()
defer func() {
_ = source.Close()
}()
sourceInfo, err := source.Stat()
if err != nil {
@@ -351,7 +353,9 @@ func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.Check
if err != nil {
return "", err
}
defer target.Close()
defer func() {
_ = target.Close()
}()
_, err = io.Copy(target, source)

View File

@@ -2,7 +2,6 @@ package files
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -51,18 +50,18 @@ func (s *PackagePoolSuite) TestFilepathList(c *C) {
c.Check(err, IsNil)
c.Check(list, IsNil)
os.MkdirAll(filepath.Join(s.pool.rootPath, "bd", "0b"), 0755)
os.MkdirAll(filepath.Join(s.pool.rootPath, "bd", "0a"), 0755)
os.MkdirAll(filepath.Join(s.pool.rootPath, "ae", "0c"), 0755)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "bd", "0b"), 0755)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "bd", "0a"), 0755)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "ae", "0c"), 0755)
list, err = s.pool.FilepathList(nil)
c.Check(err, IsNil)
c.Check(list, DeepEquals, []string{})
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "ae", "0c", "1.deb"), nil, 0644)
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "ae", "0c", "2.deb"), nil, 0644)
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "bd", "0a", "3.deb"), nil, 0644)
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "bd", "0b", "4.deb"), nil, 0644)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "ae", "0c", "1.deb"), nil, 0644)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "ae", "0c", "2.deb"), nil, 0644)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "bd", "0a", "3.deb"), nil, 0644)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "bd", "0b", "4.deb"), nil, 0644)
list, err = s.pool.FilepathList(nil)
c.Check(err, IsNil)
@@ -70,14 +69,14 @@ func (s *PackagePoolSuite) TestFilepathList(c *C) {
}
func (s *PackagePoolSuite) TestRemove(c *C) {
os.MkdirAll(filepath.Join(s.pool.rootPath, "bd", "0b"), 0755)
os.MkdirAll(filepath.Join(s.pool.rootPath, "bd", "0a"), 0755)
os.MkdirAll(filepath.Join(s.pool.rootPath, "ae", "0c"), 0755)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "bd", "0b"), 0755)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "bd", "0a"), 0755)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "ae", "0c"), 0755)
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "ae", "0c", "1.deb"), []byte("1"), 0644)
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "ae", "0c", "2.deb"), []byte("22"), 0644)
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "bd", "0a", "3.deb"), []byte("333"), 0644)
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "bd", "0b", "4.deb"), []byte("4444"), 0644)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "ae", "0c", "1.deb"), []byte("1"), 0644)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "ae", "0c", "2.deb"), []byte("22"), 0644)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "bd", "0a", "3.deb"), []byte("333"), 0644)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "bd", "0b", "4.deb"), []byte("4444"), 0644)
size, err := s.pool.Remove("ae/0c/2.deb")
c.Check(err, IsNil)
@@ -99,7 +98,9 @@ func isSameDevice(s *PackagePoolSuite) bool {
source, _ := os.Open(s.debFile)
sourceInfo, _ := source.Stat()
defer source.Close()
defer func() {
_ = source.Close()
}()
return poolDirInfo.Sys().(*syscall.Stat_t).Dev == sourceInfo.Sys().(*syscall.Stat_t).Dev
}
@@ -157,7 +158,7 @@ func (s *PackagePoolSuite) TestImportOk(c *C) {
}
func (s *PackagePoolSuite) TestImportLegacy(c *C) {
os.MkdirAll(filepath.Join(s.pool.rootPath, "00", "35"), 0755)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "00", "35"), 0755)
err := utils.CopyFile(s.debFile, filepath.Join(s.pool.rootPath, "00", "35", "libboost-program-options-dev_1.49.0.1_i386.deb"))
c.Assert(err, IsNil)
@@ -178,7 +179,7 @@ func (s *PackagePoolSuite) TestVerifyLegacy(c *C) {
c.Check(err, IsNil)
c.Check(exists, Equals, false)
os.MkdirAll(filepath.Join(s.pool.rootPath, "00", "35"), 0755)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "00", "35"), 0755)
err = utils.CopyFile(s.debFile, filepath.Join(s.pool.rootPath, "00", "35", "libboost-program-options-dev_1.49.0.1_i386.deb"))
c.Assert(err, IsNil)
@@ -299,8 +300,8 @@ func (s *PackagePoolSuite) TestImportNotExist(c *C) {
}
func (s *PackagePoolSuite) TestImportOverwrite(c *C) {
os.MkdirAll(filepath.Join(s.pool.rootPath, "c7", "6b"), 0755)
ioutil.WriteFile(filepath.Join(s.pool.rootPath, "c7", "6b", "4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb"), []byte("1"), 0644)
_ = os.MkdirAll(filepath.Join(s.pool.rootPath, "c7", "6b"), 0755)
_ = os.WriteFile(filepath.Join(s.pool.rootPath, "c7", "6b", "4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb"), []byte("1"), 0644)
_, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
c.Check(err, ErrorMatches, "unable to import into pool.*")
@@ -336,7 +337,7 @@ func (s *PackagePoolSuite) TestOpen(c *C) {
f, err := s.pool.Open(path)
c.Assert(err, IsNil)
contents, err := ioutil.ReadAll(f)
contents, err := io.ReadAll(f)
c.Assert(err, IsNil)
c.Check(len(contents), Equals, 2738)
c.Check(f.Close(), IsNil)

View File

@@ -86,13 +86,17 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
if err != nil {
return err
}
defer source.Close()
defer func() {
_ = source.Close()
}()
f, err = os.Create(filepath.Join(storage.rootPath, path))
if err != nil {
return err
}
defer f.Close()
defer func() {
_ = f.Close()
}()
_, err = io.Copy(f, source)
return err
@@ -221,20 +225,20 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath,
var dst *os.File
dst, err = os.Create(filepath.Join(poolPath, baseName))
if err != nil {
r.Close()
_ = r.Close()
return err
}
_, err = io.Copy(dst, r)
if err != nil {
r.Close()
dst.Close()
_ = r.Close()
_ = dst.Close()
return err
}
err = r.Close()
if err != nil {
dst.Close()
_ = dst.Close()
return err
}

View File

@@ -1,7 +1,6 @@
package files
import (
"io/ioutil"
"os"
"path/filepath"
"syscall"
@@ -222,8 +221,8 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
for _, t := range tests {
tmpPath := filepath.Join(c.MkDir(), t.sourcePath)
os.MkdirAll(filepath.Dir(tmpPath), 0777)
err := ioutil.WriteFile(tmpPath, []byte("Contents"), 0644)
_ = os.MkdirAll(filepath.Dir(tmpPath), 0777)
err := os.WriteFile(tmpPath, []byte("Contents"), 0644)
c.Assert(err, IsNil)
sourceChecksum, err := utils.ChecksumsForFile(tmpPath)
@@ -276,7 +275,7 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
// test linking files to duplicate final name
tmpPath := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
err := ioutil.WriteFile(tmpPath, []byte("cONTENTS"), 0644)
err := os.WriteFile(tmpPath, []byte("cONTENTS"), 0644)
c.Assert(err, IsNil)
sourceChecksum, err := utils.ChecksumsForFile(tmpPath)
@@ -330,11 +329,11 @@ func (s *PublishedStorageSuite) TestRootRemove(c *C) {
// Symlink
linkedDir := filepath.Join(pwd, "linkedDir")
os.Symlink(s.root, linkedDir)
_ = os.Symlink(s.root, linkedDir)
linkStorage := NewPublishedStorage(linkedDir, "", "")
c.Assert(func() { linkStorage.Remove("") }, PanicMatches, "trying to remove empty path")
c.Assert(func() { _ = linkStorage.Remove("") }, PanicMatches, "trying to remove empty path")
// Actual dir
dirStorage := NewPublishedStorage(pwd, "", "")
c.Assert(func() { dirStorage.RemoveDirs("", nil) }, PanicMatches, "trying to remove the root directory")
c.Assert(func() { _ = dirStorage.RemoveDirs("", nil) }, PanicMatches, "trying to remove the root directory")
}

2
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/aptly-dev/aptly
go 1.23
go 1.24
require (
github.com/AlekSi/pointer v1.1.0

17
go.sum
View File

@@ -322,17 +322,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -349,8 +346,6 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -360,8 +355,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -390,8 +383,6 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -399,8 +390,6 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -410,8 +399,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -423,8 +410,6 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -46,8 +46,10 @@ func (s *CompressionSuite) TestDownloadTryCompression(c *C) {
d.ExpectResponse("http://example.com/file.bz2", bzipData)
r, file, err := DownloadTryCompression(s.ctx, d, s.baseURL, "file", expectedChecksums, false)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
defer func() {
_ = file.Close()
}()
_, _ = io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
@@ -58,8 +60,10 @@ func (s *CompressionSuite) TestDownloadTryCompression(c *C) {
d.ExpectResponse("http://example.com/file.gz", gzipData)
r, file, err = DownloadTryCompression(s.ctx, d, s.baseURL, "file", expectedChecksums, false)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
defer func() {
_ = file.Close()
}()
_, _ = io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
@@ -71,8 +75,10 @@ func (s *CompressionSuite) TestDownloadTryCompression(c *C) {
d.ExpectResponse("http://example.com/file.xz", xzData)
r, file, err = DownloadTryCompression(s.ctx, d, s.baseURL, "file", expectedChecksums, false)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
defer func() {
_ = file.Close()
}()
_, _ = io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
@@ -85,8 +91,10 @@ func (s *CompressionSuite) TestDownloadTryCompression(c *C) {
d.ExpectResponse("http://example.com/file", rawData)
r, file, err = DownloadTryCompression(s.ctx, d, s.baseURL, "file", expectedChecksums, false)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
defer func() {
_ = file.Close()
}()
_, _ = io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
@@ -114,8 +122,10 @@ func (s *CompressionSuite) TestDownloadTryCompressionLongestSuffix(c *C) {
d.ExpectResponse("http://example.com/subdir/file.bz2", bzipData)
r, file, err := DownloadTryCompression(s.ctx, d, s.baseURL, "subdir/file", expectedChecksums, false)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
defer func() {
_ = file.Close()
}()
_, _ = io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
}

View File

@@ -226,7 +226,7 @@ func (downloader *downloaderImpl) DownloadWithChecksum(ctx context.Context, url
err = os.Rename(temppath, destination)
if err != nil {
os.Remove(temppath)
_ = os.Remove(temppath)
return errors.Wrap(err, url)
}
@@ -239,7 +239,9 @@ func (downloader *downloaderImpl) download(req *http.Request, url, destination s
return "", errors.Wrap(err, url)
}
if resp.Body != nil {
defer resp.Body.Close()
defer func() {
_ = resp.Body.Close()
}()
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
@@ -257,7 +259,9 @@ func (downloader *downloaderImpl) download(req *http.Request, url, destination s
if err != nil {
return "", errors.Wrap(err, url)
}
defer outfile.Close()
defer func() {
_ = outfile.Close()
}()
checksummer := utils.NewChecksumWriter()
writers := []io.Writer{outfile, downloader.aggWriter}
@@ -270,7 +274,7 @@ func (downloader *downloaderImpl) download(req *http.Request, url, destination s
_, err = io.Copy(w, resp.Body)
if err != nil {
os.Remove(temppath)
_ = os.Remove(temppath)
return "", errors.Wrap(err, url)
}
@@ -295,7 +299,7 @@ func (downloader *downloaderImpl) download(req *http.Request, url, destination s
downloader.progress.Printf("WARNING: %s\n", err.Error())
}
} else {
os.Remove(temppath)
_ = os.Remove(temppath)
return "", err
}
} else {

View File

@@ -32,13 +32,13 @@ func (s *DownloaderSuiteBase) SetUpTest(c *C) {
mux := http.NewServeMux()
mux.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, %s", r.URL.Path)
_, _ = fmt.Fprintf(w, "Hello, %s", r.URL.Path)
})
s.ch = make(chan struct{})
go func() {
http.Serve(s.l, mux)
_ = http.Serve(s.l, mux)
close(s.ch)
}()
@@ -52,11 +52,11 @@ func (s *DownloaderSuiteBase) SetUpTest(c *C) {
func (s *DownloaderSuiteBase) TearDownTest(c *C) {
s.progress.Shutdown()
s.l.Close()
_ = s.l.Close()
<-s.ch
os.Remove(s.tempfile.Name())
s.tempfile.Close()
_ = os.Remove(s.tempfile.Name())
_ = s.tempfile.Close()
}
type DownloaderSuite struct {

View File

@@ -104,7 +104,9 @@ func (f *FakeDownloader) DownloadWithChecksum(_ context.Context, url string, fil
if err != nil {
return err
}
defer outfile.Close()
defer func() {
_ = outfile.Close()
}()
cks := utils.NewChecksumWriter()
w := io.MultiWriter(outfile, cks)

View File

@@ -14,7 +14,7 @@ import (
"golang.org/x/time/rate"
"github.com/aptly-dev/aptly/utils"
"github.com/cavaliergopher/grab/v3"
grab "github.com/cavaliergopher/grab/v3"
"github.com/pkg/errors"
"github.com/aptly-dev/aptly/aptly"
@@ -49,10 +49,10 @@ func (d *GrabDownloader) Download(ctx context.Context, url string, destination s
func (d *GrabDownloader) DownloadWithChecksum(ctx context.Context, url string, destination string, expected *utils.ChecksumInfo, ignoreMismatch bool) error {
maxTries := d.maxTries
const delayMax = time.Duration(5 * time.Minute)
// FIXME: const delayMax = time.Duration(5 * time.Minute)
delay := time.Duration(1 * time.Second)
const delayMultiplier = 2
err := fmt.Errorf("No tries available")
// FIXME: const delayMultiplier = 2
err := fmt.Errorf("no tries available")
for maxTries > 0 {
err = d.download(ctx, url, destination, expected, ignoreMismatch)
if err == nil {
@@ -125,7 +125,7 @@ func (d *GrabDownloader) download(_ context.Context, url string, destination str
req.RateLimiter = rate.NewLimiter(rate.Limit(d.downLimit), int(d.downLimit))
}
d.maybeSetupChecksum(req, expected)
err = d.maybeSetupChecksum(req, expected)
if err != nil {
d.log("Error setting up checksum: %v\n", err)
return errors.Wrap(err, url)
@@ -133,14 +133,17 @@ func (d *GrabDownloader) download(_ context.Context, url string, destination str
resp := d.client.Do(req)
Loop:
for {
select {
case <-resp.Done:
// download is complete
break Loop
}
}
<-resp.Done
// download is complete
// Loop:
// for {
// select {
// case <-resp.Done:
// // download is complete
// break Loop
// }
// }
err = resp.Err()
if err != nil && err == grab.ErrBadChecksum && ignoreMismatch {
fmt.Printf("Ignoring checksum mismatch for %s\n", url)

View File

@@ -31,13 +31,13 @@ func (s *GrabDownloaderSuiteBase) SetUpTest(c *C) {
mux := http.NewServeMux()
mux.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, %s", r.URL.Path)
_, _ = fmt.Fprintf(w, "Hello, %s", r.URL.Path)
})
s.ch = make(chan struct{})
go func() {
http.Serve(s.l, mux)
_ = http.Serve(s.l, mux)
close(s.ch)
}()
@@ -51,11 +51,11 @@ func (s *GrabDownloaderSuiteBase) SetUpTest(c *C) {
func (s *GrabDownloaderSuiteBase) TearDownTest(c *C) {
s.progress.Shutdown()
s.l.Close()
_ = s.l.Close()
<-s.ch
os.Remove(s.tempfile.Name())
s.tempfile.Close()
_ = os.Remove(s.tempfile.Name())
_ = s.tempfile.Close()
}
type GrabDownloaderSuite struct {

View File

@@ -12,7 +12,7 @@ type Error struct {
URL string
}
// Error
// Error returns HTTP error message
func (e *Error) Error() string {
return fmt.Sprintf("HTTP code %d while fetching %s", e.Code, e.URL)
}

View File

@@ -24,7 +24,7 @@ func DownloadTempWithChecksum(ctx context.Context, downloader aptly.Downloader,
if err != nil {
return nil, err
}
defer os.RemoveAll(tempdir)
defer func() { _ = os.RemoveAll(tempdir) }()
tempfile := filepath.Join(tempdir, "buffer")

View File

@@ -25,11 +25,11 @@ func (s *TempSuite) TearDownTest(c *C) {
func (s *TempSuite) TestDownloadTemp(c *C) {
f, err := DownloadTemp(s.ctx, s.d, s.url+"/test")
c.Assert(err, IsNil)
defer f.Close()
defer func() { _ = f.Close() }()
buf := make([]byte, 1)
f.Read(buf)
_, _ = f.Read(buf)
c.Assert(buf, DeepEquals, []byte("H"))
_, err = os.Stat(f.Name())

View File

@@ -203,7 +203,9 @@ func (g *GpgVerifier) runGpgv(args []string, context string, showKeyTip bool) (*
if err != nil {
return nil, err
}
defer tempf.Close()
defer func() {
_ = tempf.Close()
}()
err = os.Remove(tempf.Name())
if err != nil {
@@ -216,7 +218,9 @@ func (g *GpgVerifier) runGpgv(args []string, context string, showKeyTip bool) (*
if err != nil {
return nil, err
}
defer stderr.Close()
defer func() {
_ = stderr.Close()
}()
err = cmd.Start()
if err != nil {
@@ -232,7 +236,7 @@ func (g *GpgVerifier) runGpgv(args []string, context string, showKeyTip bool) (*
cmderr := cmd.Wait()
tempf.Seek(0, 0)
_, _ = tempf.Seek(0, 0)
statusr := bufio.NewScanner(tempf)
@@ -281,8 +285,10 @@ func (g *GpgVerifier) VerifyDetachedSignature(signature, cleartext io.Reader, sh
if err != nil {
return err
}
defer os.Remove(sigf.Name())
defer sigf.Close()
defer func() {
_ = os.Remove(sigf.Name())
_ = sigf.Close()
}()
_, err = io.Copy(sigf, signature)
if err != nil {
@@ -293,8 +299,10 @@ func (g *GpgVerifier) VerifyDetachedSignature(signature, cleartext io.Reader, sh
if err != nil {
return err
}
defer os.Remove(clearf.Name())
defer clearf.Close()
defer func() {
_ = os.Remove(clearf.Name())
_ = clearf.Close()
}()
_, err = io.Copy(clearf, cleartext)
if err != nil {
@@ -326,8 +334,10 @@ func (g *GpgVerifier) VerifyClearsigned(clearsigned io.Reader, showKeyTip bool)
if err != nil {
return nil, err
}
defer os.Remove(clearf.Name())
defer clearf.Close()
defer func() {
_ = os.Remove(clearf.Name())
_ = clearf.Close()
}()
_, err = io.Copy(clearf, clearsigned)
if err != nil {
@@ -344,8 +354,10 @@ func (g *GpgVerifier) ExtractClearsigned(clearsigned io.Reader) (text *os.File,
if err != nil {
return
}
defer os.Remove(clearf.Name())
defer clearf.Close()
defer func() {
_ = os.Remove(clearf.Name())
_ = clearf.Close()
}()
_, err = io.Copy(clearf, clearsigned)
if err != nil {
@@ -356,7 +368,9 @@ func (g *GpgVerifier) ExtractClearsigned(clearsigned io.Reader) (text *os.File,
if err != nil {
return
}
defer os.Remove(text.Name())
defer func() {
_ = os.Remove(text.Name())
}()
args := []string{"--no-auto-check-trustdb", "--decrypt", "--batch", "--skip-verify", "--output", "-", clearf.Name()}
@@ -365,7 +379,9 @@ func (g *GpgVerifier) ExtractClearsigned(clearsigned io.Reader) (text *os.File,
if err != nil {
return nil, err
}
defer stdout.Close()
defer func() {
_ = stdout.Close()
}()
err = cmd.Start()
if err != nil {

View File

@@ -23,8 +23,8 @@ func (s *GnupgSuite) SetUpSuite(c *C) {
// If gpg == gpg1 = pick gpg
func (s *GnupgSuite) TestGPG1(c *C) {
origPath := os.Getenv("PATH")
os.Setenv("PATH", filepath.Join(s.bins, "gpg1"))
defer func() { os.Setenv("PATH", origPath) }()
_ = os.Setenv("PATH", filepath.Join(s.bins, "gpg1"))
defer func() { _ = os.Setenv("PATH", origPath) }()
signer := NewGpgSigner(GPG1Finder())
c.Assert(signer.gpg, Equals, "gpg")
@@ -33,8 +33,8 @@ func (s *GnupgSuite) TestGPG1(c *C) {
// gpg(2) + gpg1 installed = pick gpg1
func (s *GnupgSuite) TestGPG1Not2(c *C) {
origPath := os.Getenv("PATH")
os.Setenv("PATH", filepath.Join(s.bins, "gpg2-and-1"))
defer func() { os.Setenv("PATH", origPath) }()
_ = os.Setenv("PATH", filepath.Join(s.bins, "gpg2-and-1"))
defer func() { _ = os.Setenv("PATH", origPath) }()
signer := NewGpgSigner(GPG1Finder())
c.Assert(signer.gpg, Equals, "gpg1")
@@ -43,8 +43,8 @@ func (s *GnupgSuite) TestGPG1Not2(c *C) {
// If gpg == gpg2 and no gpg1 is available, NewGpgSigner must panic:
// the GPG1 finder cannot locate a suitable gpg 1.x executable.
func (s *GnupgSuite) TestGPGNothing(c *C) {
	origPath := os.Getenv("PATH")
	// Point PATH at the fixture dir containing only a gpg2 binary;
	// restore the original PATH when the test finishes. The duplicated
	// Setenv/defer pair from the merge artifact is collapsed to one.
	_ = os.Setenv("PATH", filepath.Join(s.bins, "gpg2-only"))
	defer func() { _ = os.Setenv("PATH", origPath) }()

	c.Assert(func() { NewGpgSigner(GPG1Finder()) }, PanicMatches, `Couldn't find a suitable gpg executable.+`)
}
@@ -52,8 +52,8 @@ func (s *GnupgSuite) TestGPGNothing(c *C) {
// If gpgv == gpgv1 = pick gpgv
func (s *GnupgSuite) TestGPGV1(c *C) {
origPath := os.Getenv("PATH")
os.Setenv("PATH", filepath.Join(s.bins, "gpgv1")+":"+filepath.Join(s.bins, "gpg1"))
defer func() { os.Setenv("PATH", origPath) }()
_ = os.Setenv("PATH", filepath.Join(s.bins, "gpgv1")+":"+filepath.Join(s.bins, "gpg1"))
defer func() { _ = os.Setenv("PATH", origPath) }()
verifier := NewGpgVerifier(GPG1Finder())
c.Assert(verifier.gpgv, Equals, "gpgv")
@@ -62,8 +62,8 @@ func (s *GnupgSuite) TestGPGV1(c *C) {
// gpgv(2) + gpgv1 installed = pick gpgv1
func (s *GnupgSuite) TestGPGV1Not2(c *C) {
origPath := os.Getenv("PATH")
os.Setenv("PATH", filepath.Join(s.bins, "gpgv2-and-1")+":"+filepath.Join(s.bins, "gpg1"))
defer func() { os.Setenv("PATH", origPath) }()
_ = os.Setenv("PATH", filepath.Join(s.bins, "gpgv2-and-1")+":"+filepath.Join(s.bins, "gpg1"))
defer func() { _ = os.Setenv("PATH", origPath) }()
verifier := NewGpgVerifier(GPG1Finder())
c.Assert(verifier.gpgv, Equals, "gpgv1")
@@ -72,8 +72,8 @@ func (s *GnupgSuite) TestGPGV1Not2(c *C) {
// If gpgv == gpgv2 and no gpgv1 is available, NewGpgVerifier must panic:
// the GPG1 finder cannot locate a suitable gpgv 1.x executable.
func (s *GnupgSuite) TestGPGVNothing(c *C) {
	origPath := os.Getenv("PATH")
	// PATH exposes only gpgv2 (plus gpg1 so the gpg half still resolves);
	// restore the original PATH afterwards. The duplicated Setenv/defer
	// pair from the merge artifact is collapsed to one.
	_ = os.Setenv("PATH", filepath.Join(s.bins, "gpgv2-only")+":"+filepath.Join(s.bins, "gpg1"))
	defer func() { _ = os.Setenv("PATH", origPath) }()

	c.Assert(func() { NewGpgVerifier(GPG1Finder()) }, PanicMatches, `Couldn't find a suitable gpg executable.+`)
}
@@ -220,6 +220,6 @@ func (s *Gnupg2SignerSuite) SetUpTest(c *C) {
// TearDownTest runs the embedded suite's teardown and then deletes the
// gpg2 keyring fixtures generated for this suite. Each file is removed
// exactly once; removal errors are ignored (the file may not exist).
func (s *Gnupg2SignerSuite) TearDownTest(c *C) {
	s.SignerSuite.TearDownTest(c)
	_ = os.Remove("../system/files/aptly2.gpg")
	_ = os.Remove("../system/files/aptly2_passphrase.gpg")
}

View File

@@ -79,7 +79,9 @@ func (g *GoSigner) Init() error {
if err != nil {
return errors.Wrap(err, "error opening passphrase file")
}
defer passF.Close()
defer func() {
_ = passF.Close()
}()
contents, err := io.ReadAll(passF)
if err != nil {
@@ -224,13 +226,17 @@ func (g *GoSigner) DetachedSign(source string, destination string) error {
if err != nil {
return errors.Wrap(err, "error opening source file")
}
defer message.Close()
defer func() {
_ = message.Close()
}()
signature, err := os.Create(destination)
if err != nil {
return errors.Wrap(err, "error creating signature file")
}
defer signature.Close()
defer func() {
_ = signature.Close()
}()
err = openpgp.ArmoredDetachSign(signature, g.signer, message, g.signerConfig)
if err != nil {
@@ -248,13 +254,17 @@ func (g *GoSigner) ClearSign(source string, destination string) error {
if err != nil {
return errors.Wrap(err, "error opening source file")
}
defer message.Close()
defer func() {
_ = message.Close()
}()
clearsigned, err := os.Create(destination)
if err != nil {
return errors.Wrap(err, "error creating clearsigned file")
}
defer clearsigned.Close()
defer func() {
_ = clearsigned.Close()
}()
stream, err := clearsign.Encode(clearsigned, g.signer.PrivateKey, g.signerConfig)
if err != nil {
@@ -263,7 +273,7 @@ func (g *GoSigner) ClearSign(source string, destination string) error {
_, err = io.Copy(stream, message)
if err != nil {
stream.Close()
_ = stream.Close()
return errors.Wrap(err, "error generating clearsigned signature")
}
@@ -463,7 +473,9 @@ func (g *GoVerifier) ExtractClearsigned(clearsigned io.Reader) (text *os.File, e
if err != nil {
return
}
defer os.Remove(text.Name())
defer func() {
_ = os.Remove(text.Name())
}()
_, err = text.Write(block.Bytes)
if err != nil {
@@ -494,7 +506,9 @@ func loadKeyRing(name string, ignoreMissing bool) (openpgp.EntityList, error) {
return nil, err
}
defer f.Close()
defer func() {
_ = f.Close()
}()
return openpgp.ReadKeyRing(f)
}

View File

@@ -3,7 +3,6 @@ package pgp
import (
"crypto/rand"
"io"
"io/ioutil"
"os"
"path"
@@ -57,14 +56,14 @@ func (s *SignerSuite) SetUpTest(c *C) {
_, err = f.Write([]byte("verysecret"))
c.Assert(err, IsNil)
f.Close()
_ = f.Close()
s.signer.SetBatch(true)
}
// TearDownTest closes the cleartext and signed fixture files opened in
// SetUpTest. Each file is closed exactly once — the duplicated Close
// calls from the merge artifact are dropped, since a second Close on an
// *os.File only returns an error. Close errors are ignored in cleanup.
func (s *SignerSuite) TearDownTest(c *C) {
	_ = s.clearF.Close()
	_ = s.signedF.Close()
}
func (s *SignerSuite) testSignDetached(c *C) {
@@ -137,9 +136,11 @@ func (s *SignerSuite) testClearSign(c *C, expectedKey Key) {
c.Assert(err, IsNil)
extractedF, err := s.verifier.ExtractClearsigned(s.signedF)
c.Assert(err, IsNil)
defer extractedF.Close()
defer func() {
_ = extractedF.Close()
}()
extracted, err := ioutil.ReadAll(extractedF)
extracted, err := io.ReadAll(extractedF)
c.Assert(err, IsNil)
c.Assert(extracted, DeepEquals, s.cleartext)

View File

@@ -2,7 +2,7 @@ package pgp
import (
"bytes"
"io/ioutil"
"io"
"os"
. "gopkg.in/check.v1"
@@ -31,8 +31,8 @@ func (s *VerifierSuite) TestVerifyDetached(c *C) {
err = s.verifier.VerifyDetachedSignature(signature, cleartext, false)
c.Assert(err, IsNil)
signature.Close()
cleartext.Close()
_ = signature.Close()
_ = cleartext.Close()
}
}
@@ -50,7 +50,7 @@ func (s *VerifierSuite) TestVerifyClearsigned(c *C) {
c.Check(keyInfo.GoodKeys, DeepEquals, []Key{"04EE7237B7D453EC", "648ACFD622F3D138", "DCC9EFBF77E11517"})
c.Check(keyInfo.MissingKeys, DeepEquals, []Key(nil))
clearsigned.Close()
_ = clearsigned.Close()
}
}
@@ -70,15 +70,15 @@ func (s *VerifierSuite) TestExtractClearsigned(c *C) {
c.Assert(err, IsNil)
c.Check(is, Equals, true)
clearsigned.Seek(0, 0)
_, _ = clearsigned.Seek(0, 0)
extractedF, err := s.verifier.ExtractClearsigned(clearsigned)
c.Assert(err, IsNil)
expected, err := ioutil.ReadAll(cleartext)
expected, err := io.ReadAll(cleartext)
c.Assert(err, IsNil)
extracted, err := ioutil.ReadAll(extractedF)
extracted, err := io.ReadAll(extractedF)
c.Assert(err, IsNil)
// normalize newlines
@@ -87,8 +87,8 @@ func (s *VerifierSuite) TestExtractClearsigned(c *C) {
c.Check(extracted, DeepEquals, expected)
extractedF.Close()
clearsigned.Close()
cleartext.Close()
_ = extractedF.Close()
_ = clearsigned.Close()
_ = cleartext.Close()
}
}

Some files were not shown because too many files have changed in this diff Show More