Merge pull request #539 from smira/public-pool-paths

New package pool with configurable hashing base
This commit is contained in:
Andrey Smirnov
2017-04-27 16:24:14 +03:00
committed by GitHub
80 changed files with 3884 additions and 985 deletions

View File

@@ -27,3 +27,4 @@ List of contributors, in chronological order:
* Johannes Layher (https://github.com/jola5)
* Charles Hsu (https://github.com/charz)
* Clemens Rabe (https://github.com/seeraven)
* TJ Merritt (https://github.com/tjmerritt)

8
Gopkg.lock generated
View File

@@ -1,4 +1,4 @@
memo = "4c0fec0262b9b442de718ea62c0caa79c04721ff5434072bf8d9e5e73e660a2c"
memo = "b793b143db9dc49ef630454bb15d4d259e88d411827adac1838ec9fd6727f229"
[[projects]]
name = "github.com/AlekSi/pointer"
@@ -101,6 +101,12 @@ memo = "4c0fec0262b9b442de718ea62c0caa79c04721ff5434072bf8d9e5e73e660a2c"
packages = [".","swifttest"]
revision = "8e9b10220613abdbc2896808ee6b43e411a4fa6c"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "github.com/smira/commander"

View File

@@ -325,7 +325,7 @@ func apiReposPackageFromDir(c *gin.Context) {
}
processedFiles, failedFiles2, err = deb.ImportPackageFiles(list, packageFiles, forceReplace, verifier, context.PackagePool(),
context.CollectionFactory().PackageCollection(), reporter, nil)
context.CollectionFactory().PackageCollection(), reporter, nil, context.CollectionFactory().ChecksumCollection())
failedFiles = append(failedFiles, failedFiles2...)
if err != nil {

View File

@@ -4,24 +4,57 @@ package aptly
import (
"io"
"os"
"github.com/smira/aptly/utils"
)
// ReadSeekerCloser = ReadSeeker + Closer
type ReadSeekerCloser interface {
io.ReadSeeker
io.Closer
}
// PackagePool is an abstraction of package pool storage.
//
// PackagePool stores all the package files, deduplicating them.
type PackagePool interface {
// Path returns full path to package file in pool given any name and hash of file contents
Path(filename string, checksums utils.ChecksumInfo) (string, error)
// RelativePath returns path relative to pool's root for package files given checksums and original filename
RelativePath(filename string, checksums utils.ChecksumInfo) (string, error)
// Verify checks whether file exists in the pool and fills back checksum info
//
// if poolPath is empty, poolPath is generated automatically based on checksum info (if available)
// in any case, if function returns true, it also fills back checksums with complete information about the file in the pool
Verify(poolPath, basename string, checksums *utils.ChecksumInfo, checksumStorage ChecksumStorage) (bool, error)
// Import copies file into package pool
//
// - srcPath is full path to source file as it is now
// - basename is desired human-readable name (canonical filename)
// - checksums are used to calculate file placement
// - move indicates whether srcPath can be removed
Import(srcPath, basename string, checksums *utils.ChecksumInfo, move bool, storage ChecksumStorage) (path string, err error)
// LegacyPath returns legacy (pre 1.1) path to package file (relative to root)
LegacyPath(filename string, checksums *utils.ChecksumInfo) (string, error)
// Stat returns Unix stat(2) info
Stat(path string) (os.FileInfo, error)
// Open returns ReadSeekerCloser to access the file
Open(path string) (ReadSeekerCloser, error)
// FilepathList returns file paths of all the files in the pool
FilepathList(progress Progress) ([]string, error)
// Remove deletes file in package pool returns its size
Remove(path string) (size int64, err error)
// Import copies file into package pool
Import(path string, checksums utils.ChecksumInfo) error
}
// LocalPackagePool is implemented by PackagePools residing on the same filesystem
type LocalPackagePool interface {
// GenerateTempPath generates temporary path for download (which is fast to import into package pool later on)
GenerateTempPath(filename string) (string, error)
// Link generates hardlink to destination path
Link(path, dstPath string) error
// Symlink generates symlink to destination path
Symlink(path, dstPath string) error
// FullPath generates full path to the file in pool
//
// Please use with care: it's not supposed to be used to access files
FullPath(path string) string
}
// PublishedStorage is abstraction of filesystem storing all published repositories
@@ -35,7 +68,7 @@ type PublishedStorage interface {
// Remove removes single file under public path
Remove(path string) error
// LinkFromPool links package file from pool to dist's pool location
LinkFromPool(publishedDirectory string, sourcePool PackagePool, sourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error
LinkFromPool(publishedDirectory, baseName string, sourcePool PackagePool, sourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error
// Filelist returns list of files under prefix
Filelist(prefix string) ([]string, error)
// RenameFile renames (moves) file
@@ -81,18 +114,17 @@ type Progress interface {
// Downloader is parallel HTTP fetcher
type Downloader interface {
// Download starts new download task
Download(url string, destination string, result chan<- error)
Download(url string, destination string) error
// DownloadWithChecksum starts new download task with checksum verification
DownloadWithChecksum(url string, destination string, result chan<- error, expected utils.ChecksumInfo, ignoreMismatch bool, maxTries int)
// Pause pauses task processing
Pause()
// Resume resumes task processing
Resume()
// Shutdown stops downloader after current tasks are finished,
// but doesn't process rest of queue
Shutdown()
// Abort stops downloader without waiting for shutdown
Abort()
DownloadWithChecksum(url string, destination string, expected *utils.ChecksumInfo, ignoreMismatch bool, maxTries int) error
// GetProgress returns Progress object
GetProgress() Progress
}
// ChecksumStorage stores checksums in some (persistent) storage
type ChecksumStorage interface {
// Get finds checksums in DB by path
Get(path string) (*utils.ChecksumInfo, error)
// Update adds or updates information about checksum in DB
Update(path string, c *utils.ChecksumInfo) error
}

View File

@@ -5,7 +5,9 @@ import (
"os"
"os/signal"
"strings"
"sync"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/deb"
"github.com/smira/aptly/query"
"github.com/smira/aptly/utils"
@@ -84,7 +86,9 @@ func aptlyMirrorUpdate(cmd *commander.Command, args []string) error {
skipExistingPackages := context.Flags().Lookup("skip-existing-packages").Value.Get().(bool)
context.Progress().Printf("Building download queue...\n")
queue, downloadSize, err = repo.BuildDownloadQueue(context.PackagePool(), skipExistingPackages)
queue, downloadSize, err = repo.BuildDownloadQueue(context.PackagePool(), context.CollectionFactory().PackageCollection(),
context.CollectionFactory().ChecksumCollection(), skipExistingPackages)
if err != nil {
return fmt.Errorf("unable to update: %s", err)
}
@@ -112,6 +116,14 @@ func aptlyMirrorUpdate(cmd *commander.Command, args []string) error {
// Catch ^C
sigch := make(chan os.Signal)
signal.Notify(sigch, os.Interrupt)
defer signal.Stop(sigch)
abort := make(chan struct{})
go func() {
<-sigch
signal.Stop(sigch)
close(abort)
}()
count := len(queue)
context.Progress().Printf("Download queue: %d items (%s)\n", count, utils.HumanBytes(downloadSize))
@@ -119,37 +131,82 @@ func aptlyMirrorUpdate(cmd *commander.Command, args []string) error {
// Download from the queue
context.Progress().InitBar(downloadSize, true)
// Download all package files
ch := make(chan error, count)
downloadQueue := make(chan int)
var (
errors []string
errLock sync.Mutex
)
pushError := func(err error) {
errLock.Lock()
errors = append(errors, err.Error())
errLock.Unlock()
}
// In separate goroutine (to avoid blocking main), push queue to downloader
go func() {
for _, task := range queue {
context.Downloader().DownloadWithChecksum(repo.PackageURL(task.RepoURI).String(), task.DestinationPath, ch, task.Checksums, ignoreMismatch, maxTries)
for idx := range queue {
select {
case downloadQueue <- idx:
case <-abort:
return
}
}
// We don't need queue after this point
queue = nil
close(downloadQueue)
}()
// Wait for all downloads to finish
var errors []string
var wg sync.WaitGroup
for count > 0 {
select {
case <-sigch:
signal.Stop(sigch)
return fmt.Errorf("unable to update: interrupted")
case err = <-ch:
if err != nil {
errors = append(errors, err.Error())
for i := 0; i < context.Config().DownloadConcurrency; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case idx, ok := <-downloadQueue:
if !ok {
return
}
task := &queue[idx]
var e error
// provision download location
task.TempDownPath, e = context.PackagePool().(aptly.LocalPackagePool).GenerateTempPath(task.File.Filename)
if e != nil {
pushError(e)
continue
}
// download file...
e = context.Downloader().DownloadWithChecksum(
repo.PackageURL(task.File.DownloadURL()).String(),
task.TempDownPath,
&task.File.Checksums,
ignoreMismatch,
maxTries)
if e != nil {
pushError(e)
continue
}
case <-abort:
return
}
}
count--
}
}()
}
// Wait for all downloads to finish
wg.Wait()
select {
case <-abort:
return fmt.Errorf("unable to update: interrupted")
default:
}
context.Progress().ShutdownBar()
signal.Stop(sigch)
if len(errors) > 0 {
return fmt.Errorf("unable to update: download errors:\n %s", strings.Join(errors, "\n "))
@@ -160,7 +217,37 @@ func aptlyMirrorUpdate(cmd *commander.Command, args []string) error {
return fmt.Errorf("unable to update: %s", err)
}
repo.FinalizeDownload()
// Import downloaded files
context.Progress().InitBar(int64(len(queue)), false)
for idx := range queue {
context.Progress().AddBar(1)
task := &queue[idx]
// and import it back to the pool
task.File.PoolPath, err = context.PackagePool().Import(task.TempDownPath, task.File.Filename, &task.File.Checksums, true, context.CollectionFactory().ChecksumCollection())
if err != nil {
return fmt.Errorf("unable to import file: %s", err)
}
// update "attached" files if any
for _, additionalTask := range task.Additional {
additionalTask.File.PoolPath = task.File.PoolPath
additionalTask.File.Checksums = task.File.Checksums
}
select {
case <-abort:
return fmt.Errorf("unable to update: interrupted")
default:
}
}
context.Progress().ShutdownBar()
repo.FinalizeDownload(context.CollectionFactory(), context.Progress())
err = context.CollectionFactory().RemoteRepoCollection().Update(repo)
if err != nil {
return fmt.Errorf("unable to update: %s", err)

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/deb"
"github.com/smira/aptly/query"
"github.com/smira/commander"
@@ -87,11 +88,17 @@ func aptlyPackageShow(cmd *commander.Command, args []string) error {
if withFiles {
fmt.Printf("Files in the pool:\n")
packagePool := context.PackagePool()
for _, f := range p.Files() {
path, err := context.PackagePool().Path(f.Filename, f.Checksums)
path, err := f.GetPoolPath(packagePool)
if err != nil {
return err
}
if pp, ok := packagePool.(aptly.LocalPackagePool); ok {
path = pp.FullPath(path)
}
fmt.Printf(" %s\n", path)
}
fmt.Printf("\n")

View File

@@ -48,7 +48,8 @@ func aptlyRepoAdd(cmd *commander.Command, args []string) error {
var processedFiles, failedFiles2 []string
processedFiles, failedFiles2, err = deb.ImportPackageFiles(list, packageFiles, forceReplace, verifier, context.PackagePool(),
context.CollectionFactory().PackageCollection(), &aptly.ConsoleResultReporter{Progress: context.Progress()}, nil)
context.CollectionFactory().PackageCollection(), &aptly.ConsoleResultReporter{Progress: context.Progress()}, nil,
context.CollectionFactory().ChecksumCollection())
failedFiles = append(failedFiles, failedFiles2...)
if err != nil {
return fmt.Errorf("unable to import package files: %s", err)

View File

@@ -151,7 +151,7 @@ func aptlyRepoInclude(cmd *commander.Command, args []string) error {
var processedFiles2, failedFiles2 []string
processedFiles2, failedFiles2, err = deb.ImportPackageFiles(list, packageFiles, forceReplace, verifier, context.PackagePool(),
context.CollectionFactory().PackageCollection(), reporter, restriction)
context.CollectionFactory().PackageCollection(), reporter, restriction, context.CollectionFactory().ChecksumCollection())
if err != nil {
return fmt.Errorf("unable to import package files: %s", err)

View File

@@ -101,6 +101,9 @@ func (context *AptlyContext) config() *utils.ConfigStructure {
if err != nil {
fmt.Fprintf(os.Stderr, "Config file not found, creating default config at %s\n\n", configLocations[0])
// as this is fresh aptly installation, we don't need to support legacy pool locations
utils.Config.SkipLegacyPool = true
utils.SaveConfig(configLocations[0], &utils.Config)
}
}
@@ -204,8 +207,7 @@ func (context *AptlyContext) Downloader() aptly.Downloader {
if downloadLimit == 0 {
downloadLimit = context.config().DownloadLimit
}
context.downloader = http.NewDownloader(context.config().DownloadConcurrency,
downloadLimit*1024, context._progress())
context.downloader = http.NewDownloader(downloadLimit*1024, context._progress())
}
return context.downloader
@@ -303,7 +305,7 @@ func (context *AptlyContext) PackagePool() aptly.PackagePool {
defer context.Unlock()
if context.packagePool == nil {
context.packagePool = files.NewPackagePool(context.config().RootDir)
context.packagePool = files.NewPackagePool(context.config().RootDir, !context.config().SkipLegacyPool)
}
return context.packagePool
@@ -416,7 +418,6 @@ func (context *AptlyContext) Shutdown() {
context.database = nil
}
if context.downloader != nil {
context.downloader.Abort()
context.downloader = nil
}
if context.progress != nil {
@@ -431,7 +432,6 @@ func (context *AptlyContext) Cleanup() {
defer context.Unlock()
if context.downloader != nil {
context.downloader.Shutdown()
context.downloader = nil
}
if context.progress != nil {

View File

@@ -0,0 +1,67 @@
package deb
import (
"bytes"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/database"
"github.com/smira/aptly/utils"
"github.com/ugorji/go/codec"
)
// ChecksumCollection does management of ChecksumInfo in DB
type ChecksumCollection struct {
// db is the persistent key-value storage backing the collection
db database.Storage
// codecHandle is the msgpack handle used to (de)serialize ChecksumInfo values
codecHandle *codec.MsgpackHandle
}
// NewChecksumCollection creates new ChecksumCollection and binds it to database
func NewChecksumCollection(db database.Storage) *ChecksumCollection {
return &ChecksumCollection{
db: db,
codecHandle: &codec.MsgpackHandle{},
}
}
// dbKey builds the storage key for a pool path by prefixing it with "C",
// keeping checksum records in their own key namespace.
func (collection *ChecksumCollection) dbKey(path string) []byte {
	key := make([]byte, 0, len(path)+1)
	key = append(key, 'C')
	return append(key, path...)
}
// Get finds checksums in DB by path
//
// A missing entry is not an error: (nil, nil) is returned when no
// checksums are stored for the path.
func (collection *ChecksumCollection) Get(path string) (*utils.ChecksumInfo, error) {
	encoded, err := collection.db.Get(collection.dbKey(path))
	if err == database.ErrNotFound {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}

	var c utils.ChecksumInfo
	if err = codec.NewDecoderBytes(encoded, collection.codecHandle).Decode(&c); err != nil {
		return nil, err
	}

	return &c, nil
}
// Update adds or updates information about checksum in DB
func (collection *ChecksumCollection) Update(path string, c *utils.ChecksumInfo) error {
	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, collection.codecHandle).Encode(c); err != nil {
		return err
	}
	return collection.db.Put(collection.dbKey(path), buf.Bytes())
}
// Compile-time check that ChecksumCollection satisfies aptly.ChecksumStorage.
var _ aptly.ChecksumStorage = (*ChecksumCollection)(nil)

View File

@@ -0,0 +1,47 @@
package deb
import (
"github.com/smira/aptly/database"
"github.com/smira/aptly/utils"
. "gopkg.in/check.v1"
)
// ChecksumCollectionSuite exercises store/load round-trips of ChecksumCollection.
type ChecksumCollectionSuite struct {
// collection under test, bound to db
collection *ChecksumCollection
// c is the sample checksum fixture written and read back in tests
c utils.ChecksumInfo
// db is a temporary database opened per test
db database.Storage
}
var _ = Suite(&ChecksumCollectionSuite{})
// SetUpTest prepares a fresh temporary database and a sample checksum fixture
// before each test.
func (s *ChecksumCollectionSuite) SetUpTest(c *C) {
	s.c = utils.ChecksumInfo{
		Size: 124,
		// NOTE(review): the MD5 value below is 40 hex chars (SHA1-length),
		// not 32 — fixture data only, but worth confirming intent.
		MD5:    "da39a3ee5e6b4b0d3255bfef95601890afd80709",
		SHA1:   "da39a3ee5e6b4b0d3255bfef95601890afd80709",
		SHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	}

	// fix: the error from OpenDB was silently discarded with `_`; a failed
	// open would surface later as a confusing nil-db panic
	var err error
	s.db, err = database.OpenDB(c.MkDir())
	c.Assert(err, IsNil)

	s.collection = NewChecksumCollection(s.db)
}
// TearDownTest closes the temporary database opened in SetUpTest.
func (s *ChecksumCollectionSuite) TearDownTest(c *C) {
s.db.Close()
}
// TestFlow verifies the full store/load cycle: a missing path yields
// (nil, nil), and a stored checksum reads back equal.
func (s *ChecksumCollectionSuite) TestFlow(c *C) {
	// nothing stored under the path yet
	got, err := s.collection.Get("some/path")
	c.Assert(err, IsNil)
	c.Check(got, IsNil)

	// persist the fixture checksum
	err = s.collection.Update("some/path", &s.c)
	c.Assert(err, IsNil)

	// reading it back returns an equal value
	got, err = s.collection.Get("some/path")
	c.Assert(err, IsNil)
	c.Check(*got, DeepEquals, s.c)
}

View File

@@ -15,6 +15,7 @@ type CollectionFactory struct {
snapshots *SnapshotCollection
localRepos *LocalRepoCollection
publishedRepos *PublishedRepoCollection
checksums *ChecksumCollection
}
// NewCollectionFactory creates new factory
@@ -89,6 +90,18 @@ func (factory *CollectionFactory) PublishedRepoCollection() *PublishedRepoCollec
return factory.publishedRepos
}
// ChecksumCollection returns (or creates) new ChecksumCollection
func (factory *CollectionFactory) ChecksumCollection() *ChecksumCollection {
factory.Lock()
defer factory.Unlock()
if factory.checksums == nil {
factory.checksums = NewChecksumCollection(factory.db)
}
return factory.checksums
}
// Flush removes all references to collections, so that memory could be reclaimed
func (factory *CollectionFactory) Flush() {
factory.Lock()
@@ -99,4 +112,5 @@ func (factory *CollectionFactory) Flush() {
factory.remoteRepos = nil
factory.publishedRepos = nil
factory.packages = nil
factory.checksums = nil
}

View File

@@ -12,7 +12,9 @@ import (
"github.com/h2non/filetype/matchers"
"github.com/mkrautz/goar"
"github.com/pkg/errors"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
"github.com/smira/go-xz"
"github.com/smira/lzma"
@@ -105,13 +107,7 @@ func GetControlFileFromDsc(dscFile string, verifier utils.Verifier) (Stanza, err
}
// GetContentsFromDeb returns list of files installed by .deb package
func GetContentsFromDeb(packageFile string) ([]string, error) {
file, err := os.Open(packageFile)
if err != nil {
return nil, err
}
defer file.Close()
func GetContentsFromDeb(file aptly.ReadSeekerCloser, packageFile string) ([]string, error) {
library := ar.NewReader(file)
for {
header, err := library.Next()
@@ -119,7 +115,7 @@ func GetContentsFromDeb(packageFile string) ([]string, error) {
return nil, fmt.Errorf("unable to find data.tar.* part in %s", packageFile)
}
if err != nil {
return nil, fmt.Errorf("unable to read .deb archive from %s: %s", packageFile, err)
return nil, errors.Wrapf(err, "unable to read .deb archive from %s", packageFile)
}
if strings.HasPrefix(header.Name, "data.tar") {
@@ -142,7 +138,7 @@ func GetContentsFromDeb(packageFile string) ([]string, error) {
} else {
ungzip, err := gzip.NewReader(bufReader)
if err != nil {
return nil, fmt.Errorf("unable to ungzip data.tar.gz from %s: %s", packageFile, err)
return nil, errors.Wrapf(err, "unable to ungzip data.tar.gz from %s", packageFile)
}
defer ungzip.Close()
tarInput = ungzip
@@ -152,7 +148,7 @@ func GetContentsFromDeb(packageFile string) ([]string, error) {
case "data.tar.xz":
unxz, err := xz.NewReader(bufReader)
if err != nil {
return nil, fmt.Errorf("unable to unxz data.tar.xz from %s: %s", packageFile, err)
return nil, errors.Wrapf(err, "unable to unxz data.tar.xz from %s", packageFile)
}
defer unxz.Close()
tarInput = unxz
@@ -172,7 +168,7 @@ func GetContentsFromDeb(packageFile string) ([]string, error) {
return results, nil
}
if err != nil {
return nil, fmt.Errorf("unable to read .tar archive from %s: %s", packageFile, err)
return nil, errors.Wrapf(err, "unable to read .tar archive from %s", packageFile)
}
if tarHeader.Typeflag == tar.TypeDir {

View File

@@ -1,6 +1,7 @@
package deb
import (
"os"
"path/filepath"
"runtime"
@@ -59,13 +60,19 @@ func (s *DebSuite) TestGetControlFileFromDsc(c *C) {
}
func (s *DebSuite) TestGetContentsFromDeb(c *C) {
contents, err := GetContentsFromDeb(s.debFile)
f, err := os.Open(s.debFile)
c.Assert(err, IsNil)
contents, err := GetContentsFromDeb(f, s.debFile)
c.Check(err, IsNil)
c.Check(contents, DeepEquals, []string{"usr/share/doc/libboost-program-options-dev/changelog.gz",
"usr/share/doc/libboost-program-options-dev/copyright"})
c.Assert(f.Close(), IsNil)
contents, err = GetContentsFromDeb(s.debFile2)
f, err = os.Open(s.debFile2)
c.Assert(err, IsNil)
contents, err = GetContentsFromDeb(f, s.debFile2)
c.Check(err, IsNil)
c.Check(contents, DeepEquals, []string{"usr/bin/hardlink", "usr/share/man/man1/hardlink.1.gz",
"usr/share/doc/hardlink/changelog.gz", "usr/share/doc/hardlink/copyright", "usr/share/doc/hardlink/NEWS.Debian.gz"})
c.Assert(f.Close(), IsNil)
}

View File

@@ -60,7 +60,8 @@ func CollectPackageFiles(locations []string, reporter aptly.ResultReporter) (pac
// ImportPackageFiles imports files into local repository
func ImportPackageFiles(list *PackageList, packageFiles []string, forceReplace bool, verifier utils.Verifier,
pool aptly.PackagePool, collection *PackageCollection, reporter aptly.ResultReporter, restriction PackageQuery) (processedFiles []string, failedFiles []string, err error) {
pool aptly.PackagePool, collection *PackageCollection, reporter aptly.ResultReporter, restriction PackageQuery,
checksumStorage aptly.ChecksumStorage) (processedFiles []string, failedFiles []string, err error) {
if forceReplace {
list.PrepareIndex()
}
@@ -116,19 +117,24 @@ func ImportPackageFiles(list *PackageList, packageFiles []string, forceReplace b
continue
}
var files PackageFiles
if isSourcePackage {
files = p.Files()
}
var checksums utils.ChecksumInfo
checksums, err = utils.ChecksumsForFile(file)
if err != nil {
return nil, nil, err
}
if isSourcePackage {
p.UpdateFiles(append(p.Files(), PackageFile{Filename: filepath.Base(file), Checksums: checksums}))
} else {
p.UpdateFiles([]PackageFile{{Filename: filepath.Base(file), Checksums: checksums}})
mainPackageFile := PackageFile{
Filename: filepath.Base(file),
Checksums: checksums,
}
err = pool.Import(file, checksums)
mainPackageFile.PoolPath, err = pool.Import(file, mainPackageFile.Filename, &mainPackageFile.Checksums, false, checksumStorage)
if err != nil {
reporter.Warning("Unable to import file %s into pool: %s", file, err)
failedFiles = append(failedFiles, file)
@@ -137,13 +143,10 @@ func ImportPackageFiles(list *PackageList, packageFiles []string, forceReplace b
candidateProcessedFiles = append(candidateProcessedFiles, file)
// go over all files, except for the last one (.dsc/.deb itself)
for _, f := range p.Files() {
if filepath.Base(f.Filename) == filepath.Base(file) {
continue
}
sourceFile := filepath.Join(filepath.Dir(file), filepath.Base(f.Filename))
err = pool.Import(sourceFile, f.Checksums)
// go over all the other files
for i := range files {
sourceFile := filepath.Join(filepath.Dir(file), filepath.Base(files[i].Filename))
files[i].PoolPath, err = pool.Import(sourceFile, files[i].Filename, &files[i].Checksums, false, checksumStorage)
if err != nil {
reporter.Warning("Unable to import file %s into pool: %s", sourceFile, err)
failedFiles = append(failedFiles, file)
@@ -157,6 +160,8 @@ func ImportPackageFiles(list *PackageList, packageFiles []string, forceReplace b
continue
}
p.UpdateFiles(append(files, mainPackageFile))
if restriction != nil && !restriction.Matches(p) {
reporter.Warning("%s has been ignored as it doesn't match restriction", p)
failedFiles = append(failedFiles, file)

View File

@@ -418,7 +418,7 @@ func (p *Package) CalculateContents(packagePool aptly.PackagePool, progress aptl
}
file := p.Files()[0]
path, err := packagePool.Path(file.Filename, file.Checksums)
poolPath, err := file.GetPoolPath(packagePool)
if err != nil {
if progress != nil {
progress.ColoredPrintf("@y[!]@| @!Failed to build pool path: @| %s", err)
@@ -426,7 +426,16 @@ func (p *Package) CalculateContents(packagePool aptly.PackagePool, progress aptl
return nil, err
}
contents, err := GetContentsFromDeb(path)
reader, err := packagePool.Open(poolPath)
if err != nil {
if progress != nil {
progress.ColoredPrintf("@y[!]@| @!Failed to open package in pool: @| %s", err)
}
return nil, err
}
defer reader.Close()
contents, err := GetContentsFromDeb(reader, file.Filename)
if err != nil {
if progress != nil {
progress.ColoredPrintf("@y[!]@| @!Failed to generate package contents: @| %s", err)
@@ -547,7 +556,7 @@ func (p *Package) LinkFromPool(publishedStorage aptly.PublishedStorage, packageP
}
for i, f := range p.Files() {
sourcePath, err := packagePool.Path(f.Filename, f.Checksums)
sourcePoolPath, err := f.GetPoolPath(packagePool)
if err != nil {
return err
}
@@ -555,7 +564,7 @@ func (p *Package) LinkFromPool(publishedStorage aptly.PublishedStorage, packageP
relPath := filepath.Join("pool", component, poolDir)
publishedDirectory := filepath.Join(prefix, relPath)
err = publishedStorage.LinkFromPool(publishedDirectory, packagePool, sourcePath, f.Checksums, force)
err = publishedStorage.LinkFromPool(publishedDirectory, f.Filename, packagePool, sourcePoolPath, f.Checksums, force)
if err != nil {
return err
}
@@ -596,29 +605,24 @@ func (p *Package) PoolDirectory() (string, error) {
// PackageDownloadTask is a element of download queue for the package
type PackageDownloadTask struct {
RepoURI string
DestinationPath string
Checksums utils.ChecksumInfo
File *PackageFile
Additional []PackageDownloadTask
TempDownPath string
}
// DownloadList returns list of missing package files for download in format
// [[srcpath, dstpath]]
func (p *Package) DownloadList(packagePool aptly.PackagePool) (result []PackageDownloadTask, err error) {
func (p *Package) DownloadList(packagePool aptly.PackagePool, checksumStorage aptly.ChecksumStorage) (result []PackageDownloadTask, err error) {
result = make([]PackageDownloadTask, 0, 1)
for _, f := range p.Files() {
poolPath, err := packagePool.Path(f.Filename, f.Checksums)
if err != nil {
return nil, err
}
verified, err := f.Verify(packagePool)
for idx, f := range p.Files() {
verified, err := f.Verify(packagePool, checksumStorage)
if err != nil {
return nil, err
}
if !verified {
result = append(result, PackageDownloadTask{RepoURI: f.DownloadURL(), DestinationPath: poolPath, Checksums: f.Checksums})
result = append(result, PackageDownloadTask{File: &p.Files()[idx]})
}
}
@@ -626,11 +630,11 @@ func (p *Package) DownloadList(packagePool aptly.PackagePool) (result []PackageD
}
// VerifyFiles verifies that all package files have been correctly downloaded
func (p *Package) VerifyFiles(packagePool aptly.PackagePool) (result bool, err error) {
func (p *Package) VerifyFiles(packagePool aptly.PackagePool, checksumStorage aptly.ChecksumStorage) (result bool, err error) {
result = true
for _, f := range p.Files() {
result, err = f.Verify(packagePool)
result, err = f.Verify(packagePool, checksumStorage)
if err != nil || !result {
return
}
@@ -645,7 +649,7 @@ func (p *Package) FilepathList(packagePool aptly.PackagePool) ([]string, error)
result := make([]string, len(p.Files()))
for i, f := range p.Files() {
result[i], err = packagePool.RelativePath(f.Filename, f.Checksums)
result[i], err = f.GetPoolPath(packagePool)
if err != nil {
return nil, err
}

View File

@@ -4,7 +4,6 @@ import (
"encoding/binary"
"fmt"
"hash/fnv"
"os"
"path/filepath"
"sort"
"strconv"
@@ -20,25 +19,28 @@ type PackageFile struct {
Filename string
// Hashes for the file
Checksums utils.ChecksumInfo
// PoolPath persists relative path to file in the package pool
PoolPath string
// Temporary field used while downloading, stored relative path on the mirror
downloadPath string
}
// Verify that package file is present and correct
func (f *PackageFile) Verify(packagePool aptly.PackagePool) (bool, error) {
poolPath, err := packagePool.Path(f.Filename, f.Checksums)
if err != nil {
return false, err
func (f *PackageFile) Verify(packagePool aptly.PackagePool, checksumStorage aptly.ChecksumStorage) (bool, error) {
return packagePool.Verify(f.PoolPath, f.Filename, &f.Checksums, checksumStorage)
}
// GetPoolPath returns path to the file in the pool
//
// For legacy packages which do not have PoolPath field set, that calculates LegacyPath via pool
func (f *PackageFile) GetPoolPath(packagePool aptly.PackagePool) (string, error) {
var err error
if f.PoolPath == "" {
f.PoolPath, err = packagePool.LegacyPath(f.Filename, &f.Checksums)
}
st, err := os.Stat(poolPath)
if err != nil {
return false, nil
}
// verify size
// TODO: verify checksum if configured
return st.Size() == f.Checksums.Size, nil
return f.PoolPath, err
}
// DownloadURL return relative URL to package download location

View File

@@ -1,9 +1,10 @@
package deb
import (
"os"
"io/ioutil"
"path/filepath"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/files"
"github.com/smira/aptly/utils"
@@ -12,11 +13,13 @@ import (
type PackageFilesSuite struct {
files PackageFiles
cs aptly.ChecksumStorage
}
var _ = Suite(&PackageFilesSuite{})
func (s *PackageFilesSuite) SetUpTest(c *C) {
s.cs = files.NewMockChecksumStorage()
s.files = PackageFiles{PackageFile{
Filename: "alien-arena-common_7.40-2_i386.deb",
downloadPath: "pool/contrib/a/alien-arena",
@@ -29,27 +32,24 @@ func (s *PackageFilesSuite) SetUpTest(c *C) {
}
func (s *PackageFilesSuite) TestVerify(c *C) {
packagePool := files.NewPackagePool(c.MkDir())
poolPath, _ := packagePool.Path(s.files[0].Filename, s.files[0].Checksums)
packagePool := files.NewPackagePool(c.MkDir(), false)
result, err := s.files[0].Verify(packagePool)
result, err := s.files[0].Verify(packagePool, s.cs)
c.Check(err, IsNil)
c.Check(result, Equals, false)
err = os.MkdirAll(filepath.Dir(poolPath), 0755)
c.Assert(err, IsNil)
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
file, err := os.Create(poolPath)
c.Assert(err, IsNil)
file.WriteString("abcde")
file.Close()
s.files[0].PoolPath, _ = packagePool.Import(tmpFilepath, s.files[0].Filename, &s.files[0].Checksums, false, s.cs)
result, err = s.files[0].Verify(packagePool)
s.files[0].Checksums.Size = 187518
result, err = s.files[0].Verify(packagePool, s.cs)
c.Check(err, IsNil)
c.Check(result, Equals, false)
s.files[0].Checksums.Size = 5
result, err = s.files[0].Verify(packagePool)
result, err = s.files[0].Verify(packagePool, s.cs)
c.Check(err, IsNil)
c.Check(result, Equals, true)
}

View File

@@ -2,12 +2,11 @@ package deb
import (
"bytes"
"os"
"io/ioutil"
"path/filepath"
"regexp"
"github.com/smira/aptly/files"
"github.com/smira/aptly/utils"
. "gopkg.in/check.v1"
)
@@ -363,19 +362,17 @@ func (s *PackageSuite) TestPoolDirectory(c *C) {
}
func (s *PackageSuite) TestLinkFromPool(c *C) {
packagePool := files.NewPackagePool(c.MkDir())
packagePool := files.NewPackagePool(c.MkDir(), false)
cs := files.NewMockChecksumStorage()
publishedStorage := files.NewPublishedStorage(c.MkDir(), "", "")
p := NewPackageFromControlFile(s.stanza)
poolPath, _ := packagePool.Path(p.Files()[0].Filename, p.Files()[0].Checksums)
err := os.MkdirAll(filepath.Dir(poolPath), 0755)
c.Assert(err, IsNil)
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, nil, 0777), IsNil)
file, err := os.Create(poolPath)
c.Assert(err, IsNil)
file.Close()
p.Files()[0].PoolPath, _ = packagePool.Import(tmpFilepath, p.Files()[0].Filename, &p.Files()[0].Checksums, false, cs)
err = p.LinkFromPool(publishedStorage, packagePool, "", "non-free", false)
err := p.LinkFromPool(publishedStorage, packagePool, "", "non-free", false)
c.Check(err, IsNil)
c.Check(p.Files()[0].Filename, Equals, "alien-arena-common_7.40-2_i386.deb")
c.Check(p.Files()[0].downloadPath, Equals, "pool/non-free/a/alien-arena")
@@ -387,7 +384,7 @@ func (s *PackageSuite) TestLinkFromPool(c *C) {
}
func (s *PackageSuite) TestFilepathList(c *C) {
packagePool := files.NewPackagePool(c.MkDir())
packagePool := files.NewPackagePool(c.MkDir(), true)
p := NewPackageFromControlFile(s.stanza)
list, err := p.FilepathList(packagePool)
@@ -396,31 +393,24 @@ func (s *PackageSuite) TestFilepathList(c *C) {
}
func (s *PackageSuite) TestDownloadList(c *C) {
packagePool := files.NewPackagePool(c.MkDir())
packagePool := files.NewPackagePool(c.MkDir(), false)
cs := files.NewMockChecksumStorage()
p := NewPackageFromControlFile(s.stanza)
p.Files()[0].Checksums.Size = 5
poolPath, _ := packagePool.Path(p.Files()[0].Filename, p.Files()[0].Checksums)
list, err := p.DownloadList(packagePool)
list, err := p.DownloadList(packagePool, cs)
c.Check(err, IsNil)
c.Check(list, DeepEquals, []PackageDownloadTask{
{
RepoURI: "pool/contrib/a/alien-arena/alien-arena-common_7.40-2_i386.deb",
DestinationPath: poolPath,
Checksums: utils.ChecksumInfo{Size: 5,
MD5: "1e8cba92c41420aa7baa8a5718d67122",
SHA1: "46955e48cad27410a83740a21d766ce362364024",
SHA256: "eb4afb9885cba6dc70cccd05b910b2dbccc02c5900578be5e99f0d3dbf9d76a5"}}})
File: &p.Files()[0],
},
})
err = os.MkdirAll(filepath.Dir(poolPath), 0755)
c.Assert(err, IsNil)
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
p.Files()[0].PoolPath, _ = packagePool.Import(tmpFilepath, p.Files()[0].Filename, &p.Files()[0].Checksums, false, cs)
file, err := os.Create(poolPath)
c.Assert(err, IsNil)
file.WriteString("abcde")
file.Close()
list, err = p.DownloadList(packagePool)
list, err = p.DownloadList(packagePool, cs)
c.Check(err, IsNil)
c.Check(list, DeepEquals, []PackageDownloadTask{})
}
@@ -428,24 +418,22 @@ func (s *PackageSuite) TestDownloadList(c *C) {
func (s *PackageSuite) TestVerifyFiles(c *C) {
p := NewPackageFromControlFile(s.stanza)
packagePool := files.NewPackagePool(c.MkDir())
poolPath, _ := packagePool.Path(p.Files()[0].Filename, p.Files()[0].Checksums)
packagePool := files.NewPackagePool(c.MkDir(), false)
cs := files.NewMockChecksumStorage()
err := os.MkdirAll(filepath.Dir(poolPath), 0755)
c.Assert(err, IsNil)
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, []byte("abcde"), 0777), IsNil)
file, err := os.Create(poolPath)
c.Assert(err, IsNil)
file.WriteString("abcde")
file.Close()
p.Files()[0].PoolPath, _ = packagePool.Import(tmpFilepath, p.Files()[0].Filename, &p.Files()[0].Checksums, false, cs)
result, err := p.VerifyFiles(packagePool)
p.Files()[0].Checksums.Size = 100
result, err := p.VerifyFiles(packagePool, cs)
c.Check(err, IsNil)
c.Check(result, Equals, false)
p.Files()[0].Checksums.Size = 5
result, err = p.VerifyFiles(packagePool)
result, err = p.VerifyFiles(packagePool, cs)
c.Check(err, IsNil)
c.Check(result, Equals, true)
}

View File

@@ -74,6 +74,7 @@ type PublishedRepoSuite struct {
provider *FakeStorageProvider
publishedStorage, publishedStorage2 *files.PublishedStorage
packagePool aptly.PackagePool
cs aptly.ChecksumStorage
localRepo *LocalRepo
snapshot, snapshot2 *Snapshot
db database.Storage
@@ -96,7 +97,21 @@ func (s *PublishedRepoSuite) SetUpTest(c *C) {
s.provider = &FakeStorageProvider{map[string]aptly.PublishedStorage{
"": s.publishedStorage,
"files:other": s.publishedStorage2}}
s.packagePool = files.NewPackagePool(s.root)
s.packagePool = files.NewPackagePool(s.root, false)
s.cs = files.NewMockChecksumStorage()
tmpFilepath := filepath.Join(c.MkDir(), "file")
c.Assert(ioutil.WriteFile(tmpFilepath, nil, 0777), IsNil)
var err error
s.p1.Files()[0].PoolPath, err = s.packagePool.Import(tmpFilepath, s.p1.Files()[0].Filename, &s.p1.Files()[0].Checksums, false, s.cs)
c.Assert(err, IsNil)
s.p1.UpdateFiles(s.p1.Files())
s.p2.UpdateFiles(s.p1.Files())
s.p3.UpdateFiles(s.p1.Files())
s.reflist = NewPackageRefListFromPackageList(s.list)
repo, _ := NewRemoteRepo("yandex", "http://mirror.yandex.ru/debian/", "squeeze", []string{"main"}, []string{}, false, false)
repo.packageRefs = s.reflist
@@ -131,12 +146,6 @@ func (s *PublishedRepoSuite) SetUpTest(c *C) {
s.repo5, _ = NewPublishedRepo("files:other", "ppa", "maverick", []string{"source"}, []string{"main"}, []interface{}{s.localRepo}, s.factory)
s.repo5.SkipContents = true
poolPath, _ := s.packagePool.Path(s.p1.Files()[0].Filename, s.p1.Files()[0].Checksums)
err := os.MkdirAll(filepath.Dir(poolPath), 0755)
f, err := os.Create(poolPath)
c.Assert(err, IsNil)
f.Close()
}
func (s *PublishedRepoSuite) TearDownTest(c *C) {

View File

@@ -477,11 +477,6 @@ func (repo *RemoteRepo) DownloadPackageIndexes(progress aptly.Progress, d aptly.
return err
}
}
err = collectionFactory.PackageCollection().Update(p)
if err != nil {
return err
}
}
progress.ShutdownBar()
@@ -506,50 +501,82 @@ func (repo *RemoteRepo) ApplyFilter(dependencyOptions int, filterQuery PackageQu
}
// BuildDownloadQueue builds queue, discards current PackageList
func (repo *RemoteRepo) BuildDownloadQueue(packagePool aptly.PackagePool, skipExistingPackages bool) (queue []PackageDownloadTask, downloadSize int64, err error) {
func (repo *RemoteRepo) BuildDownloadQueue(packagePool aptly.PackagePool, packageCollection *PackageCollection, checksumStorage aptly.ChecksumStorage, skipExistingPackages bool) (queue []PackageDownloadTask, downloadSize int64, err error) {
queue = make([]PackageDownloadTask, 0, repo.packageList.Len())
seen := make(map[string]struct{}, repo.packageList.Len())
seen := make(map[string]int, repo.packageList.Len())
err = repo.packageList.ForEach(func(p *Package) error {
download := true
if repo.packageRefs != nil && skipExistingPackages {
download = !repo.packageRefs.Has(p)
}
if download {
list, err2 := p.DownloadList(packagePool)
if err2 != nil {
return err2
}
p.files = nil
for _, task := range list {
key := task.RepoURI + "-" + task.DestinationPath
_, found := seen[key]
if !found {
queue = append(queue, task)
downloadSize += task.Checksums.Size
seen[key] = struct{}{}
if repo.packageRefs.Has(p) {
// skip this package, but load checksums/files from package in DB
var prevP *Package
prevP, err = packageCollection.ByKey(p.Key(""))
if err != nil {
return err
}
p.UpdateFiles(prevP.Files())
return nil
}
}
list, err2 := p.DownloadList(packagePool, checksumStorage)
if err2 != nil {
return err2
}
for _, task := range list {
key := task.File.DownloadURL()
idx, found := seen[key]
if !found {
queue = append(queue, task)
downloadSize += task.File.Checksums.Size
seen[key] = len(queue) - 1
} else {
// hook up the task to duplicate entry already on the list
queue[idx].Additional = append(queue[idx].Additional, task)
}
}
return nil
})
if err != nil {
return
}
repo.tempPackageRefs = NewPackageRefListFromPackageList(repo.packageList)
// free up package list, we don't need it after this point
repo.packageList = nil
return
}
// FinalizeDownload swaps for final value of package refs
func (repo *RemoteRepo) FinalizeDownload() {
func (repo *RemoteRepo) FinalizeDownload(collectionFactory *CollectionFactory, progress aptly.Progress) error {
repo.LastDownloadDate = time.Now()
repo.packageRefs = repo.tempPackageRefs
if progress != nil {
progress.InitBar(int64(repo.packageList.Len()), true)
}
var i int
// update all the packages in collection
err := repo.packageList.ForEach(func(p *Package) error {
i++
if progress != nil {
progress.SetBar(i)
}
// download process might have updated checksums
p.UpdateFiles(p.Files())
return collectionFactory.PackageCollection().Update(p)
})
repo.packageRefs = NewPackageRefListFromPackageList(repo.packageList)
if progress != nil {
progress.ShutdownBar()
}
repo.packageList = nil
return err
}
// Encode does msgpack encoding of RemoteRepo

View File

@@ -81,6 +81,7 @@ type RemoteRepoSuite struct {
db database.Storage
collectionFactory *CollectionFactory
packagePool aptly.PackagePool
cs aptly.ChecksumStorage
}
var _ = Suite(&RemoteRepoSuite{})
@@ -92,7 +93,8 @@ func (s *RemoteRepoSuite) SetUpTest(c *C) {
s.progress = console.NewProgress()
s.db, _ = database.OpenDB(c.MkDir())
s.collectionFactory = NewCollectionFactory(s.db)
s.packagePool = files.NewPackagePool(c.MkDir())
s.packagePool = files.NewPackagePool(c.MkDir(), false)
s.cs = files.NewMockChecksumStorage()
s.SetUpPackages()
s.progress.Start()
}
@@ -266,12 +268,13 @@ func (s *RemoteRepoSuite) TestDownload(c *C) {
c.Assert(err, IsNil)
c.Assert(s.downloader.Empty(), Equals, true)
queue, size, err := s.repo.BuildDownloadQueue(s.packagePool, false)
queue, size, err := s.repo.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, false)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(3))
c.Check(queue, HasLen, 1)
c.Check(queue[0].RepoURI, Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
c.Check(queue[0].File.DownloadURL(), Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.repo.FinalizeDownload()
s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.repo.packageRefs.Refs[0])
@@ -292,11 +295,12 @@ func (s *RemoteRepoSuite) TestDownload(c *C) {
c.Assert(err, IsNil)
c.Assert(s.downloader.Empty(), Equals, true)
queue, size, err = s.repo.BuildDownloadQueue(s.packagePool, true)
queue, size, err = s.repo.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, true)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(0))
c.Check(queue, HasLen, 0)
s.repo.FinalizeDownload()
s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
// Next call must return the download list without option "skip-existing-packages"
@@ -312,12 +316,13 @@ func (s *RemoteRepoSuite) TestDownload(c *C) {
c.Assert(err, IsNil)
c.Assert(s.downloader.Empty(), Equals, true)
queue, size, err = s.repo.BuildDownloadQueue(s.packagePool, false)
queue, size, err = s.repo.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, false)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(3))
c.Check(queue, HasLen, 1)
c.Check(queue[0].RepoURI, Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
c.Check(queue[0].File.DownloadURL(), Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.repo.FinalizeDownload()
s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
}
@@ -339,13 +344,14 @@ func (s *RemoteRepoSuite) TestDownloadWithSources(c *C) {
c.Assert(err, IsNil)
c.Assert(s.downloader.Empty(), Equals, true)
queue, size, err := s.repo.BuildDownloadQueue(s.packagePool, false)
queue, size, err := s.repo.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, false)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(15))
c.Check(queue, HasLen, 4)
q := make([]string, 4)
for i := range q {
q[i] = queue[i].RepoURI
q[i] = queue[i].File.DownloadURL()
}
sort.Strings(q)
c.Check(q[3], Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
@@ -353,7 +359,7 @@ func (s *RemoteRepoSuite) TestDownloadWithSources(c *C) {
c.Check(q[2], Equals, "pool/main/a/access-modifier-checker/access-modifier-checker_1.0.orig.tar.gz")
c.Check(q[0], Equals, "pool/main/a/access-modifier-checker/access-modifier-checker_1.0-4.debian.tar.gz")
s.repo.FinalizeDownload()
s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.repo.packageRefs.Refs[0])
@@ -382,11 +388,12 @@ func (s *RemoteRepoSuite) TestDownloadWithSources(c *C) {
c.Assert(err, IsNil)
c.Assert(s.downloader.Empty(), Equals, true)
queue, size, err = s.repo.BuildDownloadQueue(s.packagePool, true)
queue, size, err = s.repo.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, true)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(0))
c.Check(queue, HasLen, 0)
s.repo.FinalizeDownload()
s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
// Next call must return the download list without option "skip-existing-packages"
@@ -406,11 +413,12 @@ func (s *RemoteRepoSuite) TestDownloadWithSources(c *C) {
c.Assert(err, IsNil)
c.Assert(s.downloader.Empty(), Equals, true)
queue, size, err = s.repo.BuildDownloadQueue(s.packagePool, false)
queue, size, err = s.repo.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, false)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(15))
c.Check(queue, HasLen, 4)
s.repo.FinalizeDownload()
s.repo.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.repo.packageRefs, NotNil)
}
@@ -429,12 +437,13 @@ func (s *RemoteRepoSuite) TestDownloadFlat(c *C) {
c.Assert(err, IsNil)
c.Assert(downloader.Empty(), Equals, true)
queue, size, err := s.flat.BuildDownloadQueue(s.packagePool, false)
queue, size, err := s.flat.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, false)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(3))
c.Check(queue, HasLen, 1)
c.Check(queue[0].RepoURI, Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
c.Check(queue[0].File.DownloadURL(), Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.flat.FinalizeDownload()
s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.flat.packageRefs.Refs[0])
@@ -456,11 +465,12 @@ func (s *RemoteRepoSuite) TestDownloadFlat(c *C) {
c.Assert(err, IsNil)
c.Assert(downloader.Empty(), Equals, true)
queue, size, err = s.flat.BuildDownloadQueue(s.packagePool, true)
queue, size, err = s.flat.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, true)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(0))
c.Check(queue, HasLen, 0)
s.flat.FinalizeDownload()
s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
// Next call must return the download list without option "skip-existing-packages"
@@ -477,12 +487,13 @@ func (s *RemoteRepoSuite) TestDownloadFlat(c *C) {
c.Assert(err, IsNil)
c.Assert(downloader.Empty(), Equals, true)
queue, size, err = s.flat.BuildDownloadQueue(s.packagePool, false)
queue, size, err = s.flat.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, false)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(3))
c.Check(queue, HasLen, 1)
c.Check(queue[0].RepoURI, Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
c.Check(queue[0].File.DownloadURL(), Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
s.flat.FinalizeDownload()
s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
}
@@ -507,13 +518,14 @@ func (s *RemoteRepoSuite) TestDownloadWithSourcesFlat(c *C) {
c.Assert(err, IsNil)
c.Assert(downloader.Empty(), Equals, true)
queue, size, err := s.flat.BuildDownloadQueue(s.packagePool, false)
queue, size, err := s.flat.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, false)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(15))
c.Check(queue, HasLen, 4)
q := make([]string, 4)
for i := range q {
q[i] = queue[i].RepoURI
q[i] = queue[i].File.DownloadURL()
}
sort.Strings(q)
c.Check(q[3], Equals, "pool/main/a/amanda/amanda-client_3.3.1-3~bpo60+1_amd64.deb")
@@ -521,7 +533,7 @@ func (s *RemoteRepoSuite) TestDownloadWithSourcesFlat(c *C) {
c.Check(q[2], Equals, "pool/main/a/access-modifier-checker/access-modifier-checker_1.0.orig.tar.gz")
c.Check(q[0], Equals, "pool/main/a/access-modifier-checker/access-modifier-checker_1.0-4.debian.tar.gz")
s.flat.FinalizeDownload()
s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
pkg, err := s.collectionFactory.PackageCollection().ByKey(s.flat.packageRefs.Refs[0])
@@ -552,11 +564,12 @@ func (s *RemoteRepoSuite) TestDownloadWithSourcesFlat(c *C) {
c.Assert(err, IsNil)
c.Assert(downloader.Empty(), Equals, true)
queue, size, err = s.flat.BuildDownloadQueue(s.packagePool, true)
queue, size, err = s.flat.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, true)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(0))
c.Check(queue, HasLen, 0)
s.flat.FinalizeDownload()
s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
// Next call must return the download list without option "skip-existing-packages"
@@ -577,11 +590,12 @@ func (s *RemoteRepoSuite) TestDownloadWithSourcesFlat(c *C) {
c.Assert(err, IsNil)
c.Assert(downloader.Empty(), Equals, true)
queue, size, err = s.flat.BuildDownloadQueue(s.packagePool, false)
queue, size, err = s.flat.BuildDownloadQueue(s.packagePool, s.collectionFactory.PackageCollection(), s.cs, false)
c.Assert(err, IsNil)
c.Check(size, Equals, int64(15))
c.Check(queue, HasLen, 4)
s.flat.FinalizeDownload()
s.flat.FinalizeDownload(s.collectionFactory, nil)
c.Assert(s.flat.packageRefs, NotNil)
}

36
files/mocks.go Normal file
View File

@@ -0,0 +1,36 @@
package files
import (
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
)
type mockChecksumStorage struct {
store map[string]utils.ChecksumInfo
}
// NewMockChecksumStorage creates aptly.ChecksumStorage for tests
func NewMockChecksumStorage() aptly.ChecksumStorage {
return &mockChecksumStorage{
store: make(map[string]utils.ChecksumInfo),
}
}
func (st *mockChecksumStorage) Get(path string) (*utils.ChecksumInfo, error) {
c, ok := st.store[path]
if !ok {
return nil, nil
}
return &c, nil
}
func (st *mockChecksumStorage) Update(path string, c *utils.ChecksumInfo) error {
st.store[path] = *c
return nil
}
// Check interface
var (
_ aptly.ChecksumStorage = &mockChecksumStorage{}
)

View File

@@ -7,6 +7,9 @@ import (
"os"
"path/filepath"
"sync"
"syscall"
"github.com/smira/go-uuid/uuid"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
@@ -15,21 +18,33 @@ import (
// PackagePool is deduplicated storage of package files on filesystem
type PackagePool struct {
sync.Mutex
rootPath string
rootPath string
supportLegacyPaths bool
}
// Check interface
var (
_ aptly.PackagePool = (*PackagePool)(nil)
_ aptly.PackagePool = (*PackagePool)(nil)
_ aptly.LocalPackagePool = (*PackagePool)(nil)
)
// NewPackagePool creates new instance of PackagePool which specified root
func NewPackagePool(root string) *PackagePool {
return &PackagePool{rootPath: filepath.Join(root, "pool")}
func NewPackagePool(root string, supportLegacyPaths bool) *PackagePool {
rootPath := filepath.Join(root, "pool")
rootPath, err := filepath.Abs(rootPath)
if err != nil {
panic(err)
}
return &PackagePool{
rootPath: rootPath,
supportLegacyPaths: supportLegacyPaths,
}
}
// RelativePath returns path relative to pool's root for package files given checksum info and original filename
func (pool *PackagePool) RelativePath(filename string, checksums utils.ChecksumInfo) (string, error) {
// LegacyPath returns path relative to pool's root for pre-1.1 aptly (based on MD5)
func (pool *PackagePool) LegacyPath(filename string, checksums *utils.ChecksumInfo) (string, error) {
filename = filepath.Base(filename)
if filename == "." || filename == "/" {
return "", fmt.Errorf("filename %s is invalid", filename)
@@ -44,14 +59,21 @@ func (pool *PackagePool) RelativePath(filename string, checksums utils.ChecksumI
return filepath.Join(hashMD5[0:2], hashMD5[2:4], filename), nil
}
// Path returns full path to package file in pool given filename and hash of file contents
func (pool *PackagePool) Path(filename string, checksums utils.ChecksumInfo) (string, error) {
relative, err := pool.RelativePath(filename, checksums)
if err != nil {
return "", err
// buildPoolPath generates pool path based on file checksum
func (pool *PackagePool) buildPoolPath(filename string, checksums *utils.ChecksumInfo) (string, error) {
filename = filepath.Base(filename)
if filename == "." || filename == "/" {
return "", fmt.Errorf("filename %s is invalid", filename)
}
return filepath.Join(pool.rootPath, relative), nil
hash := checksums.SHA256
if len(hash) < 4 {
// this should never happen in real life
return "", fmt.Errorf("unable to compute pool location for filename %v, SHA256 is missing", filename)
}
return filepath.Join(hash[0:2], hash[2:4], hash[4:32]+"_"+filename), nil
}
// FilepathList returns file paths of all the files in the pool
@@ -116,57 +138,276 @@ func (pool *PackagePool) Remove(path string) (size int64, err error) {
return info.Size(), err
}
func (pool *PackagePool) ensureChecksums(poolPath, fullPoolPath string, checksumStorage aptly.ChecksumStorage) (targetChecksums *utils.ChecksumInfo, err error) {
targetChecksums, err = checksumStorage.Get(poolPath)
if err != nil {
return
}
if targetChecksums == nil {
// we don't have checksums stored yet for this file
targetChecksums = &utils.ChecksumInfo{}
*targetChecksums, err = utils.ChecksumsForFile(fullPoolPath)
if err != nil {
return
}
err = checksumStorage.Update(poolPath, targetChecksums)
}
return
}
// Verify checks whether file exists in the pool and fills back checksum info
//
// if poolPath is empty, poolPath is generated automatically based on checksum info (if available)
// in any case, if function returns true, it also fills back checksums with complete information about the file in the pool
func (pool *PackagePool) Verify(poolPath, basename string, checksums *utils.ChecksumInfo, checksumStorage aptly.ChecksumStorage) (bool, error) {
possiblePoolPaths := []string{}
if poolPath != "" {
possiblePoolPaths = append(possiblePoolPaths, poolPath)
} else {
// try to guess
if checksums.SHA256 != "" {
modernPath, err := pool.buildPoolPath(basename, checksums)
if err != nil {
return false, err
}
possiblePoolPaths = append(possiblePoolPaths, modernPath)
}
if pool.supportLegacyPaths && checksums.MD5 != "" {
legacyPath, err := pool.LegacyPath(basename, checksums)
if err != nil {
return false, err
}
possiblePoolPaths = append(possiblePoolPaths, legacyPath)
}
}
for _, path := range possiblePoolPaths {
fullPoolPath := filepath.Join(pool.rootPath, path)
targetInfo, err := os.Stat(fullPoolPath)
if err != nil {
if !os.IsNotExist(err) {
// unable to stat target location?
return false, err
}
// doesn't exist, skip it
continue
}
if targetInfo.Size() != checksums.Size {
// oops, wrong file?
continue
}
var targetChecksums *utils.ChecksumInfo
targetChecksums, err = pool.ensureChecksums(path, fullPoolPath, checksumStorage)
if err != nil {
return false, err
}
if checksums.MD5 != "" && targetChecksums.MD5 != checksums.MD5 ||
checksums.SHA256 != "" && targetChecksums.SHA256 != checksums.SHA256 {
// wrong file?
return false, nil
}
// fill back checksums
*checksums = *targetChecksums
return true, nil
}
return false, nil
}
// Import copies file into package pool
func (pool *PackagePool) Import(path string, checksums utils.ChecksumInfo) error {
//
// - srcPath is full path to source file as it is now
// - basename is desired human-readable name (canonical filename)
// - checksums are used to calculate file placement
// - move indicates whether srcPath can be removed
func (pool *PackagePool) Import(srcPath, basename string, checksums *utils.ChecksumInfo, move bool, checksumStorage aptly.ChecksumStorage) (string, error) {
pool.Lock()
defer pool.Unlock()
source, err := os.Open(path)
source, err := os.Open(srcPath)
if err != nil {
return err
return "", err
}
defer source.Close()
sourceInfo, err := source.Stat()
if err != nil {
return err
return "", err
}
poolPath, err := pool.Path(path, checksums)
if checksums.MD5 == "" || checksums.SHA256 == "" || checksums.Size != sourceInfo.Size() {
// need to update checksums, MD5 and SHA256 should be always defined
*checksums, err = utils.ChecksumsForFile(srcPath)
if err != nil {
return "", err
}
}
// build target path
poolPath, err := pool.buildPoolPath(basename, checksums)
if err != nil {
return err
return "", err
}
targetInfo, err := os.Stat(poolPath)
fullPoolPath := filepath.Join(pool.rootPath, poolPath)
targetInfo, err := os.Stat(fullPoolPath)
if err != nil {
if !os.IsNotExist(err) {
// unable to stat target location?
return err
return "", err
}
} else {
// target already exists
if targetInfo.Size() != sourceInfo.Size() {
// trying to overwrite file?
return fmt.Errorf("unable to import into pool: file %s already exists", poolPath)
// target already exists and same size
if targetInfo.Size() == sourceInfo.Size() {
var targetChecksums *utils.ChecksumInfo
targetChecksums, err = pool.ensureChecksums(poolPath, fullPoolPath, checksumStorage)
if err != nil {
return "", err
}
*checksums = *targetChecksums
return poolPath, nil
}
// assume that target is already there
return nil
// trying to overwrite file?
return "", fmt.Errorf("unable to import into pool: file %s already exists", fullPoolPath)
}
if pool.supportLegacyPaths {
// file doesn't exist at new location, check legacy location
var (
legacyTargetInfo os.FileInfo
legacyPath, legacyFullPath string
)
legacyPath, err = pool.LegacyPath(basename, checksums)
if err != nil {
return "", err
}
legacyFullPath = filepath.Join(pool.rootPath, legacyPath)
legacyTargetInfo, err = os.Stat(legacyFullPath)
if err != nil {
if !os.IsNotExist(err) {
return "", err
}
} else {
// legacy file exists
if legacyTargetInfo.Size() == sourceInfo.Size() {
// file exists at legacy path and it's same size, consider it's already in the pool
var targetChecksums *utils.ChecksumInfo
targetChecksums, err = pool.ensureChecksums(legacyPath, legacyFullPath, checksumStorage)
if err != nil {
return "", err
}
*checksums = *targetChecksums
return legacyPath, nil
}
// size is different, import at new path
}
}
// create subdirs as necessary
err = os.MkdirAll(filepath.Dir(poolPath), 0777)
poolDir := filepath.Dir(fullPoolPath)
err = os.MkdirAll(poolDir, 0777)
if err != nil {
return err
return "", err
}
target, err := os.Create(poolPath)
// check if we can use hardlinks instead of copying/moving
poolDirInfo, err := os.Stat(poolDir)
if err != nil {
return err
return "", err
}
defer target.Close()
_, err = io.Copy(target, source)
if poolDirInfo.Sys().(*syscall.Stat_t).Dev == sourceInfo.Sys().(*syscall.Stat_t).Dev {
// same filesystem, try to use hardlink
err = os.Link(srcPath, fullPoolPath)
} else {
err = os.ErrInvalid
}
return err
if err != nil {
// different filesystems or failed hardlink, fallback to copy
var target *os.File
target, err = os.Create(fullPoolPath)
if err != nil {
return "", err
}
defer target.Close()
_, err = io.Copy(target, source)
if err == nil {
err = target.Close()
}
}
if err == nil {
if !checksums.Complete() {
// need full checksums here
*checksums, err = utils.ChecksumsForFile(srcPath)
if err != nil {
return "", err
}
}
err = checksumStorage.Update(poolPath, checksums)
}
if err == nil && move {
err = os.Remove(srcPath)
}
return poolPath, err
}
// Open returns io.ReadCloser to access the file
func (pool *PackagePool) Open(path string) (aptly.ReadSeekerCloser, error) {
return os.Open(filepath.Join(pool.rootPath, path))
}
// Stat returns Unix stat(2) info
func (pool *PackagePool) Stat(path string) (os.FileInfo, error) {
return os.Stat(filepath.Join(pool.rootPath, path))
}
// Link generates hardlink to destination path
func (pool *PackagePool) Link(path, dstPath string) error {
return os.Link(filepath.Join(pool.rootPath, path), dstPath)
}
// Symlink generates symlink to destination path
func (pool *PackagePool) Symlink(path, dstPath string) error {
return os.Symlink(filepath.Join(pool.rootPath, path), dstPath)
}
// FullPath generates full path to the file in pool
//
// Please use with care: it's not supposed to be used to access files
func (pool *PackagePool) FullPath(path string) string {
return filepath.Join(pool.rootPath, path)
}
// GenerateTempPath generates temporary path for download (which is fast to import into package pool later on)
func (pool *PackagePool) GenerateTempPath(filename string) (string, error) {
random := uuid.NewRandom().String()
return filepath.Join(pool.rootPath, random[0:2], random[2:4], random[4:]+filename), nil
}

View File

@@ -1,11 +1,14 @@
package files
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"syscall"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
. "gopkg.in/check.v1"
@@ -14,39 +17,35 @@ import (
type PackagePoolSuite struct {
pool *PackagePool
checksum utils.ChecksumInfo
debFile string
cs aptly.ChecksumStorage
}
var _ = Suite(&PackagePoolSuite{})
func (s *PackagePoolSuite) SetUpTest(c *C) {
s.pool = NewPackagePool(c.MkDir())
s.pool = NewPackagePool(c.MkDir(), true)
s.checksum = utils.ChecksumInfo{
MD5: "91b1a1480b90b9e269ca44d897b12575",
MD5: "0035d7822b2f8f0ec4013f270fd650c2",
}
_, _File, _, _ := runtime.Caller(0)
s.debFile = filepath.Join(filepath.Dir(_File), "../system/files/libboost-program-options-dev_1.49.0.1_i386.deb")
s.cs = NewMockChecksumStorage()
}
func (s *PackagePoolSuite) TestRelativePath(c *C) {
path, err := s.pool.RelativePath("a/b/package.deb", s.checksum)
func (s *PackagePoolSuite) TestLegacyPath(c *C) {
path, err := s.pool.LegacyPath("a/b/package.deb", &s.checksum)
c.Assert(err, IsNil)
c.Assert(path, Equals, "91/b1/package.deb")
c.Assert(path, Equals, "00/35/package.deb")
_, err = s.pool.RelativePath("/", s.checksum)
_, err = s.pool.LegacyPath("/", &s.checksum)
c.Assert(err, ErrorMatches, ".*is invalid")
_, err = s.pool.RelativePath("", s.checksum)
_, err = s.pool.LegacyPath("", &s.checksum)
c.Assert(err, ErrorMatches, ".*is invalid")
_, err = s.pool.RelativePath("a/b/package.deb", utils.ChecksumInfo{MD5: "9"})
_, err = s.pool.LegacyPath("a/b/package.deb", &utils.ChecksumInfo{MD5: "9"})
c.Assert(err, ErrorMatches, ".*MD5 is missing")
}
func (s *PackagePoolSuite) TestPath(c *C) {
path, err := s.pool.Path("a/b/package.deb", s.checksum)
c.Assert(err, IsNil)
c.Assert(path, Equals, filepath.Join(s.pool.rootPath, "91/b1/package.deb"))
_, err = s.pool.Path("/", s.checksum)
c.Assert(err, ErrorMatches, ".*is invalid")
}
func (s *PackagePoolSuite) TestFilepathList(c *C) {
list, err := s.pool.FilepathList(nil)
c.Check(err, IsNil)
@@ -93,33 +92,253 @@ func (s *PackagePoolSuite) TestRemove(c *C) {
}
func (s *PackagePoolSuite) TestImportOk(c *C) {
_, _File, _, _ := runtime.Caller(0)
debFile := filepath.Join(filepath.Dir(_File), "../system/files/libboost-program-options-dev_1.49.0.1_i386.deb")
err := s.pool.Import(debFile, s.checksum)
path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
// SHA256 should be automatically calculated
c.Check(s.checksum.SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
// checksum storage is filled with new checksum
c.Check(s.cs.(*mockChecksumStorage).store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
info, err := os.Stat(filepath.Join(s.pool.rootPath, "91", "b1", "libboost-program-options-dev_1.49.0.1_i386.deb"))
c.Check(err, IsNil)
info, err := s.pool.Stat(path)
c.Assert(err, IsNil)
c.Check(info.Size(), Equals, int64(2738))
c.Check(info.Sys().(*syscall.Stat_t).Nlink > 1, Equals, true)
// import as different name
path, err = s.pool.Import(s.debFile, "some.deb", &s.checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_some.deb")
// checksum storage is filled with new checksum
c.Check(s.cs.(*mockChecksumStorage).store[path].SHA256, Equals, "c76b4bd12fd92e4dfe1b55b18a67a669d92f62985d6a96c8a21d96120982cf12")
// double import, should be ok
err = s.pool.Import(debFile, s.checksum)
s.checksum.SHA512 = "" // clear checksum
path, err = s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
// checksum is filled back based on checksum storage
c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// clear checksum storage, and do double-import
delete(s.cs.(*mockChecksumStorage).store, path)
s.checksum.SHA512 = "" // clear checksum
path, err = s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
// checksum is filled back based on re-calculation of file in the pool
c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
// import under new name, but with checksums already filled in
s.checksum.SHA512 = "" // clear checksum
path, err = s.pool.Import(s.debFile, "other.deb", &s.checksum, false, s.cs)
c.Check(err, IsNil)
c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_other.deb")
// checksum is filled back based on re-calculation of source file
c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
}
// TestImportLegacy checks that Import recognizes a file already present at
// the legacy pool location and returns that path instead of re-importing.
func (s *PackagePoolSuite) TestImportLegacy(c *C) {
	// pre-seed the legacy location by hand
	legacyDir := filepath.Join(s.pool.rootPath, "00", "35")
	os.MkdirAll(legacyDir, 0755)
	c.Assert(utils.CopyFile(s.debFile, filepath.Join(legacyDir, "libboost-program-options-dev_1.49.0.1_i386.deb")), IsNil)

	s.checksum.Size = 2738
	poolPath, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
	c.Check(err, IsNil)
	c.Check(poolPath, Equals, "00/35/libboost-program-options-dev_1.49.0.1_i386.deb")
	// missing checksum fields are filled back (per the original comment, from
	// the checksum storage)
	c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
}
// TestVerifyLegacy exercises Verify() against a package placed at the legacy
// pool location: absent at first, then found once the file is copied in.
func (s *PackagePoolSuite) TestVerifyLegacy(c *C) {
	s.checksum.Size = 2738

	// nothing imported yet — verification must report absence
	found, err := s.pool.Verify("", filepath.Base(s.debFile), &s.checksum, s.cs)
	c.Check(err, IsNil)
	c.Check(found, Equals, false)

	// place the package at the legacy location by hand
	legacyDir := filepath.Join(s.pool.rootPath, "00", "35")
	os.MkdirAll(legacyDir, 0755)
	c.Assert(utils.CopyFile(s.debFile, filepath.Join(legacyDir, "libboost-program-options-dev_1.49.0.1_i386.deb")), IsNil)

	// now the file is found and the missing SHA512 is filled back in
	found, err = s.pool.Verify("", filepath.Base(s.debFile), &s.checksum, s.cs)
	c.Check(err, IsNil)
	c.Check(found, Equals, true)
	c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
}
// TestVerify walks Verify() through its decision matrix: file present or
// absent, stored path known or empty, checksum info complete / partially
// missing / wrong, and checksum storage populated or cleared. Each step
// mutates s.checksum / s.cs, so the order of the sections below matters.
func (s *PackagePoolSuite) TestVerify(c *C) {
	// file doesn't exist yet
	exists, err := s.pool.Verify("", filepath.Base(s.debFile), &s.checksum, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)
	// import file
	path, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
	c.Check(err, IsNil)
	c.Check(path, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")
	// check existence (empty stored path: pool has to locate the file itself)
	exists, err = s.pool.Verify("", filepath.Base(s.debFile), &s.checksum, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
	// check existence with fixed path
	exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &s.checksum, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
	// check existence, but with missing checksum
	s.checksum.SHA512 = ""
	exists, err = s.pool.Verify("", filepath.Base(s.debFile), &s.checksum, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	// checksum is filled back based on checksum storage
	c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
	// check existence, with missing checksum info but correct path and size available
	ck := utils.ChecksumInfo{
		Size: s.checksum.Size,
	}
	exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &ck, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	// checksum is filled back based on checksum storage
	c.Check(ck.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
	// check existence, with wrong checksum info but correct path and size available
	ck.SHA256 = "abc"
	exists, err = s.pool.Verify(path, filepath.Base(s.debFile), &ck, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)
	// check existence, with missing checksum and no info in checksum storage
	delete(s.cs.(*mockChecksumStorage).store, path)
	s.checksum.SHA512 = ""
	exists, err = s.pool.Verify("", filepath.Base(s.debFile), &s.checksum, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, true)
	// checksum is filled back based on re-calculation
	c.Check(s.checksum.SHA512, Equals, "d7302241373da972aa9b9e71d2fd769b31a38f71182aa71bc0d69d090d452c69bb74b8612c002ccf8a89c279ced84ac27177c8b92d20f00023b3d268e6cec69c")
	// check existence, with wrong size
	s.checksum.Size = 13455
	exists, err = s.pool.Verify("", filepath.Base(s.debFile), &s.checksum, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)
	// check existence, with empty checksum info
	exists, err = s.pool.Verify("", filepath.Base(s.debFile), &utils.ChecksumInfo{}, s.cs)
	c.Check(err, IsNil)
	c.Check(exists, Equals, false)
}
// TestImportMove verifies import with move=true: the pool takes ownership of
// the file and ends up with link count 1 (no hardlink back to the source).
func (s *PackagePoolSuite) TestImportMove(c *C) {
	// copy the fixture into a scratch dir first, so the original .deb isn't
	// moved away from the test data
	scratch := filepath.Join(c.MkDir(), filepath.Base(s.debFile))
	out, err := os.Create(scratch)
	c.Assert(err, IsNil)
	in, err := os.Open(s.debFile)
	c.Assert(err, IsNil)
	_, err = io.Copy(out, in)
	c.Assert(err, IsNil)
	c.Assert(out.Close(), IsNil)
	c.Assert(in.Close(), IsNil)

	poolPath, err := s.pool.Import(scratch, filepath.Base(scratch), &s.checksum, true, s.cs)
	c.Check(err, IsNil)
	c.Check(poolPath, Equals, "c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb")

	fi, err := s.pool.Stat(poolPath)
	c.Assert(err, IsNil)
	c.Check(fi.Size(), Equals, int64(2738))
	c.Check(int(fi.Sys().(*syscall.Stat_t).Nlink), Equals, 1)
}
func (s *PackagePoolSuite) TestImportNotExist(c *C) {
err := s.pool.Import("no-such-file", s.checksum)
_, err := s.pool.Import("no-such-file", "a.deb", &s.checksum, false, s.cs)
c.Check(err, ErrorMatches, ".*no such file or directory")
}
// TestImportOverwrite verifies that Import refuses to overwrite pool files
// whose contents differ from the file being imported.
//
// NOTE: stale pre-refactoring lines were removed — the old
// `err := s.pool.Import(debFile, s.checksum)` call redeclared err, and the
// runtime.Caller-based debFile local became unused once the new call switched
// to s.debFile; both broke compilation.
func (s *PackagePoolSuite) TestImportOverwrite(c *C) {
	// pre-create conflicting files (different contents) at the pool locations
	// the import will target
	os.MkdirAll(filepath.Join(s.pool.rootPath, "c7", "6b"), 0755)
	ioutil.WriteFile(filepath.Join(s.pool.rootPath, "c7", "6b", "4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb"), []byte("1"), 0644)
	os.MkdirAll(filepath.Join(s.pool.rootPath, "91", "b1"), 0755)
	ioutil.WriteFile(filepath.Join(s.pool.rootPath, "91", "b1", "libboost-program-options-dev_1.49.0.1_i386.deb"), []byte("1"), 0644)

	_, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
	c.Check(err, ErrorMatches, "unable to import into pool.*")
}
// TestStat verifies Stat() for an imported package and for a missing path.
func (s *PackagePoolSuite) TestStat(c *C) {
	poolPath, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
	c.Check(err, IsNil)

	fi, err := s.pool.Stat(poolPath)
	c.Assert(err, IsNil)
	c.Check(fi.Size(), Equals, int64(2738))

	// an unknown relative path must yield a not-exist error
	_, err = s.pool.Stat("do/es/ntexist")
	c.Assert(os.IsNotExist(err), Equals, true)
}
// TestOpen verifies Open() returns a readable handle for pool files and a
// not-exist error for unknown paths.
func (s *PackagePoolSuite) TestOpen(c *C) {
	poolPath, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
	c.Check(err, IsNil)

	reader, err := s.pool.Open(poolPath)
	c.Assert(err, IsNil)
	data, err := ioutil.ReadAll(reader)
	c.Assert(err, IsNil)
	c.Check(len(data), Equals, 2738)
	c.Check(reader.Close(), IsNil)

	_, err = s.pool.Open("do/es/ntexist")
	c.Assert(os.IsNotExist(err), Equals, true)
}
// TestLink verifies that Link() hardlinks a pool file to a destination
// outside the pool.
func (s *PackagePoolSuite) TestLink(c *C) {
	poolPath, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
	c.Check(err, IsNil)

	target := filepath.Join(c.MkDir(), filepath.Base(s.debFile))
	c.Check(s.pool.Link(poolPath, target), IsNil)

	fi, err := os.Stat(target)
	c.Assert(err, IsNil)
	c.Check(fi.Size(), Equals, int64(2738))
	// the pool file already carries several hardlinks, so the count exceeds 2
	c.Check(fi.Sys().(*syscall.Stat_t).Nlink > 2, Equals, true)
}
// TestSymlink verifies that Symlink() creates a symbolic link resolving to
// the pool file.
func (s *PackagePoolSuite) TestSymlink(c *C) {
	poolPath, err := s.pool.Import(s.debFile, filepath.Base(s.debFile), &s.checksum, false, s.cs)
	c.Check(err, IsNil)

	target := filepath.Join(c.MkDir(), filepath.Base(s.debFile))
	c.Check(s.pool.Symlink(poolPath, target), IsNil)

	// following the link: same size/link count as the pool file
	fi, err := os.Stat(target)
	c.Assert(err, IsNil)
	c.Check(fi.Size(), Equals, int64(2738))
	c.Check(fi.Sys().(*syscall.Stat_t).Nlink > 2, Equals, true)

	// not following the link: the directory entry itself is a symlink
	fi, err = os.Lstat(target)
	c.Assert(err, IsNil)
	c.Check(int(fi.Sys().(*syscall.Stat_t).Mode&syscall.S_IFMT), Equals, int(syscall.S_IFLNK))
}
// TestGenerateRandomPath checks the shape of paths produced by
// GenerateTempPath: two nested hex-digit directories plus a unique prefix
// ending with the requested file name.
func (s *PackagePoolSuite) TestGenerateRandomPath(c *C) {
	tmpPath, err := s.pool.GenerateTempPath("a.deb")
	c.Check(err, IsNil)
	c.Check(tmpPath, Matches, ".+/[0-9a-f][0-9a-f]/[0-9a-f][0-9a-f]/[0-9a-f-]+a\\.deb")
}

View File

@@ -114,15 +114,12 @@ func (storage *PublishedStorage) RemoveDirs(path string, progress aptly.Progress
//
// publishedDirectory is desired location in pool (like prefix/pool/component/liba/libav/)
// sourcePool is instance of aptly.PackagePool
// sourcePath is filepath to package file in package pool
// sourcePath is a relative path to package file in package pool
//
// LinkFromPool returns relative path for the published file to be included in package index
func (storage *PublishedStorage) LinkFromPool(publishedDirectory string, sourcePool aptly.PackagePool,
func (storage *PublishedStorage) LinkFromPool(publishedDirectory, baseName string, sourcePool aptly.PackagePool,
sourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error {
// verify that package pool is local pool is filesystem pool
_ = sourcePool.(*PackagePool)
baseName := filepath.Base(sourcePath)
poolPath := filepath.Join(storage.rootPath, publishedDirectory)
err := os.MkdirAll(poolPath, 0777)
@@ -135,7 +132,7 @@ func (storage *PublishedStorage) LinkFromPool(publishedDirectory string, sourceP
dstStat, err = os.Stat(filepath.Join(poolPath, baseName))
if err == nil {
// already exists, check source file
srcStat, err = os.Stat(sourcePath)
srcStat, err = sourcePool.Stat(sourcePath)
if err != nil {
// source file doesn't exist? problem!
return err
@@ -184,13 +181,39 @@ func (storage *PublishedStorage) LinkFromPool(publishedDirectory string, sourceP
}
}
// destination doesn't exist (or forced), create link
// destination doesn't exist (or forced), create link or copy
if storage.linkMethod == LinkMethodCopy {
err = utils.CopyFile(sourcePath, filepath.Join(poolPath, baseName))
var r aptly.ReadSeekerCloser
r, err = sourcePool.Open(sourcePath)
if err != nil {
return err
}
var dst *os.File
dst, err = os.Create(filepath.Join(poolPath, baseName))
if err != nil {
r.Close()
return err
}
_, err = io.Copy(dst, r)
if err != nil {
r.Close()
dst.Close()
return err
}
err = r.Close()
if err != nil {
dst.Close()
return err
}
err = dst.Close()
} else if storage.linkMethod == LinkMethodSymLink {
err = os.Symlink(sourcePath, filepath.Join(poolPath, baseName))
err = sourcePool.(aptly.LocalPackagePool).Symlink(sourcePath, filepath.Join(poolPath, baseName))
} else {
err = os.Link(sourcePath, filepath.Join(poolPath, baseName))
err = sourcePool.(aptly.LocalPackagePool).Link(sourcePath, filepath.Join(poolPath, baseName))
}
return err

View File

@@ -6,6 +6,7 @@ import (
"path/filepath"
"syscall"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
. "gopkg.in/check.v1"
@@ -17,6 +18,7 @@ type PublishedStorageSuite struct {
storageSymlink *PublishedStorage
storageCopy *PublishedStorage
storageCopySize *PublishedStorage
cs aptly.ChecksumStorage
}
var _ = Suite(&PublishedStorageSuite{})
@@ -27,6 +29,7 @@ func (s *PublishedStorageSuite) SetUpTest(c *C) {
s.storageSymlink = NewPublishedStorage(filepath.Join(s.root, "public_symlink"), "symlink", "")
s.storageCopy = NewPublishedStorage(filepath.Join(s.root, "public_copy"), "copy", "")
s.storageCopySize = NewPublishedStorage(filepath.Join(s.root, "public_copysize"), "copy", "size")
s.cs = NewMockChecksumStorage()
}
func (s *PublishedStorageSuite) TestLinkMethodField(c *C) {
@@ -108,6 +111,7 @@ func (s *PublishedStorageSuite) TestRemoveDirs(c *C) {
c.Assert(err, IsNil)
err = s.storage.RemoveDirs("ppa/dists/", nil)
c.Assert(err, IsNil)
_, err = os.Stat(filepath.Join(s.storage.rootPath, "ppa/dists/squeeze/Release"))
c.Assert(err, NotNil)
@@ -122,6 +126,7 @@ func (s *PublishedStorageSuite) TestRemove(c *C) {
c.Assert(err, IsNil)
err = s.storage.Remove("ppa/dists/squeeze/Release")
c.Assert(err, IsNil)
_, err = os.Stat(filepath.Join(s.storage.rootPath, "ppa/dists/squeeze/Release"))
c.Assert(err, NotNil)
@@ -139,68 +144,69 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
{ // package name regular
prefix: "",
component: "main",
sourcePath: "pool/01/ae/mars-invaders_1.03.deb",
sourcePath: "mars-invaders_1.03.deb",
poolDirectory: "m/mars-invaders",
expectedFilename: "pool/main/m/mars-invaders/mars-invaders_1.03.deb",
},
{ // lib-like filename
prefix: "",
component: "main",
sourcePath: "pool/01/ae/libmars-invaders_1.03.deb",
sourcePath: "libmars-invaders_1.03.deb",
poolDirectory: "libm/libmars-invaders",
expectedFilename: "pool/main/libm/libmars-invaders/libmars-invaders_1.03.deb",
},
{ // duplicate link, shouldn't panic
prefix: "",
component: "main",
sourcePath: "pool/01/ae/mars-invaders_1.03.deb",
sourcePath: "mars-invaders_1.03.deb",
poolDirectory: "m/mars-invaders",
expectedFilename: "pool/main/m/mars-invaders/mars-invaders_1.03.deb",
},
{ // prefix & component
prefix: "ppa",
component: "contrib",
sourcePath: "pool/01/ae/libmars-invaders_1.04.deb",
sourcePath: "libmars-invaders_1.04.deb",
poolDirectory: "libm/libmars-invaders",
expectedFilename: "pool/contrib/libm/libmars-invaders/libmars-invaders_1.04.deb",
},
}
pool := NewPackagePool(s.root)
pool := NewPackagePool(s.root, false)
for _, t := range tests {
t.sourcePath = filepath.Join(s.root, t.sourcePath)
err := os.MkdirAll(filepath.Dir(t.sourcePath), 0755)
tmpPath := filepath.Join(c.MkDir(), t.sourcePath)
err := ioutil.WriteFile(tmpPath, []byte("Contents"), 0644)
c.Assert(err, IsNil)
err = ioutil.WriteFile(t.sourcePath, []byte("Contents"), 0644)
sourceChecksum, err := utils.ChecksumsForFile(tmpPath)
c.Assert(err, IsNil)
sourceChecksum, err := utils.ChecksumsForFile(t.sourcePath)
srcPoolPath, err := pool.Import(tmpPath, t.sourcePath, &utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}, false, s.cs)
c.Assert(err, IsNil)
err = s.storage.LinkFromPool(filepath.Join(t.prefix, "pool", t.component, t.poolDirectory), pool, t.sourcePath, sourceChecksum, false)
// Test using hardlinks
err = s.storage.LinkFromPool(filepath.Join(t.prefix, "pool", t.component, t.poolDirectory), t.sourcePath, pool, srcPoolPath, sourceChecksum, false)
c.Assert(err, IsNil)
st, err := os.Stat(filepath.Join(s.storage.rootPath, t.prefix, t.expectedFilename))
c.Assert(err, IsNil)
info := st.Sys().(*syscall.Stat_t)
c.Check(int(info.Nlink), Equals, 2)
c.Check(int(info.Nlink), Equals, 3)
// Test using symlinks
err = s.storageSymlink.LinkFromPool(filepath.Join(t.prefix, "pool", t.component, t.poolDirectory), pool, t.sourcePath, sourceChecksum, false)
err = s.storageSymlink.LinkFromPool(filepath.Join(t.prefix, "pool", t.component, t.poolDirectory), t.sourcePath, pool, srcPoolPath, sourceChecksum, false)
c.Assert(err, IsNil)
st, err = os.Stat(filepath.Join(s.storageSymlink.rootPath, t.prefix, t.expectedFilename))
st, err = os.Lstat(filepath.Join(s.storageSymlink.rootPath, t.prefix, t.expectedFilename))
c.Assert(err, IsNil)
info = st.Sys().(*syscall.Stat_t)
c.Check(int(info.Nlink), Equals, 2)
c.Check(int(info.Nlink), Equals, 1)
c.Check(int(info.Mode&syscall.S_IFMT), Equals, int(syscall.S_IFLNK))
// Test using copy with checksum verification
err = s.storageCopy.LinkFromPool(filepath.Join(t.prefix, "pool", t.component, t.poolDirectory), pool, t.sourcePath, sourceChecksum, false)
err = s.storageCopy.LinkFromPool(filepath.Join(t.prefix, "pool", t.component, t.poolDirectory), t.sourcePath, pool, srcPoolPath, sourceChecksum, false)
c.Assert(err, IsNil)
st, err = os.Stat(filepath.Join(s.storageCopy.rootPath, t.prefix, t.expectedFilename))
@@ -210,7 +216,7 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
c.Check(int(info.Nlink), Equals, 1)
// Test using copy with size verification
err = s.storageCopySize.LinkFromPool(filepath.Join(t.prefix, "pool", t.component, t.poolDirectory), pool, t.sourcePath, sourceChecksum, false)
err = s.storageCopySize.LinkFromPool(filepath.Join(t.prefix, "pool", t.component, t.poolDirectory), t.sourcePath, pool, srcPoolPath, sourceChecksum, false)
c.Assert(err, IsNil)
st, err = os.Stat(filepath.Join(s.storageCopySize.rootPath, t.prefix, t.expectedFilename))
@@ -221,51 +227,50 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
}
// test linking files to duplicate final name
sourcePath := filepath.Join(s.root, "pool/02/bc/mars-invaders_1.03.deb")
err := os.MkdirAll(filepath.Dir(sourcePath), 0755)
tmpPath := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
err := ioutil.WriteFile(tmpPath, []byte("cONTENTS"), 0644)
c.Assert(err, IsNil)
// use same size to ensure copy with size check will fail on this one
err = ioutil.WriteFile(sourcePath, []byte("cONTENTS"), 0644)
sourceChecksum, err := utils.ChecksumsForFile(tmpPath)
c.Assert(err, IsNil)
sourceChecksum, err := utils.ChecksumsForFile(sourcePath)
srcPoolPath, err := pool.Import(tmpPath, "mars-invaders_1.03.deb", &utils.ChecksumInfo{MD5: "02bcda7a1ce305a3b60af9d5733ac1d"}, true, s.cs)
c.Assert(err, IsNil)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, sourceChecksum, false)
st, err := pool.Stat(srcPoolPath)
c.Assert(err, IsNil)
nlinks := int(st.Sys().(*syscall.Stat_t).Nlink)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, srcPoolPath, sourceChecksum, false)
c.Check(err, ErrorMatches, ".*file already exists and is different")
st, err := os.Stat(sourcePath)
st, err = pool.Stat(srcPoolPath)
c.Assert(err, IsNil)
info := st.Sys().(*syscall.Stat_t)
c.Check(int(info.Nlink), Equals, 1)
c.Check(int(st.Sys().(*syscall.Stat_t).Nlink), Equals, nlinks)
// linking with force
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, sourceChecksum, true)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, srcPoolPath, sourceChecksum, true)
c.Check(err, IsNil)
st, err = os.Stat(sourcePath)
st, err = pool.Stat(srcPoolPath)
c.Assert(err, IsNil)
info = st.Sys().(*syscall.Stat_t)
c.Check(int(info.Nlink), Equals, 2)
c.Check(int(st.Sys().(*syscall.Stat_t).Nlink), Equals, nlinks+1)
// Test using symlinks
err = s.storageSymlink.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, sourceChecksum, false)
err = s.storageSymlink.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, srcPoolPath, sourceChecksum, false)
c.Check(err, ErrorMatches, ".*file already exists and is different")
err = s.storageSymlink.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, sourceChecksum, true)
err = s.storageSymlink.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, srcPoolPath, sourceChecksum, true)
c.Check(err, IsNil)
// Test using copy with checksum verification
err = s.storageCopy.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, sourceChecksum, false)
err = s.storageCopy.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, srcPoolPath, sourceChecksum, false)
c.Check(err, ErrorMatches, ".*file already exists and is different")
err = s.storageCopy.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, sourceChecksum, true)
err = s.storageCopy.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, srcPoolPath, sourceChecksum, true)
c.Check(err, IsNil)
// Test using copy with size verification (this will NOT detect the difference)
err = s.storageCopySize.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, sourceChecksum, false)
err = s.storageCopySize.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, srcPoolPath, sourceChecksum, false)
c.Check(err, IsNil)
}

86
http/compression.go Normal file
View File

@@ -0,0 +1,86 @@
package http
import (
"compress/bzip2"
"compress/gzip"
"fmt"
"io"
"os"
"strings"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
xz "github.com/smira/go-xz"
)
// List of extensions + corresponding uncompression support, in the order the
// candidates are tried by DownloadTryCompression; the final entry (empty
// extension, identity transformation) matches the uncompressed file.
// NOTE(review): the field name is misspelled "extenstion"; renaming it would
// require touching every use site at once, so it is left as-is here.
var compressionMethods = []struct {
	extenstion     string
	transformation func(io.Reader) (io.Reader, error)
}{
	{
		extenstion:     ".bz2",
		transformation: func(r io.Reader) (io.Reader, error) { return bzip2.NewReader(r), nil },
	},
	{
		extenstion:     ".gz",
		transformation: func(r io.Reader) (io.Reader, error) { return gzip.NewReader(r) },
	},
	{
		extenstion:     ".xz",
		transformation: func(r io.Reader) (io.Reader, error) { return xz.NewReader(r) },
	},
	{
		extenstion:     "",
		transformation: func(r io.Reader) (io.Reader, error) { return r, nil },
	},
}
// DownloadTryCompression tries to download from URL .bz2, .gz and raw extension until
// it finds existing file.
//
// On success it returns a reader producing uncompressed data layered over the
// temporary file; the caller is responsible for closing the file.
func DownloadTryCompression(downloader aptly.Downloader, url string, expectedChecksums map[string]utils.ChecksumInfo, ignoreMismatch bool, maxTries int) (io.Reader, *os.File, error) {
	var err error
	for _, method := range compressionMethods {
		var file *os.File
		tryURL := url + method.extenstion
		foundChecksum := false
		// pick the expected checksum whose key is a suffix of the candidate URL
		for suffix, expected := range expectedChecksums {
			if strings.HasSuffix(tryURL, suffix) {
				file, err = DownloadTempWithChecksum(downloader, tryURL, &expected, ignoreMismatch, maxTries)
				foundChecksum = true
				break
			}
		}
		if !foundChecksum {
			// no expected checksum for this candidate: attempt it only when
			// mismatches are ignored, otherwise skip to the next variant
			if !ignoreMismatch {
				continue
			}
			file, err = DownloadTemp(downloader, tryURL)
		}
		if err != nil {
			// 404/403 mean "this compression variant doesn't exist" — try next
			if err1, ok := err.(*Error); ok && (err1.Code == 404 || err1.Code == 403) {
				continue
			}
			return nil, nil, err
		}
		var uncompressed io.Reader
		uncompressed, err = method.transformation(file)
		if err != nil {
			return nil, nil, err
		}
		return uncompressed, file, err
	}
	if err == nil {
		// every candidate was skipped without a single download attempt
		err = fmt.Errorf("no candidates for %s found", url)
	}
	return nil, nil, err
}

118
http/compression_test.go Normal file
View File

@@ -0,0 +1,118 @@
package http
import (
"errors"
"io"
"github.com/smira/aptly/utils"
. "gopkg.in/check.v1"
)
// CompressionSuite groups tests for DownloadTryCompression.
type CompressionSuite struct{}

var _ = Suite(&CompressionSuite{})

// Canned download responses: rawData pre-compressed with each supported
// method (the tests assert that each blob decompresses back to rawData).
const (
	bzipData = "BZh91AY&SY\xcc\xc3q\xd4\x00\x00\x02A\x80\x00\x10\x02\x00\x0c\x00 \x00!\x9ah3M\x19\x97\x8b\xb9\"\x9c(Hfa\xb8\xea\x00"
	gzipData = "\x1f\x8b\x08\x00\xc8j\xb0R\x00\x03+I-.\xe1\x02\x00\xc65\xb9;\x05\x00\x00\x00"
	xzData   = "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x04\x74\x65\x73\x74\x0a\x00\x00\x00\x00\x9d\xed\x31\x1d\x0f\x9f\xd7\xe6\x00\x01\x1d\x05\xb8\x2d\x80\xaf\x1f\xb6\xf3\x7d\x01\x00\x00\x00\x00\x04\x59\x5a"
	rawData  = "test"
)
// TestDownloadTryCompression walks the fallback chain: for each scenario the
// fake downloader serves only one variant (or a broken one), and the reader
// returned must decompress to rawData.
func (s *CompressionSuite) TestDownloadTryCompression(c *C) {
	var buf []byte

	expectedChecksums := map[string]utils.ChecksumInfo{
		"file.bz2": {Size: int64(len(bzipData))},
		"file.gz":  {Size: int64(len(gzipData))},
		"file.xz":  {Size: int64(len(xzData))},
		"file":     {Size: int64(len(rawData))},
	}

	// bzip2 only available
	buf = make([]byte, 4)
	d := NewFakeDownloader()
	d.ExpectResponse("http://example.com/file.bz2", bzipData)
	r, file, err := DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
	c.Assert(err, IsNil)
	defer file.Close()
	io.ReadFull(r, buf)
	c.Assert(string(buf), Equals, rawData)
	c.Assert(d.Empty(), Equals, true)

	// bzip2 not available, but gz is
	buf = make([]byte, 4)
	d = NewFakeDownloader()
	d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
	d.ExpectResponse("http://example.com/file.gz", gzipData)
	r, file, err = DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
	c.Assert(err, IsNil)
	defer file.Close()
	io.ReadFull(r, buf)
	c.Assert(string(buf), Equals, rawData)
	c.Assert(d.Empty(), Equals, true)

	// bzip2 & gzip not available, but xz is
	buf = make([]byte, 4)
	d = NewFakeDownloader()
	d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
	d.ExpectError("http://example.com/file.gz", &Error{Code: 404})
	d.ExpectResponse("http://example.com/file.xz", xzData)
	r, file, err = DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
	c.Assert(err, IsNil)
	defer file.Close()
	io.ReadFull(r, buf)
	c.Assert(string(buf), Equals, rawData)
	c.Assert(d.Empty(), Equals, true)

	// bzip2, gzip & xz not available, but raw is
	buf = make([]byte, 4)
	d = NewFakeDownloader()
	d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
	d.ExpectError("http://example.com/file.gz", &Error{Code: 404})
	d.ExpectError("http://example.com/file.xz", &Error{Code: 404})
	d.ExpectResponse("http://example.com/file", rawData)
	r, file, err = DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
	c.Assert(err, IsNil)
	defer file.Close()
	io.ReadFull(r, buf)
	c.Assert(string(buf), Equals, rawData)
	c.Assert(d.Empty(), Equals, true)

	// gzip available, but broken — decompression error is propagated
	d = NewFakeDownloader()
	d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
	d.ExpectResponse("http://example.com/file.gz", "x")
	_, file, err = DownloadTryCompression(d, "http://example.com/file", nil, true, 1)
	c.Assert(err, ErrorMatches, "unexpected EOF")
	c.Assert(d.Empty(), Equals, true)
}
// TestDownloadTryCompressionErrors covers the failure paths: unexpected
// requests, non-HTTP errors (not treated as "variant missing"), and checksum
// mismatches when sizes disagree.
func (s *CompressionSuite) TestDownloadTryCompressionErrors(c *C) {
	// downloader has no expectations at all
	d := NewFakeDownloader()
	_, _, err := DownloadTryCompression(d, "http://example.com/file", nil, true, 1)
	c.Assert(err, ErrorMatches, "unexpected request.*")

	// a non-*Error failure on the raw variant aborts instead of continuing
	d = NewFakeDownloader()
	d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
	d.ExpectError("http://example.com/file.gz", &Error{Code: 404})
	d.ExpectError("http://example.com/file.xz", &Error{Code: 404})
	d.ExpectError("http://example.com/file", errors.New("403"))
	_, _, err = DownloadTryCompression(d, "http://example.com/file", nil, true, 1)
	c.Assert(err, ErrorMatches, "403")

	// expected sizes all wrong — checksum verification must fail
	d = NewFakeDownloader()
	d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
	d.ExpectError("http://example.com/file.gz", &Error{Code: 404})
	d.ExpectError("http://example.com/file.xz", &Error{Code: 404})
	d.ExpectResponse("http://example.com/file", rawData)
	expectedChecksums := map[string]utils.ChecksumInfo{
		"file.bz2": {Size: 7},
		"file.gz":  {Size: 7},
		"file.xz":  {Size: 7},
		"file":     {Size: 7},
	}
	_, _, err = DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
	c.Assert(err, ErrorMatches, "checksums don't match.*")
}

View File

@@ -1,11 +1,8 @@
package http
import (
"compress/bzip2"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
@@ -13,23 +10,12 @@ import (
"time"
"github.com/mxk/go-flowrate/flowrate"
"github.com/pkg/errors"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
"github.com/smira/go-ftp-protocol/protocol"
"github.com/smira/go-xz"
)
// Error is download error connected to HTTP code
type Error struct {
Code int
URL string
}
// Error
func (e *Error) Error() string {
return fmt.Sprintf("HTTP code %d while fetching %s", e.Code, e.URL)
}
// Check interface
var (
_ aptly.Downloader = (*downloaderImpl)(nil)
@@ -37,30 +23,14 @@ var (
// downloaderImpl is implementation of Downloader interface
type downloaderImpl struct {
queue chan *downloadTask
stop chan struct{}
stopped chan struct{}
pause chan struct{}
unpause chan struct{}
progress aptly.Progress
aggWriter io.Writer
threads int
client *http.Client
}
// downloadTask represents single item in queue
type downloadTask struct {
url string
destination string
result chan<- error
expected utils.ChecksumInfo
ignoreMismatch bool
triesLeft int
}
// NewDownloader creates new instance of Downloader which specified number
// of threads and download limit in bytes/sec
func NewDownloader(threads int, downLimit int64, progress aptly.Progress) aptly.Downloader {
func NewDownloader(downLimit int64, progress aptly.Progress) aptly.Downloader {
transport := http.Transport{}
transport.Proxy = http.DefaultTransport.(*http.Transport).Proxy
transport.ResponseHeaderTimeout = 30 * time.Second
@@ -71,12 +41,6 @@ func NewDownloader(threads int, downLimit int64, progress aptly.Progress) aptly.
transport.RegisterProtocol("ftp", &protocol.FTPRoundTripper{})
downloader := &downloaderImpl{
queue: make(chan *downloadTask, 1000),
stop: make(chan struct{}, threads),
stopped: make(chan struct{}, threads),
pause: make(chan struct{}),
unpause: make(chan struct{}),
threads: threads,
progress: progress,
client: &http.Client{
Transport: &transport,
@@ -89,70 +53,28 @@ func NewDownloader(threads int, downLimit int64, progress aptly.Progress) aptly.
downloader.aggWriter = progress
}
for i := 0; i < downloader.threads; i++ {
go downloader.process()
}
return downloader
}
// Shutdown stops downloader after current tasks are finished,
// but doesn't process rest of queue
func (downloader *downloaderImpl) Shutdown() {
for i := 0; i < downloader.threads; i++ {
downloader.stop <- struct{}{}
}
for i := 0; i < downloader.threads; i++ {
<-downloader.stopped
}
}
// Abort stops downloader but doesn't wait for downloader to stop
func (downloader *downloaderImpl) Abort() {
for i := 0; i < downloader.threads; i++ {
downloader.stop <- struct{}{}
}
}
// Pause pauses task processing
func (downloader *downloaderImpl) Pause() {
for i := 0; i < downloader.threads; i++ {
downloader.pause <- struct{}{}
}
}
// Resume resumes task processing
func (downloader *downloaderImpl) Resume() {
for i := 0; i < downloader.threads; i++ {
downloader.unpause <- struct{}{}
}
}
// GetProgress returns Progress object
func (downloader *downloaderImpl) GetProgress() aptly.Progress {
return downloader.progress
}
// Download starts new download task
func (downloader *downloaderImpl) Download(url string, destination string, result chan<- error) {
downloader.DownloadWithChecksum(url, destination, result, utils.ChecksumInfo{Size: -1}, false, 1)
func (downloader *downloaderImpl) Download(url string, destination string) error {
return downloader.DownloadWithChecksum(url, destination, nil, false, 1)
}
// DownloadWithChecksum starts new download task with checksum verification
func (downloader *downloaderImpl) DownloadWithChecksum(url string, destination string, result chan<- error,
expected utils.ChecksumInfo, ignoreMismatch bool, maxTries int) {
downloader.queue <- &downloadTask{url: url, destination: destination, result: result, expected: expected, ignoreMismatch: ignoreMismatch, triesLeft: maxTries}
}
func (downloader *downloaderImpl) DownloadWithChecksum(url string, destination string,
expected *utils.ChecksumInfo, ignoreMismatch bool, maxTries int) error {
// handleTask processes single download task
func (downloader *downloaderImpl) handleTask(task *downloadTask) {
downloader.progress.Printf("Downloading %s...\n", task.url)
downloader.progress.Printf("Downloading %s...\n", url)
req, err := http.NewRequest("GET", task.url, nil)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
task.result <- fmt.Errorf("%s: %s", task.url, err)
return
return errors.Wrap(err, url)
}
req.Close = true
@@ -163,12 +85,11 @@ func (downloader *downloaderImpl) handleTask(task *downloadTask) {
}
var temppath string
for task.triesLeft > 0 {
temppath, err = downloader.downloadTask(req, task)
for maxTries > 0 {
temppath, err = downloader.download(req, url, destination, expected, ignoreMismatch)
if err != nil {
task.triesLeft--
maxTries--
} else {
// successful download
break
@@ -177,50 +98,48 @@ func (downloader *downloaderImpl) handleTask(task *downloadTask) {
// still an error after retrying, giving up
if err != nil {
task.result <- err
return
return err
}
err = os.Rename(temppath, task.destination)
err = os.Rename(temppath, destination)
if err != nil {
os.Remove(temppath)
task.result <- fmt.Errorf("%s: %s", task.url, err)
return
return errors.Wrap(err, url)
}
task.result <- nil
return nil
}
func (downloader *downloaderImpl) downloadTask(req *http.Request, task *downloadTask) (string, error) {
func (downloader *downloaderImpl) download(req *http.Request, url, destination string, expected *utils.ChecksumInfo, ignoreMismatch bool) (string, error) {
resp, err := downloader.client.Do(req)
if err != nil {
return "", fmt.Errorf("%s: %s", task.url, err)
return "", errors.Wrap(err, url)
}
if resp.Body != nil {
defer resp.Body.Close()
}
if resp.StatusCode < 200 || resp.StatusCode > 299 {
return "", &Error{Code: resp.StatusCode, URL: task.url}
return "", &Error{Code: resp.StatusCode, URL: url}
}
err = os.MkdirAll(filepath.Dir(task.destination), 0777)
err = os.MkdirAll(filepath.Dir(destination), 0777)
if err != nil {
return "", fmt.Errorf("%s: %s", task.url, err)
return "", errors.Wrap(err, url)
}
temppath := task.destination + ".down"
temppath := destination + ".down"
outfile, err := os.Create(temppath)
if err != nil {
return "", fmt.Errorf("%s: %s", task.url, err)
return "", errors.Wrap(err, url)
}
defer outfile.Close()
checksummer := utils.NewChecksumWriter()
writers := []io.Writer{outfile, downloader.aggWriter}
if task.expected.Size != -1 {
if expected != nil {
writers = append(writers, checksummer)
}
@@ -229,161 +148,36 @@ func (downloader *downloaderImpl) downloadTask(req *http.Request, task *download
_, err = io.Copy(w, resp.Body)
if err != nil {
os.Remove(temppath)
return "", fmt.Errorf("%s: %s", task.url, err)
return "", errors.Wrap(err, url)
}
if task.expected.Size != -1 {
if expected != nil {
actual := checksummer.Sum()
if actual.Size != task.expected.Size {
err = fmt.Errorf("%s: size check mismatch %d != %d", task.url, actual.Size, task.expected.Size)
} else if task.expected.MD5 != "" && actual.MD5 != task.expected.MD5 {
err = fmt.Errorf("%s: md5 hash mismatch %#v != %#v", task.url, actual.MD5, task.expected.MD5)
} else if task.expected.SHA1 != "" && actual.SHA1 != task.expected.SHA1 {
err = fmt.Errorf("%s: sha1 hash mismatch %#v != %#v", task.url, actual.SHA1, task.expected.SHA1)
} else if task.expected.SHA256 != "" && actual.SHA256 != task.expected.SHA256 {
err = fmt.Errorf("%s: sha256 hash mismatch %#v != %#v", task.url, actual.SHA256, task.expected.SHA256)
} else if task.expected.SHA512 != "" && actual.SHA512 != task.expected.SHA512 {
err = fmt.Errorf("%s: sha512 hash mismatch %#v != %#v", task.url, actual.SHA512, task.expected.SHA512)
if actual.Size != expected.Size {
err = fmt.Errorf("%s: size check mismatch %d != %d", url, actual.Size, expected.Size)
} else if expected.MD5 != "" && actual.MD5 != expected.MD5 {
err = fmt.Errorf("%s: md5 hash mismatch %#v != %#v", url, actual.MD5, expected.MD5)
} else if expected.SHA1 != "" && actual.SHA1 != expected.SHA1 {
err = fmt.Errorf("%s: sha1 hash mismatch %#v != %#v", url, actual.SHA1, expected.SHA1)
} else if expected.SHA256 != "" && actual.SHA256 != expected.SHA256 {
err = fmt.Errorf("%s: sha256 hash mismatch %#v != %#v", url, actual.SHA256, expected.SHA256)
} else if expected.SHA512 != "" && actual.SHA512 != expected.SHA512 {
err = fmt.Errorf("%s: sha512 hash mismatch %#v != %#v", url, actual.SHA512, expected.SHA512)
}
if err != nil {
if task.ignoreMismatch {
if ignoreMismatch {
downloader.progress.Printf("WARNING: %s\n", err.Error())
} else {
os.Remove(temppath)
return "", err
}
} else {
// update checksums if they match, so that they contain exactly expected set
*expected = actual
}
}
return temppath, nil
}
// process implements download thread in goroutine
func (downloader *downloaderImpl) process() {
for {
select {
case <-downloader.stop:
downloader.stopped <- struct{}{}
return
case <-downloader.pause:
<-downloader.unpause
case task := <-downloader.queue:
downloader.handleTask(task)
}
}
}
// DownloadTemp starts new download to temporary file and returns File
//
// Temporary file would be already removed, so no need to cleanup
func DownloadTemp(downloader aptly.Downloader, url string) (*os.File, error) {
return DownloadTempWithChecksum(downloader, url, utils.ChecksumInfo{Size: -1}, false, 1)
}
// DownloadTempWithChecksum is a DownloadTemp with checksum verification
//
// Temporary file would be already removed, so no need to cleanup
func DownloadTempWithChecksum(downloader aptly.Downloader, url string, expected utils.ChecksumInfo, ignoreMismatch bool, maxTries int) (*os.File, error) {
tempdir, err := ioutil.TempDir(os.TempDir(), "aptly")
if err != nil {
return nil, err
}
defer os.RemoveAll(tempdir)
tempfile := filepath.Join(tempdir, "buffer")
if expected.Size != -1 && downloader.GetProgress() != nil {
downloader.GetProgress().InitBar(expected.Size, true)
defer downloader.GetProgress().ShutdownBar()
}
ch := make(chan error, 1)
downloader.DownloadWithChecksum(url, tempfile, ch, expected, ignoreMismatch, maxTries)
err = <-ch
if err != nil {
return nil, err
}
file, err := os.Open(tempfile)
if err != nil {
return nil, err
}
return file, nil
}
// List of extensions + corresponding uncompression support
var compressionMethods = []struct {
extenstion string
transformation func(io.Reader) (io.Reader, error)
}{
{
extenstion: ".bz2",
transformation: func(r io.Reader) (io.Reader, error) { return bzip2.NewReader(r), nil },
},
{
extenstion: ".gz",
transformation: func(r io.Reader) (io.Reader, error) { return gzip.NewReader(r) },
},
{
extenstion: ".xz",
transformation: func(r io.Reader) (io.Reader, error) { return xz.NewReader(r) },
},
{
extenstion: "",
transformation: func(r io.Reader) (io.Reader, error) { return r, nil },
},
}
// DownloadTryCompression tries to download from URL .bz2, .gz and raw extension until
// it finds existing file.
func DownloadTryCompression(downloader aptly.Downloader, url string, expectedChecksums map[string]utils.ChecksumInfo, ignoreMismatch bool, maxTries int) (io.Reader, *os.File, error) {
var err error
for _, method := range compressionMethods {
var file *os.File
tryURL := url + method.extenstion
foundChecksum := false
for suffix, expected := range expectedChecksums {
if strings.HasSuffix(tryURL, suffix) {
file, err = DownloadTempWithChecksum(downloader, tryURL, expected, ignoreMismatch, maxTries)
foundChecksum = true
break
}
}
if !foundChecksum {
if !ignoreMismatch {
continue
}
file, err = DownloadTemp(downloader, tryURL)
}
if err != nil {
if err1, ok := err.(*Error); ok && (err1.Code == 404 || err1.Code == 403) {
continue
}
return nil, nil, err
}
var uncompressed io.Reader
uncompressed, err = method.transformation(file)
if err != nil {
return nil, nil, err
}
return uncompressed, file, err
}
if err == nil {
err = fmt.Errorf("no candidates for %s found", url)
}
return nil, nil, err
}

View File

@@ -1,15 +1,11 @@
package http
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"time"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/console"
@@ -18,17 +14,16 @@ import (
. "gopkg.in/check.v1"
)
type DownloaderSuite struct {
type DownloaderSuiteBase struct {
tempfile *os.File
l net.Listener
url string
ch chan bool
ch chan struct{}
progress aptly.Progress
d aptly.Downloader
}
var _ = Suite(&DownloaderSuite{})
func (s *DownloaderSuite) SetUpTest(c *C) {
func (s *DownloaderSuiteBase) SetUpTest(c *C) {
s.tempfile, _ = ioutil.TempFile(os.TempDir(), "aptly-test")
s.l, _ = net.ListenTCP("tcp4", &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)})
s.url = fmt.Sprintf("http://localhost:%d", s.l.Addr().(*net.TCPAddr).Port)
@@ -38,18 +33,20 @@ func (s *DownloaderSuite) SetUpTest(c *C) {
fmt.Fprintf(w, "Hello, %s", r.URL.Path)
})
s.ch = make(chan bool)
s.ch = make(chan struct{})
go func() {
http.Serve(s.l, mux)
s.ch <- true
close(s.ch)
}()
s.progress = console.NewProgress()
s.progress.Start()
s.d = NewDownloader(0, s.progress)
}
func (s *DownloaderSuite) TearDownTest(c *C) {
func (s *DownloaderSuiteBase) TearDownTest(c *C) {
s.progress.Shutdown()
s.l.Close()
@@ -59,249 +56,67 @@ func (s *DownloaderSuite) TearDownTest(c *C) {
s.tempfile.Close()
}
func (s *DownloaderSuite) TestStartupShutdown(c *C) {
goroutines := runtime.NumGoroutine()
d := NewDownloader(10, 100, s.progress)
d.Shutdown()
// wait for goroutines to shutdown
time.Sleep(100 * time.Millisecond)
if runtime.NumGoroutine()-goroutines > 1 {
c.Errorf("Number of goroutines %d, expected %d", runtime.NumGoroutine(), goroutines)
}
type DownloaderSuite struct {
DownloaderSuiteBase
}
func (s *DownloaderSuite) TestPauseResume(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
var _ = Suite(&DownloaderSuite{})
d.Pause()
d.Resume()
func (s *DownloaderSuite) SetUpTest(c *C) {
s.DownloaderSuiteBase.SetUpTest(c)
}
func (s *DownloaderSuite) TearDownTest(c *C) {
s.DownloaderSuiteBase.TearDownTest(c)
}
func (s *DownloaderSuite) TestDownloadOK(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
ch := make(chan error)
d.Download(s.url+"/test", s.tempfile.Name(), ch)
res := <-ch
c.Assert(res, IsNil)
c.Assert(s.d.Download(s.url+"/test", s.tempfile.Name()), IsNil)
}
func (s *DownloaderSuite) TestDownloadWithChecksum(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
ch := make(chan error)
c.Assert(s.d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), &utils.ChecksumInfo{}, false, 1),
ErrorMatches, ".*size check mismatch 12 != 0")
d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), ch, utils.ChecksumInfo{}, false, 1)
res := <-ch
c.Assert(res, ErrorMatches, ".*size check mismatch 12 != 0")
c.Assert(s.d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), &utils.ChecksumInfo{Size: 12, MD5: "abcdef"}, false, 1),
ErrorMatches, ".*md5 hash mismatch \"a1acb0fe91c7db45ec4d775192ec5738\" != \"abcdef\"")
d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), ch, utils.ChecksumInfo{Size: 12, MD5: "abcdef"}, false, 1)
res = <-ch
c.Assert(res, ErrorMatches, ".*md5 hash mismatch \"a1acb0fe91c7db45ec4d775192ec5738\" != \"abcdef\"")
c.Assert(s.d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), &utils.ChecksumInfo{Size: 12, MD5: "abcdef"}, true, 1),
IsNil)
d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), ch, utils.ChecksumInfo{Size: 12, MD5: "abcdef"}, true, 1)
res = <-ch
c.Assert(res, IsNil)
c.Assert(s.d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), &utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738"}, false, 1),
IsNil)
d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), ch, utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738"}, false, 1)
res = <-ch
c.Assert(res, IsNil)
c.Assert(s.d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), &utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738", SHA1: "abcdef"}, false, 1),
ErrorMatches, ".*sha1 hash mismatch \"921893bae6ad6fd818401875d6779254ef0ff0ec\" != \"abcdef\"")
d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), ch, utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738", SHA1: "abcdef"}, false, 1)
res = <-ch
c.Assert(res, ErrorMatches, ".*sha1 hash mismatch \"921893bae6ad6fd818401875d6779254ef0ff0ec\" != \"abcdef\"")
c.Assert(s.d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), &utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738",
SHA1: "921893bae6ad6fd818401875d6779254ef0ff0ec"}, false, 1),
IsNil)
d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), ch, utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738",
SHA1: "921893bae6ad6fd818401875d6779254ef0ff0ec"}, false, 1)
res = <-ch
c.Assert(res, IsNil)
c.Assert(s.d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), &utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738",
SHA1: "921893bae6ad6fd818401875d6779254ef0ff0ec", SHA256: "abcdef"}, false, 1),
ErrorMatches, ".*sha256 hash mismatch \"b3c92ee1246176ed35f6e8463cd49074f29442f5bbffc3f8591cde1dcc849dac\" != \"abcdef\"")
d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), ch, utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738",
SHA1: "921893bae6ad6fd818401875d6779254ef0ff0ec", SHA256: "abcdef"}, false, 1)
res = <-ch
c.Assert(res, ErrorMatches, ".*sha256 hash mismatch \"b3c92ee1246176ed35f6e8463cd49074f29442f5bbffc3f8591cde1dcc849dac\" != \"abcdef\"")
d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), ch, utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738",
SHA1: "921893bae6ad6fd818401875d6779254ef0ff0ec", SHA256: "b3c92ee1246176ed35f6e8463cd49074f29442f5bbffc3f8591cde1dcc849dac"}, false, 1)
res = <-ch
c.Assert(res, IsNil)
checksums := utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738",
SHA1: "921893bae6ad6fd818401875d6779254ef0ff0ec", SHA256: "b3c92ee1246176ed35f6e8463cd49074f29442f5bbffc3f8591cde1dcc849dac"}
c.Assert(s.d.DownloadWithChecksum(s.url+"/test", s.tempfile.Name(), &checksums, false, 1),
IsNil)
// download backfills missing checksums
c.Check(checksums.SHA512, Equals, "bac18bf4e564856369acc2ed57300fecba3a2c1af5ae8304021e4252488678feb18118466382ee4e1210fe1f065080210e453a80cfb37ccb8752af3269df160e")
}
func (s *DownloaderSuite) TestDownload404(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
ch := make(chan error)
d.Download(s.url+"/doesntexist", s.tempfile.Name(), ch)
res := <-ch
c.Assert(res, ErrorMatches, "HTTP code 404.*")
c.Assert(s.d.Download(s.url+"/doesntexist", s.tempfile.Name()),
ErrorMatches, "HTTP code 404.*")
}
func (s *DownloaderSuite) TestDownloadConnectError(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
ch := make(chan error)
d.Download("http://nosuch.localhost/", s.tempfile.Name(), ch)
res := <-ch
c.Assert(res, ErrorMatches, ".*no such host")
c.Assert(s.d.Download("http://nosuch.localhost/", s.tempfile.Name()),
ErrorMatches, ".*no such host")
}
func (s *DownloaderSuite) TestDownloadFileError(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
ch := make(chan error)
d.Download(s.url+"/test", "/", ch)
res := <-ch
c.Assert(res, ErrorMatches, ".*permission denied")
}
func (s *DownloaderSuite) TestDownloadTemp(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
f, err := DownloadTemp(d, s.url+"/test")
c.Assert(err, IsNil)
defer f.Close()
buf := make([]byte, 1)
f.Read(buf)
c.Assert(buf, DeepEquals, []byte("H"))
_, err = os.Stat(f.Name())
c.Assert(os.IsNotExist(err), Equals, true)
}
func (s *DownloaderSuite) TestDownloadTempWithChecksum(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
f, err := DownloadTempWithChecksum(d, s.url+"/test", utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738",
SHA1: "921893bae6ad6fd818401875d6779254ef0ff0ec", SHA256: "b3c92ee1246176ed35f6e8463cd49074f29442f5bbffc3f8591cde1dcc849dac"}, false, 1)
defer f.Close()
c.Assert(err, IsNil)
_, err = DownloadTempWithChecksum(d, s.url+"/test", utils.ChecksumInfo{Size: 13}, false, 1)
c.Assert(err, ErrorMatches, ".*size check mismatch 12 != 13")
}
func (s *DownloaderSuite) TestDownloadTempError(c *C) {
d := NewDownloader(2, 0, s.progress)
defer d.Shutdown()
f, err := DownloadTemp(d, s.url+"/doesntexist")
c.Assert(err, NotNil)
c.Assert(f, IsNil)
c.Assert(err, ErrorMatches, "HTTP code 404.*")
}
const (
bzipData = "BZh91AY&SY\xcc\xc3q\xd4\x00\x00\x02A\x80\x00\x10\x02\x00\x0c\x00 \x00!\x9ah3M\x19\x97\x8b\xb9\"\x9c(Hfa\xb8\xea\x00"
gzipData = "\x1f\x8b\x08\x00\xc8j\xb0R\x00\x03+I-.\xe1\x02\x00\xc65\xb9;\x05\x00\x00\x00"
xzData = "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x04\x74\x65\x73\x74\x0a\x00\x00\x00\x00\x9d\xed\x31\x1d\x0f\x9f\xd7\xe6\x00\x01\x1d\x05\xb8\x2d\x80\xaf\x1f\xb6\xf3\x7d\x01\x00\x00\x00\x00\x04\x59\x5a"
rawData = "test"
)
func (s *DownloaderSuite) TestDownloadTryCompression(c *C) {
var buf []byte
expectedChecksums := map[string]utils.ChecksumInfo{
"file.bz2": {Size: int64(len(bzipData))},
"file.gz": {Size: int64(len(gzipData))},
"file.xz": {Size: int64(len(xzData))},
"file": {Size: int64(len(rawData))},
}
// bzip2 only available
buf = make([]byte, 4)
d := NewFakeDownloader()
d.ExpectResponse("http://example.com/file.bz2", bzipData)
r, file, err := DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
// bzip2 not available, but gz is
buf = make([]byte, 4)
d = NewFakeDownloader()
d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
d.ExpectResponse("http://example.com/file.gz", gzipData)
r, file, err = DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
// bzip2 & gzip not available, but xz is
buf = make([]byte, 4)
d = NewFakeDownloader()
d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
d.ExpectError("http://example.com/file.gz", &Error{Code: 404})
d.ExpectResponse("http://example.com/file.xz", xzData)
r, file, err = DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
// bzip2, gzip & xz not available, but raw is
buf = make([]byte, 4)
d = NewFakeDownloader()
d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
d.ExpectError("http://example.com/file.gz", &Error{Code: 404})
d.ExpectError("http://example.com/file.xz", &Error{Code: 404})
d.ExpectResponse("http://example.com/file", rawData)
r, file, err = DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
c.Assert(err, IsNil)
defer file.Close()
io.ReadFull(r, buf)
c.Assert(string(buf), Equals, rawData)
c.Assert(d.Empty(), Equals, true)
// gzip available, but broken
d = NewFakeDownloader()
d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
d.ExpectResponse("http://example.com/file.gz", "x")
_, file, err = DownloadTryCompression(d, "http://example.com/file", nil, true, 1)
c.Assert(err, ErrorMatches, "unexpected EOF")
c.Assert(d.Empty(), Equals, true)
}
func (s *DownloaderSuite) TestDownloadTryCompressionErrors(c *C) {
d := NewFakeDownloader()
_, _, err := DownloadTryCompression(d, "http://example.com/file", nil, true, 1)
c.Assert(err, ErrorMatches, "unexpected request.*")
d = NewFakeDownloader()
d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
d.ExpectError("http://example.com/file.gz", &Error{Code: 404})
d.ExpectError("http://example.com/file.xz", &Error{Code: 404})
d.ExpectError("http://example.com/file", errors.New("403"))
_, _, err = DownloadTryCompression(d, "http://example.com/file", nil, true, 1)
c.Assert(err, ErrorMatches, "403")
d = NewFakeDownloader()
d.ExpectError("http://example.com/file.bz2", &Error{Code: 404})
d.ExpectError("http://example.com/file.gz", &Error{Code: 404})
d.ExpectError("http://example.com/file.xz", &Error{Code: 404})
d.ExpectResponse("http://example.com/file", rawData)
expectedChecksums := map[string]utils.ChecksumInfo{
"file.bz2": {Size: 7},
"file.gz": {Size: 7},
"file.xz": {Size: 7},
"file": {Size: 7},
}
_, _, err = DownloadTryCompression(d, "http://example.com/file", expectedChecksums, false, 1)
c.Assert(err, ErrorMatches, "checksums don't match.*")
c.Assert(s.d.Download(s.url+"/test", "/"),
ErrorMatches, ".*permission denied")
}

View File

@@ -60,7 +60,7 @@ func (f *FakeDownloader) Empty() bool {
}
// DownloadWithChecksum performs fake download by matching against first expectation in the queue or any expectation, with checksum verification
func (f *FakeDownloader) DownloadWithChecksum(url string, filename string, result chan<- error, expected utils.ChecksumInfo, ignoreMismatch bool, maxTries int) {
func (f *FakeDownloader) DownloadWithChecksum(url string, filename string, expected *utils.ChecksumInfo, ignoreMismatch bool, maxTries int) error {
var expectation expectedRequest
if len(f.expected) > 0 && f.expected[0].URL == url {
expectation, f.expected = f.expected[0], f.expected[1:]
@@ -68,25 +68,21 @@ func (f *FakeDownloader) DownloadWithChecksum(url string, filename string, resul
expectation = f.anyExpected[url]
delete(f.anyExpected, url)
} else {
result <- fmt.Errorf("unexpected request for %s", url)
return
return fmt.Errorf("unexpected request for %s", url)
}
if expectation.Err != nil {
result <- expectation.Err
return
return expectation.Err
}
err := os.MkdirAll(filepath.Dir(filename), 0755)
if err != nil {
result <- err
return
return err
}
outfile, err := os.Create(filename)
if err != nil {
result <- err
return
return err
}
defer outfile.Close()
@@ -95,45 +91,26 @@ func (f *FakeDownloader) DownloadWithChecksum(url string, filename string, resul
_, err = w.Write([]byte(expectation.Response))
if err != nil {
result <- err
return
return err
}
if expected.Size != -1 {
if expected != nil {
if expected.Size != cks.Sum().Size || expected.MD5 != "" && expected.MD5 != cks.Sum().MD5 ||
expected.SHA1 != "" && expected.SHA1 != cks.Sum().SHA1 || expected.SHA256 != "" && expected.SHA256 != cks.Sum().SHA256 {
if ignoreMismatch {
fmt.Printf("WARNING: checksums don't match: %#v != %#v for %s\n", expected, cks.Sum(), url)
} else {
result <- fmt.Errorf("checksums don't match: %#v != %#v for %s", expected, cks.Sum(), url)
return
return fmt.Errorf("checksums don't match: %#v != %#v for %s", expected, cks.Sum(), url)
}
}
}
result <- nil
return
return nil
}
// Download performs fake download by matching against first expectation in the queue
func (f *FakeDownloader) Download(url string, filename string, result chan<- error) {
f.DownloadWithChecksum(url, filename, result, utils.ChecksumInfo{Size: -1}, false, 1)
}
// Shutdown does nothing
func (f *FakeDownloader) Shutdown() {
}
// Abort does nothing
func (f *FakeDownloader) Abort() {
}
// Pause does nothing
func (f *FakeDownloader) Pause() {
}
// Resume does nothing
func (f *FakeDownloader) Resume() {
func (f *FakeDownloader) Download(url string, filename string) error {
return f.DownloadWithChecksum(url, filename, nil, false, 1)
}
// GetProgress returns Progress object

View File

@@ -1,2 +1,17 @@
// Package http provides all HTTP (and FTP)-related operations
package http
import (
"fmt"
)
// Error is download error connected to HTTP code
type Error struct {
Code int
URL string
}
// Error returns a human-readable description of the download error, including the HTTP code and URL
func (e *Error) Error() string {
return fmt.Sprintf("HTTP code %d while fetching %s", e.Code, e.URL)
}

47
http/temp.go Normal file
View File

@@ -0,0 +1,47 @@
package http
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
)
// DownloadTemp starts new download to temporary file and returns File
//
// Temporary file would be already removed, so no need to cleanup
func DownloadTemp(downloader aptly.Downloader, url string) (*os.File, error) {
return DownloadTempWithChecksum(downloader, url, nil, false, 1)
}
// DownloadTempWithChecksum is a DownloadTemp with checksum verification
//
// Temporary file would be already removed, so no need to cleanup
func DownloadTempWithChecksum(downloader aptly.Downloader, url string, expected *utils.ChecksumInfo, ignoreMismatch bool, maxTries int) (*os.File, error) {
tempdir, err := ioutil.TempDir(os.TempDir(), "aptly")
if err != nil {
return nil, err
}
defer os.RemoveAll(tempdir)
tempfile := filepath.Join(tempdir, "buffer")
if expected != nil && downloader.GetProgress() != nil {
downloader.GetProgress().InitBar(expected.Size, true)
defer downloader.GetProgress().ShutdownBar()
}
err = downloader.DownloadWithChecksum(url, tempfile, expected, ignoreMismatch, maxTries)
if err != nil {
return nil, err
}
file, err := os.Open(tempfile)
if err != nil {
return nil, err
}
return file, nil
}

54
http/temp_test.go Normal file
View File

@@ -0,0 +1,54 @@
package http
import (
"os"
"github.com/smira/aptly/utils"
. "gopkg.in/check.v1"
)
type TempSuite struct {
DownloaderSuiteBase
}
var _ = Suite(&TempSuite{})
func (s *TempSuite) SetUpTest(c *C) {
s.DownloaderSuiteBase.SetUpTest(c)
}
func (s *TempSuite) TearDownTest(c *C) {
s.DownloaderSuiteBase.TearDownTest(c)
}
func (s *TempSuite) TestDownloadTemp(c *C) {
f, err := DownloadTemp(s.d, s.url+"/test")
c.Assert(err, IsNil)
defer f.Close()
buf := make([]byte, 1)
f.Read(buf)
c.Assert(buf, DeepEquals, []byte("H"))
_, err = os.Stat(f.Name())
c.Assert(os.IsNotExist(err), Equals, true)
}
func (s *TempSuite) TestDownloadTempWithChecksum(c *C) {
f, err := DownloadTempWithChecksum(s.d, s.url+"/test", &utils.ChecksumInfo{Size: 12, MD5: "a1acb0fe91c7db45ec4d775192ec5738",
SHA1: "921893bae6ad6fd818401875d6779254ef0ff0ec", SHA256: "b3c92ee1246176ed35f6e8463cd49074f29442f5bbffc3f8591cde1dcc849dac"}, false, 1)
defer f.Close()
c.Assert(err, IsNil)
_, err = DownloadTempWithChecksum(s.d, s.url+"/test", &utils.ChecksumInfo{Size: 13}, false, 1)
c.Assert(err, ErrorMatches, ".*size check mismatch 12 != 13")
}
func (s *TempSuite) TestDownloadTempError(c *C) {
f, err := DownloadTemp(s.d, s.url+"/doesntexist")
c.Assert(err, NotNil)
c.Assert(f, IsNil)
c.Assert(err, ErrorMatches, "HTTP code 404.*")
}

View File

@@ -47,6 +47,7 @@ Configuration file is stored in JSON format (default values shown below):
"gpgDisableSign": false,
"gpgDisableVerify": false,
"downloadSourcePackages": false,
"skipLegacyPool": true,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
"skipContentsPublishing": false,
@@ -151,6 +152,10 @@ don\(cqt verify remote mirrors with gpg(1), also can be disabled on per\-mirror
if enabled, all mirrors created would have flag set to download source packages; this setting could be controlled on per\-mirror basis with \fB\-with\-sources\fR flag
.
.TP
\fBskipLegacyPool\fR
in aptly up to version 1\.0\.0, package files were stored in internal package pool with MD5\-derived path, since 1\.1\.0 package pool layout was changed; if option is enabled, aptly stops checking for legacy paths; by default option is enabled for new aptly installations and disabled when upgrading from older versions
.
.TP
\fBppaDistributorID\fR, \fBppaCodename\fR
specifies parameters for short PPA url expansion, if left blank they default to output of \fBlsb_release\fR command
.
@@ -1943,5 +1948,8 @@ Charles Hsu (https://github\.com/charz)
.IP "\[ci]" 4
Clemens Rabe (https://github\.com/seeraven)
.
.IP "\[ci]" 4
TJ Merritt (https://github\.com/tjmerritt)
.
.IP "" 0

View File

@@ -39,6 +39,7 @@ Configuration file is stored in JSON format (default values shown below):
"gpgDisableSign": false,
"gpgDisableVerify": false,
"downloadSourcePackages": false,
"skipLegacyPool": true,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
"skipContentsPublishing": false,
@@ -130,6 +131,13 @@ Options:
if enabled, all mirrors created would have flag set to download source packages;
this setting could be controlled on per-mirror basis with `-with-sources` flag
* `skipLegacyPool`:
in aptly up to version 1.0.0, package files were stored in internal package pool
with MD5-derived path, since 1.1.0 package pool layout was changed;
if option is enabled, aptly stops checking for legacy paths;
by default option is enabled for new aptly installations and disabled when
upgrading from older versions
* `ppaDistributorID`, `ppaCodename`:
specifies parameters for short PPA url expansion, if left blank they default
to output of `lsb_release` command

View File

@@ -2,6 +2,7 @@ package s3
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
@@ -12,8 +13,8 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/pkg/errors"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/files"
"github.com/smira/aptly/utils"
"github.com/smira/go-aws-auth"
)
@@ -135,6 +136,17 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
}
defer source.Close()
err = storage.putFile(path, source)
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s", sourceFilename, storage))
}
return err
}
// putFile uploads file-like object to
func (storage *PublishedStorage) putFile(path string, source io.ReadSeeker) error {
params := &s3.PutObjectInput{
Bucket: aws.String(storage.bucket),
Key: aws.String(filepath.Join(storage.prefix, path)),
@@ -148,13 +160,18 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
params.ServerSideEncryption = aws.String(storage.encryptionMethod)
}
_, err = storage.s3.PutObject(params)
_, err := storage.s3.PutObject(params)
if err != nil {
return fmt.Errorf("error uploading %s to %s: %s", sourceFilename, storage, err)
return err
}
if storage.plusWorkaround && strings.Index(path, "+") != -1 {
return storage.PutFile(strings.Replace(path, "+", " ", -1), sourceFilename)
_, err = source.Seek(0, 0)
if err != nil {
return err
}
return storage.putFile(strings.Replace(path, "+", " ", -1), source)
}
return nil
}
@@ -167,7 +184,7 @@ func (storage *PublishedStorage) Remove(path string) error {
}
_, err := storage.s3.DeleteObject(params)
if err != nil {
return fmt.Errorf("error deleting %s from %s: %s", path, storage, err)
return errors.Wrap(err, fmt.Sprintf("error deleting %s from %s", path, storage))
}
if storage.plusWorkaround && strings.Index(path, "+") != -1 {
@@ -240,23 +257,16 @@ func (storage *PublishedStorage) RemoveDirs(path string, progress aptly.Progress
// sourcePath is filepath to package file in package pool
//
// LinkFromPool returns relative path for the published file to be included in package index
func (storage *PublishedStorage) LinkFromPool(publishedDirectory string, sourcePool aptly.PackagePool,
func (storage *PublishedStorage) LinkFromPool(publishedDirectory, baseName string, sourcePool aptly.PackagePool,
sourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error {
// verify that package pool is local pool in filesystem
_ = sourcePool.(*files.PackagePool)
baseName := filepath.Base(sourcePath)
relPath := filepath.Join(publishedDirectory, baseName)
poolPath := filepath.Join(storage.prefix, relPath)
var (
err error
)
if storage.pathCache == nil {
paths, md5s, err := storage.internalFilelist(storage.prefix, true)
if err != nil {
return fmt.Errorf("error caching paths under prefix: %s", err)
return errors.Wrap(err, "error caching paths under prefix")
}
storage.pathCache = make(map[string]string, len(paths))
@@ -284,9 +294,17 @@ func (storage *PublishedStorage) LinkFromPool(publishedDirectory string, sourceP
}
}
err = storage.PutFile(relPath, sourcePath)
source, err := sourcePool.Open(sourcePath)
if err != nil {
return err
}
defer source.Close()
err = storage.putFile(relPath, source)
if err == nil {
storage.pathCache[relPath] = sourceMD5
} else {
err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s: %s", sourcePath, storage, poolPath))
}
return err

View File

@@ -3,7 +3,6 @@ package s3
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
. "gopkg.in/check.v1"
@@ -217,42 +216,44 @@ func (s *PublishedStorageSuite) TestRenameFile(c *C) {
func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
root := c.MkDir()
pool := files.NewPackagePool(root)
pool := files.NewPackagePool(root, false)
cs := files.NewMockChecksumStorage()
sourcePath := filepath.Join(root, "pool/c1/df/mars-invaders_1.03.deb")
err := os.MkdirAll(filepath.Dir(sourcePath), 0755)
tmpFile1 := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
err := ioutil.WriteFile(tmpFile1, []byte("Contents"), 0644)
c.Assert(err, IsNil)
cksum1 := utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}
err = ioutil.WriteFile(sourcePath, []byte("Contents"), 0644)
tmpFile2 := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
err = ioutil.WriteFile(tmpFile2, []byte("Spam"), 0644)
c.Assert(err, IsNil)
cksum2 := utils.ChecksumInfo{MD5: "e9dfd31cc505d51fc26975250750deab"}
sourcePath2 := filepath.Join(root, "pool/e9/df/mars-invaders_1.03.deb")
err = os.MkdirAll(filepath.Dir(sourcePath2), 0755)
src1, err := pool.Import(tmpFile1, "mars-invaders_1.03.deb", &cksum1, true, cs)
c.Assert(err, IsNil)
err = ioutil.WriteFile(sourcePath2, []byte("Spam"), 0644)
src2, err := pool.Import(tmpFile2, "mars-invaders_1.03.deb", &cksum2, true, cs)
c.Assert(err, IsNil)
// first link from pool
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}, false)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src1, cksum1, false)
c.Check(err, IsNil)
c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents"))
// duplicate link from pool
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}, false)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src1, cksum1, false)
c.Check(err, IsNil)
c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents"))
// link from pool with conflict
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath2, utils.ChecksumInfo{MD5: "e9dfd31cc505d51fc26975250750deab"}, false)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src2, cksum2, false)
c.Check(err, ErrorMatches, ".*file already exists and is different.*")
c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Contents"))
// link from pool with conflict and force
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath2, utils.ChecksumInfo{MD5: "e9dfd31cc505d51fc26975250750deab"}, true)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src2, cksum2, true)
c.Check(err, IsNil)
c.Check(s.GetFile(c, "pool/main/m/mars-invaders/mars-invaders_1.03.deb"), DeepEquals, []byte("Spam"))

View File

@@ -3,14 +3,15 @@ package swift
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"time"
"github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/files"
"github.com/smira/aptly/utils"
)
@@ -129,12 +130,18 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err
}
defer source.Close()
_, err = storage.conn.ObjectPut(storage.container, filepath.Join(storage.prefix, path), source, false, "", "", nil)
err = storage.putFile(path, source)
if err != nil {
return fmt.Errorf("error uploading %s to %s: %s", sourceFilename, storage, err)
err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s", sourceFilename, storage))
}
return nil
return err
}
func (storage *PublishedStorage) putFile(path string, source io.Reader) error {
_, err := storage.conn.ObjectPut(storage.container, filepath.Join(storage.prefix, path), source, false, "", "", nil)
return err
}
// Remove removes single file under public path
@@ -186,12 +193,9 @@ func (storage *PublishedStorage) RemoveDirs(path string, progress aptly.Progress
// sourcePath is filepath to package file in package pool
//
// LinkFromPool returns relative path for the published file to be included in package index
func (storage *PublishedStorage) LinkFromPool(publishedDirectory string, sourcePool aptly.PackagePool,
func (storage *PublishedStorage) LinkFromPool(publishedDirectory, baseName string, sourcePool aptly.PackagePool,
sourcePath string, sourceChecksums utils.ChecksumInfo, force bool) error {
// verify that package pool is local pool in filesystem
_ = sourcePool.(*files.PackagePool)
baseName := filepath.Base(sourcePath)
relPath := filepath.Join(publishedDirectory, baseName)
poolPath := filepath.Join(storage.prefix, relPath)
@@ -216,7 +220,18 @@ func (storage *PublishedStorage) LinkFromPool(publishedDirectory string, sourceP
}
}
return storage.PutFile(relPath, sourcePath)
source, err := sourcePool.Open(sourcePath)
if err != nil {
return err
}
defer source.Close()
err = storage.putFile(relPath, source)
if err != nil {
err = errors.Wrap(err, fmt.Sprintf("error uploading %s to %s: %s", sourcePath, storage, poolPath))
}
return err
}
// Filelist returns list of files under prefix

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"time"
@@ -145,24 +144,26 @@ func (s *PublishedStorageSuite) TestRenameFile(c *C) {
func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
root := c.MkDir()
pool := files.NewPackagePool(root)
pool := files.NewPackagePool(root, false)
cs := files.NewMockChecksumStorage()
sourcePath := filepath.Join(root, "pool/c1/df/mars-invaders_1.03.deb")
err := os.MkdirAll(filepath.Dir(sourcePath), 0755)
tmpFile1 := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
err := ioutil.WriteFile(tmpFile1, []byte("Contents"), 0644)
c.Assert(err, IsNil)
cksum1 := utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}
err = ioutil.WriteFile(sourcePath, []byte("Contents"), 0644)
tmpFile2 := filepath.Join(c.MkDir(), "mars-invaders_1.03.deb")
err = ioutil.WriteFile(tmpFile2, []byte("Spam"), 0644)
c.Assert(err, IsNil)
cksum2 := utils.ChecksumInfo{MD5: "e9dfd31cc505d51fc26975250750deab"}
sourcePath2 := filepath.Join(root, "pool/e9/df/mars-invaders_1.03.deb")
err = os.MkdirAll(filepath.Dir(sourcePath2), 0755)
src1, err := pool.Import(tmpFile1, "mars-invaders_1.03.deb", &cksum1, true, cs)
c.Assert(err, IsNil)
err = ioutil.WriteFile(sourcePath2, []byte("Spam"), 0644)
src2, err := pool.Import(tmpFile2, "mars-invaders_1.03.deb", &cksum2, true, cs)
c.Assert(err, IsNil)
// first link from pool
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}, false)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src1, cksum1, false)
c.Check(err, IsNil)
data, err := s.storage.conn.ObjectGetBytes("test", "pool/main/m/mars-invaders/mars-invaders_1.03.deb")
@@ -170,7 +171,7 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
c.Check(data, DeepEquals, []byte("Contents"))
// duplicate link from pool
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath, utils.ChecksumInfo{MD5: "c1df1da7a1ce305a3b60af9d5733ac1d"}, false)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src1, cksum1, false)
c.Check(err, IsNil)
data, err = s.storage.conn.ObjectGetBytes("test", "pool/main/m/mars-invaders/mars-invaders_1.03.deb")
@@ -178,7 +179,7 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
c.Check(data, DeepEquals, []byte("Contents"))
// link from pool with conflict
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath2, utils.ChecksumInfo{MD5: "e9dfd31cc505d51fc26975250750deab"}, false)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src2, cksum2, false)
c.Check(err, ErrorMatches, ".*file already exists and is different.*")
data, err = s.storage.conn.ObjectGetBytes("test", "pool/main/m/mars-invaders/mars-invaders_1.03.deb")
@@ -186,7 +187,7 @@ func (s *PublishedStorageSuite) TestLinkFromPool(c *C) {
c.Check(data, DeepEquals, []byte("Contents"))
// link from pool with conflict and force
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), pool, sourcePath2, utils.ChecksumInfo{MD5: "e9dfd31cc505d51fc26975250750deab"}, true)
err = s.storage.LinkFromPool(filepath.Join("", "pool", "main", "m/mars-invaders"), "mars-invaders_1.03.deb", pool, src2, cksum2, true)
c.Check(err, IsNil)
data, err = s.storage.conn.ObjectGetBytes("test", "pool/main/m/mars-invaders/mars-invaders_1.03.deb")

View File

@@ -11,6 +11,7 @@
"gpgDisableSign": false,
"gpgDisableVerify": false,
"downloadSourcePackages": false,
"skipLegacyPool": false,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
"skipContentsPublishing": false,

View File

@@ -11,6 +11,7 @@
"gpgDisableSign": false,
"gpgDisableVerify": false,
"downloadSourcePackages": false,
"skipLegacyPool": true,
"ppaDistributorID": "ubuntu",
"ppaCodename": "",
"skipContentsPublishing": false,

View File

@@ -0,0 +1,9 @@
Building download queue...
Download queue: 1 items (2.67 KiB)
Downloading & parsing package files...
Downloading https://dl.bintray.com/smira/deb/Packages.bz2...
Downloading https://dl.bintray.com/smira/deb/Release...
Downloading https://dl.bintray.com/smira/deb/libboost-program-options-dev_1.49.0.1_i386.deb...
Mirror `bintray` has been successfully updated.

View File

@@ -0,0 +1,9 @@
Building download queue...
Download queue: 1 items (2.67 KiB)
Downloading & parsing package files...
Downloading https://dl.bintray.com/smira/deb/Packages.bz2...
Downloading https://dl.bintray.com/smira/deb/Release...
Downloading https://dl.bintray.com/smira/deb/libboost-program-options-dev_1.49.0.1_i386.deb...
Mirror `bintray` has been successfully updated.

View File

@@ -0,0 +1,10 @@
Applying filter...
Building download queue...
Download queue: 0 items (0 B)
Downloading & parsing package files...
Downloading http://mirror.yandex.ru/debian/dists/wheezy/Release...
Downloading http://mirror.yandex.ru/debian/dists/wheezy/main/binary-i386/Packages.bz2...
Mirror `wheezy` has been successfully updated.
Packages filtered: 36024 -> 1.

View File

@@ -0,0 +1,11 @@
Applying filter...
Building download queue...
Download queue: 1 items (2.67 KiB)
Downloading & parsing package files...
Downloading http://mirror.yandex.ru/debian/dists/wheezy/Release...
Downloading http://mirror.yandex.ru/debian/dists/wheezy/main/binary-i386/Packages.bz2...
Downloading http://mirror.yandex.ru/debian/pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb...
Mirror `wheezy` has been successfully updated.
Packages filtered: 36024 -> 1.

View File

@@ -1,5 +1,8 @@
import string
import re
import os
import shutil
import inspect
from lib import BaseTest
@@ -201,3 +204,106 @@ class UpdateMirror14Test(BaseTest):
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
class UpdateMirror15Test(BaseTest):
"""
update mirrors: update for mirror without MD5 checksums
"""
longTest = False
fixtureCmds = [
"aptly mirror create --ignore-signatures bintray https://dl.bintray.com/smira/deb/ ./",
]
runCmd = "aptly mirror update --ignore-signatures bintray"
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
def check(self):
super(UpdateMirror15Test, self).check()
# check pool
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class UpdateMirror16Test(BaseTest):
"""
update mirrors: update for mirror without MD5 checksums but with file in pool on legacy MD5 location
as mirror lacks MD5 checksum, file would be downloaded but not re-imported
"""
longTest = False
fixtureCmds = [
"aptly mirror create --ignore-signatures bintray https://dl.bintray.com/smira/deb/ ./",
]
runCmd = "aptly mirror update --ignore-signatures bintray"
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
def prepare(self):
super(UpdateMirror16Test, self).prepare()
os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool", "00", "35"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "libboost-program-options-dev_1.49.0.1_i386.deb"),
os.path.join(os.environ["HOME"], ".aptly", "pool", "00", "35"))
def check(self):
super(UpdateMirror16Test, self).check()
# check pool
self.check_not_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class UpdateMirror17Test(BaseTest):
"""
update mirrors: update for mirror but with file in pool on legacy MD5 location
"""
longTest = False
fixtureCmds = [
"aptly mirror create -ignore-signatures -architectures=i386 -filter=libboost-program-options-dev wheezy http://mirror.yandex.ru/debian wheezy main",
]
runCmd = "aptly mirror update -ignore-signatures wheezy"
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
def prepare(self):
super(UpdateMirror17Test, self).prepare()
os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool", "00", "35"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "libboost-program-options-dev_1.49.0.1_i386.deb"),
os.path.join(os.environ["HOME"], ".aptly", "pool", "00", "35"))
def check(self):
super(UpdateMirror17Test, self).check()
# check pool
self.check_not_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class UpdateMirror18Test(BaseTest):
"""
update mirrors: update for mirror but with file in pool on legacy MD5 location and disabled legacy path support
"""
longTest = False
fixtureCmds = [
"aptly mirror create -ignore-signatures -architectures=i386 -filter=libboost-program-options-dev wheezy http://mirror.yandex.ru/debian wheezy main",
]
runCmd = "aptly mirror update -ignore-signatures wheezy"
configOverride = {'skipLegacyPool': True}
def output_processor(self, output):
return "\n".join(sorted(output.split("\n")))
def prepare(self):
super(UpdateMirror18Test, self).prepare()
os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool", "00", "35"))
shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "libboost-program-options-dev_1.49.0.1_i386.deb"),
os.path.join(os.environ["HOME"], ".aptly", "pool", "00", "35"))
def check(self):
super(UpdateMirror18Test, self).check()
# check pool
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')

View File

@@ -1,6 +1,6 @@
Loading packages...
Generating metadata files and linking package files...
[!] Failed to generate package contents: unable to read .tar archive from ${HOME}/.aptly/pool/a5/d5/libboost-broken-program-options-dev_1.49.0.1_i386.deb: unexpected EOF
[!] Failed to generate package contents: unable to read .tar archive from libboost-broken-program-options-dev_1.49.0.1_i386.deb: unexpected EOF
Finalizing metadata files...
Signing file 'Release' with gpg, please enter your passphrase when prompted:
Clearsigning file 'Release' with gpg, please enter your passphrase when prompted:

View File

@@ -8,6 +8,8 @@
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
5005fbd1f30637edc1d380b30f45db9b79100d07 893 pyspi-0.6.1-1.3.stripped.dsc
56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz
@@ -17,6 +19,8 @@
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz

View File

@@ -1,6 +1,6 @@
Loading packages...
Generating metadata files and linking package files...
[!] Failed to generate package contents: unable to read .tar archive from ${HOME}/.aptly/pool/3a/0a/libboost-broken-program-options-dev_1.49.0.1_i386.deb: unexpected EOF
[!] Failed to generate package contents: unable to read .tar archive from libboost-broken-program-options-dev_1.49.0.1_i386.deb: unexpected EOF
Finalizing metadata files...
Signing file 'Release' with gpg, please enter your passphrase when prompted:
Clearsigning file 'Release' with gpg, please enter your passphrase when prompted:

View File

@@ -8,6 +8,8 @@
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
5005fbd1f30637edc1d380b30f45db9b79100d07 893 pyspi-0.6.1-1.3.stripped.dsc
56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz
@@ -17,6 +19,8 @@
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz

View File

@@ -58,6 +58,10 @@ SHA256: 27760f636f6dbfe387dfbede1131fe7a0dd5fd3b0ab562213193ffa7cfcadfb5
SHA256: 2920249908a8297f85006def6a55fb99abfcc8466cac2b9f28d01ce8315df065
SHA256: 8361f45f51a7e70e3367e5b2df59fa8defc8648a76afa4159da3f249460f5b33
SHA256: b626c3320c0ba2c41c5214bf8175c713f3713cc393e9361a977dc0202c197875
SHA512: 7223dbbf4a847c48f040c3b7dd1e9f3b45b1a837794aa8368456cc2d522a21180751755a6ea0d919f8cb4b092d742e525e138c6ea9185aedea55bc1d85475e76
SHA512: ad84c2798fd91f94782f08f371007e62fe933bcd079041874e3e54d6e91a66f953a8b8da2d563e88d800fd9819353362f0e700701ddcf5b44f490123e57f8b38
SHA512: adda521ac2837bba7bf4e4a6a633f79187dff5ec3806d1bbe97544ac0a00024a41583f70434362755ef33aa788f79c7dc1a0bdb3daaf9a0465d1b90600ba30ee
SHA512: bd1d7374808541d85fdb965c91b1ac2d0a98580cfac524d2bbdd6021fa6689a54ce7effd9c899237300b2c4966a824c7ed4aa6a4f6c4cb869a7163702960d68c
Section: utils
Section: utils
Section: utils

View File

@@ -58,6 +58,10 @@ SHA256: 27760f636f6dbfe387dfbede1131fe7a0dd5fd3b0ab562213193ffa7cfcadfb5
SHA256: 6898801e3f3c97a30bef1ee50381479b69360a28807fb63fcce4abef4da1aec7
SHA256: b852d7681ea328bd8b45140973624781e65d0363961d92bcc2ab0bbf1cc6ed52
SHA256: cff40c87faea248c77de7d9fc50fcbc80631cd1bc8cec2b1033e0db452e08ea6
SHA512: 6e2f3b4add560a19154717ad4dea1af9e07ef251403c85c87ef5b30e3f8639d74ab8bb1a27b7e4d3346a66f0c9180c70435557caa4e6eded9bd9010f3f5d7123
SHA512: adda521ac2837bba7bf4e4a6a633f79187dff5ec3806d1bbe97544ac0a00024a41583f70434362755ef33aa788f79c7dc1a0bdb3daaf9a0465d1b90600ba30ee
SHA512: e1c69691ceb3afb10ad8287e34ef4af75046f99e8aa51d5f15c1e8ac904377ac44023aed1bd7572ebc64c68aca9f99dbd485e13952f6b65e41cf47598af5e03f
SHA512: e6277d5e08210fc7258fc239d1715657a4bd9a4c3c190e41a0b3e4d101bd3abfd7b5c87ed8111a1f3efec239b27938a42cd25a582a6f9d93fdb28fc9684cf14c
Section: utils
Section: utils
Section: utils

View File

@@ -25,6 +25,8 @@ SHA1: 5d32171182e956f8277d44378b1623bbeae23110
SHA1: dbe121bae44db6eb6108311f41997c4ede1178b2
SHA256: 4abcb1191d8a3e58d88fb56084f9d784255ba68c767babc3c2819b7a1a689b78
SHA256: dd7230f9d025c47e8c94e4101e2970e94aed50ec0c65801f9c7cd0a03d6723e1
SHA512: 520ff1cc4053499609c87329d9458560817c5638bd6871ba3b0598c3cb95420859601aa09728998d8227a4ab29930f4d0474660e26921829641b7bed2751ec5e
SHA512: a0d5e55d8e183f19111cf3067fa8d434a263bad5bafce39fedaeb3ef7fd97577fef852e4f2bb5e04e27b2a0b79d3e707ff12af5b54879f33f124cdf1626a3dea
Section: debian-installer
Section: debian-installer
Size: 130734

View File

@@ -25,6 +25,8 @@ SHA1: e64cb327e89ba41ba6aaeca7e9e69cf18479ed40
SHA1: f6937084ae96b269131a08bb365619e704f91d21
SHA256: 7d86005e0f2a7bdeff3204ccb0e50d6d06b07011621acb56ad322480bd11494c
SHA256: 96eae21eb31fa79d196dfbec63594f62c39753aad59d02d69bf9495ad486ec01
SHA512: 0de8d92708fbdd6c14b196124ff4fb8a047daf75b942eae24987a0707293578ca86b2de8d61aad72472e653e4536ec62b83bb60ee0a422f317212bd6159a1753
SHA512: 5954d3f4d8960a2444f89192d05781087410f296a9d810a6bff2a7bc3955f952a3f063c47d575b0215dd60681d99c3e08852c9e3df027ad94ec448fc1749da57
Section: debian-installer
Section: debian-installer
Size: 125582

View File

@@ -8,6 +8,8 @@
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
5005fbd1f30637edc1d380b30f45db9b79100d07 893 pyspi-0.6.1-1.3.stripped.dsc
56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz
@@ -17,6 +19,8 @@
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz

View File

@@ -8,6 +8,8 @@
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
5005fbd1f30637edc1d380b30f45db9b79100d07 893 pyspi-0.6.1-1.3.stripped.dsc
56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz
@@ -17,6 +19,8 @@
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz

View File

@@ -7,6 +7,8 @@
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
5005fbd1f30637edc1d380b30f45db9b79100d07 893 pyspi-0.6.1-1.3.stripped.dsc
56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz
@@ -16,6 +18,8 @@
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz

View File

@@ -7,6 +7,8 @@
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
2f5bd47cf38852b6fc927a50f98c1448 893 pyspi-0.6.1-1.3.stripped.dsc
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
5005fbd1f30637edc1d380b30f45db9b79100d07 893 pyspi-0.6.1-1.3.stripped.dsc
56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz
@@ -16,6 +18,8 @@
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz

View File

@@ -1,5 +1,5 @@
Loading packages...
[!] Unable to import file /pyspi_0.6.1.orig.tar.gz into pool: unable to import into pool: file ${HOME}/.aptly/pool/de/f3/pyspi_0.6.1.orig.tar.gz already exists
[!] Unable to import file /pyspi_0.6.1.orig.tar.gz into pool: unable to import into pool: file ${HOME}/.aptly/pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz already exists
[!] Some files were skipped due to errors:
/pyspi_0.6.1-1.3.dsc
ERROR: some files failed to be added

View File

@@ -19,7 +19,7 @@ class AddRepo1Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages repo1", "repo_show")
# check pool
self.check_exists('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo2Test(BaseTest):
@@ -36,10 +36,10 @@ class AddRepo2Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages repo2", "repo_show")
# check pool
self.check_exists('pool/22/ff/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/b7/2c/pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/de/f3/pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/2f/5b/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/28/9d/3aefa970876e9c43686ce2b02f47_pyspi-0.6.1-1.3.stripped.dsc')
class AddRepo3Test(BaseTest):
@@ -56,11 +56,11 @@ class AddRepo3Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages repo3", "repo_show")
# check pool
self.check_exists('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/22/ff/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/b7/2c/pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/de/f3/pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/2f/5b/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/28/9d/3aefa970876e9c43686ce2b02f47_pyspi-0.6.1-1.3.stripped.dsc')
class AddRepo4Test(BaseTest):
@@ -97,10 +97,10 @@ class AddRepo4Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages repo4", "repo_show")
# check pool
self.check_exists('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/22/ff/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/b7/2c/pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/de/f3/pyspi_0.6.1.orig.tar.gz')
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
path = os.path.join(self.tempSrcDir, "01", "libboost-program-options-dev_1.49.0.1_i386.deb")
if os.path.exists(path):
@@ -198,8 +198,8 @@ class AddRepo9Test(BaseTest):
def prepare(self):
super(AddRepo9Test, self).prepare()
os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool/de/f3/"))
with open(os.path.join(os.environ["HOME"], ".aptly", "pool/de/f3/pyspi_0.6.1.orig.tar.gz"), "w") as f:
os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool/64/06/"))
with open(os.path.join(os.environ["HOME"], ".aptly", "pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz"), "w") as f:
f.write("abcd")
@@ -248,7 +248,7 @@ class AddRepo12Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages repo12", "repo_show")
# check pool
self.check_exists('pool/72/16/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
self.check_exists('pool/ef/ae/69921b97494e40437712053b60a5_dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
class AddRepo13Test(BaseTest):
@@ -265,8 +265,8 @@ class AddRepo13Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages repo13", "repo_show")
# check pool
self.check_exists('pool/72/16/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
self.check_exists('pool/b7/2c/pyspi_0.6.1-1.3.dsc')
self.check_exists('pool/ef/ae/69921b97494e40437712053b60a5_dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
class AddRepo14Test(BaseTest):
@@ -275,14 +275,15 @@ class AddRepo14Test(BaseTest):
"""
fixtureCmds = [
"aptly repo create -comment=Repo14 -distribution=squeeze repo14",
"aptly repo add repo14 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb"
"aptly repo add repo14 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb",
"aptly publish repo -distribution=test1 -skip-signing repo14"
]
runCmd = "aptly repo add repo14 $aptlyroot/pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb"
runCmd = "aptly repo add repo14 $aptlyroot/public/pool/"
def check(self):
super(AddRepo14Test, self).check()
# check pool
self.check_file_not_empty('pool/00/35/libboost-program-options-dev_1.49.0.1_i386.deb')
self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo15Test(BaseTest):

View File

@@ -25,9 +25,9 @@ class IncludeRepo1Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages unstable", "repo_show")
# check pool
self.check_exists('pool//20/81/hardlink_0.2.1_amd64.deb')
self.check_exists('pool/4e/fc/hardlink_0.2.1.dsc')
self.check_exists('pool/8e/2c/hardlink_0.2.1.tar.gz')
self.check_exists('pool/66/83/99580590bf1ffcd9eb161b6e5747_hardlink_0.2.1_amd64.deb')
self.check_exists('pool/c0/d7/458aa2ca3886cd6885f395a289ef_hardlink_0.2.1.dsc')
self.check_exists('pool/4d/f0/adce005526a1f0e1b38171ddb1f0_hardlink_0.2.1.tar.gz')
class IncludeRepo2Test(BaseTest):
@@ -46,9 +46,9 @@ class IncludeRepo2Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages my-unstable", "repo_show")
# check pool
self.check_exists('pool//20/81/hardlink_0.2.1_amd64.deb')
self.check_exists('pool/4e/fc/hardlink_0.2.1.dsc')
self.check_exists('pool/8e/2c/hardlink_0.2.1.tar.gz')
self.check_exists('pool/66/83/99580590bf1ffcd9eb161b6e5747_hardlink_0.2.1_amd64.deb')
self.check_exists('pool/c0/d7/458aa2ca3886cd6885f395a289ef_hardlink_0.2.1.dsc')
self.check_exists('pool/4d/f0/adce005526a1f0e1b38171ddb1f0_hardlink_0.2.1.tar.gz')
class IncludeRepo3Test(BaseTest):
@@ -101,9 +101,9 @@ class IncludeRepo5Test(BaseTest):
self.check_cmd_output("aptly repo show -with-packages unstable", "repo_show")
# check pool
self.check_exists('pool//20/81/hardlink_0.2.1_amd64.deb')
self.check_exists('pool/4e/fc/hardlink_0.2.1.dsc')
self.check_exists('pool/8e/2c/hardlink_0.2.1.tar.gz')
self.check_exists('pool/66/83/99580590bf1ffcd9eb161b6e5747_hardlink_0.2.1_amd64.deb')
self.check_exists('pool/c0/d7/458aa2ca3886cd6885f395a289ef_hardlink_0.2.1.dsc')
self.check_exists('pool/4d/f0/adce005526a1f0e1b38171ddb1f0_hardlink_0.2.1.tar.gz')
for path in ["hardlink_0.2.1.dsc", "hardlink_0.2.1.tar.gz", "hardlink_0.2.1_amd64.changes", "hardlink_0.2.1_amd64.deb"]:
path = os.path.join(self.tempSrcDir, "01", path)

View File

@@ -1,51 +1,57 @@
Package: pyspi
Binary: python-at-spi
Version: 0.6.1-1.3
Maintainer: Jose Carlos Garcia Sogo <jsogo@debian.org>
Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev
Architecture: any
Standards-Version: 3.7.3
Format: 1.0
Files:
22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz
b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz
Homepage: http://people.redhat.com/zcerza/dogtail
Checksums-Sha256:
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz
Checksums-Sha1:
95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz
56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk
Checksums-Sha512:
fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc
References to package:
local repo [a]
Package: pyspi
Binary: python-at-spi
Version: 0.6.1-1.3
Maintainer: Jose Carlos Garcia Sogo <jsogo@debian.org>
Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev
Architecture: any
Standards-Version: 3.7.3
Format: 1.0
Files:
d95c4fb8bf5066968b524e04f35c6d34 458 pyspi_0.6.1-1.3.conflict.dsc
d41d8cd98f00b204e9800998ecf8427e 0 pyspi_0.6.1.orig.tar.gz
Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk
Checksums-Sha512:
ec9b3ea45d9a14f341c947bfd4b4d70ee508f9ffe9374ff2eceaa5df45ee48e3103f67d0af57d62308fee62957dae2b60c4ff5649543ea6dbfef1bccf151b27e 458 pyspi_0.6.1-1.3.conflict.dsc
Checksums-Sha256:
33dc6feab9ff1cf863b27f4d622985fe0114252d157a744dcc3d575bf7cfaad8 458 pyspi_0.6.1-1.3.conflict.dsc
Checksums-Sha1:
4d94f5e09bc745af159ddf9ce7a13a84ac3434d0 458 pyspi_0.6.1-1.3.conflict.dsc
Homepage: http://people.redhat.com/zcerza/dogtail
References to package:
local repo [b]
22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz
2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz
33dc6feab9ff1cf863b27f4d622985fe0114252d157a744dcc3d575bf7cfaad8 458 pyspi_0.6.1-1.3.conflict.dsc
384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz
4d94f5e09bc745af159ddf9ce7a13a84ac3434d0 458 pyspi_0.6.1-1.3.conflict.dsc
56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc
64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz
95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz
9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz
b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc
c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz
cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e 0 pyspi_0.6.1.orig.tar.gz
d41d8cd98f00b204e9800998ecf8427e 0 pyspi_0.6.1.orig.tar.gz
d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc
d95c4fb8bf5066968b524e04f35c6d34 458 pyspi_0.6.1-1.3.conflict.dsc
da39a3ee5e6b4b0d3255bfef95601890afd80709 0 pyspi_0.6.1.orig.tar.gz
def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 0 pyspi_0.6.1.orig.tar.gz
ec9b3ea45d9a14f341c947bfd4b4d70ee508f9ffe9374ff2eceaa5df45ee48e3103f67d0af57d62308fee62957dae2b60c4ff5649543ea6dbfef1bccf151b27e 458 pyspi_0.6.1-1.3.conflict.dsc
fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc
Architecture: any
Architecture: any
Binary: python-at-spi
Binary: python-at-spi
Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev
Build-Depends: debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev
Checksums-Sha1:
Checksums-Sha1:
Checksums-Sha256:
Checksums-Sha256:
Checksums-Sha512:
Checksums-Sha512:
Files:
Files:
Format: 1.0
Format: 1.0
Homepage: http://people.redhat.com/zcerza/dogtail
Homepage: http://people.redhat.com/zcerza/dogtail
Maintainer: Jose Carlos Garcia Sogo <jsogo@debian.org>
Maintainer: Jose Carlos Garcia Sogo <jsogo@debian.org>
Package: pyspi
Package: pyspi
References to package:
References to package:
Standards-Version: 3.7.3
Standards-Version: 3.7.3
Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk
Vcs-Svn: svn://svn.tribulaciones.org/srv/svn/pyspi/trunk
Version: 0.6.1-1.3
Version: 0.6.1-1.3

View File

@@ -27,7 +27,7 @@ class PackagesAPITestShow(APITest):
'Build-Depends': 'debhelper (>= 5), cdbs, libatspi-dev, python-pyrex, python-support (>= 0.4), python-all-dev, libx11-dev',
'Checksums-Sha1': ' 95a2468e4bbce730ba286f2211fa41861b9f1d90 3456 pyspi_0.6.1-1.3.diff.gz\n 56c8a9b1f4ab636052be8966690998cbe865cd6c 1782 pyspi_0.6.1-1.3.dsc\n 9694b80acc171c0a5bc99f707933864edfce555e 29063 pyspi_0.6.1.orig.tar.gz\n',
'Checksums-Sha256': ' 2e770b28df948f3197ed0b679bdea99f3f2bf745e9ddb440c677df9c3aeaee3c 3456 pyspi_0.6.1-1.3.diff.gz\n d494aaf526f1ec6b02f14c2f81e060a5722d6532ddc760ec16972e45c2625989 1782 pyspi_0.6.1-1.3.dsc\n 64069ee828c50b1c597d10a3fefbba279f093a4723965388cdd0ac02f029bfb9 29063 pyspi_0.6.1.orig.tar.gz\n',
'Checksums-Sha512': ' fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc\n',
'Checksums-Sha512': ' 384b5e94b4113262e41bda1a2563f4f439cb8c97f43e2caefe16d7626718c21b36d3145b915eed24053eaa7fe3b6186494a87a3fcf9627f6e653b54bb3caa897 3456 pyspi_0.6.1-1.3.diff.gz\n fde06b7dc5762a04986d0669420822f6a1e82b195322ae9cbd2dae40bda557c57ad77fe3546007ea645f801c4cd30ef4eb0e96efb2dee6b71c4c9a187d643683 1782 pyspi_0.6.1-1.3.dsc\n c278f52953203292bcc828bcf05aee456b160f91716f51ec1a1dbbcdb8b08fc29183d0a1135629fc0ebe86a3e84cedc685c3aa1714b70cc5db8877d40e754d7f 29063 pyspi_0.6.1.orig.tar.gz\n',
'Files': ' 22ff26db69b73d3438fdde21ab5ba2f1 3456 pyspi_0.6.1-1.3.diff.gz\n b72cb94699298a117b7c82641c68b6fd 1782 pyspi_0.6.1-1.3.dsc\n def336bd566ea688a06ec03db7ccf1f4 29063 pyspi_0.6.1.orig.tar.gz\n',
'FilesHash': '3a8b37cbd9a3559e',
'Format': '1.0',

View File

@@ -37,6 +37,11 @@ type ChecksumInfo struct {
SHA512 string
}
// Complete checks if all the checksums are present
func (cksum *ChecksumInfo) Complete() bool {
	for _, sum := range []string{cksum.MD5, cksum.SHA1, cksum.SHA256, cksum.SHA512} {
		if sum == "" {
			return false
		}
	}
	return true
}
// ChecksumsForFile generates size, MD5, SHA1 & SHA256 checksums for given file
func ChecksumsForFile(path string) (ChecksumInfo, error) {
file, err := os.Open(path)

View File

@@ -20,6 +20,7 @@ type ConfigStructure struct {
GpgDisableSign bool `json:"gpgDisableSign"`
GpgDisableVerify bool `json:"gpgDisableVerify"`
DownloadSourcePackages bool `json:"downloadSourcePackages"`
SkipLegacyPool bool `json:"skipLegacyPool"`
PpaDistributorID string `json:"ppaDistributorID"`
PpaCodename string `json:"ppaCodename"`
SkipContentsPublishing bool `json:"skipContentsPublishing"`
@@ -81,6 +82,7 @@ var Config = ConfigStructure{
GpgDisableSign: false,
GpgDisableVerify: false,
DownloadSourcePackages: false,
SkipLegacyPool: false,
PpaDistributorID: "ubuntu",
PpaCodename: "",
FileSystemPublishRoots: map[string]FileSystemPublishRoot{},

View File

@@ -65,6 +65,7 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
" \"gpgDisableSign\": false,\n"+
" \"gpgDisableVerify\": false,\n"+
" \"downloadSourcePackages\": false,\n"+
" \"skipLegacyPool\": false,\n"+
" \"ppaDistributorID\": \"\",\n"+
" \"ppaCodename\": \"\",\n"+
" \"skipContentsPublishing\": false,\n"+

24
vendor/github.com/pkg/errors/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

11
vendor/github.com/pkg/errors/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,11 @@
language: go
go_import_path: github.com/pkg/errors
go:
- 1.4.3
- 1.5.4
- 1.6.2
- 1.7.1
- tip
script:
- go test -v ./...

23
vendor/github.com/pkg/errors/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,23 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

52
vendor/github.com/pkg/errors/README.md generated vendored Normal file
View File

@@ -0,0 +1,52 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
Package errors provides simple error handling primitives.
`go get github.com/pkg/errors`
The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
## Adding context to an error
The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrap(err, "read failed")
}
```
## Retrieving the cause of an error
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
// handle specifically
default:
// unknown error
}
```
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
## Contributing
We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
Before proposing a change, please discuss your change by raising an issue.
## Licence
BSD-2-Clause

32
vendor/github.com/pkg/errors/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,32 @@
version: build-{build}.{branch}
clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed
environment:
GOPATH: C:\gopath
platform:
- x64
# http://www.appveyor.com/docs/installed-software
install:
# some helpful output for debugging builds
- go version
- go env
# pre-installed MinGW at C:\MinGW is 32bit only
# but MSYS2 at C:\msys64 has mingw64
- set PATH=C:\msys64\mingw64\bin;%PATH%
- gcc --version
- g++ --version
build_script:
- go install -v ./...
test_script:
- set PATH=C:\gopath\bin;%PATH%
- go test -v ./...
#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off

59
vendor/github.com/pkg/errors/bench_test.go generated vendored Normal file
View File

@@ -0,0 +1,59 @@
// +build go1.7
package errors
import (
"fmt"
"testing"
stderrors "errors"
)
func noErrors(at, depth int) error {
if at >= depth {
return stderrors.New("no error")
}
return noErrors(at+1, depth)
}
func yesErrors(at, depth int) error {
if at >= depth {
return New("ye error")
}
return yesErrors(at+1, depth)
}
func BenchmarkErrors(b *testing.B) {
var toperr error
type run struct {
stack int
std bool
}
runs := []run{
{10, false},
{10, true},
{100, false},
{100, true},
{1000, false},
{1000, true},
}
for _, r := range runs {
part := "pkg/errors"
if r.std {
part = "errors"
}
name := fmt.Sprintf("%s-stack-%d", part, r.stack)
b.Run(name, func(b *testing.B) {
var err error
f := yesErrors
if r.std {
f = noErrors
}
b.ReportAllocs()
for i := 0; i < b.N; i++ {
err = f(0, r.stack)
}
b.StopTimer()
toperr = err
})
}
}

269
vendor/github.com/pkg/errors/errors.go generated vendored Normal file
View File

@@ -0,0 +1,269 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and a message, respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// causer interface is not exported by this package, but is considered a part
// of stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
	return &fundamental{stack: callers(), msg: message}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
	msg := fmt.Sprintf(format, args...)
	return &fundamental{stack: callers(), msg: msg}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
	msg string
	*stack
}

// Error returns the message the error was created with.
func (f *fundamental) Error() string { return f.msg }

// Format implements fmt.Formatter.
// %s and %v print the message; %+v additionally prints the recorded
// stack trace after the message; %q prints the message as a quoted
// Go string.
func (f *fundamental) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			io.WriteString(s, f.msg)
			f.stack.Format(s, verb)
			return
		}
		// plain %v behaves exactly like %s
		fallthrough
	case 's':
		io.WriteString(s, f.msg)
	case 'q':
		fmt.Fprintf(s, "%q", f.msg)
	}
}
// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
	if err != nil {
		return &withStack{error: err, stack: callers()}
	}
	return nil
}
// withStack annotates an existing error with a stack trace captured
// at the point the wrapper was created.
type withStack struct {
	error
	*stack
}

// Cause returns the wrapped error.
func (w *withStack) Cause() error { return w.error }

// Format implements fmt.Formatter.
// %+v prints the cause in extended form followed by this wrapper's
// stack trace; %s and plain %v print the error text; %q quotes it.
func (w *withStack) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v", w.Cause())
			w.stack.Format(s, verb)
			return
		}
		// plain %v behaves exactly like %s
		fallthrough
	case 's':
		io.WriteString(s, w.Error())
	case 'q':
		fmt.Fprintf(s, "%q", w.Error())
	}
}
// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
	if err == nil {
		return nil
	}
	annotated := &withMessage{cause: err, msg: message}
	return &withStack{error: annotated, stack: callers()}
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	annotated := &withMessage{cause: err, msg: fmt.Sprintf(format, args...)}
	return &withStack{error: annotated, stack: callers()}
}
// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
	if err != nil {
		return &withMessage{cause: err, msg: message}
	}
	return nil
}
// withMessage annotates an existing error with an additional message
// but records no stack trace of its own.
type withMessage struct {
	cause error
	msg   string
}

// Error returns the annotation followed by the cause's message.
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }

// Cause returns the wrapped error.
func (w *withMessage) Cause() error { return w.cause }

// Format implements fmt.Formatter.
// %+v prints the cause in extended form on its own line followed by
// the annotation; %s, plain %v and %q print the combined error text.
func (w *withMessage) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v\n", w.Cause())
			io.WriteString(s, w.msg)
			return
		}
		// plain %v behaves exactly like %s
		fallthrough
	case 's', 'q':
		io.WriteString(s, w.Error())
	}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
//     type causer interface {
//            Cause() error
//     }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
	type causer interface {
		Cause() error
	}

	// Unwrap until we reach a value that cannot unwrap any further.
	// A nil err fails the type assertion immediately, so nil in
	// yields nil out.
	for {
		wrapped, ok := err.(causer)
		if !ok {
			return err
		}
		err = wrapped.Cause()
	}
}

226
vendor/github.com/pkg/errors/errors_test.go generated vendored Normal file
View File

@@ -0,0 +1,226 @@
package errors
import (
"errors"
"fmt"
"io"
"reflect"
"testing"
)
func TestNew(t *testing.T) {
tests := []struct {
err string
want error
}{
{"", fmt.Errorf("")},
{"foo", fmt.Errorf("foo")},
{"foo", New("foo")},
{"string with format specifiers: %v", errors.New("string with format specifiers: %v")},
}
for _, tt := range tests {
got := New(tt.err)
if got.Error() != tt.want.Error() {
t.Errorf("New.Error(): got: %q, want %q", got, tt.want)
}
}
}
func TestWrapNil(t *testing.T) {
got := Wrap(nil, "no error")
if got != nil {
t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWrap(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"},
}
for _, tt := range tests {
got := Wrap(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want)
}
}
}
type nilError struct{}
func (nilError) Error() string { return "nil error" }
func TestCause(t *testing.T) {
x := New("error")
tests := []struct {
err error
want error
}{{
// nil error is nil
err: nil,
want: nil,
}, {
// explicit nil error is nil
err: (error)(nil),
want: nil,
}, {
// typed nil is nil
err: (*nilError)(nil),
want: (*nilError)(nil),
}, {
// uncaused error is unaffected
err: io.EOF,
want: io.EOF,
}, {
// caused error returns cause
err: Wrap(io.EOF, "ignored"),
want: io.EOF,
}, {
err: x, // return from errors.New
want: x,
}, {
WithMessage(nil, "whoops"),
nil,
}, {
WithMessage(io.EOF, "whoops"),
io.EOF,
}, {
WithStack(nil),
nil,
}, {
WithStack(io.EOF),
io.EOF,
}}
for i, tt := range tests {
got := Cause(tt.err)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want)
}
}
}
func TestWrapfNil(t *testing.T) {
got := Wrapf(nil, "no error")
if got != nil {
t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWrapf(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"},
{Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"},
}
for _, tt := range tests {
got := Wrapf(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want)
}
}
}
func TestErrorf(t *testing.T) {
tests := []struct {
err error
want string
}{
{Errorf("read error without format specifiers"), "read error without format specifiers"},
{Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"},
}
for _, tt := range tests {
got := tt.err.Error()
if got != tt.want {
t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want)
}
}
}
func TestWithStackNil(t *testing.T) {
got := WithStack(nil)
if got != nil {
t.Errorf("WithStack(nil): got %#v, expected nil", got)
}
}
func TestWithStack(t *testing.T) {
tests := []struct {
err error
want string
}{
{io.EOF, "EOF"},
{WithStack(io.EOF), "EOF"},
}
for _, tt := range tests {
got := WithStack(tt.err).Error()
if got != tt.want {
t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want)
}
}
}
func TestWithMessageNil(t *testing.T) {
got := WithMessage(nil, "no error")
if got != nil {
t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got)
}
}
func TestWithMessage(t *testing.T) {
tests := []struct {
err error
message string
want string
}{
{io.EOF, "read error", "read error: EOF"},
{WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"},
}
for _, tt := range tests {
got := WithMessage(tt.err, tt.message).Error()
if got != tt.want {
t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want)
}
}
}
// errors.New, etc values are not expected to be compared by value
// but the change in errors#27 made them incomparable. Assert that
// various kinds of errors have a functional equality operator, even
// if the result of that equality is always false.
func TestErrorEquality(t *testing.T) {
vals := []error{
nil,
io.EOF,
errors.New("EOF"),
New("EOF"),
Errorf("EOF"),
Wrap(io.EOF, "EOF"),
Wrapf(io.EOF, "EOF%d", 2),
WithMessage(nil, "whoops"),
WithMessage(io.EOF, "whoops"),
WithStack(io.EOF),
WithStack(nil),
}
for i := range vals {
for j := range vals {
_ = vals[i] == vals[j] // mustn't panic
}
}
}

205
vendor/github.com/pkg/errors/example_test.go generated vendored Normal file
View File

@@ -0,0 +1,205 @@
package errors_test
import (
"fmt"
"github.com/pkg/errors"
)
func ExampleNew() {
err := errors.New("whoops")
fmt.Println(err)
// Output: whoops
}
func ExampleNew_printf() {
err := errors.New("whoops")
fmt.Printf("%+v", err)
// Example output:
// whoops
// github.com/pkg/errors_test.ExampleNew_printf
// /home/dfc/src/github.com/pkg/errors/example_test.go:17
// testing.runExample
// /home/dfc/go/src/testing/example.go:114
// testing.RunExamples
// /home/dfc/go/src/testing/example.go:38
// testing.(*M).Run
// /home/dfc/go/src/testing/testing.go:744
// main.main
// /github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /home/dfc/go/src/runtime/proc.go:183
// runtime.goexit
// /home/dfc/go/src/runtime/asm_amd64.s:2059
}
func ExampleWithMessage() {
cause := errors.New("whoops")
err := errors.WithMessage(cause, "oh noes")
fmt.Println(err)
// Output: oh noes: whoops
}
func ExampleWithStack() {
cause := errors.New("whoops")
err := errors.WithStack(cause)
fmt.Println(err)
// Output: whoops
}
func ExampleWithStack_printf() {
cause := errors.New("whoops")
err := errors.WithStack(cause)
fmt.Printf("%+v", err)
// Example Output:
// whoops
// github.com/pkg/errors_test.ExampleWithStack_printf
// /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55
// testing.runExample
// /usr/lib/go/src/testing/example.go:114
// testing.RunExamples
// /usr/lib/go/src/testing/example.go:38
// testing.(*M).Run
// /usr/lib/go/src/testing/testing.go:744
// main.main
// github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /usr/lib/go/src/runtime/proc.go:183
// runtime.goexit
// /usr/lib/go/src/runtime/asm_amd64.s:2086
// github.com/pkg/errors_test.ExampleWithStack_printf
// /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56
// testing.runExample
// /usr/lib/go/src/testing/example.go:114
// testing.RunExamples
// /usr/lib/go/src/testing/example.go:38
// testing.(*M).Run
// /usr/lib/go/src/testing/testing.go:744
// main.main
// github.com/pkg/errors/_test/_testmain.go:106
// runtime.main
// /usr/lib/go/src/runtime/proc.go:183
// runtime.goexit
// /usr/lib/go/src/runtime/asm_amd64.s:2086
}
func ExampleWrap() {
cause := errors.New("whoops")
err := errors.Wrap(cause, "oh noes")
fmt.Println(err)
// Output: oh noes: whoops
}
func fn() error {
e1 := errors.New("error")
e2 := errors.Wrap(e1, "inner")
e3 := errors.Wrap(e2, "middle")
return errors.Wrap(e3, "outer")
}
func ExampleCause() {
err := fn()
fmt.Println(err)
fmt.Println(errors.Cause(err))
// Output: outer: middle: inner: error
// error
}
func ExampleWrap_extended() {
err := fn()
fmt.Printf("%+v\n", err)
// Example output:
// error
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:47
// github.com/pkg/errors_test.ExampleCause_printf
// /home/dfc/src/github.com/pkg/errors/example_test.go:63
// testing.runExample
// /home/dfc/go/src/testing/example.go:114
// testing.RunExamples
// /home/dfc/go/src/testing/example.go:38
// testing.(*M).Run
// /home/dfc/go/src/testing/testing.go:744
// main.main
// /github.com/pkg/errors/_test/_testmain.go:104
// runtime.main
// /home/dfc/go/src/runtime/proc.go:183
// runtime.goexit
// /home/dfc/go/src/runtime/asm_amd64.s:2059
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle
// github.com/pkg/errors_test.fn
// /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer
}
func ExampleWrapf() {
cause := errors.New("whoops")
err := errors.Wrapf(cause, "oh noes #%d", 2)
fmt.Println(err)
// Output: oh noes #2: whoops
}
// ExampleErrorf_extended shows %+v rendering of an Errorf error: the
// formatted message followed by the stack recorded at the Errorf call.
// Paths and line numbers are machine-specific, hence the illustrative
// "Example output" instead of a checked Output block.
func ExampleErrorf_extended() {
	err := errors.Errorf("whoops: %s", "foo")
	fmt.Printf("%+v", err)
	// Example output:
	// whoops: foo
	// github.com/pkg/errors_test.ExampleErrorf
	// /home/dfc/src/github.com/pkg/errors/example_test.go:101
	// testing.runExample
	// /home/dfc/go/src/testing/example.go:114
	// testing.RunExamples
	// /home/dfc/go/src/testing/example.go:38
	// testing.(*M).Run
	// /home/dfc/go/src/testing/testing.go:744
	// main.main
	// /github.com/pkg/errors/_test/_testmain.go:102
	// runtime.main
	// /home/dfc/go/src/runtime/proc.go:183
	// runtime.goexit
	// /home/dfc/go/src/runtime/asm_amd64.s:2059
}
// Example_stackTrace retrieves the recorded stack from the root cause
// via the stackTracer interface and prints its top two frames.
// Cause unwraps first, so the stack shown is the one recorded at the
// innermost constructor (see the TestStackTrace comments in stack_test.go).
func Example_stackTrace() {
	type stackTracer interface {
		StackTrace() errors.StackTrace
	}
	err, ok := errors.Cause(fn()).(stackTracer)
	if !ok {
		panic("oops, err does not implement stackTracer")
	}
	st := err.StackTrace()
	fmt.Printf("%+v", st[0:2]) // top two frames
	// Example output:
	// github.com/pkg/errors_test.fn
	// /home/dfc/src/github.com/pkg/errors/example_test.go:47
	// github.com/pkg/errors_test.Example_stackTrace
	// /home/dfc/src/github.com/pkg/errors/example_test.go:127
}
// ExampleCause_printf shows %v printing of a wrapped error: only the
// messages appear, joined outermost-first with ": ".
func ExampleCause_printf() {
	err := errors.Wrap(func() error {
		return func() error {
			// The original passed fmt.Sprintf("world") here; Sprintf with a
			// constant string and no arguments is a vet-flagged no-op, so the
			// constant is passed directly. Output is unchanged.
			return errors.Errorf("hello %s", "world")
		}()
	}(), "failed")
	fmt.Printf("%v", err)
	// Output: failed: hello world
}

535
vendor/github.com/pkg/errors/format_test.go generated vendored Normal file
View File

@@ -0,0 +1,535 @@
package errors
import (
"errors"
"fmt"
"io"
"regexp"
"strings"
"testing"
)
func TestFormatNew(t *testing.T) { // verifies %s/%v/%+v/%q rendering of New errors; NOTE(review): the %+v wants encode exact source lines (":26" upstream) — do not insert or delete lines in this file
	tests := []struct {
		error
		format string
		want   string
	}{{
		New("error"),
		"%s",
		"error",
	}, {
		New("error"),
		"%v",
		"error",
	}, {
		New("error"),
		"%+v",
		"error\n" +
			"github.com/pkg/errors.TestFormatNew\n" +
			"\t.+/github.com/pkg/errors/format_test.go:26",
	}, {
		New("error"),
		"%q",
		`"error"`,
	}}
	for i, tt := range tests {
		testFormatRegexp(t, i, tt.error, tt.format, tt.want) // wants are regexps matched line by line
	}
}
func TestFormatErrorf(t *testing.T) { // verifies %s/%v/%+v rendering of Errorf errors; NOTE(review): line-number sensitive — do not insert or delete lines in this file
	tests := []struct {
		error
		format string
		want   string
	}{{
		Errorf("%s", "error"),
		"%s",
		"error",
	}, {
		Errorf("%s", "error"),
		"%v",
		"error",
	}, {
		Errorf("%s", "error"),
		"%+v",
		"error\n" +
			"github.com/pkg/errors.TestFormatErrorf\n" +
			"\t.+/github.com/pkg/errors/format_test.go:56",
	}}
	for i, tt := range tests {
		testFormatRegexp(t, i, tt.error, tt.format, tt.want)
	}
}
func TestFormatWrap(t *testing.T) { // verifies fmt verbs on Wrap over both errors-package and stdlib causes; NOTE(review): line-number sensitive — do not insert or delete lines in this file
	tests := []struct {
		error
		format string
		want   string
	}{{
		Wrap(New("error"), "error2"),
		"%s",
		"error2: error",
	}, {
		Wrap(New("error"), "error2"),
		"%v",
		"error2: error",
	}, {
		Wrap(New("error"), "error2"),
		"%+v",
		"error\n" +
			"github.com/pkg/errors.TestFormatWrap\n" +
			"\t.+/github.com/pkg/errors/format_test.go:82",
	}, {
		Wrap(io.EOF, "error"),
		"%s",
		"error: EOF",
	}, {
		Wrap(io.EOF, "error"),
		"%v",
		"error: EOF",
	}, {
		Wrap(io.EOF, "error"),
		"%+v",
		"EOF\n" +
			"error\n" +
			"github.com/pkg/errors.TestFormatWrap\n" +
			"\t.+/github.com/pkg/errors/format_test.go:96",
	}, {
		Wrap(Wrap(io.EOF, "error1"), "error2"),
		"%+v",
		"EOF\n" +
			"error1\n" +
			"github.com/pkg/errors.TestFormatWrap\n" +
			"\t.+/github.com/pkg/errors/format_test.go:103\n",
	}, {
		Wrap(New("error with space"), "context"),
		"%q",
		`"context: error with space"`,
	}}
	for i, tt := range tests {
		testFormatRegexp(t, i, tt.error, tt.format, tt.want)
	}
}
func TestFormatWrapf(t *testing.T) { // verifies fmt verbs on Wrapf; NOTE(review): line-number sensitive — do not insert or delete lines in this file
	tests := []struct {
		error
		format string
		want   string
	}{{
		Wrapf(io.EOF, "error%d", 2),
		"%s",
		"error2: EOF",
	}, {
		Wrapf(io.EOF, "error%d", 2),
		"%v",
		"error2: EOF",
	}, {
		Wrapf(io.EOF, "error%d", 2),
		"%+v",
		"EOF\n" +
			"error2\n" +
			"github.com/pkg/errors.TestFormatWrapf\n" +
			"\t.+/github.com/pkg/errors/format_test.go:134",
	}, {
		Wrapf(New("error"), "error%d", 2),
		"%s",
		"error2: error",
	}, {
		Wrapf(New("error"), "error%d", 2),
		"%v",
		"error2: error",
	}, {
		Wrapf(New("error"), "error%d", 2),
		"%+v",
		"error\n" +
			"github.com/pkg/errors.TestFormatWrapf\n" +
			"\t.+/github.com/pkg/errors/format_test.go:149",
	}}
	for i, tt := range tests {
		testFormatRegexp(t, i, tt.error, tt.format, tt.want)
	}
}
func TestFormatWithStack(t *testing.T) { // verifies WithStack rendering; wants are whole blocks compared via testFormatCompleteCompare; NOTE(review): line-number sensitive — do not insert or delete lines in this file
	tests := []struct {
		error
		format string
		want   []string
	}{{
		WithStack(io.EOF),
		"%s",
		[]string{"EOF"},
	}, {
		WithStack(io.EOF),
		"%v",
		[]string{"EOF"},
	}, {
		WithStack(io.EOF),
		"%+v",
		[]string{"EOF",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:175"},
	}, {
		WithStack(New("error")),
		"%s",
		[]string{"error"},
	}, {
		WithStack(New("error")),
		"%v",
		[]string{"error"},
	}, {
		WithStack(New("error")),
		"%+v",
		[]string{"error",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:189",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:189"},
	}, {
		WithStack(WithStack(io.EOF)), // nested WithStack on one line: exercises stack-boundary detection
		"%+v",
		[]string{"EOF",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:197",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:197"},
	}, {
		WithStack(WithStack(Wrapf(io.EOF, "message"))),
		"%+v",
		[]string{"EOF",
			"message",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:205",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:205",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:205"},
	}, {
		WithStack(Errorf("error%d", 1)),
		"%+v",
		[]string{"error1",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:216",
			"github.com/pkg/errors.TestFormatWithStack\n" +
				"\t.+/github.com/pkg/errors/format_test.go:216"},
	}}
	for i, tt := range tests {
		testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
	}
}
func TestFormatWithMessage(t *testing.T) { // verifies WithMessage rendering, alone and combined with Wrap/WithStack; NOTE(review): line-number sensitive — do not insert or delete lines in this file
	tests := []struct {
		error
		format string
		want   []string
	}{{
		WithMessage(New("error"), "error2"),
		"%s",
		[]string{"error2: error"},
	}, {
		WithMessage(New("error"), "error2"),
		"%v",
		[]string{"error2: error"},
	}, {
		WithMessage(New("error"), "error2"),
		"%+v",
		[]string{
			"error",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:244",
			"error2"},
	}, {
		WithMessage(io.EOF, "addition1"),
		"%s",
		[]string{"addition1: EOF"},
	}, {
		WithMessage(io.EOF, "addition1"),
		"%v",
		[]string{"addition1: EOF"},
	}, {
		WithMessage(io.EOF, "addition1"),
		"%+v",
		[]string{"EOF", "addition1"}, // no stack: WithMessage alone records none
	}, {
		WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
		"%v",
		[]string{"addition2: addition1: EOF"},
	}, {
		WithMessage(WithMessage(io.EOF, "addition1"), "addition2"),
		"%+v",
		[]string{"EOF", "addition1", "addition2"},
	}, {
		Wrap(WithMessage(io.EOF, "error1"), "error2"),
		"%+v",
		[]string{"EOF", "error1", "error2",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:272"},
	}, {
		WithMessage(Errorf("error%d", 1), "error2"),
		"%+v",
		[]string{"error1",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:278",
			"error2"},
	}, {
		WithMessage(WithStack(io.EOF), "error"),
		"%+v",
		[]string{
			"EOF",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:285",
			"error"},
	}, {
		WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"),
		"%+v",
		[]string{
			"EOF",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:293",
			"inside-error",
			"github.com/pkg/errors.TestFormatWithMessage\n" +
				"\t.+/github.com/pkg/errors/format_test.go:293",
			"outside-error"},
	}}
	for i, tt := range tests {
		testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)
	}
}
func TestFormatGeneric(t *testing.T) { // cross-product test: every wrapper applied recursively to every start error; NOTE(review): line-number sensitive — do not insert or delete lines in this file
	starts := []struct {
		err  error
		want []string
	}{
		{New("new-error"), []string{
			"new-error",
			"github.com/pkg/errors.TestFormatGeneric\n" +
				"\t.+/github.com/pkg/errors/format_test.go:315"},
		}, {Errorf("errorf-error"), []string{
			"errorf-error",
			"github.com/pkg/errors.TestFormatGeneric\n" +
				"\t.+/github.com/pkg/errors/format_test.go:319"},
		}, {errors.New("errors-new-error"), []string{
			"errors-new-error"}, // stdlib error: no stack block expected
		},
	}
	wrappers := []wrapper{
		{
			func(err error) error { return WithMessage(err, "with-message") },
			[]string{"with-message"},
		}, {
			func(err error) error { return WithStack(err) },
			[]string{
				"github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + // alternation covers pre/post Go 1.5 closure naming
					".+/github.com/pkg/errors/format_test.go:333",
			},
		}, {
			func(err error) error { return Wrap(err, "wrap-error") },
			[]string{
				"wrap-error",
				"github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" +
					".+/github.com/pkg/errors/format_test.go:339",
			},
		}, {
			func(err error) error { return Wrapf(err, "wrapf-error%d", 1) },
			[]string{
				"wrapf-error1",
				"github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" +
					".+/github.com/pkg/errors/format_test.go:346",
			},
		},
	}
	for s := range starts {
		err := starts[s].err
		want := starts[s].want
		testFormatCompleteCompare(t, s, err, "%+v", want, false)
		testGenericRecursive(t, err, want, wrappers, 3)
	}
}
// testFormatRegexp formats arg with format and matches the result
// line-by-line against want, where each line of want is a regular
// expression. got may have extra trailing lines; it only fails when
// want has more lines than got, or a line fails its regexp.
func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) {
	got := fmt.Sprintf(format, arg)
	gotLines := strings.Split(got, "\n")
	wantLines := strings.Split(want, "\n")
	if len(wantLines) > len(gotLines) {
		t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want)
		return
	}
	for i := range wantLines {
		ok, err := regexp.MatchString(wantLines[i], gotLines[i])
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want)
		}
	}
}
// stackLineR classifies a line as part of a stack trace: any line
// containing a '.' (function and file paths always do, while the plain
// message lines used in these tests never do).
var stackLineR = regexp.MustCompile(`\.`)

// parseBlocks parses input into a slice, where:
//   - in case an entry contains a newline, it's a stack trace
//   - in case an entry contains no newline, it's a solo line.
//
// Detecting stack boundaries only works in case the WithStack-calls are
// to be found on the same line, that's why it is optional here.
//
// Example use:
//
//	for _, e := range blocks {
//		if strings.ContainsAny(e, "\n") {
//			// Match as stack
//		} else {
//			// Match as line
//		}
//	}
func parseBlocks(input string, detectStackboundaries bool) ([]string, error) {
	var blocks []string
	stack := ""
	wasStack := false
	lines := map[string]bool{} // lines already seen in the current stack
	for _, l := range strings.Split(input, "\n") {
		isStackLine := stackLineR.MatchString(l)
		switch {
		case !isStackLine && wasStack:
			// stack just ended: flush it, then record the solo line
			blocks = append(blocks, stack, l)
			stack = ""
			lines = map[string]bool{}
		case isStackLine:
			if wasStack {
				// Detecting two stacks after another is possible because lines
				// repeat in our tests due to WithStack(WithStack(io.EOF)) on the
				// same line: seeing an already-seen line starts a new stack.
				if detectStackboundaries {
					if lines[l] {
						if len(stack) == 0 {
							return nil, errors.New("len of block must not be zero here")
						}
						blocks = append(blocks, stack)
						stack = l
						lines = map[string]bool{l: true}
						continue
					}
				}
				stack = stack + "\n" + l
			} else {
				stack = l
			}
			lines[l] = true
		case !isStackLine && !wasStack:
			blocks = append(blocks, l)
		default:
			return nil, errors.New("must not happen")
		}
		wasStack = isStackLine
	}
	// Use up any stack still buffered at end of input
	if stack != "" {
		blocks = append(blocks, stack)
	}
	return blocks, nil
}
// testFormatCompleteCompare formats arg, splits the output into blocks
// via parseBlocks, and compares block-for-block against want: single-line
// entries are compared verbatim, multi-line entries as regexps.
func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) {
	gotStr := fmt.Sprintf(format, arg)
	got, err := parseBlocks(gotStr, detectStackBoundaries)
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != len(want) {
		t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q",
			n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr)
	}
	for i := range got {
		if !strings.ContainsAny(want[i], "\n") {
			// single-line block: plain message, compare verbatim
			if got[i] != want[i] {
				t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i])
			}
			continue
		}
		// multi-line block: want is a regexp over the whole stack
		ok, err := regexp.MatchString(want[i], got[i])
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n",
				n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want))
		}
	}
}
// wrapper pairs an error-wrapping function with the extra %+v blocks
// its wrapping is expected to contribute to the output.
type wrapper struct {
	wrap func(err error) error
	want []string
}
// prettyBlocks renders blocks one per line, indented by a single space,
// for use in test failure messages. The variadic prefix parameter is
// never read; it is kept only so existing call sites stay compatible.
func prettyBlocks(blocks []string, prefix ...string) string {
	// The original copied each element through fmt.Sprintf("%v", b), which
	// is a no-op on strings; join the slice directly instead.
	return " " + strings.Join(blocks, "\n ")
}
// testGenericRecursive applies every wrapper in list to beforeErr,
// checks the %+v output of each result, and recurses on it until
// maxDepth is exhausted, covering all wrapper combinations to that depth.
func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) {
	if len(beforeWant) == 0 {
		panic("beforeWant must not be empty")
	}
	for _, w := range list {
		if len(w.want) == 0 {
			panic("want must not be empty")
		}
		err := w.wrap(beforeErr)
		// Copy required because append(beforeWant, ..) below would otherwise
		// modify the caller's slice through the shared backing array.
		beforeCopy := make([]string, len(beforeWant))
		copy(beforeCopy, beforeWant)
		beforeWant := beforeCopy // deliberate shadowing of the parameter
		last := len(beforeWant) - 1
		var want []string
		// Merge two stacks behind each other: when both the previous tail and
		// the wrapper's first want are multi-line stacks they show up as one
		// output block, so their regexps are joined with a dot-all bridge.
		if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") {
			want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...)
		} else {
			want = append(beforeWant, w.want...)
		}
		testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false)
		if maxDepth > 0 {
			testGenericRecursive(t, err, want, list, maxDepth-1)
		}
	}
}

178
vendor/github.com/pkg/errors/stack.go generated vendored Normal file
View File

@@ -0,0 +1,178 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr

// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
//
// The stored value is a return address, which points just past the call
// instruction; subtracting 1 lands inside the call itself so FileLine
// reports the calling line rather than the one after it.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc, or "unknown" when the pc cannot
// be resolved to a function.
func (f Frame) file() string {
	pc := f.pc()
	if fn := runtime.FuncForPC(pc); fn != nil {
		name, _ := fn.FileLine(pc)
		return name
	}
	return "unknown"
}
// line returns the line number of source code of the
// function for this Frame's pc, or 0 when the pc cannot
// be resolved to a function.
func (f Frame) line() int {
	pc := f.pc()
	if fn := runtime.FuncForPC(pc); fn != nil {
		_, n := fn.FileLine(pc)
		return n
	}
	return 0
}
// Format formats the frame according to the fmt.Formatter interface.
//
//	%s    source file
//	%d    source line
//	%n    function name
//	%v    equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//	%+s   path of source file relative to the compile time GOPATH
//	%+v   equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
	switch verb {
	case 's':
		switch {
		case s.Flag('+'):
			pc := f.pc()
			fn := runtime.FuncForPC(pc)
			if fn == nil {
				io.WriteString(s, "unknown")
			} else {
				file, _ := fn.FileLine(pc)
				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
			}
		default:
			io.WriteString(s, path.Base(f.file()))
		}
	case 'd':
		fmt.Fprintf(s, "%d", f.line())
	case 'n':
		// Guard against invalid PCs (e.g. Frame(0)): FuncForPC returns nil
		// there, and the previous unconditional .Name() call panicked (the
		// panic was swallowed by fmt and rendered as "%!n(PANIC=...)").
		// Printing nothing matches what the tests expect for Frame(0).
		if fn := runtime.FuncForPC(f.pc()); fn != nil {
			io.WriteString(s, funcname(fn.Name()))
		}
	case 'v':
		f.Format(s, 's')
		io.WriteString(s, ":")
		f.Format(s, 'd')
	}
}
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame

// Format renders the trace according to the fmt.Formatter interface:
// %+v prints one frame per line (each delegated to Frame.Format with
// the '+' flag), %#v prints the Go-syntax form of the underlying
// []Frame, and plain %v/%s fall through to the default slice rendering
// of []Frame (which again invokes Frame.Format per element).
func (st StackTrace) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case s.Flag('+'):
			for _, f := range st {
				fmt.Fprintf(s, "\n%+v", f)
			}
		case s.Flag('#'):
			fmt.Fprintf(s, "%#v", []Frame(st))
		default:
			fmt.Fprintf(s, "%v", []Frame(st))
		}
	case 's':
		fmt.Fprintf(s, "%s", []Frame(st))
	}
}
// stack represents a stack of program counters.
type stack []uintptr

// Format prints the stack only for the %+v verb, one frame per line;
// all other verb/flag combinations deliberately produce no output
// (stack is internal and is only ever rendered via %+v).
func (s *stack) Format(st fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case st.Flag('+'):
			for _, pc := range *s {
				f := Frame(pc)
				fmt.Fprintf(st, "\n%+v", f)
			}
		}
	}
}
// StackTrace converts the recorded program counters into a slice of
// Frames, preserving order (innermost first).
func (s *stack) StackTrace() StackTrace {
	frames := make([]Frame, len(*s))
	for i, pc := range *s {
		frames[i] = Frame(pc)
	}
	return frames
}
// callers records up to 32 program counters of the current goroutine's
// stack. The skip count of 3 hides runtime.Callers itself, callers, and
// the errors-package constructor (New/Errorf/Wrap/...) that invoked it,
// so the recorded trace starts at the user's call site. NOTE(review):
// this value is fragile — any extra call layer here would skew traces.
func callers() *stack {
	const depth = 32 // maximum number of frames recorded per error
	var pcs [depth]uintptr
	n := runtime.Callers(3, pcs[:])
	var st stack = pcs[0:n]
	return &st
}
// funcname removes the path prefix component of a function's name
// reported by func.Name(): everything up to and including the last '/'
// is dropped, then everything up to and including the first '.' of the
// remainder (the package name), leaving e.g. "(*R).Write" or "main".
func funcname(name string) string {
	if slash := strings.LastIndex(name, "/"); slash >= 0 {
		name = name[slash+1:]
	}
	if dot := strings.Index(name, "."); dot >= 0 {
		name = name[dot+1:]
	}
	return name
}
// trimGOPATH returns file relative to the compile-time GOPATH.
//
// As of Go 1.6.x there is no direct way to know the compiled GOPATH at
// runtime, but the number of path segments it contributes can be
// inferred: fn.Name() returns the function name qualified by the import
// path, which does not include the GOPATH. So the file path is trimmed
// from the front until it contains exactly one more separator than the
// function name. For example, given:
//
//	GOPATH    /home/user
//	file      /home/user/src/pkg/sub/file.go
//	fn.Name() pkg/sub.Type.Method
//
// the desired output is:
//
//	pkg/sub/file.go
//
// which has one more "/" than fn.Name(). Separators are counted from the
// end of file until two more than in the name have been found, then the
// cut lands one character past that separator so the leading segment
// keeps no leading "/". If file has too few separators to satisfy the
// count it is returned unmodified.
func trimGOPATH(name, file string) string {
	const sep = "/"
	want := strings.Count(name, sep) + 2
	cut := len(file)
	for found := 0; found < want; found++ {
		j := strings.LastIndex(file[:cut], sep)
		if j == -1 {
			// not enough separators: leave file untouched
			return file
		}
		cut = j
	}
	return file[cut+len(sep):]
}

292
vendor/github.com/pkg/errors/stack_test.go generated vendored Normal file
View File

@@ -0,0 +1,292 @@
package errors
import (
"fmt"
"runtime"
"testing"
)
var initpc, _, _, _ = runtime.Caller(0) // package-scope PC capture; the tests below assume its exact source line (9 in the upstream file)
func TestFrameLine(t *testing.T) { // NOTE(review): expected values encode exact source line numbers — do not insert or delete lines in this file
	var tests = []struct {
		Frame
		want int
	}{{
		Frame(initpc),
		9,
	}, {
		func() Frame {
			var pc, _, _, _ = runtime.Caller(0)
			return Frame(pc)
		}(),
		20,
	}, {
		func() Frame {
			var pc, _, _, _ = runtime.Caller(1)
			return Frame(pc)
		}(),
		28, // Caller(1): the line where the closure is invoked, not where it is defined
	}, {
		Frame(0), // invalid PC resolves to no function
		0,
	}}
	for _, tt := range tests {
		got := tt.Frame.line()
		want := tt.want
		if want != got {
			t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got)
		}
	}
}
type X struct{} // helper receiver type for exercising %n on value vs pointer methods
func (x X) val() Frame { // value-receiver method; %n is expected to render as "X.val"
	var pc, _, _, _ = runtime.Caller(0)
	return Frame(pc)
}
func (x *X) ptr() Frame { // pointer-receiver method; %n is expected to render as "(*X).ptr"
	var pc, _, _, _ = runtime.Caller(0)
	return Frame(pc)
}
func TestFrameFormat(t *testing.T) { // verifies every Frame verb (%s/%+s/%d/%n/%v/%+v) incl. invalid PCs; NOTE(review): wants encode exact source lines — do not insert or delete lines in this file
	var tests = []struct {
		Frame
		format string
		want   string
	}{{
		Frame(initpc),
		"%s",
		"stack_test.go",
	}, {
		Frame(initpc),
		"%+s",
		"github.com/pkg/errors.init\n" +
			"\t.+/github.com/pkg/errors/stack_test.go",
	}, {
		Frame(0), // invalid PC
		"%s",
		"unknown",
	}, {
		Frame(0),
		"%+s",
		"unknown",
	}, {
		Frame(initpc),
		"%d",
		"9",
	}, {
		Frame(0),
		"%d",
		"0",
	}, {
		Frame(initpc),
		"%n",
		"init",
	}, {
		func() Frame {
			var x X
			return x.ptr()
		}(),
		"%n",
		`\(\*X\).ptr`,
	}, {
		func() Frame {
			var x X
			return x.val()
		}(),
		"%n",
		"X.val",
	}, {
		Frame(0),
		"%n",
		"", // empty regexp matches anything, so this passes for any output
	}, {
		Frame(initpc),
		"%v",
		"stack_test.go:9",
	}, {
		Frame(initpc),
		"%+v",
		"github.com/pkg/errors.init\n" +
			"\t.+/github.com/pkg/errors/stack_test.go:9",
	}, {
		Frame(0),
		"%v",
		"unknown:0",
	}}
	for i, tt := range tests {
		testFormatRegexp(t, i, tt.Frame, tt.format, tt.want)
	}
}
func TestFuncname(t *testing.T) { // table test for funcname: strips import path and package prefix from qualified names; NOTE(review): do not insert or delete lines in this file (later tests are line-number sensitive)
	tests := []struct {
		name, want string
	}{
		{"", ""},
		{"runtime.main", "main"},
		{"github.com/pkg/errors.funcname", "funcname"},
		{"funcname", "funcname"}, // no package qualifier at all
		{"io.copyBuffer", "copyBuffer"},
		{"main.(*R).Write", "(*R).Write"}, // method names keep their receiver
	}
	for _, tt := range tests {
		got := funcname(tt.name)
		want := tt.want
		if got != want {
			t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got)
		}
	}
}
func TestTrimGOPATH(t *testing.T) { // verifies trimGOPATH strips the GOPATH prefix from this file's own path; NOTE(review): do not insert or delete lines in this file (later tests are line-number sensitive)
	var tests = []struct {
		Frame
		want string
	}{{
		Frame(initpc),
		"github.com/pkg/errors/stack_test.go",
	}}
	for i, tt := range tests {
		pc := tt.Frame.pc()
		fn := runtime.FuncForPC(pc)
		file, _ := fn.FileLine(pc)
		got := trimGOPATH(fn.Name(), file) // uses the real resolved name/path of initpc
		testFormatRegexp(t, i, got, "%s", tt.want)
	}
}
func TestStackTrace(t *testing.T) { // verifies which constructor's stack each error exposes; NOTE(review): wants encode exact source lines — do not insert or delete lines in this file
	tests := []struct {
		err  error
		want []string
	}{{
		New("ooh"), []string{
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:172",
		},
	}, {
		Wrap(New("ooh"), "ahh"), []string{
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New
		},
	}, {
		Cause(Wrap(New("ooh"), "ahh")), []string{
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New
		},
	}, {
		func() error { return New("ooh") }(), []string{
			`github.com/pkg/errors.(func·009|TestStackTrace.func1)` +
				"\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller
		},
	}, {
		Cause(func() error {
			return func() error {
				return Errorf("hello %s", fmt.Sprintf("world"))
			}()
		}()), []string{
			`github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` +
				"\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf
			`github.com/pkg/errors.(func·011|TestStackTrace.func2)` +
				"\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller
			"github.com/pkg/errors.TestStackTrace\n" +
				"\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller
		},
	}}
	for i, tt := range tests {
		x, ok := tt.err.(interface {
			StackTrace() StackTrace
		})
		if !ok {
			t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err)
			continue
		}
		st := x.StackTrace()
		for j, want := range tt.want {
			testFormatRegexp(t, i, st[j], "%+v", want)
		}
	}
}
func stackTrace() StackTrace { // helper: capture the current goroutine's stack (up to 8 frames, skipping only runtime.Callers itself) as a StackTrace; TestStackTraceFormat encodes this function's exact line numbers
	const depth = 8
	var pcs [depth]uintptr
	n := runtime.Callers(1, pcs[:])
	var st stack = pcs[0:n]
	return st.StackTrace()
}
func TestStackTraceFormat(t *testing.T) { // verifies StackTrace verbs on nil, empty, and real traces; NOTE(review): wants encode exact source lines — do not insert or delete lines in this file
	tests := []struct {
		StackTrace
		format string
		want   string
	}{{
		nil, // nil trace renders like an empty slice
		"%s",
		`\[\]`,
	}, {
		nil,
		"%v",
		`\[\]`,
	}, {
		nil,
		"%+v",
		"", // %+v of an empty trace prints nothing; empty regexp matches anything
	}, {
		nil,
		"%#v",
		`\[\]errors.Frame\(nil\)`,
	}, {
		make(StackTrace, 0),
		"%s",
		`\[\]`,
	}, {
		make(StackTrace, 0),
		"%v",
		`\[\]`,
	}, {
		make(StackTrace, 0),
		"%+v",
		"",
	}, {
		make(StackTrace, 0),
		"%#v",
		`\[\]errors.Frame{}`,
	}, {
		stackTrace()[:2], // top two frames: stackTrace itself plus this test
		"%s",
		`\[stack_test.go stack_test.go\]`,
	}, {
		stackTrace()[:2],
		"%v",
		`\[stack_test.go:225 stack_test.go:272\]`,
	}, {
		stackTrace()[:2],
		"%+v",
		"\n" +
			"github.com/pkg/errors.stackTrace\n" +
			"\t.+/github.com/pkg/errors/stack_test.go:225\n" +
			"github.com/pkg/errors.TestStackTraceFormat\n" +
			"\t.+/github.com/pkg/errors/stack_test.go:276",
	}, {
		stackTrace()[:2],
		"%#v",
		`\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`,
	}}
	for i, tt := range tests {
		testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want)
	}
}