Merge branch 'master' into with_installer

This commit is contained in:
Oliver Sauder
2018-09-21 13:26:15 +02:00
committed by GitHub
30 changed files with 783 additions and 300 deletions

View File

@@ -24,11 +24,11 @@ matrix:
env: RUN_LONG_TESTS=no
fast_finish: true
include:
- go: 1.8.x
env: RUN_LONG_TESTS=no
- go: 1.9.x
env: RUN_LONG_TESTS=yes
env: RUN_LONG_TESTS=no
- go: 1.10.x
env: RUN_LONG_TESTS=yes
- go: 1.11.x
env:
- RUN_LONG_TESTS=yes
- DEPLOY_BINARIES=yes

View File

@@ -33,3 +33,4 @@ List of contributors, in chronological order:
* Petr Jediny (https://github.com/pjediny)
* Maximilian Stein (https://github.com/steinymity)
* Strajan Sebastian (https://github.com/strajansebastian)
* Artem Smirnov (https://github.com/urpylka)

View File

@@ -86,8 +86,8 @@ to prepend it or to skip this test if you're security conscious.
As Go is using repository path in import paths, it's better to clone aptly repo (not your fork) at default location:
mkdir -p ~/go/src/github.com/smira
cd ~/go/src/github.com/smira
mkdir -p ~/go/src/github.com/aptly-dev
cd ~/go/src/github.com/aptly-dev
git clone git@github.com:aptly-dev/aptly.git
cd aptly

View File

@@ -13,7 +13,7 @@ RUN_LONG_TESTS?=yes
GO_1_10_AND_HIGHER=$(shell (printf '%s\n' go1.10 $(GOVERSION) | sort -cV >/dev/null 2>&1) && echo "yes")
all: test check system-test
all: test bench check system-test
prepare:
go get -u github.com/alecthomas/gometalinter
@@ -57,6 +57,9 @@ else
go test -v `go list ./... | grep -v vendor/` -gocheck.v=true
endif
bench:
go test -v ./deb -run=nothing -bench=. -benchmem
mem.png: mem.dat mem.gp
gnuplot mem.gp
open mem.png

View File

@@ -64,14 +64,14 @@ If you would like to use nightly builds (unstable), please use following reposit
Binary executables (depends almost only on libc) are available for download from `Bintray <http://dl.bintray.com/smira/aptly/>`_.
If you have Go environment set up, you can build aptly from source by running (go 1.8+ required)::
If you have Go environment set up, you can build aptly from source by running (go 1.9+ required)::
mkdir -p $GOPATH/src/github.com/aptly-dev/aptly
git clone https://github.com/aptly-dev/aptly $GOPATH/src/github.com/aptly-dev/aptly
cd $GOPATH/src/github.com/aptly-dev/aptly
make install
Binary would be installed to ```$GOPATH/bin/aptly``.
Binary would be installed to ``$GOPATH/bin/aptly``.
Contributing
------------
@@ -90,7 +90,7 @@ Vagrant:
Docker:
- `Docker container <https://github.com/mikepurvis/aptly-docker>`_ with aptly inside by Mike Purvis
- `Docker container <https://github.com/bryanhong/docker-aptly>`_ with aptly and nginx by Bryan Hong
- `Docker container <https://github.com/urpylka/docker-aptly>`_ with aptly and nginx by Artem Smirnov
With configuration management systems:

View File

@@ -59,6 +59,8 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
return err
}
context.CollectionFactory().Flush()
if verbose {
context.Progress().ColoredPrintf("@{y}Loading local repos:@|")
}
@@ -90,6 +92,8 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
return err
}
context.CollectionFactory().Flush()
if verbose {
context.Progress().ColoredPrintf("@{y}Loading snapshots:@|")
}
@@ -118,6 +122,8 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
return err
}
context.CollectionFactory().Flush()
if verbose {
context.Progress().ColoredPrintf("@{y}Loading published repositories:@|")
}
@@ -150,6 +156,8 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
return err
}
context.CollectionFactory().Flush()
// ... and compare it to the list of all packages
context.Progress().ColoredPrintf("@{w!}Loading list of all packages...@|")
allPackageRefs := context.CollectionFactory().PackageCollection().AllPackageRefs()
@@ -192,6 +200,8 @@ func aptlyDbCleanup(cmd *commander.Command, args []string) error {
}
}
context.CollectionFactory().Flush()
// now, build a list of files that should be present in Repository (package pool)
context.Progress().ColoredPrintf("@{w!}Building list of files referenced by packages...@|")
referencedFiles := make([]string, 0, existingPackageRefs.Len())

View File

@@ -78,14 +78,17 @@ func (file *indexFile) Finalize(signer pgp.Signer) error {
file.tempFile.Close()
exts := []string{""}
cksumExts := exts
if file.compressable {
exts = append(exts, ".gz", ".bz2")
cksumExts = exts
if file.onlyGzip {
exts = []string{".gz"}
cksumExts = []string{"", ".gz"}
}
}
for _, ext := range exts {
for _, ext := range cksumExts {
var checksumInfo utils.ChecksumInfo
checksumInfo, err = utils.ChecksumsForFile(file.tempFilename + ext)

View File

@@ -2,6 +2,7 @@ package deb
import (
"bytes"
"errors"
"fmt"
"log"
"sync"
@@ -93,46 +94,68 @@ func (repo *LocalRepo) RefKey() []byte {
// LocalRepoCollection does listing, updating/adding/deleting of LocalRepos
type LocalRepoCollection struct {
*sync.RWMutex
db database.Storage
list []*LocalRepo
db database.Storage
cache map[string]*LocalRepo
}
// NewLocalRepoCollection loads LocalRepos from DB and makes up collection
func NewLocalRepoCollection(db database.Storage) *LocalRepoCollection {
result := &LocalRepoCollection{
return &LocalRepoCollection{
RWMutex: &sync.RWMutex{},
db: db,
cache: make(map[string]*LocalRepo),
}
}
blobs := db.FetchByPrefix([]byte("L"))
result.list = make([]*LocalRepo, 0, len(blobs))
for _, blob := range blobs {
r := &LocalRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding repo: %s\n", err)
} else {
result.list = append(result.list, r)
func (collection *LocalRepoCollection) search(filter func(*LocalRepo) bool, unique bool) []*LocalRepo {
result := []*LocalRepo(nil)
for _, r := range collection.cache {
if filter(r) {
result = append(result, r)
}
}
if unique && len(result) > 0 {
return result
}
collection.db.ProcessByPrefix([]byte("L"), func(key, blob []byte) error {
r := &LocalRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding local repo: %s\n", err)
return nil
}
if filter(r) {
if _, exists := collection.cache[r.UUID]; !exists {
collection.cache[r.UUID] = r
result = append(result, r)
if unique {
return errors.New("abort")
}
}
}
return nil
})
return result
}
// Add appends new repo to collection and saves it
func (collection *LocalRepoCollection) Add(repo *LocalRepo) error {
for _, r := range collection.list {
if r.Name == repo.Name {
return fmt.Errorf("local repo with name %s already exists", repo.Name)
}
_, err := collection.ByName(repo.Name)
if err == nil {
return fmt.Errorf("local repo with name %s already exists", repo.Name)
}
err := collection.Update(repo)
err = collection.Update(repo)
if err != nil {
return err
}
collection.list = append(collection.list, repo)
collection.cache[repo.UUID] = repo
return nil
}
@@ -167,58 +190,66 @@ func (collection *LocalRepoCollection) LoadComplete(repo *LocalRepo) error {
// ByName looks up repository by name
func (collection *LocalRepoCollection) ByName(name string) (*LocalRepo, error) {
for _, r := range collection.list {
if r.Name == name {
return r, nil
}
result := collection.search(func(r *LocalRepo) bool { return r.Name == name }, true)
if len(result) == 0 {
return nil, fmt.Errorf("local repo with name %s not found", name)
}
return nil, fmt.Errorf("local repo with name %s not found", name)
return result[0], nil
}
// ByUUID looks up repository by uuid
func (collection *LocalRepoCollection) ByUUID(uuid string) (*LocalRepo, error) {
for _, r := range collection.list {
if r.UUID == uuid {
return r, nil
}
if r, ok := collection.cache[uuid]; ok {
return r, nil
}
return nil, fmt.Errorf("local repo with uuid %s not found", uuid)
key := (&LocalRepo{UUID: uuid}).Key()
value, err := collection.db.Get(key)
if err == database.ErrNotFound {
return nil, fmt.Errorf("local repo with uuid %s not found", uuid)
}
if err != nil {
return nil, err
}
r := &LocalRepo{}
err = r.Decode(value)
if err == nil {
collection.cache[r.UUID] = r
}
return r, err
}
// ForEach runs method for each repository
func (collection *LocalRepoCollection) ForEach(handler func(*LocalRepo) error) error {
var err error
for _, r := range collection.list {
err = handler(r)
if err != nil {
return err
return collection.db.ProcessByPrefix([]byte("L"), func(key, blob []byte) error {
r := &LocalRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding repo: %s\n", err)
return nil
}
}
return err
return handler(r)
})
}
// Len returns number of remote repos
func (collection *LocalRepoCollection) Len() int {
return len(collection.list)
return len(collection.db.KeysByPrefix([]byte("L")))
}
// Drop removes remote repo from collection
func (collection *LocalRepoCollection) Drop(repo *LocalRepo) error {
repoPosition := -1
for i, r := range collection.list {
if r == repo {
repoPosition = i
break
}
}
if repoPosition == -1 {
if _, err := collection.db.Get(repo.Key()); err == database.ErrNotFound {
panic("local repo not found!")
}
collection.list[len(collection.list)-1], collection.list[repoPosition], collection.list =
nil, collection.list[len(collection.list)-1], collection.list[:len(collection.list)-1]
delete(collection.cache, repo.UUID)
err := collection.db.Delete(repo.Key())
if err != nil {

View File

@@ -124,6 +124,11 @@ func (s *LocalRepoCollectionSuite) TestByUUID(c *C) {
r, err := s.collection.ByUUID(repo.UUID)
c.Assert(err, IsNil)
c.Assert(r, Equals, repo)
collection := NewLocalRepoCollection(s.db)
r, err = collection.ByUUID(repo.UUID)
c.Assert(err, IsNil)
c.Assert(r.String(), Equals, repo.String())
}

View File

@@ -859,28 +859,34 @@ type PublishedRepoCollection struct {
// NewPublishedRepoCollection loads PublishedRepos from DB and makes up collection
func NewPublishedRepoCollection(db database.Storage) *PublishedRepoCollection {
result := &PublishedRepoCollection{
return &PublishedRepoCollection{
RWMutex: &sync.RWMutex{},
db: db,
}
}
blobs := db.FetchByPrefix([]byte("U"))
result.list = make([]*PublishedRepo, 0, len(blobs))
func (collection *PublishedRepoCollection) loadList() {
if collection.list != nil {
return
}
blobs := collection.db.FetchByPrefix([]byte("U"))
collection.list = make([]*PublishedRepo, 0, len(blobs))
for _, blob := range blobs {
r := &PublishedRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding published repo: %s\n", err)
} else {
result.list = append(result.list, r)
collection.list = append(collection.list, r)
}
}
return result
}
// Add appends new repo to collection and saves it
func (collection *PublishedRepoCollection) Add(repo *PublishedRepo) error {
collection.loadList()
if collection.CheckDuplicate(repo) != nil {
return fmt.Errorf("published repo with storage/prefix/distribution %s/%s/%s already exists", repo.Storage, repo.Prefix, repo.Distribution)
}
@@ -896,6 +902,8 @@ func (collection *PublishedRepoCollection) Add(repo *PublishedRepo) error {
// CheckDuplicate verifies that there's no published repo with the same name
func (collection *PublishedRepoCollection) CheckDuplicate(repo *PublishedRepo) *PublishedRepo {
collection.loadList()
for _, r := range collection.list {
if r.Prefix == repo.Prefix && r.Distribution == repo.Distribution && r.Storage == repo.Storage {
return r
@@ -985,6 +993,8 @@ func (collection *PublishedRepoCollection) LoadComplete(repo *PublishedRepo, col
// ByStoragePrefixDistribution looks up repository by storage, prefix & distribution
func (collection *PublishedRepoCollection) ByStoragePrefixDistribution(storage, prefix, distribution string) (*PublishedRepo, error) {
collection.loadList()
for _, r := range collection.list {
if r.Prefix == prefix && r.Distribution == distribution && r.Storage == storage {
return r, nil
@@ -998,6 +1008,8 @@ func (collection *PublishedRepoCollection) ByStoragePrefixDistribution(storage,
// ByUUID looks up repository by uuid
func (collection *PublishedRepoCollection) ByUUID(uuid string) (*PublishedRepo, error) {
collection.loadList()
for _, r := range collection.list {
if r.UUID == uuid {
return r, nil
@@ -1008,6 +1020,8 @@ func (collection *PublishedRepoCollection) ByUUID(uuid string) (*PublishedRepo,
// BySnapshot looks up repository by snapshot source
func (collection *PublishedRepoCollection) BySnapshot(snapshot *Snapshot) []*PublishedRepo {
collection.loadList()
var result []*PublishedRepo
for _, r := range collection.list {
if r.SourceKind == SourceSnapshot {
@@ -1028,6 +1042,8 @@ func (collection *PublishedRepoCollection) BySnapshot(snapshot *Snapshot) []*Pub
// ByLocalRepo looks up repository by local repo source
func (collection *PublishedRepoCollection) ByLocalRepo(repo *LocalRepo) []*PublishedRepo {
collection.loadList()
var result []*PublishedRepo
for _, r := range collection.list {
if r.SourceKind == SourceLocalRepo {
@@ -1048,18 +1064,21 @@ func (collection *PublishedRepoCollection) ByLocalRepo(repo *LocalRepo) []*Publi
// ForEach runs method for each repository
func (collection *PublishedRepoCollection) ForEach(handler func(*PublishedRepo) error) error {
var err error
for _, r := range collection.list {
err = handler(r)
if err != nil {
return err
return collection.db.ProcessByPrefix([]byte("U"), func(key, blob []byte) error {
r := &PublishedRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding published repo: %s\n", err)
return nil
}
}
return err
return handler(r)
})
}
// Len returns number of remote repos
func (collection *PublishedRepoCollection) Len() int {
collection.loadList()
return len(collection.list)
}
@@ -1067,6 +1086,8 @@ func (collection *PublishedRepoCollection) Len() int {
func (collection *PublishedRepoCollection) CleanupPrefixComponentFiles(prefix string, components []string,
publishedStorage aptly.PublishedStorage, collectionFactory *CollectionFactory, progress aptly.Progress) error {
collection.loadList()
var err error
referencedFiles := map[string][]string{}
@@ -1148,6 +1169,9 @@ func (collection *PublishedRepoCollection) CleanupPrefixComponentFiles(prefix st
func (collection *PublishedRepoCollection) Remove(publishedStorageProvider aptly.PublishedStorageProvider,
storage, prefix, distribution string, collectionFactory *CollectionFactory, progress aptly.Progress,
force, skipCleanup bool) error {
collection.loadList()
repo, err := collection.ByStoragePrefixDistribution(storage, prefix, distribution)
if err != nil {
return err

View File

@@ -3,6 +3,7 @@ package deb
import (
"bytes"
gocontext "context"
"errors"
"fmt"
"log"
"net/url"
@@ -717,46 +718,68 @@ func (repo *RemoteRepo) RefKey() []byte {
// RemoteRepoCollection does listing, updating/adding/deleting of RemoteRepos
type RemoteRepoCollection struct {
*sync.RWMutex
db database.Storage
list []*RemoteRepo
db database.Storage
cache map[string]*RemoteRepo
}
// NewRemoteRepoCollection loads RemoteRepos from DB and makes up collection
func NewRemoteRepoCollection(db database.Storage) *RemoteRepoCollection {
result := &RemoteRepoCollection{
return &RemoteRepoCollection{
RWMutex: &sync.RWMutex{},
db: db,
cache: make(map[string]*RemoteRepo),
}
}
blobs := db.FetchByPrefix([]byte("R"))
result.list = make([]*RemoteRepo, 0, len(blobs))
for _, blob := range blobs {
r := &RemoteRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding mirror: %s\n", err)
} else {
result.list = append(result.list, r)
func (collection *RemoteRepoCollection) search(filter func(*RemoteRepo) bool, unique bool) []*RemoteRepo {
result := []*RemoteRepo(nil)
for _, r := range collection.cache {
if filter(r) {
result = append(result, r)
}
}
if unique && len(result) > 0 {
return result
}
collection.db.ProcessByPrefix([]byte("R"), func(key, blob []byte) error {
r := &RemoteRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding remote repo: %s\n", err)
return nil
}
if filter(r) {
if _, exists := collection.cache[r.UUID]; !exists {
collection.cache[r.UUID] = r
result = append(result, r)
if unique {
return errors.New("abort")
}
}
}
return nil
})
return result
}
// Add appends new repo to collection and saves it
func (collection *RemoteRepoCollection) Add(repo *RemoteRepo) error {
for _, r := range collection.list {
if r.Name == repo.Name {
return fmt.Errorf("mirror with name %s already exists", repo.Name)
}
_, err := collection.ByName(repo.Name)
if err == nil {
return fmt.Errorf("mirror with name %s already exists", repo.Name)
}
err := collection.Update(repo)
err = collection.Update(repo)
if err != nil {
return err
}
collection.list = append(collection.list, repo)
collection.cache[repo.UUID] = repo
return nil
}
@@ -791,58 +814,65 @@ func (collection *RemoteRepoCollection) LoadComplete(repo *RemoteRepo) error {
// ByName looks up repository by name
func (collection *RemoteRepoCollection) ByName(name string) (*RemoteRepo, error) {
for _, r := range collection.list {
if r.Name == name {
return r, nil
}
result := collection.search(func(r *RemoteRepo) bool { return r.Name == name }, true)
if len(result) == 0 {
return nil, fmt.Errorf("mirror with name %s not found", name)
}
return nil, fmt.Errorf("mirror with name %s not found", name)
return result[0], nil
}
// ByUUID looks up repository by uuid
func (collection *RemoteRepoCollection) ByUUID(uuid string) (*RemoteRepo, error) {
for _, r := range collection.list {
if r.UUID == uuid {
return r, nil
}
if r, ok := collection.cache[uuid]; ok {
return r, nil
}
return nil, fmt.Errorf("mirror with uuid %s not found", uuid)
key := (&RemoteRepo{UUID: uuid}).Key()
value, err := collection.db.Get(key)
if err == database.ErrNotFound {
return nil, fmt.Errorf("mirror with uuid %s not found", uuid)
}
if err != nil {
return nil, err
}
r := &RemoteRepo{}
err = r.Decode(value)
if err == nil {
collection.cache[r.UUID] = r
}
return r, err
}
// ForEach runs method for each repository
func (collection *RemoteRepoCollection) ForEach(handler func(*RemoteRepo) error) error {
var err error
for _, r := range collection.list {
err = handler(r)
if err != nil {
return err
return collection.db.ProcessByPrefix([]byte("R"), func(key, blob []byte) error {
r := &RemoteRepo{}
if err := r.Decode(blob); err != nil {
log.Printf("Error decoding mirror: %s\n", err)
return nil
}
}
return err
return handler(r)
})
}
// Len returns number of remote repos
func (collection *RemoteRepoCollection) Len() int {
return len(collection.list)
return len(collection.db.KeysByPrefix([]byte("R")))
}
// Drop removes remote repo from collection
func (collection *RemoteRepoCollection) Drop(repo *RemoteRepo) error {
repoPosition := -1
for i, r := range collection.list {
if r == repo {
repoPosition = i
break
}
}
if repoPosition == -1 {
if _, err := collection.db.Get(repo.Key()); err == database.ErrNotFound {
panic("repo not found!")
}
collection.list[len(collection.list)-1], collection.list[repoPosition], collection.list =
nil, collection.list[len(collection.list)-1], collection.list[:len(collection.list)-1]
delete(collection.cache, repo.UUID)
err := collection.db.Delete(repo.Key())
if err != nil {

View File

@@ -699,6 +699,11 @@ func (s *RemoteRepoCollectionSuite) TestByUUID(c *C) {
r, err := s.collection.ByUUID(repo.UUID)
c.Assert(err, IsNil)
c.Assert(r, Equals, repo)
collection := NewRemoteRepoCollection(s.db)
r, err = collection.ByUUID(repo.UUID)
c.Assert(err, IsNil)
c.Assert(r.String(), Equals, repo.String())
}

View File

@@ -173,50 +173,36 @@ func (s *Snapshot) Decode(input []byte) error {
// SnapshotCollection does listing, updating/adding/deleting of Snapshots
type SnapshotCollection struct {
*sync.RWMutex
db database.Storage
list []*Snapshot
db database.Storage
cache map[string]*Snapshot
}
// NewSnapshotCollection loads Snapshots from DB and makes up collection
func NewSnapshotCollection(db database.Storage) *SnapshotCollection {
result := &SnapshotCollection{
return &SnapshotCollection{
RWMutex: &sync.RWMutex{},
db: db,
cache: map[string]*Snapshot{},
}
blobs := db.FetchByPrefix([]byte("S"))
result.list = make([]*Snapshot, 0, len(blobs))
for _, blob := range blobs {
s := &Snapshot{}
if err := s.Decode(blob); err != nil {
log.Printf("Error decoding snapshot: %s\n", err)
} else {
result.list = append(result.list, s)
}
}
return result
}
// Add appends new repo to collection and saves it
func (collection *SnapshotCollection) Add(snapshot *Snapshot) error {
for _, s := range collection.list {
if s.Name == snapshot.Name {
return fmt.Errorf("snapshot with name %s already exists", snapshot.Name)
}
_, err := collection.ByName(snapshot.Name)
if err == nil {
return fmt.Errorf("snapshot with name %s already exists", snapshot.Name)
}
err := collection.Update(snapshot)
err = collection.Update(snapshot)
if err != nil {
return err
}
collection.list = append(collection.list, snapshot)
collection.cache[snapshot.UUID] = snapshot
return nil
}
// Update stores updated information about repo in DB
// Update stores updated information about snapshot in DB
func (collection *SnapshotCollection) Update(snapshot *Snapshot) error {
err := collection.db.Put(snapshot.Key(), snapshot.Encode())
if err != nil {
@@ -239,83 +225,132 @@ func (collection *SnapshotCollection) LoadComplete(snapshot *Snapshot) error {
return snapshot.packageRefs.Decode(encoded)
}
// ByName looks up snapshot by name
func (collection *SnapshotCollection) ByName(name string) (*Snapshot, error) {
for _, s := range collection.list {
if s.Name == name {
return s, nil
func (collection *SnapshotCollection) search(filter func(*Snapshot) bool, unique bool) []*Snapshot {
result := []*Snapshot(nil)
for _, s := range collection.cache {
if filter(s) {
result = append(result, s)
}
}
if unique && len(result) > 0 {
return result
}
collection.db.ProcessByPrefix([]byte("S"), func(key, blob []byte) error {
s := &Snapshot{}
if err := s.Decode(blob); err != nil {
log.Printf("Error decoding snapshot: %s\n", err)
return nil
}
if filter(s) {
if _, exists := collection.cache[s.UUID]; !exists {
collection.cache[s.UUID] = s
result = append(result, s)
if unique {
return errors.New("abort")
}
}
}
return nil
})
return result
}
// ByName looks up snapshot by name
func (collection *SnapshotCollection) ByName(name string) (*Snapshot, error) {
result := collection.search(func(s *Snapshot) bool { return s.Name == name }, true)
if len(result) > 0 {
return result[0], nil
}
return nil, fmt.Errorf("snapshot with name %s not found", name)
}
// ByUUID looks up snapshot by UUID
func (collection *SnapshotCollection) ByUUID(uuid string) (*Snapshot, error) {
for _, s := range collection.list {
if s.UUID == uuid {
return s, nil
}
if s, ok := collection.cache[uuid]; ok {
return s, nil
}
return nil, fmt.Errorf("snapshot with uuid %s not found", uuid)
key := (&Snapshot{UUID: uuid}).Key()
value, err := collection.db.Get(key)
if err == database.ErrNotFound {
return nil, fmt.Errorf("snapshot with uuid %s not found", uuid)
}
if err != nil {
return nil, err
}
s := &Snapshot{}
err = s.Decode(value)
if err == nil {
collection.cache[s.UUID] = s
}
return s, err
}
// ByRemoteRepoSource looks up snapshots that have specified RemoteRepo as a source
func (collection *SnapshotCollection) ByRemoteRepoSource(repo *RemoteRepo) []*Snapshot {
var result []*Snapshot
for _, s := range collection.list {
if s.SourceKind == SourceRemoteRepo && utils.StrSliceHasItem(s.SourceIDs, repo.UUID) {
result = append(result, s)
}
}
return result
return collection.search(func(s *Snapshot) bool {
return s.SourceKind == SourceRemoteRepo && utils.StrSliceHasItem(s.SourceIDs, repo.UUID)
}, false)
}
// ByLocalRepoSource looks up snapshots that have specified LocalRepo as a source
func (collection *SnapshotCollection) ByLocalRepoSource(repo *LocalRepo) []*Snapshot {
var result []*Snapshot
for _, s := range collection.list {
if s.SourceKind == SourceLocalRepo && utils.StrSliceHasItem(s.SourceIDs, repo.UUID) {
result = append(result, s)
}
}
return result
return collection.search(func(s *Snapshot) bool {
return s.SourceKind == SourceLocalRepo && utils.StrSliceHasItem(s.SourceIDs, repo.UUID)
}, false)
}
// BySnapshotSource looks up snapshots that have specified snapshot as a source
func (collection *SnapshotCollection) BySnapshotSource(snapshot *Snapshot) []*Snapshot {
var result []*Snapshot
for _, s := range collection.list {
if s.SourceKind == "snapshot" && utils.StrSliceHasItem(s.SourceIDs, snapshot.UUID) {
result = append(result, s)
}
}
return result
return collection.search(func(s *Snapshot) bool {
return s.SourceKind == "snapshot" && utils.StrSliceHasItem(s.SourceIDs, snapshot.UUID)
}, false)
}
// ForEach runs method for each snapshot
func (collection *SnapshotCollection) ForEach(handler func(*Snapshot) error) error {
var err error
for _, s := range collection.list {
err = handler(s)
if err != nil {
return err
return collection.db.ProcessByPrefix([]byte("S"), func(key, blob []byte) error {
s := &Snapshot{}
if err := s.Decode(blob); err != nil {
log.Printf("Error decoding snapshot: %s\n", err)
return nil
}
}
return err
return handler(s)
})
}
// ForEachSorted runs method for each snapshot following some sort order
func (collection *SnapshotCollection) ForEachSorted(sortMethod string, handler func(*Snapshot) error) error {
sorter, err := newSnapshotSorter(sortMethod, collection)
blobs := collection.db.FetchByPrefix([]byte("S"))
list := make([]*Snapshot, 0, len(blobs))
for _, blob := range blobs {
s := &Snapshot{}
if err := s.Decode(blob); err != nil {
log.Printf("Error decoding snapshot: %s\n", err)
} else {
list = append(list, s)
}
}
sorter, err := newSnapshotSorter(sortMethod, list)
if err != nil {
return err
}
for _, i := range sorter.list {
err = handler(collection.list[i])
for _, s := range sorter.list {
err = handler(s)
if err != nil {
return err
}
@@ -327,26 +362,16 @@ func (collection *SnapshotCollection) ForEachSorted(sortMethod string, handler f
// Len returns number of snapshots in collection
// ForEach runs method for each snapshot
func (collection *SnapshotCollection) Len() int {
return len(collection.list)
return len(collection.db.KeysByPrefix([]byte("S")))
}
// Drop removes snapshot from collection
func (collection *SnapshotCollection) Drop(snapshot *Snapshot) error {
snapshotPosition := -1
for i, s := range collection.list {
if s == snapshot {
snapshotPosition = i
break
}
}
if snapshotPosition == -1 {
if _, err := collection.db.Get(snapshot.Key()); err == database.ErrNotFound {
panic("snapshot not found!")
}
collection.list[len(collection.list)-1], collection.list[snapshotPosition], collection.list =
nil, collection.list[len(collection.list)-1], collection.list[:len(collection.list)-1]
delete(collection.cache, snapshot.UUID)
err := collection.db.Delete(snapshot.Key())
if err != nil {
@@ -363,13 +388,12 @@ const (
)
type snapshotSorter struct {
list []int
collection *SnapshotCollection
list []*Snapshot
sortMethod int
}
func newSnapshotSorter(sortMethod string, collection *SnapshotCollection) (*snapshotSorter, error) {
s := &snapshotSorter{collection: collection}
func newSnapshotSorter(sortMethod string, list []*Snapshot) (*snapshotSorter, error) {
s := &snapshotSorter{list: list}
switch sortMethod {
case "time", "Time":
@@ -380,11 +404,6 @@ func newSnapshotSorter(sortMethod string, collection *SnapshotCollection) (*snap
return nil, fmt.Errorf("sorting method \"%s\" unknown", sortMethod)
}
s.list = make([]int, len(collection.list))
for i := range s.list {
s.list[i] = i
}
sort.Sort(s)
return s, nil
@@ -397,9 +416,9 @@ func (s *snapshotSorter) Swap(i, j int) {
func (s *snapshotSorter) Less(i, j int) bool {
switch s.sortMethod {
case SortName:
return s.collection.list[s.list[i]].Name < s.collection.list[s.list[j]].Name
return s.list[i].Name < s.list[j].Name
case SortTime:
return s.collection.list[s.list[i]].CreatedAt.Before(s.collection.list[s.list[j]].CreatedAt)
return s.list[i].CreatedAt.Before(s.list[j].CreatedAt)
}
panic("unknown sort method")
}

View File

@@ -0,0 +1,98 @@
package deb
import (
"fmt"
"os"
"testing"
"github.com/aptly-dev/aptly/database"
)
func BenchmarkSnapshotCollectionForEach(b *testing.B) {
const count = 1024
tmpDir := os.TempDir()
defer os.RemoveAll(tmpDir)
db, _ := database.NewOpenDB(tmpDir)
defer db.Close()
collection := NewSnapshotCollection(db)
for i := 0; i < count; i++ {
snapshot := NewSnapshotFromRefList(fmt.Sprintf("snapshot%d", i), nil, NewPackageRefList(), fmt.Sprintf("Snapshot number %d", i))
if collection.Add(snapshot) != nil {
b.FailNow()
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
collection = NewSnapshotCollection(db)
collection.ForEach(func(s *Snapshot) error {
return nil
})
}
}
func BenchmarkSnapshotCollectionByUUID(b *testing.B) {
const count = 1024
tmpDir := os.TempDir()
defer os.RemoveAll(tmpDir)
db, _ := database.NewOpenDB(tmpDir)
defer db.Close()
collection := NewSnapshotCollection(db)
uuids := []string{}
for i := 0; i < count; i++ {
snapshot := NewSnapshotFromRefList(fmt.Sprintf("snapshot%d", i), nil, NewPackageRefList(), fmt.Sprintf("Snapshot number %d", i))
if collection.Add(snapshot) != nil {
b.FailNow()
}
uuids = append(uuids, snapshot.UUID)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
collection = NewSnapshotCollection(db)
if _, err := collection.ByUUID(uuids[i%len(uuids)]); err != nil {
b.FailNow()
}
}
}
func BenchmarkSnapshotCollectionByName(b *testing.B) {
const count = 1024
tmpDir := os.TempDir()
defer os.RemoveAll(tmpDir)
db, _ := database.NewOpenDB(tmpDir)
defer db.Close()
collection := NewSnapshotCollection(db)
for i := 0; i < count; i++ {
snapshot := NewSnapshotFromRefList(fmt.Sprintf("snapshot%d", i), nil, NewPackageRefList(), fmt.Sprintf("Snapshot number %d", i))
if collection.Add(snapshot) != nil {
b.FailNow()
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
collection = NewSnapshotCollection(db)
if _, err := collection.ByName(fmt.Sprintf("snapshot%d", i%count)); err != nil {
b.FailNow()
}
}
}

View File

@@ -2,6 +2,7 @@ package deb
import (
"errors"
"sort"
"github.com/aptly-dev/aptly/database"
@@ -158,6 +159,10 @@ func (s *SnapshotCollectionSuite) TestAddByNameByUUID(c *C) {
snapshot, err = collection.ByUUID(s.snapshot1.UUID)
c.Assert(err, IsNil)
c.Assert(snapshot.String(), Equals, s.snapshot1.String())
snapshot, err = collection.ByUUID(s.snapshot2.UUID)
c.Assert(err, IsNil)
c.Assert(snapshot.String(), Equals, s.snapshot2.String())
}
func (s *SnapshotCollectionSuite) TestUpdateLoadComplete(c *C) {
@@ -193,6 +198,23 @@ func (s *SnapshotCollectionSuite) TestForEachAndLen(c *C) {
c.Assert(err, Equals, e)
}
func (s *SnapshotCollectionSuite) TestForEachSorted(c *C) {
s.collection.Add(s.snapshot2)
s.collection.Add(s.snapshot1)
s.collection.Add(s.snapshot4)
s.collection.Add(s.snapshot3)
names := []string{}
err := s.collection.ForEachSorted("name", func(snapshot *Snapshot) error {
names = append(names, snapshot.Name)
return nil
})
c.Assert(err, IsNil)
c.Check(sort.StringsAreSorted(names), Equals, true)
}
func (s *SnapshotCollectionSuite) TestFindByRemoteRepoSource(c *C) {
c.Assert(s.collection.Add(s.snapshot1), IsNil)
c.Assert(s.collection.Add(s.snapshot2), IsNil)
@@ -230,7 +252,11 @@ func (s *SnapshotCollectionSuite) TestFindSnapshotSource(c *C) {
c.Assert(s.collection.Add(snapshot4), IsNil)
c.Assert(s.collection.Add(snapshot5), IsNil)
c.Check(s.collection.BySnapshotSource(s.snapshot1), DeepEquals, []*Snapshot{snapshot3, snapshot4})
list := s.collection.BySnapshotSource(s.snapshot1)
sorter, _ := newSnapshotSorter("name", list)
sort.Sort(sorter)
c.Check(sorter.list, DeepEquals, []*Snapshot{snapshot3, snapshot4})
c.Check(s.collection.BySnapshotSource(s.snapshot2), DeepEquals, []*Snapshot{snapshot3})
c.Check(s.collection.BySnapshotSource(snapshot5), DeepEquals, []*Snapshot(nil))
}

View File

@@ -174,7 +174,7 @@ type GpgVerifier struct {
keyRings []string
}
// NewGpgVerifier creates a new gpg signer
// NewGpgVerifier creates a new gpg verifier
func NewGpgVerifier() *GpgVerifier {
gpg, err := findGPG1()
if err != nil {
@@ -191,18 +191,6 @@ func NewGpgVerifier() *GpgVerifier {
// InitKeyring verifies that gpg is installed and some keys are trusted
func (g *GpgVerifier) InitKeyring() error {
cmd, err := findGPG1()
if err != nil {
return err
}
g.gpg = cmd
cmd, err = findGPGV1()
if err != nil {
return err
}
g.gpgv = cmd
if len(g.keyRings) == 0 {
// using default keyring
output, err := exec.Command(g.gpg, "--no-default-keyring", "--no-auto-check-trustdb", "--keyring", "trustedkeys.gpg", "--list-keys").Output()

View File

@@ -77,3 +77,35 @@ func (s *GnupgSuite) TestGPGVNothing(c *C) {
c.Assert(func() { NewGpgVerifier() }, PanicMatches, `Couldn't find a suitable gpgv executable.+`)
}
// Gnupg1VerifierSuite runs the shared VerifierSuite tests against the
// external GnuPG 1.x verifier implementation.
type Gnupg1VerifierSuite struct {
	VerifierSuite
}

var _ = Suite(&Gnupg1VerifierSuite{})
// SetUpTest configures an external-gpg verifier backed by the local
// trusted keyring before each test.
func (s *Gnupg1VerifierSuite) SetUpTest(c *C) {
	s.verifier = NewGpgVerifier()
	s.verifier.AddKeyring("./trusted.gpg")

	c.Assert(s.verifier.InitKeyring(), IsNil)
}
// Gnupg1SignerSuite runs the shared SignerSuite tests against the
// external GnuPG 1.x signer implementation.
type Gnupg1SignerSuite struct {
	SignerSuite
}

var _ = Suite(&Gnupg1SignerSuite{})
// SetUpTest wires the external gpg signer to the internal Go verifier:
// signatures produced by gpg must verify with the pure-Go implementation,
// exercising cross-implementation compatibility.
func (s *Gnupg1SignerSuite) SetUpTest(c *C) {
	s.signer = NewGpgSigner()
	s.signer.SetBatch(true)

	// verify gpg output with the Go verifier, using both test keyrings
	s.verifier = &GoVerifier{}
	s.verifier.AddKeyring("./keyrings/aptly.pub")
	s.verifier.AddKeyring("./keyrings/aptly_passphrase.pub")

	c.Assert(s.verifier.InitKeyring(), IsNil)

	s.SignerSuite.SetUpTest(c)
}

View File

@@ -1,14 +1,11 @@
package pgp
import (
"io/ioutil"
"os"
. "gopkg.in/check.v1"
)
type GoVerifierSuite struct {
verifier Verifier
VerifierSuite
}
var _ = Suite(&GoVerifierSuite{})
@@ -20,77 +17,21 @@ func (s *GoVerifierSuite) SetUpTest(c *C) {
c.Assert(s.verifier.InitKeyring(), IsNil)
}
func (s *GoVerifierSuite) TestVerifyDetached(c *C) {
for _, test := range []struct {
textName, signatureName string
}{
{"1.text", "1.signature"},
{"2.text", "2.signature"},
{"3.text", "3.signature"},
} {
cleartext, err := os.Open(test.textName)
c.Assert(err, IsNil)
signature, err := os.Open(test.signatureName)
c.Assert(err, IsNil)
err = s.verifier.VerifyDetachedSignature(signature, cleartext, false)
c.Assert(err, IsNil)
signature.Close()
cleartext.Close()
}
type GoSignerSuite struct {
SignerSuite
}
func (s *GoVerifierSuite) TestVerifyClearsigned(c *C) {
for _, test := range []struct {
clearSignedName string
}{
{"1.clearsigned"},
} {
clearsigned, err := os.Open(test.clearSignedName)
c.Assert(err, IsNil)
var _ = Suite(&GoSignerSuite{})
keyInfo, err := s.verifier.VerifyClearsigned(clearsigned, false)
c.Assert(err, IsNil)
c.Check(keyInfo.GoodKeys, DeepEquals, []Key{"8B48AD6246925553", "7638D0442B90D010"})
c.Check(keyInfo.MissingKeys, DeepEquals, []Key(nil))
func (s *GoSignerSuite) SetUpTest(c *C) {
s.signer = &GoSigner{}
s.signer.SetBatch(true)
clearsigned.Close()
}
}
func (s *GoVerifierSuite) TestExtractClearsigned(c *C) {
for _, test := range []struct {
clearSignedName, clearTextName string
}{
{"1.clearsigned", "1.cleartext"},
} {
clearsigned, err := os.Open(test.clearSignedName)
c.Assert(err, IsNil)
cleartext, err := os.Open(test.clearTextName)
c.Assert(err, IsNil)
is, err := s.verifier.IsClearSigned(clearsigned)
c.Assert(err, IsNil)
c.Check(is, Equals, true)
clearsigned.Seek(0, 0)
extractedF, err := s.verifier.ExtractClearsigned(clearsigned)
c.Assert(err, IsNil)
expected, err := ioutil.ReadAll(cleartext)
c.Assert(err, IsNil)
extracted, err := ioutil.ReadAll(extractedF)
c.Assert(err, IsNil)
c.Check(expected, DeepEquals, extracted)
extractedF.Close()
clearsigned.Close()
cleartext.Close()
}
s.verifier = &GoVerifier{}
s.verifier.AddKeyring("./keyrings/aptly.pub")
s.verifier.AddKeyring("./keyrings/aptly_passphrase.pub")
c.Assert(s.verifier.InitKeyring(), IsNil)
s.SignerSuite.SetUpTest(c)
}

BIN
pgp/keyrings/aptly.pub Normal file

Binary file not shown.

BIN
pgp/keyrings/aptly.sec Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

134
pgp/sign_test.go Normal file
View File

@@ -0,0 +1,134 @@
package pgp
import (
"crypto/rand"
"io"
"io/ioutil"
"os"
"path"
. "gopkg.in/check.v1"
)
// SignerSuite is a common set of signing tests shared by the internal (Go)
// and external (GnuPG) signer implementations. Concrete suites embed it and
// assign signer/verifier in their own SetUpTest before calling this one's.
type SignerSuite struct {
	signer       Signer   // implementation under test
	verifier     Verifier // used to check what the signer produced
	clearF       *os.File // input file with random cleartext
	signedF      *os.File // destination for the signature output
	cleartext    []byte   // the bytes written to clearF
	passwordFile string   // file containing the signing passphrase
}
// SetUpTest creates the per-test fixtures in a temporary directory: a file
// with 1 KiB of random cleartext (rewound to the start), an empty destination
// file for the signature, and a 0600 file holding the signing passphrase.
func (s *SignerSuite) SetUpTest(c *C) {
	tempDir := c.MkDir()

	var err error
	s.clearF, err = os.Create(path.Join(tempDir, "cleartext"))
	c.Assert(err, IsNil)

	// Bug fix: the slice must have non-zero length — with make([]byte, 0, 1024)
	// rand.Read filled nothing and the suite silently signed an empty file.
	s.cleartext = make([]byte, 1024)
	_, err = rand.Read(s.cleartext)
	c.Assert(err, IsNil)

	_, err = s.clearF.Write(s.cleartext)
	c.Assert(err, IsNil)

	// rewind so the verifier can read the cleartext from the same handle
	_, err = s.clearF.Seek(0, io.SeekStart)
	c.Assert(err, IsNil)

	s.signedF, err = os.Create(path.Join(tempDir, "signed"))
	c.Assert(err, IsNil)

	s.passwordFile = path.Join(tempDir, "password")
	f, err := os.OpenFile(s.passwordFile, os.O_CREATE|os.O_WRONLY, 0600)
	c.Assert(err, IsNil)
	_, err = f.Write([]byte("verysecret"))
	c.Assert(err, IsNil)
	// Close can surface delayed write errors, so assert it too.
	c.Assert(f.Close(), IsNil)

	s.signer.SetBatch(true)
}
// TearDownTest closes the fixture file handles; the backing files live in
// c.MkDir() and are removed by gocheck itself.
func (s *SignerSuite) TearDownTest(c *C) {
	s.clearF.Close()
	s.signedF.Close()
}
// testSignDetached is the shared body for the detached-signature tests:
// it initializes the signer (keyring/passphrase were set by the caller),
// produces a detached signature of clearF into signedF, and checks that
// the suite's verifier accepts it.
func (s *SignerSuite) testSignDetached(c *C) {
	c.Assert(s.signer.Init(), IsNil)

	err := s.signer.DetachedSign(s.clearF.Name(), s.signedF.Name())
	c.Assert(err, IsNil)

	err = s.verifier.VerifyDetachedSignature(s.signedF, s.clearF, false)
	c.Assert(err, IsNil)
}
// TestSignDetachedNoPassphrase signs with a key that has no passphrase.
func (s *SignerSuite) TestSignDetachedNoPassphrase(c *C) {
	s.signer.SetKeyRing("keyrings/aptly.pub", "keyrings/aptly.sec")

	s.testSignDetached(c)
}
// TestSignDetachedPassphrase signs with a passphrase-protected key,
// supplying the passphrase directly.
func (s *SignerSuite) TestSignDetachedPassphrase(c *C) {
	s.signer.SetKeyRing("keyrings/aptly_passphrase.pub", "keyrings/aptly_passphrase.sec")
	s.signer.SetPassphrase("verysecret", "")

	s.testSignDetached(c)
}
// TestSignDetachedPassphraseFile signs with a passphrase-protected key,
// reading the passphrase from a file instead of passing it inline.
func (s *SignerSuite) TestSignDetachedPassphraseFile(c *C) {
	s.signer.SetKeyRing("keyrings/aptly_passphrase.pub", "keyrings/aptly_passphrase.sec")
	s.signer.SetPassphrase("", s.passwordFile)

	s.testSignDetached(c)
}
// testClearSign is the shared body for the clearsign tests: it clearsigns
// clearF into signedF, verifies the signature was made by expectedKey with
// no missing keys, then extracts the embedded cleartext and checks it
// round-trips byte-for-byte.
func (s *SignerSuite) testClearSign(c *C, expectedKey Key) {
	c.Assert(s.signer.Init(), IsNil)

	err := s.signer.ClearSign(s.clearF.Name(), s.signedF.Name())
	c.Assert(err, IsNil)

	keyInfo, err := s.verifier.VerifyClearsigned(s.signedF, false)
	c.Assert(err, IsNil)

	c.Assert(keyInfo.GoodKeys, DeepEquals, []Key{expectedKey})
	c.Assert(keyInfo.MissingKeys, DeepEquals, []Key(nil))

	// rewind: VerifyClearsigned consumed the file
	_, err = s.signedF.Seek(0, io.SeekStart)
	c.Assert(err, IsNil)

	extractedF, err := s.verifier.ExtractClearsigned(s.signedF)
	c.Assert(err, IsNil)
	defer extractedF.Close()

	extracted, err := ioutil.ReadAll(extractedF)
	c.Assert(err, IsNil)

	c.Assert(extracted, DeepEquals, s.cleartext)
}
// TestClearSignNoPassphrase clearsigns with the passphrase-less key; the
// key ID is the one baked into keyrings/aptly.sec.
func (s *SignerSuite) TestClearSignNoPassphrase(c *C) {
	s.signer.SetKeyRing("keyrings/aptly.pub", "keyrings/aptly.sec")

	s.testClearSign(c, "21DBB89C16DB3E6D")
}
// TestClearSignPassphrase clearsigns with the passphrase-protected key,
// supplying the passphrase directly.
func (s *SignerSuite) TestClearSignPassphrase(c *C) {
	s.signer.SetKeyRing("keyrings/aptly_passphrase.pub", "keyrings/aptly_passphrase.sec")
	s.signer.SetPassphrase("verysecret", "")

	s.testClearSign(c, "F30E8CB9CDDE2AF8")
}
// TestClearSignPassphraseFile clearsigns with the passphrase-protected key,
// reading the passphrase from a file.
func (s *SignerSuite) TestClearSignPassphraseFile(c *C) {
	s.signer.SetKeyRing("keyrings/aptly_passphrase.pub", "keyrings/aptly_passphrase.sec")
	s.signer.SetPassphrase("", s.passwordFile)

	s.testClearSign(c, "F30E8CB9CDDE2AF8")
}

93
pgp/verify_test.go Normal file
View File

@@ -0,0 +1,93 @@
package pgp
import (
"bytes"
"io/ioutil"
"os"
. "gopkg.in/check.v1"
)
// VerifierSuite is a common set of verification tests shared by the
// internal (Go) and external (GnuPG) verifier implementations. Concrete
// suites embed it and assign verifier in their SetUpTest.
type VerifierSuite struct {
	verifier Verifier // implementation under test
}
// TestVerifyDetached checks that detached signatures over the known sample
// files are accepted by the verifier under test.
func (s *VerifierSuite) TestVerifyDetached(c *C) {
	for _, test := range []struct {
		textName, signatureName string
	}{
		{"1.text", "1.signature"},
		{"2.text", "2.signature"},
		{"3.text", "3.signature"},
	} {
		cleartext, err := os.Open(test.textName)
		c.Assert(err, IsNil)

		signature, err := os.Open(test.signatureName)
		c.Assert(err, IsNil)

		err = s.verifier.VerifyDetachedSignature(signature, cleartext, false)
		c.Assert(err, IsNil)

		// closed inside the loop (not deferred) so handles don't accumulate
		signature.Close()
		cleartext.Close()
	}
}
// TestVerifyClearsigned checks that a known clearsigned sample verifies and
// reports exactly the expected signing key IDs with no missing keys.
func (s *VerifierSuite) TestVerifyClearsigned(c *C) {
	for _, test := range []struct {
		clearSignedName string
	}{
		{"1.clearsigned"},
	} {
		clearsigned, err := os.Open(test.clearSignedName)
		c.Assert(err, IsNil)

		keyInfo, err := s.verifier.VerifyClearsigned(clearsigned, false)
		c.Assert(err, IsNil)

		// key IDs baked into the sample fixture
		c.Check(keyInfo.GoodKeys, DeepEquals, []Key{"8B48AD6246925553", "7638D0442B90D010"})
		c.Check(keyInfo.MissingKeys, DeepEquals, []Key(nil))

		clearsigned.Close()
	}
}
// TestExtractClearsigned verifies that IsClearSigned recognizes a
// clearsigned sample and that ExtractClearsigned recovers the original
// cleartext (modulo newline normalization).
func (s *VerifierSuite) TestExtractClearsigned(c *C) {
	for _, test := range []struct {
		clearSignedName, clearTextName string
	}{
		{"1.clearsigned", "1.cleartext"},
	} {
		clearsigned, err := os.Open(test.clearSignedName)
		c.Assert(err, IsNil)

		cleartext, err := os.Open(test.clearTextName)
		c.Assert(err, IsNil)

		is, err := s.verifier.IsClearSigned(clearsigned)
		c.Assert(err, IsNil)
		c.Check(is, Equals, true)

		// Rewind before extracting; previously the Seek error was
		// silently discarded.
		_, err = clearsigned.Seek(0, 0)
		c.Assert(err, IsNil)

		extractedF, err := s.verifier.ExtractClearsigned(clearsigned)
		c.Assert(err, IsNil)

		expected, err := ioutil.ReadAll(cleartext)
		c.Assert(err, IsNil)

		extracted, err := ioutil.ReadAll(extractedF)
		c.Assert(err, IsNil)

		// normalize newlines: implementations may differ in CRLF handling
		// and in whether a trailing newline is emitted
		extracted = bytes.TrimRight(bytes.Replace(extracted, []byte("\r\n"), []byte("\n"), -1), "\n")
		expected = bytes.Replace(expected, []byte("\r\n"), []byte("\n"), -1)

		c.Check(extracted, DeepEquals, expected)

		extractedF.Close()
		clearsigned.Close()
		cleartext.Close()
	}
}

View File

@@ -6,6 +6,6 @@ gpgv: Good signature from "Package Maintainer (PagerDuty, Inc.) <packages@pagerd
Downloading & parsing package files...
Downloading http://packages.pagerduty.com/pdagent/deb/Packages.gz...
Building download queue...
Download queue: 13 items (1.66 MiB)
Download queue: 15 items (1.87 MiB)
Mirror `pagerduty` has been successfully updated.

View File

@@ -6,6 +6,6 @@ openpgp: Good signature from "Package Maintainer (PagerDuty, Inc.) <packages@pag
Downloading & parsing package files...
Downloading http://packages.pagerduty.com/pdagent/deb/Packages.gz...
Building download queue...
Download queue: 13 items (1.66 MiB)
Download queue: 15 items (1.87 MiB)
Mirror `pagerduty` has been successfully updated.

View File

@@ -69,6 +69,10 @@ class PublishRepo1Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
@@ -464,6 +468,10 @@ class PublishRepo17Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)

View File

@@ -79,6 +79,10 @@ class PublishSnapshot1Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
@@ -724,6 +728,10 @@ class PublishSnapshot26Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
@@ -947,6 +955,10 @@ class PublishSnapshot35Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)

View File

@@ -61,6 +61,10 @@ class PublishSwitch1Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
@@ -321,6 +325,10 @@ class PublishSwitch8Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
@@ -506,6 +514,10 @@ class PublishSwitch14Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)

View File

@@ -61,6 +61,10 @@ class PublishUpdate1Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)
@@ -402,6 +406,10 @@ class PublishUpdate12Test(BaseTest):
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
if "Contents" in path and not path.endswith(".gz"):
# "Contents" are present in index, but not really written to disk
continue
pathsSeen.add(path)
fileSize = int(fileSize)