Rework the way the database is opened/re-opened in aptly
Allow the database to be initialized without opening it, and unify all open paths so they retry on failure. In the API router, make sure open requests are matched with acks explicitly. This also enables re-open attempts in all aptly commands, which should hopefully make running the aptly CLI much easier. Fix up system tests for oldoldstable ;)
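For illustration, below is a minimal, self-contained Go sketch of the explicit request/ack pairing described above: every acquire/release request carries its own error channel, and the goroutine owning the database answers each request on that channel, opening the DB for the first client and closing it after the last one. The names serveDatabase, openDB, closeDB, acquire and release are placeholders for this sketch, not the actual aptly identifiers.

package main

import (
	"fmt"
	"time"
)

type dbRequestKind int

const (
	acquire dbRequestKind = iota
	release
)

// dbRequest pairs a request with its own ack channel, so every
// acquire/release gets exactly one explicit answer.
type dbRequest struct {
	kind dbRequestKind
	err  chan<- error
}

// serveDatabase owns the database lifecycle: it opens the DB for the
// first client and closes it when the last client releases it.
// openDB/closeDB stand in for the real open/close calls.
func serveDatabase(requests <-chan dbRequest, openDB, closeDB func() error) {
	clients := 0
	for req := range requests {
		var err error
		switch req.kind {
		case acquire:
			if clients == 0 {
				err = openDB()
			}
			if err == nil {
				clients++
			}
		case release:
			clients--
			if clients == 0 {
				err = closeDB()
			}
		}
		req.err <- err // explicit ack for this particular request
	}
}

func main() {
	requests := make(chan dbRequest)
	go serveDatabase(requests,
		func() error { fmt.Println("open"); return nil },
		func() error { fmt.Println("close"); return nil },
	)

	errCh := make(chan error)
	requests <- dbRequest{acquire, errCh}
	if err := <-errCh; err != nil {
		fmt.Println("acquire failed:", err)
		return
	}
	time.Sleep(10 * time.Millisecond) // pretend to serve an API request
	requests <- dbRequest{release, errCh}
	<-errCh
}

In the real change the open call is additionally retried with a jittered delay (see the _database hunk below), so that a CLI invocation or the API server can wait out another aptly process that still holds the LevelDB lock.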
api/api.go (51 changed lines)
@@ -23,11 +23,18 @@ func apiVersion(c *gin.Context) {
 	c.JSON(200, gin.H{"Version": aptly.Version})
 }

+type dbRequestKind int
+
 const (
-	acquiredb = iota
+	acquiredb dbRequestKind = iota
 	releasedb
 )

+type dbRequest struct {
+	kind dbRequestKind
+	err  chan<- error
+}
+
 // Flushes all collections which cache in-memory objects
 func flushColections() {
 	// lock everything to eliminate in-progress calls
@@ -52,50 +59,48 @@ func flushColections() {
 }

 // Periodically flushes CollectionFactory to free up memory used by
-// collections, flushing caches. If the two channels are provided,
-// they are used to acquire and release the database.
+// collections, flushing caches.
 //
 // Should be run in goroutine!
-func cacheFlusher(requests chan int, acks chan error) {
+func cacheFlusher() {
 	ticker := time.Tick(15 * time.Minute)

 	for {
 		<-ticker

-		// if aptly API runs in -no-lock mode,
-		// caches are flushed when DB is closed anyway, no need
-		// to flush them here
-		if requests == nil {
-			flushColections()
-		}
+		flushColections()
 	}
 }

-// Acquire database lock and release it when not needed anymore. Two
-// channels must be provided. The first one is to receive requests to
-// acquire/release the database and the second one is to send acks.
+// Acquire database lock and release it when not needed anymore.
 //
 // Should be run in a goroutine!
-func acquireDatabase(requests chan int, acks chan error) {
+func acquireDatabase(requests <-chan dbRequest) {
 	clients := 0
-	for {
-		request := <-requests
-		switch request {
+	for request := range requests {
+		var err error
+
+		switch request.kind {
 		case acquiredb:
 			if clients == 0 {
-				acks <- context.ReOpenDatabase()
-			} else {
-				acks <- nil
+				err = context.ReOpenDatabase()
 			}
-			clients++
+
+			request.err <- err
+
+			if err == nil {
+				clients++
+			}
 		case releasedb:
 			clients--
 			if clients == 0 {
 				flushColections()
-				acks <- context.CloseDatabase()
+				err = context.CloseDatabase()
 			} else {
-				acks <- nil
+				err = nil
 			}
+
+			request.err <- err
 		}
 	}
 }
@@ -20,35 +20,35 @@ func Router(c *ctx.AptlyContext) http.Handler {
 		// We use a goroutine to count the number of
 		// concurrent requests. When no more requests are
 		// running, we close the database to free the lock.
-		requests := make(chan int)
-		acks := make(chan error)
+		requests := make(chan dbRequest)

-		go acquireDatabase(requests, acks)
-		go cacheFlusher(requests, acks)
+		go acquireDatabase(requests)

 		router.Use(func(c *gin.Context) {
 			var err error

-			requests <- acquiredb
+			errCh := make(chan error)
+			requests <- dbRequest{acquiredb, errCh}

+			err = <-errCh
+			if err != nil {
+				c.Fail(500, err)
+				return
+			}
+
 			defer func() {
-				requests <- releasedb
-				err = <-acks
+				requests <- dbRequest{releasedb, errCh}
+				err = <-errCh
 				if err != nil {
 					c.Fail(500, err)
 				}
 			}()

-			err = <-acks
-			if err != nil {
-				c.Fail(500, err)
-				return
-			}
 			c.Next()
 		})

 	} else {
-		go cacheFlusher(nil, nil)
+		go cacheFlusher()
 	}

 	root := router.Group("/api")
@@ -111,6 +111,7 @@ package environment to new version.`,
 		},
 	}

+	cmd.Flag.Int("db-open-attempts", 10, "number of attempts to open DB if it's locked by other instance")
 	cmd.Flag.Bool("dep-follow-suggests", false, "when processing dependencies, follow Suggests")
 	cmd.Flag.Bool("dep-follow-source", false, "when processing dependencies, follow from binary to Source packages")
 	cmd.Flag.Bool("dep-follow-recommends", false, "when processing dependencies, follow Recommends")
@@ -3,6 +3,7 @@ package context

 import (
 	"fmt"
+	"math/rand"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -238,13 +239,34 @@ func (context *AptlyContext) _database() (database.Storage, error) {
 	if context.database == nil {
 		var err error

-		context.database, err = database.OpenDB(context.dbPath())
+		context.database, err = database.NewDB(context.dbPath())
 		if err != nil {
-			return nil, fmt.Errorf("can't open database: %s", err)
+			return nil, fmt.Errorf("can't instanciate database: %s", err)
 		}
 	}

-	return context.database, nil
+	tries := context.flags.Lookup("db-open-attempts").Value.Get().(int)
+	const BaseDelay = 10 * time.Second
+	const Jitter = 1 * time.Second
+
+	for ; tries >= 0; tries-- {
+		err := context.database.Open()
+		if err == nil || !strings.Contains(err.Error(), "resource temporarily unavailable") {
+			return context.database, err
+		}
+
+		if tries > 0 {
+			delay := time.Duration(rand.NormFloat64()*float64(Jitter) + float64(BaseDelay))
+			if delay < 0 {
+				delay = time.Second
+			}
+
+			context._progress().Printf("Unable to open database, sleeping %s, attempts left %d...\n", delay, tries)
+			time.Sleep(delay)
+		}
+	}
+
+	return nil, fmt.Errorf("unable to reopen the DB, maximum number of retries reached")
 }

 // CloseDatabase closes the db temporarily
@@ -261,26 +283,9 @@ func (context *AptlyContext) CloseDatabase() error {

 // ReOpenDatabase reopens the db after close
 func (context *AptlyContext) ReOpenDatabase() error {
-	context.Lock()
-	defer context.Unlock()
+	_, err := context.Database()

-	if context.database == nil {
-		return nil
-	}
-
-	const MaxTries = 10
-	const Delay = 10 * time.Second
-
-	for try := 0; try < MaxTries; try++ {
-		err := context.database.ReOpen()
-		if err == nil || !strings.Contains(err.Error(), "resource temporarily unavailable") {
-			return err
-		}
-		context._progress().Printf("Unable to reopen database, sleeping %s\n", Delay)
-		<-time.After(Delay)
-	}
-
-	return fmt.Errorf("unable to reopen the DB, maximum number of retries reached")
+	return err
 }

 // CollectionFactory builds factory producing all kinds of collections
@@ -32,8 +32,8 @@ type Storage interface {
 	ProcessByPrefix(prefix []byte, proc StorageProcessor) error
 	KeysByPrefix(prefix []byte) [][]byte
 	FetchByPrefix(prefix []byte) [][]byte
+	Open() error
 	Close() error
-	ReOpen() error
 	StartBatch()
 	FinishBatch() error
 	CompactDB() error
@@ -66,13 +66,19 @@ func internalOpen(path string, throttleCompaction bool) (*leveldb.DB, error) {
 	return leveldb.OpenFile(path, o)
 }

-// OpenDB opens (creates) LevelDB database
-func OpenDB(path string) (Storage, error) {
-	db, err := internalOpen(path, false)
+// NewDB creates new instance of DB, but doesn't open it (yet)
+func NewDB(path string) (Storage, error) {
+	return &levelDB{path: path}, nil
+}
+
+// NewOpenDB creates new instance of DB and opens it
+func NewOpenDB(path string) (Storage, error) {
+	db, err := NewDB(path)
 	if err != nil {
 		return nil, err
 	}
-	return &levelDB{db: db, path: path}, nil
+
+	return db, db.Open()
 }

 // RecoverDB recovers LevelDB database from corruption
@@ -215,8 +221,8 @@ func (l *levelDB) Close() error {
 	return err
 }

-// Reopen tries to re-open the database
-func (l *levelDB) ReOpen() error {
+// Reopen tries to open (re-open) the database
+func (l *levelDB) Open() error {
 	if l.db != nil {
 		return nil
 	}
@@ -22,7 +22,7 @@ func (s *LevelDBSuite) SetUpTest(c *C) {
 	var err error

 	s.path = c.MkDir()
-	s.db, err = OpenDB(s.path)
+	s.db, err = NewOpenDB(s.path)
 	c.Assert(err, IsNil)
 }

@@ -46,7 +46,7 @@ func (s *LevelDBSuite) TestRecoverDB(c *C) {
 	err = RecoverDB(s.path)
 	c.Check(err, IsNil)

-	s.db, err = OpenDB(s.path)
+	s.db, err = NewOpenDB(s.path)
 	c.Check(err, IsNil)

 	result, err := s.db.Get(key)
@@ -223,7 +223,7 @@ func (s *LevelDBSuite) TestReOpen(c *C) {
 	err = s.db.Close()
 	c.Assert(err, IsNil)

-	err = s.db.ReOpen()
+	err = s.db.Open()
 	c.Assert(err, IsNil)

 	result, err := s.db.Get(key)
@@ -22,7 +22,7 @@ func (s *ChecksumCollectionSuite) SetUpTest(c *C) {
 		SHA1:   "da39a3ee5e6b4b0d3255bfef95601890afd80709",
 		SHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
 	}
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.collection = NewChecksumCollection(s.db)
 }

@@ -18,7 +18,7 @@ type LocalRepoSuite struct {
 var _ = Suite(&LocalRepoSuite{})

 func (s *LocalRepoSuite) SetUpTest(c *C) {
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.list = NewPackageList()
 	s.list.Add(&Package{Name: "lib", Version: "1.7", Architecture: "i386"})
 	s.list.Add(&Package{Name: "app", Version: "1.9", Architecture: "amd64"})
@@ -83,7 +83,7 @@ type LocalRepoCollectionSuite struct {
 var _ = Suite(&LocalRepoCollectionSuite{})

 func (s *LocalRepoCollectionSuite) SetUpTest(c *C) {
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.collection = NewLocalRepoCollection(s.db)

 	s.list = NewPackageList()
@@ -17,7 +17,7 @@ var _ = Suite(&PackageCollectionSuite{})

 func (s *PackageCollectionSuite) SetUpTest(c *C) {
 	s.p = NewPackageFromControlFile(packageStanza.Copy())
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.collection = NewPackageCollection(s.db)
 }

@@ -87,7 +87,7 @@ var _ = Suite(&PublishedRepoSuite{})
 func (s *PublishedRepoSuite) SetUpTest(c *C) {
 	s.SetUpPackages()

-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.factory = NewCollectionFactory(s.db)

 	s.root = c.MkDir()
@@ -449,7 +449,7 @@ type PublishedRepoCollectionSuite struct {
 var _ = Suite(&PublishedRepoCollectionSuite{})

 func (s *PublishedRepoCollectionSuite) SetUpTest(c *C) {
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.factory = NewCollectionFactory(s.db)

 	s.snapshotCollection = s.factory.SnapshotCollection()
@@ -640,7 +640,7 @@ type PublishedRepoRemoveSuite struct {
 var _ = Suite(&PublishedRepoRemoveSuite{})

 func (s *PublishedRepoRemoveSuite) SetUpTest(c *C) {
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.factory = NewCollectionFactory(s.db)

 	s.snapshotCollection = s.factory.SnapshotCollection()
@@ -44,7 +44,7 @@ func (s *PackageRefListSuite) SetUpTest(c *C) {
 }

 func (s *PackageRefListSuite) TestNewPackageListFromRefList(c *C) {
-	db, _ := database.OpenDB(c.MkDir())
+	db, _ := database.NewOpenDB(c.MkDir())
 	coll := NewPackageCollection(db)
 	coll.Update(s.p1)
 	coll.Update(s.p3)
@@ -166,7 +166,7 @@ func (s *PackageRefListSuite) TestSubstract(c *C) {
 }

 func (s *PackageRefListSuite) TestDiff(c *C) {
-	db, _ := database.OpenDB(c.MkDir())
+	db, _ := database.NewOpenDB(c.MkDir())
 	coll := NewPackageCollection(db)

 	packages := []*Package{
@@ -238,7 +238,7 @@ func (s *PackageRefListSuite) TestDiff(c *C) {
 }

 func (s *PackageRefListSuite) TestMerge(c *C) {
-	db, _ := database.OpenDB(c.MkDir())
+	db, _ := database.NewOpenDB(c.MkDir())
 	coll := NewPackageCollection(db)

 	packages := []*Package{
@@ -92,7 +92,7 @@ func (s *RemoteRepoSuite) SetUpTest(c *C) {
 	s.flat, _ = NewRemoteRepo("exp42", "http://repos.express42.com/virool/precise/", "./", []string{}, []string{}, false, false)
 	s.downloader = http.NewFakeDownloader().ExpectResponse("http://mirror.yandex.ru/debian/dists/squeeze/Release", exampleReleaseFile)
 	s.progress = console.NewProgress()
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.collectionFactory = NewCollectionFactory(s.db)
 	s.packagePool = files.NewPackagePool(c.MkDir(), false)
 	s.cs = files.NewMockChecksumStorage()
@@ -615,7 +615,7 @@ type RemoteRepoCollectionSuite struct {
 var _ = Suite(&RemoteRepoCollectionSuite{})

 func (s *RemoteRepoCollectionSuite) SetUpTest(c *C) {
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.collection = NewRemoteRepoCollection(s.db)
 	s.SetUpPackages()
 }
@@ -112,7 +112,7 @@ type SnapshotCollectionSuite struct {
 var _ = Suite(&SnapshotCollectionSuite{})

 func (s *SnapshotCollectionSuite) SetUpTest(c *C) {
-	s.db, _ = database.OpenDB(c.MkDir())
+	s.db, _ = database.NewOpenDB(c.MkDir())
 	s.collection = NewSnapshotCollection(s.db)
 	s.SetUpPackages()
main.go (4 changed lines)
@@ -1,7 +1,9 @@
 package main

 import (
+	"math/rand"
 	"os"
+	"time"

 	"github.com/smira/aptly/aptly"
 	"github.com/smira/aptly/cmd"
@@ -17,5 +19,7 @@ func main() {

 	aptly.Version = Version

+	rand.Seed(time.Now().UnixNano())
+
 	os.Exit(cmd.Run(cmd.RootCommand(), os.Args[1:], true))
 }
@@ -1,7 +1,7 @@
 .\" generated with Ronn/v0.7.3
 .\" http://github.com/rtomayko/ronn/tree/0.7.3
 .
-.TH "APTLY" "1" "April 2017" "" ""
+.TH "APTLY" "1" "July 2017" "" ""
 .
 .SH "NAME"
 \fBaptly\fR \- Debian repository management tool
@@ -392,6 +392,10 @@ list of architectures to consider during (comma\-separated), default to all avai
 location of configuration file (default locations are /etc/aptly\.conf, ~/\.aptly\.conf)
 .
 .TP
+\-\fBdb\-open\-attempts\fR=10
+number of attempts to open DB if it\(cqs locked by other instance
+.
+.TP
 \-\fBdep\-follow\-all\-variants\fR=false
 when processing dependencies, follow a & b if dependency is \(cqa|b\(cq
 .
@@ -13,6 +13,7 @@ package environment to new version.
 Options:
   -architectures="": list of architectures to consider during (comma-separated), default to all available
   -config="": location of configuration file (default locations are /etc/aptly.conf, ~/.aptly.conf)
+  -db-open-attempts=10: number of attempts to open DB if it's locked by other instance
   -dep-follow-all-variants=false: when processing dependencies, follow a & b if dependency is 'a|b'
   -dep-follow-recommends=false: when processing dependencies, follow Recommends
   -dep-follow-source=false: when processing dependencies, follow from binary to Source packages
@@ -21,6 +21,7 @@ Use "aptly help <command>" for more information about a command.
 Options:
   -architectures="": list of architectures to consider during (comma-separated), default to all available
   -config="": location of configuration file (default locations are /etc/aptly.conf, ~/.aptly.conf)
+  -db-open-attempts=10: number of attempts to open DB if it's locked by other instance
   -dep-follow-all-variants=false: when processing dependencies, follow a & b if dependency is 'a|b'
   -dep-follow-recommends=false: when processing dependencies, follow Recommends
   -dep-follow-source=false: when processing dependencies, follow from binary to Source packages
@@ -15,6 +15,7 @@ Example:
 Options:
   -architectures="": list of architectures to consider during (comma-separated), default to all available
   -config="": location of configuration file (default locations are /etc/aptly.conf, ~/.aptly.conf)
+  -db-open-attempts=10: number of attempts to open DB if it's locked by other instance
   -dep-follow-all-variants=false: when processing dependencies, follow a & b if dependency is 'a|b'
   -dep-follow-recommends=false: when processing dependencies, follow Recommends
   -dep-follow-source=false: when processing dependencies, follow from binary to Source packages
@@ -6,6 +6,7 @@ aptly mirror create - create new mirror
 Options:
   -architectures="": list of architectures to consider during (comma-separated), default to all available
   -config="": location of configuration file (default locations are /etc/aptly.conf, ~/.aptly.conf)
+  -db-open-attempts=10: number of attempts to open DB if it's locked by other instance
   -dep-follow-all-variants=false: when processing dependencies, follow a & b if dependency is 'a|b'
   -dep-follow-recommends=false: when processing dependencies, follow Recommends
   -dep-follow-source=false: when processing dependencies, follow from binary to Source packages
@@ -17,6 +17,7 @@ Use "mirror help <command>" for more information about a command.
 Options:
   -architectures="": list of architectures to consider during (comma-separated), default to all available
   -config="": location of configuration file (default locations are /etc/aptly.conf, ~/.aptly.conf)
+  -db-open-attempts=10: number of attempts to open DB if it's locked by other instance
   -dep-follow-all-variants=false: when processing dependencies, follow a & b if dependency is 'a|b'
   -dep-follow-recommends=false: when processing dependencies, follow Recommends
   -dep-follow-source=false: when processing dependencies, follow from binary to Source packages
@@ -17,6 +17,7 @@ Use "mirror help <command>" for more information about a command.
 Options:
   -architectures="": list of architectures to consider during (comma-separated), default to all available
   -config="": location of configuration file (default locations are /etc/aptly.conf, ~/.aptly.conf)
+  -db-open-attempts=10: number of attempts to open DB if it's locked by other instance
   -dep-follow-all-variants=false: when processing dependencies, follow a & b if dependency is 'a|b'
   -dep-follow-recommends=false: when processing dependencies, follow Recommends
   -dep-follow-source=false: when processing dependencies, follow from binary to Source packages
@@ -7,6 +7,7 @@ aptly mirror create - create new mirror
 Options:
   -architectures="": list of architectures to consider during (comma-separated), default to all available
   -config="": location of configuration file (default locations are /etc/aptly.conf, ~/.aptly.conf)
+  -db-open-attempts=10: number of attempts to open DB if it's locked by other instance
   -dep-follow-all-variants=false: when processing dependencies, follow a & b if dependency is 'a|b'
   -dep-follow-recommends=false: when processing dependencies, follow Recommends
   -dep-follow-source=false: when processing dependencies, follow from binary to Source packages
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -15,5 +15,5 @@ Description: Long Term Support for Debian 7

 Label: Debian-Security
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.0
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -17,5 +17,5 @@ Description: Long Term Support for Debian 7

 Label: Debian-Security
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.0
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -15,5 +15,5 @@ Description: Long Term Support for Debian 7

 Label: Debian-Security
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.0
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -11,10 +11,10 @@ Information from release file:
 Architectures: amd64 armel armhf i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 s390x sparc
 Codename: wheezy
 Components: main contrib non-free
-Date: Sat, 04 Jun 2016 11:47:54 UTC
+Date: Sat, 17 Jun 2017 08:55:32 UTC
 Description: Debian 7.11 Released 04 June 2016

 Label: Debian
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.11
@@ -17,5 +17,5 @@ Description: Long Term Support for Debian 7

 Label: Debian-Security
 Origin: Debian
-Suite: oldstable
+Suite: oldoldstable
 Version: 7.0