In current aptly, each repository and snapshot has its own reflist in the database. This brings a few problems with it:

- Given sufficiently large repositories and snapshots, these lists can get enormous, reaching >1MB. This is a problem for LevelDB's overall performance, as it tends to prefer values around the configured block size (which defaults to just 4KiB).
- When you take these large repositories and snapshot them, you get a full, new copy of the reflist, even if only a few packages changed. This means that having a lot of snapshots with a few changes causes the database to fill up with largely duplicate reflists.
- All the duplication also means that many of the same refs are loaded repeatedly, which can cause some slowdown but, more notably, eats up huge amounts of memory.
- Adding more and more repositories and snapshots causes the time and memory spent on operations like cleanup and publishing to grow roughly linearly.

At the core, there are two problems here:

- Reflists get very big because there are simply a lot of packages.
- Different reflists tend to duplicate much of the same contents.

*Split reflists* aim to solve this by separating reflists into 64 *buckets*. Package refs are sorted into individual buckets according to the following system (see the sketch after this message):

- Take the first 3 letters of the package name, after dropping a `lib` prefix. (Using only the first 3 letters causes packages with similar prefixes to end up in the same bucket, under the assumption that packages with similar names tend to be updated together.)
- Take the 64-bit xxhash of these letters. (xxhash was chosen because it has relatively good distribution across the individual bits, which is important for the next step.)
- Use the first 6 bits of the hash (range [0:63]) as an index into the buckets.

Once refs are placed in buckets, a sha256 digest of all the refs in each bucket is taken. The buckets are then stored in the database, split into roughly block-sized segments, and the repositories and snapshots simply store an array of bucket digests.

This approach means that *repositories and snapshots can share their reflist buckets*. If a snapshot is taken of a repository, it will have the same contents, so its split reflist will point to the same buckets as the base repository, and only one copy of each bucket is stored in the database. When some packages in the repository change, only the buckets containing those packages are modified; all the other buckets remain unchanged, and thus their contents are still shared. Later on, when these reflists are loaded, each bucket is only loaded once, short-cutting the loading of many megabytes of data. In effect, split reflists are essentially copy-on-write, with only the changed buckets stored individually.

Changing the disk format means that a migration needs to take place, so that task is moved into the database cleanup step, which migrates reflists over to split reflists and deletes any unused reflist buckets.

All the reflist tests are also changed to additionally test split reflists; although the internal logic is all shared (since buckets are, themselves, just normal reflists), some special additions are needed to provide native versions of the various reflist helper methods.

In our tests, we've observed the following improvements:

- Memory usage during publish and database cleanup, with `GOMEMLIMIT=2GiB`, goes down from ~3.2GiB (larger than the memory limit!) to ~0.7GiB, a decrease of ~4.5x.
- Database size decreases from 1.3GB to 367MB.

*In my local tests*, publish times also decreased to mere seconds, but the same effect wasn't observed on the server, with the times staying around the same. My suspicion is that this is due to I/O performance: my local system is an M1 MBP, which almost certainly has much faster disk speeds than our DigitalOcean block volumes. Split reflists have the side effect of requiring more random accesses when reading all the buckets by their keys, so if your random I/O performance is slower, it might cancel out the benefits. That being said, even in that case, the memory usage and database size advantages still persist.

Signed-off-by: Ryan Gonzalez <ryan.gonzalez@collabora.com>
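As a rough illustration of the bucket-selection scheme above, here is a minimal, hypothetical sketch; `bucketIdx` is an invented name (the real implementation lives in aptly's `deb` package and may differ in detail), and "first 6 bits" is read here as the most significant bits of the 64-bit hash:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/cespare/xxhash/v2"
)

// bucketIdx sketches the bucket-selection scheme described above; it is an
// illustration, not aptly's actual implementation.
func bucketIdx(packageName string) uint64 {
	// Drop a "lib" prefix so that, e.g., "libssl" hashes like "ssl".
	name := strings.TrimPrefix(packageName, "lib")

	// Keep only the first 3 letters, grouping similarly named packages
	// (which tend to be updated together) into the same bucket.
	if len(name) > 3 {
		name = name[:3]
	}

	// xxhash distributes well across individual bits; use the top 6 bits
	// of the 64-bit hash as an index in the range [0:63].
	return xxhash.Sum64String(name) >> 58
}

func main() {
	fmt.Println(bucketIdx("libssl3"))    // same bucket as...
	fmt.Println(bucketIdx("libssl-dev")) // ...other "ssl"-prefixed packages
}
```

Each repository and snapshot then stores only the per-bucket sha256 digests, so buckets with identical contents are stored once and shared by reference.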
// Package api provides implementation of aptly REST API
package api

import (
	"fmt"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"sync/atomic"

	"github.com/aptly-dev/aptly/aptly"
	"github.com/aptly-dev/aptly/deb"
	"github.com/aptly-dev/aptly/query"
	"github.com/aptly-dev/aptly/task"
	"github.com/gin-gonic/gin"
	"github.com/rs/zerolog/log"
)

// Lock order acquisition (canonical):
// 1. RemoteRepoCollection
// 2. LocalRepoCollection
// 3. SnapshotCollection
// 4. PublishedRepoCollection

type aptlyVersion struct {
	// Aptly Version
	Version string `json:"Version"`
}

// @Summary Aptly Version
// @Description **Get aptly version**
// @Description
// @Description **Example:**
// @Description ```
// @Description $ curl http://localhost:8080/api/version
// @Description {"Version":"0.9~dev"}
// @Description ```
// @Tags Status
// @Produce json
// @Success 200 {object} aptlyVersion
// @Router /api/version [get]
func apiVersion(c *gin.Context) {
	c.JSON(200, gin.H{"Version": aptly.Version})
}

type aptlyStatus struct {
	// Aptly Status
	Status string `json:"Status" example:"'Aptly is ready', 'Aptly is unavailable', 'Aptly is healthy'"`
}

// @Summary Get Ready State
// @Description **Get aptly ready state**
// @Description
// @Description Return aptly ready state:
// @Description - `Aptly is ready` (HTTP 200)
// @Description - `Aptly is unavailable` (HTTP 503)
// @Tags Status
// @Produce json
// @Success 200 {object} aptlyStatus "Aptly is ready"
// @Failure 503 {object} aptlyStatus "Aptly is unavailable"
// @Router /api/ready [get]
func apiReady(isReady *atomic.Value) func(*gin.Context) {
	return func(c *gin.Context) {
		if isReady == nil || !isReady.Load().(bool) {
			c.JSON(503, gin.H{"Status": "Aptly is unavailable"})
			return
		}

		c.JSON(200, gin.H{"Status": "Aptly is ready"})
	}
}

// @Summary Get Health State
// @Description **Get aptly health state**
// @Description
// @Description Return aptly health state:
// @Description - `Aptly is healthy` (HTTP 200)
// @Tags Status
// @Produce json
// @Success 200 {object} aptlyStatus
// @Router /api/healthy [get]
func apiHealthy(c *gin.Context) {
	c.JSON(200, gin.H{"Status": "Aptly is healthy"})
}
type dbRequestKind int

const (
	acquiredb dbRequestKind = iota
	releasedb
)

type dbRequest struct {
	kind dbRequestKind
	err  chan<- error
}

var dbRequests chan dbRequest
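
// The helpers below reference-count database usage through dbRequests: the
// first client to acquire triggers a re-open of the database, and the last
// client to release closes it again. A typical call site (a sketch mirroring
// runTaskInBackground below) looks like:
//
//	if err := acquireDatabaseConnection(); err != nil {
//		return nil, err
//	}
//	defer releaseDatabaseConnection()
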
// acquireDatabase serves acquire/release requests for the database, re-opening
// it for the first client and closing it once the last client has released it.
//
// Should be run in a goroutine!
func acquireDatabase() {
	clients := 0
	for request := range dbRequests {
		var err error

		switch request.kind {
		case acquiredb:
			if clients == 0 {
				err = context.ReOpenDatabase()
			}

			request.err <- err

			if err == nil {
				clients++
			}
		case releasedb:
			clients--
			if clients == 0 {
				err = context.CloseDatabase()
			} else {
				err = nil
			}

			request.err <- err
		}
	}
}

// acquireDatabaseConnection should be called before any database access in an
// API call; this happens by default for each API call. Tasks must be run via
// runTaskInBackground, which acquires the database for them. Do not forget to
// defer releaseDatabaseConnection.
func acquireDatabaseConnection() error {
	if dbRequests == nil {
		return nil
	}

	errCh := make(chan error)
	dbRequests <- dbRequest{acquiredb, errCh}

	return <-errCh
}

// Release database connection when not needed anymore
func releaseDatabaseConnection() error {
	if dbRequests == nil {
		return nil
	}

	errCh := make(chan error)
	dbRequests <- dbRequest{releasedb, errCh}
	return <-errCh
}

// runTaskInBackground runs a task in the background, acquiring the database
// connection first.
func runTaskInBackground(name string, resources []string, proc task.Process) (task.Task, *task.ResourceConflictError) {
	return context.TaskList().RunTaskInBackground(name, resources, func(out aptly.Progress, detail *task.Detail) (*task.ProcessReturnValue, error) {
		err := acquireDatabaseConnection()

		if err != nil {
			return nil, err
		}

		defer releaseDatabaseConnection()
		return proc(out, detail)
	})
}
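
// truthy interprets common query-parameter values: nil is false; the strings
// "n", "no", "f", "false", "0", and "off" (case-insensitive) are false; an
// int is true when non-zero; a bool is returned as-is; anything else is true.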
func truthy(value interface{}) bool {
	if value == nil {
		return false
	}
	switch v := value.(type) {
	case string:
		switch strings.ToLower(v) {
		case "n", "no", "f", "false", "0", "off":
			return false
		default:
			return true
		}
	case int:
		return v != 0
	case bool:
		return v
	}
	return true
}
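
// maybeRunTaskInBackground is the usual entry point for API handlers; a
// hypothetical handler (names invented for illustration) would look like:
//
//	func apiSomethingDo(c *gin.Context) {
//		maybeRunTaskInBackground(c, "Do something", []string{"some-resource"},
//			func(_ aptly.Progress, _ *task.Detail) (*task.ProcessReturnValue, error) {
//				return &task.ProcessReturnValue{Code: http.StatusOK, Value: nil}, nil
//			})
//	}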
func maybeRunTaskInBackground(c *gin.Context, name string, resources []string, proc task.Process) {
	// Run this task in background if configured globally or per-request
	background := truthy(c.DefaultQuery("_async", strconv.FormatBool(context.Config().AsyncAPI)))
	if background {
		log.Debug().Msg("Executing task asynchronously")
		task, conflictErr := runTaskInBackground(name, resources, proc)
		if conflictErr != nil {
			AbortWithJSONError(c, 409, conflictErr)
			return
		}
		c.JSON(202, task)
	} else {
		log.Debug().Msg("Executing task synchronously")
		task, conflictErr := runTaskInBackground(name, resources, proc)
		if conflictErr != nil {
			AbortWithJSONError(c, 409, conflictErr)
			return
		}

		// wait for task to finish
		context.TaskList().WaitForTaskByID(task.ID)

		retValue, _ := context.TaskList().GetTaskReturnValueByID(task.ID)
		err, _ := context.TaskList().GetTaskErrorByID(task.ID)
		context.TaskList().DeleteTaskByID(task.ID)
		if err != nil {
			// Guard against a missing return value so a failed task cannot
			// cause a nil-pointer dereference when reading the status code.
			code := http.StatusInternalServerError
			if retValue != nil {
				code = retValue.Code
			}
			AbortWithJSONError(c, code, err)
			return
		}
		if retValue != nil {
			c.JSON(retValue.Code, retValue.Value)
		} else {
			c.JSON(http.StatusOK, nil)
		}
	}
}

// Common piece of code to show list of packages,
// with searching & details if requested
func showPackages(c *gin.Context, reflist deb.AnyRefList, collectionFactory *deb.CollectionFactory) {
	result := []*deb.Package{}

	list, err := deb.NewPackageListFromRefList(reflist, collectionFactory.PackageCollection(), nil)
	if err != nil {
		AbortWithJSONError(c, 404, err)
		return
	}
	queryS := c.Request.URL.Query().Get("q")
	if queryS != "" {
		q, err := query.Parse(c.Request.URL.Query().Get("q"))
		if err != nil {
			AbortWithJSONError(c, 400, err)
			return
		}

		withDeps := c.Request.URL.Query().Get("withDeps") == "1"
		architecturesList := []string{}

		if withDeps {
			if len(context.ArchitecturesList()) > 0 {
				architecturesList = context.ArchitecturesList()
			} else {
				architecturesList = list.Architectures(false)
			}

			sort.Strings(architecturesList)

			if len(architecturesList) == 0 {
				AbortWithJSONError(c, 400, fmt.Errorf("unable to determine list of architectures, please specify explicitly"))
				return
			}
		}

		list.PrepareIndex()

		list, err = list.Filter(deb.FilterOptions{
			Queries:           []deb.PackageQuery{q},
			WithDependencies:  withDeps,
			Source:            nil,
			DependencyOptions: context.DependencyOptions(),
			Architectures:     architecturesList,
		})
		if err != nil {
			AbortWithJSONError(c, 500, fmt.Errorf("unable to search: %s", err))
			return
		}
	}
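
	// With maximumVersion=1, only the highest version of each package name
	// survives: for every package, all packages carrying the same name with a
	// version less than or equal to it are removed, and the package itself is
	// re-added, so each name ends up mapped to its maximum version.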
	// filter packages by version
	if c.Request.URL.Query().Get("maximumVersion") == "1" {
		list.PrepareIndex()
		list.ForEach(func(p *deb.Package) error {
			versionQ, err := query.Parse(fmt.Sprintf("Name (%s), $Version (<= %s)", p.Name, p.Version))
			if err != nil {
				fmt.Println("filter packages by version, query string parse err: ", err)
				c.AbortWithError(500, fmt.Errorf("unable to parse %s maximum version query string: %s", p.Name, err))
			} else {
				tmpList, err := list.Filter(deb.FilterOptions{
					Queries: []deb.PackageQuery{versionQ},
				})

				if err == nil {
					if tmpList.Len() > 0 {
						tmpList.ForEach(func(tp *deb.Package) error {
							list.Remove(tp)
							return nil
						})
						list.Add(p)
					}
				} else {
					fmt.Println("filter packages by version, filter err: ", err)
					c.AbortWithError(500, fmt.Errorf("unable to get %s maximum version: %s", p.Name, err))
				}
			}

			return nil
		})
	}
	if c.Request.URL.Query().Get("format") == "details" {
		list.ForEach(func(p *deb.Package) error {
			result = append(result, p)
			return nil
		})

		c.JSON(200, result)
	} else {
		c.JSON(200, list.Strings())
	}
}
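
// AbortWithJSONError aborts the request with the given status code and error,
// setting the JSON content type on the response.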
func AbortWithJSONError(c *gin.Context, code int, err error) *gin.Error {
	c.Writer.Header().Set("Content-Type", "application/json; charset=utf-8")
	return c.AbortWithError(code, err)
}