Add support for Azure package pools

This adds support for storing packages directly on Azure, with no truly
"local" (on-disk) repo used. The existing Azure PublishedStorage
implementation was refactored to move the shared code to a separate
context struct, which can then be re-used by the new PackagePool. In
addition, the files package's mockChecksumStorage was made public so
that the Azure PackagePool tests can reuse it as well.

Signed-off-by: Ryan Gonzalez <ryan.gonzalez@collabora.com>
This commit is contained in:
Ryan Gonzalez
2022-05-17 08:52:59 -05:00
committed by André Roth
parent 810df17009
commit f9325fbc91
16 changed files with 820 additions and 148 deletions

View File

@@ -42,7 +42,21 @@ func (cksum *ChecksumInfo) Complete() bool {
return cksum.MD5 != "" && cksum.SHA1 != "" && cksum.SHA256 != "" && cksum.SHA512 != ""
}
// ChecksumsForFile generates size, MD5, SHA1 & SHA256 checksums for given file
// ChecksumsForReader generates size, MD5, SHA1 & SHA256 checksums for the given
// io.Reader
func ChecksumsForReader(rd io.Reader) (ChecksumInfo, error) {
	hasher := NewChecksumWriter()
	// Stream the reader through the checksum writer; size and digests
	// accumulate as a side effect of the copy.
	if _, err := io.Copy(hasher, rd); err != nil {
		return ChecksumInfo{}, err
	}
	return hasher.Sum(), nil
}
// ChecksumsForFile generates size, MD5, SHA1 & SHA256 checksums for the file at
// the given path
func ChecksumsForFile(path string) (ChecksumInfo, error) {
file, err := os.Open(path)
if err != nil {
@@ -50,14 +64,7 @@ func ChecksumsForFile(path string) (ChecksumInfo, error) {
}
defer file.Close()
w := NewChecksumWriter()
_, err = io.Copy(w, file)
if err != nil {
return ChecksumInfo{}, err
}
return w.Sum(), nil
return ChecksumsForReader(file)
}
// ChecksumWriter is a writer that does checksum calculation on the fly passing data

View File

@@ -2,6 +2,7 @@ package utils
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
)
@@ -24,7 +25,7 @@ type ConfigStructure struct { // nolint: maligned
GpgDisableVerify bool `json:"gpgDisableVerify"`
GpgProvider string `json:"gpgProvider"`
DownloadSourcePackages bool `json:"downloadSourcePackages"`
PackagePoolStorage PackagePool `json:"packagePoolStorage"`
PackagePoolStorage PackagePoolStorage `json:"packagePoolStorage"`
SkipLegacyPool bool `json:"skipLegacyPool"`
PpaDistributorID string `json:"ppaDistributorID"`
PpaCodename string `json:"ppaCodename"`
@@ -33,7 +34,7 @@ type ConfigStructure struct { // nolint: maligned
FileSystemPublishRoots map[string]FileSystemPublishRoot `json:"FileSystemPublishEndpoints"`
S3PublishRoots map[string]S3PublishRoot `json:"S3PublishEndpoints"`
SwiftPublishRoots map[string]SwiftPublishRoot `json:"SwiftPublishEndpoints"`
AzurePublishRoots map[string]AzurePublishRoot `json:"AzurePublishEndpoints"`
AzurePublishRoots map[string]AzureEndpoint `json:"AzurePublishEndpoints"`
AsyncAPI bool `json:"AsyncAPI"`
EnableMetricsEndpoint bool `json:"enableMetricsEndpoint"`
LogLevel string `json:"logLevel"`
@@ -41,8 +42,52 @@ type ConfigStructure struct { // nolint: maligned
ServeInAPIMode bool `json:"serveInAPIMode"`
}
type PackagePool struct {
Path string `json:"path"`
// LocalPoolStorage describes a package pool stored on the local filesystem.
// An empty Path selects the default pool location.
type LocalPoolStorage struct {
	Path string `json:"path,omitempty"`
}
// PackagePoolStorage is a tagged union of the supported package pool
// backends; exactly one of the fields is expected to be non-nil.
type PackagePoolStorage struct {
	Local *LocalPoolStorage
	Azure *AzureEndpoint
}
// UnmarshalJSON decodes a package pool configuration, dispatching on the
// "type" discriminator field: "azure" selects Azure storage, while "local"
// (or a missing type, for backward compatibility) selects local storage.
// Unknown types are rejected with an error.
func (pool *PackagePoolStorage) UnmarshalJSON(data []byte) error {
	var discriminator struct {
		Type string `json:"type"`
	}

	// First pass: read only the discriminator to decide the target type.
	if err := json.Unmarshal(data, &discriminator); err != nil {
		return err
	}

	// Second pass: decode the full payload into the selected variant.
	// The fields are already pointers, so pass them directly rather than
	// taking their address (which would yield a needless **T).
	switch discriminator.Type {
	case "azure":
		pool.Azure = &AzureEndpoint{}
		return json.Unmarshal(data, pool.Azure)
	case "local", "":
		pool.Local = &LocalPoolStorage{}
		return json.Unmarshal(data, pool.Local)
	default:
		return fmt.Errorf("unknown pool storage type: %s", discriminator.Type)
	}
}
// MarshalJSON encodes the configured pool backend as a flat JSON object with
// a "type" discriminator, mirroring UnmarshalJSON. A default local pool
// (nil or empty Path) is emitted as an empty object so round-tripping keeps
// the backward-compatible "no type" form.
func (pool *PackagePoolStorage) MarshalJSON() ([]byte, error) {
	var wrapper struct {
		Type string `json:"type,omitempty"`
		*LocalPoolStorage
		*AzureEndpoint
	}

	switch {
	case pool.Azure != nil:
		wrapper.Type = "azure"
		wrapper.AzureEndpoint = pool.Azure
	// Guard against a nil Local: a zero-value PackagePoolStorage must not
	// panic here (the original dereferenced pool.Local unconditionally).
	case pool.Local != nil && pool.Local.Path != "":
		wrapper.Type = "local"
		wrapper.LocalPoolStorage = pool.Local
	}

	return json.Marshal(wrapper)
}
// FileSystemPublishRoot describes single filesystem publishing entry point
@@ -86,8 +131,8 @@ type SwiftPublishRoot struct {
Container string `json:"container"`
}
// AzurePublishRoot describes single Azure publishing entry point
type AzurePublishRoot struct {
// AzureEndpoint describes single Azure publishing entry point
type AzureEndpoint struct {
AccountName string `json:"accountName"`
AccountKey string `json:"accountKey"`
Container string `json:"container"`
@@ -111,14 +156,16 @@ var Config = ConfigStructure{
GpgDisableSign: false,
GpgDisableVerify: false,
DownloadSourcePackages: false,
PackagePoolStorage: PackagePool{Path: ""},
PackagePoolStorage: PackagePoolStorage{
Local: &LocalPoolStorage{Path: ""},
},
SkipLegacyPool: false,
PpaDistributorID: "ubuntu",
PpaCodename: "",
FileSystemPublishRoots: map[string]FileSystemPublishRoot{},
S3PublishRoots: map[string]S3PublishRoot{},
SwiftPublishRoots: map[string]SwiftPublishRoot{},
AzurePublishRoots: map[string]AzurePublishRoot{},
AzurePublishRoots: map[string]AzureEndpoint{},
AsyncAPI: false,
EnableMetricsEndpoint: false,
LogLevel: "debug",

View File

@@ -34,7 +34,7 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
s.config.DatabaseOpenAttempts = 5
s.config.GpgProvider = "gpg"
s.config.PackagePoolStorage.Path = "/tmp/aptly-pool"
s.config.PackagePoolStorage.Local = &LocalPoolStorage{"/tmp/aptly-pool"}
s.config.FileSystemPublishRoots = map[string]FileSystemPublishRoot{"test": {
RootDir: "/opt/aptly-publish"}}
@@ -46,7 +46,7 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
s.config.SwiftPublishRoots = map[string]SwiftPublishRoot{"test": {
Container: "repo"}}
s.config.AzurePublishRoots = map[string]AzurePublishRoot{"test": {
s.config.AzurePublishRoots = map[string]AzureEndpoint{"test": {
Container: "repo"}}
s.config.LogLevel = "info"
@@ -81,6 +81,7 @@ func (s *ConfigSuite) TestSaveConfig(c *C) {
" \"gpgProvider\": \"gpg\",\n"+
" \"downloadSourcePackages\": false,\n"+
" \"packagePoolStorage\": {\n"+
" \"type\": \"local\",\n"+
" \"path\": \"/tmp/aptly-pool\"\n"+
" },\n"+
" \"skipLegacyPool\": false,\n"+