Merge pull request #1186 from aptly-dev/feature/384-generate-checksums-for-component-files

Feature/384-generate-checksums-for-component-files
André Roth
2024-11-17 17:33:41 +01:00
committed by GitHub
14 changed files with 193 additions and 17 deletions

View File

@@ -65,3 +65,4 @@ List of contributors, in chronological order:
* Cookie Fei (https://github.com/wuhuang26)
* Andrey Loukhnov (https://github.com/aol-nnov)
* Christoph Fiehe (https://github.com/cfiehe)
* Blake Kostner (https://github.com/btkostner)

View File

@@ -340,7 +340,7 @@ func apiPublishRepoOrSnapshot(c *gin.Context) {
return &task.ProcessReturnValue{Code: http.StatusBadRequest, Value: nil}, fmt.Errorf("prefix/distribution already used by another published repo: %s", duplicate)
}
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, publishOutput, b.ForceOverwrite)
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, publishOutput, b.ForceOverwrite, context.SkelPath())
if err != nil {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("unable to publish: %s", err)
}
@@ -482,7 +482,7 @@ func apiPublishUpdateSwitch(c *gin.Context) {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("Unable to update: %s", err)
}
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, out, b.ForceOverwrite)
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, out, b.ForceOverwrite, context.SkelPath())
if err != nil {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("Unable to update: %s", err)
}
@@ -1018,7 +1018,7 @@ func apiPublishUpdate(c *gin.Context) {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("unable to update: %s", err)
}
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, out, b.ForceOverwrite)
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, out, b.ForceOverwrite, context.SkelPath())
if err != nil {
return &task.ProcessReturnValue{Code: http.StatusInternalServerError, Value: nil}, fmt.Errorf("unable to update: %s", err)
}

View File

@@ -170,7 +170,7 @@ func aptlyPublishSnapshotOrRepo(cmd *commander.Command, args []string) error {
context.Progress().ColoredPrintf("@rWARNING@|: force overwrite mode enabled, aptly might corrupt other published repositories sharing the same package pool.\n")
}
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, context.Progress(), forceOverwrite)
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, context.Progress(), forceOverwrite, context.SkelPath())
if err != nil {
return fmt.Errorf("unable to publish: %s", err)
}

View File

@@ -103,7 +103,7 @@ func aptlyPublishSwitch(cmd *commander.Command, args []string) error {
published.MultiDist = context.Flags().Lookup("multi-dist").Value.Get().(bool)
}
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, context.Progress(), forceOverwrite)
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, context.Progress(), forceOverwrite, context.SkelPath())
if err != nil {
return fmt.Errorf("unable to publish: %s", err)
}

View File

@@ -64,7 +64,7 @@ func aptlyPublishUpdate(cmd *commander.Command, args []string) error {
published.MultiDist = context.Flags().Lookup("multi-dist").Value.Get().(bool)
}
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, context.Progress(), forceOverwrite)
err = published.Publish(context.PackagePool(), context, collectionFactory, signer, context.Progress(), forceOverwrite, context.SkelPath())
if err != nil {
return fmt.Errorf("unable to publish: %s", err)
}

View File

@@ -297,7 +297,7 @@ func (context *AptlyContext) _database() (database.Storage, error) {
if len(context.config().DatabaseBackend.DbPath) == 0 {
return nil, errors.New("leveldb databaseBackend config invalid")
}
dbPath := filepath.Join(context.config().RootDir, context.config().DatabaseBackend.DbPath)
dbPath := filepath.Join(context.config().GetRootDir(), context.config().DatabaseBackend.DbPath)
context.database, err = goleveldb.NewDB(dbPath)
case "etcd":
context.database, err = etcddb.NewDB(context.config().DatabaseBackend.URL)
@@ -388,7 +388,7 @@ func (context *AptlyContext) PackagePool() aptly.PackagePool {
} else {
poolRoot := context.config().PackagePoolStorage.Local.Path
if poolRoot == "" {
poolRoot = filepath.Join(context.config().RootDir, "pool")
poolRoot = filepath.Join(context.config().GetRootDir(), "pool")
}
context.packagePool = files.NewPackagePool(poolRoot, !context.config().SkipLegacyPool)
@@ -527,6 +527,11 @@ func (context *AptlyContext) GetVerifier() pgp.Verifier {
return pgp.NewGpgVerifier(context.getGPGFinder())
}
// SkelPath builds the local skeleton folder
func (context *AptlyContext) SkelPath() string {
return filepath.Join(context.config().GetRootDir(), "skel")
}
// UpdateFlags sets internal copy of flags in the context
func (context *AptlyContext) UpdateFlags(flags *flag.FlagSet) {
context.Lock()

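For orientation, a minimal standalone sketch (not part of the commit) of what the new SkelPath helper resolves to; the rootDir value below is purely hypothetical:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// SkelPath joins the configured root directory with "skel",
	// mirroring context.config().GetRootDir() in the hunk above.
	rootDir := "/var/lib/aptly" // hypothetical GetRootDir() value
	fmt.Println(filepath.Join(rootDir, "skel")) // /var/lib/aptly/skel
}
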
View File

@@ -389,6 +389,27 @@ func (files *indexFiles) LegacyContentsIndex(arch string, udeb bool) *indexFile
return file
}
func (files *indexFiles) SkelIndex(component, path string) *indexFile {
key := fmt.Sprintf("si-%s-%s", component, path)
file, ok := files.indexes[key]
if !ok {
relativePath := filepath.Join(component, path)
file = &indexFile{
parent: files,
discardable: false,
compressable: false,
onlyGzip: false,
relativePath: relativePath,
}
files.indexes[key] = file
}
return file
}
func (files *indexFiles) ReleaseFile() *indexFile {
return &indexFile{
parent: files,

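SkelIndex reuses the cache-by-key pattern of the other index accessors, so repeated lookups for the same (component, path) pair return the same index file. A minimal, generic sketch of that pattern; the names here are illustrative, not aptly's own:

package main

import (
	"fmt"
	"path/filepath"
)

// indexEntry stands in for deb's indexFile; only the field used here is kept.
type indexEntry struct {
	relativePath string
}

// skelIndex caches one entry per (component, path) pair, as SkelIndex above does.
func skelIndex(cache map[string]*indexEntry, component, path string) *indexEntry {
	key := fmt.Sprintf("si-%s-%s", component, path)
	entry, ok := cache[key]
	if !ok {
		entry = &indexEntry{relativePath: filepath.Join(component, path)}
		cache[key] = entry
	}
	return entry
}

func main() {
	cache := make(map[string]*indexEntry)
	a := skelIndex(cache, "main", "dep11/README")
	b := skelIndex(cache, "main", "dep11/README")
	fmt.Println(a.relativePath, a == b) // main/dep11/README true
}
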
View File

@@ -773,9 +773,47 @@ func (p *PublishedRepo) GetCodename() string {
return p.Codename
}
// GetSkelFiles returns a map of files to be added to a repo. Key being the relative
// path from component folder, and value being the full local FS path.
func (p *PublishedRepo) GetSkelFiles(skelDir string, component string) (map[string]string, error) {
files := make(map[string]string)
if skelDir == "" {
return files, nil
}
fsPath := filepath.Join(skelDir, p.Prefix, "dists", p.Distribution, component)
if err := filepath.Walk(fsPath, func(path string, _ os.FileInfo, err error) error {
if err != nil {
return err
}
stat, err := os.Stat(path)
if err != nil {
return err
}
if !stat.Mode().IsRegular() {
return nil
}
relativePath, err := filepath.Rel(fsPath, path)
if err != nil {
return err
}
files[relativePath] = path
return nil
}); err != nil && !os.IsNotExist(err) {
return files, err
}
return files, nil
}
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageProvider aptly.PublishedStorageProvider,
collectionFactory *CollectionFactory, signer pgp.Signer, progress aptly.Progress, forceOverwrite bool) error {
collectionFactory *CollectionFactory, signer pgp.Signer, progress aptly.Progress, forceOverwrite bool, skelDir string) error {
publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)
err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
@@ -984,6 +1022,30 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
}
}
for component := range p.sourceItems {
skelFiles, err := p.GetSkelFiles(skelDir, component)
if err != nil {
return fmt.Errorf("unable to get skeleton files: %v", err)
}
for relPath, absPath := range skelFiles {
bufWriter, err := indexes.SkelIndex(component, relPath).BufWriter()
if err != nil {
return fmt.Errorf("unable to generate skeleton index: %v", err)
}
file, err := os.Open(absPath)
if err != nil {
return fmt.Errorf("unable to read skeleton file: %v", err)
}
_, err = bufio.NewReader(file).WriteTo(bufWriter)
if err != nil {
return fmt.Errorf("unable to write skeleton file: %v", err)
}
}
}
udebs := []bool{false}
if hadUdebs {
udebs = append(udebs, true)

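Taken together, GetSkelFiles walks skelDir/prefix/dists/distribution/component and Publish streams every regular file it finds into a SkelIndex entry, which is how skeleton files such as dep11 metadata end up listed with checksums in the generated Release file. A rough standalone approximation of that walk (not aptly code; the directory layout in main is hypothetical):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// skelFiles approximates GetSkelFiles: map the path relative to the component
// directory to the absolute path on disk, skipping anything that is not a
// regular file and tolerating a missing skeleton directory.
func skelFiles(componentDir string) (map[string]string, error) {
	files := make(map[string]string)
	err := filepath.Walk(componentDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.Mode().IsRegular() {
			return nil
		}
		rel, err := filepath.Rel(componentDir, path)
		if err != nil {
			return err
		}
		files[rel] = path
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return files, nil
}

func main() {
	// e.g. ~/.aptly/skel/dists/maverick/main containing dep11/README
	dir := filepath.Join(os.Getenv("HOME"), ".aptly", "skel", "dists", "maverick", "main")
	files, err := skelFiles(dir)
	fmt.Println(files, err)
}
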
View File

@@ -197,7 +197,7 @@ func (s *PublishedRepoSuite) TestNewPublishedRepo(c *C) {
func (s *PublishedRepoSuite) TestMultiDistPool(c *C) {
repo, err := NewPublishedRepo("", "ppa", "squeeze", nil, []string{"main"}, []interface{}{s.snapshot}, s.factory, true)
c.Assert(err, IsNil)
err = repo.Publish(s.packagePool, s.provider, s.factory, &NullSigner{}, nil, false)
err = repo.Publish(s.packagePool, s.provider, s.factory, &NullSigner{}, nil, false, "")
c.Assert(err, IsNil)
publishedStorage := files.NewPublishedStorage(s.root, "", "")
@@ -380,7 +380,7 @@ func (s *PublishedRepoSuite) TestUpdate(c *C) {
}
func (s *PublishedRepoSuite) TestPublish(c *C) {
err := s.repo.Publish(s.packagePool, s.provider, s.factory, &NullSigner{}, nil, false)
err := s.repo.Publish(s.packagePool, s.provider, s.factory, &NullSigner{}, nil, false, "")
c.Assert(err, IsNil)
c.Check(s.repo.Architectures, DeepEquals, []string{"i386"})
@@ -427,7 +427,7 @@ func (s *PublishedRepoSuite) TestPublish(c *C) {
}
func (s *PublishedRepoSuite) TestPublishNoSigner(c *C) {
err := s.repo.Publish(s.packagePool, s.provider, s.factory, nil, nil, false)
err := s.repo.Publish(s.packagePool, s.provider, s.factory, nil, nil, false, "")
c.Assert(err, IsNil)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/squeeze/Release"), PathExists)
@@ -435,7 +435,7 @@ func (s *PublishedRepoSuite) TestPublishNoSigner(c *C) {
}
func (s *PublishedRepoSuite) TestPublishLocalRepo(c *C) {
err := s.repo2.Publish(s.packagePool, s.provider, s.factory, nil, nil, false)
err := s.repo2.Publish(s.packagePool, s.provider, s.factory, nil, nil, false, "")
c.Assert(err, IsNil)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/maverick/Release"), PathExists)
@@ -443,7 +443,7 @@ func (s *PublishedRepoSuite) TestPublishLocalRepo(c *C) {
}
func (s *PublishedRepoSuite) TestPublishLocalSourceRepo(c *C) {
err := s.repo4.Publish(s.packagePool, s.provider, s.factory, nil, nil, false)
err := s.repo4.Publish(s.packagePool, s.provider, s.factory, nil, nil, false, "")
c.Assert(err, IsNil)
c.Check(filepath.Join(s.publishedStorage.PublicPath(), "ppa/dists/maverick/Release"), PathExists)
@@ -451,7 +451,7 @@ func (s *PublishedRepoSuite) TestPublishLocalSourceRepo(c *C) {
}
func (s *PublishedRepoSuite) TestPublishOtherStorage(c *C) {
err := s.repo5.Publish(s.packagePool, s.provider, s.factory, nil, nil, false)
err := s.repo5.Publish(s.packagePool, s.provider, s.factory, nil, nil, false, "")
c.Assert(err, IsNil)
c.Check(filepath.Join(s.publishedStorage2.PublicPath(), "ppa/dists/maverick/Release"), PathExists)

View File

@@ -121,7 +121,8 @@ Options:
* `rootDir`:
is root of directory storage to store database (`rootDir`/db),
the default for downloaded packages (`rootDir`/pool) and
the default for published repositories (`rootDir`/public)
the default for published repositories (`rootDir`/public) and
skeleton files (`rootDir`/skel)
* `databaseBackend`:
the database config; if this config is empty, the leveldb backend is used by default

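For reference, a minimal sketch (not from the commit) of the paths implied by the documentation above, including the new skeleton location; the rootDir value is hypothetical:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	rootDir := "/opt/aptly" // hypothetical rootDir from aptly.conf
	fmt.Println(filepath.Join(rootDir, "db"))     // database
	fmt.Println(filepath.Join(rootDir, "pool"))   // downloaded packages
	fmt.Println(filepath.Join(rootDir, "public")) // published repositories
	fmt.Println(filepath.Join(rootDir, "skel"))   // skeleton files (new)
}
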
View File

@@ -402,6 +402,15 @@ class BaseTest(object):
else:
raise
def write_file(self, path, content):
full_path = os.path.join(os.environ["HOME"], ".aptly", path)
if not os.path.exists(os.path.dirname(full_path)):
os.makedirs(os.path.dirname(full_path), 0o755)
with open(full_path, "w") as f:
f.write(content)
def read_file(self, path, mode=''):
with open(os.path.join(os.environ["HOME"], self.aptlyDir, path), "r" + mode) as f:
return f.read()

View File

@@ -0,0 +1,14 @@
Loading packages...
Generating metadata files and linking package files...
Finalizing metadata files...
Signing file 'Release' with gpg, please enter your passphrase when prompted:
Clearsigning file 'Release' with gpg, please enter your passphrase when prompted:
Local repo local-repo has been successfully published.
Please setup your webserver to serve directory '${HOME}/.aptly/public' with autoindexing.
Now you can add following line to apt sources:
deb http://your-server/ maverick main
deb-src http://your-server/ maverick main
Don't forget to add your GPG key to apt with apt-key.
You can also use `aptly serve` to publish your repositories over HTTP quickly.

View File

@@ -888,3 +888,66 @@ class PublishRepo33Test(BaseTest):
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_not_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
class PublishRepo34Test(BaseTest):
"""
publish repo: skeleton files
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}"
]
runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick -skip-contents local-repo"
gold_processor = BaseTest.expand_environ
def prepare_fixture(self):
super(PublishRepo34Test, self).prepare_fixture()
self.write_file(os.path.join('skel', 'dists', 'maverick', 'main', 'dep11', 'README'), 'README test file')
self.write_file(os.path.join('skel', 'dists', 'maverick', 'Release'), 'Release test file')
def check(self):
super(PublishRepo34Test, self).check()
self.check_exists('public/dists/maverick/main/dep11/README')
self.check_exists('public/dists/maverick/Release')
readme = self.read_file('public/dists/maverick/main/dep11/README')
if readme != 'README test file':
raise Exception("README file not copied on publish")
release = self.read_file('public/dists/maverick/Release')
if release == 'Release test file':
raise Exception("Release file was copied on publish")
release = self.read_file('public/dists/maverick/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
elif len(fileHash) == 64:
h = hashlib.sha256()
else:
h = hashlib.sha512()
h.update(self.read_file(os.path.join('public/dists/maverick', path), mode='b'))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
if 'main/dep11/README' not in pathsSeen:
raise Exception("README file not included in release file")

View File

@@ -21,7 +21,7 @@ func (s *ConfigSuite) TestLoadConfig(c *C) {
err := LoadConfig(configname, &s.config)
c.Assert(err, IsNil)
c.Check(s.config.RootDir, Equals, "/opt/aptly/")
c.Check(s.config.GetRootDir(), Equals, "/opt/aptly/")
c.Check(s.config.DownloadConcurrency, Equals, 33)
c.Check(s.config.DatabaseOpenAttempts, Equals, 33)
}