Major refactoring of the publishing method, now uses helper indexFile(s). #108

This commit is contained in:
Andrey Smirnov
2014-09-27 01:39:02 +04:00
parent 8f9944117c
commit db499f872d
45 changed files with 399 additions and 210 deletions
+254
View File
@@ -0,0 +1,254 @@
package deb
import (
"bufio"
"fmt"
"github.com/smira/aptly/aptly"
"github.com/smira/aptly/utils"
"os"
"path/filepath"
"strings"
)
// indexFiles tracks all index files generated during a single publish
// operation, together with the shared state every indexFile needs:
// destination storage, path layout, the rename plan and collected checksums.
type indexFiles struct {
	publishedStorage aptly.PublishedStorage        // destination for published files
	basePath         string                        // base path of the published repository
	renameMap        map[string]string             // old name -> new name; applied by RenameFiles()
	generatedFiles   map[string]utils.ChecksumInfo // relative path (+ext) -> checksums, consumed for the Release file
	tempDir          string                        // local scratch directory for building indexes
	suffix           string                        // temporary publish suffix (e.g. ".tmp"); empty means publish in place
	indexes          map[string]*indexFile         // cache of index files, keyed by kind/component/arch/udeb
}
// indexFile is a single index file (Packages, Sources, Release, ...) being
// built in a temporary location before publication.
type indexFile struct {
	parent       *indexFiles // owning collection (shared storage/paths/maps)
	discardable  bool        // if true, the file is skipped on Finalize when nothing was written
	compressable bool        // if true, .gz/.bz2 variants are produced and published as well
	signable     bool        // if true and a signer is given, detached + clearsigned signatures are produced
	relativePath string      // path of the index relative to basePath
	tempFilename string      // full path of the temporary file (set lazily by BufWriter)
	tempFile     *os.File    // open temporary file (set lazily by BufWriter)
	w            *bufio.Writer // buffered writer over tempFile; nil until first BufWriter call
}
// BufWriter lazily creates the backing temporary file (named after the
// relative path with slashes flattened to underscores) and returns a
// buffered writer over it. Repeated calls return the same writer.
func (file *indexFile) BufWriter() (*bufio.Writer, error) {
	if file.w != nil {
		return file.w, nil
	}

	file.tempFilename = filepath.Join(file.parent.tempDir, strings.Replace(file.relativePath, "/", "_", -1))

	tempFile, err := os.Create(file.tempFilename)
	if err != nil {
		return nil, fmt.Errorf("unable to create temporary index file: %s", err)
	}

	file.tempFile = tempFile
	file.w = bufio.NewWriter(tempFile)

	return file.w, nil
}
// Finalize flushes the index to disk, optionally compresses it, records its
// checksums in parent.generatedFiles, publishes it (and its compressed
// variants) to the published storage, and, for signable files, produces
// detached (.gpg) and clearsigned ("In"-prefixed) signatures.
//
// When signer is nil, signing is skipped even for signable files.
func (file *indexFile) Finalize(signer utils.Signer) error {
	if file.w == nil {
		if file.discardable {
			// Nothing was ever written and the file is optional: skip it.
			return nil
		}
		// An empty but mandatory index still has to be published; create it
		// now. BUGFIX: the error was previously ignored, so a failed create
		// left file.w nil and the Flush below dereferenced a nil pointer.
		if _, err := file.BufWriter(); err != nil {
			return err
		}
	}

	err := file.w.Flush()
	if err != nil {
		file.tempFile.Close()
		return fmt.Errorf("unable to write to index file: %s", err)
	}

	if file.compressable {
		err = utils.CompressFile(file.tempFile)
		if err != nil {
			file.tempFile.Close()
			return fmt.Errorf("unable to compress index file: %s", err)
		}
	}
	file.tempFile.Close()

	exts := []string{""}
	if file.compressable {
		// CompressFile produces .gz and .bz2 siblings of the original file
		// (presumably -- TODO confirm against utils.CompressFile); checksum
		// and publish all three.
		exts = append(exts, ".gz", ".bz2")
	}

	for _, ext := range exts {
		var checksumInfo utils.ChecksumInfo

		checksumInfo, err = utils.ChecksumsForFile(file.tempFilename + ext)
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		file.parent.generatedFiles[file.relativePath+ext] = checksumInfo
	}

	err = file.parent.publishedStorage.MkDir(filepath.Dir(filepath.Join(file.parent.basePath, file.relativePath)))
	if err != nil {
		return fmt.Errorf("unable to create dir: %s", err)
	}

	for _, ext := range exts {
		err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+ext),
			file.tempFilename+ext)
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}

		if file.parent.suffix != "" {
			// Published under the temporary suffix: schedule the rename to
			// the final name, performed later by RenameFiles().
			file.parent.renameMap[filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+ext)] =
				filepath.Join(file.parent.basePath, file.relativePath+ext)
		}
	}

	if file.signable && signer != nil {
		// Detached signature (Release.gpg) plus a clearsigned variant with
		// the "In" prefix (i.e. InRelease).
		err = signer.DetachedSign(file.tempFilename, file.tempFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to detached sign file: %s", err)
		}

		err = signer.ClearSign(file.tempFilename, filepath.Join(filepath.Dir(file.tempFilename), "In"+filepath.Base(file.tempFilename)))
		if err != nil {
			return fmt.Errorf("unable to clearsign file: %s", err)
		}

		if file.parent.suffix != "" {
			file.parent.renameMap[filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+".gpg")] =
				filepath.Join(file.parent.basePath, file.relativePath+".gpg")
			file.parent.renameMap[filepath.Join(file.parent.basePath, "In"+file.relativePath+file.parent.suffix)] =
				filepath.Join(file.parent.basePath, "In"+file.relativePath)
		}

		err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+".gpg"),
			file.tempFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}

		err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, "In"+file.relativePath+file.parent.suffix),
			filepath.Join(filepath.Dir(file.tempFilename), "In"+filepath.Base(file.tempFilename)))
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}
	}

	return nil
}
// newIndexFiles creates an empty indexFiles collection publishing under
// basePath on publishedStorage, building indexes in tempDir, optionally
// with a temporary filename suffix (empty for in-place publishing).
func newIndexFiles(publishedStorage aptly.PublishedStorage, basePath, tempDir, suffix string) *indexFiles {
	files := &indexFiles{
		publishedStorage: publishedStorage,
		basePath:         basePath,
		tempDir:          tempDir,
		suffix:           suffix,
	}

	// Maps start empty; they are filled as indexes are requested/finalized.
	files.renameMap = make(map[string]string)
	files.generatedFiles = make(map[string]utils.ChecksumInfo)
	files.indexes = make(map[string]*indexFile)

	return files
}
// PackageIndex returns the Packages (or Sources, for arch == "source") index
// for the given component and architecture, creating and caching it on first
// use. When udeb is true, the debian-installer variant is returned; udeb
// indexes are discardable, i.e. silently skipped at finalization if nothing
// was written to them.
func (files *indexFiles) PackageIndex(component, arch string, udeb bool) *indexFile {
	// BUGFIX: the bool udeb was formatted with %s, producing keys containing
	// "%!s(bool=false)". Keys were still unique so lookups worked, but %t is
	// the correct verb (and keeps go vet quiet).
	key := fmt.Sprintf("pi-%s-%s-%t", component, arch, udeb)
	file, ok := files.indexes[key]
	if !ok {
		var relativePath string

		switch {
		case arch == "source":
			relativePath = filepath.Join(component, "source", "Sources")
		case udeb:
			relativePath = filepath.Join(component, "debian-installer", fmt.Sprintf("binary-%s", arch), "Packages")
		default:
			relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Packages")
		}

		file = &indexFile{
			parent:       files,
			discardable:  udeb,
			compressable: true,
			signable:     false,
			relativePath: relativePath,
		}
		files.indexes[key] = file
	}

	return file
}
// ReleaseIndex returns the per-component/architecture Release index,
// creating and caching it on first use. When udeb is true, the
// debian-installer variant is returned; udeb indexes are discardable.
// Unlike package indexes, these Release files are never compressed.
func (files *indexFiles) ReleaseIndex(component, arch string, udeb bool) *indexFile {
	// BUGFIX: the bool udeb was formatted with %s, producing keys containing
	// "%!s(bool=false)". Keys were still unique so lookups worked, but %t is
	// the correct verb (and keeps go vet quiet).
	key := fmt.Sprintf("ri-%s-%s-%t", component, arch, udeb)
	file, ok := files.indexes[key]
	if !ok {
		var relativePath string

		switch {
		case arch == "source":
			relativePath = filepath.Join(component, "source", "Release")
		case udeb:
			relativePath = filepath.Join(component, "debian-installer", fmt.Sprintf("binary-%s", arch), "Release")
		default:
			relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Release")
		}

		file = &indexFile{
			parent:       files,
			discardable:  udeb,
			compressable: false,
			signable:     false,
			relativePath: relativePath,
		}
		files.indexes[key] = file
	}

	return file
}
// ReleaseFile returns a fresh top-level (signable, uncompressed) Release
// index. It is deliberately NOT cached in files.indexes, so FinalizeAll()
// never touches it; the caller finalizes it separately, passing a signer.
func (files *indexFiles) ReleaseFile() *indexFile {
	// discardable and compressable stay at their false zero values.
	release := &indexFile{
		parent:       files,
		signable:     true,
		relativePath: "Release",
	}

	return release
}
// FinalizeAll finalizes (flushes, checksums, publishes) every cached index
// file without signing, reporting per-file progress when progress is non-nil.
// On success the index cache is reset; the first failure aborts the loop.
func (files *indexFiles) FinalizeAll(progress aptly.Progress) (err error) {
	if progress != nil {
		progress.InitBar(int64(len(files.indexes)), false)
		defer progress.ShutdownBar()
	}

	for _, file := range files.indexes {
		if err = file.Finalize(nil); err != nil {
			return
		}

		if progress != nil {
			progress.AddBar(1)
		}
	}

	// All indexes are published; drop the cache so the collection can be
	// reused for a fresh set.
	files.indexes = make(map[string]*indexFile)

	return
}
// RenameFiles applies the accumulated rename plan, moving every file
// published under the temporary suffix to its final name. The first rename
// failure is returned immediately; note map iteration order is random.
func (files *indexFiles) RenameFiles() error {
	for oldName, newName := range files.renameMap {
		if err := files.publishedStorage.RenameFile(oldName, newName); err != nil {
			return fmt.Errorf("unable to rename: %s", err)
		}
	}

	return nil
}
+72 -180
View File
@@ -1,7 +1,6 @@
package deb
import (
"bufio"
"bytes"
"code.google.com/p/go-uuid/uuid"
"fmt"
@@ -440,9 +439,6 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
suffix = ".tmp"
}
generatedFiles := map[string]utils.ChecksumInfo{}
renameMap := map[string]string{}
if progress != nil {
progress.Printf("Generating metadata files and linking package files...\n")
}
@@ -454,41 +450,36 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
}
defer os.RemoveAll(tempDir)
indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix)
for component, list := range lists {
var relativePath string
hadUdebs := false
// For all architectures, generate packages/sources files
// For all architectures, pregenerate packages/sources files
for _, arch := range p.Architectures {
indexes.PackageIndex(component, arch, false)
}
if progress != nil {
progress.InitBar(int64(list.Len()), false)
}
err = list.ForEach(func(pkg *Package) error {
if progress != nil {
progress.InitBar(int64(list.Len()), false)
progress.AddBar(1)
}
if arch == "source" {
relativePath = filepath.Join(component, "source", "Sources")
} else {
relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Packages")
}
err = publishedStorage.MkDir(filepath.Dir(filepath.Join(basePath, relativePath)))
if err != nil {
return err
}
var packagesFile *os.File
packagesFileName := filepath.Join(tempDir, fmt.Sprintf("pkgs_%s_%s", component, arch))
packagesFile, err = os.Create(packagesFileName)
if err != nil {
return fmt.Errorf("unable to create temporary Packages file: %s", err)
}
bufWriter := bufio.NewWriter(packagesFile)
err = list.ForEach(func(pkg *Package) error {
if progress != nil {
progress.AddBar(1)
if pkg.Architecture == "all" || utils.StrSliceHasItem(p.Architectures, pkg.Architecture) {
hadUdebs = hadUdebs || pkg.IsUdeb
err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component, forceOverwrite)
if err != nil {
return err
}
}
for _, arch := range p.Architectures {
if pkg.MatchesArchitecture(arch) {
err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component, forceOverwrite)
bufWriter, err := indexes.PackageIndex(component, arch, pkg.IsUdeb).BufWriter()
if err != nil {
return err
}
@@ -501,113 +492,58 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
if err != nil {
return err
}
pkg.files = nil
pkg.deps = nil
pkg.extra = nil
}
return nil
})
if err != nil {
return fmt.Errorf("unable to process packages: %s", err)
}
err = bufWriter.Flush()
if err != nil {
return fmt.Errorf("unable to write Packages file: %s", err)
}
err = utils.CompressFile(packagesFile)
if err != nil {
return fmt.Errorf("unable to compress Packages files: %s", err)
}
packagesFile.Close()
for _, ext := range []string{"", ".gz", ".bz2"} {
var checksumInfo utils.ChecksumInfo
checksumInfo, err = utils.ChecksumsForFile(packagesFileName + ext)
if err != nil {
return fmt.Errorf("unable to collect checksums: %s", err)
}
generatedFiles[relativePath+ext] = checksumInfo
err = publishedStorage.PutFile(filepath.Join(basePath, relativePath+suffix+ext), packagesFileName+ext)
if err != nil {
return fmt.Errorf("unable to publish file: %s", err)
}
if suffix != "" {
renameMap[filepath.Join(basePath, relativePath+suffix+ext)] = filepath.Join(basePath, relativePath+ext)
}
}
if progress != nil {
progress.ShutdownBar()
}
pkg.files = nil
pkg.deps = nil
pkg.extra = nil
return nil
})
if err != nil {
return fmt.Errorf("unable to process packages: %s", err)
}
if progress != nil {
progress.ShutdownBar()
}
udebs := []bool{false}
if hadUdebs {
udebs = append(udebs, true)
}
// For all architectures, generate Release files
for _, arch := range p.Architectures {
release := make(Stanza)
release["Archive"] = p.Distribution
release["Architecture"] = arch
release["Component"] = component
release["Origin"] = p.GetOrigin()
release["Label"] = p.GetLabel()
for _, udeb := range udebs {
release := make(Stanza)
release["Archive"] = p.Distribution
release["Architecture"] = arch
release["Component"] = component
release["Origin"] = p.GetOrigin()
release["Label"] = p.GetLabel()
if arch == "source" {
relativePath = filepath.Join(component, "source", "Release")
} else {
relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Release")
bufWriter, err := indexes.ReleaseIndex(component, arch, udeb).BufWriter()
err = release.WriteTo(bufWriter)
if err != nil {
return fmt.Errorf("unable to create Release file: %s", err)
}
}
var file *os.File
fileName := filepath.Join(tempDir, fmt.Sprintf("release_%s_%s", component, arch))
file, err = os.Create(fileName)
if err != nil {
return fmt.Errorf("unable to create temporary Release file: %s", err)
}
bufWriter := bufio.NewWriter(file)
err = release.WriteTo(bufWriter)
if err != nil {
return fmt.Errorf("unable to create Release file: %s", err)
}
err = bufWriter.Flush()
if err != nil {
return fmt.Errorf("unable to create Release file: %s", err)
}
file.Close()
var checksumInfo utils.ChecksumInfo
checksumInfo, err = utils.ChecksumsForFile(fileName)
if err != nil {
return fmt.Errorf("unable to collect checksums: %s", err)
}
generatedFiles[relativePath] = checksumInfo
err = publishedStorage.PutFile(filepath.Join(basePath, relativePath+suffix), fileName)
if err != nil {
file.Close()
return fmt.Errorf("unable to publish file: %s", err)
}
if suffix != "" {
renameMap[filepath.Join(basePath, relativePath+suffix)] = filepath.Join(basePath, relativePath)
}
}
}
if progress != nil {
progress.Printf("Finalizing metadata files...\n")
}
err = indexes.FinalizeAll(progress)
if err != nil {
return err
}
release := make(Stanza)
release["Origin"] = p.GetOrigin()
release["Label"] = p.GetLabel()
@@ -621,80 +557,36 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
release["Components"] = strings.Join(p.Components(), " ")
for path, info := range generatedFiles {
for path, info := range indexes.generatedFiles {
release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
}
var releaseFile *os.File
releaseFilename := filepath.Join(tempDir, "Release")
releaseFile, err = os.Create(releaseFilename)
releaseFile := indexes.ReleaseFile()
bufWriter, err := releaseFile.BufWriter()
if err != nil {
return fmt.Errorf("unable to create temporary Release file: %s", err)
return err
}
bufWriter := bufio.NewWriter(releaseFile)
err = release.WriteTo(bufWriter)
if err != nil {
return fmt.Errorf("unable to create Release file: %s", err)
}
err = bufWriter.Flush()
if err != nil {
return fmt.Errorf("unable to create Release file: %s", err)
}
releaseFile.Close()
if suffix != "" {
renameMap[filepath.Join(basePath, "Release"+suffix)] = filepath.Join(basePath, "Release")
}
err = publishedStorage.PutFile(filepath.Join(basePath, "Release"+suffix), releaseFilename)
if err != nil {
return fmt.Errorf("unable to publish file: %s", err)
}
// Signing files might output to console, so flush progress writer first
if progress != nil {
progress.Flush()
}
if signer != nil {
err = signer.DetachedSign(releaseFilename, releaseFilename+".gpg")
if err != nil {
return fmt.Errorf("unable to sign Release file: %s", err)
}
err = signer.ClearSign(releaseFilename, filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
if err != nil {
return fmt.Errorf("unable to sign Release file: %s", err)
}
if suffix != "" {
renameMap[filepath.Join(basePath, "Release"+suffix+".gpg")] = filepath.Join(basePath, "Release.gpg")
renameMap[filepath.Join(basePath, "InRelease"+suffix)] = filepath.Join(basePath, "InRelease")
}
err = publishedStorage.PutFile(filepath.Join(basePath, "Release"+suffix+".gpg"), releaseFilename+".gpg")
if err != nil {
return fmt.Errorf("unable to publish file: %s", err)
}
err = publishedStorage.PutFile(filepath.Join(basePath, "InRelease"+suffix),
filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
if err != nil {
return fmt.Errorf("unable to publish file: %s", err)
}
err = releaseFile.Finalize(signer)
if err != nil {
return err
}
for oldName, newName := range renameMap {
err = publishedStorage.RenameFile(oldName, newName)
if err != nil {
return fmt.Errorf("unable to rename: %s", err)
}
err = indexes.RenameFiles()
if err != nil {
return err
}
return nil