mirror of
https://github.com/aptly-dev/aptly.git
synced 2026-05-06 22:18:28 +00:00
Convert to regular Go vendor + dep tool
This commit is contained in:
+28
@@ -0,0 +1,28 @@
|
||||
Copyright (c) 2011 Mikkel Krautz
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
- Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
- Neither the name of the Mumble Developers nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
|
||||
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
Package ar implements reading and writing of ar archives.
|
||||
It supports reading archives in the GNU and BSD formats, but
|
||||
only supports writing in the BSD format.
|
||||
+81
@@ -0,0 +1,81 @@
|
||||
// Copyright (c) 2011 Mikkel Krautz
|
||||
// The use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE-file.
|
||||
|
||||
// Package ar implements reading and writing of ar archives.
|
||||
// It supports reading archives in the GNU and BSD formats, but
|
||||
// only supports writing in the BSD format.
|
||||
package ar
|
||||
|
||||
import "errors"
|
||||
|
||||
// A Header represents a single file header in an ar archive.
type Header struct {
	Name  string // file name (format-specific terminators already stripped by the Reader)
	Mode  int64  // permission and mode bits (stored in octal in the on-disk header)
	Uid   int    // user ID of the file's owner
	Gid   int    // group ID of the file's owner
	Size  int64  // length in bytes of the file's data section
	Mtime int64  // modification time, seconds since the Unix epoch
}
|
||||
|
||||
var (
	// globalHeader is the magic string that begins every ar archive.
	globalHeader = "!<arch>\n"
	// bsdLongFileNamePrefix marks a BSD-style long filename field ("#1/<len>").
	bsdLongFileNamePrefix = "#1/"
	// fileHeaderMagic terminates each fixed 60-byte per-file header.
	fileHeaderMagic = "`\n"
)

// Errors returned by the Reader and Writer in this package.
var (
	ErrMissingGlobalHeader = errors.New("ar: missing global header")
	ErrFileHeader          = errors.New("ar: invalid ar file header")
	ErrWriteAfterClose     = errors.New("ar: write after close")
	ErrWriteTooLong        = errors.New("ar: write too long")
)
|
||||
|
||||
// skippingWriter is an io.Writer that discards all data written to it.
// The Reader uses it with io.CopyN to skip over unread entry data.
type skippingWriter struct{}

// Write discards p and reports that all of it was consumed.
func (skippingWriter) Write(p []byte) (int, error) {
	return len(p), nil
}
|
||||
|
||||
// nulTerminated returns the contents of buf up to, but not including,
// the first NUL byte. If buf contains no NUL, all of buf is returned.
func nulTerminated(buf []byte) string {
	for idx, b := range buf {
		if b == 0 {
			return string(buf[:idx])
		}
	}
	return string(buf)
}
|
||||
|
||||
// gnuArString reads a linefeed-terminated string from buf.
// If buf contains no linefeed character, an error is returned.
func gnuArString(buf []byte) (string, error) {
	for idx, b := range buf {
		if b == '\n' {
			return string(buf[:idx]), nil
		}
	}
	return "", errors.New("ar: missing linefeed in parsing ar string")
}
|
||||
|
||||
// arString returns the prefix of in that precedes the first space or
// tab character, or all of in if it contains neither. Fields in an ar
// header are whitespace-padded; this strips that padding.
func arString(in string) string {
	// Note: name the loop variable r, not `rune`, to avoid shadowing
	// the builtin type.
	for i, r := range in {
		if r == ' ' || r == '\t' {
			return in[:i]
		}
	}
	return in
}
|
||||
|
||||
// encodeArString right-pads in with spaces so the result is exactly
// total bytes long — the fixed-width field format used in ar file
// headers. If in is already total bytes or longer, it is returned
// unchanged (callers are responsible for fields that fit).
func encodeArString(in string, total int) string {
	remain := total - len(in)
	if remain <= 0 {
		return in
	}
	// Allocate the padding in one step instead of growing a slice
	// byte by byte with append.
	spaces := make([]byte, remain)
	for i := range spaces {
		spaces[i] = ' '
	}
	return in + string(spaces)
}
|
||||
+261
@@ -0,0 +1,261 @@
|
||||
// Copyright (c) 2011 Mikkel Krautz
|
||||
// The use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE-file.
|
||||
|
||||
package ar
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A Reader provides sequential access to the contents of a BSD or GNU-style ar archive.
// An archive file consists of a sequence of files.
// The Next method advances to the next file in the archive (including the first).
// After Next has returned a header, the Reader can be treated as an io.Reader to
// access the data of the file described by the header received from Next.
//
// Example:
//	tr := ar.NewReader(r)
//	for {
//		hdr, err := tr.Next()
//		if err == io.EOF {
//			// end of archive
//			break
//		}
//		if err != nil {
//			// handle error
//		}
//		io.Copy(data, tr)
//	}
type Reader struct {
	r          io.Reader // underlying source of archive bytes
	offset     int64     // bytes consumed from r so far; drives 2-byte alignment handling
	dataRemain int64     // unread bytes remaining in the current entry's data section
	gnuLongFn  []byte    // GNU long-filename table (the "//" section), once read
}
|
||||
|
||||
// NewReader creates a new Reader reading from r.
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{r, 0, 0, nil}
|
||||
}
|
||||
|
||||
// Next advances to the next entry in the archive, returning its header.
// Any unread data from the previous entry is skipped first. It returns
// io.EOF once the end of the archive is reached.
func (ar *Reader) Next() (hdr *Header, err error) {
	// If this is our first read, we should check whether a global
	// ar header is present.
	if ar.offset == 0 {
		ghdr := make([]byte, len(globalHeader))
		nread, err := io.ReadFull(ar.r, ghdr)
		if err != nil {
			return nil, err
		}
		ar.offset += int64(nread)
		if globalHeader != string(ghdr) {
			return nil, ErrMissingGlobalHeader
		}
	}

	// If an entry wasn't fully read, skip the remaining bytes by
	// copying them into a writer that discards everything.
	if ar.dataRemain > 0 {
		sw := skippingWriter{}
		ncopied, err := io.CopyN(sw, ar.r, ar.dataRemain)
		// io.CopyN reports io.EOF when fewer than ar.dataRemain bytes
		// were available; a short skip means a truncated archive.
		if err == io.EOF || err == nil {
			ar.offset += ncopied
			ar.dataRemain -= ncopied
			if ar.dataRemain > 0 {
				return nil, errors.New("ar: skip failed")
			}
		} else if err != nil {
			return nil, err
		}
	}

	// Read a file header from the archive.
	hdr, err = ar.consumeHeader()
	if err != nil {
		return nil, err
	}

	// If the consumed header is a GNU long file name section,
	// read its filename table and update the Reader struct with it.
	if hdr.Name == "//" {
		// Return an error if we've already read a GNU long filename
		// section.
		if ar.gnuLongFn != nil {
			return nil, errors.New("ar: malformed archive, duplicate gnu long filename sections")
		}

		// Read through ar (not ar.r) so offset/dataRemain bookkeeping
		// in Read applies to the table bytes as well.
		ar.dataRemain = hdr.Size
		buf := make([]byte, int(hdr.Size))
		_, err = io.ReadFull(ar, buf)
		// We expect the GNU long filename section
		// to be as long as noted in the header.
		if err != nil {
			return nil, err
		}
		ar.gnuLongFn = buf

		// The special header has been consumed.
		// Read the next file header in the file so we can return
		// that to the user.
		hdr, err = ar.consumeHeader()
		if err != nil {
			return nil, err
		}
	}

	ar.dataRemain = hdr.Size
	return hdr, nil
}
|
||||
|
||||
// Read reads from the current entry in the archive.
|
||||
// It returns 0, io.EOF when it reaches the end of that entry,
|
||||
// until Next is called to advance to the next entry.
|
||||
func (ar *Reader) Read(b []byte) (n int, err error) {
|
||||
if ar.dataRemain == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if int64(len(b)) > ar.dataRemain {
|
||||
b = b[:ar.dataRemain]
|
||||
}
|
||||
n, err = ar.r.Read(b)
|
||||
ar.offset += int64(n)
|
||||
ar.dataRemain -= int64(n)
|
||||
if ar.dataRemain == 0 {
|
||||
err = io.EOF
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (ar *Reader) consumeHeader() (*Header, error) {
|
||||
// Data sections are required to always end on a 2-byte boundary.
|
||||
// Simply check if we're at a 2-byte offset before consuming a new
|
||||
// file header. If not, consume the padding byte and check that it
|
||||
// is a '/n' like we expect.
|
||||
if ar.offset%2 != 0 {
|
||||
lineFeed := make([]byte, 1)
|
||||
_, err := ar.r.Read(lineFeed)
|
||||
if err != nil {
|
||||
if lineFeed[0] != '\n' {
|
||||
return nil, errors.New("ar: alignment byte read, not '\n'")
|
||||
}
|
||||
}
|
||||
ar.offset += 1
|
||||
}
|
||||
|
||||
fhdr := make([]byte, 60)
|
||||
nread, err := io.ReadFull(ar.r, fhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ar.offset += int64(nread)
|
||||
|
||||
hdr := &Header{}
|
||||
fileName := arString(string(fhdr[0:16]))
|
||||
mtime := arString(string(fhdr[16:28]))
|
||||
uid := arString(string(fhdr[28:34]))
|
||||
gid := arString(string(fhdr[34:40]))
|
||||
mode := arString(string(fhdr[40:48]))
|
||||
size := arString(string(fhdr[48:58]))
|
||||
magic := arString(string(fhdr[58:60]))
|
||||
|
||||
if magic != fileHeaderMagic {
|
||||
return nil, ErrFileHeader
|
||||
}
|
||||
|
||||
if mtime != "" {
|
||||
hdr.Mtime, err = strconv.ParseInt(mtime, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if uid != "" {
|
||||
hdr.Uid, err = strconv.Atoi(uid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if gid != "" {
|
||||
hdr.Gid, err = strconv.Atoi(gid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
hdr.Size, err = strconv.ParseInt(size, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if mode != "" {
|
||||
hdr.Mode, err = strconv.ParseInt(mode, 8, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// GNU-style ar archives use '/' as a filename terminator for everything
|
||||
// but special sections (sections that start with a '/'), so we strip trailing
|
||||
// slashes from all filenames that do not start with a slash themselves.
|
||||
if len(fileName) > 0 && fileName[0] != '/' && fileName[len(fileName)-1] == '/' {
|
||||
fileName = fileName[:len(fileName)-1]
|
||||
}
|
||||
|
||||
// The file name is stored as a BSD long filename
|
||||
// That is, the filename is stored directly after the file header, as
|
||||
// part of the data section.
|
||||
if strings.HasPrefix(fileName, bsdLongFileNamePrefix) {
|
||||
fnLengthStr := arString(fileName[len(bsdLongFileNamePrefix):])
|
||||
fnLength, err := strconv.Atoi(fnLengthStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if int64(fnLength) > hdr.Size {
|
||||
return nil, errors.New("ar: invalid bsd long filename in file")
|
||||
}
|
||||
longFn := make([]byte, fnLength)
|
||||
nread, err = io.ReadFull(ar.r, longFn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ar.offset += int64(nread)
|
||||
hdr.Size -= int64(nread)
|
||||
hdr.Name = nulTerminated(longFn)
|
||||
|
||||
// The file name is stored as a GNU long filename
|
||||
} else if fhdr[0] == '/' && fhdr[1] >= '0' && fhdr[1] <= '9' {
|
||||
// We must have read a GNU-style long filename section for this lookup
|
||||
// to succeed.
|
||||
if ar.gnuLongFn == nil {
|
||||
return nil, errors.New("ar: encountered gnu-style long fn without corresponding long fn section")
|
||||
}
|
||||
gnuOffset, err := strconv.ParseInt(fileName[1:], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if gnuOffset <= int64(len(ar.gnuLongFn)) {
|
||||
fnStr, err := gnuArString(ar.gnuLongFn[gnuOffset:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fnStr[len(fnStr)-1] != '/' {
|
||||
return nil, errors.New("ar: gnu long filename is not terminated")
|
||||
}
|
||||
hdr.Name = fnStr[:len(fnStr)-1]
|
||||
} else {
|
||||
// The offset overflows our long filename section
|
||||
return nil, errors.New("ar: gnu long filename lookup out of bounds")
|
||||
}
|
||||
|
||||
// Regular short file name
|
||||
} else {
|
||||
hdr.Name = fileName
|
||||
}
|
||||
|
||||
return hdr, nil
|
||||
}
|
||||
+231
@@ -0,0 +1,231 @@
|
||||
// Copyright (c) 2011 Mikkel Krautz
|
||||
// The use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE-file.
|
||||
|
||||
package ar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var fbsd82Archive []archiveTest = []archiveTest{
|
||||
{
|
||||
&Header{
|
||||
Name: "/",
|
||||
Mode: 0,
|
||||
Mtime: 1315607407,
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Size: 4,
|
||||
},
|
||||
[]byte{0x0, 0x0, 0x0, 0x0},
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "a",
|
||||
Mode: 0100644,
|
||||
Mtime: 1315607373,
|
||||
Uid: 1001,
|
||||
Gid: 1001,
|
||||
Size: 2,
|
||||
},
|
||||
[]byte{'a', '\n'},
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "b",
|
||||
Mode: 0100644,
|
||||
Mtime: 1315607374,
|
||||
Uid: 1001,
|
||||
Gid: 1001,
|
||||
Size: 2,
|
||||
},
|
||||
[]byte{'b', '\n'},
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "c",
|
||||
Mode: 0100644,
|
||||
Mtime: 1315607376,
|
||||
Uid: 1001,
|
||||
Gid: 1001,
|
||||
Size: 2,
|
||||
},
|
||||
[]byte{'c', '\n'},
|
||||
},
|
||||
}
|
||||
|
||||
var lionArchive []archiveTest = []archiveTest{
|
||||
{
|
||||
&Header{
|
||||
Name: "__.SYMDEF SORTED",
|
||||
Mode: 0100644,
|
||||
Mtime: 1315593186,
|
||||
Uid: 501,
|
||||
Gid: 20,
|
||||
Size: 8,
|
||||
},
|
||||
[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "a",
|
||||
Mode: 0100644,
|
||||
Mtime: 1315593158,
|
||||
Uid: 501,
|
||||
Gid: 20,
|
||||
Size: 8,
|
||||
},
|
||||
[]byte("a\n\n\n\n\n\n\n"),
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "b",
|
||||
Mode: 0100644,
|
||||
Mtime: 1315593165,
|
||||
Uid: 501,
|
||||
Gid: 20,
|
||||
Size: 8,
|
||||
},
|
||||
[]byte("b\n\n\n\n\n\n\n"),
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "c",
|
||||
Mode: 0100644,
|
||||
Mtime: 1315593166,
|
||||
Uid: 501,
|
||||
Gid: 20,
|
||||
Size: 8,
|
||||
},
|
||||
[]byte("c\n\n\n\n\n\n\n"),
|
||||
},
|
||||
}
|
||||
|
||||
var linuxArchive []archiveTest = []archiveTest{
|
||||
{
|
||||
&Header{
|
||||
Name: "0",
|
||||
Mode: 0100770,
|
||||
Mtime: 1369126995,
|
||||
Uid: 0,
|
||||
Gid: 1001,
|
||||
Size: 0,
|
||||
},
|
||||
[]byte{},
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "1",
|
||||
Mode: 0100770,
|
||||
Mtime: 1369127013,
|
||||
Uid: 0,
|
||||
Gid: 1001,
|
||||
Size: 1,
|
||||
},
|
||||
[]byte("a"),
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "2",
|
||||
Mode: 0100770,
|
||||
Mtime: 1369127016,
|
||||
Uid: 0,
|
||||
Gid: 1001,
|
||||
Size: 2,
|
||||
},
|
||||
[]byte("ab"),
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "3",
|
||||
Mode: 0100770,
|
||||
Mtime: 1369127019,
|
||||
Uid: 0,
|
||||
Gid: 1001,
|
||||
Size: 3,
|
||||
},
|
||||
[]byte("abc"),
|
||||
},
|
||||
{
|
||||
&Header{
|
||||
Name: "long-long-file-name",
|
||||
Mode: 0100770,
|
||||
Mtime: 1369127028,
|
||||
Uid: 0,
|
||||
Gid: 1001,
|
||||
Size: 25,
|
||||
},
|
||||
[]byte("Gopher's name is Gordon.\n"),
|
||||
},
|
||||
}
|
||||
|
||||
func read(t *testing.T, r io.Reader, testArchive []archiveTest, readBody bool) {
|
||||
ar := NewReader(r)
|
||||
for _, testEntry := range testArchive {
|
||||
hdr, err := ar.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !headerCmp(hdr, testEntry.hdr) {
|
||||
t.Fatalf("header mismatch:\nread = %+v\norig = %+v", hdr, testEntry.hdr)
|
||||
}
|
||||
if readBody {
|
||||
fbuf := make([]byte, hdr.Size)
|
||||
_, err = io.ReadFull(ar, fbuf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(fbuf, testEntry.data) {
|
||||
t.Fatalf("data mismatch\nread = %v\norig = %v", fbuf, testEntry.data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, err := ar.Next()
|
||||
if err != io.EOF {
|
||||
t.Fatalf("expected EOF, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testRead(t *testing.T, r io.ReadSeeker, testArchive []archiveTest) {
|
||||
read(t, r, testArchive, true)
|
||||
r.Seek(0, 0)
|
||||
read(t, r, testArchive, false)
|
||||
}
|
||||
|
||||
// Test that we can correctly read and parse a FreeBSD 8.2 generated ar file.
func TestReadFreeBSD82LibArchive(t *testing.T) {
	f, err := os.Open("testdata/test-bsd-freebsd82-libarchive.ar")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	testRead(t, f, fbsd82Archive)
}
|
||||
|
||||
// Test that we can correctly read and parse a Mac OS X Lion generated ar file.
// It is generated in the same way as the FreeBSD archive above, but ar on OS X
// seems to pad the archived files with a lot of newlines.
// Attempting to "ar x" the archive also reproduces the newlines in the extracted
// files, so they are not a form of padding, but are intended to be there, somehow.
func TestReadMacOSXLionOld(t *testing.T) {
	f, err := os.Open("testdata/test-bsd-macosx.ar")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	testRead(t, f, lionArchive)
}
|
||||
|
||||
// Test that we can read and parse a GNU-format archive generated on Linux,
// including its "//" long-filename table.
func TestReadLinux(t *testing.T) {
	f, err := os.Open("testdata/test-gnu-linux.ar")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	testRead(t, f, linuxArchive)
}
|
||||
+123
@@ -0,0 +1,123 @@
|
||||
// Copyright (c) 2011 Mikkel Krautz
|
||||
// The use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE-file.
|
||||
|
||||
package ar
|
||||
|
||||
// This file tests that our reader can read files written by our writer.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// archiveTest pairs an expected file header with the entry's expected data.
type archiveTest struct {
	hdr  *Header // expected header, as returned by Reader.Next
	data []byte  // expected contents of the entry's data section
}
|
||||
|
||||
// Long filename test
|
||||
var longFnArchive []archiveTest = []archiveTest{
|
||||
{
|
||||
&Header{
|
||||
Name: "ReadWriteCompatFileName",
|
||||
Mode: 0755,
|
||||
Mtime: 0,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Size: 6,
|
||||
},
|
||||
[]byte{0xf0, 0x00, 0xca, 0xfe, 0xba, 0xbe},
|
||||
},
|
||||
}
|
||||
|
||||
// Short filename test (we don't special case short filenames,
|
||||
// we always write out long-style filenames, but let's test it
|
||||
// just to make sure)
|
||||
var shortFnArchive []archiveTest = []archiveTest{
|
||||
{
|
||||
&Header{
|
||||
Name: "a",
|
||||
Mode: 0755,
|
||||
Mtime: 0,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Size: 2,
|
||||
},
|
||||
[]byte{'a', '\n'},
|
||||
},
|
||||
}
|
||||
|
||||
func headerCmp(hdr1 *Header, hdr2 *Header) bool {
|
||||
if hdr1 == nil || hdr2 == nil {
|
||||
return false
|
||||
}
|
||||
if hdr1.Name != hdr2.Name {
|
||||
return false
|
||||
}
|
||||
if hdr1.Mode != hdr2.Mode {
|
||||
return false
|
||||
}
|
||||
if hdr1.Mtime != hdr2.Mtime {
|
||||
return false
|
||||
}
|
||||
if hdr1.Uid != hdr2.Uid {
|
||||
return false
|
||||
}
|
||||
if hdr1.Gid != hdr2.Gid {
|
||||
return false
|
||||
}
|
||||
if hdr1.Size != hdr2.Size {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func testRwCompat(t *testing.T, testArchive []archiveTest) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
for _, testEntry := range testArchive {
|
||||
err := w.WriteHeader(testEntry.hdr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = w.Write(testEntry.data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
err := w.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rbuf := bytes.NewBuffer(buf.Bytes())
|
||||
r := NewReader(rbuf)
|
||||
for _, testEntry := range testArchive {
|
||||
hdr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !headerCmp(hdr, testEntry.hdr) {
|
||||
t.Fatalf("header mismatch:\nread = %v\norig = %v", hdr, testEntry.hdr)
|
||||
}
|
||||
fbuf := make([]byte, hdr.Size)
|
||||
_, err = io.ReadFull(r, fbuf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(fbuf, testEntry.data) {
|
||||
t.Fatalf("data mismatch\nread = %v\norig = %v", fbuf, testEntry.data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestReadWriteCompatLongFn round-trips an archive whose single entry has
// a filename too long for the fixed 16-byte header name field.
func TestReadWriteCompatLongFn(t *testing.T) {
	testRwCompat(t, longFnArchive)
}
|
||||
|
||||
// TestReadWriteCompatShortFn round-trips an archive whose single entry
// has a name that fits inline in the header.
func TestReadWriteCompatShortFn(t *testing.T) {
	testRwCompat(t, shortFnArchive)
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
+13
@@ -0,0 +1,13 @@
|
||||
!<arch>
|
||||
// 22 `
|
||||
long-long-file-name/
|
||||
|
||||
0/ 1369126995 0 1001 100770 0 `
|
||||
1/ 1369127013 0 1001 100770 1 `
|
||||
a
|
||||
2/ 1369127016 0 1001 100770 2 `
|
||||
ab3/ 1369127019 0 1001 100770 3 `
|
||||
abc
|
||||
/0 1369127028 0 1001 100770 25 `
|
||||
Gopher's name is Gordon.
|
||||
|
||||
+180
@@ -0,0 +1,180 @@
|
||||
// Copyright (c) 2011 Mikkel Krautz
|
||||
// The use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE-file.
|
||||
|
||||
package ar
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// A Writer provides sequential writing of an ar archive in BSD format. It does not support
// writing in the GNU format, since the GNU-style extended filenames cannot be written sequentially.
// The BSD ar format is widely compatible with most modern ar readers out there.
//
// An ar archive consists of a sequence of files. Call WriteHeader to begin a new file,
// and then call Write to supply that file's data, writing at most hdr.Size bytes in total.
//
// Example:
//	aw := ar.NewWriter(w)
//	hdr := new(ar.Header)
//	hdr.Size = length of data in bytes
//	// populate other hdr fields as desired
//	if err := aw.WriteHeader(hdr); err != nil {
//		// handle error
//	}
//	io.Copy(aw, data)
//	aw.Close()
type Writer struct {
	w          io.Writer // destination for archive bytes
	offset     int64     // bytes written so far; drives 2-byte alignment padding in Flush
	dataRemain int64     // bytes of the current entry's data still expected
	closed     bool      // set by Close; further operations return ErrWriteAfterClose
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{w, 0, 0, false}
|
||||
}
|
||||
|
||||
// Closes the ar achive, flushing any unwritten data to the underlying writer.
|
||||
func (aw *Writer) Close() error {
|
||||
err := aw.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
aw.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush finishes writing the current file (optional).
|
||||
func (aw *Writer) Flush() error {
|
||||
if aw.closed {
|
||||
return ErrWriteAfterClose
|
||||
}
|
||||
if aw.offset%2 != 0 {
|
||||
_, err := io.WriteString(aw.w, "\n")
|
||||
aw.offset += int64(1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes the current entry in the ar archive. Write returns the error ErrWriteTooLong
|
||||
// if more than hdr.Size bytes are written following a call to WriteHeader.
|
||||
func (aw *Writer) Write(b []byte) (n int, err error) {
|
||||
if aw.closed {
|
||||
return 0, ErrWriteAfterClose
|
||||
}
|
||||
// Overflow check
|
||||
tooLong := false
|
||||
if int64(len(b)) > aw.dataRemain {
|
||||
b = b[0:aw.dataRemain]
|
||||
tooLong = true
|
||||
}
|
||||
n, err = aw.w.Write(b)
|
||||
aw.dataRemain -= int64(n)
|
||||
// Warn if the write would have overflowed the
|
||||
// space set aside for the provided data.
|
||||
if err == nil && tooLong {
|
||||
err = ErrWriteTooLong
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's content. WriteHeader calls Flush to
|
||||
// correctly pad the last written file. Calling after WriteHeader a Close will return ErrWriteAfterClose.
|
||||
func (aw *Writer) WriteHeader(hdr *Header) (err error) {
|
||||
if aw.closed {
|
||||
return ErrWriteAfterClose
|
||||
}
|
||||
|
||||
// Flush previous data write
|
||||
err = aw.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If we're at the beginning of the writer, write
|
||||
// the global header.
|
||||
if aw.offset == 0 {
|
||||
nwritten, err := io.WriteString(aw.w, globalHeader)
|
||||
aw.offset += int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
newName string
|
||||
newSize int64
|
||||
)
|
||||
|
||||
longFn := len(hdr.Name) > 15
|
||||
if longFn {
|
||||
newName = bsdLongFileNamePrefix + strconv.Itoa(len(hdr.Name))
|
||||
newSize = hdr.Size + int64(len(hdr.Name))
|
||||
} else {
|
||||
newName = hdr.Name
|
||||
newSize = hdr.Size
|
||||
}
|
||||
|
||||
nwritten, err := io.WriteString(aw.w, encodeArString(newName, 16))
|
||||
aw.offset += int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nwritten, err = io.WriteString(aw.w, encodeArString(strconv.FormatInt(hdr.Mtime, 10), 12))
|
||||
aw.offset += int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nwritten, err = io.WriteString(aw.w, encodeArString(strconv.Itoa(hdr.Uid), 6))
|
||||
aw.offset += int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nwritten, err = io.WriteString(aw.w, encodeArString(strconv.Itoa(hdr.Gid), 6))
|
||||
aw.offset += int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nwritten, err = io.WriteString(aw.w, encodeArString(strconv.FormatInt(hdr.Mode, 8), 8))
|
||||
aw.offset += int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nwritten, err = io.WriteString(aw.w, encodeArString(strconv.FormatInt(newSize, 10), 10))
|
||||
aw.offset += int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nwritten, err = io.WriteString(aw.w, fileHeaderMagic)
|
||||
aw.offset += int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if longFn {
|
||||
nwritten, err = io.WriteString(aw.w, hdr.Name)
|
||||
aw.offset += int64(nwritten)
|
||||
aw.dataRemain = newSize - int64(nwritten)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
aw.dataRemain = newSize
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
+58
@@ -0,0 +1,58 @@
|
||||
// Copyright (c) 2011 Mikkel Krautz
|
||||
// The use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE-file.
|
||||
|
||||
package ar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLongWrite(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
err := w.WriteHeader(&Header{
|
||||
Name: "/",
|
||||
Mode: 0,
|
||||
Mtime: 1315607407,
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Size: 4,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = w.Write([]byte{1, 2, 3, 4, 5})
|
||||
if err != ErrWriteTooLong {
|
||||
t.Fatalf("expected ErrWriteTooLong, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteAfterClose(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
w := NewWriter(buf)
|
||||
err := w.WriteHeader(&Header{
|
||||
Name: "/",
|
||||
Mode: 0,
|
||||
Mtime: 1315607407,
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Size: 4,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = w.Write([]byte{1, 2, 3})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = w.Write([]byte{4})
|
||||
if err != ErrWriteAfterClose {
|
||||
t.Fatalf("expected ErrWriteAfterClose, got %v", err)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user