Update vendored deps, including AWS SDK, openpgp, ftp, ...

Andrey Smirnov
2018-04-05 17:46:45 +03:00
parent cef4fefc40
commit 0e6ee35942
1497 changed files with 450721 additions and 68034 deletions
+18 -5
@@ -32,6 +32,11 @@ type DB struct {
// Need 64-bit alignment.
seq uint64
// Stats. Need 64-bit alignment.
cWriteDelay int64 // The cumulative duration of write delays
cWriteDelayN int32 // The cumulative number of write delays
aliveSnaps, aliveIters int32
// Session.
s *session
@@ -49,9 +54,6 @@ type DB struct {
snapsMu sync.Mutex
snapsList *list.List
// Stats.
aliveSnaps, aliveIters int32
// Write.
batchPool sync.Pool
writeMergeC chan writeMerge
@@ -321,7 +323,7 @@ func recoverTable(s *session, o *opt.Options) error {
}
}
err = iter.Error()
if err != nil {
if err != nil && !errors.IsCorrupted(err) {
return
}
err = tw.Close()
@@ -392,7 +394,7 @@ func recoverTable(s *session, o *opt.Options) error {
}
imax = append(imax[:0], key...)
}
if err := iter.Error(); err != nil {
if err := iter.Error(); err != nil && !errors.IsCorrupted(err) {
iter.Release()
return err
}
@@ -904,6 +906,10 @@ func (db *DB) GetSnapshot() (*Snapshot, error) {
// Returns the number of files at level 'n'.
// leveldb.stats
// Returns statistics of the underlying DB.
// leveldb.iostats
// Returns statistics of effective disk read and write.
// leveldb.writedelay
// Returns cumulative write delay caused by compaction.
// leveldb.sstables
// Returns sstables list for each level.
// leveldb.blockpool
@@ -955,6 +961,13 @@ func (db *DB) GetProperty(name string) (value string, err error) {
level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
float64(read)/1048576.0, float64(write)/1048576.0)
}
case p == "iostats":
value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f",
float64(db.s.stor.reads())/1048576.0,
float64(db.s.stor.writes())/1048576.0)
case p == "writedelay":
writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay))
value = fmt.Sprintf("DelayN:%d Delay:%s", writeDelayN, writeDelay)
case p == "sstables":
for level, tables := range v.levels {
value += fmt.Sprintf("--- level %d ---\n", level)
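Both new cases are reachable through the public GetProperty API under the "leveldb." prefix listed in the doc comment above. A minimal sketch of querying them, assuming a hypothetical scratch path for the database:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// Hypothetical scratch path; any writable directory works.
	db, err := leveldb.OpenFile("/tmp/iostats-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Query the two properties introduced in this change.
	for _, name := range []string{"leveldb.iostats", "leveldb.writedelay"} {
		value, err := db.GetProperty(name)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s -> %s\n", name, value)
	}
}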
+2 -2
@@ -767,7 +767,7 @@ func TestDB_GetEncountersEmptyLevel(t *testing.T) {
// * sstable B in level 2
// Then do enough Get() calls to arrange for an automatic compaction
// of sstable A. A bug would cause the compaction to be marked as
// occuring at level 1 (instead of the correct level 0).
// occurring at level 1 (instead of the correct level 0).
// Step 1: First place sstables in levels 0 and 2
for i := 0; ; i++ {
@@ -2736,7 +2736,7 @@ func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
initialSize1 = h.sizeOf(limitKey, maxKey)
)
t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
t.Logf("initial size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
for r := 0; true; r++ {
if r >= mIter {
+9 -1
@@ -7,6 +7,7 @@
package leveldb
import (
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/memdb"
@@ -117,6 +118,8 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
db.writeDelayN++
} else if db.writeDelayN > 0 {
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN))
atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay))
db.writeDelay = 0
db.writeDelayN = 0
}
@@ -143,7 +146,7 @@ func (db *DB) unlockWrite(overflow bool, merged int, err error) {
}
}
// ourBatch if defined should equal with batch.
// ourBatch is batch that we can modify.
func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error {
// Try to flush memdb. This method would also trying to throttle writes
// if it is too fast and compaction cannot catch-up.
@@ -212,6 +215,11 @@ func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error {
}
}
// Release ourBatch if any.
if ourBatch != nil {
defer db.batchPool.Put(ourBatch)
}
// Seq number.
seq := db.seq + 1
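The flush hook above folds each completed delay streak into the new cumulative counters, storing the time.Duration as int64 nanoseconds so both fields can be updated and read atomically. A standalone sketch of that pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// Counters in the style of DB.cWriteDelay / DB.cWriteDelayN.
var (
	cWriteDelay  int64 // cumulative delay, in nanoseconds
	cWriteDelayN int32 // cumulative number of delayed writes
)

// addDelay is a hypothetical helper folding one delay streak into the totals.
func addDelay(n int, d time.Duration) {
	atomic.AddInt32(&cWriteDelayN, int32(n))
	atomic.AddInt64(&cWriteDelay, int64(d))
}

func main() {
	addDelay(3, 42*time.Millisecond)
	fmt.Printf("DelayN:%d Delay:%s\n",
		atomic.LoadInt32(&cWriteDelayN),
		time.Duration(atomic.LoadInt64(&cWriteDelay)))
}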
+2 -2
@@ -42,7 +42,7 @@ type session struct {
stTempFileNum int64
stSeqNum uint64 // last mem compacted seq; need external synchronization
stor storage.Storage
stor *iStorage
storLock storage.Locker
o *cachedOptions
icmp *iComparer
@@ -68,7 +68,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
return
}
s = &session{
stor: stor,
stor: newIStorage(stor),
storLock: storLock,
fileRef: make(map[int64]int),
}
+63
@@ -0,0 +1,63 @@
package leveldb
import (
"github.com/syndtr/goleveldb/leveldb/storage"
"sync/atomic"
)
type iStorage struct {
storage.Storage
read uint64
write uint64
}
func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) {
r, err := c.Storage.Open(fd)
return &iStorageReader{r, c}, err
}
func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) {
w, err := c.Storage.Create(fd)
return &iStorageWriter{w, c}, err
}
func (c *iStorage) reads() uint64 {
return atomic.LoadUint64(&c.read)
}
func (c *iStorage) writes() uint64 {
return atomic.LoadUint64(&c.write)
}
// newIStorage returns the given storage wrapped by iStorage.
func newIStorage(s storage.Storage) *iStorage {
return &iStorage{s, 0, 0}
}
type iStorageReader struct {
storage.Reader
c *iStorage
}
func (r *iStorageReader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
atomic.AddUint64(&r.c.read, uint64(n))
return n, err
}
func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) {
n, err = r.Reader.ReadAt(p, off)
atomic.AddUint64(&r.c.read, uint64(n))
return n, err
}
type iStorageWriter struct {
storage.Writer
c *iStorage
}
func (w *iStorageWriter) Write(p []byte) (n int, err error) {
n, err = w.Writer.Write(p)
atomic.AddUint64(&w.c.write, uint64(n))
return n, err
}
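The new iStorage embeds storage.Storage so only Open and Create need overriding; every Reader and Writer it hands out ticks an atomic byte counter that GetProperty("leveldb.iostats") later reads. The same counting-decorator idea on a plain io.Reader, as a minimal sketch with hypothetical names:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// countingReader wraps an io.Reader and atomically tallies bytes read,
// mirroring iStorageReader above.
type countingReader struct {
	io.Reader
	n uint64
}

func (r *countingReader) Read(p []byte) (n int, err error) {
	n, err = r.Reader.Read(p)
	atomic.AddUint64(&r.n, uint64(n))
	return n, err
}

func main() {
	cr := &countingReader{Reader: strings.NewReader("hello, leveldb")}
	io.Copy(io.Discard, cr)
	fmt.Println("bytes read:", atomic.LoadUint64(&cr.n))
}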
+1 -3
@@ -8,7 +8,6 @@ package storage
import (
"os"
"path/filepath"
)
type plan9FileLock struct {
@@ -48,8 +47,7 @@ func rename(oldpath, newpath string) error {
}
}
_, fname := filepath.Split(newpath)
return os.Rename(oldpath, fname)
return os.Rename(oldpath, newpath)
}
func syncDir(name string) error {
+6 -2
@@ -12,7 +12,11 @@ import (
"sync"
)
const typeShift = 3
const typeShift = 4
// Verify at compile-time that typeShift is large enough to cover all FileType
// values by confirming that 0 == 0.
var _ [0]struct{} = [TypeAll >> typeShift]struct{}{}
type memStorageLock struct {
ms *memStorage
@@ -143,7 +147,7 @@ func (ms *memStorage) Remove(fd FileDesc) error {
}
func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
if FileDescOk(oldfd) || FileDescOk(newfd) {
if !FileDescOk(oldfd) || !FileDescOk(newfd) {
return ErrInvalidFile
}
if oldfd == newfd {
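The added var line is a compile-time guard: the constant expression TypeAll >> typeShift must equal 0 (hence the "0 == 0" in the comment) for the array type on the right to match [0]struct{}, so giving FileType more bits than typeShift allows becomes a build error instead of silently corrupted flags. A minimal sketch of the idiom with made-up flag names:

package main

// Hypothetical flags occupying the low bits; a shifted payload starts at bitShift.
const (
	flagA = 1 << iota // 1
	flagB             // 2
	flagC             // 4

	flagAll  = flagA | flagB | flagC
	bitShift = 3
)

// Compiles only while flagAll >> bitShift == 0; adding a fourth flag
// without widening bitShift makes this a type error at build time.
var _ [0]struct{} = [flagAll >> bitShift]struct{}{}

func main() {}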
+52
@@ -8,6 +8,7 @@ package storage
import (
"bytes"
"fmt"
"testing"
)
@@ -63,3 +64,54 @@ func TestMemStorage(t *testing.T) {
t.Fatal("expecting error")
}
}
func TestMemStorageRename(t *testing.T) {
fd1 := FileDesc{Type: TypeTable, Num: 1}
fd2 := FileDesc{Type: TypeTable, Num: 2}
m := NewMemStorage()
w, err := m.Create(fd1)
if err != nil {
t.Fatalf("Storage.Create: %v", err)
}
fmt.Fprintf(w, "abc")
w.Close()
rd, err := m.Open(fd1)
if err != nil {
t.Fatalf("Storage.Open(%v): %v", fd1, err)
}
rd.Close()
fds, err := m.List(TypeAll)
if err != nil {
t.Fatalf("Storage.List: %v", err)
}
for _, fd := range fds {
if !FileDescOk(fd) {
t.Errorf("Storage.List -> FileDescOk(%q)", fd)
}
}
err = m.Rename(fd1, fd2)
if err != nil {
t.Fatalf("Storage.Rename: %v", err)
}
rd, err = m.Open(fd2)
if err != nil {
t.Fatalf("Storage.Open(%v): %v", fd2, err)
}
rd.Close()
fds, err = m.List(TypeAll)
if err != nil {
t.Fatalf("Storage.List: %v", err)
}
for _, fd := range fds {
if !FileDescOk(fd) {
t.Errorf("Storage.List -> FileDescOk(%q)", fd)
}
}
}
+1 -1
@@ -19,7 +19,7 @@ var (
// Releaser is the interface that wraps the basic Release method.
type Releaser interface {
// Release releases associated resources. Release should always success
// and can be called multipe times without causing error.
// and can be called multiple times without causing error.
Release()
}