Parcourir la source

Merge pull request #2320 from karalabe/update-goleveldb

Godeps: update goleveldb to fix a compaction data race
Jeffrey Wilcke il y a 9 ans
Parent
commit
6d3cd03a03
27 fichiers modifiés avec 194 ajouts et 2184 suppressions
  1. 12 12
      Godeps/Godeps.json
  2. 21 18
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
  3. 11 3
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
  4. 6 6
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go
  5. 16 16
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
  6. 5 7
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
  7. 10 10
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
  8. 1 1
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
  9. 5 5
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go
  10. 7 5
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
  11. 2 2
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
  12. 2 2
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go
  13. 42 42
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
  14. 5 5
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
  15. 2 2
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
  16. 6 6
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
  17. 5 5
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
  18. 14 15
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
  19. 2 2
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
  20. 0 222
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
  21. 0 21
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
  22. 0 327
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go
  23. 0 352
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go
  24. 0 211
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
  25. 0 694
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
  26. 0 171
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
  27. 20 22
      Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go

+ 12 - 12
Godeps/Godeps.json

@@ -140,51 +140,51 @@
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/cache",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/errors",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/filter",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/journal",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/opt",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/storage",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/table",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb/util",
-			"Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+			"Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
 		},
 		{
 			"ImportPath": "golang.org/x/crypto/pbkdf2",

+ 21 - 18
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go

@@ -15,6 +15,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
+// ErrBatchCorrupted records reason of batch corruption.
 type ErrBatchCorrupted struct {
 	Reason string
 }
@@ -32,6 +33,7 @@ const (
 	batchGrowRec = 3000
 )
 
+// BatchReplay wraps basic batch operations.
 type BatchReplay interface {
 	Put(key, value []byte)
 	Delete(key []byte)
@@ -68,20 +70,20 @@ func (b *Batch) grow(n int) {
 	}
 }
 
-func (b *Batch) appendRec(kt kType, key, value []byte) {
+func (b *Batch) appendRec(kt keyType, key, value []byte) {
 	n := 1 + binary.MaxVarintLen32 + len(key)
-	if kt == ktVal {
+	if kt == keyTypeVal {
 		n += binary.MaxVarintLen32 + len(value)
 	}
 	b.grow(n)
 	off := len(b.data)
 	data := b.data[:off+n]
 	data[off] = byte(kt)
-	off += 1
+	off++
 	off += binary.PutUvarint(data[off:], uint64(len(key)))
 	copy(data[off:], key)
 	off += len(key)
-	if kt == ktVal {
+	if kt == keyTypeVal {
 		off += binary.PutUvarint(data[off:], uint64(len(value)))
 		copy(data[off:], value)
 		off += len(value)
@@ -95,13 +97,13 @@ func (b *Batch) appendRec(kt kType, key, value []byte) {
 // Put appends 'put operation' of the given key/value pair to the batch.
 // It is safe to modify the contents of the argument after Put returns.
 func (b *Batch) Put(key, value []byte) {
-	b.appendRec(ktVal, key, value)
+	b.appendRec(keyTypeVal, key, value)
 }
 
 // Delete appends 'delete operation' of the given key to the batch.
 // It is safe to modify the contents of the argument after Delete returns.
 func (b *Batch) Delete(key []byte) {
-	b.appendRec(ktDel, key, nil)
+	b.appendRec(keyTypeDel, key, nil)
 }
 
 // Dump dumps batch contents. The returned slice can be loaded into the
@@ -122,11 +124,11 @@ func (b *Batch) Load(data []byte) error {
 
 // Replay replays batch contents.
 func (b *Batch) Replay(r BatchReplay) error {
-	return b.decodeRec(func(i int, kt kType, key, value []byte) error {
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
 		switch kt {
-		case ktVal:
+		case keyTypeVal:
 			r.Put(key, value)
-		case ktDel:
+		case keyTypeDel:
 			r.Delete(key)
 		}
 		return nil
@@ -195,18 +197,18 @@ func (b *Batch) decode(prevSeq uint64, data []byte) error {
 	return nil
 }
 
-func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte) error) error {
+func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
 	off := batchHdrLen
 	for i := 0; i < b.rLen; i++ {
 		if off >= len(b.data) {
 			return newErrBatchCorrupted("invalid records length")
 		}
 
-		kt := kType(b.data[off])
-		if kt > ktVal {
+		kt := keyType(b.data[off])
+		if kt > keyTypeVal {
 			return newErrBatchCorrupted("bad record: invalid type")
 		}
-		off += 1
+		off++
 
 		x, n := binary.Uvarint(b.data[off:])
 		off += n
@@ -216,7 +219,7 @@ func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte) error) erro
 		key := b.data[off : off+int(x)]
 		off += int(x)
 		var value []byte
-		if kt == ktVal {
+		if kt == keyTypeVal {
 			x, n := binary.Uvarint(b.data[off:])
 			off += n
 			if n <= 0 || off+int(x) > len(b.data) {
@@ -236,8 +239,8 @@ func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte) error) erro
 
 func (b *Batch) memReplay(to *memdb.DB) error {
 	var ikScratch []byte
-	return b.decodeRec(func(i int, kt kType, key, value []byte) error {
-		ikScratch = makeIkey(ikScratch, key, b.seq+uint64(i), kt)
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+		ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
 		return to.Put(ikScratch, value)
 	})
 }
@@ -251,8 +254,8 @@ func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) er
 
 func (b *Batch) revertMemReplay(to *memdb.DB) error {
 	var ikScratch []byte
-	return b.decodeRec(func(i int, kt kType, key, value []byte) error {
-		ikScratch := makeIkey(ikScratch, key, b.seq+uint64(i), kt)
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+		ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
 		return to.Delete(ikScratch)
 	})
 }

+ 11 - 3
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go

@@ -47,17 +47,21 @@ type Cacher interface {
 // so the the Release method will be called once object is released.
 type Value interface{}
 
-type CacheGetter struct {
+// NamespaceGetter provides convenient wrapper for namespace.
+type NamespaceGetter struct {
 	Cache *Cache
 	NS    uint64
 }
 
-func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+// Get simply calls Cache.Get() method.
+func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
 	return g.Cache.Get(g.NS, key, setFunc)
 }
 
 // The hash tables implementation is based on:
-// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
+// Kunlong Zhang, and Michael Spear.
+// ACM Symposium on Principles of Distributed Computing, Jul 2014.
 
 const (
 	mInitialSize           = 1 << 4
@@ -610,10 +614,12 @@ func (n *Node) unrefLocked() {
 	}
 }
 
+// Handle is a 'cache handle' of a 'cache node'.
 type Handle struct {
 	n unsafe.Pointer // *Node
 }
 
+// Value returns the value of the 'cache node'.
 func (h *Handle) Value() Value {
 	n := (*Node)(atomic.LoadPointer(&h.n))
 	if n != nil {
@@ -622,6 +628,8 @@ func (h *Handle) Value() Value {
 	return nil
 }
 
+// Release releases this 'cache handle'.
+// It is safe to call release multiple times.
 func (h *Handle) Release() {
 	nPtr := atomic.LoadPointer(&h.n)
 	if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {

+ 6 - 6
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go

@@ -33,9 +33,9 @@ func (icmp *iComparer) Name() string {
 }
 
 func (icmp *iComparer) Compare(a, b []byte) int {
-	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+	x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
 	if x == 0 {
-		if m, n := iKey(a).num(), iKey(b).num(); m > n {
+		if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
 			x = -1
 		} else if m < n {
 			x = 1
@@ -45,13 +45,13 @@ func (icmp *iComparer) Compare(a, b []byte) int {
 }
 
 func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
-	ua, ub := iKey(a).ukey(), iKey(b).ukey()
+	ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
 	dst = icmp.ucmp.Separator(dst, ua, ub)
 	if dst == nil {
 		return nil
 	}
 	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
-		dst = append(dst, kMaxNumBytes...)
+		dst = append(dst, keyMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
 		dst = append(dst, a[len(a)-8:]...)
@@ -60,13 +60,13 @@ func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
 }
 
 func (icmp *iComparer) Successor(dst, b []byte) []byte {
-	ub := iKey(b).ukey()
+	ub := internalKey(b).ukey()
 	dst = icmp.ucmp.Successor(dst, ub)
 	if dst == nil {
 		return nil
 	}
 	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
-		dst = append(dst, kMaxNumBytes...)
+		dst = append(dst, keyMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
 		dst = append(dst, b[len(b)-8:]...)

+ 16 - 16
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go

@@ -315,7 +315,7 @@ func recoverTable(s *session, o *opt.Options) error {
 		tw := table.NewWriter(writer, o)
 		for iter.Next() {
 			key := iter.Key()
-			if validIkey(key) {
+			if validInternalKey(key) {
 				err = tw.Append(key, iter.Value())
 				if err != nil {
 					return
@@ -380,7 +380,7 @@ func recoverTable(s *session, o *opt.Options) error {
 		// Scan the table.
 		for iter.Next() {
 			key := iter.Key()
-			_, seq, _, kerr := parseIkey(key)
+			_, seq, _, kerr := parseInternalKey(key)
 			if kerr != nil {
 				tcorruptedKey++
 				continue
@@ -472,15 +472,15 @@ func recoverTable(s *session, o *opt.Options) error {
 
 func (db *DB) recoverJournal() error {
 	// Get all journals and sort it by file number.
-	fds_, err := db.s.stor.List(storage.TypeJournal)
+	rawFds, err := db.s.stor.List(storage.TypeJournal)
 	if err != nil {
 		return err
 	}
-	sortFds(fds_)
+	sortFds(rawFds)
 
 	// Journals that will be recovered.
 	var fds []storage.FileDesc
-	for _, fd := range fds_ {
+	for _, fd := range rawFds {
 		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
 			fds = append(fds, fd)
 		}
@@ -633,15 +633,15 @@ func (db *DB) recoverJournal() error {
 
 func (db *DB) recoverJournalRO() error {
 	// Get all journals and sort it by file number.
-	fds_, err := db.s.stor.List(storage.TypeJournal)
+	rawFds, err := db.s.stor.List(storage.TypeJournal)
 	if err != nil {
 		return err
 	}
-	sortFds(fds_)
+	sortFds(rawFds)
 
 	// Journals that will be recovered.
 	var fds []storage.FileDesc
-	for _, fd := range fds_ {
+	for _, fd := range rawFds {
 		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
 			fds = append(fds, fd)
 		}
@@ -728,16 +728,16 @@ func (db *DB) recoverJournalRO() error {
 	return nil
 }
 
-func memGet(mdb *memdb.DB, ikey iKey, icmp *iComparer) (ok bool, mv []byte, err error) {
+func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
 	mk, mv, err := mdb.Find(ikey)
 	if err == nil {
-		ukey, _, kt, kerr := parseIkey(mk)
+		ukey, _, kt, kerr := parseInternalKey(mk)
 		if kerr != nil {
 			// Shouldn't have had happen.
 			panic(kerr)
 		}
 		if icmp.uCompare(ukey, ikey.ukey()) == 0 {
-			if kt == ktDel {
+			if kt == keyTypeDel {
 				return true, nil, ErrNotFound
 			}
 			return true, mv, nil
@@ -750,7 +750,7 @@ func memGet(mdb *memdb.DB, ikey iKey, icmp *iComparer) (ok bool, mv []byte, err
 }
 
 func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
-	ikey := makeIkey(nil, key, seq, ktSeek)
+	ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
 
 	if auxm != nil {
 		if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
@@ -788,7 +788,7 @@ func nilIfNotFound(err error) error {
 }
 
 func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
-	ikey := makeIkey(nil, key, seq, ktSeek)
+	ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
 
 	if auxm != nil {
 		if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
@@ -997,8 +997,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
 
 	sizes := make(Sizes, 0, len(ranges))
 	for _, r := range ranges {
-		imin := makeIkey(nil, r.Start, kMaxSeq, ktSeek)
-		imax := makeIkey(nil, r.Limit, kMaxSeq, ktSeek)
+		imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
+		imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
 		start, err := v.offsetOf(imin)
 		if err != nil {
 			return nil, err
@@ -1007,7 +1007,7 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
 		if err != nil {
 			return nil, err
 		}
-		var size uint64
+		var size int64
 		if limit >= start {
 			size = limit - start
 		}

+ 5 - 7
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go

@@ -452,7 +452,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
 		}
 
 		ikey := iter.Key()
-		ukey, seq, kt, kerr := parseIkey(ikey)
+		ukey, seq, kt, kerr := parseInternalKey(ikey)
 
 		if kerr == nil {
 			shouldStop := !resumed && b.c.shouldStopBefore(ikey)
@@ -478,14 +478,14 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
 
 				hasLastUkey = true
 				lastUkey = append(lastUkey[:0], ukey...)
-				lastSeq = kMaxSeq
+				lastSeq = keyMaxSeq
 			}
 
 			switch {
 			case lastSeq <= b.minSeq:
 				// Dropped because newer entry for same user key exist
 				fallthrough // (A)
-			case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+			case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
 				// For this user key:
 				// (1) there is no data in higher levels
 				// (2) data in lower levels will have larger seq numbers
@@ -507,7 +507,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
 			// Don't drop corrupted keys.
 			hasLastUkey = false
 			lastUkey = lastUkey[:0]
-			lastSeq = kMaxSeq
+			lastSeq = keyMaxSeq
 			b.kerrCnt++
 		}
 
@@ -548,9 +548,7 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
 		db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
 		rec.delTable(c.sourceLevel, t.fd.Num)
 		rec.addTableFile(c.sourceLevel+1, t)
-		db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
-			return db.s.commit(rec)
-		}, nil)
+		db.compactionCommit("table-move", rec)
 		return
 	}
 

+ 10 - 10
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go

@@ -19,7 +19,7 @@ import (
 )
 
 var (
-	errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
+	errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
 )
 
 type memdbReleaser struct {
@@ -70,10 +70,10 @@ func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Rang
 	if slice != nil {
 		islice = &util.Range{}
 		if slice.Start != nil {
-			islice.Start = makeIkey(nil, slice.Start, kMaxSeq, ktSeek)
+			islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
 		}
 		if slice.Limit != nil {
-			islice.Limit = makeIkey(nil, slice.Limit, kMaxSeq, ktSeek)
+			islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
 		}
 	}
 	rawIter := db.newRawIterator(auxm, auxt, islice, ro)
@@ -187,7 +187,7 @@ func (i *dbIter) Seek(key []byte) bool {
 		return false
 	}
 
-	ikey := makeIkey(nil, key, i.seq, ktSeek)
+	ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
 	if i.iter.Seek(ikey) {
 		i.dir = dirSOI
 		return i.next()
@@ -199,15 +199,15 @@ func (i *dbIter) Seek(key []byte) bool {
 
 func (i *dbIter) next() bool {
 	for {
-		if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+		if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
 			i.sampleSeek()
 			if seq <= i.seq {
 				switch kt {
-				case ktDel:
+				case keyTypeDel:
 					// Skip deleted key.
 					i.key = append(i.key[:0], ukey...)
 					i.dir = dirForward
-				case ktVal:
+				case keyTypeVal:
 					if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
 						i.key = append(i.key[:0], ukey...)
 						i.value = append(i.value[:0], i.iter.Value()...)
@@ -250,13 +250,13 @@ func (i *dbIter) prev() bool {
 	del := true
 	if i.iter.Valid() {
 		for {
-			if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+			if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
 				i.sampleSeek()
 				if seq <= i.seq {
 					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
 						return true
 					}
-					del = (kt == ktDel)
+					del = (kt == keyTypeDel)
 					if !del {
 						i.key = append(i.key[:0], ukey...)
 						i.value = append(i.value[:0], i.iter.Value()...)
@@ -292,7 +292,7 @@ func (i *dbIter) Prev() bool {
 		return i.Last()
 	case dirForward:
 		for i.iter.Prev() {
-			if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
+			if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
 				i.sampleSeek()
 				if i.icmp.uCompare(ukey, i.key) < 0 {
 					goto cont

+ 1 - 1
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go

@@ -57,7 +57,7 @@ func (db *DB) setSeq(seq uint64) {
 	atomic.StoreUint64(&db.seq, seq)
 }
 
-func (db *DB) sampleSeek(ikey iKey) {
+func (db *DB) sampleSeek(ikey internalKey) {
 	v := db.s.version()
 	if v.sampleSeek(ikey) {
 		// Trigger table compaction.

+ 5 - 5
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go

@@ -108,8 +108,8 @@ func (tr *Transaction) flush() error {
 	return nil
 }
 
-func (tr *Transaction) put(kt kType, key, value []byte) error {
-	tr.ikScratch = makeIkey(tr.ikScratch, key, tr.seq+1, kt)
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+	tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
 	if tr.mem.Free() < len(tr.ikScratch)+len(value) {
 		if err := tr.flush(); err != nil {
 			return err
@@ -134,7 +134,7 @@ func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
 	if tr.closed {
 		return errTransactionDone
 	}
-	return tr.put(ktVal, key, value)
+	return tr.put(keyTypeVal, key, value)
 }
 
 // Delete deletes the value for the given key.
@@ -148,7 +148,7 @@ func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
 	if tr.closed {
 		return errTransactionDone
 	}
-	return tr.put(ktDel, key, nil)
+	return tr.put(keyTypeDel, key, nil)
 }
 
 // Write apply the given batch to the transaction. The batch will be applied
@@ -167,7 +167,7 @@ func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
 	if tr.closed {
 		return errTransactionDone
 	}
-	return b.decodeRec(func(i int, kt kType, key, value []byte) error {
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
 		return tr.put(kt, key, value)
 	})
 }

+ 7 - 5
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go

@@ -21,14 +21,16 @@ type Reader interface {
 	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
 }
 
-type Sizes []uint64
+// Sizes is list of size.
+type Sizes []int64
 
 // Sum returns sum of the sizes.
-func (p Sizes) Sum() (n uint64) {
-	for _, s := range p {
-		n += s
+func (sizes Sizes) Sum() int64 {
+	var sum int64
+	for _, size := range sizes {
+		sum += size
 	}
-	return n
+	return sum
 }
 
 // Logging.

+ 2 - 2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go

@@ -281,8 +281,8 @@ func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
 func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
 	iter := mem.NewIterator(nil)
 	defer iter.Release()
-	return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
-		(min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
+	return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
+		(min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
 }
 
 // CompactRange compacts the underlying DB for the given key range.

+ 2 - 2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go

@@ -15,7 +15,7 @@ type iFilter struct {
 }
 
 func (f iFilter) Contains(filter, key []byte) bool {
-	return f.Filter.Contains(filter, iKey(key).ukey())
+	return f.Filter.Contains(filter, internalKey(key).ukey())
 }
 
 func (f iFilter) NewGenerator() filter.FilterGenerator {
@@ -27,5 +27,5 @@ type iFilterGenerator struct {
 }
 
 func (g iFilterGenerator) Add(key []byte) {
-	g.FilterGenerator.Add(iKey(key).ukey())
+	g.FilterGenerator.Add(internalKey(key).ukey())
 }

+ 42 - 42
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go

@@ -14,26 +14,27 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
-type ErrIkeyCorrupted struct {
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
 	Ikey   []byte
 	Reason string
 }
 
-func (e *ErrIkeyCorrupted) Error() string {
-	return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
+func (e *ErrInternalKeyCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
 }
 
-func newErrIkeyCorrupted(ikey []byte, reason string) error {
-	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
 }
 
-type kType int
+type keyType uint
 
-func (kt kType) String() string {
+func (kt keyType) String() string {
 	switch kt {
-	case ktDel:
+	case keyTypeDel:
 		return "d"
-	case ktVal:
+	case keyTypeVal:
 		return "v"
 	}
 	return "x"
@@ -42,39 +43,39 @@ func (kt kType) String() string {
 // Value types encoded as the last component of internal keys.
 // Don't modify; this value are saved to disk.
 const (
-	ktDel kType = iota
-	ktVal
+	keyTypeDel keyType = iota
+	keyTypeVal
 )
 
-// ktSeek defines the kType that should be passed when constructing an
+// keyTypeSeek defines the keyType that should be passed when constructing an
 // internal key for seeking to a particular sequence number (since we
 // sort sequence numbers in decreasing order and the value type is
 // embedded as the low 8 bits in the sequence number in internal keys,
 // we need to use the highest-numbered ValueType, not the lowest).
-const ktSeek = ktVal
+const keyTypeSeek = keyTypeVal
 
 const (
 	// Maximum value possible for sequence number; the 8-bits are
 	// used by value type, so its can packed together in single
 	// 64-bit integer.
-	kMaxSeq uint64 = (uint64(1) << 56) - 1
+	keyMaxSeq = (uint64(1) << 56) - 1
 	// Maximum value possible for packed sequence number and type.
-	kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
+	keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
 )
 
 // Maximum number encoded in bytes.
-var kMaxNumBytes = make([]byte, 8)
+var keyMaxNumBytes = make([]byte, 8)
 
 func init() {
-	binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
+	binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
 }
 
-type iKey []byte
+type internalKey []byte
 
-func makeIkey(dst, ukey []byte, seq uint64, kt kType) iKey {
-	if seq > kMaxSeq {
+func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
+	if seq > keyMaxSeq {
 		panic("leveldb: invalid sequence number")
-	} else if kt > ktVal {
+	} else if kt > keyTypeVal {
 		panic("leveldb: invalid type")
 	}
 
@@ -85,63 +86,62 @@ func makeIkey(dst, ukey []byte, seq uint64, kt kType) iKey {
 	}
 	copy(dst, ukey)
 	binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
-	return iKey(dst)
+	return internalKey(dst)
 }
 
-func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
+func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
 	if len(ik) < 8 {
-		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
+		return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
 	}
 	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
-	seq, kt = uint64(num>>8), kType(num&0xff)
-	if kt > ktVal {
-		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
+	seq, kt = uint64(num>>8), keyType(num&0xff)
+	if kt > keyTypeVal {
+		return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
 	}
 	ukey = ik[:len(ik)-8]
 	return
 }
 
-func validIkey(ik []byte) bool {
-	_, _, _, err := parseIkey(ik)
+func validInternalKey(ik []byte) bool {
+	_, _, _, err := parseInternalKey(ik)
 	return err == nil
 }
 
-func (ik iKey) assert() {
+func (ik internalKey) assert() {
 	if ik == nil {
-		panic("leveldb: nil iKey")
+		panic("leveldb: nil internalKey")
 	}
 	if len(ik) < 8 {
-		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
+		panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
 	}
 }
 
-func (ik iKey) ukey() []byte {
+func (ik internalKey) ukey() []byte {
 	ik.assert()
 	return ik[:len(ik)-8]
 }
 
-func (ik iKey) num() uint64 {
+func (ik internalKey) num() uint64 {
 	ik.assert()
 	return binary.LittleEndian.Uint64(ik[len(ik)-8:])
 }
 
-func (ik iKey) parseNum() (seq uint64, kt kType) {
+func (ik internalKey) parseNum() (seq uint64, kt keyType) {
 	num := ik.num()
-	seq, kt = uint64(num>>8), kType(num&0xff)
-	if kt > ktVal {
-		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+	seq, kt = uint64(num>>8), keyType(num&0xff)
+	if kt > keyTypeVal {
+		panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
 	}
 	return
 }
 
-func (ik iKey) String() string {
+func (ik internalKey) String() string {
 	if ik == nil {
 		return "<nil>"
 	}
 
-	if ukey, seq, kt, err := parseIkey(ik); err == nil {
+	if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
 		return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
-	} else {
-		return "<invalid>"
 	}
+	return "<invalid>"
 }

+ 5 - 5
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go

@@ -18,6 +18,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
+// ErrManifestCorrupted records manifest corruption.
 type ErrManifestCorrupted struct {
 	Field  string
 	Reason string
@@ -50,8 +51,8 @@ type session struct {
 	manifestWriter storage.Writer
 	manifestFd     storage.FileDesc
 
-	stCompPtrs []iKey   // compaction pointers; need external synchronization
-	stVersion  *version // current version
+	stCompPtrs []internalKey // compaction pointers; need external synchronization
+	stVersion  *version      // current version
 	vmu        sync.Mutex
 }
 
@@ -146,7 +147,7 @@ func (s *session) recover() (err error) {
 		if err == nil {
 			// save compact pointers
 			for _, r := range rec.compPtrs {
-				s.setCompPtr(r.level, iKey(r.ikey))
+				s.setCompPtr(r.level, internalKey(r.ikey))
 			}
 			// commit record to version staging
 			staging.commit(rec)
@@ -154,9 +155,8 @@ func (s *session) recover() (err error) {
 			err = errors.SetFd(err, fd)
 			if strict || !errors.IsCorrupted(err) {
 				return
-			} else {
-				s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
 			}
+			s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
 		}
 		rec.resetCompPtrs()
 		rec.resetAddedTables()

+ 2 - 2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go

@@ -139,7 +139,7 @@ type compaction struct {
 	gpi               int
 	seenKey           bool
 	gpOverlappedBytes int64
-	imin, imax        iKey
+	imin, imax        internalKey
 	tPtrs             []int
 	released          bool
 
@@ -242,7 +242,7 @@ func (c *compaction) baseLevelForKey(ukey []byte) bool {
 	return true
 }
 
-func (c *compaction) shouldStopBefore(ikey iKey) bool {
+func (c *compaction) shouldStopBefore(ikey internalKey) bool {
 	for ; c.gpi < len(c.gp); c.gpi++ {
 		gp := c.gp[c.gpi]
 		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {

+ 6 - 6
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go

@@ -36,15 +36,15 @@ const (
 
 type cpRecord struct {
 	level int
-	ikey  iKey
+	ikey  internalKey
 }
 
 type atRecord struct {
 	level int
 	num   int64
 	size  int64
-	imin  iKey
-	imax  iKey
+	imin  internalKey
+	imax  internalKey
 }
 
 type dtRecord struct {
@@ -96,7 +96,7 @@ func (p *sessionRecord) setSeqNum(num uint64) {
 	p.seqNum = num
 }
 
-func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
+func (p *sessionRecord) addCompPtr(level int, ikey internalKey) {
 	p.hasRec |= 1 << recCompPtr
 	p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
 }
@@ -106,7 +106,7 @@ func (p *sessionRecord) resetCompPtrs() {
 	p.compPtrs = p.compPtrs[:0]
 }
 
-func (p *sessionRecord) addTable(level int, num, size int64, imin, imax iKey) {
+func (p *sessionRecord) addTable(level int, num, size int64, imin, imax internalKey) {
 	p.hasRec |= 1 << recAddTable
 	p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
 }
@@ -299,7 +299,7 @@ func (p *sessionRecord) decode(r io.Reader) error {
 			level := p.readLevel("comp-ptr.level", br)
 			ikey := p.readBytes("comp-ptr.ikey", br)
 			if p.err == nil {
-				p.addCompPtr(level, iKey(ikey))
+				p.addCompPtr(level, internalKey(ikey))
 			}
 		case recAddTable:
 			level := p.readLevel("add-table.level", br)

+ 5 - 5
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go

@@ -106,17 +106,17 @@ func (s *session) reuseFileNum(num int64) {
 }
 
 // Set compaction ptr at given level; need external synchronization.
-func (s *session) setCompPtr(level int, ik iKey) {
+func (s *session) setCompPtr(level int, ik internalKey) {
 	if level >= len(s.stCompPtrs) {
-		newCompPtrs := make([]iKey, level+1)
+		newCompPtrs := make([]internalKey, level+1)
 		copy(newCompPtrs, s.stCompPtrs)
 		s.stCompPtrs = newCompPtrs
 	}
-	s.stCompPtrs[level] = append(iKey{}, ik...)
+	s.stCompPtrs[level] = append(internalKey{}, ik...)
 }
 
 // Get compaction ptr at given level; need external synchronization.
-func (s *session) getCompPtr(level int) iKey {
+func (s *session) getCompPtr(level int) internalKey {
 	if level >= len(s.stCompPtrs) {
 		return nil
 	}
@@ -165,7 +165,7 @@ func (s *session) recordCommited(rec *sessionRecord) {
 	}
 
 	for _, r := range rec.compPtrs {
-		s.setCompPtr(r.level, iKey(r.ikey))
+		s.setCompPtr(r.level, internalKey(r.ikey))
 	}
 }
 

+ 14 - 15
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go

@@ -24,7 +24,7 @@ type tFile struct {
 	fd         storage.FileDesc
 	seekLeft   int32
 	size       int64
-	imin, imax iKey
+	imin, imax internalKey
 }
 
 // Returns true if given key is after largest key of this table.
@@ -48,7 +48,7 @@ func (t *tFile) consumeSeek() int32 {
 }
 
 // Creates new tFile.
-func newTableFile(fd storage.FileDesc, size int64, imin, imax iKey) *tFile {
+func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile {
 	f := &tFile{
 		fd:   fd,
 		size: size,
@@ -136,7 +136,7 @@ func (tf tFiles) size() (sum int64) {
 
 // Searches smallest index of tables whose its smallest
 // key is after or equal with given key.
-func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
+func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
 	return sort.Search(len(tf), func(i int) bool {
 		return icmp.Compare(tf[i].imin, ikey) >= 0
 	})
@@ -144,7 +144,7 @@ func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
 
 // Searches smallest index of tables whose its largest
 // key is after or equal with given key.
-func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int {
+func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
 	return sort.Search(len(tf), func(i int) bool {
 		return icmp.Compare(tf[i].imax, ikey) >= 0
 	})
@@ -166,7 +166,7 @@ func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) boo
 	i := 0
 	if len(umin) > 0 {
 		// Find the earliest possible internal key for min.
-		i = tf.searchMax(icmp, makeIkey(nil, umin, kMaxSeq, ktSeek))
+		i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
 	}
 	if i >= len(tf) {
 		// Beginning of range is after all files, so no overlap.
@@ -209,7 +209,7 @@ func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, ove
 }
 
 // Returns tables key range.
-func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
 	for i, t := range tf {
 		if i == 0 {
 			imin, imax = t.imin, t.imax
@@ -231,10 +231,10 @@ func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range
 	if slice != nil {
 		var start, limit int
 		if slice.Start != nil {
-			start = tf.searchMax(icmp, iKey(slice.Start))
+			start = tf.searchMax(icmp, internalKey(slice.Start))
 		}
 		if slice.Limit != nil {
-			limit = tf.searchMin(icmp, iKey(slice.Limit))
+			limit = tf.searchMin(icmp, internalKey(slice.Limit))
 		} else {
 			limit = tf.Len()
 		}
@@ -259,7 +259,7 @@ type tFilesArrayIndexer struct {
 }
 
 func (a *tFilesArrayIndexer) Search(key []byte) int {
-	return a.searchMax(a.icmp, iKey(key))
+	return a.searchMax(a.icmp, internalKey(key))
 }
 
 func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
@@ -351,9 +351,9 @@ func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
 			return 0, nil
 		}
 
-		var bcache *cache.CacheGetter
+		var bcache *cache.NamespaceGetter
 		if t.bcache != nil {
-			bcache = &cache.CacheGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
+			bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
 		}
 
 		var tr *table.Reader
@@ -393,14 +393,13 @@ func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte,
 }
 
 // Returns approximate offset of the given key.
-func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
 	ch, err := t.open(f)
 	if err != nil {
 		return
 	}
 	defer ch.Release()
-	offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
-	return uint64(offset_), err
+	return ch.Value().(*table.Reader).OffsetOf(key)
 }
 
 // Creates an iterator from the given table.
@@ -515,7 +514,7 @@ func (w *tWriter) finish() (f *tFile, err error) {
 			return
 		}
 	}
-	f = newTableFile(w.fd, int64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
+	f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
 	return
 }
 

+ 2 - 2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go

@@ -509,7 +509,7 @@ type Reader struct {
 	mu     sync.RWMutex
 	fd     storage.FileDesc
 	reader io.ReaderAt
-	cache  *cache.CacheGetter
+	cache  *cache.NamespaceGetter
 	err    error
 	bpool  *util.BufferPool
 	// Options
@@ -988,7 +988,7 @@ func (r *Reader) Release() {
 // The fi, cache and bpool is optional and can be nil.
 //
 // The returned table reader instance is goroutine-safe.
-func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
+func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
 	if f == nil {
 		return nil, errors.New("leveldb/table: nil file")
 	}

+ 0 - 222
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go

@@ -1,222 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
-	"fmt"
-	"math/rand"
-
-	. "github.com/onsi/gomega"
-
-	"github.com/syndtr/goleveldb/leveldb/errors"
-	"github.com/syndtr/goleveldb/leveldb/iterator"
-	"github.com/syndtr/goleveldb/leveldb/util"
-)
-
-type DB interface{}
-
-type Put interface {
-	TestPut(key []byte, value []byte) error
-}
-
-type Delete interface {
-	TestDelete(key []byte) error
-}
-
-type Find interface {
-	TestFind(key []byte) (rkey, rvalue []byte, err error)
-}
-
-type Get interface {
-	TestGet(key []byte) (value []byte, err error)
-}
-
-type Has interface {
-	TestHas(key []byte) (ret bool, err error)
-}
-
-type NewIterator interface {
-	TestNewIterator(slice *util.Range) iterator.Iterator
-}
-
-type DBAct int
-
-func (a DBAct) String() string {
-	switch a {
-	case DBNone:
-		return "none"
-	case DBPut:
-		return "put"
-	case DBOverwrite:
-		return "overwrite"
-	case DBDelete:
-		return "delete"
-	case DBDeleteNA:
-		return "delete_na"
-	}
-	return "unknown"
-}
-
-const (
-	DBNone DBAct = iota
-	DBPut
-	DBOverwrite
-	DBDelete
-	DBDeleteNA
-)
-
-type DBTesting struct {
-	Rand *rand.Rand
-	DB   interface {
-		Get
-		Put
-		Delete
-	}
-	PostFn             func(t *DBTesting)
-	Deleted, Present   KeyValue
-	Act, LastAct       DBAct
-	ActKey, LastActKey []byte
-}
-
-func (t *DBTesting) post() {
-	if t.PostFn != nil {
-		t.PostFn(t)
-	}
-}
-
-func (t *DBTesting) setAct(act DBAct, key []byte) {
-	t.LastAct, t.Act = t.Act, act
-	t.LastActKey, t.ActKey = t.ActKey, key
-}
-
-func (t *DBTesting) text() string {
-	return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey)
-}
-
-func (t *DBTesting) Text() string {
-	return "DBTesting " + t.text()
-}
-
-func (t *DBTesting) TestPresentKV(key, value []byte) {
-	rvalue, err := t.DB.TestGet(key)
-	Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text())
-	Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text())
-}
-
-func (t *DBTesting) TestAllPresent() {
-	t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) {
-		t.TestPresentKV(key, value)
-	})
-}
-
-func (t *DBTesting) TestDeletedKey(key []byte) {
-	_, err := t.DB.TestGet(key)
-	Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text())
-}
-
-func (t *DBTesting) TestAllDeleted() {
-	t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) {
-		t.TestDeletedKey(key)
-	})
-}
-
-func (t *DBTesting) TestAll() {
-	dn := t.Deleted.Len()
-	pn := t.Present.Len()
-	ShuffledIndex(t.Rand, dn+pn, 1, func(i int) {
-		if i >= dn {
-			key, value := t.Present.Index(i - dn)
-			t.TestPresentKV(key, value)
-		} else {
-			t.TestDeletedKey(t.Deleted.KeyAt(i))
-		}
-	})
-}
-
-func (t *DBTesting) Put(key, value []byte) {
-	if new := t.Present.PutU(key, value); new {
-		t.setAct(DBPut, key)
-	} else {
-		t.setAct(DBOverwrite, key)
-	}
-	t.Deleted.Delete(key)
-	err := t.DB.TestPut(key, value)
-	Expect(err).ShouldNot(HaveOccurred(), t.Text())
-	t.TestPresentKV(key, value)
-	t.post()
-}
-
-func (t *DBTesting) PutRandom() bool {
-	if t.Deleted.Len() > 0 {
-		i := t.Rand.Intn(t.Deleted.Len())
-		key, value := t.Deleted.Index(i)
-		t.Put(key, value)
-		return true
-	}
-	return false
-}
-
-func (t *DBTesting) Delete(key []byte) {
-	if exist, value := t.Present.Delete(key); exist {
-		t.setAct(DBDelete, key)
-		t.Deleted.PutU(key, value)
-	} else {
-		t.setAct(DBDeleteNA, key)
-	}
-	err := t.DB.TestDelete(key)
-	Expect(err).ShouldNot(HaveOccurred(), t.Text())
-	t.TestDeletedKey(key)
-	t.post()
-}
-
-func (t *DBTesting) DeleteRandom() bool {
-	if t.Present.Len() > 0 {
-		i := t.Rand.Intn(t.Present.Len())
-		t.Delete(t.Present.KeyAt(i))
-		return true
-	}
-	return false
-}
-
-func (t *DBTesting) RandomAct(round int) {
-	for i := 0; i < round; i++ {
-		if t.Rand.Int()%2 == 0 {
-			t.PutRandom()
-		} else {
-			t.DeleteRandom()
-		}
-	}
-}
-
-func DoDBTesting(t *DBTesting) {
-	if t.Rand == nil {
-		t.Rand = NewRand()
-	}
-
-	t.DeleteRandom()
-	t.PutRandom()
-	t.DeleteRandom()
-	t.DeleteRandom()
-	for i := t.Deleted.Len() / 2; i >= 0; i-- {
-		t.PutRandom()
-	}
-	t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10)
-
-	// Additional iterator testing
-	if db, ok := t.DB.(NewIterator); ok {
-		iter := db.TestNewIterator(nil)
-		Expect(iter.Error()).NotTo(HaveOccurred())
-
-		it := IteratorTesting{
-			KeyValue: t.Present,
-			Iter:     iter,
-		}
-
-		DoIteratorTesting(&it)
-		iter.Release()
-	}
-}

+ 0 - 21
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go

@@ -1,21 +0,0 @@
-package testutil
-
-import (
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-)
-
-func RunSuite(t GinkgoTestingT, name string) {
-	RunDefer()
-
-	SynchronizedBeforeSuite(func() []byte {
-		RunDefer("setup")
-		return nil
-	}, func(data []byte) {})
-	SynchronizedAfterSuite(func() {
-		RunDefer("teardown")
-	}, func() {})
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, name)
-}

+ 0 - 327
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go

@@ -1,327 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
-	"fmt"
-	"math/rand"
-
-	. "github.com/onsi/gomega"
-
-	"github.com/syndtr/goleveldb/leveldb/iterator"
-)
-
-type IterAct int
-
-func (a IterAct) String() string {
-	switch a {
-	case IterNone:
-		return "none"
-	case IterFirst:
-		return "first"
-	case IterLast:
-		return "last"
-	case IterPrev:
-		return "prev"
-	case IterNext:
-		return "next"
-	case IterSeek:
-		return "seek"
-	case IterSOI:
-		return "soi"
-	case IterEOI:
-		return "eoi"
-	}
-	return "unknown"
-}
-
-const (
-	IterNone IterAct = iota
-	IterFirst
-	IterLast
-	IterPrev
-	IterNext
-	IterSeek
-	IterSOI
-	IterEOI
-)
-
-type IteratorTesting struct {
-	KeyValue
-	Iter         iterator.Iterator
-	Rand         *rand.Rand
-	PostFn       func(t *IteratorTesting)
-	Pos          int
-	Act, LastAct IterAct
-
-	once bool
-}
-
-func (t *IteratorTesting) init() {
-	if !t.once {
-		t.Pos = -1
-		t.once = true
-	}
-}
-
-func (t *IteratorTesting) post() {
-	if t.PostFn != nil {
-		t.PostFn(t)
-	}
-}
-
-func (t *IteratorTesting) setAct(act IterAct) {
-	t.LastAct, t.Act = t.Act, act
-}
-
-func (t *IteratorTesting) text() string {
-	return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act)
-}
-
-func (t *IteratorTesting) Text() string {
-	return "IteratorTesting is " + t.text()
-}
-
-func (t *IteratorTesting) IsFirst() bool {
-	t.init()
-	return t.Len() > 0 && t.Pos == 0
-}
-
-func (t *IteratorTesting) IsLast() bool {
-	t.init()
-	return t.Len() > 0 && t.Pos == t.Len()-1
-}
-
-func (t *IteratorTesting) TestKV() {
-	t.init()
-	key, value := t.Index(t.Pos)
-	Expect(t.Iter.Key()).NotTo(BeNil())
-	Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text())
-	Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text())
-}
-
-func (t *IteratorTesting) First() {
-	t.init()
-	t.setAct(IterFirst)
-
-	ok := t.Iter.First()
-	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
-	if t.Len() > 0 {
-		t.Pos = 0
-		Expect(ok).Should(BeTrue(), t.Text())
-		t.TestKV()
-	} else {
-		t.Pos = -1
-		Expect(ok).ShouldNot(BeTrue(), t.Text())
-	}
-	t.post()
-}
-
-func (t *IteratorTesting) Last() {
-	t.init()
-	t.setAct(IterLast)
-
-	ok := t.Iter.Last()
-	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
-	if t.Len() > 0 {
-		t.Pos = t.Len() - 1
-		Expect(ok).Should(BeTrue(), t.Text())
-		t.TestKV()
-	} else {
-		t.Pos = 0
-		Expect(ok).ShouldNot(BeTrue(), t.Text())
-	}
-	t.post()
-}
-
-func (t *IteratorTesting) Next() {
-	t.init()
-	t.setAct(IterNext)
-
-	ok := t.Iter.Next()
-	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
-	if t.Pos < t.Len()-1 {
-		t.Pos++
-		Expect(ok).Should(BeTrue(), t.Text())
-		t.TestKV()
-	} else {
-		t.Pos = t.Len()
-		Expect(ok).ShouldNot(BeTrue(), t.Text())
-	}
-	t.post()
-}
-
-func (t *IteratorTesting) Prev() {
-	t.init()
-	t.setAct(IterPrev)
-
-	ok := t.Iter.Prev()
-	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
-	if t.Pos > 0 {
-		t.Pos--
-		Expect(ok).Should(BeTrue(), t.Text())
-		t.TestKV()
-	} else {
-		t.Pos = -1
-		Expect(ok).ShouldNot(BeTrue(), t.Text())
-	}
-	t.post()
-}
-
-func (t *IteratorTesting) Seek(i int) {
-	t.init()
-	t.setAct(IterSeek)
-
-	key, _ := t.Index(i)
-	oldKey, _ := t.IndexOrNil(t.Pos)
-
-	ok := t.Iter.Seek(key)
-	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
-	Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text()))
-
-	t.Pos = i
-	t.TestKV()
-	t.post()
-}
-
-func (t *IteratorTesting) SeekInexact(i int) {
-	t.init()
-	t.setAct(IterSeek)
-	var key0 []byte
-	key1, _ := t.Index(i)
-	if i > 0 {
-		key0, _ = t.Index(i - 1)
-	}
-	key := BytesSeparator(key0, key1)
-	oldKey, _ := t.IndexOrNil(t.Pos)
-
-	ok := t.Iter.Seek(key)
-	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
-	Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text()))
-
-	t.Pos = i
-	t.TestKV()
-	t.post()
-}
-
-func (t *IteratorTesting) SeekKey(key []byte) {
-	t.init()
-	t.setAct(IterSeek)
-	oldKey, _ := t.IndexOrNil(t.Pos)
-	i := t.Search(key)
-
-	ok := t.Iter.Seek(key)
-	Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
-	if i < t.Len() {
-		key_, _ := t.Index(i)
-		Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text()))
-		t.Pos = i
-		t.TestKV()
-	} else {
-		Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text()))
-	}
-
-	t.Pos = i
-	t.post()
-}
-
-func (t *IteratorTesting) SOI() {
-	t.init()
-	t.setAct(IterSOI)
-	Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text())
-	for i := 0; i < 3; i++ {
-		t.Prev()
-	}
-	t.post()
-}
-
-func (t *IteratorTesting) EOI() {
-	t.init()
-	t.setAct(IterEOI)
-	Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text())
-	for i := 0; i < 3; i++ {
-		t.Next()
-	}
-	t.post()
-}
-
-func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) {
-	t.init()
-	for old := t.Pos; t.Pos > 0; old = t.Pos {
-		fn(t)
-		Expect(t.Pos).Should(BeNumerically("<", old), t.Text())
-	}
-}
-
-func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) {
-	t.init()
-	for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos {
-		fn(t)
-		Expect(t.Pos).Should(BeNumerically(">", old), t.Text())
-	}
-}
-
-func (t *IteratorTesting) PrevAll() {
-	t.WalkPrev(func(t *IteratorTesting) {
-		t.Prev()
-	})
-}
-
-func (t *IteratorTesting) NextAll() {
-	t.WalkNext(func(t *IteratorTesting) {
-		t.Next()
-	})
-}
-
-func DoIteratorTesting(t *IteratorTesting) {
-	if t.Rand == nil {
-		t.Rand = NewRand()
-	}
-	t.SOI()
-	t.NextAll()
-	t.First()
-	t.SOI()
-	t.NextAll()
-	t.EOI()
-	t.PrevAll()
-	t.Last()
-	t.EOI()
-	t.PrevAll()
-	t.SOI()
-
-	t.NextAll()
-	t.PrevAll()
-	t.NextAll()
-	t.Last()
-	t.PrevAll()
-	t.First()
-	t.NextAll()
-	t.EOI()
-
-	ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
-		t.Seek(i)
-	})
-
-	ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
-		t.SeekInexact(i)
-	})
-
-	ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
-		t.Seek(i)
-		if i%2 != 0 {
-			t.PrevAll()
-			t.SOI()
-		} else {
-			t.NextAll()
-			t.EOI()
-		}
-	})
-
-	for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} {
-		t.SeekKey([]byte(key))
-	}
-}

+ 0 - 352
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go

@@ -1,352 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
-	"fmt"
-	"math/rand"
-	"sort"
-	"strings"
-
-	"github.com/syndtr/goleveldb/leveldb/util"
-)
-
-type KeyValueEntry struct {
-	key, value []byte
-}
-
-type KeyValue struct {
-	entries []KeyValueEntry
-	nbytes  int
-}
-
-func (kv *KeyValue) Put(key, value []byte) {
-	if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 {
-		panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key))
-	}
-	kv.entries = append(kv.entries, KeyValueEntry{key, value})
-	kv.nbytes += len(key) + len(value)
-}
-
-func (kv *KeyValue) PutString(key, value string) {
-	kv.Put([]byte(key), []byte(value))
-}
-
-func (kv *KeyValue) PutU(key, value []byte) bool {
-	if i, exist := kv.Get(key); !exist {
-		if i < kv.Len() {
-			kv.entries = append(kv.entries[:i+1], kv.entries[i:]...)
-			kv.entries[i] = KeyValueEntry{key, value}
-		} else {
-			kv.entries = append(kv.entries, KeyValueEntry{key, value})
-		}
-		kv.nbytes += len(key) + len(value)
-		return true
-	} else {
-		kv.nbytes += len(value) - len(kv.ValueAt(i))
-		kv.entries[i].value = value
-	}
-	return false
-}
-
-func (kv *KeyValue) PutUString(key, value string) bool {
-	return kv.PutU([]byte(key), []byte(value))
-}
-
-func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) {
-	i, exist := kv.Get(key)
-	if exist {
-		value = kv.entries[i].value
-		kv.DeleteIndex(i)
-	}
-	return
-}
-
-func (kv *KeyValue) DeleteIndex(i int) bool {
-	if i < kv.Len() {
-		kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i))
-		kv.entries = append(kv.entries[:i], kv.entries[i+1:]...)
-		return true
-	}
-	return false
-}
-
-func (kv KeyValue) Len() int {
-	return len(kv.entries)
-}
-
-func (kv *KeyValue) Size() int {
-	return kv.nbytes
-}
-
-func (kv KeyValue) KeyAt(i int) []byte {
-	return kv.entries[i].key
-}
-
-func (kv KeyValue) ValueAt(i int) []byte {
-	return kv.entries[i].value
-}
-
-func (kv KeyValue) Index(i int) (key, value []byte) {
-	if i < 0 || i >= len(kv.entries) {
-		panic(fmt.Sprintf("Index #%d: out of range", i))
-	}
-	return kv.entries[i].key, kv.entries[i].value
-}
-
-func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) {
-	key, value = kv.Index(i)
-	var key0 []byte
-	var key1 = kv.KeyAt(i)
-	if i > 0 {
-		key0 = kv.KeyAt(i - 1)
-	}
-	key_ = BytesSeparator(key0, key1)
-	return
-}
-
-func (kv KeyValue) IndexOrNil(i int) (key, value []byte) {
-	if i >= 0 && i < len(kv.entries) {
-		return kv.entries[i].key, kv.entries[i].value
-	}
-	return nil, nil
-}
-
-func (kv KeyValue) IndexString(i int) (key, value string) {
-	key_, _value := kv.Index(i)
-	return string(key_), string(_value)
-}
-
-func (kv KeyValue) Search(key []byte) int {
-	return sort.Search(kv.Len(), func(i int) bool {
-		return cmp.Compare(kv.KeyAt(i), key) >= 0
-	})
-}
-
-func (kv KeyValue) SearchString(key string) int {
-	return kv.Search([]byte(key))
-}
-
-func (kv KeyValue) Get(key []byte) (i int, exist bool) {
-	i = kv.Search(key)
-	if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 {
-		exist = true
-	}
-	return
-}
-
-func (kv KeyValue) GetString(key string) (i int, exist bool) {
-	return kv.Get([]byte(key))
-}
-
-func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) {
-	for i, x := range kv.entries {
-		fn(i, x.key, x.value)
-	}
-}
-
-func (kv KeyValue) IterateString(fn func(i int, key, value string)) {
-	kv.Iterate(func(i int, key, value []byte) {
-		fn(i, string(key), string(value))
-	})
-}
-
-func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) {
-	ShuffledIndex(rnd, kv.Len(), 1, func(i int) {
-		fn(i, kv.entries[i].key, kv.entries[i].value)
-	})
-}
-
-func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) {
-	kv.IterateShuffled(rnd, func(i int, key, value []byte) {
-		fn(i, string(key), string(value))
-	})
-}
-
-func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) {
-	for i := range kv.entries {
-		key_, key, value := kv.IndexInexact(i)
-		fn(i, key_, key, value)
-	}
-}
-
-func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) {
-	kv.IterateInexact(func(i int, key_, key, value []byte) {
-		fn(i, string(key_), string(key), string(value))
-	})
-}
-
-func (kv KeyValue) Clone() KeyValue {
-	return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes}
-}
-
-func (kv KeyValue) Slice(start, limit int) KeyValue {
-	if start < 0 || limit > kv.Len() {
-		panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit))
-	} else if limit < start {
-		panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit))
-	}
-	return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes}
-}
-
-func (kv KeyValue) SliceKey(start, limit []byte) KeyValue {
-	start_ := 0
-	limit_ := kv.Len()
-	if start != nil {
-		start_ = kv.Search(start)
-	}
-	if limit != nil {
-		limit_ = kv.Search(limit)
-	}
-	return kv.Slice(start_, limit_)
-}
-
-func (kv KeyValue) SliceKeyString(start, limit string) KeyValue {
-	return kv.SliceKey([]byte(start), []byte(limit))
-}
-
-func (kv KeyValue) SliceRange(r *util.Range) KeyValue {
-	if r != nil {
-		return kv.SliceKey(r.Start, r.Limit)
-	}
-	return kv.Clone()
-}
-
-func (kv KeyValue) Range(start, limit int) (r util.Range) {
-	if kv.Len() > 0 {
-		if start == kv.Len() {
-			r.Start = BytesAfter(kv.KeyAt(start - 1))
-		} else {
-			r.Start = kv.KeyAt(start)
-		}
-	}
-	if limit < kv.Len() {
-		r.Limit = kv.KeyAt(limit)
-	}
-	return
-}
-
-func KeyValue_EmptyKey() *KeyValue {
-	kv := &KeyValue{}
-	kv.PutString("", "v")
-	return kv
-}
-
-func KeyValue_EmptyValue() *KeyValue {
-	kv := &KeyValue{}
-	kv.PutString("abc", "")
-	kv.PutString("abcd", "")
-	return kv
-}
-
-func KeyValue_OneKeyValue() *KeyValue {
-	kv := &KeyValue{}
-	kv.PutString("abc", "v")
-	return kv
-}
-
-func KeyValue_BigValue() *KeyValue {
-	kv := &KeyValue{}
-	kv.PutString("big1", strings.Repeat("1", 200000))
-	return kv
-}
-
-func KeyValue_SpecialKey() *KeyValue {
-	kv := &KeyValue{}
-	kv.PutString("\xff\xff", "v3")
-	return kv
-}
-
-func KeyValue_MultipleKeyValue() *KeyValue {
-	kv := &KeyValue{}
-	kv.PutString("a", "v")
-	kv.PutString("aa", "v1")
-	kv.PutString("aaa", "v2")
-	kv.PutString("aaacccccccccc", "v2")
-	kv.PutString("aaaccccccccccd", "v3")
-	kv.PutString("aaaccccccccccf", "v4")
-	kv.PutString("aaaccccccccccfg", "v5")
-	kv.PutString("ab", "v6")
-	kv.PutString("abc", "v7")
-	kv.PutString("abcd", "v8")
-	kv.PutString("accccccccccccccc", "v9")
-	kv.PutString("b", "v10")
-	kv.PutString("bb", "v11")
-	kv.PutString("bc", "v12")
-	kv.PutString("c", "v13")
-	kv.PutString("c1", "v13")
-	kv.PutString("czzzzzzzzzzzzzz", "v14")
-	kv.PutString("fffffffffffffff", "v15")
-	kv.PutString("g11", "v15")
-	kv.PutString("g111", "v15")
-	kv.PutString("g111\xff", "v15")
-	kv.PutString("zz", "v16")
-	kv.PutString("zzzzzzz", "v16")
-	kv.PutString("zzzzzzzzzzzzzzzz", "v16")
-	return kv
-}
-
-var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy")
-
-func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue {
-	if rnd == nil {
-		rnd = NewRand()
-	}
-	if maxlen < minlen {
-		panic("max len should >= min len")
-	}
-
-	rrand := func(min, max int) int {
-		if min == max {
-			return max
-		}
-		return rnd.Intn(max-min) + min
-	}
-
-	kv := &KeyValue{}
-	endC := byte(len(keymap) - 1)
-	gen := make([]byte, 0, maxlen)
-	for i := 0; i < n; i++ {
-		m := rrand(minlen, maxlen)
-		last := gen
-	retry:
-		gen = last[:m]
-		if k := len(last); m > k {
-			for j := k; j < m; j++ {
-				gen[j] = 0
-			}
-		} else {
-			for j := m - 1; j >= 0; j-- {
-				c := last[j]
-				if c == endC {
-					continue
-				}
-				gen[j] = c + 1
-				for j += 1; j < m; j++ {
-					gen[j] = 0
-				}
-				goto ok
-			}
-			if m < maxlen {
-				m++
-				goto retry
-			}
-			panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n))
-		ok:
-		}
-		key := make([]byte, m)
-		for j := 0; j < m; j++ {
-			key[j] = keymap[gen[j]]
-		}
-		value := make([]byte, rrand(vminlen, vmaxlen))
-		for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ {
-			value[n] = 'x'
-		}
-		kv.Put(key, value)
-	}
-	return kv
-}

+ 0 - 211
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go

@@ -1,211 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
-	"fmt"
-	"math/rand"
-
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
-	"github.com/syndtr/goleveldb/leveldb/errors"
-	"github.com/syndtr/goleveldb/leveldb/util"
-)
-
-func TestFind(db Find, kv KeyValue) {
-	ShuffledIndex(nil, kv.Len(), 1, func(i int) {
-		key_, key, value := kv.IndexInexact(i)
-
-		// Using exact key.
-		rkey, rvalue, err := db.TestFind(key)
-		Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
-		Expect(rkey).Should(Equal(key), "Key")
-		Expect(rvalue).Should(Equal(value), "Value for key %q", key)
-
-		// Using inexact key.
-		rkey, rvalue, err = db.TestFind(key_)
-		Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key)
-		Expect(rkey).Should(Equal(key))
-		Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
-	})
-}
-
-func TestFindAfterLast(db Find, kv KeyValue) {
-	var key []byte
-	if kv.Len() > 0 {
-		key_, _ := kv.Index(kv.Len() - 1)
-		key = BytesAfter(key_)
-	}
-	rkey, _, err := db.TestFind(key)
-	Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
-	Expect(err).Should(Equal(errors.ErrNotFound))
-}
-
-func TestGet(db Get, kv KeyValue) {
-	ShuffledIndex(nil, kv.Len(), 1, func(i int) {
-		key_, key, value := kv.IndexInexact(i)
-
-		// Using exact key.
-		rvalue, err := db.TestGet(key)
-		Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
-		Expect(rvalue).Should(Equal(value), "Value for key %q", key)
-
-		// Using inexact key.
-		if len(key_) > 0 {
-			_, err = db.TestGet(key_)
-			Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
-			Expect(err).Should(Equal(errors.ErrNotFound))
-		}
-	})
-}
-
-func TestHas(db Has, kv KeyValue) {
-	ShuffledIndex(nil, kv.Len(), 1, func(i int) {
-		key_, key, _ := kv.IndexInexact(i)
-
-		// Using exact key.
-		ret, err := db.TestHas(key)
-		Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
-		Expect(ret).Should(BeTrue(), "False for key %q", key)
-
-		// Using inexact key.
-		if len(key_) > 0 {
-			ret, err = db.TestHas(key_)
-			Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
-			Expect(ret).ShouldNot(BeTrue(), "True for key %q", key)
-		}
-	})
-}
-
-func TestIter(db NewIterator, r *util.Range, kv KeyValue) {
-	iter := db.TestNewIterator(r)
-	Expect(iter.Error()).ShouldNot(HaveOccurred())
-
-	t := IteratorTesting{
-		KeyValue: kv,
-		Iter:     iter,
-	}
-
-	DoIteratorTesting(&t)
-	iter.Release()
-}
-
-func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
-	if rnd == nil {
-		rnd = NewRand()
-	}
-
-	if p == nil {
-		BeforeEach(func() {
-			p = setup(kv)
-		})
-		if teardown != nil {
-			AfterEach(func() {
-				teardown(p)
-			})
-		}
-	}
-
-	It("Should find all keys with Find", func() {
-		if db, ok := p.(Find); ok {
-			TestFind(db, kv)
-		}
-	})
-
-	It("Should return error if Find on key after the last", func() {
-		if db, ok := p.(Find); ok {
-			TestFindAfterLast(db, kv)
-		}
-	})
-
-	It("Should only find exact key with Get", func() {
-		if db, ok := p.(Get); ok {
-			TestGet(db, kv)
-		}
-	})
-
-	It("Should only find present key with Has", func() {
-		if db, ok := p.(Has); ok {
-			TestHas(db, kv)
-		}
-	})
-
-	It("Should iterates and seeks correctly", func(done Done) {
-		if db, ok := p.(NewIterator); ok {
-			TestIter(db, nil, kv.Clone())
-		}
-		done <- true
-	}, 3.0)
-
-	It("Should iterates and seeks slice correctly", func(done Done) {
-		if db, ok := p.(NewIterator); ok {
-			RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
-				type slice struct {
-					r            *util.Range
-					start, limit int
-				}
-
-				key_, _, _ := kv.IndexInexact(i)
-				for _, x := range []slice{
-					{&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
-					{&util.Range{Start: nil, Limit: key_}, 0, i},
-				} {
-					By(fmt.Sprintf("Random index of %d .. %d", x.start, x.limit), func() {
-						TestIter(db, x.r, kv.Slice(x.start, x.limit))
-					})
-				}
-			})
-		}
-		done <- true
-	}, 50.0)
-
-	It("Should iterates and seeks slice correctly", func(done Done) {
-		if db, ok := p.(NewIterator); ok {
-			RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
-				By(fmt.Sprintf("Random range of %d .. %d", start, limit), func() {
-					r := kv.Range(start, limit)
-					TestIter(db, &r, kv.Slice(start, limit))
-				})
-			})
-		}
-		done <- true
-	}, 50.0)
-}
-
-func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
-	Test := func(kv *KeyValue) func() {
-		return func() {
-			var p DB
-			if setup != nil {
-				Defer("setup", func() {
-					p = setup(*kv)
-				})
-			}
-			if teardown != nil {
-				Defer("teardown", func() {
-					teardown(p)
-				})
-			}
-			if body != nil {
-				p = body(*kv)
-			}
-			KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
-				return p
-			}, nil)
-		}
-	}
-
-	Describe("with no key/value (empty)", Test(&KeyValue{}))
-	Describe("with empty key", Test(KeyValue_EmptyKey()))
-	Describe("with empty value", Test(KeyValue_EmptyValue()))
-	Describe("with one key/value", Test(KeyValue_OneKeyValue()))
-	Describe("with big value", Test(KeyValue_BigValue()))
-	Describe("with special key", Test(KeyValue_SpecialKey()))
-	Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
-	Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
-}

+ 0 - 694
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go

@@ -1,694 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"math/rand"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"sync"
-
-	. "github.com/onsi/gomega"
-
-	"github.com/syndtr/goleveldb/leveldb/storage"
-	"github.com/syndtr/goleveldb/leveldb/util"
-)
-
-var (
-	storageMu     sync.Mutex
-	storageUseFS  = true
-	storageKeepFS = false
-	storageNum    int
-)
-
-type StorageMode int
-
-const (
-	ModeOpen StorageMode = 1 << iota
-	ModeCreate
-	ModeRemove
-	ModeRename
-	ModeRead
-	ModeWrite
-	ModeSync
-	ModeClose
-)
-
-const (
-	modeOpen = iota
-	modeCreate
-	modeRemove
-	modeRename
-	modeRead
-	modeWrite
-	modeSync
-	modeClose
-
-	modeCount
-)
-
-const (
-	typeManifest = iota
-	typeJournal
-	typeTable
-	typeTemp
-
-	typeCount
-)
-
-const flattenCount = modeCount * typeCount
-
-func flattenType(m StorageMode, t storage.FileType) int {
-	var x int
-	switch m {
-	case ModeOpen:
-		x = modeOpen
-	case ModeCreate:
-		x = modeCreate
-	case ModeRemove:
-		x = modeRemove
-	case ModeRename:
-		x = modeRename
-	case ModeRead:
-		x = modeRead
-	case ModeWrite:
-		x = modeWrite
-	case ModeSync:
-		x = modeSync
-	case ModeClose:
-		x = modeClose
-	default:
-		panic("invalid storage mode")
-	}
-	x *= typeCount
-	switch t {
-	case storage.TypeManifest:
-		return x + typeManifest
-	case storage.TypeJournal:
-		return x + typeJournal
-	case storage.TypeTable:
-		return x + typeTable
-	case storage.TypeTemp:
-		return x + typeTemp
-	default:
-		panic("invalid file type")
-	}
-}
-
-func listFlattenType(m StorageMode, t storage.FileType) []int {
-	ret := make([]int, 0, flattenCount)
-	add := func(x int) {
-		x *= typeCount
-		switch {
-		case t&storage.TypeManifest != 0:
-			ret = append(ret, x+typeManifest)
-		case t&storage.TypeJournal != 0:
-			ret = append(ret, x+typeJournal)
-		case t&storage.TypeTable != 0:
-			ret = append(ret, x+typeTable)
-		case t&storage.TypeTemp != 0:
-			ret = append(ret, x+typeTemp)
-		}
-	}
-	switch {
-	case m&ModeOpen != 0:
-		add(modeOpen)
-	case m&ModeCreate != 0:
-		add(modeCreate)
-	case m&ModeRemove != 0:
-		add(modeRemove)
-	case m&ModeRename != 0:
-		add(modeRename)
-	case m&ModeRead != 0:
-		add(modeRead)
-	case m&ModeWrite != 0:
-		add(modeWrite)
-	case m&ModeSync != 0:
-		add(modeSync)
-	case m&ModeClose != 0:
-		add(modeClose)
-	}
-	return ret
-}
-
-func packFile(fd storage.FileDesc) uint64 {
-	if fd.Num>>(63-typeCount) != 0 {
-		panic("overflow")
-	}
-	return uint64(fd.Num<<typeCount) | uint64(fd.Type)
-}
-
-func unpackFile(x uint64) storage.FileDesc {
-	return storage.FileDesc{storage.FileType(x) & storage.TypeAll, int64(x >> typeCount)}
-}
-
-type emulatedError struct {
-	err error
-}
-
-func (err emulatedError) Error() string {
-	return fmt.Sprintf("emulated storage error: %v", err.err)
-}
-
-type storageLock struct {
-	s *Storage
-	r util.Releaser
-}
-
-func (l storageLock) Release() {
-	l.r.Release()
-	l.s.logI("storage lock released")
-}
-
-type reader struct {
-	s  *Storage
-	fd storage.FileDesc
-	storage.Reader
-}
-
-func (r *reader) Read(p []byte) (n int, err error) {
-	err = r.s.emulateError(ModeRead, r.fd.Type)
-	if err == nil {
-		r.s.stall(ModeRead, r.fd.Type)
-		n, err = r.Reader.Read(p)
-	}
-	r.s.count(ModeRead, r.fd.Type, n)
-	if err != nil && err != io.EOF {
-		r.s.logI("read error, fd=%s n=%d err=%v", r.fd, n, err)
-	}
-	return
-}
-
-func (r *reader) ReadAt(p []byte, off int64) (n int, err error) {
-	err = r.s.emulateError(ModeRead, r.fd.Type)
-	if err == nil {
-		r.s.stall(ModeRead, r.fd.Type)
-		n, err = r.Reader.ReadAt(p, off)
-	}
-	r.s.count(ModeRead, r.fd.Type, n)
-	if err != nil && err != io.EOF {
-		r.s.logI("readAt error, fd=%s offset=%d n=%d err=%v", r.fd, off, n, err)
-	}
-	return
-}
-
-func (r *reader) Close() (err error) {
-	return r.s.fileClose(r.fd, r.Reader)
-}
-
-type writer struct {
-	s  *Storage
-	fd storage.FileDesc
-	storage.Writer
-}
-
-func (w *writer) Write(p []byte) (n int, err error) {
-	err = w.s.emulateError(ModeWrite, w.fd.Type)
-	if err == nil {
-		w.s.stall(ModeWrite, w.fd.Type)
-		n, err = w.Writer.Write(p)
-	}
-	w.s.count(ModeWrite, w.fd.Type, n)
-	if err != nil && err != io.EOF {
-		w.s.logI("write error, fd=%s n=%d err=%v", w.fd, n, err)
-	}
-	return
-}
-
-func (w *writer) Sync() (err error) {
-	err = w.s.emulateError(ModeSync, w.fd.Type)
-	if err == nil {
-		w.s.stall(ModeSync, w.fd.Type)
-		err = w.Writer.Sync()
-	}
-	w.s.count(ModeSync, w.fd.Type, 0)
-	if err != nil {
-		w.s.logI("sync error, fd=%s err=%v", w.fd, err)
-	}
-	return
-}
-
-func (w *writer) Close() (err error) {
-	return w.s.fileClose(w.fd, w.Writer)
-}
-
-type Storage struct {
-	storage.Storage
-	path    string
-	onClose func() (preserve bool, err error)
-	onLog   func(str string)
-
-	lmu sync.Mutex
-	lb  bytes.Buffer
-
-	mu   sync.Mutex
-	rand *rand.Rand
-	// Open files, true=writer, false=reader
-	opens                   map[uint64]bool
-	counters                [flattenCount]int
-	bytesCounter            [flattenCount]int64
-	emulatedError           [flattenCount]error
-	emulatedErrorOnce       [flattenCount]bool
-	emulatedRandomError     [flattenCount]error
-	emulatedRandomErrorProb [flattenCount]float64
-	stallCond               sync.Cond
-	stalled                 [flattenCount]bool
-}
-
-func (s *Storage) log(skip int, str string) {
-	s.lmu.Lock()
-	defer s.lmu.Unlock()
-	_, file, line, ok := runtime.Caller(skip + 2)
-	if ok {
-		// Truncate file name at last file name separator.
-		if index := strings.LastIndex(file, "/"); index >= 0 {
-			file = file[index+1:]
-		} else if index = strings.LastIndex(file, "\\"); index >= 0 {
-			file = file[index+1:]
-		}
-	} else {
-		file = "???"
-		line = 1
-	}
-	fmt.Fprintf(&s.lb, "%s:%d: ", file, line)
-	lines := strings.Split(str, "\n")
-	if l := len(lines); l > 1 && lines[l-1] == "" {
-		lines = lines[:l-1]
-	}
-	for i, line := range lines {
-		if i > 0 {
-			s.lb.WriteString("\n\t")
-		}
-		s.lb.WriteString(line)
-	}
-	if s.onLog != nil {
-		s.onLog(s.lb.String())
-		s.lb.Reset()
-	} else {
-		s.lb.WriteByte('\n')
-	}
-}
-
-func (s *Storage) logISkip(skip int, format string, args ...interface{}) {
-	pc, _, _, ok := runtime.Caller(skip + 1)
-	if ok {
-		if f := runtime.FuncForPC(pc); f != nil {
-			fname := f.Name()
-			if index := strings.LastIndex(fname, "."); index >= 0 {
-				fname = fname[index+1:]
-			}
-			format = fname + ": " + format
-		}
-	}
-	s.log(skip+1, fmt.Sprintf(format, args...))
-}
-
-func (s *Storage) logI(format string, args ...interface{}) {
-	s.logISkip(1, format, args...)
-}
-
-func (s *Storage) OnLog(onLog func(log string)) {
-	s.lmu.Lock()
-	s.onLog = onLog
-	if s.lb.Len() != 0 {
-		log := s.lb.String()
-		s.onLog(log[:len(log)-1])
-		s.lb.Reset()
-	}
-	s.lmu.Unlock()
-}
-
-func (s *Storage) Log(str string) {
-	s.log(1, "Log: "+str)
-	s.Storage.Log(str)
-}
-
-func (s *Storage) Lock() (l storage.Lock, err error) {
-	l, err = s.Storage.Lock()
-	if err != nil {
-		s.logI("storage locking failed, err=%v", err)
-	} else {
-		s.logI("storage locked")
-		l = storageLock{s, l}
-	}
-	return
-}
-
-func (s *Storage) List(t storage.FileType) (fds []storage.FileDesc, err error) {
-	fds, err = s.Storage.List(t)
-	if err != nil {
-		s.logI("list failed, err=%v", err)
-		return
-	}
-	s.logI("list, type=0x%x count=%d", int(t), len(fds))
-	return
-}
-
-func (s *Storage) GetMeta() (fd storage.FileDesc, err error) {
-	fd, err = s.Storage.GetMeta()
-	if err != nil {
-		if !os.IsNotExist(err) {
-			s.logI("get meta failed, err=%v", err)
-		}
-		return
-	}
-	s.logI("get meta, fd=%s", fd)
-	return
-}
-
-func (s *Storage) SetMeta(fd storage.FileDesc) error {
-	ExpectWithOffset(1, fd.Type).To(Equal(storage.TypeManifest))
-	err := s.Storage.SetMeta(fd)
-	if err != nil {
-		s.logI("set meta failed, fd=%s err=%v", fd, err)
-	} else {
-		s.logI("set meta, fd=%s", fd)
-	}
-	return err
-}
-
-func (s *Storage) fileClose(fd storage.FileDesc, closer io.Closer) (err error) {
-	err = s.emulateError(ModeClose, fd.Type)
-	if err == nil {
-		s.stall(ModeClose, fd.Type)
-	}
-	x := packFile(fd)
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if err == nil {
-		ExpectWithOffset(2, s.opens).To(HaveKey(x), "File closed, fd=%s", fd)
-		err = closer.Close()
-	}
-	s.countNB(ModeClose, fd.Type, 0)
-	writer := s.opens[x]
-	if err != nil {
-		s.logISkip(1, "file close failed, fd=%s writer=%v err=%v", fd, writer, err)
-	} else {
-		s.logISkip(1, "file closed, fd=%s writer=%v", fd, writer)
-		delete(s.opens, x)
-	}
-	return
-}
-
-func (s *Storage) assertOpen(fd storage.FileDesc) {
-	x := packFile(fd)
-	ExpectWithOffset(2, s.opens).NotTo(HaveKey(x), "File open, fd=%s writer=%v", fd, s.opens[x])
-}
-
-func (s *Storage) Open(fd storage.FileDesc) (r storage.Reader, err error) {
-	err = s.emulateError(ModeOpen, fd.Type)
-	if err == nil {
-		s.stall(ModeOpen, fd.Type)
-	}
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if err == nil {
-		s.assertOpen(fd)
-		s.countNB(ModeOpen, fd.Type, 0)
-		r, err = s.Storage.Open(fd)
-	}
-	if err != nil {
-		s.logI("file open failed, fd=%s err=%v", fd, err)
-	} else {
-		s.logI("file opened, fd=%s", fd)
-		s.opens[packFile(fd)] = false
-		r = &reader{s, fd, r}
-	}
-	return
-}
-
-func (s *Storage) Create(fd storage.FileDesc) (w storage.Writer, err error) {
-	err = s.emulateError(ModeCreate, fd.Type)
-	if err == nil {
-		s.stall(ModeCreate, fd.Type)
-	}
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if err == nil {
-		s.assertOpen(fd)
-		s.countNB(ModeCreate, fd.Type, 0)
-		w, err = s.Storage.Create(fd)
-	}
-	if err != nil {
-		s.logI("file create failed, fd=%s err=%v", fd, err)
-	} else {
-		s.logI("file created, fd=%s", fd)
-		s.opens[packFile(fd)] = true
-		w = &writer{s, fd, w}
-	}
-	return
-}
-
-func (s *Storage) Remove(fd storage.FileDesc) (err error) {
-	err = s.emulateError(ModeRemove, fd.Type)
-	if err == nil {
-		s.stall(ModeRemove, fd.Type)
-	}
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if err == nil {
-		s.assertOpen(fd)
-		s.countNB(ModeRemove, fd.Type, 0)
-		err = s.Storage.Remove(fd)
-	}
-	if err != nil {
-		s.logI("file remove failed, fd=%s err=%v", fd, err)
-	} else {
-		s.logI("file removed, fd=%s", fd)
-	}
-	return
-}
-
-func (s *Storage) ForceRemove(fd storage.FileDesc) (err error) {
-	s.countNB(ModeRemove, fd.Type, 0)
-	if err = s.Storage.Remove(fd); err != nil {
-		s.logI("file remove failed (forced), fd=%s err=%v", fd, err)
-	} else {
-		s.logI("file removed (forced), fd=%s", fd)
-	}
-	return
-}
-
-func (s *Storage) Rename(oldfd, newfd storage.FileDesc) (err error) {
-	err = s.emulateError(ModeRename, oldfd.Type)
-	if err == nil {
-		s.stall(ModeRename, oldfd.Type)
-	}
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if err == nil {
-		s.assertOpen(oldfd)
-		s.assertOpen(newfd)
-		s.countNB(ModeRename, oldfd.Type, 0)
-		err = s.Storage.Rename(oldfd, newfd)
-	}
-	if err != nil {
-		s.logI("file rename failed, oldfd=%s newfd=%s err=%v", oldfd, newfd, err)
-	} else {
-		s.logI("file renamed, oldfd=%s newfd=%s", oldfd, newfd)
-	}
-	return
-}
-
-func (s *Storage) ForceRename(oldfd, newfd storage.FileDesc) (err error) {
-	s.countNB(ModeRename, oldfd.Type, 0)
-	if err = s.Storage.Rename(oldfd, newfd); err != nil {
-		s.logI("file rename failed (forced), oldfd=%s newfd=%s err=%v", oldfd, newfd, err)
-	} else {
-		s.logI("file renamed (forced), oldfd=%s newfd=%s", oldfd, newfd)
-	}
-	return
-}
-
-func (s *Storage) openFiles() string {
-	out := "Open files:"
-	for x, writer := range s.opens {
-		fd := unpackFile(x)
-		out += fmt.Sprintf("\n · fd=%s writer=%v", fd, writer)
-	}
-	return out
-}
-
-func (s *Storage) CloseCheck() {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
-}
-
-func (s *Storage) OnClose(onClose func() (preserve bool, err error)) {
-	s.mu.Lock()
-	s.onClose = onClose
-	s.mu.Unlock()
-}
-
-func (s *Storage) Close() error {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
-	err := s.Storage.Close()
-	if err != nil {
-		s.logI("storage closing failed, err=%v", err)
-	} else {
-		s.logI("storage closed")
-	}
-	var preserve bool
-	if s.onClose != nil {
-		var err0 error
-		if preserve, err0 = s.onClose(); err0 != nil {
-			s.logI("onClose error, err=%v", err0)
-		}
-	}
-	if s.path != "" {
-		if storageKeepFS || preserve {
-			s.logI("storage is preserved, path=%v", s.path)
-		} else {
-			if err1 := os.RemoveAll(s.path); err1 != nil {
-				s.logI("cannot remove storage, err=%v", err1)
-			} else {
-				s.logI("storage has been removed")
-			}
-		}
-	}
-	return err
-}
-
-func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) {
-	s.counters[flattenType(m, t)]++
-	s.bytesCounter[flattenType(m, t)] += int64(n)
-}
-
-func (s *Storage) count(m StorageMode, t storage.FileType, n int) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	s.countNB(m, t, n)
-}
-
-func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) {
-	for _, x := range listFlattenType(m, t) {
-		s.counters[x] = 0
-		s.bytesCounter[x] = 0
-	}
-}
-
-func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) {
-	for _, x := range listFlattenType(m, t) {
-		count += s.counters[x]
-		bytes += s.bytesCounter[x]
-	}
-	return
-}
-
-func (s *Storage) emulateError(m StorageMode, t storage.FileType) error {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	x := flattenType(m, t)
-	if err := s.emulatedError[x]; err != nil {
-		if s.emulatedErrorOnce[x] {
-			s.emulatedError[x] = nil
-		}
-		return emulatedError{err}
-	}
-	if err := s.emulatedRandomError[x]; err != nil && s.rand.Float64() < s.emulatedRandomErrorProb[x] {
-		return emulatedError{err}
-	}
-	return nil
-}
-
-func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	for _, x := range listFlattenType(m, t) {
-		s.emulatedError[x] = err
-		s.emulatedErrorOnce[x] = false
-	}
-}
-
-func (s *Storage) EmulateErrorOnce(m StorageMode, t storage.FileType, err error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	for _, x := range listFlattenType(m, t) {
-		s.emulatedError[x] = err
-		s.emulatedErrorOnce[x] = true
-	}
-}
-
-func (s *Storage) EmulateRandomError(m StorageMode, t storage.FileType, prob float64, err error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	for _, x := range listFlattenType(m, t) {
-		s.emulatedRandomError[x] = err
-		s.emulatedRandomErrorProb[x] = prob
-	}
-}
-
-func (s *Storage) stall(m StorageMode, t storage.FileType) {
-	x := flattenType(m, t)
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	for s.stalled[x] {
-		s.stallCond.Wait()
-	}
-}
-
-func (s *Storage) Stall(m StorageMode, t storage.FileType) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	for _, x := range listFlattenType(m, t) {
-		s.stalled[x] = true
-	}
-}
-
-func (s *Storage) Release(m StorageMode, t storage.FileType) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	for _, x := range listFlattenType(m, t) {
-		s.stalled[x] = false
-	}
-	s.stallCond.Broadcast()
-}
-
-func NewStorage() *Storage {
-	var (
-		stor storage.Storage
-		path string
-	)
-	if storageUseFS {
-		for {
-			storageMu.Lock()
-			num := storageNum
-			storageNum++
-			storageMu.Unlock()
-			path = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
-			if _, err := os.Stat(path); os.IsNotExist(err) {
-				stor, err = storage.OpenFile(path, false)
-				ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path)
-				break
-			}
-		}
-	} else {
-		stor = storage.NewMemStorage()
-	}
-	s := &Storage{
-		Storage: stor,
-		path:    path,
-		rand:    NewRand(),
-		opens:   make(map[uint64]bool),
-	}
-	s.stallCond.L = &s.mu
-	if s.path != "" {
-		s.logI("using FS storage")
-		s.logI("storage path: %s", s.path)
-	} else {
-		s.logI("using MEM storage")
-	}
-	return s
-}

+ 0 - 171
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go

@@ -1,171 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
-	"bytes"
-	"flag"
-	"math/rand"
-	"reflect"
-	"sync"
-
-	"github.com/onsi/ginkgo/config"
-
-	"github.com/syndtr/goleveldb/leveldb/comparer"
-)
-
-var (
-	runfn = make(map[string][]func())
-	runmu sync.Mutex
-)
-
-func Defer(args ...interface{}) bool {
-	var (
-		group string
-		fn    func()
-	)
-	for _, arg := range args {
-		v := reflect.ValueOf(arg)
-		switch v.Kind() {
-		case reflect.String:
-			group = v.String()
-		case reflect.Func:
-			r := reflect.ValueOf(&fn).Elem()
-			r.Set(v)
-		}
-	}
-	if fn != nil {
-		runmu.Lock()
-		runfn[group] = append(runfn[group], fn)
-		runmu.Unlock()
-	}
-	return true
-}
-
-func RunDefer(groups ...string) bool {
-	if len(groups) == 0 {
-		groups = append(groups, "")
-	}
-	runmu.Lock()
-	var runfn_ []func()
-	for _, group := range groups {
-		runfn_ = append(runfn_, runfn[group]...)
-		delete(runfn, group)
-	}
-	runmu.Unlock()
-	for _, fn := range runfn_ {
-		fn()
-	}
-	return runfn_ != nil
-}
-
-func RandomSeed() int64 {
-	if !flag.Parsed() {
-		panic("random seed not initialized")
-	}
-	return config.GinkgoConfig.RandomSeed
-}
-
-func NewRand() *rand.Rand {
-	return rand.New(rand.NewSource(RandomSeed()))
-}
-
-var cmp = comparer.DefaultComparer
-
-func BytesSeparator(a, b []byte) []byte {
-	if bytes.Equal(a, b) {
-		return b
-	}
-	i, n := 0, len(a)
-	if n > len(b) {
-		n = len(b)
-	}
-	for ; i < n && (a[i] == b[i]); i++ {
-	}
-	x := append([]byte{}, a[:i]...)
-	if i < n {
-		if c := a[i] + 1; c < b[i] {
-			return append(x, c)
-		}
-		x = append(x, a[i])
-		i++
-	}
-	for ; i < len(a); i++ {
-		if c := a[i]; c < 0xff {
-			return append(x, c+1)
-		} else {
-			x = append(x, c)
-		}
-	}
-	if len(b) > i && b[i] > 0 {
-		return append(x, b[i]-1)
-	}
-	return append(x, 'x')
-}
-
-func BytesAfter(b []byte) []byte {
-	var x []byte
-	for _, c := range b {
-		if c < 0xff {
-			return append(x, c+1)
-		} else {
-			x = append(x, c)
-		}
-	}
-	return append(x, 'x')
-}
-
-func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
-	if rnd == nil {
-		rnd = NewRand()
-	}
-	for x := 0; x < round; x++ {
-		fn(rnd.Intn(n))
-	}
-	return
-}
-
-func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
-	if rnd == nil {
-		rnd = NewRand()
-	}
-	for x := 0; x < round; x++ {
-		for _, i := range rnd.Perm(n) {
-			fn(i)
-		}
-	}
-	return
-}
-
-func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) {
-	if rnd == nil {
-		rnd = NewRand()
-	}
-	for x := 0; x < round; x++ {
-		start := rnd.Intn(n)
-		length := 0
-		if j := n - start; j > 0 {
-			length = rnd.Intn(j)
-		}
-		fn(start, start+length)
-	}
-	return
-}
-
-func Max(x, y int) int {
-	if x > y {
-		return x
-	}
-	return y
-}
-
-func Min(x, y int) int {
-	if x < y {
-		return x
-	}
-	return y
-}

+ 20 - 22
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go

@@ -79,7 +79,7 @@ func (v *version) release() {
 	v.s.vmu.Unlock()
 }
 
-func (v *version) walkOverlapping(aux tFiles, ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
 	ukey := ikey.ukey()
 
 	// Aux level.
@@ -130,7 +130,7 @@ func (v *version) walkOverlapping(aux tFiles, ikey iKey, f func(level int, t *tF
 	}
 }
 
-func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
 	ukey := ikey.ukey()
 
 	var (
@@ -140,7 +140,7 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
 		// Level-0.
 		zfound bool
 		zseq   uint64
-		zkt    kType
+		zkt    keyType
 		zval   []byte
 	)
 
@@ -176,7 +176,7 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
 			return false
 		}
 
-		if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
+		if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil {
 			if v.s.icmp.uCompare(ukey, fukey) == 0 {
 				// Level <= 0 may overlaps each-other.
 				if level <= 0 {
@@ -188,12 +188,12 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
 					}
 				} else {
 					switch fkt {
-					case ktVal:
+					case keyTypeVal:
 						value = fval
 						err = nil
-					case ktDel:
+					case keyTypeDel:
 					default:
-						panic("leveldb: invalid iKey type")
+						panic("leveldb: invalid internalKey type")
 					}
 					return false
 				}
@@ -207,12 +207,12 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
 	}, func(level int) bool {
 		if zfound {
 			switch zkt {
-			case ktVal:
+			case keyTypeVal:
 				value = zval
 				err = nil
-			case ktDel:
+			case keyTypeDel:
 			default:
-				panic("leveldb: invalid iKey type")
+				panic("leveldb: invalid internalKey type")
 			}
 			return false
 		}
@@ -227,19 +227,18 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
 	return
 }
 
-func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
+func (v *version) sampleSeek(ikey internalKey) (tcomp bool) {
 	var tset *tSet
 
 	v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool {
 		if tset == nil {
 			tset = &tSet{level, t}
 			return true
-		} else {
-			if tset.table.consumeSeek() <= 0 {
-				tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
-			}
-			return false
 		}
+		if tset.table.consumeSeek() <= 0 {
+			tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+		}
+		return false
 	}, nil)
 
 	return
@@ -286,12 +285,12 @@ func (v *version) tLen(level int) int {
 	return 0
 }
 
-func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
+func (v *version) offsetOf(ikey internalKey) (n int64, err error) {
 	for level, tables := range v.levels {
 		for _, t := range tables {
 			if v.s.icmp.Compare(t.imax, ikey) <= 0 {
 				// Entire file is before "ikey", so just add the file size
-				n += uint64(t.size)
+				n += t.size
 			} else if v.s.icmp.Compare(t.imin, ikey) > 0 {
 				// Entire file is after "ikey", so ignore
 				if level > 0 {
@@ -303,12 +302,11 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
 			} else {
 				// "ikey" falls in the range for this table. Add the
 				// approximate offset of "ikey" within the table.
-				var nn uint64
-				nn, err = v.s.tops.offsetOf(t, ikey)
-				if err != nil {
+				if m, err := v.s.tops.offsetOf(t, ikey); err == nil {
+					n += m
+				} else {
 					return 0, err
 				}
-				n += nn
 			}
 		}
 	}