-rw-r--r--  Godeps/Godeps.json  24
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go  39
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go  14
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go  12
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go  32
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go  12
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go  20
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go  2
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go  10
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go  12
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go  4
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go  4
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go  84
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go  10
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go  4
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go  12
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go  10
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go  29
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go  4
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go  222
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go  21
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go  327
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go  352
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go  211
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go  694
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go  171
-rw-r--r--  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go  42
-rw-r--r--  core/blockchain.go  492
-rw-r--r--  core/blockchain_test.go  91
-rw-r--r--  core/headerchain.go  432
-rw-r--r--  core/types.go  12
31 files changed, 870 insertions, 2535 deletions
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 7b6ba5c3c..9bcc8c756 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -140,51 +140,51 @@
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/cache",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/errors",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/filter",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/journal",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/opt",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/storage",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/table",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/util",
- "Rev": "e7e6f5b5ef25adb580feac515f9ccec514d0bda8"
+ "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
index 89fcf34bb..652fa4124 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
@@ -15,6 +15,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
+// ErrBatchCorrupted records the reason for a batch corruption.
type ErrBatchCorrupted struct {
Reason string
}
@@ -32,6 +33,7 @@ const (
batchGrowRec = 3000
)
+// BatchReplay wraps basic batch operations.
type BatchReplay interface {
Put(key, value []byte)
Delete(key []byte)
@@ -68,20 +70,20 @@ func (b *Batch) grow(n int) {
}
}
-func (b *Batch) appendRec(kt kType, key, value []byte) {
+func (b *Batch) appendRec(kt keyType, key, value []byte) {
n := 1 + binary.MaxVarintLen32 + len(key)
- if kt == ktVal {
+ if kt == keyTypeVal {
n += binary.MaxVarintLen32 + len(value)
}
b.grow(n)
off := len(b.data)
data := b.data[:off+n]
data[off] = byte(kt)
- off += 1
+ off++
off += binary.PutUvarint(data[off:], uint64(len(key)))
copy(data[off:], key)
off += len(key)
- if kt == ktVal {
+ if kt == keyTypeVal {
off += binary.PutUvarint(data[off:], uint64(len(value)))
copy(data[off:], value)
off += len(value)
@@ -95,13 +97,13 @@ func (b *Batch) appendRec(kt kType, key, value []byte) {
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns.
func (b *Batch) Put(key, value []byte) {
- b.appendRec(ktVal, key, value)
+ b.appendRec(keyTypeVal, key, value)
}
// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns.
func (b *Batch) Delete(key []byte) {
- b.appendRec(ktDel, key, nil)
+ b.appendRec(keyTypeDel, key, nil)
}
// Dump dumps batch contents. The returned slice can be loaded into the
@@ -122,11 +124,11 @@ func (b *Batch) Load(data []byte) error {
// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
- return b.decodeRec(func(i int, kt kType, key, value []byte) error {
+ return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
switch kt {
- case ktVal:
+ case keyTypeVal:
r.Put(key, value)
- case ktDel:
+ case keyTypeDel:
r.Delete(key)
}
return nil
@@ -195,18 +197,18 @@ func (b *Batch) decode(prevSeq uint64, data []byte) error {
return nil
}
-func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte) error) error {
+func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
off := batchHdrLen
for i := 0; i < b.rLen; i++ {
if off >= len(b.data) {
return newErrBatchCorrupted("invalid records length")
}
- kt := kType(b.data[off])
- if kt > ktVal {
+ kt := keyType(b.data[off])
+ if kt > keyTypeVal {
return newErrBatchCorrupted("bad record: invalid type")
}
- off += 1
+ off++
x, n := binary.Uvarint(b.data[off:])
off += n
@@ -216,7 +219,7 @@ func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte) error) erro
key := b.data[off : off+int(x)]
off += int(x)
var value []byte
- if kt == ktVal {
+ if kt == keyTypeVal {
x, n := binary.Uvarint(b.data[off:])
off += n
if n <= 0 || off+int(x) > len(b.data) {
@@ -236,8 +239,8 @@ func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte) error) erro
func (b *Batch) memReplay(to *memdb.DB) error {
var ikScratch []byte
- return b.decodeRec(func(i int, kt kType, key, value []byte) error {
- ikScratch = makeIkey(ikScratch, key, b.seq+uint64(i), kt)
+ return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+ ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Put(ikScratch, value)
})
}
@@ -251,8 +254,8 @@ func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) er
func (b *Batch) revertMemReplay(to *memdb.DB) error {
var ikScratch []byte
- return b.decodeRec(func(i int, kt kType, key, value []byte) error {
- ikScratch := makeIkey(ikScratch, key, b.seq+uint64(i), kt)
+ return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+ ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Delete(ikScratch)
})
}
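
The hunks above rename kType to keyType but leave the batch record wire format untouched: one type byte, a uvarint key length, the key, and, for keyTypeVal records only, a uvarint value length followed by the value. A minimal standalone sketch of that layout (illustrative names, not the library's own code):

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	keyTypeDel byte = 0
	keyTypeVal byte = 1
)

// appendRec encodes a single batch record the way appendRec above does.
func appendRec(dst []byte, kt byte, key, value []byte) []byte {
	var buf [binary.MaxVarintLen32]byte
	dst = append(dst, kt)
	n := binary.PutUvarint(buf[:], uint64(len(key)))
	dst = append(dst, buf[:n]...)
	dst = append(dst, key...)
	if kt == keyTypeVal {
		n = binary.PutUvarint(buf[:], uint64(len(value)))
		dst = append(dst, buf[:n]...)
		dst = append(dst, value...)
	}
	return dst
}

func main() {
	rec := appendRec(nil, keyTypeVal, []byte("foo"), []byte("bar"))
	fmt.Printf("% x\n", rec) // 01 03 66 6f 6f 03 62 61 72
}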
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
index c9670de5d..a287d0e5e 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -47,17 +47,21 @@ type Cacher interface {
// so the Release method will be called once the object is released.
type Value interface{}
-type CacheGetter struct {
+// NamespaceGetter provides a convenient wrapper for a namespace.
+type NamespaceGetter struct {
Cache *Cache
NS uint64
}
-func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+// Get simply calls the Cache.Get() method.
+func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
return g.Cache.Get(g.NS, key, setFunc)
}
// The hash tables implementation is based on:
-// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
+// Kunlong Zhang, and Michael Spear.
+// ACM Symposium on Principles of Distributed Computing, Jul 2014.
const (
mInitialSize = 1 << 4
@@ -610,10 +614,12 @@ func (n *Node) unrefLocked() {
}
}
+// Handle is a 'cache handle' of a 'cache node'.
type Handle struct {
n unsafe.Pointer // *Node
}
+// Value returns the value of the 'cache node'.
func (h *Handle) Value() Value {
n := (*Node)(atomic.LoadPointer(&h.n))
if n != nil {
@@ -622,6 +628,8 @@ func (h *Handle) Value() Value {
return nil
}
+// Release releases this 'cache handle'.
+// It is safe to call Release multiple times.
func (h *Handle) Release() {
nPtr := atomic.LoadPointer(&h.n)
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
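
CacheGetter becomes NamespaceGetter, but the call shape is unchanged: bind a cache and a namespace once, then Get by key with a setFunc that builds the entry on a miss. A hedged usage sketch against this revision's API (cache.NewCache and cache.NewLRU exist in the package; the namespace and sizes here are arbitrary):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// 16 MiB LRU-backed cache; NS 42 scopes keys to one namespace.
	c := cache.NewCache(cache.NewLRU(16 << 20))
	g := &cache.NamespaceGetter{Cache: c, NS: 42}

	// On a miss, setFunc supplies the entry's size (cache charge) and value.
	h := g.Get(1, func() (size int, value cache.Value) {
		return 3, "abc"
	})
	fmt.Println(h.Value()) // abc
	h.Release()            // safe to call more than once, per the new doc comment
}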
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go
index d33d5e9c7..248bf7c21 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go
@@ -33,9 +33,9 @@ func (icmp *iComparer) Name() string {
}
func (icmp *iComparer) Compare(a, b []byte) int {
- x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+ x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
if x == 0 {
- if m, n := iKey(a).num(), iKey(b).num(); m > n {
+ if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
x = -1
} else if m < n {
x = 1
@@ -45,13 +45,13 @@ func (icmp *iComparer) Compare(a, b []byte) int {
}
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
- ua, ub := iKey(a).ukey(), iKey(b).ukey()
+ ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
dst = icmp.ucmp.Separator(dst, ua, ub)
if dst == nil {
return nil
}
if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
- dst = append(dst, kMaxNumBytes...)
+ dst = append(dst, keyMaxNumBytes...)
} else {
// Did not close possibilities that n may be longer than len(ub).
dst = append(dst, a[len(a)-8:]...)
@@ -60,13 +60,13 @@ func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
}
func (icmp *iComparer) Successor(dst, b []byte) []byte {
- ub := iKey(b).ukey()
+ ub := internalKey(b).ukey()
dst = icmp.ucmp.Successor(dst, ub)
if dst == nil {
return nil
}
if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
- dst = append(dst, kMaxNumBytes...)
+ dst = append(dst, keyMaxNumBytes...)
} else {
// Did not close possibilities that n may be longer than len(ub).
dst = append(dst, b[len(b)-8:]...)
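
What iComparer.Compare implements, spelled out: user keys sort ascending, and entries with equal user keys sort by descending packed sequence number, so the newest version of a key comes first. A self-contained sketch of that ordering, assuming the 8-byte little-endian (seq<<8)|keyType trailer defined in key.go below:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// ikey appends the packed trailer to a user key, like makeInternalKey does.
func ikey(ukey []byte, seq, kt uint64) []byte {
	dst := make([]byte, len(ukey)+8)
	copy(dst, ukey)
	binary.LittleEndian.PutUint64(dst[len(ukey):], seq<<8|kt)
	return dst
}

// icompare mirrors iComparer.Compare with bytes.Compare as the user comparer.
func icompare(a, b []byte) int {
	if x := bytes.Compare(a[:len(a)-8], b[:len(b)-8]); x != 0 {
		return x
	}
	na := binary.LittleEndian.Uint64(a[len(a)-8:])
	nb := binary.LittleEndian.Uint64(b[len(b)-8:])
	if na > nb {
		return -1 // higher sequence number sorts first
	} else if na < nb {
		return 1
	}
	return 0
}

func main() {
	newer := ikey([]byte("k"), 9, 1) // seq 9, keyTypeVal
	older := ikey([]byte("k"), 5, 1) // seq 5, keyTypeVal
	fmt.Println(icompare(newer, older)) // -1: the newer entry sorts first
}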
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
index 537addb62..eb6abd0fb 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
@@ -315,7 +315,7 @@ func recoverTable(s *session, o *opt.Options) error {
tw := table.NewWriter(writer, o)
for iter.Next() {
key := iter.Key()
- if validIkey(key) {
+ if validInternalKey(key) {
err = tw.Append(key, iter.Value())
if err != nil {
return
@@ -380,7 +380,7 @@ func recoverTable(s *session, o *opt.Options) error {
// Scan the table.
for iter.Next() {
key := iter.Key()
- _, seq, _, kerr := parseIkey(key)
+ _, seq, _, kerr := parseInternalKey(key)
if kerr != nil {
tcorruptedKey++
continue
@@ -472,15 +472,15 @@ func recoverTable(s *session, o *opt.Options) error {
func (db *DB) recoverJournal() error {
// Get all journals and sort them by file number.
- fds_, err := db.s.stor.List(storage.TypeJournal)
+ rawFds, err := db.s.stor.List(storage.TypeJournal)
if err != nil {
return err
}
- sortFds(fds_)
+ sortFds(rawFds)
// Journals that will be recovered.
var fds []storage.FileDesc
- for _, fd := range fds_ {
+ for _, fd := range rawFds {
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
fds = append(fds, fd)
}
@@ -633,15 +633,15 @@ func (db *DB) recoverJournal() error {
func (db *DB) recoverJournalRO() error {
// Get all journals and sort them by file number.
- fds_, err := db.s.stor.List(storage.TypeJournal)
+ rawFds, err := db.s.stor.List(storage.TypeJournal)
if err != nil {
return err
}
- sortFds(fds_)
+ sortFds(rawFds)
// Journals that will be recovered.
var fds []storage.FileDesc
- for _, fd := range fds_ {
+ for _, fd := range rawFds {
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
fds = append(fds, fd)
}
@@ -728,16 +728,16 @@ func (db *DB) recoverJournalRO() error {
return nil
}
-func memGet(mdb *memdb.DB, ikey iKey, icmp *iComparer) (ok bool, mv []byte, err error) {
+func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
mk, mv, err := mdb.Find(ikey)
if err == nil {
- ukey, _, kt, kerr := parseIkey(mk)
+ ukey, _, kt, kerr := parseInternalKey(mk)
if kerr != nil {
// Shouldn't have happened.
panic(kerr)
}
if icmp.uCompare(ukey, ikey.ukey()) == 0 {
- if kt == ktDel {
+ if kt == keyTypeDel {
return true, nil, ErrNotFound
}
return true, mv, nil
@@ -750,7 +750,7 @@ func memGet(mdb *memdb.DB, ikey iKey, icmp *iComparer) (ok bool, mv []byte, err
}
func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
- ikey := makeIkey(nil, key, seq, ktSeek)
+ ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
if auxm != nil {
if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
@@ -788,7 +788,7 @@ func nilIfNotFound(err error) error {
}
func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
- ikey := makeIkey(nil, key, seq, ktSeek)
+ ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
if auxm != nil {
if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
@@ -997,8 +997,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
sizes := make(Sizes, 0, len(ranges))
for _, r := range ranges {
- imin := makeIkey(nil, r.Start, kMaxSeq, ktSeek)
- imax := makeIkey(nil, r.Limit, kMaxSeq, ktSeek)
+ imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
+ imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
start, err := v.offsetOf(imin)
if err != nil {
return nil, err
@@ -1007,7 +1007,7 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
if err != nil {
return nil, err
}
- var size uint64
+ var size int64
if limit >= start {
size = limit - start
}
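
The memGet hunk encodes the read-path contract: finding a keyTypeDel record for the looked-up user key is a definitive miss (ErrNotFound), not a fall-through to older data. The same behavior observed from the public API, under the in-memory storage backend (keys here are arbitrary):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	db.Put([]byte("k"), []byte("v"), nil)
	db.Delete([]byte("k"), nil) // writes a keyTypeDel tombstone

	_, err = db.Get([]byte("k"), nil)
	fmt.Println(err == leveldb.ErrNotFound) // true: the tombstone shadows the put
}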
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
index a94cf4c84..9664e64d0 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -452,7 +452,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
}
ikey := iter.Key()
- ukey, seq, kt, kerr := parseIkey(ikey)
+ ukey, seq, kt, kerr := parseInternalKey(ikey)
if kerr == nil {
shouldStop := !resumed && b.c.shouldStopBefore(ikey)
@@ -478,14 +478,14 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
hasLastUkey = true
lastUkey = append(lastUkey[:0], ukey...)
- lastSeq = kMaxSeq
+ lastSeq = keyMaxSeq
}
switch {
case lastSeq <= b.minSeq:
// Dropped because newer entry for same user key exist
fallthrough // (A)
- case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+ case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
// For this user key:
// (1) there is no data in higher levels
// (2) data in lower levels will have larger seq numbers
@@ -507,7 +507,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
// Don't drop corrupted keys.
hasLastUkey = false
lastUkey = lastUkey[:0]
- lastSeq = kMaxSeq
+ lastSeq = keyMaxSeq
b.kerrCnt++
}
@@ -548,9 +548,7 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
rec.delTable(c.sourceLevel, t.fd.Num)
rec.addTableFile(c.sourceLevel+1, t)
- db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
- return db.s.commit(rec)
- }, nil)
+ db.compactionCommit("table-move", rec)
return
}
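
The two drop conditions in the run hunk are easy to misread, so here is a compact restatement (a sketch, not the library's code): an entry is dropped when a newer entry for the same user key has already been kept and is itself at or below the snapshot floor, or when it is an old enough deletion and no lower level can still hold data for that key.

package main

import "fmt"

const (
	keyTypeDel = 0
	keyTypeVal = 1
	keyMaxSeq  = uint64(1)<<56 - 1
)

// shouldDrop mirrors the switch in tableCompactionBuilder.run. lastSeq is the
// sequence of the previously kept entry for this user key (keyMaxSeq on first
// occurrence), minSeq is the oldest snapshot still readable, and atBase
// stands in for baseLevelForKey.
func shouldDrop(lastSeq, minSeq, seq uint64, kt int, atBase bool) bool {
	if lastSeq <= minSeq {
		return true // shadowed by a newer, universally visible entry
	}
	return kt == keyTypeDel && seq <= minSeq && atBase
}

func main() {
	fmt.Println(shouldDrop(keyMaxSeq, 100, 90, keyTypeVal, true)) // false: newest version is kept
	fmt.Println(shouldDrop(90, 100, 80, keyTypeVal, true))        // true: shadowed old version
	fmt.Println(shouldDrop(keyMaxSeq, 100, 80, keyTypeDel, true)) // true: droppable tombstone
}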
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
index 86bcb99d9..03c24cdab 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -19,7 +19,7 @@ import (
)
var (
- errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
+ errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
)
type memdbReleaser struct {
@@ -70,10 +70,10 @@ func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Rang
if slice != nil {
islice = &util.Range{}
if slice.Start != nil {
- islice.Start = makeIkey(nil, slice.Start, kMaxSeq, ktSeek)
+ islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
}
if slice.Limit != nil {
- islice.Limit = makeIkey(nil, slice.Limit, kMaxSeq, ktSeek)
+ islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
}
}
rawIter := db.newRawIterator(auxm, auxt, islice, ro)
@@ -187,7 +187,7 @@ func (i *dbIter) Seek(key []byte) bool {
return false
}
- ikey := makeIkey(nil, key, i.seq, ktSeek)
+ ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
if i.iter.Seek(ikey) {
i.dir = dirSOI
return i.next()
@@ -199,15 +199,15 @@ func (i *dbIter) Seek(key []byte) bool {
func (i *dbIter) next() bool {
for {
- if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
switch kt {
- case ktDel:
+ case keyTypeDel:
// Skip deleted key.
i.key = append(i.key[:0], ukey...)
i.dir = dirForward
- case ktVal:
+ case keyTypeVal:
if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
i.key = append(i.key[:0], ukey...)
i.value = append(i.value[:0], i.iter.Value()...)
@@ -250,13 +250,13 @@ func (i *dbIter) prev() bool {
del := true
if i.iter.Valid() {
for {
- if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
return true
}
- del = (kt == ktDel)
+ del = (kt == keyTypeDel)
if !del {
i.key = append(i.key[:0], ukey...)
i.value = append(i.value[:0], i.iter.Value()...)
@@ -292,7 +292,7 @@ func (i *dbIter) Prev() bool {
return i.Last()
case dirForward:
for i.iter.Prev() {
- if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if i.icmp.uCompare(ukey, i.key) < 0 {
goto cont
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
index 0207e221e..40f454da1 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -57,7 +57,7 @@ func (db *DB) setSeq(seq uint64) {
atomic.StoreUint64(&db.seq, seq)
}
-func (db *DB) sampleSeek(ikey iKey) {
+func (db *DB) sampleSeek(ikey internalKey) {
v := db.s.version()
if v.sampleSeek(ikey) {
// Trigger table compaction.
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go
index 527028756..fca88037b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go
@@ -108,8 +108,8 @@ func (tr *Transaction) flush() error {
return nil
}
-func (tr *Transaction) put(kt kType, key, value []byte) error {
- tr.ikScratch = makeIkey(tr.ikScratch, key, tr.seq+1, kt)
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+ tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
if tr.mem.Free() < len(tr.ikScratch)+len(value) {
if err := tr.flush(); err != nil {
return err
@@ -134,7 +134,7 @@ func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
if tr.closed {
return errTransactionDone
}
- return tr.put(ktVal, key, value)
+ return tr.put(keyTypeVal, key, value)
}
// Delete deletes the value for the given key.
@@ -148,7 +148,7 @@ func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
if tr.closed {
return errTransactionDone
}
- return tr.put(ktDel, key, nil)
+ return tr.put(keyTypeDel, key, nil)
}
// Write apply the given batch to the transaction. The batch will be applied
@@ -167,7 +167,7 @@ func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
if tr.closed {
return errTransactionDone
}
- return b.decodeRec(func(i int, kt kType, key, value []byte) error {
+ return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
return tr.put(kt, key, value)
})
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
index 8ec86b2ac..7fd386ca4 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -21,14 +21,16 @@ type Reader interface {
NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
}
-type Sizes []uint64
+// Sizes is a list of sizes.
+type Sizes []int64
// Sum returns sum of the sizes.
-func (p Sizes) Sum() (n uint64) {
- for _, s := range p {
- n += s
+func (sizes Sizes) Sum() int64 {
+ var sum int64
+ for _, size := range sizes {
+ sum += size
}
- return n
+ return sum
}
// Logging.
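
With Sizes now []int64, SizeOf and Sum compose as before but without the uint64/int64 conversion at the table layer. A short usage sketch (in-memory backend; the ranges are illustrative):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	sizes, err := db.SizeOf([]util.Range{
		{Start: []byte("a"), Limit: []byte("m")},
		{Start: []byte("m"), Limit: []byte("z")},
	})
	if err != nil {
		panic(err)
	}
	total := sizes.Sum() // int64 after this change; was uint64
	fmt.Println(total)
}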
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
index 5200be6fc..fb7896139 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -281,8 +281,8 @@ func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
iter := mem.NewIterator(nil)
defer iter.Release()
- return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
- (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
+ return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
+ (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
}
// CompactRange compacts the underlying DB for the given key range.
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go
index 37c1e146b..e961e420d 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go
@@ -15,7 +15,7 @@ type iFilter struct {
}
func (f iFilter) Contains(filter, key []byte) bool {
- return f.Filter.Contains(filter, iKey(key).ukey())
+ return f.Filter.Contains(filter, internalKey(key).ukey())
}
func (f iFilter) NewGenerator() filter.FilterGenerator {
@@ -27,5 +27,5 @@ type iFilterGenerator struct {
}
func (g iFilterGenerator) Add(key []byte) {
- g.FilterGenerator.Add(iKey(key).ukey())
+ g.FilterGenerator.Add(internalKey(key).ukey())
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
index 1443c7526..d0b80aaf9 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
@@ -14,26 +14,27 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
-type ErrIkeyCorrupted struct {
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
Ikey []byte
Reason string
}
-func (e *ErrIkeyCorrupted) Error() string {
- return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
+func (e *ErrInternalKeyCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
}
-func newErrIkeyCorrupted(ikey []byte, reason string) error {
- return errors.NewErrCorrupted(storage.FileDesc{}, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+ return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
}
-type kType int
+type keyType uint
-func (kt kType) String() string {
+func (kt keyType) String() string {
switch kt {
- case ktDel:
+ case keyTypeDel:
return "d"
- case ktVal:
+ case keyTypeVal:
return "v"
}
return "x"
@@ -42,39 +43,39 @@ func (kt kType) String() string {
// Value types encoded as the last component of internal keys.
// Don't modify; these values are saved to disk.
const (
- ktDel kType = iota
- ktVal
+ keyTypeDel keyType = iota
+ keyTypeVal
)
-// ktSeek defines the kType that should be passed when constructing an
+// keyTypeSeek defines the keyType that should be passed when constructing an
// internal key for seeking to a particular sequence number (since we
// sort sequence numbers in decreasing order and the value type is
// embedded as the low 8 bits in the sequence number in internal keys,
// we need to use the highest-numbered ValueType, not the lowest).
-const ktSeek = ktVal
+const keyTypeSeek = keyTypeVal
const (
// Maximum value possible for a sequence number; 8 bits are
// used by the value type, so they can be packed together in a
// single 64-bit integer.
- kMaxSeq uint64 = (uint64(1) << 56) - 1
+ keyMaxSeq = (uint64(1) << 56) - 1
// Maximum value possible for packed sequence number and type.
- kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
+ keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
)
// Maximum number encoded in bytes.
-var kMaxNumBytes = make([]byte, 8)
+var keyMaxNumBytes = make([]byte, 8)
func init() {
- binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
+ binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
}
-type iKey []byte
+type internalKey []byte
-func makeIkey(dst, ukey []byte, seq uint64, kt kType) iKey {
- if seq > kMaxSeq {
+func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
+ if seq > keyMaxSeq {
panic("leveldb: invalid sequence number")
- } else if kt > ktVal {
+ } else if kt > keyTypeVal {
panic("leveldb: invalid type")
}
@@ -85,63 +86,62 @@ func makeIkey(dst, ukey []byte, seq uint64, kt kType) iKey {
}
copy(dst, ukey)
binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
- return iKey(dst)
+ return internalKey(dst)
}
-func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
+func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
if len(ik) < 8 {
- return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
}
num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
- seq, kt = uint64(num>>8), kType(num&0xff)
- if kt > ktVal {
- return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
}
ukey = ik[:len(ik)-8]
return
}
-func validIkey(ik []byte) bool {
- _, _, _, err := parseIkey(ik)
+func validInternalKey(ik []byte) bool {
+ _, _, _, err := parseInternalKey(ik)
return err == nil
}
-func (ik iKey) assert() {
+func (ik internalKey) assert() {
if ik == nil {
- panic("leveldb: nil iKey")
+ panic("leveldb: nil internalKey")
}
if len(ik) < 8 {
- panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
}
}
-func (ik iKey) ukey() []byte {
+func (ik internalKey) ukey() []byte {
ik.assert()
return ik[:len(ik)-8]
}
-func (ik iKey) num() uint64 {
+func (ik internalKey) num() uint64 {
ik.assert()
return binary.LittleEndian.Uint64(ik[len(ik)-8:])
}
-func (ik iKey) parseNum() (seq uint64, kt kType) {
+func (ik internalKey) parseNum() (seq uint64, kt keyType) {
num := ik.num()
- seq, kt = uint64(num>>8), kType(num&0xff)
- if kt > ktVal {
- panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
}
return
}
-func (ik iKey) String() string {
+func (ik internalKey) String() string {
if ik == nil {
return "<nil>"
}
- if ukey, seq, kt, err := parseIkey(ik); err == nil {
+ if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
- } else {
- return "<invalid>"
}
+ return "<invalid>"
}
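
The keyTypeSeek comment above is the crux of the key semantics: because equal user keys sort by descending sequence, a seek key built with the maximum sequence and keyTypeSeek (== keyTypeVal) compares at-or-before every stored version of that user key, so Seek lands on the newest visible one. The packed trailer reduces this to a plain integer comparison:

package main

import "fmt"

const (
	keyTypeVal = 1
	keyMaxSeq  = uint64(1)<<56 - 1
)

func main() {
	seek := keyMaxSeq<<8 | keyTypeVal      // trailer of a seek key
	stored := uint64(1000)<<8 | keyTypeVal // trailer of a real entry, seq 1000
	// A larger packed number sorts earlier (descending order), so the seek
	// key precedes every stored version of the same user key.
	fmt.Println(seek > stored) // true
}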
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
index a8d7b54dc..b0d3fef1d 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
@@ -18,6 +18,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
+// ErrManifestCorrupted records manifest corruption.
type ErrManifestCorrupted struct {
Field string
Reason string
@@ -50,8 +51,8 @@ type session struct {
manifestWriter storage.Writer
manifestFd storage.FileDesc
- stCompPtrs []iKey // compaction pointers; need external synchronization
- stVersion *version // current version
+ stCompPtrs []internalKey // compaction pointers; need external synchronization
+ stVersion *version // current version
vmu sync.Mutex
}
@@ -146,7 +147,7 @@ func (s *session) recover() (err error) {
if err == nil {
// save compact pointers
for _, r := range rec.compPtrs {
- s.setCompPtr(r.level, iKey(r.ikey))
+ s.setCompPtr(r.level, internalKey(r.ikey))
}
// commit record to version staging
staging.commit(rec)
@@ -154,9 +155,8 @@ func (s *session) recover() (err error) {
err = errors.SetFd(err, fd)
if strict || !errors.IsCorrupted(err) {
return
- } else {
- s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
}
+ s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
}
rec.resetCompPtrs()
rec.resetAddedTables()
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
index 471d68d56..089cd00b2 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
@@ -139,7 +139,7 @@ type compaction struct {
gpi int
seenKey bool
gpOverlappedBytes int64
- imin, imax iKey
+ imin, imax internalKey
tPtrs []int
released bool
@@ -242,7 +242,7 @@ func (c *compaction) baseLevelForKey(ukey []byte) bool {
return true
}
-func (c *compaction) shouldStopBefore(ikey iKey) bool {
+func (c *compaction) shouldStopBefore(ikey internalKey) bool {
for ; c.gpi < len(c.gp); c.gpi++ {
gp := c.gp[c.gpi]
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
index 9802e1a55..854e1aa6f 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -36,15 +36,15 @@ const (
type cpRecord struct {
level int
- ikey iKey
+ ikey internalKey
}
type atRecord struct {
level int
num int64
size int64
- imin iKey
- imax iKey
+ imin internalKey
+ imax internalKey
}
type dtRecord struct {
@@ -96,7 +96,7 @@ func (p *sessionRecord) setSeqNum(num uint64) {
p.seqNum = num
}
-func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
+func (p *sessionRecord) addCompPtr(level int, ikey internalKey) {
p.hasRec |= 1 << recCompPtr
p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
}
@@ -106,7 +106,7 @@ func (p *sessionRecord) resetCompPtrs() {
p.compPtrs = p.compPtrs[:0]
}
-func (p *sessionRecord) addTable(level int, num, size int64, imin, imax iKey) {
+func (p *sessionRecord) addTable(level int, num, size int64, imin, imax internalKey) {
p.hasRec |= 1 << recAddTable
p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
}
@@ -299,7 +299,7 @@ func (p *sessionRecord) decode(r io.Reader) error {
level := p.readLevel("comp-ptr.level", br)
ikey := p.readBytes("comp-ptr.ikey", br)
if p.err == nil {
- p.addCompPtr(level, iKey(ikey))
+ p.addCompPtr(level, internalKey(ikey))
}
case recAddTable:
level := p.readLevel("add-table.level", br)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
index e4fa98d92..674182fb2 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -106,17 +106,17 @@ func (s *session) reuseFileNum(num int64) {
}
// Set compaction ptr at given level; need external synchronization.
-func (s *session) setCompPtr(level int, ik iKey) {
+func (s *session) setCompPtr(level int, ik internalKey) {
if level >= len(s.stCompPtrs) {
- newCompPtrs := make([]iKey, level+1)
+ newCompPtrs := make([]internalKey, level+1)
copy(newCompPtrs, s.stCompPtrs)
s.stCompPtrs = newCompPtrs
}
- s.stCompPtrs[level] = append(iKey{}, ik...)
+ s.stCompPtrs[level] = append(internalKey{}, ik...)
}
// Get compaction ptr at given level; need external synchronization.
-func (s *session) getCompPtr(level int) iKey {
+func (s *session) getCompPtr(level int) internalKey {
if level >= len(s.stCompPtrs) {
return nil
}
@@ -165,7 +165,7 @@ func (s *session) recordCommited(rec *sessionRecord) {
}
for _, r := range rec.compPtrs {
- s.setCompPtr(r.level, iKey(r.ikey))
+ s.setCompPtr(r.level, internalKey(r.ikey))
}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
index 7030b22ef..310ba6c22 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
@@ -24,7 +24,7 @@ type tFile struct {
fd storage.FileDesc
seekLeft int32
size int64
- imin, imax iKey
+ imin, imax internalKey
}
// Returns true if given key is after largest key of this table.
@@ -48,7 +48,7 @@ func (t *tFile) consumeSeek() int32 {
}
// Creates new tFile.
-func newTableFile(fd storage.FileDesc, size int64, imin, imax iKey) *tFile {
+func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile {
f := &tFile{
fd: fd,
size: size,
@@ -136,7 +136,7 @@ func (tf tFiles) size() (sum int64) {
// Searches the smallest index of tables whose smallest
// key is after or equal to the given key.
-func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
+func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imin, ikey) >= 0
})
@@ -144,7 +144,7 @@ func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
// Searches the smallest index of tables whose largest
// key is after or equal to the given key.
-func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int {
+func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imax, ikey) >= 0
})
@@ -166,7 +166,7 @@ func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) boo
i := 0
if len(umin) > 0 {
// Find the earliest possible internal key for min.
- i = tf.searchMax(icmp, makeIkey(nil, umin, kMaxSeq, ktSeek))
+ i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
}
if i >= len(tf) {
// Beginning of range is after all files, so no overlap.
@@ -209,7 +209,7 @@ func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, ove
}
// Returns tables key range.
-func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
for i, t := range tf {
if i == 0 {
imin, imax = t.imin, t.imax
@@ -231,10 +231,10 @@ func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range
if slice != nil {
var start, limit int
if slice.Start != nil {
- start = tf.searchMax(icmp, iKey(slice.Start))
+ start = tf.searchMax(icmp, internalKey(slice.Start))
}
if slice.Limit != nil {
- limit = tf.searchMin(icmp, iKey(slice.Limit))
+ limit = tf.searchMin(icmp, internalKey(slice.Limit))
} else {
limit = tf.Len()
}
@@ -259,7 +259,7 @@ type tFilesArrayIndexer struct {
}
func (a *tFilesArrayIndexer) Search(key []byte) int {
- return a.searchMax(a.icmp, iKey(key))
+ return a.searchMax(a.icmp, internalKey(key))
}
func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
@@ -351,9 +351,9 @@ func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
return 0, nil
}
- var bcache *cache.CacheGetter
+ var bcache *cache.NamespaceGetter
if t.bcache != nil {
- bcache = &cache.CacheGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
+ bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
}
var tr *table.Reader
@@ -393,14 +393,13 @@ func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte,
}
// Returns approximate offset of the given key.
-func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
ch, err := t.open(f)
if err != nil {
return
}
defer ch.Release()
- offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
- return uint64(offset_), err
+ return ch.Value().(*table.Reader).OffsetOf(key)
}
// Creates an iterator from the given table.
@@ -515,7 +514,7 @@ func (w *tWriter) finish() (f *tFile, err error) {
return
}
}
- f = newTableFile(w.fd, int64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
+ f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
return
}
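
searchMin and searchMax are both instances of the standard sort.Search pattern: find the smallest index at which a monotone predicate flips to true. A standalone sketch of searchMax's predicate over toy string ranges (span is an illustrative stand-in for tFile):

package main

import (
	"fmt"
	"sort"
)

type span struct{ min, max string }

// searchMax returns the smallest index whose largest key is >= key, i.e. the
// first table that could contain it; len(tables) means "past all tables".
func searchMax(tables []span, key string) int {
	return sort.Search(len(tables), func(i int) bool {
		return tables[i].max >= key
	})
}

func main() {
	tables := []span{{"a", "f"}, {"g", "m"}, {"n", "z"}}
	fmt.Println(searchMax(tables, "h"))  // 1
	fmt.Println(searchMax(tables, "zz")) // 3 == len(tables)
}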
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
index caeac96b5..ae61bece9 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -509,7 +509,7 @@ type Reader struct {
mu sync.RWMutex
fd storage.FileDesc
reader io.ReaderAt
- cache *cache.CacheGetter
+ cache *cache.NamespaceGetter
err error
bpool *util.BufferPool
// Options
@@ -988,7 +988,7 @@ func (r *Reader) Release() {
// The fi, cache and bpool are optional and can be nil.
//
// The returned table reader instance is goroutine-safe.
-func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
+func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
if f == nil {
return nil, errors.New("leveldb/table: nil file")
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
deleted file mode 100644
index ec3f177a1..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
- "fmt"
- "math/rand"
-
- . "github.com/onsi/gomega"
-
- "github.com/syndtr/goleveldb/leveldb/errors"
- "github.com/syndtr/goleveldb/leveldb/iterator"
- "github.com/syndtr/goleveldb/leveldb/util"
-)
-
-type DB interface{}
-
-type Put interface {
- TestPut(key []byte, value []byte) error
-}
-
-type Delete interface {
- TestDelete(key []byte) error
-}
-
-type Find interface {
- TestFind(key []byte) (rkey, rvalue []byte, err error)
-}
-
-type Get interface {
- TestGet(key []byte) (value []byte, err error)
-}
-
-type Has interface {
- TestHas(key []byte) (ret bool, err error)
-}
-
-type NewIterator interface {
- TestNewIterator(slice *util.Range) iterator.Iterator
-}
-
-type DBAct int
-
-func (a DBAct) String() string {
- switch a {
- case DBNone:
- return "none"
- case DBPut:
- return "put"
- case DBOverwrite:
- return "overwrite"
- case DBDelete:
- return "delete"
- case DBDeleteNA:
- return "delete_na"
- }
- return "unknown"
-}
-
-const (
- DBNone DBAct = iota
- DBPut
- DBOverwrite
- DBDelete
- DBDeleteNA
-)
-
-type DBTesting struct {
- Rand *rand.Rand
- DB interface {
- Get
- Put
- Delete
- }
- PostFn func(t *DBTesting)
- Deleted, Present KeyValue
- Act, LastAct DBAct
- ActKey, LastActKey []byte
-}
-
-func (t *DBTesting) post() {
- if t.PostFn != nil {
- t.PostFn(t)
- }
-}
-
-func (t *DBTesting) setAct(act DBAct, key []byte) {
- t.LastAct, t.Act = t.Act, act
- t.LastActKey, t.ActKey = t.ActKey, key
-}
-
-func (t *DBTesting) text() string {
- return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey)
-}
-
-func (t *DBTesting) Text() string {
- return "DBTesting " + t.text()
-}
-
-func (t *DBTesting) TestPresentKV(key, value []byte) {
- rvalue, err := t.DB.TestGet(key)
- Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text())
- Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text())
-}
-
-func (t *DBTesting) TestAllPresent() {
- t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) {
- t.TestPresentKV(key, value)
- })
-}
-
-func (t *DBTesting) TestDeletedKey(key []byte) {
- _, err := t.DB.TestGet(key)
- Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text())
-}
-
-func (t *DBTesting) TestAllDeleted() {
- t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) {
- t.TestDeletedKey(key)
- })
-}
-
-func (t *DBTesting) TestAll() {
- dn := t.Deleted.Len()
- pn := t.Present.Len()
- ShuffledIndex(t.Rand, dn+pn, 1, func(i int) {
- if i >= dn {
- key, value := t.Present.Index(i - dn)
- t.TestPresentKV(key, value)
- } else {
- t.TestDeletedKey(t.Deleted.KeyAt(i))
- }
- })
-}
-
-func (t *DBTesting) Put(key, value []byte) {
- if new := t.Present.PutU(key, value); new {
- t.setAct(DBPut, key)
- } else {
- t.setAct(DBOverwrite, key)
- }
- t.Deleted.Delete(key)
- err := t.DB.TestPut(key, value)
- Expect(err).ShouldNot(HaveOccurred(), t.Text())
- t.TestPresentKV(key, value)
- t.post()
-}
-
-func (t *DBTesting) PutRandom() bool {
- if t.Deleted.Len() > 0 {
- i := t.Rand.Intn(t.Deleted.Len())
- key, value := t.Deleted.Index(i)
- t.Put(key, value)
- return true
- }
- return false
-}
-
-func (t *DBTesting) Delete(key []byte) {
- if exist, value := t.Present.Delete(key); exist {
- t.setAct(DBDelete, key)
- t.Deleted.PutU(key, value)
- } else {
- t.setAct(DBDeleteNA, key)
- }
- err := t.DB.TestDelete(key)
- Expect(err).ShouldNot(HaveOccurred(), t.Text())
- t.TestDeletedKey(key)
- t.post()
-}
-
-func (t *DBTesting) DeleteRandom() bool {
- if t.Present.Len() > 0 {
- i := t.Rand.Intn(t.Present.Len())
- t.Delete(t.Present.KeyAt(i))
- return true
- }
- return false
-}
-
-func (t *DBTesting) RandomAct(round int) {
- for i := 0; i < round; i++ {
- if t.Rand.Int()%2 == 0 {
- t.PutRandom()
- } else {
- t.DeleteRandom()
- }
- }
-}
-
-func DoDBTesting(t *DBTesting) {
- if t.Rand == nil {
- t.Rand = NewRand()
- }
-
- t.DeleteRandom()
- t.PutRandom()
- t.DeleteRandom()
- t.DeleteRandom()
- for i := t.Deleted.Len() / 2; i >= 0; i-- {
- t.PutRandom()
- }
- t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10)
-
- // Additional iterator testing
- if db, ok := t.DB.(NewIterator); ok {
- iter := db.TestNewIterator(nil)
- Expect(iter.Error()).NotTo(HaveOccurred())
-
- it := IteratorTesting{
- KeyValue: t.Present,
- Iter: iter,
- }
-
- DoIteratorTesting(&it)
- iter.Release()
- }
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
deleted file mode 100644
index 82f3d0e81..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package testutil
-
-import (
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-)
-
-func RunSuite(t GinkgoTestingT, name string) {
- RunDefer()
-
- SynchronizedBeforeSuite(func() []byte {
- RunDefer("setup")
- return nil
- }, func(data []byte) {})
- SynchronizedAfterSuite(func() {
- RunDefer("teardown")
- }, func() {})
-
- RegisterFailHandler(Fail)
- RunSpecs(t, name)
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go
deleted file mode 100644
index df6d9db6a..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
- "fmt"
- "math/rand"
-
- . "github.com/onsi/gomega"
-
- "github.com/syndtr/goleveldb/leveldb/iterator"
-)
-
-type IterAct int
-
-func (a IterAct) String() string {
- switch a {
- case IterNone:
- return "none"
- case IterFirst:
- return "first"
- case IterLast:
- return "last"
- case IterPrev:
- return "prev"
- case IterNext:
- return "next"
- case IterSeek:
- return "seek"
- case IterSOI:
- return "soi"
- case IterEOI:
- return "eoi"
- }
- return "unknown"
-}
-
-const (
- IterNone IterAct = iota
- IterFirst
- IterLast
- IterPrev
- IterNext
- IterSeek
- IterSOI
- IterEOI
-)
-
-type IteratorTesting struct {
- KeyValue
- Iter iterator.Iterator
- Rand *rand.Rand
- PostFn func(t *IteratorTesting)
- Pos int
- Act, LastAct IterAct
-
- once bool
-}
-
-func (t *IteratorTesting) init() {
- if !t.once {
- t.Pos = -1
- t.once = true
- }
-}
-
-func (t *IteratorTesting) post() {
- if t.PostFn != nil {
- t.PostFn(t)
- }
-}
-
-func (t *IteratorTesting) setAct(act IterAct) {
- t.LastAct, t.Act = t.Act, act
-}
-
-func (t *IteratorTesting) text() string {
- return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act)
-}
-
-func (t *IteratorTesting) Text() string {
- return "IteratorTesting is " + t.text()
-}
-
-func (t *IteratorTesting) IsFirst() bool {
- t.init()
- return t.Len() > 0 && t.Pos == 0
-}
-
-func (t *IteratorTesting) IsLast() bool {
- t.init()
- return t.Len() > 0 && t.Pos == t.Len()-1
-}
-
-func (t *IteratorTesting) TestKV() {
- t.init()
- key, value := t.Index(t.Pos)
- Expect(t.Iter.Key()).NotTo(BeNil())
- Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text())
- Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text())
-}
-
-func (t *IteratorTesting) First() {
- t.init()
- t.setAct(IterFirst)
-
- ok := t.Iter.First()
- Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
- if t.Len() > 0 {
- t.Pos = 0
- Expect(ok).Should(BeTrue(), t.Text())
- t.TestKV()
- } else {
- t.Pos = -1
- Expect(ok).ShouldNot(BeTrue(), t.Text())
- }
- t.post()
-}
-
-func (t *IteratorTesting) Last() {
- t.init()
- t.setAct(IterLast)
-
- ok := t.Iter.Last()
- Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
- if t.Len() > 0 {
- t.Pos = t.Len() - 1
- Expect(ok).Should(BeTrue(), t.Text())
- t.TestKV()
- } else {
- t.Pos = 0
- Expect(ok).ShouldNot(BeTrue(), t.Text())
- }
- t.post()
-}
-
-func (t *IteratorTesting) Next() {
- t.init()
- t.setAct(IterNext)
-
- ok := t.Iter.Next()
- Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
- if t.Pos < t.Len()-1 {
- t.Pos++
- Expect(ok).Should(BeTrue(), t.Text())
- t.TestKV()
- } else {
- t.Pos = t.Len()
- Expect(ok).ShouldNot(BeTrue(), t.Text())
- }
- t.post()
-}
-
-func (t *IteratorTesting) Prev() {
- t.init()
- t.setAct(IterPrev)
-
- ok := t.Iter.Prev()
- Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
- if t.Pos > 0 {
- t.Pos--
- Expect(ok).Should(BeTrue(), t.Text())
- t.TestKV()
- } else {
- t.Pos = -1
- Expect(ok).ShouldNot(BeTrue(), t.Text())
- }
- t.post()
-}
-
-func (t *IteratorTesting) Seek(i int) {
- t.init()
- t.setAct(IterSeek)
-
- key, _ := t.Index(i)
- oldKey, _ := t.IndexOrNil(t.Pos)
-
- ok := t.Iter.Seek(key)
- Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
- Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text()))
-
- t.Pos = i
- t.TestKV()
- t.post()
-}
-
-func (t *IteratorTesting) SeekInexact(i int) {
- t.init()
- t.setAct(IterSeek)
- var key0 []byte
- key1, _ := t.Index(i)
- if i > 0 {
- key0, _ = t.Index(i - 1)
- }
- key := BytesSeparator(key0, key1)
- oldKey, _ := t.IndexOrNil(t.Pos)
-
- ok := t.Iter.Seek(key)
- Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
- Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text()))
-
- t.Pos = i
- t.TestKV()
- t.post()
-}
-
-func (t *IteratorTesting) SeekKey(key []byte) {
- t.init()
- t.setAct(IterSeek)
- oldKey, _ := t.IndexOrNil(t.Pos)
- i := t.Search(key)
-
- ok := t.Iter.Seek(key)
- Expect(t.Iter.Error()).ShouldNot(HaveOccurred())
- if i < t.Len() {
- key_, _ := t.Index(i)
- Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text()))
- t.Pos = i
- t.TestKV()
- } else {
- Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text()))
- }
-
- t.Pos = i
- t.post()
-}
-
-func (t *IteratorTesting) SOI() {
- t.init()
- t.setAct(IterSOI)
- Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text())
- for i := 0; i < 3; i++ {
- t.Prev()
- }
- t.post()
-}
-
-func (t *IteratorTesting) EOI() {
- t.init()
- t.setAct(IterEOI)
- Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text())
- for i := 0; i < 3; i++ {
- t.Next()
- }
- t.post()
-}
-
-func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) {
- t.init()
- for old := t.Pos; t.Pos > 0; old = t.Pos {
- fn(t)
- Expect(t.Pos).Should(BeNumerically("<", old), t.Text())
- }
-}
-
-func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) {
- t.init()
- for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos {
- fn(t)
- Expect(t.Pos).Should(BeNumerically(">", old), t.Text())
- }
-}
-
-func (t *IteratorTesting) PrevAll() {
- t.WalkPrev(func(t *IteratorTesting) {
- t.Prev()
- })
-}
-
-func (t *IteratorTesting) NextAll() {
- t.WalkNext(func(t *IteratorTesting) {
- t.Next()
- })
-}
-
-func DoIteratorTesting(t *IteratorTesting) {
- if t.Rand == nil {
- t.Rand = NewRand()
- }
- t.SOI()
- t.NextAll()
- t.First()
- t.SOI()
- t.NextAll()
- t.EOI()
- t.PrevAll()
- t.Last()
- t.EOI()
- t.PrevAll()
- t.SOI()
-
- t.NextAll()
- t.PrevAll()
- t.NextAll()
- t.Last()
- t.PrevAll()
- t.First()
- t.NextAll()
- t.EOI()
-
- ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
- t.Seek(i)
- })
-
- ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
- t.SeekInexact(i)
- })
-
- ShuffledIndex(t.Rand, t.Len(), 1, func(i int) {
- t.Seek(i)
- if i%2 != 0 {
- t.PrevAll()
- t.SOI()
- } else {
- t.NextAll()
- t.EOI()
- }
- })
-
- for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} {
- t.SeekKey([]byte(key))
- }
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go
deleted file mode 100644
index 471d5708c..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
- "fmt"
- "math/rand"
- "sort"
- "strings"
-
- "github.com/syndtr/goleveldb/leveldb/util"
-)
-
-type KeyValueEntry struct {
- key, value []byte
-}
-
-type KeyValue struct {
- entries []KeyValueEntry
- nbytes int
-}
-
-func (kv *KeyValue) Put(key, value []byte) {
- if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 {
- panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key))
- }
- kv.entries = append(kv.entries, KeyValueEntry{key, value})
- kv.nbytes += len(key) + len(value)
-}
-
-func (kv *KeyValue) PutString(key, value string) {
- kv.Put([]byte(key), []byte(value))
-}
-
-func (kv *KeyValue) PutU(key, value []byte) bool {
- if i, exist := kv.Get(key); !exist {
- if i < kv.Len() {
- kv.entries = append(kv.entries[:i+1], kv.entries[i:]...)
- kv.entries[i] = KeyValueEntry{key, value}
- } else {
- kv.entries = append(kv.entries, KeyValueEntry{key, value})
- }
- kv.nbytes += len(key) + len(value)
- return true
- } else {
- kv.nbytes += len(value) - len(kv.ValueAt(i))
- kv.entries[i].value = value
- }
- return false
-}
-
-func (kv *KeyValue) PutUString(key, value string) bool {
- return kv.PutU([]byte(key), []byte(value))
-}
-
-func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) {
- i, exist := kv.Get(key)
- if exist {
- value = kv.entries[i].value
- kv.DeleteIndex(i)
- }
- return
-}
-
-func (kv *KeyValue) DeleteIndex(i int) bool {
- if i < kv.Len() {
- kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i))
- kv.entries = append(kv.entries[:i], kv.entries[i+1:]...)
- return true
- }
- return false
-}
-
-func (kv KeyValue) Len() int {
- return len(kv.entries)
-}
-
-func (kv *KeyValue) Size() int {
- return kv.nbytes
-}
-
-func (kv KeyValue) KeyAt(i int) []byte {
- return kv.entries[i].key
-}
-
-func (kv KeyValue) ValueAt(i int) []byte {
- return kv.entries[i].value
-}
-
-func (kv KeyValue) Index(i int) (key, value []byte) {
- if i < 0 || i >= len(kv.entries) {
- panic(fmt.Sprintf("Index #%d: out of range", i))
- }
- return kv.entries[i].key, kv.entries[i].value
-}
-
-func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) {
- key, value = kv.Index(i)
- var key0 []byte
- var key1 = kv.KeyAt(i)
- if i > 0 {
- key0 = kv.KeyAt(i - 1)
- }
- key_ = BytesSeparator(key0, key1)
- return
-}
-
-func (kv KeyValue) IndexOrNil(i int) (key, value []byte) {
- if i >= 0 && i < len(kv.entries) {
- return kv.entries[i].key, kv.entries[i].value
- }
- return nil, nil
-}
-
-func (kv KeyValue) IndexString(i int) (key, value string) {
- key_, _value := kv.Index(i)
- return string(key_), string(_value)
-}
-
-func (kv KeyValue) Search(key []byte) int {
- return sort.Search(kv.Len(), func(i int) bool {
- return cmp.Compare(kv.KeyAt(i), key) >= 0
- })
-}
-
-func (kv KeyValue) SearchString(key string) int {
- return kv.Search([]byte(key))
-}
-
-func (kv KeyValue) Get(key []byte) (i int, exist bool) {
- i = kv.Search(key)
- if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 {
- exist = true
- }
- return
-}
-
-func (kv KeyValue) GetString(key string) (i int, exist bool) {
- return kv.Get([]byte(key))
-}
-
-func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) {
- for i, x := range kv.entries {
- fn(i, x.key, x.value)
- }
-}
-
-func (kv KeyValue) IterateString(fn func(i int, key, value string)) {
- kv.Iterate(func(i int, key, value []byte) {
- fn(i, string(key), string(value))
- })
-}
-
-func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) {
- ShuffledIndex(rnd, kv.Len(), 1, func(i int) {
- fn(i, kv.entries[i].key, kv.entries[i].value)
- })
-}
-
-func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) {
- kv.IterateShuffled(rnd, func(i int, key, value []byte) {
- fn(i, string(key), string(value))
- })
-}
-
-func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) {
- for i := range kv.entries {
- key_, key, value := kv.IndexInexact(i)
- fn(i, key_, key, value)
- }
-}
-
-func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) {
- kv.IterateInexact(func(i int, key_, key, value []byte) {
- fn(i, string(key_), string(key), string(value))
- })
-}
-
-func (kv KeyValue) Clone() KeyValue {
- return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes}
-}
-
-func (kv KeyValue) Slice(start, limit int) KeyValue {
- if start < 0 || limit > kv.Len() {
- panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit))
- } else if limit < start {
- panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit))
- }
- return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes}
-}
-
-func (kv KeyValue) SliceKey(start, limit []byte) KeyValue {
- start_ := 0
- limit_ := kv.Len()
- if start != nil {
- start_ = kv.Search(start)
- }
- if limit != nil {
- limit_ = kv.Search(limit)
- }
- return kv.Slice(start_, limit_)
-}
-
-func (kv KeyValue) SliceKeyString(start, limit string) KeyValue {
- return kv.SliceKey([]byte(start), []byte(limit))
-}
-
-func (kv KeyValue) SliceRange(r *util.Range) KeyValue {
- if r != nil {
- return kv.SliceKey(r.Start, r.Limit)
- }
- return kv.Clone()
-}
-
-func (kv KeyValue) Range(start, limit int) (r util.Range) {
- if kv.Len() > 0 {
- if start == kv.Len() {
- r.Start = BytesAfter(kv.KeyAt(start - 1))
- } else {
- r.Start = kv.KeyAt(start)
- }
- }
- if limit < kv.Len() {
- r.Limit = kv.KeyAt(limit)
- }
- return
-}
-
-func KeyValue_EmptyKey() *KeyValue {
- kv := &KeyValue{}
- kv.PutString("", "v")
- return kv
-}
-
-func KeyValue_EmptyValue() *KeyValue {
- kv := &KeyValue{}
- kv.PutString("abc", "")
- kv.PutString("abcd", "")
- return kv
-}
-
-func KeyValue_OneKeyValue() *KeyValue {
- kv := &KeyValue{}
- kv.PutString("abc", "v")
- return kv
-}
-
-func KeyValue_BigValue() *KeyValue {
- kv := &KeyValue{}
- kv.PutString("big1", strings.Repeat("1", 200000))
- return kv
-}
-
-func KeyValue_SpecialKey() *KeyValue {
- kv := &KeyValue{}
- kv.PutString("\xff\xff", "v3")
- return kv
-}
-
-func KeyValue_MultipleKeyValue() *KeyValue {
- kv := &KeyValue{}
- kv.PutString("a", "v")
- kv.PutString("aa", "v1")
- kv.PutString("aaa", "v2")
- kv.PutString("aaacccccccccc", "v2")
- kv.PutString("aaaccccccccccd", "v3")
- kv.PutString("aaaccccccccccf", "v4")
- kv.PutString("aaaccccccccccfg", "v5")
- kv.PutString("ab", "v6")
- kv.PutString("abc", "v7")
- kv.PutString("abcd", "v8")
- kv.PutString("accccccccccccccc", "v9")
- kv.PutString("b", "v10")
- kv.PutString("bb", "v11")
- kv.PutString("bc", "v12")
- kv.PutString("c", "v13")
- kv.PutString("c1", "v13")
- kv.PutString("czzzzzzzzzzzzzz", "v14")
- kv.PutString("fffffffffffffff", "v15")
- kv.PutString("g11", "v15")
- kv.PutString("g111", "v15")
- kv.PutString("g111\xff", "v15")
- kv.PutString("zz", "v16")
- kv.PutString("zzzzzzz", "v16")
- kv.PutString("zzzzzzzzzzzzzzzz", "v16")
- return kv
-}
-
-var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy")
-
-func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue {
- if rnd == nil {
- rnd = NewRand()
- }
- if maxlen < minlen {
- panic("max len should >= min len")
- }
-
- rrand := func(min, max int) int {
- if min == max {
- return max
- }
- return rnd.Intn(max-min) + min
- }
-
- kv := &KeyValue{}
- endC := byte(len(keymap) - 1)
- gen := make([]byte, 0, maxlen)
- for i := 0; i < n; i++ {
- m := rrand(minlen, maxlen)
- last := gen
- retry:
- gen = last[:m]
- if k := len(last); m > k {
- for j := k; j < m; j++ {
- gen[j] = 0
- }
- } else {
- for j := m - 1; j >= 0; j-- {
- c := last[j]
- if c == endC {
- continue
- }
- gen[j] = c + 1
- for j += 1; j < m; j++ {
- gen[j] = 0
- }
- goto ok
- }
- if m < maxlen {
- m++
- goto retry
- }
- panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n))
- ok:
- }
- key := make([]byte, m)
- for j := 0; j < m; j++ {
- key[j] = keymap[gen[j]]
- }
- value := make([]byte, rrand(vminlen, vmaxlen))
- for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ {
- value[n] = 'x'
- }
- kv.Put(key, value)
- }
- return kv
-}
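
For reference: kv.go keeps its entries sorted — Put only accepts strictly increasing keys, and PutU splices new entries into the position found by binary search. A minimal sketch of that sorted-insert pattern, assuming plain bytes.Compare ordering instead of the package's comparer:

package main

import (
    "bytes"
    "fmt"
    "sort"
)

type entry struct{ key, value []byte }

type sortedKV struct{ entries []entry }

// put inserts key at its sorted position, or updates it in place if present.
func (kv *sortedKV) put(key, value []byte) {
    i := sort.Search(len(kv.entries), func(i int) bool {
        return bytes.Compare(kv.entries[i].key, key) >= 0
    })
    if i < len(kv.entries) && bytes.Equal(kv.entries[i].key, key) {
        kv.entries[i].value = value
        return
    }
    kv.entries = append(kv.entries, entry{}) // grow by one slot
    copy(kv.entries[i+1:], kv.entries[i:])   // shift the tail right
    kv.entries[i] = entry{key, value}
}

func main() {
    kv := &sortedKV{}
    kv.put([]byte("b"), []byte("v2"))
    kv.put([]byte("a"), []byte("v1"))
    for _, e := range kv.entries {
        fmt.Printf("%s=%s ", e.key, e.value) // keys stay sorted: a=v1 b=v2
    }
    fmt.Println()
}
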
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
deleted file mode 100644
index 19d29dfc4..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
- "fmt"
- "math/rand"
-
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
- "github.com/syndtr/goleveldb/leveldb/errors"
- "github.com/syndtr/goleveldb/leveldb/util"
-)
-
-func TestFind(db Find, kv KeyValue) {
- ShuffledIndex(nil, kv.Len(), 1, func(i int) {
- key_, key, value := kv.IndexInexact(i)
-
- // Using exact key.
- rkey, rvalue, err := db.TestFind(key)
- Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
- Expect(rkey).Should(Equal(key), "Key")
- Expect(rvalue).Should(Equal(value), "Value for key %q", key)
-
- // Using inexact key.
- rkey, rvalue, err = db.TestFind(key_)
- Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key)
- Expect(rkey).Should(Equal(key))
- Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
- })
-}
-
-func TestFindAfterLast(db Find, kv KeyValue) {
- var key []byte
- if kv.Len() > 0 {
- key_, _ := kv.Index(kv.Len() - 1)
- key = BytesAfter(key_)
- }
- rkey, _, err := db.TestFind(key)
- Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
- Expect(err).Should(Equal(errors.ErrNotFound))
-}
-
-func TestGet(db Get, kv KeyValue) {
- ShuffledIndex(nil, kv.Len(), 1, func(i int) {
- key_, key, value := kv.IndexInexact(i)
-
- // Using exact key.
- rvalue, err := db.TestGet(key)
- Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
- Expect(rvalue).Should(Equal(value), "Value for key %q", key)
-
- // Using inexact key.
- if len(key_) > 0 {
- _, err = db.TestGet(key_)
- Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
- Expect(err).Should(Equal(errors.ErrNotFound))
- }
- })
-}
-
-func TestHas(db Has, kv KeyValue) {
- ShuffledIndex(nil, kv.Len(), 1, func(i int) {
- key_, key, _ := kv.IndexInexact(i)
-
- // Using exact key.
- ret, err := db.TestHas(key)
- Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
- Expect(ret).Should(BeTrue(), "False for key %q", key)
-
- // Using inexact key.
- if len(key_) > 0 {
- ret, err = db.TestHas(key_)
- Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
- Expect(ret).ShouldNot(BeTrue(), "True for key %q", key)
- }
- })
-}
-
-func TestIter(db NewIterator, r *util.Range, kv KeyValue) {
- iter := db.TestNewIterator(r)
- Expect(iter.Error()).ShouldNot(HaveOccurred())
-
- t := IteratorTesting{
- KeyValue: kv,
- Iter: iter,
- }
-
- DoIteratorTesting(&t)
- iter.Release()
-}
-
-func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
- if rnd == nil {
- rnd = NewRand()
- }
-
- if p == nil {
- BeforeEach(func() {
- p = setup(kv)
- })
- if teardown != nil {
- AfterEach(func() {
- teardown(p)
- })
- }
- }
-
- It("Should find all keys with Find", func() {
- if db, ok := p.(Find); ok {
- TestFind(db, kv)
- }
- })
-
- It("Should return error if Find on key after the last", func() {
- if db, ok := p.(Find); ok {
- TestFindAfterLast(db, kv)
- }
- })
-
- It("Should only find exact key with Get", func() {
- if db, ok := p.(Get); ok {
- TestGet(db, kv)
- }
- })
-
- It("Should only find present key with Has", func() {
- if db, ok := p.(Has); ok {
- TestHas(db, kv)
- }
- })
-
- It("Should iterates and seeks correctly", func(done Done) {
- if db, ok := p.(NewIterator); ok {
- TestIter(db, nil, kv.Clone())
- }
- done <- true
- }, 3.0)
-
- It("Should iterates and seeks slice correctly", func(done Done) {
- if db, ok := p.(NewIterator); ok {
- RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
- type slice struct {
- r *util.Range
- start, limit int
- }
-
- key_, _, _ := kv.IndexInexact(i)
- for _, x := range []slice{
- {&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
- {&util.Range{Start: nil, Limit: key_}, 0, i},
- } {
- By(fmt.Sprintf("Random index of %d .. %d", x.start, x.limit), func() {
- TestIter(db, x.r, kv.Slice(x.start, x.limit))
- })
- }
- })
- }
- done <- true
- }, 50.0)
-
- It("Should iterates and seeks slice correctly", func(done Done) {
- if db, ok := p.(NewIterator); ok {
- RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
- By(fmt.Sprintf("Random range of %d .. %d", start, limit), func() {
- r := kv.Range(start, limit)
- TestIter(db, &r, kv.Slice(start, limit))
- })
- })
- }
- done <- true
- }, 50.0)
-}
-
-func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
- Test := func(kv *KeyValue) func() {
- return func() {
- var p DB
- if setup != nil {
- Defer("setup", func() {
- p = setup(*kv)
- })
- }
- if teardown != nil {
- Defer("teardown", func() {
- teardown(p)
- })
- }
- if body != nil {
- p = body(*kv)
- }
- KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
- return p
- }, nil)
- }
- }
-
- Describe("with no key/value (empty)", Test(&KeyValue{}))
- Describe("with empty key", Test(KeyValue_EmptyKey()))
- Describe("with empty value", Test(KeyValue_EmptyValue()))
- Describe("with one key/value", Test(KeyValue_OneKeyValue()))
- Describe("with big value", Test(KeyValue_BigValue()))
- Describe("with special key", Test(KeyValue_SpecialKey()))
- Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
- Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
-}
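
For reference: KeyValueTesting above probes the store under test with type assertions (p.(Find), p.(Get), p.(Has), p.(NewIterator)) so each case only runs when the store implements the matching narrow interface. A standalone sketch of that capability-probing pattern, with illustrative interfaces rather than the real testutil ones:

package main

import "fmt"

type Getter interface {
    Get(key string) (string, bool)
}

type Haser interface {
    Has(key string) bool
}

type memStore map[string]string

func (m memStore) Get(k string) (string, bool) { v, ok := m[k]; return v, ok }

// exercise runs only the tests the store actually supports.
func exercise(db interface{}) {
    if g, ok := db.(Getter); ok {
        if v, found := g.Get("a"); found {
            fmt.Println("Get:", v)
        }
    }
    if _, ok := db.(Haser); ok {
        fmt.Println("Has supported")
    } else {
        fmt.Println("Has not implemented, test skipped")
    }
}

func main() {
    exercise(memStore{"a": "1"})
}
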
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
deleted file mode 100644
index 1d9163ea4..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
+++ /dev/null
@@ -1,694 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
- "bytes"
- "fmt"
- "io"
- "math/rand"
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "sync"
-
- . "github.com/onsi/gomega"
-
- "github.com/syndtr/goleveldb/leveldb/storage"
- "github.com/syndtr/goleveldb/leveldb/util"
-)
-
-var (
- storageMu sync.Mutex
- storageUseFS = true
- storageKeepFS = false
- storageNum int
-)
-
-type StorageMode int
-
-const (
- ModeOpen StorageMode = 1 << iota
- ModeCreate
- ModeRemove
- ModeRename
- ModeRead
- ModeWrite
- ModeSync
- ModeClose
-)
-
-const (
- modeOpen = iota
- modeCreate
- modeRemove
- modeRename
- modeRead
- modeWrite
- modeSync
- modeClose
-
- modeCount
-)
-
-const (
- typeManifest = iota
- typeJournal
- typeTable
- typeTemp
-
- typeCount
-)
-
-const flattenCount = modeCount * typeCount
-
-func flattenType(m StorageMode, t storage.FileType) int {
- var x int
- switch m {
- case ModeOpen:
- x = modeOpen
- case ModeCreate:
- x = modeCreate
- case ModeRemove:
- x = modeRemove
- case ModeRename:
- x = modeRename
- case ModeRead:
- x = modeRead
- case ModeWrite:
- x = modeWrite
- case ModeSync:
- x = modeSync
- case ModeClose:
- x = modeClose
- default:
- panic("invalid storage mode")
- }
- x *= typeCount
- switch t {
- case storage.TypeManifest:
- return x + typeManifest
- case storage.TypeJournal:
- return x + typeJournal
- case storage.TypeTable:
- return x + typeTable
- case storage.TypeTemp:
- return x + typeTemp
- default:
- panic("invalid file type")
- }
-}
-
-func listFlattenType(m StorageMode, t storage.FileType) []int {
- ret := make([]int, 0, flattenCount)
- add := func(x int) {
- x *= typeCount
- switch {
- case t&storage.TypeManifest != 0:
- ret = append(ret, x+typeManifest)
- case t&storage.TypeJournal != 0:
- ret = append(ret, x+typeJournal)
- case t&storage.TypeTable != 0:
- ret = append(ret, x+typeTable)
- case t&storage.TypeTemp != 0:
- ret = append(ret, x+typeTemp)
- }
- }
- switch {
- case m&ModeOpen != 0:
- add(modeOpen)
- case m&ModeCreate != 0:
- add(modeCreate)
- case m&ModeRemove != 0:
- add(modeRemove)
- case m&ModeRename != 0:
- add(modeRename)
- case m&ModeRead != 0:
- add(modeRead)
- case m&ModeWrite != 0:
- add(modeWrite)
- case m&ModeSync != 0:
- add(modeSync)
- case m&ModeClose != 0:
- add(modeClose)
- }
- return ret
-}
-
-func packFile(fd storage.FileDesc) uint64 {
- if fd.Num>>(63-typeCount) != 0 {
- panic("overflow")
- }
- return uint64(fd.Num<<typeCount) | uint64(fd.Type)
-}
-
-func unpackFile(x uint64) storage.FileDesc {
- return storage.FileDesc{storage.FileType(x) & storage.TypeAll, int64(x >> typeCount)}
-}
-
-type emulatedError struct {
- err error
-}
-
-func (err emulatedError) Error() string {
- return fmt.Sprintf("emulated storage error: %v", err.err)
-}
-
-type storageLock struct {
- s *Storage
- r util.Releaser
-}
-
-func (l storageLock) Release() {
- l.r.Release()
- l.s.logI("storage lock released")
-}
-
-type reader struct {
- s *Storage
- fd storage.FileDesc
- storage.Reader
-}
-
-func (r *reader) Read(p []byte) (n int, err error) {
- err = r.s.emulateError(ModeRead, r.fd.Type)
- if err == nil {
- r.s.stall(ModeRead, r.fd.Type)
- n, err = r.Reader.Read(p)
- }
- r.s.count(ModeRead, r.fd.Type, n)
- if err != nil && err != io.EOF {
- r.s.logI("read error, fd=%s n=%d err=%v", r.fd, n, err)
- }
- return
-}
-
-func (r *reader) ReadAt(p []byte, off int64) (n int, err error) {
- err = r.s.emulateError(ModeRead, r.fd.Type)
- if err == nil {
- r.s.stall(ModeRead, r.fd.Type)
- n, err = r.Reader.ReadAt(p, off)
- }
- r.s.count(ModeRead, r.fd.Type, n)
- if err != nil && err != io.EOF {
- r.s.logI("readAt error, fd=%s offset=%d n=%d err=%v", r.fd, off, n, err)
- }
- return
-}
-
-func (r *reader) Close() (err error) {
- return r.s.fileClose(r.fd, r.Reader)
-}
-
-type writer struct {
- s *Storage
- fd storage.FileDesc
- storage.Writer
-}
-
-func (w *writer) Write(p []byte) (n int, err error) {
- err = w.s.emulateError(ModeWrite, w.fd.Type)
- if err == nil {
- w.s.stall(ModeWrite, w.fd.Type)
- n, err = w.Writer.Write(p)
- }
- w.s.count(ModeWrite, w.fd.Type, n)
- if err != nil && err != io.EOF {
- w.s.logI("write error, fd=%s n=%d err=%v", w.fd, n, err)
- }
- return
-}
-
-func (w *writer) Sync() (err error) {
- err = w.s.emulateError(ModeSync, w.fd.Type)
- if err == nil {
- w.s.stall(ModeSync, w.fd.Type)
- err = w.Writer.Sync()
- }
- w.s.count(ModeSync, w.fd.Type, 0)
- if err != nil {
- w.s.logI("sync error, fd=%s err=%v", w.fd, err)
- }
- return
-}
-
-func (w *writer) Close() (err error) {
- return w.s.fileClose(w.fd, w.Writer)
-}
-
-type Storage struct {
- storage.Storage
- path string
- onClose func() (preserve bool, err error)
- onLog func(str string)
-
- lmu sync.Mutex
- lb bytes.Buffer
-
- mu sync.Mutex
- rand *rand.Rand
- // Open files, true=writer, false=reader
- opens map[uint64]bool
- counters [flattenCount]int
- bytesCounter [flattenCount]int64
- emulatedError [flattenCount]error
- emulatedErrorOnce [flattenCount]bool
- emulatedRandomError [flattenCount]error
- emulatedRandomErrorProb [flattenCount]float64
- stallCond sync.Cond
- stalled [flattenCount]bool
-}
-
-func (s *Storage) log(skip int, str string) {
- s.lmu.Lock()
- defer s.lmu.Unlock()
- _, file, line, ok := runtime.Caller(skip + 2)
- if ok {
- // Truncate file name at last file name separator.
- if index := strings.LastIndex(file, "/"); index >= 0 {
- file = file[index+1:]
- } else if index = strings.LastIndex(file, "\\"); index >= 0 {
- file = file[index+1:]
- }
- } else {
- file = "???"
- line = 1
- }
- fmt.Fprintf(&s.lb, "%s:%d: ", file, line)
- lines := strings.Split(str, "\n")
- if l := len(lines); l > 1 && lines[l-1] == "" {
- lines = lines[:l-1]
- }
- for i, line := range lines {
- if i > 0 {
- s.lb.WriteString("\n\t")
- }
- s.lb.WriteString(line)
- }
- if s.onLog != nil {
- s.onLog(s.lb.String())
- s.lb.Reset()
- } else {
- s.lb.WriteByte('\n')
- }
-}
-
-func (s *Storage) logISkip(skip int, format string, args ...interface{}) {
- pc, _, _, ok := runtime.Caller(skip + 1)
- if ok {
- if f := runtime.FuncForPC(pc); f != nil {
- fname := f.Name()
- if index := strings.LastIndex(fname, "."); index >= 0 {
- fname = fname[index+1:]
- }
- format = fname + ": " + format
- }
- }
- s.log(skip+1, fmt.Sprintf(format, args...))
-}
-
-func (s *Storage) logI(format string, args ...interface{}) {
- s.logISkip(1, format, args...)
-}
-
-func (s *Storage) OnLog(onLog func(log string)) {
- s.lmu.Lock()
- s.onLog = onLog
- if s.lb.Len() != 0 {
- log := s.lb.String()
- s.onLog(log[:len(log)-1])
- s.lb.Reset()
- }
- s.lmu.Unlock()
-}
-
-func (s *Storage) Log(str string) {
- s.log(1, "Log: "+str)
- s.Storage.Log(str)
-}
-
-func (s *Storage) Lock() (l storage.Lock, err error) {
- l, err = s.Storage.Lock()
- if err != nil {
- s.logI("storage locking failed, err=%v", err)
- } else {
- s.logI("storage locked")
- l = storageLock{s, l}
- }
- return
-}
-
-func (s *Storage) List(t storage.FileType) (fds []storage.FileDesc, err error) {
- fds, err = s.Storage.List(t)
- if err != nil {
- s.logI("list failed, err=%v", err)
- return
- }
- s.logI("list, type=0x%x count=%d", int(t), len(fds))
- return
-}
-
-func (s *Storage) GetMeta() (fd storage.FileDesc, err error) {
- fd, err = s.Storage.GetMeta()
- if err != nil {
- if !os.IsNotExist(err) {
- s.logI("get meta failed, err=%v", err)
- }
- return
- }
- s.logI("get meta, fd=%s", fd)
- return
-}
-
-func (s *Storage) SetMeta(fd storage.FileDesc) error {
- ExpectWithOffset(1, fd.Type).To(Equal(storage.TypeManifest))
- err := s.Storage.SetMeta(fd)
- if err != nil {
- s.logI("set meta failed, fd=%s err=%v", fd, err)
- } else {
- s.logI("set meta, fd=%s", fd)
- }
- return err
-}
-
-func (s *Storage) fileClose(fd storage.FileDesc, closer io.Closer) (err error) {
- err = s.emulateError(ModeClose, fd.Type)
- if err == nil {
- s.stall(ModeClose, fd.Type)
- }
- x := packFile(fd)
- s.mu.Lock()
- defer s.mu.Unlock()
- if err == nil {
- ExpectWithOffset(2, s.opens).To(HaveKey(x), "File closed, fd=%s", fd)
- err = closer.Close()
- }
- s.countNB(ModeClose, fd.Type, 0)
- writer := s.opens[x]
- if err != nil {
- s.logISkip(1, "file close failed, fd=%s writer=%v err=%v", fd, writer, err)
- } else {
- s.logISkip(1, "file closed, fd=%s writer=%v", fd, writer)
- delete(s.opens, x)
- }
- return
-}
-
-func (s *Storage) assertOpen(fd storage.FileDesc) {
- x := packFile(fd)
- ExpectWithOffset(2, s.opens).NotTo(HaveKey(x), "File open, fd=%s writer=%v", fd, s.opens[x])
-}
-
-func (s *Storage) Open(fd storage.FileDesc) (r storage.Reader, err error) {
- err = s.emulateError(ModeOpen, fd.Type)
- if err == nil {
- s.stall(ModeOpen, fd.Type)
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if err == nil {
- s.assertOpen(fd)
- s.countNB(ModeOpen, fd.Type, 0)
- r, err = s.Storage.Open(fd)
- }
- if err != nil {
- s.logI("file open failed, fd=%s err=%v", fd, err)
- } else {
- s.logI("file opened, fd=%s", fd)
- s.opens[packFile(fd)] = false
- r = &reader{s, fd, r}
- }
- return
-}
-
-func (s *Storage) Create(fd storage.FileDesc) (w storage.Writer, err error) {
- err = s.emulateError(ModeCreate, fd.Type)
- if err == nil {
- s.stall(ModeCreate, fd.Type)
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if err == nil {
- s.assertOpen(fd)
- s.countNB(ModeCreate, fd.Type, 0)
- w, err = s.Storage.Create(fd)
- }
- if err != nil {
- s.logI("file create failed, fd=%s err=%v", fd, err)
- } else {
- s.logI("file created, fd=%s", fd)
- s.opens[packFile(fd)] = true
- w = &writer{s, fd, w}
- }
- return
-}
-
-func (s *Storage) Remove(fd storage.FileDesc) (err error) {
- err = s.emulateError(ModeRemove, fd.Type)
- if err == nil {
- s.stall(ModeRemove, fd.Type)
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if err == nil {
- s.assertOpen(fd)
- s.countNB(ModeRemove, fd.Type, 0)
- err = s.Storage.Remove(fd)
- }
- if err != nil {
- s.logI("file remove failed, fd=%s err=%v", fd, err)
- } else {
- s.logI("file removed, fd=%s", fd)
- }
- return
-}
-
-func (s *Storage) ForceRemove(fd storage.FileDesc) (err error) {
- s.countNB(ModeRemove, fd.Type, 0)
- if err = s.Storage.Remove(fd); err != nil {
- s.logI("file remove failed (forced), fd=%s err=%v", fd, err)
- } else {
- s.logI("file removed (forced), fd=%s", fd)
- }
- return
-}
-
-func (s *Storage) Rename(oldfd, newfd storage.FileDesc) (err error) {
- err = s.emulateError(ModeRename, oldfd.Type)
- if err == nil {
- s.stall(ModeRename, oldfd.Type)
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if err == nil {
- s.assertOpen(oldfd)
- s.assertOpen(newfd)
- s.countNB(ModeRename, oldfd.Type, 0)
- err = s.Storage.Rename(oldfd, newfd)
- }
- if err != nil {
- s.logI("file rename failed, oldfd=%s newfd=%s err=%v", oldfd, newfd, err)
- } else {
- s.logI("file renamed, oldfd=%s newfd=%s", oldfd, newfd)
- }
- return
-}
-
-func (s *Storage) ForceRename(oldfd, newfd storage.FileDesc) (err error) {
- s.countNB(ModeRename, oldfd.Type, 0)
- if err = s.Storage.Rename(oldfd, newfd); err != nil {
- s.logI("file rename failed (forced), oldfd=%s newfd=%s err=%v", oldfd, newfd, err)
- } else {
- s.logI("file renamed (forced), oldfd=%s newfd=%s", oldfd, newfd)
- }
- return
-}
-
-func (s *Storage) openFiles() string {
- out := "Open files:"
- for x, writer := range s.opens {
- fd := unpackFile(x)
- out += fmt.Sprintf("\n · fd=%s writer=%v", fd, writer)
- }
- return out
-}
-
-func (s *Storage) CloseCheck() {
- s.mu.Lock()
- defer s.mu.Unlock()
- ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
-}
-
-func (s *Storage) OnClose(onClose func() (preserve bool, err error)) {
- s.mu.Lock()
- s.onClose = onClose
- s.mu.Unlock()
-}
-
-func (s *Storage) Close() error {
- s.mu.Lock()
- defer s.mu.Unlock()
- ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
- err := s.Storage.Close()
- if err != nil {
- s.logI("storage closing failed, err=%v", err)
- } else {
- s.logI("storage closed")
- }
- var preserve bool
- if s.onClose != nil {
- var err0 error
- if preserve, err0 = s.onClose(); err0 != nil {
- s.logI("onClose error, err=%v", err0)
- }
- }
- if s.path != "" {
- if storageKeepFS || preserve {
- s.logI("storage is preserved, path=%v", s.path)
- } else {
- if err1 := os.RemoveAll(s.path); err1 != nil {
- s.logI("cannot remove storage, err=%v", err1)
- } else {
- s.logI("storage has been removed")
- }
- }
- }
- return err
-}
-
-func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) {
- s.counters[flattenType(m, t)]++
- s.bytesCounter[flattenType(m, t)] += int64(n)
-}
-
-func (s *Storage) count(m StorageMode, t storage.FileType, n int) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.countNB(m, t, n)
-}
-
-func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) {
- for _, x := range listFlattenType(m, t) {
- s.counters[x] = 0
- s.bytesCounter[x] = 0
- }
-}
-
-func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) {
- for _, x := range listFlattenType(m, t) {
- count += s.counters[x]
- bytes += s.bytesCounter[x]
- }
- return
-}
-
-func (s *Storage) emulateError(m StorageMode, t storage.FileType) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- x := flattenType(m, t)
- if err := s.emulatedError[x]; err != nil {
- if s.emulatedErrorOnce[x] {
- s.emulatedError[x] = nil
- }
- return emulatedError{err}
- }
- if err := s.emulatedRandomError[x]; err != nil && s.rand.Float64() < s.emulatedRandomErrorProb[x] {
- return emulatedError{err}
- }
- return nil
-}
-
-func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- for _, x := range listFlattenType(m, t) {
- s.emulatedError[x] = err
- s.emulatedErrorOnce[x] = false
- }
-}
-
-func (s *Storage) EmulateErrorOnce(m StorageMode, t storage.FileType, err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- for _, x := range listFlattenType(m, t) {
- s.emulatedError[x] = err
- s.emulatedErrorOnce[x] = true
- }
-}
-
-func (s *Storage) EmulateRandomError(m StorageMode, t storage.FileType, prob float64, err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- for _, x := range listFlattenType(m, t) {
- s.emulatedRandomError[x] = err
- s.emulatedRandomErrorProb[x] = prob
- }
-}
-
-func (s *Storage) stall(m StorageMode, t storage.FileType) {
- x := flattenType(m, t)
- s.mu.Lock()
- defer s.mu.Unlock()
- for s.stalled[x] {
- s.stallCond.Wait()
- }
-}
-
-func (s *Storage) Stall(m StorageMode, t storage.FileType) {
- s.mu.Lock()
- defer s.mu.Unlock()
- for _, x := range listFlattenType(m, t) {
- s.stalled[x] = true
- }
-}
-
-func (s *Storage) Release(m StorageMode, t storage.FileType) {
- s.mu.Lock()
- defer s.mu.Unlock()
- for _, x := range listFlattenType(m, t) {
- s.stalled[x] = false
- }
- s.stallCond.Broadcast()
-}
-
-func NewStorage() *Storage {
- var (
- stor storage.Storage
- path string
- )
- if storageUseFS {
- for {
- storageMu.Lock()
- num := storageNum
- storageNum++
- storageMu.Unlock()
- path = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
- if _, err := os.Stat(path); os.IsNotExist(err) {
- stor, err = storage.OpenFile(path, false)
- ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path)
- break
- }
- }
- } else {
- stor = storage.NewMemStorage()
- }
- s := &Storage{
- Storage: stor,
- path: path,
- rand: NewRand(),
- opens: make(map[uint64]bool),
- }
- s.stallCond.L = &s.mu
- if s.path != "" {
- s.logI("using FS storage")
- s.logI("storage path: %s", s.path)
- } else {
- s.logI("using MEM storage")
- }
- return s
-}
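
For reference: the removed test Storage flattens every (mode, file type) pair into one slot of a fixed-size array, so counters, emulated errors and stall flags can all share the same indexing (index = mode*typeCount + fileType). A minimal sketch with a reduced mode/type set:

package main

import "fmt"

const (
    modeOpen = iota
    modeRead
    modeWrite
    modeCount
)

const (
    typeManifest = iota
    typeTable
    typeCount
)

var counters [modeCount * typeCount]int

// flatten maps a (mode, fileType) pair onto a single array slot.
func flatten(mode, fileType int) int {
    return mode*typeCount + fileType
}

func main() {
    counters[flatten(modeWrite, typeTable)] += 2
    fmt.Println(counters[flatten(modeWrite, typeTable)]) // 2
}
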
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
deleted file mode 100644
index 97c5294b1..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package testutil
-
-import (
- "bytes"
- "flag"
- "math/rand"
- "reflect"
- "sync"
-
- "github.com/onsi/ginkgo/config"
-
- "github.com/syndtr/goleveldb/leveldb/comparer"
-)
-
-var (
- runfn = make(map[string][]func())
- runmu sync.Mutex
-)
-
-func Defer(args ...interface{}) bool {
- var (
- group string
- fn func()
- )
- for _, arg := range args {
- v := reflect.ValueOf(arg)
- switch v.Kind() {
- case reflect.String:
- group = v.String()
- case reflect.Func:
- r := reflect.ValueOf(&fn).Elem()
- r.Set(v)
- }
- }
- if fn != nil {
- runmu.Lock()
- runfn[group] = append(runfn[group], fn)
- runmu.Unlock()
- }
- return true
-}
-
-func RunDefer(groups ...string) bool {
- if len(groups) == 0 {
- groups = append(groups, "")
- }
- runmu.Lock()
- var runfn_ []func()
- for _, group := range groups {
- runfn_ = append(runfn_, runfn[group]...)
- delete(runfn, group)
- }
- runmu.Unlock()
- for _, fn := range runfn_ {
- fn()
- }
- return runfn_ != nil
-}
-
-func RandomSeed() int64 {
- if !flag.Parsed() {
- panic("random seed not initialized")
- }
- return config.GinkgoConfig.RandomSeed
-}
-
-func NewRand() *rand.Rand {
- return rand.New(rand.NewSource(RandomSeed()))
-}
-
-var cmp = comparer.DefaultComparer
-
-func BytesSeparator(a, b []byte) []byte {
- if bytes.Equal(a, b) {
- return b
- }
- i, n := 0, len(a)
- if n > len(b) {
- n = len(b)
- }
- for ; i < n && (a[i] == b[i]); i++ {
- }
- x := append([]byte{}, a[:i]...)
- if i < n {
- if c := a[i] + 1; c < b[i] {
- return append(x, c)
- }
- x = append(x, a[i])
- i++
- }
- for ; i < len(a); i++ {
- if c := a[i]; c < 0xff {
- return append(x, c+1)
- } else {
- x = append(x, c)
- }
- }
- if len(b) > i && b[i] > 0 {
- return append(x, b[i]-1)
- }
- return append(x, 'x')
-}
-
-func BytesAfter(b []byte) []byte {
- var x []byte
- for _, c := range b {
- if c < 0xff {
- return append(x, c+1)
- } else {
- x = append(x, c)
- }
- }
- return append(x, 'x')
-}
-
-func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
- if rnd == nil {
- rnd = NewRand()
- }
- for x := 0; x < round; x++ {
- fn(rnd.Intn(n))
- }
- return
-}
-
-func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) {
- if rnd == nil {
- rnd = NewRand()
- }
- for x := 0; x < round; x++ {
- for _, i := range rnd.Perm(n) {
- fn(i)
- }
- }
- return
-}
-
-func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) {
- if rnd == nil {
- rnd = NewRand()
- }
- for x := 0; x < round; x++ {
- start := rnd.Intn(n)
- length := 0
- if j := n - start; j > 0 {
- length = rnd.Intn(j)
- }
- fn(start, start+length)
- }
- return
-}
-
-func Max(x, y int) int {
- if x > y {
- return x
- }
- return y
-}
-
-func Min(x, y int) int {
- if x < y {
- return x
- }
- return y
-}
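
For reference: BytesAfter above returns a short byte string strictly greater than its input by bumping the first byte below 0xff; the tests use it to seek just past the last key. A self-contained sketch of the same function:

package main

import (
    "bytes"
    "fmt"
)

// bytesAfter returns a short byte string strictly greater than b,
// by incrementing the first byte that is below 0xff.
func bytesAfter(b []byte) []byte {
    var x []byte
    for _, c := range b {
        if c < 0xff {
            return append(x, c+1)
        }
        x = append(x, c)
    }
    return append(x, 'x') // every byte was 0xff: extend instead
}

func main() {
    a := []byte("abc")
    fmt.Println(bytes.Compare(bytesAfter(a), a) > 0) // true
}
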
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
index 50870ed83..d274eeff2 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
@@ -79,7 +79,7 @@ func (v *version) release() {
v.s.vmu.Unlock()
}
-func (v *version) walkOverlapping(aux tFiles, ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
ukey := ikey.ukey()
// Aux level.
@@ -130,7 +130,7 @@ func (v *version) walkOverlapping(aux tFiles, ikey iKey, f func(level int, t *tF
}
}
-func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
ukey := ikey.ukey()
var (
@@ -140,7 +140,7 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
// Level-0.
zfound bool
zseq uint64
- zkt kType
+ zkt keyType
zval []byte
)
@@ -176,7 +176,7 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
return false
}
- if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
+ if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil {
if v.s.icmp.uCompare(ukey, fukey) == 0 {
// Levels <= 0 may overlap each other.
if level <= 0 {
@@ -188,12 +188,12 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
}
} else {
switch fkt {
- case ktVal:
+ case keyTypeVal:
value = fval
err = nil
- case ktDel:
+ case keyTypeDel:
default:
- panic("leveldb: invalid iKey type")
+ panic("leveldb: invalid internalKey type")
}
return false
}
@@ -207,12 +207,12 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
}, func(level int) bool {
if zfound {
switch zkt {
- case ktVal:
+ case keyTypeVal:
value = zval
err = nil
- case ktDel:
+ case keyTypeDel:
default:
- panic("leveldb: invalid iKey type")
+ panic("leveldb: invalid internalKey type")
}
return false
}
@@ -227,19 +227,18 @@ func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool)
return
}
-func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
+func (v *version) sampleSeek(ikey internalKey) (tcomp bool) {
var tset *tSet
v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool {
if tset == nil {
tset = &tSet{level, t}
return true
- } else {
- if tset.table.consumeSeek() <= 0 {
- tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
- }
- return false
}
+ if tset.table.consumeSeek() <= 0 {
+ tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+ }
+ return false
}, nil)
return
@@ -286,12 +285,12 @@ func (v *version) tLen(level int) int {
return 0
}
-func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
+func (v *version) offsetOf(ikey internalKey) (n int64, err error) {
for level, tables := range v.levels {
for _, t := range tables {
if v.s.icmp.Compare(t.imax, ikey) <= 0 {
// Entire file is before "ikey", so just add the file size
- n += uint64(t.size)
+ n += t.size
} else if v.s.icmp.Compare(t.imin, ikey) > 0 {
// Entire file is after "ikey", so ignore
if level > 0 {
@@ -303,12 +302,11 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
} else {
// "ikey" falls in the range for this table. Add the
// approximate offset of "ikey" within the table.
- var nn uint64
- nn, err = v.s.tops.offsetOf(t, ikey)
- if err != nil {
+ if m, err := v.s.tops.offsetOf(t, ikey); err == nil {
+ n += m
+ } else {
return 0, err
}
- n += nn
}
}
}
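
The version.go hunk above replaces the accumulate-then-check variable dance with Go's if-with-short-statement form and moves the offset from uint64 to int64. A minimal illustration of the same pattern, with made-up names rather than the leveldb API:

package main

import (
    "errors"
    "fmt"
)

func sizeOf(name string) (int64, error) {
    if name == "" {
        return 0, errors.New("unknown file")
    }
    return int64(len(name)), nil
}

// totalSize adds on the happy path and returns immediately on error,
// keeping the error variable scoped to the if statement.
func totalSize(names []string) (int64, error) {
    var n int64
    for _, name := range names {
        if m, err := sizeOf(name); err == nil {
            n += m
        } else {
            return 0, err
        }
    }
    return n, nil
}

func main() {
    n, err := totalSize([]string{"000001.ldb", "000002.ldb"})
    fmt.Println(n, err)
}
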
diff --git a/core/blockchain.go b/core/blockchain.go
index 2c6ff24f9..83299ceec 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -18,11 +18,9 @@
package core
import (
- crand "crypto/rand"
"errors"
"fmt"
"io"
- "math"
"math/big"
mrand "math/rand"
"runtime"
@@ -82,24 +80,22 @@ const (
// included in the canonical one, whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
+ hc *HeaderChain
chainDb ethdb.Database
eventMux *event.TypeMux
genesisBlock *types.Block
- // Last known total difficulty
+
mu sync.RWMutex
chainmu sync.RWMutex
tsmu sync.RWMutex
procmu sync.RWMutex
- checkpoint int // checkpoint counts towards the new checkpoint
- currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
- currentBlock *types.Block // Current head of the block chain
- currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!)
+ checkpoint int // checkpoint counts towards the new checkpoint
+ currentBlock *types.Block // Current head of the block chain
+ currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!)
- headerCache *lru.Cache // Cache for the most recent block headers
bodyCache *lru.Cache // Cache for the most recent block bodies
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
- tdCache *lru.Cache // Cache for the most recent block total difficulties
blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
@@ -110,7 +106,6 @@ type BlockChain struct {
wg sync.WaitGroup
pow pow.PoW
- rand *mrand.Rand
processor Processor
validator Validator
}
@@ -119,10 +114,8 @@ type BlockChain struct {
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) {
- headerCache, _ := lru.New(headerCacheLimit)
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
- tdCache, _ := lru.New(tdCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
futureBlocks, _ := lru.New(maxFutureBlocks)
@@ -130,22 +123,21 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
chainDb: chainDb,
eventMux: mux,
quit: make(chan struct{}),
- headerCache: headerCache,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
- tdCache: tdCache,
blockCache: blockCache,
futureBlocks: futureBlocks,
pow: pow,
}
- // Seed a fast but crypto originating random generator
- seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+ bc.SetValidator(NewBlockValidator(bc, pow))
+ bc.SetProcessor(NewStateProcessor(bc))
+
+ gv := func() HeaderValidator { return bc.Validator() }
+ var err error
+ bc.hc, err = NewHeaderChain(chainDb, gv, bc.getProcInterrupt)
if err != nil {
return nil, err
}
- bc.rand = mrand.New(mrand.NewSource(seed.Int64()))
- bc.SetValidator(NewBlockValidator(bc, pow))
- bc.SetProcessor(NewStateProcessor(bc))
bc.genesisBlock = bc.GetBlockByNumber(0)
if bc.genesisBlock == nil {
@@ -171,6 +163,10 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
return bc, nil
}
+func (self *BlockChain) getProcInterrupt() bool {
+ return atomic.LoadInt32(&self.procInterrupt) == 1
+}
+
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (self *BlockChain) loadLastState() error {
@@ -189,12 +185,13 @@ func (self *BlockChain) loadLastState() error {
}
}
// Restore the last known head header
- self.currentHeader = self.currentBlock.Header()
+ currentHeader := self.currentBlock.Header()
if head := GetHeadHeaderHash(self.chainDb); head != (common.Hash{}) {
if header := self.GetHeader(head); header != nil {
- self.currentHeader = header
+ currentHeader = header
}
}
+ self.hc.SetCurrentHeader(currentHeader)
// Restore the last known head fast block
self.currentFastBlock = self.currentBlock
if head := GetHeadFastBlockHash(self.chainDb); head != (common.Hash{}) {
@@ -203,11 +200,11 @@ func (self *BlockChain) loadLastState() error {
}
}
// Issue a status log and return
- headerTd := self.GetTd(self.currentHeader.Hash())
+ headerTd := self.GetTd(self.hc.CurrentHeader().Hash())
blockTd := self.GetTd(self.currentBlock.Hash())
fastTd := self.GetTd(self.currentFastBlock.Hash())
- glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.currentHeader.Number, self.currentHeader.Hash().Bytes()[:4], headerTd)
+ glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd)
glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)
glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)
@@ -222,71 +219,35 @@ func (bc *BlockChain) SetHead(head uint64) {
bc.mu.Lock()
defer bc.mu.Unlock()
- // Figure out the highest known canonical headers and/or blocks
- height := uint64(0)
- if bc.currentHeader != nil {
- if hh := bc.currentHeader.Number.Uint64(); hh > height {
- height = hh
- }
- }
- if bc.currentBlock != nil {
- if bh := bc.currentBlock.NumberU64(); bh > height {
- height = bh
- }
- }
- if bc.currentFastBlock != nil {
- if fbh := bc.currentFastBlock.NumberU64(); fbh > height {
- height = fbh
- }
- }
- // Gather all the hashes that need deletion
- drop := make(map[common.Hash]struct{})
-
- for bc.currentHeader != nil && bc.currentHeader.Number.Uint64() > head {
- drop[bc.currentHeader.Hash()] = struct{}{}
- bc.currentHeader = bc.GetHeader(bc.currentHeader.ParentHash)
- }
- for bc.currentBlock != nil && bc.currentBlock.NumberU64() > head {
- drop[bc.currentBlock.Hash()] = struct{}{}
- bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash())
- }
- for bc.currentFastBlock != nil && bc.currentFastBlock.NumberU64() > head {
- drop[bc.currentFastBlock.Hash()] = struct{}{}
- bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash())
- }
- // Roll back the canonical chain numbering
- for i := height; i > head; i-- {
- DeleteCanonicalHash(bc.chainDb, i)
- }
- // Delete everything found by the above rewind
- for hash, _ := range drop {
- DeleteHeader(bc.chainDb, hash)
+ delFn := func(hash common.Hash) {
DeleteBody(bc.chainDb, hash)
- DeleteTd(bc.chainDb, hash)
}
+ bc.hc.SetHead(head, delFn)
+
// Clear out any stale content from the caches
- bc.headerCache.Purge()
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
bc.blockCache.Purge()
bc.futureBlocks.Purge()
// Update all computed fields to the new head
+ if bc.currentBlock != nil && bc.hc.CurrentHeader().Number.Uint64() < bc.currentBlock.NumberU64() {
+ bc.currentBlock = bc.GetBlock(bc.hc.CurrentHeader().Hash())
+ }
+ if bc.currentFastBlock != nil && bc.hc.CurrentHeader().Number.Uint64() < bc.currentFastBlock.NumberU64() {
+ bc.currentFastBlock = bc.GetBlock(bc.hc.CurrentHeader().Hash())
+ }
+
if bc.currentBlock == nil {
bc.currentBlock = bc.genesisBlock
}
- if bc.currentHeader == nil {
- bc.currentHeader = bc.genesisBlock.Header()
- }
if bc.currentFastBlock == nil {
bc.currentFastBlock = bc.genesisBlock
}
+
if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
glog.Fatalf("failed to reset head block hash: %v", err)
}
- if err := WriteHeadHeaderHash(bc.chainDb, bc.currentHeader.Hash()); err != nil {
- glog.Fatalf("failed to reset head header hash: %v", err)
- }
if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
glog.Fatalf("failed to reset head fast block hash: %v", err)
}
@@ -329,15 +290,6 @@ func (self *BlockChain) LastBlockHash() common.Hash {
return self.currentBlock.Hash()
}
-// CurrentHeader retrieves the current head header of the canonical chain. The
-// header is retrieved from the blockchain's internal cache.
-func (self *BlockChain) CurrentHeader() *types.Header {
- self.mu.RLock()
- defer self.mu.RUnlock()
-
- return self.currentHeader
-}
-
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (self *BlockChain) CurrentBlock() *types.Block {
@@ -425,7 +377,8 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
- bc.currentHeader = bc.genesisBlock.Header()
+ bc.hc.SetGenesis(bc.genesisBlock.Header())
+ bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
bc.currentFastBlock = bc.genesisBlock
}
@@ -483,10 +436,7 @@ func (bc *BlockChain) insert(block *types.Block) {
// If the block is better than our head or is on a different chain, force update heads
if updateHeads {
- if err := WriteHeadHeaderHash(bc.chainDb, block.Hash()); err != nil {
- glog.Fatalf("failed to insert head header hash: %v", err)
- }
- bc.currentHeader = block.Header()
+ bc.hc.SetCurrentHeader(block.Header())
if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to insert head fast block hash: %v", err)
@@ -500,38 +450,6 @@ func (bc *BlockChain) Genesis() *types.Block {
return bc.genesisBlock
}
-// HasHeader checks if a block header is present in the database or not, caching
-// it if present.
-func (bc *BlockChain) HasHeader(hash common.Hash) bool {
- return bc.GetHeader(hash) != nil
-}
-
-// GetHeader retrieves a block header from the database by hash, caching it if
-// found.
-func (self *BlockChain) GetHeader(hash common.Hash) *types.Header {
- // Short circuit if the header's already in the cache, retrieve otherwise
- if header, ok := self.headerCache.Get(hash); ok {
- return header.(*types.Header)
- }
- header := GetHeader(self.chainDb, hash)
- if header == nil {
- return nil
- }
- // Cache the found header for next time and return
- self.headerCache.Add(header.Hash(), header)
- return header
-}
-
-// GetHeaderByNumber retrieves a block header from the database by number,
-// caching it (associated with its hash) if found.
-func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
- hash := GetCanonicalHash(self.chainDb, number)
- if hash == (common.Hash{}) {
- return nil
- }
- return self.GetHeader(hash)
-}
-
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (self *BlockChain) GetBody(hash common.Hash) *types.Body {
@@ -565,22 +483,6 @@ func (self *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
return body
}
-// GetTd retrieves a block's total difficulty in the canonical chain from the
-// database by hash, caching it if found.
-func (self *BlockChain) GetTd(hash common.Hash) *big.Int {
- // Short circuit if the td's already in the cache, retrieve otherwise
- if cached, ok := self.tdCache.Get(hash); ok {
- return cached.(*big.Int)
- }
- td := GetTd(self.chainDb, hash)
- if td == nil {
- return nil
- }
- // Cache the found body for next time and return
- self.tdCache.Add(hash, td)
- return td
-}
-
// HasBlock checks if a block is fully present in the database or not, caching
// it if present.
func (bc *BlockChain) HasBlock(hash common.Hash) bool {
@@ -625,28 +527,6 @@ func (self *BlockChain) GetBlockByNumber(number uint64) *types.Block {
return self.GetBlock(hash)
}
-// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
-// hash, fetching towards the genesis block.
-func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
- // Get the origin header from which to fetch
- header := self.GetHeader(hash)
- if header == nil {
- return nil
- }
- // Iterate the headers until enough is collected or the genesis reached
- chain := make([]common.Hash, 0, max)
- for i := uint64(0); i < max; i++ {
- if header = self.GetHeader(header.ParentHash); header == nil {
- break
- }
- chain = append(chain, header.Hash())
- if header.Number.Cmp(common.Big0) == 0 {
- break
- }
- }
- return chain
-}
-
// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
@@ -698,195 +578,15 @@ func (self *BlockChain) procFutureBlocks() {
}
}
-type writeStatus byte
+type WriteStatus byte
const (
- NonStatTy writeStatus = iota
+ NonStatTy WriteStatus = iota
CanonStatTy
SplitStatTy
SideStatTy
)
-// writeHeader writes a header into the local chain, given that its parent is
-// already known. If the total difficulty of the newly inserted header becomes
-// greater than the current known TD, the canonical chain is re-routed.
-//
-// Note: This method is not concurrent-safe with inserting blocks simultaneously
-// into the chain, as side effects caused by reorganisations cannot be emulated
-// without the real blocks. Hence, writing headers directly should only be done
-// in two scenarios: pure-header mode of operation (light clients), or properly
-// separated header/block phases (non-archive clients).
-func (self *BlockChain) writeHeader(header *types.Header) error {
- self.wg.Add(1)
- defer self.wg.Done()
-
- // Calculate the total difficulty of the header
- ptd := self.GetTd(header.ParentHash)
- if ptd == nil {
- return ParentError(header.ParentHash)
- }
-
- localTd := self.GetTd(self.currentHeader.Hash())
- externTd := new(big.Int).Add(header.Difficulty, ptd)
-
- // Make sure no inconsistent state is leaked during insertion
- self.mu.Lock()
- defer self.mu.Unlock()
-
- // If the total difficulty is higher than our known, add it to the canonical chain
- // Second clause in the if statement reduces the vulnerability to selfish mining.
- // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
- if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
- // Delete any canonical number assignments above the new head
- for i := header.Number.Uint64() + 1; GetCanonicalHash(self.chainDb, i) != (common.Hash{}); i++ {
- DeleteCanonicalHash(self.chainDb, i)
- }
- // Overwrite any stale canonical number assignments
- head := self.GetHeader(header.ParentHash)
- for GetCanonicalHash(self.chainDb, head.Number.Uint64()) != head.Hash() {
- WriteCanonicalHash(self.chainDb, head.Hash(), head.Number.Uint64())
- head = self.GetHeader(head.ParentHash)
- }
- // Extend the canonical chain with the new header
- if err := WriteCanonicalHash(self.chainDb, header.Hash(), header.Number.Uint64()); err != nil {
- glog.Fatalf("failed to insert header number: %v", err)
- }
- if err := WriteHeadHeaderHash(self.chainDb, header.Hash()); err != nil {
- glog.Fatalf("failed to insert head header hash: %v", err)
- }
- self.currentHeader = types.CopyHeader(header)
- }
- // Irrelevant of the canonical status, write the header itself to the database
- if err := WriteTd(self.chainDb, header.Hash(), externTd); err != nil {
- glog.Fatalf("failed to write header total difficulty: %v", err)
- }
- if err := WriteHeader(self.chainDb, header); err != nil {
- glog.Fatalf("filed to write header contents: %v", err)
- }
- return nil
-}
-
-// InsertHeaderChain attempts to insert the given header chain in to the local
-// chain, possibly creating a reorg. If an error is returned, it will return the
-// index number of the failing header as well an error describing what went wrong.
-//
-// The verify parameter can be used to fine tune whether nonce verification
-// should be done or not. The reason behind the optional check is because some
-// of the header retrieval mechanisms already need to verify nonces, as well as
-// because nonces can be verified sparsely, not needing to check each.
-func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
- self.wg.Add(1)
- defer self.wg.Done()
-
- // Make sure only one thread manipulates the chain at once
- self.chainmu.Lock()
- defer self.chainmu.Unlock()
-
- // Collect some import statistics to report on
- stats := struct{ processed, ignored int }{}
- start := time.Now()
-
- // Generate the list of headers that should be POW verified
- verify := make([]bool, len(chain))
- for i := 0; i < len(verify)/checkFreq; i++ {
- index := i*checkFreq + self.rand.Intn(checkFreq)
- if index >= len(verify) {
- index = len(verify) - 1
- }
- verify[index] = true
- }
- verify[len(verify)-1] = true // Last should always be verified to avoid junk
-
- // Create the header verification task queue and worker functions
- tasks := make(chan int, len(chain))
- for i := 0; i < len(chain); i++ {
- tasks <- i
- }
- close(tasks)
-
- errs, failed := make([]error, len(tasks)), int32(0)
- process := func(worker int) {
- for index := range tasks {
- header, hash := chain[index], chain[index].Hash()
-
- // Short circuit insertion if shutting down or processing failed
- if atomic.LoadInt32(&self.procInterrupt) == 1 {
- return
- }
- if atomic.LoadInt32(&failed) > 0 {
- return
- }
- // Short circuit if the header is bad or already known
- if BadHashes[hash] {
- errs[index] = BadHashError(hash)
- atomic.AddInt32(&failed, 1)
- return
- }
- if self.HasHeader(hash) {
- continue
- }
- // Verify that the header honors the chain parameters
- checkPow := verify[index]
-
- var err error
- if index == 0 {
- err = self.Validator().ValidateHeader(header, self.GetHeader(header.ParentHash), checkPow)
- } else {
- err = self.Validator().ValidateHeader(header, chain[index-1], checkPow)
- }
- if err != nil {
- errs[index] = err
- atomic.AddInt32(&failed, 1)
- return
- }
- }
- }
- // Start as many worker threads as goroutines allowed
- pending := new(sync.WaitGroup)
- for i := 0; i < runtime.GOMAXPROCS(0); i++ {
- pending.Add(1)
- go func(id int) {
- defer pending.Done()
- process(id)
- }(i)
- }
- pending.Wait()
-
- // If anything failed, report
- if failed > 0 {
- for i, err := range errs {
- if err != nil {
- return i, err
- }
- }
- }
- // All headers passed verification, import them into the database
- for i, header := range chain {
- // Short circuit insertion if shutting down
- if atomic.LoadInt32(&self.procInterrupt) == 1 {
- glog.V(logger.Debug).Infoln("premature abort during header chain processing")
- break
- }
- hash := header.Hash()
-
- // If the header's already known, skip it, otherwise store
- if self.HasHeader(hash) {
- stats.ignored++
- continue
- }
- if err := self.writeHeader(header); err != nil {
- return i, err
- }
- stats.processed++
- }
- // Report some public statistics so the user has a clue what's going on
- first, last := chain[0], chain[len(chain)-1]
- glog.V(logger.Info).Infof("imported %d header(s) (%d ignored) in %v. #%v [%x… / %x…]", stats.processed, stats.ignored,
- time.Since(start), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
-
- return 0, nil
-}
-
// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (self *BlockChain) Rollback(chain []common.Hash) {
@@ -896,9 +596,8 @@ func (self *BlockChain) Rollback(chain []common.Hash) {
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]
- if self.currentHeader.Hash() == hash {
- self.currentHeader = self.GetHeader(self.currentHeader.ParentHash)
- WriteHeadHeaderHash(self.chainDb, self.currentHeader.Hash())
+ if self.hc.CurrentHeader().Hash() == hash {
+ self.hc.SetCurrentHeader(self.GetHeader(self.hc.CurrentHeader().ParentHash))
}
if self.currentFastBlock.Hash() == hash {
self.currentFastBlock = self.GetBlock(self.currentFastBlock.ParentHash())
@@ -1055,7 +754,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
}
// WriteBlock writes the block to the chain.
-func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) {
+func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err error) {
self.wg.Add(1)
defer self.wg.Done()
@@ -1268,12 +967,14 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// event about them
func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
var (
- newChain types.Blocks
- commonBlock *types.Block
- oldStart = oldBlock
- newStart = newBlock
- deletedTxs types.Transactions
- deletedLogs vm.Logs
+ newChain types.Blocks
+ oldChain types.Blocks
+ commonBlock *types.Block
+ oldStart = oldBlock
+ newStart = newBlock
+ deletedTxs types.Transactions
+ deletedLogs vm.Logs
+ deletedLogsByHash = make(map[common.Hash]vm.Logs)
// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
// These logs are later announced as deleted.
@@ -1282,6 +983,8 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
receipts := GetBlockReceipts(self.chainDb, h)
for _, receipt := range receipts {
deletedLogs = append(deletedLogs, receipt.Logs...)
+
+ deletedLogsByHash[h] = receipt.Logs
}
}
)
@@ -1290,6 +993,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if oldBlock.NumberU64() > newBlock.NumberU64() {
// reduce old chain
for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
+ oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash())
@@ -1313,6 +1017,8 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
commonBlock = oldBlock
break
}
+
+ oldChain = append(oldChain, oldBlock)
newChain = append(newChain, newBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash())
@@ -1369,6 +1075,14 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
go self.eventMux.Post(RemovedLogsEvent{deletedLogs})
}
+ if len(oldChain) > 0 {
+ go func() {
+ for _, block := range oldChain {
+ self.eventMux.Post(ChainSideEvent{Block: block, Logs: deletedLogsByHash[block.Hash()]})
+ }
+ }()
+ }
+
return nil
}
@@ -1412,3 +1126,89 @@ func reportBlock(block *types.Block, err error) {
}
go ReportBlock(block, err)
}
+
+// InsertHeaderChain attempts to insert the given header chain into the local
+// chain, possibly creating a reorg. If an error is returned, it will return the
+// index number of the failing header as well as an error describing what went wrong.
+//
+// The verify parameter can be used to fine tune whether nonce verification
+// should be done or not. The reason behind the optional check is because some
+// of the header retrieval mechanisms already need to verfy nonces, as well as
+// because nonces can be verified sparsely, not needing to check each.
+func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+ // Make sure only one thread manipulates the chain at once
+ self.chainmu.Lock()
+ defer self.chainmu.Unlock()
+
+ self.wg.Add(1)
+ defer self.wg.Done()
+
+ whFunc := func(header *types.Header) error {
+ self.mu.Lock()
+ defer self.mu.Unlock()
+
+ _, err := self.hc.WriteHeader(header)
+ return err
+ }
+
+ return self.hc.InsertHeaderChain(chain, checkFreq, whFunc)
+}
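
A hypothetical call site illustrating the checkFreq contract: with checkFreq = 8, roughly one nonce in every eight headers is verified (the last header always is), and on failure the returned index pinpoints the offending header. bc and headers are assumed to be an existing *BlockChain and a contiguous []*types.Header.

if n, err := bc.InsertHeaderChain(headers, 8); err != nil {
	glog.V(logger.Error).Infof("header #%d failed verification: %v", n, err)
}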
+
+// writeHeader writes a header into the local chain, given that its parent is
+// already known. If the total difficulty of the newly inserted header becomes
+// greater than the current known TD, the canonical chain is re-routed.
+//
+// Note: This method is not concurrent-safe with inserting blocks simultaneously
+// into the chain, as side effects caused by reorganisations cannot be emulated
+// without the real blocks. Hence, writing headers directly should only be done
+// in two scenarios: pure-header mode of operation (light clients), or properly
+// separated header/block phases (non-archive clients).
+func (self *BlockChain) writeHeader(header *types.Header) error {
+ self.wg.Add(1)
+ defer self.wg.Done()
+
+ self.mu.Lock()
+ defer self.mu.Unlock()
+
+ _, err := self.hc.WriteHeader(header)
+ return err
+}
+
+// CurrentHeader retrieves the current head header of the canonical chain. The
+// header is retrieved from the HeaderChain's internal cache.
+func (self *BlockChain) CurrentHeader() *types.Header {
+ self.mu.RLock()
+ defer self.mu.RUnlock()
+
+ return self.hc.CurrentHeader()
+}
+
+// GetTd retrieves a block's total difficulty in the canonical chain from the
+// database by hash, caching it if found.
+func (self *BlockChain) GetTd(hash common.Hash) *big.Int {
+ return self.hc.GetTd(hash)
+}
+
+// GetHeader retrieves a block header from the database by hash, caching it if
+// found.
+func (self *BlockChain) GetHeader(hash common.Hash) *types.Header {
+ return self.hc.GetHeader(hash)
+}
+
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (bc *BlockChain) HasHeader(hash common.Hash) bool {
+ return bc.hc.HasHeader(hash)
+}
+
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+ return self.hc.GetBlockHashesFromHash(hash, max)
+}
+
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
+ return self.hc.GetHeaderByNumber(number)
+}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 1bb5f646d..310751cca 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -25,6 +25,7 @@ import (
"runtime"
"strconv"
"testing"
+ "time"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/common"
@@ -471,11 +472,16 @@ func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.B
func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
var eventMux event.TypeMux
- bc := &BlockChain{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}, rand: rand.New(rand.NewSource(0))}
- bc.headerCache, _ = lru.New(100)
+ bc := &BlockChain{
+ chainDb: db,
+ genesisBlock: genesis,
+ eventMux: &eventMux,
+ pow: FakePow{},
+ }
+ valFn := func() HeaderValidator { return bc.Validator() }
+ bc.hc, _ = NewHeaderChain(db, valFn, bc.getProcInterrupt)
bc.bodyCache, _ = lru.New(100)
bc.bodyRLPCache, _ = lru.New(100)
- bc.tdCache, _ = lru.New(100)
bc.blockCache, _ = lru.New(100)
bc.futureBlocks, _ = lru.New(100)
bc.SetValidator(bproc{})
@@ -1006,3 +1012,82 @@ func TestLogReorgs(t *testing.T) {
t.Error("expected logs")
}
}
+
+func TestReorgSideEvent(t *testing.T) {
+ var (
+ db, _ = ethdb.NewMemDatabase()
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ genesis = WriteGenesisBlockForTesting(db, GenesisAccount{addr1, big.NewInt(10000000000000)})
+ )
+
+ evmux := &event.TypeMux{}
+ blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
+
+ chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
+ if i == 2 {
+ gen.OffsetTime(9)
+ }
+ })
+ if _, err := blockchain.InsertChain(chain); err != nil {
+ t.Fatalf("failed to insert chain: %v", err)
+ }
+
+ replacementBlocks, _ := GenerateChain(genesis, db, 4, func(i int, gen *BlockGen) {
+ tx, err := types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), nil).SignECDSA(key1)
+ if err != nil {
+ t.Fatalf("failed to create tx: %v", err)
+ }
+ gen.AddTx(tx)
+ })
+
+ subs := evmux.Subscribe(ChainSideEvent{})
+ if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
+ t.Fatalf("failed to insert chain: %v", err)
+ }
+
+	// The first two blocks of the secondary chain are, for a brief moment,
+	// considered side-chain blocks, because up to that point the original
+	// chain is still the heavier one.
+ expectedSideHashes := map[common.Hash]bool{
+ replacementBlocks[0].Hash(): true,
+ replacementBlocks[1].Hash(): true,
+ chain[0].Hash(): true,
+ chain[1].Hash(): true,
+ chain[2].Hash(): true,
+ }
+
+ i := 0
+
+ const timeoutDura = 10 * time.Second
+ timeout := time.NewTimer(timeoutDura)
+done:
+ for {
+ select {
+ case ev := <-subs.Chan():
+ block := ev.Data.(ChainSideEvent).Block
+ if _, ok := expectedSideHashes[block.Hash()]; !ok {
+ t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
+ }
+ i++
+
+ if i == len(expectedSideHashes) {
+ timeout.Stop()
+
+ break done
+ }
+ timeout.Reset(timeoutDura)
+
+ case <-timeout.C:
+			t.Fatal("timeout: not all expected blocks were announced in a side event")
+ }
+ }
+
+ // make sure no more events are fired
+ select {
+ case e := <-subs.Chan():
+		t.Errorf("unexpected event fired: %v", e)
+ case <-time.After(250 * time.Millisecond):
+ }
+
+}
diff --git a/core/headerchain.go b/core/headerchain.go
new file mode 100644
index 000000000..8528801a7
--- /dev/null
+++ b/core/headerchain.go
@@ -0,0 +1,432 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package core
+
+import (
+ crand "crypto/rand"
+ "math"
+ "math/big"
+ mrand "math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/logger"
+ "github.com/ethereum/go-ethereum/logger/glog"
+ "github.com/ethereum/go-ethereum/pow"
+ "github.com/hashicorp/golang-lru"
+)
+
+// HeaderChain implements the basic block header chain logic that is shared by
+// core.BlockChain and light.LightChain. It is not usable on its own, only as
+// part of either structure.
+// It is also not thread safe; the encapsulating chain structures should do
+// the necessary mutex locking/unlocking.
+type HeaderChain struct {
+ chainDb ethdb.Database
+ genesisHeader *types.Header
+
+ currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
+ headerCache *lru.Cache // Cache for the most recent block headers
+ tdCache *lru.Cache // Cache for the most recent block total difficulties
+
+ procInterrupt func() bool
+
+ rand *mrand.Rand
+ getValidator getHeaderValidatorFn
+}
+
+// getHeaderValidatorFn returns a HeaderValidator interface
+type getHeaderValidatorFn func() HeaderValidator
+
+// NewHeaderChain creates a new HeaderChain structure. getValidator should
+// return the parent's validator, and procInterrupt points to the parent's
+// interrupt semaphore.
+func NewHeaderChain(chainDb ethdb.Database, getValidator getHeaderValidatorFn, procInterrupt func() bool) (*HeaderChain, error) {
+ headerCache, _ := lru.New(headerCacheLimit)
+ tdCache, _ := lru.New(tdCacheLimit)
+
+	// Seed a fast random generator from a cryptographically secure source
+ seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+ if err != nil {
+ return nil, err
+ }
+
+ hc := &HeaderChain{
+ chainDb: chainDb,
+ headerCache: headerCache,
+ tdCache: tdCache,
+ procInterrupt: procInterrupt,
+ rand: mrand.New(mrand.NewSource(seed.Int64())),
+ getValidator: getValidator,
+ }
+
+ hc.genesisHeader = hc.GetHeaderByNumber(0)
+ if hc.genesisHeader == nil {
+ genesisBlock, err := WriteDefaultGenesisBlock(chainDb)
+ if err != nil {
+ return nil, err
+ }
+ glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
+ hc.genesisHeader = genesisBlock.Header()
+ }
+
+ hc.currentHeader = hc.genesisHeader
+ if head := GetHeadBlockHash(chainDb); head != (common.Hash{}) {
+ if chead := hc.GetHeader(head); chead != nil {
+ hc.currentHeader = chead
+ }
+ }
+
+ return hc, nil
+}
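
A standalone construction sketch under the same assumptions the tests use (in-memory database, FakePow, a never-firing interrupt). The validator is supplied through a closure so that it can reference the chain being created:

// The closure defers dereferencing hc until validation time, after
// NewHeaderChain has assigned it.
var hc *HeaderChain
valFn := func() HeaderValidator { return NewHeaderValidator(hc, FakePow{}) }

db, _ := ethdb.NewMemDatabase()
hc, err := NewHeaderChain(db, valFn, func() bool { return false })
if err != nil {
	glog.Fatalf("failed to create header chain: %v", err)
}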
+
+// WriteHeader writes a header into the local chain, given that its parent is
+// already known. If the total difficulty of the newly inserted header becomes
+// greater than the current known TD, the canonical chain is re-routed.
+//
+// Note: This method is not concurrent-safe with inserting blocks simultaneously
+// into the chain, as side effects caused by reorganisations cannot be emulated
+// without the real blocks. Hence, writing headers directly should only be done
+// in two scenarios: pure-header mode of operation (light clients), or properly
+// separated header/block phases (non-archive clients).
+func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, err error) {
+ // Calculate the total difficulty of the header
+ ptd := hc.GetTd(header.ParentHash)
+ if ptd == nil {
+ return NonStatTy, ParentError(header.ParentHash)
+ }
+ localTd := hc.GetTd(hc.currentHeader.Hash())
+ externTd := new(big.Int).Add(header.Difficulty, ptd)
+
+	// If the total difficulty is higher than our known one, add it to the canonical chain.
+	// The second clause of the if statement reduces the vulnerability to selfish mining;
+	// see http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf for details.
+ if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
+ // Delete any canonical number assignments above the new head
+ for i := header.Number.Uint64() + 1; GetCanonicalHash(hc.chainDb, i) != (common.Hash{}); i++ {
+ DeleteCanonicalHash(hc.chainDb, i)
+ }
+ // Overwrite any stale canonical number assignments
+ head := hc.GetHeader(header.ParentHash)
+ for GetCanonicalHash(hc.chainDb, head.Number.Uint64()) != head.Hash() {
+ WriteCanonicalHash(hc.chainDb, head.Hash(), head.Number.Uint64())
+ head = hc.GetHeader(head.ParentHash)
+ }
+ // Extend the canonical chain with the new header
+ if err := WriteCanonicalHash(hc.chainDb, header.Hash(), header.Number.Uint64()); err != nil {
+ glog.Fatalf("failed to insert header number: %v", err)
+ }
+ if err := WriteHeadHeaderHash(hc.chainDb, header.Hash()); err != nil {
+ glog.Fatalf("failed to insert head header hash: %v", err)
+ }
+ hc.currentHeader = types.CopyHeader(header)
+ status = CanonStatTy
+ } else {
+ status = SideStatTy
+ }
+	// Regardless of the canonical status, write the header itself to the database
+ if err := WriteTd(hc.chainDb, header.Hash(), externTd); err != nil {
+ glog.Fatalf("failed to write header total difficulty: %v", err)
+ }
+ if err := WriteHeader(hc.chainDb, header); err != nil {
+ glog.Fatalf("failed to write header contents: %v", err)
+ }
+ return
+}
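
The fork choice above, isolated as a worked example with illustrative numbers: a header becomes canonical when its cumulative difficulty strictly exceeds the local head's, and a 50/50 coin flip decides exact ties to blunt selfish mining.

ptd := big.NewInt(1000)                           // parent's total difficulty
localTd := big.NewInt(1024)                       // current head's total difficulty
externTd := new(big.Int).Add(big.NewInt(25), ptd) // header difficulty 25 -> TD 1025

canonical := externTd.Cmp(localTd) > 0 ||
	(externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5)
_ = canonical // true here: 1025 > 1024, no coin flip needed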
+
+// WhCallback is a callback function for inserting individual headers.
+// A callback is used for two reasons: first, in a LightChain, status should be
+// processed and light chain events sent, while in a BlockChain this is not
+// necessary since chain events are sent after inserting blocks. Second, the
+// header writes should be protected by the parent chain mutex individually.
+type WhCallback func(*types.Header) error
+
+// InsertHeaderChain attempts to insert the given header chain into the local
+// chain, possibly creating a reorg. If an error is returned, it will return
+// the index number of the failing header as well as an error describing what
+// went wrong.
+//
+// The checkFreq parameter can be used to fine tune whether nonce verification
+// should be done or not. The reason behind the optional check is that some of
+// the header retrieval mechanisms already need to verify nonces, and nonces
+// can also be verified sparsely, without needing to check each one.
+func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, writeHeader WhCallback) (int, error) {
+ // Collect some import statistics to report on
+ stats := struct{ processed, ignored int }{}
+ start := time.Now()
+
+ // Generate the list of headers that should be POW verified
+ verify := make([]bool, len(chain))
+ for i := 0; i < len(verify)/checkFreq; i++ {
+ index := i*checkFreq + hc.rand.Intn(checkFreq)
+ if index >= len(verify) {
+ index = len(verify) - 1
+ }
+ verify[index] = true
+ }
+ verify[len(verify)-1] = true // Last should always be verified to avoid junk
+
+ // Create the header verification task queue and worker functions
+ tasks := make(chan int, len(chain))
+ for i := 0; i < len(chain); i++ {
+ tasks <- i
+ }
+ close(tasks)
+
+ errs, failed := make([]error, len(tasks)), int32(0)
+ process := func(worker int) {
+ for index := range tasks {
+ header, hash := chain[index], chain[index].Hash()
+
+ // Short circuit insertion if shutting down or processing failed
+ if hc.procInterrupt() {
+ return
+ }
+ if atomic.LoadInt32(&failed) > 0 {
+ return
+ }
+ // Short circuit if the header is bad or already known
+ if BadHashes[hash] {
+ errs[index] = BadHashError(hash)
+ atomic.AddInt32(&failed, 1)
+ return
+ }
+ if hc.HasHeader(hash) {
+ continue
+ }
+ // Verify that the header honors the chain parameters
+ checkPow := verify[index]
+
+ var err error
+ if index == 0 {
+ err = hc.getValidator().ValidateHeader(header, hc.GetHeader(header.ParentHash), checkPow)
+ } else {
+ err = hc.getValidator().ValidateHeader(header, chain[index-1], checkPow)
+ }
+ if err != nil {
+ errs[index] = err
+ atomic.AddInt32(&failed, 1)
+ return
+ }
+ }
+ }
+	// Start one verification worker per allowed processor (GOMAXPROCS)
+ pending := new(sync.WaitGroup)
+ for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+ pending.Add(1)
+ go func(id int) {
+ defer pending.Done()
+ process(id)
+ }(i)
+ }
+ pending.Wait()
+
+ // If anything failed, report
+ if failed > 0 {
+ for i, err := range errs {
+ if err != nil {
+ return i, err
+ }
+ }
+ }
+ // All headers passed verification, import them into the database
+ for i, header := range chain {
+ // Short circuit insertion if shutting down
+ if hc.procInterrupt() {
+ glog.V(logger.Debug).Infoln("premature abort during header chain processing")
+ break
+ }
+ hash := header.Hash()
+
+ // If the header's already known, skip it, otherwise store
+ if hc.HasHeader(hash) {
+ stats.ignored++
+ continue
+ }
+ if err := writeHeader(header); err != nil {
+ return i, err
+ }
+ stats.processed++
+ }
+ // Report some public statistics so the user has a clue what's going on
+ first, last := chain[0], chain[len(chain)-1]
+ glog.V(logger.Info).Infof("imported %d header(s) (%d ignored) in %v. #%v [%x… / %x…]", stats.processed, stats.ignored,
+ time.Since(start), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
+
+ return 0, nil
+}
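
The sparse verification schedule in isolation: for a ten-header chain with checkFreq = 4, one random index is marked in each complete window of four, and the final header is always forced (the real code draws from hc.rand rather than the global source).

chainLen, checkFreq := 10, 4
verify := make([]bool, chainLen)
for i := 0; i < chainLen/checkFreq; i++ { // two full windows: [0..3] and [4..7]
	index := i*checkFreq + mrand.Intn(checkFreq)
	if index >= chainLen {
		index = chainLen - 1
	}
	verify[index] = true
}
verify[chainLen-1] = true // headers 8 and 9 have no window of their own; 9 is forced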
+
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+ // Get the origin header from which to fetch
+ header := hc.GetHeader(hash)
+ if header == nil {
+ return nil
+ }
+ // Iterate the headers until enough is collected or the genesis reached
+ chain := make([]common.Hash, 0, max)
+ for i := uint64(0); i < max; i++ {
+ if header = hc.GetHeader(header.ParentHash); header == nil {
+ break
+ }
+ chain = append(chain, header.Hash())
+ if header.Number.Cmp(common.Big0) == 0 {
+ break
+ }
+ }
+ return chain
+}
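
For example, fetching up to 16 ancestor hashes of a known header, newest first (head is assumed to be an existing *types.Header):

ancestors := hc.GetBlockHashesFromHash(head.Hash(), 16)
// ancestors[0] is head's parent; the slice ends early once genesis is reached.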
+
+// GetTd retrieves a block's total difficulty in the canonical chain from the
+// database by hash, caching it if found.
+func (hc *HeaderChain) GetTd(hash common.Hash) *big.Int {
+ // Short circuit if the td's already in the cache, retrieve otherwise
+ if cached, ok := hc.tdCache.Get(hash); ok {
+ return cached.(*big.Int)
+ }
+ td := GetTd(hc.chainDb, hash)
+ if td == nil {
+ return nil
+ }
+	// Cache the found TD for next time and return
+ hc.tdCache.Add(hash, td)
+ return td
+}
+
+// GetHeader retrieves a block header from the database by hash, caching it if
+// found.
+func (hc *HeaderChain) GetHeader(hash common.Hash) *types.Header {
+ // Short circuit if the header's already in the cache, retrieve otherwise
+ if header, ok := hc.headerCache.Get(hash); ok {
+ return header.(*types.Header)
+ }
+ header := GetHeader(hc.chainDb, hash)
+ if header == nil {
+ return nil
+ }
+ // Cache the found header for next time and return
+ hc.headerCache.Add(header.Hash(), header)
+ return header
+}
+
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (hc *HeaderChain) HasHeader(hash common.Hash) bool {
+ return hc.GetHeader(hash) != nil
+}
+
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
+ hash := GetCanonicalHash(hc.chainDb, number)
+ if hash == (common.Hash{}) {
+ return nil
+ }
+ return hc.GetHeader(hash)
+}
+
+// CurrentHeader retrieves the current head header of the canonical chain. The
+// header is retrieved from the HeaderChain's internal cache.
+func (hc *HeaderChain) CurrentHeader() *types.Header {
+ return hc.currentHeader
+}
+
+// SetCurrentHeader sets the current head header of the canonical chain.
+func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
+ if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
+ glog.Fatalf("failed to insert head header hash: %v", err)
+ }
+ hc.currentHeader = head
+}
+
+// DeleteCallback is a callback function that is called by SetHead before
+// each header is deleted.
+type DeleteCallback func(common.Hash)
+
+// SetHead rewinds the local chain to a new head. Everything above the new head
+// will be deleted and the new one set.
+func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
+ height := uint64(0)
+ if hc.currentHeader != nil {
+ height = hc.currentHeader.Number.Uint64()
+ }
+
+ for hc.currentHeader != nil && hc.currentHeader.Number.Uint64() > head {
+ hash := hc.currentHeader.Hash()
+ if delFn != nil {
+ delFn(hash)
+ }
+ DeleteHeader(hc.chainDb, hash)
+ DeleteTd(hc.chainDb, hash)
+ hc.currentHeader = hc.GetHeader(hc.currentHeader.ParentHash)
+ }
+ // Roll back the canonical chain numbering
+ for i := height; i > head; i-- {
+ DeleteCanonicalHash(hc.chainDb, i)
+ }
+ // Clear out any stale content from the caches
+ hc.headerCache.Purge()
+ hc.tdCache.Purge()
+
+ if hc.currentHeader == nil {
+ hc.currentHeader = hc.genesisHeader
+ }
+ if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeader.Hash()); err != nil {
+ glog.Fatalf("failed to reset head header hash: %v", err)
+ }
+}
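
A hypothetical DeleteCallback: the encapsulating chain can drop its own per-hash data for every rewound header. DeleteBody is an assumed helper mirroring the DeleteHeader/DeleteTd calls above, and bc stands for an enclosing *BlockChain.

delFn := func(hash common.Hash) {
	DeleteBody(bc.chainDb, hash) // assumed helper with the same (db, hash) shape as DeleteHeader
}
bc.hc.SetHead(42, delFn) // rewind headers and their bodies down to height 42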
+
+// SetGenesis sets a new genesis block header for the chain
+func (hc *HeaderChain) SetGenesis(head *types.Header) {
+ hc.genesisHeader = head
+}
+
+// headerValidator is responsible for validating block headers
+//
+// headerValidator implements HeaderValidator.
+type headerValidator struct {
+ hc *HeaderChain // Canonical header chain
+ Pow pow.PoW // Proof of work used for validating
+}
+
+// NewHeaderValidator returns a new header validator which is safe for re-use
+func NewHeaderValidator(chain *HeaderChain, pow pow.PoW) HeaderValidator {
+ return &headerValidator{
+ Pow: pow,
+ hc: chain,
+ }
+}
+
+// ValidateHeader validates the given header and, depending on the pow arg,
+// checks the proof of work of the given header. Returns an error if the
+// validation failed.
+func (v *headerValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
+ // Short circuit if the parent is missing.
+ if parent == nil {
+ return ParentError(header.ParentHash)
+ }
+	// Short circuit if the header is already known (its parent was checked above)
+ if v.hc.HasHeader(header.Hash()) {
+ return nil
+ }
+ return ValidateHeader(v.Pow, header, parent, checkPow, false)
+}
diff --git a/core/types.go b/core/types.go
index 027f628b1..60eb15662 100644
--- a/core/types.go
+++ b/core/types.go
@@ -38,14 +38,22 @@ import (
// ValidateHeader validates the given header and parent and returns an error
// if it failed to do so.
//
-// ValidateStack validates the given statedb and optionally the receipts and
+// ValidateState validates the given statedb and optionally the receipts and
// gas used. The implementor should decide what to do with the given input.
type Validator interface {
+ HeaderValidator
ValidateBlock(block *types.Block) error
- ValidateHeader(header, parent *types.Header, checkPow bool) error
ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
}
+// HeaderValidator is an interface for validating headers only
+//
+// ValidateHeader validates the given header and parent and returns an error
+// if it failed to do so.
+type HeaderValidator interface {
+ ValidateHeader(header, parent *types.Header, checkPow bool) error
+}
+
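+
Because Validator embeds HeaderValidator, a full block validator can now be handed to any consumer that only needs header checks; fullValidator below is a hypothetical value satisfying Validator.

var hv HeaderValidator = fullValidator
if err := hv.ValidateHeader(header, parent, true); err != nil { // checkPow = true
	glog.V(logger.Error).Infof("header validation failed: %v", err)
}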
// Processor is an interface for processing blocks using a given initial state.
//
// Process takes the block to be processed and the statedb upon which the