Diffstat (limited to 'Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache')
-rw-r--r--   Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go    30
-rw-r--r--   Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go         713
-rw-r--r--   Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go    564
-rw-r--r--   Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go   246
-rw-r--r--   Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go           195
-rw-r--r--   Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go     354
6 files changed, 1298 insertions, 804 deletions
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
new file mode 100644
index 000000000..175e22203
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !go1.2
+
+package cache
+
+import (
+ "math/rand"
+ "testing"
+)
+
+func BenchmarkLRUCache(b *testing.B) {
+ c := NewCache(NewLRU(10000))
+
+ b.SetParallelism(10)
+ b.RunParallel(func(pb *testing.PB) {
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ for pb.Next() {
+ key := uint64(r.Intn(1000000))
+ c.Get(0, key, func() (int, Value) {
+ return 1, key
+ }).Release()
+ }
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
index 9b6a74977..c9670de5d 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -8,118 +8,669 @@
package cache
import (
+ "sync"
"sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
)
-// SetFunc used by Namespace.Get method to create a cache object. SetFunc
-// may return ok false, in that case the cache object will not be created.
-type SetFunc func() (ok bool, value interface{}, charge int, fin SetFin)
+// Cacher provides an interface for implementing caching functionality.
+// An implementation must be goroutine-safe.
+type Cacher interface {
+ // Capacity returns cache capacity.
+ Capacity() int
-// SetFin will be called when corresponding cache object are released.
-type SetFin func()
+ // SetCapacity sets cache capacity.
+ SetCapacity(capacity int)
-// DelFin will be called when corresponding cache object are released.
-// DelFin will be called after SetFin. The exist is true if the corresponding
-// cache object is actually exist in the cache tree.
-type DelFin func(exist bool)
+ // Promote promotes the 'cache node'.
+ Promote(n *Node)
-// PurgeFin will be called when corresponding cache object are released.
-// PurgeFin will be called after SetFin. If PurgeFin present DelFin will
-// not be executed but passed to the PurgeFin, it is up to the caller
-// to call it or not.
-type PurgeFin func(ns, key uint64, delfin DelFin)
+ // Ban evicts the 'cache node' and prevents subsequent 'promote'.
+ Ban(n *Node)
-// Cache is a cache tree.
-type Cache interface {
- // SetCapacity sets cache capacity.
- SetCapacity(capacity int)
+ // Evict evicts the 'cache node'.
+ Evict(n *Node)
- // GetNamespace gets or creates a cache namespace for the given id.
- GetNamespace(id uint64) Namespace
+ // EvictNS evicts 'cache nodes' with the given namespace.
+ EvictNS(ns uint64)
- // Purge purges all cache namespaces, read Namespace.Purge method documentation.
- Purge(fin PurgeFin)
+ // EvictAll evicts all 'cache nodes'.
+ EvictAll()
+
+ // Close closes the 'cache tree'.
+ Close() error
+}
+
+// Value is a 'cacheable object'. It may implement util.Releaser; if
+// so, the Release method will be called once the object is released.
+type Value interface{}
+
+// CacheGetter is a Cache wrapper that binds the Get method to a fixed namespace.
+type CacheGetter struct {
+ Cache *Cache
+ NS uint64
+}
- // Zap zaps all cache namespaces, read Namespace.Zap method documentation.
- Zap(closed bool)
+func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+ return g.Cache.Get(g.NS, key, setFunc)
}
-// Namespace is a cache namespace.
-type Namespace interface {
- // Get gets cache object for the given key. The given SetFunc (if not nil) will
- // be called if the given key does not exist.
- // If the given key does not exist, SetFunc is nil or SetFunc return ok false, Get
- // will return ok false.
- Get(key uint64, setf SetFunc) (obj Object, ok bool)
+// The hash table implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
- // Get deletes cache object for the given key. If exist the cache object will
- // be deleted later when all of its handles have been released (i.e. no one use
- // it anymore) and the given DelFin (if not nil) will finally be executed. If
- // such cache object does not exist the given DelFin will be executed anyway.
- //
- // Delete returns true if such cache object exist.
- Delete(key uint64, fin DelFin) bool
+const (
+ mInitialSize = 1 << 4
+ mOverflowThreshold = 1 << 5
+ mOverflowGrowThreshold = 1 << 7
+)
- // Purge deletes all cache objects, read Delete method documentation.
- Purge(fin PurgeFin)
+type mBucket struct {
+ mu sync.Mutex
+ node []*Node
+ frozen bool
+}
- // Zap detaches the namespace from the cache tree and delete all its cache
- // objects. The cache objects deletion and finalizers execution are happen
- // immediately, even if its existing handles haven't yet been released.
- // A zapped namespace can't never be filled again.
- // If closed is false then the Get function will always call the given SetFunc
- // if it is not nil, but resultant of the SetFunc will not be cached.
- Zap(closed bool)
+func (b *mBucket) freeze() []*Node {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if !b.frozen {
+ b.frozen = true
+ }
+ return b.node
}
-// Object is a cache object.
-type Object interface {
- // Release releases the cache object. Other methods should not be called
- // after the cache object has been released.
- Release()
+func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ for _, n := range b.node {
+ if n.hash == hash && n.ns == ns && n.key == key {
+ atomic.AddInt32(&n.ref, 1)
+ b.mu.Unlock()
+ return true, false, n
+ }
+ }
+
+ // Get only.
+ if noset {
+ b.mu.Unlock()
+ return true, false, nil
+ }
+
+ // Create node.
+ n = &Node{
+ r: r,
+ hash: hash,
+ ns: ns,
+ key: key,
+ ref: 1,
+ }
+ // Add node to bucket.
+ b.node = append(b.node, n)
+ bLen := len(b.node)
+ b.mu.Unlock()
+
+ // Update counter.
+ grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
+ if bLen > mOverflowThreshold {
+ grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
+ }
- // Value returns value of the cache object.
- Value() interface{}
+ // Grow.
+ if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) << 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+
+ return true, true, n
}
-// Namespace state.
-type nsState int
+func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
+ b.mu.Lock()
-const (
- nsEffective nsState = iota
- nsZapped
- nsClosed
-)
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
-// Node state.
-type nodeState int
+ // Scan the node.
+ var (
+ n *Node
+ bLen int
+ )
+ for i := range b.node {
+ n = b.node[i]
+ if n.ns == ns && n.key == key {
+ if atomic.LoadInt32(&n.ref) == 0 {
+ deleted = true
-const (
- nodeEffective nodeState = iota
- nodeEvicted
- nodeRemoved
-)
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Remove node from bucket.
+ b.node = append(b.node[:i], b.node[i+1:]...)
+ bLen = len(b.node)
+ }
+ break
+ }
+ }
+ b.mu.Unlock()
-// Fake object.
-type fakeObject struct {
- value interface{}
- fin func()
- once uint32
+ if deleted {
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+
+ // Update counter.
+ atomic.AddInt32(&r.size, int32(n.size)*-1)
+ shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
+ if bLen >= mOverflowThreshold {
+ atomic.AddInt32(&h.overflow, -1)
+ }
+
+ // Shrink.
+ if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) >> 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+ }
+
+ return true, deleted
}
-func (o *fakeObject) Value() interface{} {
- if atomic.LoadUint32(&o.once) == 0 {
- return o.value
+type mNode struct {
+ buckets []unsafe.Pointer // []*mBucket
+ mask uint32
+ pred unsafe.Pointer // *mNode
+ resizeInProgess int32
+
+ overflow int32
+ growThreshold int32
+ shrinkThreshold int32
+}
+
+func (n *mNode) initBucket(i uint32) *mBucket {
+ if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
+ return b
+ }
+
+ p := (*mNode)(atomic.LoadPointer(&n.pred))
+ if p != nil {
+ var node []*Node
+ if n.mask > p.mask {
+ // Grow.
+ pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
+ if pb == nil {
+ pb = p.initBucket(i & p.mask)
+ }
+ m := pb.freeze()
+ // Split nodes.
+ for _, x := range m {
+ if x.hash&n.mask == i {
+ node = append(node, x)
+ }
+ }
+ } else {
+ // Shrink.
+ pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
+ if pb0 == nil {
+ pb0 = p.initBucket(i)
+ }
+ pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
+ if pb1 == nil {
+ pb1 = p.initBucket(i + uint32(len(n.buckets)))
+ }
+ m0 := pb0.freeze()
+ m1 := pb1.freeze()
+ // Merge nodes.
+ node = make([]*Node, 0, len(m0)+len(m1))
+ node = append(node, m0...)
+ node = append(node, m1...)
+ }
+ b := &mBucket{node: node}
+ if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
+ if len(node) > mOverflowThreshold {
+ atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
+ }
+ return b
+ }
+ }
+
+ return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
+}
+
+func (n *mNode) initBuckets() {
+ for i := range n.buckets {
+ n.initBucket(uint32(i))
+ }
+ atomic.StorePointer(&n.pred, nil)
+}
+
+// Cache is a 'cache map'.
+type Cache struct {
+ mu sync.RWMutex
+ mHead unsafe.Pointer // *mNode
+ nodes int32
+ size int32
+ cacher Cacher
+ closed bool
+}
+
+// NewCache creates a new 'cache map'. The cacher is optional and
+// may be nil.
+func NewCache(cacher Cacher) *Cache {
+ h := &mNode{
+ buckets: make([]unsafe.Pointer, mInitialSize),
+ mask: mInitialSize - 1,
+ growThreshold: int32(mInitialSize * mOverflowThreshold),
+ shrinkThreshold: 0,
+ }
+ for i := range h.buckets {
+ h.buckets[i] = unsafe.Pointer(&mBucket{})
+ }
+ r := &Cache{
+ mHead: unsafe.Pointer(h),
+ cacher: cacher,
+ }
+ return r
+}
+
+func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
+ h := (*mNode)(atomic.LoadPointer(&r.mHead))
+ i := hash & h.mask
+ b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
+ if b == nil {
+ b = h.initBucket(i)
+ }
+ return h, b
+}
+
+func (r *Cache) delete(n *Node) bool {
+ for {
+ h, b := r.getBucket(n.hash)
+ done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
+ if done {
+ return deleted
+ }
+ }
+ return false
+}
+
+// Nodes returns the number of 'cache nodes' in the map.
+func (r *Cache) Nodes() int {
+ return int(atomic.LoadInt32(&r.nodes))
+}
+
+// Size returns the sum of 'cache node' sizes in the map.
+func (r *Cache) Size() int {
+ return int(atomic.LoadInt32(&r.size))
+}
+
+// Capacity returns cache capacity.
+func (r *Cache) Capacity() int {
+ if r.cacher == nil {
+ return 0
+ }
+ return r.cacher.Capacity()
+}
+
+// SetCapacity sets cache capacity.
+func (r *Cache) SetCapacity(capacity int) {
+ if r.cacher != nil {
+ r.cacher.SetCapacity(capacity)
+ }
+}
+
+// Get gets the 'cache node' with the given namespace and key.
+// If the 'cache node' is not found and setFunc is not nil, Get will atomically
+// create the 'cache node' by calling setFunc. Otherwise Get returns nil.
+//
+// The returned 'cache handle' should be released after use by calling its
+// Release method.
+func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return nil
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
+ if done {
+ if n != nil {
+ n.mu.Lock()
+ if n.value == nil {
+ if setFunc == nil {
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+
+ n.size, n.value = setFunc()
+ if n.value == nil {
+ n.size = 0
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+ atomic.AddInt32(&r.size, int32(n.size))
+ }
+ n.mu.Unlock()
+ if r.cacher != nil {
+ r.cacher.Promote(n)
+ }
+ return &Handle{unsafe.Pointer(n)}
+ }
+
+ break
+ }
}
return nil
}
-func (o *fakeObject) Release() {
- if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
+// Delete removes and bans the 'cache node' with the given namespace and key.
+// A banned 'cache node' will never be inserted into the 'cache tree'. The ban
+// applies only to that particular 'cache node', so when a 'cache node'
+// is recreated it will not be banned.
+//
+// If onDel is not nil, then it will be executed if such a 'cache node'
+// doesn't exist or once the 'cache node' is released.
+//
+// Delete returns true if such a 'cache node' exists.
+func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if onDel != nil {
+ n.mu.Lock()
+ n.onDel = append(n.onDel, onDel)
+ n.mu.Unlock()
+ }
+ if r.cacher != nil {
+ r.cacher.Ban(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ if onDel != nil {
+ onDel()
+ }
+
+ return false
+}
+
+// Evict evicts the 'cache node' with the given namespace and key. This will
+// simply call Cacher.Evict.
+//
+// Evict returns true if such a 'cache node' exists.
+func (r *Cache) Evict(ns, key uint64) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if r.cacher != nil {
+ r.cacher.Evict(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ return false
+}
+
+// EvictNS evicts 'cache nodes' with the given namespace. This will
+// simply call Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictNS(ns)
+ }
+}
+
+// EvictAll evicts all 'cache nodes'. This will simply call Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
return
}
- if o.fin != nil {
- o.fin()
- o.fin = nil
+
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ }
+}
+
+// Close closes the 'cache map' and releases all 'cache nodes'.
+func (r *Cache) Close() error {
+ r.mu.Lock()
+ if !r.closed {
+ r.closed = true
+
+ if r.cacher != nil {
+ if err := r.cacher.Close(); err != nil {
+ return err
+ }
+ }
+
+ h := (*mNode)(r.mHead)
+ h.initBuckets()
+
+ for i := range h.buckets {
+ b := (*mBucket)(h.buckets[i])
+ for _, n := range b.node {
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+ }
+ }
}
+ r.mu.Unlock()
+ return nil
+}
+
+// Node is a 'cache node'.
+type Node struct {
+ r *Cache
+
+ hash uint32
+ ns, key uint64
+
+ mu sync.Mutex
+ size int
+ value Value
+
+ ref int32
+ onDel []func()
+
+ CacheData unsafe.Pointer
+}
+
+// NS returns this 'cache node' namespace.
+func (n *Node) NS() uint64 {
+ return n.ns
+}
+
+// Key returns this 'cache node' key.
+func (n *Node) Key() uint64 {
+ return n.key
+}
+
+// Size returns this 'cache node' size.
+func (n *Node) Size() int {
+ return n.size
+}
+
+// Value returns this 'cache node' value.
+func (n *Node) Value() Value {
+ return n.value
+}
+
+// Ref returns this 'cache node' ref counter.
+func (n *Node) Ref() int32 {
+ return atomic.LoadInt32(&n.ref)
+}
+
+// GetHandle returns a handle for this 'cache node'.
+func (n *Node) GetHandle() *Handle {
+ if atomic.AddInt32(&n.ref, 1) <= 1 {
+ panic("BUG: Node.GetHandle on zero ref")
+ }
+ return &Handle{unsafe.Pointer(n)}
+}
+
+func (n *Node) unref() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.delete(n)
+ }
+}
+
+func (n *Node) unrefLocked() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.mu.RLock()
+ if !n.r.closed {
+ n.r.delete(n)
+ }
+ n.r.mu.RUnlock()
+ }
+}
+
+// Handle is a 'cache handle' of a 'cache node'; it keeps the node
+// referenced until Release is called.
+type Handle struct {
+ n unsafe.Pointer // *Node
+}
+
+func (h *Handle) Value() Value {
+ n := (*Node)(atomic.LoadPointer(&h.n))
+ if n != nil {
+ return n.value
+ }
+ return nil
+}
+
+func (h *Handle) Release() {
+ nPtr := atomic.LoadPointer(&h.n)
+ if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
+ n := (*Node)(nPtr)
+ n.unrefLocked()
+ }
+}
+
+func murmur32(ns, key uint64, seed uint32) uint32 {
+ const (
+ m = uint32(0x5bd1e995)
+ r = 24
+ )
+
+ k1 := uint32(ns >> 32)
+ k2 := uint32(ns)
+ k3 := uint32(key >> 32)
+ k4 := uint32(key)
+
+ k1 *= m
+ k1 ^= k1 >> r
+ k1 *= m
+
+ k2 *= m
+ k2 ^= k2 >> r
+ k2 *= m
+
+ k3 *= m
+ k3 ^= k3 >> r
+ k3 *= m
+
+ k4 *= m
+ k4 ^= k4 >> r
+ k4 *= m
+
+ h := seed
+
+ h *= m
+ h ^= k1
+ h *= m
+ h ^= k2
+ h *= m
+ h ^= k3
+ h *= m
+ h ^= k4
+
+ h ^= h >> 13
+ h *= m
+ h ^= h >> 15
+
+ return h
}
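
For orientation, here is a minimal usage sketch of the new Cache/Handle API added above. It is illustrative only and not part of this change; it assumes the upstream import path github.com/syndtr/goleveldb/leveldb/cache and uses hypothetical namespace/key values.

// Minimal usage sketch of the new cache API (illustrative only).
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// Back the 'cache map' with an LRU cacher capped at 100 size units.
	c := cache.NewCache(cache.NewLRU(100))
	defer c.Close()

	// On a miss, Get creates the 'cache node' via setFunc; the returned
	// handle pins the value until Release is called.
	h := c.Get(1, 42, func() (size int, value cache.Value) {
		return 1, "hello"
	})
	fmt.Println(h.Value()) // "hello"
	h.Release()

	// A nil setFunc turns Get into a pure lookup.
	if h := c.Get(1, 42, nil); h != nil {
		fmt.Println("hit:", h.Value())
		h.Release()
	}

	// Delete bans the node; onDel runs once the node is fully released.
	c.Delete(1, 42, func() { fmt.Println("deleted") })
}

The handle-based design lets callers keep a value alive across concurrent evictions; the cacher (here the LRU) only influences retention, not the validity of outstanding handles.
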
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
index 07a9939b2..c2a50156f 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
@@ -8,17 +8,289 @@ package cache
import (
"math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
"testing"
+ "time"
+ "unsafe"
)
-func set(ns Namespace, key uint64, value interface{}, charge int, fin func()) Object {
- obj, _ := ns.Get(key, func() (bool, interface{}, int, SetFin) {
- return true, value, charge, fin
+type int32o int32
+
+func (o *int32o) acquire() {
+ if atomic.AddInt32((*int32)(o), 1) != 1 {
+ panic("BUG: invalid ref")
+ }
+}
+
+func (o *int32o) Release() {
+ if atomic.AddInt32((*int32)(o), -1) != 0 {
+ panic("BUG: invalid ref")
+ }
+}
+
+type releaserFunc struct {
+ fn func()
+ value Value
+}
+
+func (r releaserFunc) Release() {
+ if r.fn != nil {
+ r.fn()
+ }
+}
+
+func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
+ return c.Get(ns, key, func() (int, Value) {
+ if relf != nil {
+ return charge, releaserFunc{relf, value}
+ } else {
+ return charge, value
+ }
+ })
+}
+
+func TestCacheMap(t *testing.T) {
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ nsx := []struct {
+ nobjects, nhandles, concurrent, repeat int
+ }{
+ {10000, 400, 50, 3},
+ {100000, 1000, 100, 10},
+ }
+
+ var (
+ objects [][]int32o
+ handles [][]unsafe.Pointer
+ )
+
+ for _, x := range nsx {
+ objects = append(objects, make([]int32o, x.nobjects))
+ handles = append(handles, make([]unsafe.Pointer, x.nhandles))
+ }
+
+ c := NewCache(nil)
+
+ wg := new(sync.WaitGroup)
+ var done int32
+
+ for ns, x := range nsx {
+ for i := 0; i < x.concurrent; i++ {
+ wg.Add(1)
+ go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
+ defer wg.Done()
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ for j := len(objects) * repeat; j >= 0; j-- {
+ key := uint64(r.Intn(len(objects)))
+ h := c.Get(uint64(ns), key, func() (int, Value) {
+ o := &objects[key]
+ o.acquire()
+ return 1, o
+ })
+ if v := h.Value().(*int32o); v != &objects[key] {
+ t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
+ }
+ if objects[key] != 1 {
+ t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
+ }
+ if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
+ h.Release()
+ }
+ }
+ }(ns, i, x.repeat, objects[ns], handles[ns])
+ }
+
+ go func(handles []unsafe.Pointer) {
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ for atomic.LoadInt32(&done) == 0 {
+ i := r.Intn(len(handles))
+ h := (*Handle)(atomic.LoadPointer(&handles[i]))
+ if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
+ h.Release()
+ }
+ time.Sleep(time.Millisecond)
+ }
+ }(handles[ns])
+ }
+
+ go func() {
+ handles := make([]*Handle, 100000)
+ for atomic.LoadInt32(&done) == 0 {
+ for i := range handles {
+ handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
+ return 1, 1
+ })
+ }
+ for _, h := range handles {
+ h.Release()
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ atomic.StoreInt32(&done, 1)
+
+ for _, handles0 := range handles {
+ for i := range handles0 {
+ h := (*Handle)(atomic.LoadPointer(&handles0[i]))
+ if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
+ h.Release()
+ }
+ }
+ }
+
+ for ns, objects0 := range objects {
+ for i, o := range objects0 {
+ if o != 0 {
+ t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
+ }
+ }
+ }
+}
+
+func TestCacheMap_NodesAndSize(t *testing.T) {
+ c := NewCache(nil)
+ if c.Nodes() != 0 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
+ }
+ if c.Size() != 0 {
+ t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
+ }
+ set(c, 0, 1, 1, 1, nil)
+ set(c, 0, 2, 2, 2, nil)
+ set(c, 1, 1, 3, 3, nil)
+ set(c, 2, 1, 4, 1, nil)
+ if c.Nodes() != 4 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
+ }
+ if c.Size() != 7 {
+ t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
+ }
+}
+
+func TestLRUCache_Capacity(t *testing.T) {
+ c := NewCache(NewLRU(10))
+ if c.Capacity() != 10 {
+ t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
+ }
+ set(c, 0, 1, 1, 1, nil).Release()
+ set(c, 0, 2, 2, 2, nil).Release()
+ set(c, 1, 1, 3, 3, nil).Release()
+ set(c, 2, 1, 4, 1, nil).Release()
+ set(c, 2, 2, 5, 1, nil).Release()
+ set(c, 2, 3, 6, 1, nil).Release()
+ set(c, 2, 4, 7, 1, nil).Release()
+ set(c, 2, 5, 8, 1, nil).Release()
+ if c.Nodes() != 7 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
+ }
+ if c.Size() != 10 {
+ t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
+ }
+ c.SetCapacity(9)
+ if c.Capacity() != 9 {
+ t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
+ }
+ if c.Nodes() != 6 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
+ }
+ if c.Size() != 8 {
+ t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
+ }
+}
+
+func TestCacheMap_NilValue(t *testing.T) {
+ c := NewCache(NewLRU(10))
+ h := c.Get(0, 0, func() (size int, value Value) {
+ return 1, nil
})
- return obj
+ if h != nil {
+ t.Error("cache handle is non-nil")
+ }
+ if c.Nodes() != 0 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
+ }
+ if c.Size() != 0 {
+ t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
+ }
}
-func TestCache_HitMiss(t *testing.T) {
+func TestLRUCache_GetLatency(t *testing.T) {
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ const (
+ concurrentSet = 30
+ concurrentGet = 3
+ duration = 3 * time.Second
+ delay = 3 * time.Millisecond
+ maxkey = 100000
+ )
+
+ var (
+ set, getHit, getAll int32
+ getMaxLatency, getDuration int64
+ )
+
+ c := NewCache(NewLRU(5000))
+ wg := &sync.WaitGroup{}
+ until := time.Now().Add(duration)
+ for i := 0; i < concurrentSet; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for time.Now().Before(until) {
+ c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
+ time.Sleep(delay)
+ atomic.AddInt32(&set, 1)
+ return 1, 1
+ }).Release()
+ }
+ }(i)
+ }
+ for i := 0; i < concurrentGet; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for {
+ mark := time.Now()
+ if mark.Before(until) {
+ h := c.Get(0, uint64(r.Intn(maxkey)), nil)
+ latency := int64(time.Now().Sub(mark))
+ m := atomic.LoadInt64(&getMaxLatency)
+ if latency > m {
+ atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
+ }
+ atomic.AddInt64(&getDuration, latency)
+ if h != nil {
+ atomic.AddInt32(&getHit, 1)
+ h.Release()
+ }
+ atomic.AddInt32(&getAll, 1)
+ } else {
+ break
+ }
+ }
+ }(i)
+ }
+
+ wg.Wait()
+ getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
+ t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
+ set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
+
+ if getAvglatency > delay/3 {
+ t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
+ }
+}
+
+func TestLRUCache_HitMiss(t *testing.T) {
cases := []struct {
key uint64
value string
@@ -36,36 +308,37 @@ func TestCache_HitMiss(t *testing.T) {
}
setfin := 0
- c := NewLRUCache(1000)
- ns := c.GetNamespace(0)
+ c := NewCache(NewLRU(1000))
for i, x := range cases {
- set(ns, x.key, x.value, len(x.value), func() {
+ set(c, 0, x.key, x.value, len(x.value), func() {
setfin++
}).Release()
for j, y := range cases {
- r, ok := ns.Get(y.key, nil)
+ h := c.Get(0, y.key, nil)
if j <= i {
// should hit
- if !ok {
+ if h == nil {
t.Errorf("case '%d' iteration '%d' is miss", i, j)
- } else if r.Value().(string) != y.value {
- t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
+ } else {
+ if x := h.Value().(releaserFunc).value.(string); x != y.value {
+ t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
+ }
}
} else {
// should miss
- if ok {
- t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, r.Value().(string))
+ if h != nil {
+ t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
}
}
- if ok {
- r.Release()
+ if h != nil {
+ h.Release()
}
}
}
for i, x := range cases {
finalizerOk := false
- ns.Delete(x.key, func(exist bool) {
+ c.Delete(0, x.key, func() {
finalizerOk = true
})
@@ -74,22 +347,24 @@ func TestCache_HitMiss(t *testing.T) {
}
for j, y := range cases {
- r, ok := ns.Get(y.key, nil)
+ h := c.Get(0, y.key, nil)
if j > i {
// should hit
- if !ok {
+ if h == nil {
t.Errorf("case '%d' iteration '%d' is miss", i, j)
- } else if r.Value().(string) != y.value {
- t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
+ } else {
+ if x := h.Value().(releaserFunc).value.(string); x != y.value {
+ t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
+ }
}
} else {
// should miss
- if ok {
- t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, r.Value().(string))
+ if h != nil {
+ t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
}
}
- if ok {
- r.Release()
+ if h != nil {
+ h.Release()
}
}
}
@@ -100,137 +375,180 @@ func TestCache_HitMiss(t *testing.T) {
}
func TestLRUCache_Eviction(t *testing.T) {
- c := NewLRUCache(12)
- ns := c.GetNamespace(0)
- o1 := set(ns, 1, 1, 1, nil)
- set(ns, 2, 2, 1, nil).Release()
- set(ns, 3, 3, 1, nil).Release()
- set(ns, 4, 4, 1, nil).Release()
- set(ns, 5, 5, 1, nil).Release()
- if r, ok := ns.Get(2, nil); ok { // 1,3,4,5,2
- r.Release()
- }
- set(ns, 9, 9, 10, nil).Release() // 5,2,9
-
- for _, x := range []uint64{9, 2, 5, 1} {
- r, ok := ns.Get(x, nil)
- if !ok {
- t.Errorf("miss for key '%d'", x)
+ c := NewCache(NewLRU(12))
+ o1 := set(c, 0, 1, 1, 1, nil)
+ set(c, 0, 2, 2, 1, nil).Release()
+ set(c, 0, 3, 3, 1, nil).Release()
+ set(c, 0, 4, 4, 1, nil).Release()
+ set(c, 0, 5, 5, 1, nil).Release()
+ if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
+ h.Release()
+ }
+ set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
+
+ for _, key := range []uint64{9, 2, 5, 1} {
+ h := c.Get(0, key, nil)
+ if h == nil {
+ t.Errorf("miss for key '%d'", key)
} else {
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+ if x := h.Value().(int); x != int(key) {
+ t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
- r.Release()
+ h.Release()
}
}
o1.Release()
- for _, x := range []uint64{1, 2, 5} {
- r, ok := ns.Get(x, nil)
- if !ok {
- t.Errorf("miss for key '%d'", x)
+ for _, key := range []uint64{1, 2, 5} {
+ h := c.Get(0, key, nil)
+ if h == nil {
+ t.Errorf("miss for key '%d'", key)
} else {
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+ if x := h.Value().(int); x != int(key) {
+ t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
- r.Release()
+ h.Release()
}
}
- for _, x := range []uint64{3, 4, 9} {
- r, ok := ns.Get(x, nil)
- if ok {
- t.Errorf("hit for key '%d'", x)
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+ for _, key := range []uint64{3, 4, 9} {
+ h := c.Get(0, key, nil)
+ if h != nil {
+ t.Errorf("hit for key '%d'", key)
+ if x := h.Value().(int); x != int(key) {
+ t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
- r.Release()
+ h.Release()
}
}
}
-func TestLRUCache_SetGet(t *testing.T) {
- c := NewLRUCache(13)
- ns := c.GetNamespace(0)
- for i := 0; i < 200; i++ {
- n := uint64(rand.Intn(99999) % 20)
- set(ns, n, n, 1, nil).Release()
- if p, ok := ns.Get(n, nil); ok {
- if p.Value() == nil {
- t.Errorf("key '%d' contains nil value", n)
+func TestLRUCache_Evict(t *testing.T) {
+ c := NewCache(NewLRU(6))
+ set(c, 0, 1, 1, 1, nil).Release()
+ set(c, 0, 2, 2, 1, nil).Release()
+ set(c, 1, 1, 4, 1, nil).Release()
+ set(c, 1, 2, 5, 1, nil).Release()
+ set(c, 2, 1, 6, 1, nil).Release()
+ set(c, 2, 2, 7, 1, nil).Release()
+
+ for ns := 0; ns < 3; ns++ {
+ for key := 1; key < 3; key++ {
+ if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
+ h.Release()
} else {
- got := p.Value().(uint64)
- if got != n {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, got)
- }
+ t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
}
- p.Release()
- } else {
- t.Errorf("key '%d' doesn't exist", n)
}
}
-}
-func TestLRUCache_Purge(t *testing.T) {
- c := NewLRUCache(3)
- ns1 := c.GetNamespace(0)
- o1 := set(ns1, 1, 1, 1, nil)
- o2 := set(ns1, 2, 2, 1, nil)
- ns1.Purge(nil)
- set(ns1, 3, 3, 1, nil).Release()
- for _, x := range []uint64{1, 2, 3} {
- r, ok := ns1.Get(x, nil)
- if !ok {
- t.Errorf("miss for key '%d'", x)
- } else {
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+ if ok := c.Evict(0, 1); !ok {
+ t.Error("first Cache.Evict on #0.1 return false")
+ }
+ if ok := c.Evict(0, 1); ok {
+ t.Error("second Cache.Evict on #0.1 return true")
+ }
+ if h := c.Get(0, 1, nil); h != nil {
+ t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
+ }
+
+ c.EvictNS(1)
+ if h := c.Get(1, 1, nil); h != nil {
+ t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
+ }
+ if h := c.Get(1, 2, nil); h != nil {
+ t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
+ }
+
+ c.EvictAll()
+ for ns := 0; ns < 3; ns++ {
+ for key := 1; key < 3; key++ {
+ if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
+ t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
}
- r.Release()
}
}
- o1.Release()
- o2.Release()
- for _, x := range []uint64{1, 2} {
- r, ok := ns1.Get(x, nil)
- if ok {
- t.Errorf("hit for key '%d'", x)
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
- }
- r.Release()
+}
+
+func TestLRUCache_Delete(t *testing.T) {
+ delFuncCalled := 0
+ delFunc := func() {
+ delFuncCalled++
+ }
+
+ c := NewCache(NewLRU(2))
+ set(c, 0, 1, 1, 1, nil).Release()
+ set(c, 0, 2, 2, 1, nil).Release()
+
+ if ok := c.Delete(0, 1, delFunc); !ok {
+ t.Error("Cache.Delete on #1 return false")
+ }
+ if h := c.Get(0, 1, nil); h != nil {
+ t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
+ }
+ if ok := c.Delete(0, 1, delFunc); ok {
+ t.Error("Cache.Delete on #1 return true")
+ }
+
+ h2 := c.Get(0, 2, nil)
+ if h2 == nil {
+ t.Error("Cache.Get on #2 return nil")
+ }
+ if ok := c.Delete(0, 2, delFunc); !ok {
+ t.Error("(1) Cache.Delete on #2 return false")
+ }
+ if ok := c.Delete(0, 2, delFunc); !ok {
+ t.Error("(2) Cache.Delete on #2 return false")
+ }
+
+ set(c, 0, 3, 3, 1, nil).Release()
+ set(c, 0, 4, 4, 1, nil).Release()
+ c.Get(0, 2, nil).Release()
+
+ for key := 2; key <= 4; key++ {
+ if h := c.Get(0, uint64(key), nil); h != nil {
+ h.Release()
+ } else {
+ t.Errorf("Cache.Get on #%d return nil", key)
}
}
-}
-func BenchmarkLRUCache_SetRelease(b *testing.B) {
- capacity := b.N / 100
- if capacity <= 0 {
- capacity = 10
+ h2.Release()
+ if h := c.Get(0, 2, nil); h != nil {
+ t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
}
- c := NewLRUCache(capacity)
- ns := c.GetNamespace(0)
- b.ResetTimer()
- for i := uint64(0); i < uint64(b.N); i++ {
- set(ns, i, nil, 1, nil).Release()
+
+ if delFuncCalled != 4 {
+ t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
}
}
-func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
- capacity := b.N / 100
- if capacity <= 0 {
- capacity = 10
+func TestLRUCache_Close(t *testing.T) {
+ relFuncCalled := 0
+ relFunc := func() {
+ relFuncCalled++
+ }
+ delFuncCalled := 0
+ delFunc := func() {
+ delFuncCalled++
}
- c := NewLRUCache(capacity)
- ns := c.GetNamespace(0)
- b.ResetTimer()
- na := b.N / 2
- nb := b.N - na
+ c := NewCache(NewLRU(2))
+ set(c, 0, 1, 1, 1, relFunc).Release()
+ set(c, 0, 2, 2, 1, relFunc).Release()
- for i := uint64(0); i < uint64(na); i++ {
- set(ns, i, nil, 1, nil).Release()
+ h3 := set(c, 0, 3, 3, 1, relFunc)
+ if h3 == nil {
+ t.Error("Cache.Get on #3 return nil")
}
+ if ok := c.Delete(0, 3, delFunc); !ok {
+ t.Error("Cache.Delete on #3 return false")
+ }
+
+ c.Close()
- for i := uint64(0); i < uint64(nb); i++ {
- set(ns, i, nil, 1, nil).Release()
+ if relFuncCalled != 3 {
+ t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
+ }
+ if delFuncCalled != 1 {
+ t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go
deleted file mode 100644
index 1fbf81459..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package cache
-
-import (
- "sync"
- "sync/atomic"
-)
-
-type emptyCache struct {
- sync.Mutex
- table map[uint64]*emptyNS
-}
-
-// NewEmptyCache creates a new initialized empty cache.
-func NewEmptyCache() Cache {
- return &emptyCache{
- table: make(map[uint64]*emptyNS),
- }
-}
-
-func (c *emptyCache) GetNamespace(id uint64) Namespace {
- c.Lock()
- defer c.Unlock()
-
- if ns, ok := c.table[id]; ok {
- return ns
- }
-
- ns := &emptyNS{
- cache: c,
- id: id,
- table: make(map[uint64]*emptyNode),
- }
- c.table[id] = ns
- return ns
-}
-
-func (c *emptyCache) Purge(fin PurgeFin) {
- c.Lock()
- for _, ns := range c.table {
- ns.purgeNB(fin)
- }
- c.Unlock()
-}
-
-func (c *emptyCache) Zap(closed bool) {
- c.Lock()
- for _, ns := range c.table {
- ns.zapNB(closed)
- }
- c.table = make(map[uint64]*emptyNS)
- c.Unlock()
-}
-
-func (*emptyCache) SetCapacity(capacity int) {}
-
-type emptyNS struct {
- cache *emptyCache
- id uint64
- table map[uint64]*emptyNode
- state nsState
-}
-
-func (ns *emptyNS) Get(key uint64, setf SetFunc) (o Object, ok bool) {
- ns.cache.Lock()
-
- switch ns.state {
- case nsZapped:
- ns.cache.Unlock()
- if setf == nil {
- return
- }
-
- var value interface{}
- var fin func()
- ok, value, _, fin = setf()
- if ok {
- o = &fakeObject{
- value: value,
- fin: fin,
- }
- }
- return
- case nsClosed:
- ns.cache.Unlock()
- return
- }
-
- n, ok := ns.table[key]
- if ok {
- n.ref++
- } else {
- if setf == nil {
- ns.cache.Unlock()
- return
- }
-
- var value interface{}
- var fin func()
- ok, value, _, fin = setf()
- if !ok {
- ns.cache.Unlock()
- return
- }
-
- n = &emptyNode{
- ns: ns,
- key: key,
- value: value,
- setfin: fin,
- ref: 1,
- }
- ns.table[key] = n
- }
-
- ns.cache.Unlock()
- o = &emptyObject{node: n}
- return
-}
-
-func (ns *emptyNS) Delete(key uint64, fin DelFin) bool {
- ns.cache.Lock()
-
- if ns.state != nsEffective {
- ns.cache.Unlock()
- if fin != nil {
- fin(false)
- }
- return false
- }
-
- n, ok := ns.table[key]
- if !ok {
- ns.cache.Unlock()
- if fin != nil {
- fin(false)
- }
- return false
- }
- n.delfin = fin
- ns.cache.Unlock()
- return true
-}
-
-func (ns *emptyNS) purgeNB(fin PurgeFin) {
- if ns.state != nsEffective {
- return
- }
- for _, n := range ns.table {
- n.purgefin = fin
- }
-}
-
-func (ns *emptyNS) Purge(fin PurgeFin) {
- ns.cache.Lock()
- ns.purgeNB(fin)
- ns.cache.Unlock()
-}
-
-func (ns *emptyNS) zapNB(closed bool) {
- if ns.state != nsEffective {
- return
- }
- for _, n := range ns.table {
- n.execFin()
- }
- if closed {
- ns.state = nsClosed
- } else {
- ns.state = nsZapped
- }
- ns.table = nil
-}
-
-func (ns *emptyNS) Zap(closed bool) {
- ns.cache.Lock()
- ns.zapNB(closed)
- delete(ns.cache.table, ns.id)
- ns.cache.Unlock()
-}
-
-type emptyNode struct {
- ns *emptyNS
- key uint64
- value interface{}
- ref int
- setfin SetFin
- delfin DelFin
- purgefin PurgeFin
-}
-
-func (n *emptyNode) execFin() {
- if n.setfin != nil {
- n.setfin()
- n.setfin = nil
- }
- if n.purgefin != nil {
- n.purgefin(n.ns.id, n.key, n.delfin)
- n.delfin = nil
- n.purgefin = nil
- } else if n.delfin != nil {
- n.delfin(true)
- n.delfin = nil
- }
-}
-
-func (n *emptyNode) evict() {
- n.ns.cache.Lock()
- n.ref--
- if n.ref == 0 {
- if n.ns.state == nsEffective {
- // Remove elem.
- delete(n.ns.table, n.key)
- // Execute finalizer.
- n.execFin()
- }
- } else if n.ref < 0 {
- panic("leveldb/cache: emptyNode: negative node reference")
- }
- n.ns.cache.Unlock()
-}
-
-type emptyObject struct {
- node *emptyNode
- once uint32
-}
-
-func (o *emptyObject) Value() interface{} {
- if atomic.LoadUint32(&o.once) == 0 {
- return o.node.value
- }
- return nil
-}
-
-func (o *emptyObject) Release() {
- if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
- return
- }
- o.node.evict()
- o.node = nil
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
new file mode 100644
index 000000000..d9a84cde1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
@@ -0,0 +1,195 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+ "sync"
+ "unsafe"
+)
+
+type lruNode struct {
+ n *Node
+ h *Handle
+ ban bool
+
+ next, prev *lruNode
+}
+
+func (n *lruNode) insert(at *lruNode) {
+ x := at.next
+ at.next = n
+ n.prev = at
+ n.next = x
+ x.prev = n
+}
+
+func (n *lruNode) remove() {
+ if n.prev != nil {
+ n.prev.next = n.next
+ n.next.prev = n.prev
+ n.prev = nil
+ n.next = nil
+ } else {
+ panic("BUG: removing removed node")
+ }
+}
+
+type lru struct {
+ mu sync.Mutex
+ capacity int
+ used int
+ recent lruNode
+}
+
+func (r *lru) reset() {
+ r.recent.next = &r.recent
+ r.recent.prev = &r.recent
+ r.used = 0
+}
+
+func (r *lru) Capacity() int {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return r.capacity
+}
+
+func (r *lru) SetCapacity(capacity int) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ r.capacity = capacity
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Promote(n *Node) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ if n.CacheData == nil {
+ if n.Size() <= r.capacity {
+ rn := &lruNode{n: n, h: n.GetHandle()}
+ rn.insert(&r.recent)
+ n.CacheData = unsafe.Pointer(rn)
+ r.used += n.Size()
+
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.insert(&r.recent)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Ban(n *Node) {
+ r.mu.Lock()
+ if n.CacheData == nil {
+ n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.ban = true
+ r.used -= rn.n.Size()
+ r.mu.Unlock()
+
+ rn.h.Release()
+ rn.h = nil
+ return
+ }
+ }
+ r.mu.Unlock()
+}
+
+func (r *lru) Evict(n *Node) {
+ r.mu.Lock()
+ rn := (*lruNode)(n.CacheData)
+ if rn == nil || rn.ban {
+ r.mu.Unlock()
+ return
+ }
+ n.CacheData = nil
+ r.mu.Unlock()
+
+ rn.h.Release()
+}
+
+func (r *lru) EvictNS(ns uint64) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ for e := r.recent.prev; e != &r.recent; {
+ rn := e
+ e = e.prev
+ if rn.n.NS() == ns {
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) EvictAll() {
+ r.mu.Lock()
+ back := r.recent.prev
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.n.CacheData = nil
+ }
+ r.reset()
+ r.mu.Unlock()
+
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Close() error {
+ return nil
+}
+
+// NewLRU creates a new LRU-cache.
+func NewLRU(capacity int) Cacher {
+ r := &lru{capacity: capacity}
+ r.reset()
+ return r
+}
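
The lru type above is just one Cacher implementation; NewCache accepts anything satisfying the interface (or nil for no retention at all). As a sketch of that contract, here is a hypothetical pass-through cacher, not part of this change, that never retains nodes, so a 'cache node' lives only as long as its outstanding handles:

package main

import "github.com/syndtr/goleveldb/leveldb/cache"

// nopCacher satisfies Cacher but never pins nodes.
type nopCacher struct{}

func (nopCacher) Capacity() int            { return 0 }
func (nopCacher) SetCapacity(capacity int) {}
func (nopCacher) Promote(n *cache.Node)    {} // an LRU would call n.GetHandle() here to retain n
func (nopCacher) Ban(n *cache.Node)        {}
func (nopCacher) Evict(n *cache.Node)      {}
func (nopCacher) EvictNS(ns uint64)        {}
func (nopCacher) EvictAll()                {}
func (nopCacher) Close() error             { return nil }

func main() {
	c := cache.NewCache(nopCacher{})
	h := c.Get(0, 1, func() (int, cache.Value) { return 1, "v" })
	h.Release() // last reference: the node is removed from the map here
	c.Close()
}

A real cacher, like the LRU above, retains nodes by taking a handle in Promote (via n.GetHandle()) and releasing that handle again on eviction or ban.
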
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go
deleted file mode 100644
index 3c98e076b..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package cache
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// lruCache represent a LRU cache state.
-type lruCache struct {
- sync.Mutex
-
- recent lruNode
- table map[uint64]*lruNs
- capacity int
- size int
-}
-
-// NewLRUCache creates a new initialized LRU cache with the given capacity.
-func NewLRUCache(capacity int) Cache {
- c := &lruCache{
- table: make(map[uint64]*lruNs),
- capacity: capacity,
- }
- c.recent.rNext = &c.recent
- c.recent.rPrev = &c.recent
- return c
-}
-
-// SetCapacity set cache capacity.
-func (c *lruCache) SetCapacity(capacity int) {
- c.Lock()
- c.capacity = capacity
- c.evict()
- c.Unlock()
-}
-
-// GetNamespace return namespace object for given id.
-func (c *lruCache) GetNamespace(id uint64) Namespace {
- c.Lock()
- defer c.Unlock()
-
- if p, ok := c.table[id]; ok {
- return p
- }
-
- p := &lruNs{
- lru: c,
- id: id,
- table: make(map[uint64]*lruNode),
- }
- c.table[id] = p
- return p
-}
-
-// Purge purge entire cache.
-func (c *lruCache) Purge(fin PurgeFin) {
- c.Lock()
- for _, ns := range c.table {
- ns.purgeNB(fin)
- }
- c.Unlock()
-}
-
-func (c *lruCache) Zap(closed bool) {
- c.Lock()
- for _, ns := range c.table {
- ns.zapNB(closed)
- }
- c.table = make(map[uint64]*lruNs)
- c.Unlock()
-}
-
-func (c *lruCache) evict() {
- top := &c.recent
- for n := c.recent.rPrev; c.size > c.capacity && n != top; {
- n.state = nodeEvicted
- n.rRemove()
- n.evictNB()
- c.size -= n.charge
- n = c.recent.rPrev
- }
-}
-
-type lruNs struct {
- lru *lruCache
- id uint64
- table map[uint64]*lruNode
- state nsState
-}
-
-func (ns *lruNs) Get(key uint64, setf SetFunc) (o Object, ok bool) {
- lru := ns.lru
- lru.Lock()
-
- switch ns.state {
- case nsZapped:
- lru.Unlock()
- if setf == nil {
- return
- }
-
- var value interface{}
- var fin func()
- ok, value, _, fin = setf()
- if ok {
- o = &fakeObject{
- value: value,
- fin: fin,
- }
- }
- return
- case nsClosed:
- lru.Unlock()
- return
- }
-
- n, ok := ns.table[key]
- if ok {
- switch n.state {
- case nodeEvicted:
- // Insert to recent list.
- n.state = nodeEffective
- n.ref++
- lru.size += n.charge
- lru.evict()
- fallthrough
- case nodeEffective:
- // Bump to front
- n.rRemove()
- n.rInsert(&lru.recent)
- }
- n.ref++
- } else {
- if setf == nil {
- lru.Unlock()
- return
- }
-
- var value interface{}
- var charge int
- var fin func()
- ok, value, charge, fin = setf()
- if !ok {
- lru.Unlock()
- return
- }
-
- n = &lruNode{
- ns: ns,
- key: key,
- value: value,
- charge: charge,
- setfin: fin,
- ref: 2,
- }
- ns.table[key] = n
- n.rInsert(&lru.recent)
-
- lru.size += charge
- lru.evict()
- }
-
- lru.Unlock()
- o = &lruObject{node: n}
- return
-}
-
-func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
- lru := ns.lru
- lru.Lock()
-
- if ns.state != nsEffective {
- lru.Unlock()
- if fin != nil {
- fin(false)
- }
- return false
- }
-
- n, ok := ns.table[key]
- if !ok {
- lru.Unlock()
- if fin != nil {
- fin(false)
- }
- return false
- }
-
- n.delfin = fin
- switch n.state {
- case nodeRemoved:
- lru.Unlock()
- return false
- case nodeEffective:
- lru.size -= n.charge
- n.rRemove()
- n.evictNB()
- }
- n.state = nodeRemoved
-
- lru.Unlock()
- return true
-}
-
-func (ns *lruNs) purgeNB(fin PurgeFin) {
- lru := ns.lru
- if ns.state != nsEffective {
- return
- }
-
- for _, n := range ns.table {
- n.purgefin = fin
- if n.state == nodeEffective {
- lru.size -= n.charge
- n.rRemove()
- n.evictNB()
- }
- n.state = nodeRemoved
- }
-}
-
-func (ns *lruNs) Purge(fin PurgeFin) {
- ns.lru.Lock()
- ns.purgeNB(fin)
- ns.lru.Unlock()
-}
-
-func (ns *lruNs) zapNB(closed bool) {
- lru := ns.lru
- if ns.state != nsEffective {
- return
- }
-
- if closed {
- ns.state = nsClosed
- } else {
- ns.state = nsZapped
- }
- for _, n := range ns.table {
- if n.state == nodeEffective {
- lru.size -= n.charge
- n.rRemove()
- }
- n.state = nodeRemoved
- n.execFin()
- }
- ns.table = nil
-}
-
-func (ns *lruNs) Zap(closed bool) {
- ns.lru.Lock()
- ns.zapNB(closed)
- delete(ns.lru.table, ns.id)
- ns.lru.Unlock()
-}
-
-type lruNode struct {
- ns *lruNs
-
- rNext, rPrev *lruNode
-
- key uint64
- value interface{}
- charge int
- ref int
- state nodeState
- setfin SetFin
- delfin DelFin
- purgefin PurgeFin
-}
-
-func (n *lruNode) rInsert(at *lruNode) {
- x := at.rNext
- at.rNext = n
- n.rPrev = at
- n.rNext = x
- x.rPrev = n
-}
-
-func (n *lruNode) rRemove() bool {
- // only remove if not already removed
- if n.rPrev == nil {
- return false
- }
-
- n.rPrev.rNext = n.rNext
- n.rNext.rPrev = n.rPrev
- n.rPrev = nil
- n.rNext = nil
-
- return true
-}
-
-func (n *lruNode) execFin() {
- if n.setfin != nil {
- n.setfin()
- n.setfin = nil
- }
- if n.purgefin != nil {
- n.purgefin(n.ns.id, n.key, n.delfin)
- n.delfin = nil
- n.purgefin = nil
- } else if n.delfin != nil {
- n.delfin(true)
- n.delfin = nil
- }
-}
-
-func (n *lruNode) evictNB() {
- n.ref--
- if n.ref == 0 {
- if n.ns.state == nsEffective {
- // remove elem
- delete(n.ns.table, n.key)
- // execute finalizer
- n.execFin()
- }
- } else if n.ref < 0 {
- panic("leveldb/cache: lruCache: negative node reference")
- }
-}
-
-func (n *lruNode) evict() {
- n.ns.lru.Lock()
- n.evictNB()
- n.ns.lru.Unlock()
-}
-
-type lruObject struct {
- node *lruNode
- once uint32
-}
-
-func (o *lruObject) Value() interface{} {
- if atomic.LoadUint32(&o.once) == 0 {
- return o.node.value
- }
- return nil
-}
-
-func (o *lruObject) Release() {
- if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
- return
- }
-
- o.node.evict()
- o.node = nil
-}