author     Janoš Guljaš <janos@users.noreply.github.com>  2019-02-23 17:47:33 +0800
committer  Viktor Trón <viktor.tron@gmail.com>  2019-02-23 17:47:33 +0800
commit     64d10c08726af33048e8eeb8df257628a3944870 (patch)
tree       00002e147d650e1d6ec571731bbf900e77d9e307 /swarm/storage/mock/db
parent     02c28046a04ebf649af5d1b2a702d0da1c8a2a39 (diff)
swarm: mock store listings (#19157)
* swarm/storage/mock: implement listings methods for mem and rpc stores
* swarm/storage/mock/rpc: add comments and newTestStore helper function
* swarm/storage/mock/mem: add missing comments
* swarm/storage/mock: add comments to new types and constants
* swarm/storage/mock/db: implement listings for mock/db global store
* swarm/storage/mock/test: add comments for MockStoreListings
* swarm/storage/mock/explorer: initial implementation
* cmd/swarm/global-store: add chunk explorer
* cmd/swarm/global-store: add chunk explorer tests
* swarm/storage/mock/explorer: add tests
* swarm/storage/mock/explorer: add swagger api definition
* swarm/storage/mock/explorer: not-zero test values for invalid addr and key
* swarm/storage/mock/explorer: test wildcard cors origin
* swarm/storage/mock/db: renames based on Fabio's suggestions
* swarm/storage/mock/explorer: add more comments to testHandler function
* cmd/swarm/global-store: terminate subprocess with Kill in tests
Diffstat (limited to 'swarm/storage/mock/db')
-rw-r--r--  swarm/storage/mock/db/db.go       | 295
-rw-r--r--  swarm/storage/mock/db/db_test.go  |  58
2 files changed, 293 insertions, 60 deletions
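
The headline addition for this package is a set of paginated listing methods on GlobalStore: Keys, Nodes, NodeKeys and KeyNodes. Each returns one page of results plus a Next cursor marking the start of the following page. A rough usage sketch of paging through all keys on one node — the store path and node address are illustrative, the import paths assume the upstream go-ethereum layout that go-tangerine mirrors, and the mock.Keys shape (Keys and Next fields) is as defined in the diff below:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ethereum/go-ethereum/common"
    	"github.com/ethereum/go-ethereum/swarm/storage/mock/db"
    )

    func main() {
    	// Path is illustrative; NewGlobalStore opens a leveldb database there.
    	store, err := db.NewGlobalStore("/tmp/mock-global-store")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer store.Close()

    	// Hypothetical node address.
    	addr := common.HexToAddress("0x1111111111111111111111111111111111111111")

    	// Page through NodeKeys; a nil Next marks the last page.
    	var startKey []byte
    	for {
    		page, err := store.NodeKeys(addr, startKey, 100)
    		if err != nil {
    			log.Fatal(err)
    		}
    		for _, key := range page.Keys {
    			fmt.Printf("%x\n", key)
    		}
    		if page.Next == nil {
    			break
    		}
    		startKey = page.Next
    	}
    }
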
diff --git a/swarm/storage/mock/db/db.go b/swarm/storage/mock/db/db.go
index 73ae199e8..313a61b43 100644
--- a/swarm/storage/mock/db/db.go
+++ b/swarm/storage/mock/db/db.go
@@ -21,8 +21,12 @@ import (
"archive/tar"
"bytes"
"encoding/json"
+ "errors"
+ "fmt"
"io"
"io/ioutil"
+ "sync"
+ "time"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
@@ -37,6 +41,10 @@ import (
// release resources used by the database.
type GlobalStore struct {
db *leveldb.DB
+ // protects nodes and keys indexes
+ // in Put and Delete methods
+ nodesLocks sync.Map
+ keysLocks sync.Map
}
// NewGlobalStore creates a new instance of GlobalStore.
@@ -64,14 +72,14 @@ func (s *GlobalStore) NewNodeStore(addr common.Address) *mock.NodeStore {
// Get returns chunk data if the chunk with key exists for node
// on address addr.
func (s *GlobalStore) Get(addr common.Address, key []byte) (data []byte, err error) {
- has, err := s.db.Has(nodeDBKey(addr, key), nil)
+ has, err := s.db.Has(indexForHashesPerNode(addr, key), nil)
if err != nil {
return nil, mock.ErrNotFound
}
if !has {
return nil, mock.ErrNotFound
}
- data, err = s.db.Get(dataDBKey(key), nil)
+ data, err = s.db.Get(indexDataKey(key), nil)
if err == leveldb.ErrNotFound {
err = mock.ErrNotFound
}
@@ -80,28 +88,165 @@ func (s *GlobalStore) Get(addr common.Address, key []byte) (data []byte, err err
// Put saves the chunk data for node with address addr.
func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
+ unlock, err := s.lock(addr, key)
+ if err != nil {
+ return err
+ }
+ defer unlock()
+
batch := new(leveldb.Batch)
- batch.Put(nodeDBKey(addr, key), nil)
- batch.Put(dataDBKey(key), data)
+ batch.Put(indexForHashesPerNode(addr, key), nil)
+ batch.Put(indexForNodesWithHash(key, addr), nil)
+ batch.Put(indexForNodes(addr), nil)
+ batch.Put(indexForHashes(key), nil)
+ batch.Put(indexDataKey(key), data)
return s.db.Write(batch, nil)
}
// Delete removes the chunk reference to node with address addr.
func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ unlock, err := s.lock(addr, key)
+ if err != nil {
+ return err
+ }
+ defer unlock()
+
batch := new(leveldb.Batch)
- batch.Delete(nodeDBKey(addr, key))
+ batch.Delete(indexForHashesPerNode(addr, key))
+ batch.Delete(indexForNodesWithHash(key, addr))
+
+ // check if this node contains any keys, and if not,
+ // remove it from the nodes index
+ x := indexForHashesPerNodePrefix(addr)
+ if k, _ := s.db.Get(x, nil); !bytes.HasPrefix(k, x) {
+ batch.Delete(indexForNodes(addr))
+ }
+
+ x = indexForNodesWithHashPrefix(key)
+ if k, _ := s.db.Get(x, nil); !bytes.HasPrefix(k, x) {
+ batch.Delete(indexForHashes(key))
+ }
return s.db.Write(batch, nil)
}
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
- has, err := s.db.Has(nodeDBKey(addr, key), nil)
+ has, err := s.db.Has(indexForHashesPerNode(addr, key), nil)
if err != nil {
has = false
}
return has
}
+// Keys returns a paginated list of keys on all nodes.
+func (s *GlobalStore) Keys(startKey []byte, limit int) (keys mock.Keys, err error) {
+ return s.keys(nil, startKey, limit)
+}
+
+// Nodes returns a paginated list of all known nodes.
+func (s *GlobalStore) Nodes(startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
+ return s.nodes(nil, startAddr, limit)
+}
+
+// NodeKeys returns a paginated list of keys on a node with provided address.
+func (s *GlobalStore) NodeKeys(addr common.Address, startKey []byte, limit int) (keys mock.Keys, err error) {
+ return s.keys(&addr, startKey, limit)
+}
+
+// KeyNodes returns a paginated list of nodes that contain a particular key.
+func (s *GlobalStore) KeyNodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
+ return s.nodes(key, startAddr, limit)
+}
+
+// keys returns a paginated list of keys. If addr is not nil, only keys on that
+// node will be returned.
+func (s *GlobalStore) keys(addr *common.Address, startKey []byte, limit int) (keys mock.Keys, err error) {
+ iter := s.db.NewIterator(nil, nil)
+ defer iter.Release()
+
+ if limit <= 0 {
+ limit = mock.DefaultLimit
+ }
+
+ prefix := []byte{indexForHashesPrefix}
+ if addr != nil {
+ prefix = indexForHashesPerNodePrefix(*addr)
+ }
+ if startKey != nil {
+ if addr != nil {
+ startKey = indexForHashesPerNode(*addr, startKey)
+ } else {
+ startKey = indexForHashes(startKey)
+ }
+ } else {
+ startKey = prefix
+ }
+
+ ok := iter.Seek(startKey)
+ if !ok {
+ return keys, iter.Error()
+ }
+ for ; ok; ok = iter.Next() {
+ k := iter.Key()
+ if !bytes.HasPrefix(k, prefix) {
+ break
+ }
+ key := append([]byte(nil), bytes.TrimPrefix(k, prefix)...)
+
+ if len(keys.Keys) >= limit {
+ keys.Next = key
+ break
+ }
+
+ keys.Keys = append(keys.Keys, key)
+ }
+ return keys, iter.Error()
+}
+
+// nodes returns a paginated list of node addresses. If key is not nil,
+// only nodes that contain that key will be returned.
+func (s *GlobalStore) nodes(key []byte, startAddr *common.Address, limit int) (nodes mock.Nodes, err error) {
+ iter := s.db.NewIterator(nil, nil)
+ defer iter.Release()
+
+ if limit <= 0 {
+ limit = mock.DefaultLimit
+ }
+
+ prefix := []byte{indexForNodesPrefix}
+ if key != nil {
+ prefix = indexForNodesWithHashPrefix(key)
+ }
+ startKey := prefix
+ if startAddr != nil {
+ if key != nil {
+ startKey = indexForNodesWithHash(key, *startAddr)
+ } else {
+ startKey = indexForNodes(*startAddr)
+ }
+ }
+
+ ok := iter.Seek(startKey)
+ if !ok {
+ return nodes, iter.Error()
+ }
+ for ; ok; ok = iter.Next() {
+ k := iter.Key()
+ if !bytes.HasPrefix(k, prefix) {
+ break
+ }
+ addr := common.BytesToAddress(append([]byte(nil), bytes.TrimPrefix(k, prefix)...))
+
+ if len(nodes.Addrs) >= limit {
+ nodes.Next = &addr
+ break
+ }
+
+ nodes.Addrs = append(nodes.Addrs, addr)
+ }
+ return nodes, iter.Error()
+}
+
// Import reads tar archive from a reader that contains exported chunk data.
// It returns the number of chunks imported and an error.
func (s *GlobalStore) Import(r io.Reader) (n int, err error) {
@@ -126,12 +271,18 @@ func (s *GlobalStore) Import(r io.Reader) (n int, err error) {
return n, err
}
+ key := common.Hex2Bytes(hdr.Name)
+
batch := new(leveldb.Batch)
for _, addr := range c.Addrs {
- batch.Put(nodeDBKeyHex(addr, hdr.Name), nil)
+ batch.Put(indexForHashesPerNode(addr, key), nil)
+ batch.Put(indexForNodesWithHash(key, addr), nil)
+ batch.Put(indexForNodes(addr), nil)
}
- batch.Put(dataDBKey(common.Hex2Bytes(hdr.Name)), c.Data)
+ batch.Put(indexForHashes(key), nil)
+ batch.Put(indexDataKey(key), c.Data)
+
if err = s.db.Write(batch, nil); err != nil {
return n, err
}
@@ -150,18 +301,23 @@ func (s *GlobalStore) Export(w io.Writer) (n int, err error) {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
encoder := json.NewEncoder(buf)
- iter := s.db.NewIterator(util.BytesPrefix(nodeKeyPrefix), nil)
+ snap, err := s.db.GetSnapshot()
+ if err != nil {
+ return 0, err
+ }
+
+ iter := snap.NewIterator(util.BytesPrefix([]byte{indexForHashesByNodePrefix}), nil)
defer iter.Release()
var currentKey string
var addrs []common.Address
- saveChunk := func(hexKey string) error {
- key := common.Hex2Bytes(hexKey)
+ saveChunk := func() error {
+ hexKey := currentKey
- data, err := s.db.Get(dataDBKey(key), nil)
+ data, err := snap.Get(indexDataKey(common.Hex2Bytes(hexKey)), nil)
if err != nil {
- return err
+ return fmt.Errorf("get data %s: %v", hexKey, err)
}
buf.Reset()
@@ -189,8 +345,8 @@ func (s *GlobalStore) Export(w io.Writer) (n int, err error) {
}
for iter.Next() {
- k := bytes.TrimPrefix(iter.Key(), nodeKeyPrefix)
- i := bytes.Index(k, []byte("-"))
+ k := bytes.TrimPrefix(iter.Key(), []byte{indexForHashesByNodePrefix})
+ i := bytes.Index(k, []byte{keyTermByte})
if i < 0 {
continue
}
@@ -201,7 +357,7 @@ func (s *GlobalStore) Export(w io.Writer) (n int, err error) {
}
if hexKey != currentKey {
- if err = saveChunk(currentKey); err != nil {
+ if err = saveChunk(); err != nil {
return n, err
}
@@ -209,35 +365,112 @@ func (s *GlobalStore) Export(w io.Writer) (n int, err error) {
}
currentKey = hexKey
- addrs = append(addrs, common.BytesToAddress(k[i:]))
+ addrs = append(addrs, common.BytesToAddress(k[i+1:]))
}
if len(addrs) > 0 {
- if err = saveChunk(currentKey); err != nil {
+ if err = saveChunk(); err != nil {
return n, err
}
}
- return n, err
+ return n, iter.Error()
}
var (
- nodeKeyPrefix = []byte("node-")
- dataKeyPrefix = []byte("data-")
+ // maximal time to wait for a lock before returning an error
+ lockTimeout = 3 * time.Second
+ // duration between two lock checks.
+ lockCheckDelay = 30 * time.Microsecond
+ // error returned by lock method when lock timeout is reached
+ errLockTimeout = errors.New("lock timeout")
+)
+
+// lock protects parallel writes in Put and Delete methods for both
+// node with provided address and for data with provided key.
+func (s *GlobalStore) lock(addr common.Address, key []byte) (unlock func(), err error) {
+ start := time.Now()
+ nodeLockKey := addr.Hex()
+ for {
+ _, loaded := s.nodesLocks.LoadOrStore(nodeLockKey, struct{}{})
+ if !loaded {
+ break
+ }
+ time.Sleep(lockCheckDelay)
+ if time.Since(start) > lockTimeout {
+ return nil, errLockTimeout
+ }
+ }
+ start = time.Now()
+ keyLockKey := common.Bytes2Hex(key)
+ for {
+ _, loaded := s.keysLocks.LoadOrStore(keyLockKey, struct{}{})
+ if !loaded {
+ break
+ }
+ time.Sleep(lockCheckDelay)
+ if time.Since(start) > lockTimeout {
+ return nil, errLockTimeout
+ }
+ }
+ return func() {
+ s.nodesLocks.Delete(nodeLockKey)
+ s.keysLocks.Delete(keyLockKey)
+ }, nil
+}
+
+const (
+ // prefixes for different indexes
+ indexDataPrefix = 0
+ indexForNodesWithHashesPrefix = 1
+ indexForHashesByNodePrefix = 2
+ indexForNodesPrefix = 3
+ indexForHashesPrefix = 4
+
+ // keyTermByte splits keys and node addresses
+ // in database keys
+ keyTermByte = 0xff
)
-// nodeDBKey constructs a database key for key/node mappings.
-func nodeDBKey(addr common.Address, key []byte) []byte {
- return nodeDBKeyHex(addr, common.Bytes2Hex(key))
+// indexForHashesPerNode constructs a database key to store keys used in
+// NodeKeys method.
+func indexForHashesPerNode(addr common.Address, key []byte) []byte {
+ return append(indexForHashesPerNodePrefix(addr), key...)
+}
+
+// indexForHashesPerNodePrefix returns a prefix containing a node address used in
+// NodeKeys method. Node address is hex encoded to be able to use keyTermByte
+// for splitting node address and key.
+func indexForHashesPerNodePrefix(addr common.Address) []byte {
+ return append([]byte{indexForNodesWithHashesPrefix}, append([]byte(addr.Hex()), keyTermByte)...)
+}
+
+// indexForNodesWithHash constructs a database key to store keys used in
+// KeyNodes method.
+func indexForNodesWithHash(key []byte, addr common.Address) []byte {
+ return append(indexForNodesWithHashPrefix(key), addr[:]...)
+}
+
+// indexForNodesWithHashPrefix returns a prefix containing a key used in
+// KeyNodes method. Key is hex encoded to be able to use keyTermByte
+// for splitting key and node address.
+func indexForNodesWithHashPrefix(key []byte) []byte {
+ return append([]byte{indexForHashesByNodePrefix}, append([]byte(common.Bytes2Hex(key)), keyTermByte)...)
+}
+
+// indexForNodes constructs a database key to store keys used in
+// Nodes method.
+func indexForNodes(addr common.Address) []byte {
+ return append([]byte{indexForNodesPrefix}, addr[:]...)
}
-// nodeDBKeyHex constructs a database key for key/node mappings
-// using the hexadecimal string representation of the key.
-func nodeDBKeyHex(addr common.Address, hexKey string) []byte {
- return append(append(nodeKeyPrefix, []byte(hexKey+"-")...), addr[:]...)
+// indexForHashes constructs a database key to store keys used in
+// Keys method.
+func indexForHashes(key []byte) []byte {
+ return append([]byte{indexForHashesPrefix}, key...)
}
-// dataDBkey constructs a database key for key/data storage.
-func dataDBKey(key []byte) []byte {
- return append(dataKeyPrefix, key...)
+// indexDataKey constructs a database key for key/data storage.
+func indexDataKey(key []byte) []byte {
+ return append([]byte{indexDataPrefix}, key...)
}
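
The lock method above is a spin lock with a timeout built on sync.Map: LoadOrStore acts as an atomic test-and-set, and the caller polls every lockCheckDelay until it either claims the slot or lockTimeout expires. A minimal standalone sketch of the same pattern — the function and variable names here are mine, not from the diff:

    package main

    import (
    	"errors"
    	"sync"
    	"time"
    )

    var errLockTimeout = errors.New("lock timeout")

    // tryLock spins until it atomically claims name in locks, or gives
    // up after timeout. LoadOrStore reports loaded == false only for
    // the single goroutine that stored the value, so it behaves as an
    // atomic test-and-set.
    func tryLock(locks *sync.Map, name string, timeout, delay time.Duration) (unlock func(), err error) {
    	start := time.Now()
    	for {
    		if _, loaded := locks.LoadOrStore(name, struct{}{}); !loaded {
    			return func() { locks.Delete(name) }, nil
    		}
    		if time.Since(start) > timeout {
    			return nil, errLockTimeout
    		}
    		time.Sleep(delay)
    	}
    }

    func main() {
    	var locks sync.Map
    	unlock, err := tryLock(&locks, "node-0x11", 3*time.Second, 30*time.Microsecond)
    	if err != nil {
    		panic(err)
    	}
    	defer unlock()
    	// critical section: mutate the indexes for this node here
    }
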
diff --git a/swarm/storage/mock/db/db_test.go b/swarm/storage/mock/db/db_test.go
index 782faaf35..efbf942f6 100644
--- a/swarm/storage/mock/db/db_test.go
+++ b/swarm/storage/mock/db/db_test.go
@@ -1,5 +1,3 @@
-// +build go1.8
-//
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
@@ -29,47 +27,49 @@ import (
// TestDBStore runs mock store tests
// using the test.MockStore function.
func TestDBStore(t *testing.T) {
- dir, err := ioutil.TempDir("", "mock_"+t.Name())
- if err != nil {
- panic(err)
- }
- defer os.RemoveAll(dir)
-
- store, err := NewGlobalStore(dir)
- if err != nil {
- t.Fatal(err)
- }
- defer store.Close()
+ store, cleanup := newTestStore(t)
+ defer cleanup()
test.MockStore(t, store, 100)
}
+// TestDBStoreListings runs test.MockStoreListings tests.
+func TestDBStoreListings(t *testing.T) {
+ store, cleanup := newTestStore(t)
+ defer cleanup()
+
+ test.MockStoreListings(t, store, 1000)
+}
+
// TestImportExport runs import/export tests
// using the test.ImportExport function.
func TestImportExport(t *testing.T) {
- dir1, err := ioutil.TempDir("", "mock_"+t.Name()+"_exporter")
- if err != nil {
- panic(err)
- }
- defer os.RemoveAll(dir1)
+ store1, cleanup := newTestStore(t)
+ defer cleanup()
- store1, err := NewGlobalStore(dir1)
- if err != nil {
- t.Fatal(err)
- }
- defer store1.Close()
+ store2, cleanup := newTestStore(t)
+ defer cleanup()
+
+ test.ImportExport(t, store1, store2, 100)
+}
- dir2, err := ioutil.TempDir("", "mock_"+t.Name()+"_importer")
+// newTestStore creates a temporary GlobalStore
+// that is closed, and its data deleted, when
+// the returned cleanup function is called.
+func newTestStore(t *testing.T) (s *GlobalStore, cleanup func()) {
+ dir, err := ioutil.TempDir("", "swarm-mock-db-")
if err != nil {
- panic(err)
+ t.Fatal(err)
}
- defer os.RemoveAll(dir2)
- store2, err := NewGlobalStore(dir2)
+ s, err = NewGlobalStore(dir)
if err != nil {
+ os.RemoveAll(dir)
t.Fatal(err)
}
- defer store2.Close()
- test.ImportExport(t, store1, store2, 100)
+ return s, func() {
+ s.Close()
+ os.RemoveAll(dir)
+ }
}
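
Export writes every chunk as a JSON-encoded entry in a tar stream, and Import consumes exactly that format, so copying one store into another only needs an intermediate buffer or pipe. A rough round-trip sketch, assuming stores opened the same way as in the tests above (paths are illustrative, import path as in the first sketch):

    package main

    import (
    	"bytes"
    	"fmt"
    	"log"

    	"github.com/ethereum/go-ethereum/swarm/storage/mock/db"
    )

    func main() {
    	// Paths are illustrative; the tests above use temporary directories.
    	src, err := db.NewGlobalStore("/tmp/mock-src")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer src.Close()

    	dst, err := db.NewGlobalStore("/tmp/mock-dst")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer dst.Close()

    	// Export produces a tar stream; Import reads the same format back.
    	var buf bytes.Buffer
    	exported, err := src.Export(&buf)
    	if err != nil {
    		log.Fatal(err)
    	}
    	imported, err := dst.Import(&buf)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("exported %d chunks, imported %d chunks\n", exported, imported)
    }
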