author    Péter Szilágyi <peterke@gmail.com>  2019-05-10 19:09:01 +0800
committer GitHub <noreply@github.com>  2019-05-10 19:09:01 +0800
commit    494f5d448a1685d5de4cb1524b863cd1fc9a13b0 (patch)
tree      4db9d1afe4910c888f3488cd93e8537501d88314
parent    c94d582aa781b26412ba7d570f6707d193303a02 (diff)
parent    9b1543c282f39d452f611eeee0307bdf828e8bc2 (diff)
Merge pull request #19550 from ethersphere/swarm-rather-stable
swarm v0.4-rc1
-rw-r--r--  cmd/swarm/config.go | 6
-rw-r--r--  cmd/swarm/config_test.go | 4
-rw-r--r--  cmd/swarm/db.go | 118
-rw-r--r--  cmd/swarm/explore.go | 3
-rw-r--r--  cmd/swarm/export_test.go | 164
-rw-r--r--  cmd/swarm/flags.go | 4
-rw-r--r--  cmd/swarm/hash.go | 3
-rw-r--r--  cmd/swarm/swarm-smoke/feed_upload_and_sync.go | 8
-rw-r--r--  cmd/swarm/swarm-smoke/main.go | 38
-rw-r--r--  cmd/swarm/swarm-smoke/sliding_window.go | 18
-rw-r--r--  cmd/swarm/swarm-smoke/upload_and_sync.go | 327
-rw-r--r--  cmd/swarm/swarm-smoke/upload_speed.go | 8
-rw-r--r--  cmd/swarm/swarm-smoke/util.go | 18
-rw-r--r--  cmd/swarm/swarm-snapshot/create_test.go | 5
-rw-r--r--  cmd/swarm/testdata/datastore_fixture.go | 1390
-rw-r--r--  p2p/protocols/protocol.go | 5
-rw-r--r--  p2p/protocols/protocol_test.go | 3
-rw-r--r--  p2p/testing/peerpool.go | 2
-rw-r--r--  swarm/api/api.go | 32
-rw-r--r--  swarm/api/api_test.go | 108
-rw-r--r--  swarm/api/client/client.go | 32
-rw-r--r--  swarm/api/client/client_test.go | 21
-rw-r--r--  swarm/api/config.go | 14
-rw-r--r--  swarm/api/config_test.go | 1
-rw-r--r--  swarm/api/filesystem_test.go | 3
-rw-r--r--  swarm/api/http/middleware.go | 49
-rw-r--r--  swarm/api/http/response.go | 2
-rw-r--r--  swarm/api/http/server.go | 60
-rw-r--r--  swarm/api/http/server_test.go | 152
-rw-r--r--  swarm/api/http/test_server.go | 18
-rw-r--r--  swarm/api/inspector.go | 37
-rw-r--r--  swarm/api/manifest.go | 1
-rw-r--r--  swarm/api/manifest_test.go | 7
-rw-r--r--  swarm/api/storage.go | 85
-rw-r--r--  swarm/api/storage_test.go | 56
-rw-r--r--  swarm/chunk/chunk.go | 154
-rw-r--r--  swarm/chunk/tag.go | 218
-rw-r--r--  swarm/chunk/tag_test.go | 273
-rw-r--r--  swarm/chunk/tags.go | 96
-rw-r--r--  swarm/chunk/tags_test.go | 48
-rw-r--r--  swarm/fuse/swarmfs_test.go | 5
-rw-r--r--  swarm/network/hive.go | 2
-rw-r--r--  swarm/network/hive_test.go | 27
-rw-r--r--  swarm/network/kademlia.go | 157
-rw-r--r--  swarm/network/kademlia_test.go | 112
-rw-r--r--  swarm/network/protocol_test.go | 4
-rw-r--r--  swarm/network/simulation/kademlia_test.go | 1
-rw-r--r--  swarm/network/stream/common_test.go | 106
-rw-r--r--  swarm/network/stream/delivery.go | 179
-rw-r--r--  swarm/network/stream/delivery_test.go | 177
-rw-r--r--  swarm/network/stream/intervals_test.go | 9
-rw-r--r--  swarm/network/stream/lightnode_test.go | 89
-rw-r--r--  swarm/network/stream/messages.go | 72
-rw-r--r--  swarm/network/stream/peer.go | 184
-rw-r--r--  swarm/network/stream/peer_test.go | 309
-rw-r--r--  swarm/network/stream/snapshot_retrieval_test.go | 6
-rw-r--r--  swarm/network/stream/snapshot_sync_test.go | 30
-rw-r--r--  swarm/network/stream/stream.go | 390
-rw-r--r--  swarm/network/stream/streamer_test.go | 191
-rw-r--r--  swarm/network/stream/syncer.go | 209
-rw-r--r--  swarm/network/stream/syncer_test.go | 68
-rw-r--r--  swarm/network_test.go | 32
-rw-r--r--  swarm/pss/handshake_test.go | 1
-rw-r--r--  swarm/sctx/sctx.go | 17
-rw-r--r--  swarm/shed/db.go | 71
-rw-r--r--  swarm/shed/index.go | 8
-rw-r--r--  swarm/shed/schema.go | 2
-rw-r--r--  swarm/storage/chunker_test.go | 21
-rw-r--r--  swarm/storage/common_test.go | 65
-rw-r--r--  swarm/storage/database.go | 82
-rw-r--r--  swarm/storage/feed/handler.go | 12
-rw-r--r--  swarm/storage/feed/handler_test.go | 13
-rw-r--r--  swarm/storage/feed/testutil.go | 15
-rw-r--r--  swarm/storage/filestore.go | 41
-rw-r--r--  swarm/storage/filestore_test.go | 67
-rw-r--r--  swarm/storage/hasherstore.go | 15
-rw-r--r--  swarm/storage/hasherstore_test.go | 8
-rw-r--r--  swarm/storage/ldbstore.go | 1082
-rw-r--r--  swarm/storage/ldbstore_test.go | 788
-rw-r--r--  swarm/storage/localstore.go | 251
-rw-r--r--  swarm/storage/localstore/export.go | 204
-rw-r--r--  swarm/storage/localstore/export_test.go | 80
-rw-r--r--  swarm/storage/localstore/gc.go | 19
-rw-r--r--  swarm/storage/localstore/gc_test.go | 48
-rw-r--r--  swarm/storage/localstore/index_test.go | 49
-rw-r--r--  swarm/storage/localstore/localstore.go | 80
-rw-r--r--  swarm/storage/localstore/localstore_test.go | 55
-rw-r--r--  swarm/storage/localstore/mode_get.go | 63
-rw-r--r--  swarm/storage/localstore/mode_get_test.go | 67
-rw-r--r--  swarm/storage/localstore/mode_has.go | 28
-rw-r--r--  swarm/storage/localstore/mode_has_test.go | 13
-rw-r--r--  swarm/storage/localstore/mode_put.go | 118
-rw-r--r--  swarm/storage/localstore/mode_put_test.go | 116
-rw-r--r--  swarm/storage/localstore/mode_set.go | 63
-rw-r--r--  swarm/storage/localstore/mode_set_test.go | 35
-rw-r--r--  swarm/storage/localstore/retrieval_index_test.go | 17
-rw-r--r--  swarm/storage/localstore/schema.go | 52
-rw-r--r--  swarm/storage/localstore/subscription_pull.go | 107
-rw-r--r--  swarm/storage/localstore/subscription_pull_test.go | 243
-rw-r--r--  swarm/storage/localstore/subscription_push.go | 29
-rw-r--r--  swarm/storage/localstore/subscription_push_test.go | 16
-rw-r--r--  swarm/storage/localstore_test.go | 244
-rw-r--r--  swarm/storage/memstore.go | 92
-rw-r--r--  swarm/storage/memstore_test.go | 158
-rw-r--r--  swarm/storage/netstore.go | 45
-rw-r--r--  swarm/storage/netstore_test.go | 130
-rw-r--r--  swarm/storage/pyramid.go | 13
-rw-r--r--  swarm/storage/schema.go | 17
-rw-r--r--  swarm/storage/types.go | 44
-rw-r--r--  swarm/swarm.go | 58
-rw-r--r--  swarm/swarm_test.go | 8
-rw-r--r--  swarm/testutil/tag.go | 51
112 files changed, 5741 insertions, 5130 deletions
diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go
index 32cd442a0..e4b333549 100644
--- a/cmd/swarm/config.go
+++ b/cmd/swarm/config.go
@@ -252,15 +252,15 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
}
if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
- currentConfig.LocalStoreParams.ChunkDbPath = storePath
+ currentConfig.ChunkDbPath = storePath
}
if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
- currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+ currentConfig.DbCapacity = storeCapacity
}
if ctx.GlobalIsSet(SwarmStoreCacheCapacity.Name) {
- currentConfig.LocalStoreParams.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
+ currentConfig.CacheCapacity = ctx.GlobalUint(SwarmStoreCacheCapacity.Name)
}
if ctx.GlobalIsSet(SwarmBootnodeModeFlag.Name) {
diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go
index 869edd0f7..484f6dec3 100644
--- a/cmd/swarm/config_test.go
+++ b/cmd/swarm/config_test.go
@@ -447,8 +447,8 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
t.Fatal("Expected Sync to be disabled, but is true")
}
- if info.LocalStoreParams.DbCapacity != 9000000 {
- t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
+ if info.DbCapacity != 9000000 {
+ t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.DbCapacity)
}
if info.HiveParams.KeepAliveInterval != 6000000000 {
diff --git a/cmd/swarm/db.go b/cmd/swarm/db.go
index 7916beffc..b0e9f367f 100644
--- a/cmd/swarm/db.go
+++ b/cmd/swarm/db.go
@@ -17,6 +17,10 @@
package main
import (
+ "archive/tar"
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
"fmt"
"io"
"os"
@@ -25,10 +29,22 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
"gopkg.in/urfave/cli.v1"
)
+var legacyKeyIndex = byte(0)
+var keyData = byte(6)
+
+type dpaDBIndex struct {
+ Idx uint64
+ Access uint64
+}
+
var dbCommand = cli.Command{
Name: "db",
CustomHelpTemplate: helpTemplate,
@@ -67,6 +83,9 @@ The import may be quite large, consider piping the input through the Unix
pv(1) tool to get a progress bar:
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
+ Flags: []cli.Flag{
+ SwarmLegacyFlag,
+ },
},
},
}
@@ -77,12 +96,6 @@ func dbExport(ctx *cli.Context) {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
}
- store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
- if err != nil {
- utils.Fatalf("error opening local chunk database: %s", err)
- }
- defer store.Close()
-
var out io.Writer
if args[1] == "-" {
out = os.Stdout
@@ -95,6 +108,23 @@ func dbExport(ctx *cli.Context) {
out = f
}
+ isLegacy := localstore.IsLegacyDatabase(args[0])
+ if isLegacy {
+ count, err := exportLegacy(args[0], common.Hex2Bytes(args[2]), out)
+ if err != nil {
+ utils.Fatalf("error exporting legacy local chunk database: %s", err)
+ }
+
+ log.Info(fmt.Sprintf("successfully exported %d chunks from legacy db", count))
+ return
+ }
+
+ store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
+ if err != nil {
+ utils.Fatalf("error opening local chunk database: %s", err)
+ }
+ defer store.Close()
+
count, err := store.Export(out)
if err != nil {
utils.Fatalf("error exporting local chunk database: %s", err)
@@ -109,6 +139,8 @@ func dbImport(ctx *cli.Context) {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
}
+ legacy := ctx.IsSet(SwarmLegacyFlag.Name)
+
store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
@@ -127,7 +159,7 @@ func dbImport(ctx *cli.Context) {
in = f
}
- count, err := store.Import(in)
+ count, err := store.Import(in, legacy)
if err != nil {
utils.Fatalf("error importing local chunk database: %s", err)
}
@@ -135,13 +167,73 @@ func dbImport(ctx *cli.Context) {
log.Info(fmt.Sprintf("successfully imported %d chunks", count))
}
-func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
+func openLDBStore(path string, basekey []byte) (*localstore.DB, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err)
}
- storeparams := storage.NewDefaultStoreParams()
- ldbparams := storage.NewLDBStoreParams(storeparams, path)
- ldbparams.BaseKey = basekey
- return storage.NewLDBStore(ldbparams)
+ return localstore.New(path, basekey, nil)
+}
+
+func decodeIndex(data []byte, index *dpaDBIndex) error {
+ dec := rlp.NewStream(bytes.NewReader(data), 0)
+ return dec.Decode(index)
+}
+
+func getDataKey(idx uint64, po uint8) []byte {
+ key := make([]byte, 10)
+ key[0] = keyData
+ key[1] = po
+ binary.BigEndian.PutUint64(key[2:], idx)
+
+ return key
+}
+
+func exportLegacy(path string, basekey []byte, out io.Writer) (int64, error) {
+ tw := tar.NewWriter(out)
+ defer tw.Close()
+ db, err := leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 128})
+ if err != nil {
+ return 0, err
+ }
+ defer db.Close()
+
+ it := db.NewIterator(nil, nil)
+ defer it.Release()
+ var count int64
+ for ok := it.Seek([]byte{legacyKeyIndex}); ok; ok = it.Next() {
+ key := it.Key()
+ if (key == nil) || (key[0] != legacyKeyIndex) {
+ break
+ }
+
+ var index dpaDBIndex
+
+ hash := key[1:]
+ decodeIndex(it.Value(), &index)
+
+ po := uint8(chunk.Proximity(basekey, hash))
+
+ datakey := getDataKey(index.Idx, po)
+ data, err := db.Get(datakey, nil)
+ if err != nil {
+ log.Crit(fmt.Sprintf("Chunk %x found but could not be accessed: %v, %x", key, err, datakey))
+ continue
+ }
+
+ hdr := &tar.Header{
+ Name: hex.EncodeToString(hash),
+ Mode: 0644,
+ Size: int64(len(data)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return count, err
+ }
+ if _, err := tw.Write(data); err != nil {
+ return count, err
+ }
+ count++
+ }
+
+ return count, nil
}
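For reference, a minimal standalone sketch (not part of this change) of how the tar stream written by exportLegacy and store.Export above can be consumed: each archive entry is named with the hex-encoded chunk hash and its body is the raw chunk data. The file name export.tar is only illustrative.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

func main() {
	f, err := os.Open("export.tar") // illustrative path to a `swarm db export` dump
	if err != nil {
		panic(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			panic(err)
		}
		data, err := ioutil.ReadAll(tr) // raw chunk bytes
		if err != nil {
			panic(err)
		}
		fmt.Printf("chunk %s: %d bytes\n", hdr.Name, len(data))
	}
}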
diff --git a/cmd/swarm/explore.go b/cmd/swarm/explore.go
index 5b5b8bf41..9566213e4 100644
--- a/cmd/swarm/explore.go
+++ b/cmd/swarm/explore.go
@@ -23,6 +23,7 @@ import (
"os"
"github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
)
@@ -47,7 +48,7 @@ func hashes(ctx *cli.Context) {
}
defer f.Close()
- fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams())
+ fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
refs, err := fileStore.GetAllReferences(context.TODO(), f, false)
if err != nil {
utils.Fatalf("%v\n", err)
diff --git a/cmd/swarm/export_test.go b/cmd/swarm/export_test.go
index e8671eea7..19e54c21d 100644
--- a/cmd/swarm/export_test.go
+++ b/cmd/swarm/export_test.go
@@ -17,19 +17,34 @@
package main
import (
+ "archive/tar"
"bytes"
+ "compress/gzip"
"crypto/md5"
+ "encoding/base64"
+ "encoding/hex"
"io"
+ "io/ioutil"
"net/http"
"os"
+ "path"
"runtime"
"strings"
"testing"
+ "github.com/ethereum/go-ethereum/cmd/swarm/testdata"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
+const (
+ DATABASE_FIXTURE_BZZ_ACCOUNT = "0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
+ DATABASE_FIXTURE_PASSWORD = "pass"
+ FIXTURE_DATADIR_PREFIX = "swarm/bzz-0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
+ FixtureBaseKey = "a9f22b3d77b4bdf5f3eefce995d6c8e7cecf2636f20956f08a0d1ed95adb52ad"
+)
+
// TestCLISwarmExportImport performs the following test:
// 1. runs swarm node
// 2. uploads a random file
@@ -99,6 +114,112 @@ func TestCLISwarmExportImport(t *testing.T) {
mustEqualFiles(t, bytes.NewReader(content), res.Body)
}
+// TestExportLegacyToNew checks that an old database gets imported correctly into the new localstore structure
+// The test sequence is as follows:
+// 1. unpack database fixture to tmp dir
+// 2. try to open with new swarm binary that should complain about old database
+// 3. export from old database
+// 4. remove the chunks folder
+// 5. import the dump
+// 6. file should be accessible
+func TestExportLegacyToNew(t *testing.T) {
+ /*
+ fixture bzz account 0aa159029fa13ffa8fa1c6fff6ebceface99d6a4
+ */
+ const UPLOADED_FILE_MD5_HASH = "a001fdae53ba50cae584b8b02b06f821"
+ const UPLOADED_HASH = "67a86082ee0ea1bc7dd8d955bb1e14d04f61d55ae6a4b37b3d0296a3a95e454a"
+ tmpdir, err := ioutil.TempDir("", "swarm-test")
+ log.Trace("running legacy datastore migration test", "temp dir", tmpdir)
+ defer os.RemoveAll(tmpdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ inflateBase64Gzip(t, testdata.DATADIR_MIGRATION_FIXTURE, tmpdir)
+
+ tmpPassword := testutil.TempFileWithContent(t, DATABASE_FIXTURE_PASSWORD)
+ defer os.Remove(tmpPassword)
+
+ flags := []string{
+ "--datadir", tmpdir,
+ "--bzzaccount", DATABASE_FIXTURE_BZZ_ACCOUNT,
+ "--password", tmpPassword,
+ }
+
+ newSwarmOldDb := runSwarm(t, flags...)
+ _, matches := newSwarmOldDb.ExpectRegexp(".+")
+ newSwarmOldDb.ExpectExit()
+
+ if len(matches) == 0 {
+ t.Fatalf("stdout not matched")
+ }
+
+ if newSwarmOldDb.ExitStatus() == 0 {
+ t.Fatal("should error")
+ }
+ t.Log("exporting legacy database")
+ actualDataDir := path.Join(tmpdir, FIXTURE_DATADIR_PREFIX)
+ exportCmd := runSwarm(t, "--verbosity", "5", "db", "export", actualDataDir+"/chunks", tmpdir+"/export.tar", FixtureBaseKey)
+ exportCmd.ExpectExit()
+
+ stat, err := os.Stat(tmpdir + "/export.tar")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // make some silly size assumption
+ if stat.Size() < 90000 {
+ t.Fatal("export size too small")
+ }
+ t.Log("removing chunk datadir")
+ err = os.RemoveAll(path.Join(actualDataDir, "chunks"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // start second cluster
+ cluster2 := newTestCluster(t, 1)
+ var info2 swarm.Info
+ if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+
+ // stop second cluster, so that we close LevelDB
+ cluster2.Stop()
+ defer cluster2.Cleanup()
+
+ // import the export.tar
+ importCmd := runSwarm(t, "db", "import", "--legacy", info2.Path+"/chunks", tmpdir+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
+ importCmd.ExpectExit()
+
+ // spin second cluster back up
+ cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
+ t.Log("trying to http get the file")
+ // try to fetch imported file
+ res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + UPLOADED_HASH)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.StatusCode != 200 {
+ t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
+ }
+ h := md5.New()
+ if _, err := io.Copy(h, res.Body); err != nil {
+ t.Fatal(err)
+ }
+
+ sum := h.Sum(nil)
+
+ b, err := hex.DecodeString(UPLOADED_FILE_MD5_HASH)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(sum, b) {
+ t.Fatal("should be equal")
+ }
+}
+
func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
h := md5.New()
upLen, err := io.Copy(h, up)
@@ -117,3 +238,46 @@ func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
}
}
+
+func inflateBase64Gzip(t *testing.T, base64File, directory string) {
+ t.Helper()
+
+ f := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64File))
+ gzf, err := gzip.NewReader(f)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tarReader := tar.NewReader(gzf)
+
+ for {
+ header, err := tarReader.Next()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ name := header.Name
+
+ switch header.Typeflag {
+ case tar.TypeDir:
+ err := os.Mkdir(path.Join(directory, name), os.ModePerm)
+ if err != nil {
+ t.Fatal(err)
+ }
+ case tar.TypeReg:
+ file, err := os.Create(path.Join(directory, name))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := io.Copy(file, tarReader); err != nil {
+ t.Fatal(err)
+ }
+ default:
+ t.Fatal("shouldn't happen")
+ }
+ }
+}
diff --git a/cmd/swarm/flags.go b/cmd/swarm/flags.go
index 5e1ada632..6093149e3 100644
--- a/cmd/swarm/flags.go
+++ b/cmd/swarm/flags.go
@@ -182,4 +182,8 @@ var (
Usage: "URL of the Global Store API provider (only for testing)",
EnvVar: SwarmGlobalstoreAPI,
}
+ SwarmLegacyFlag = cli.BoolFlag{
+ Name: "legacy",
+ Usage: "Use this flag when importing a db export from a legacy local store database dump (for schemas older than 'sanctuary')",
+ }
)
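As TestExportLegacyToNew above shows, the intended migration workflow with this flag is roughly the following two invocations (paths and the base key are illustrative, taken from the test):

swarm db export <old-datadir>/chunks export.tar <basekey>
swarm db import --legacy <new-datadir>/chunks export.tar <basekey>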
diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go
index 2df02c0ed..ff786fa10 100644
--- a/cmd/swarm/hash.go
+++ b/cmd/swarm/hash.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/contracts/ens"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
)
@@ -77,7 +78,7 @@ func hash(ctx *cli.Context) {
defer f.Close()
stat, _ := f.Stat()
- fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams())
+ fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false)
if err != nil {
utils.Fatalf("%v\n", err)
diff --git a/cmd/swarm/swarm-smoke/feed_upload_and_sync.go b/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
index 6b3fed0c7..b5ffc43d2 100644
--- a/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
+++ b/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
@@ -26,11 +26,11 @@ const (
feedRandomDataLength = 8
)
-func feedUploadAndSyncCmd(ctx *cli.Context, tuid string) error {
+func feedUploadAndSyncCmd(ctx *cli.Context) error {
errc := make(chan error)
go func() {
- errc <- feedUploadAndSync(ctx, tuid)
+ errc <- feedUploadAndSync(ctx)
}()
select {
@@ -46,7 +46,7 @@ func feedUploadAndSyncCmd(ctx *cli.Context, tuid string) error {
}
}
-func feedUploadAndSync(c *cli.Context, tuid string) error {
+func feedUploadAndSync(c *cli.Context) error {
log.Info("generating and uploading feeds to " + httpEndpoint(hosts[0]) + " and syncing")
// create a random private key to sign updates with and derive the address
@@ -272,7 +272,7 @@ func feedUploadAndSync(c *cli.Context, tuid string) error {
ruid := uuid.New()[:8]
go func(url string, endpoint string, ruid string) {
for {
- err := fetch(url, endpoint, fileHash, ruid, "")
+ err := fetch(url, endpoint, fileHash, ruid)
if err != nil {
continue
}
diff --git a/cmd/swarm/swarm-smoke/main.go b/cmd/swarm/swarm-smoke/main.go
index 860fbcc1d..03e2cc2c4 100644
--- a/cmd/swarm/swarm-smoke/main.go
+++ b/cmd/swarm/swarm-smoke/main.go
@@ -37,17 +37,17 @@ var (
)
var (
- allhosts string
- hosts []string
- filesize int
- inputSeed int
- syncDelay int
- httpPort int
- wsPort int
- verbosity int
- timeout int
- single bool
- trackTimeout int
+ allhosts string
+ hosts []string
+ filesize int
+ syncDelay bool
+ inputSeed int
+ httpPort int
+ wsPort int
+ verbosity int
+ timeout int
+ single bool
+ onlyUpload bool
)
func main() {
@@ -87,10 +87,9 @@ func main() {
Usage: "file size for generated random file in KB",
Destination: &filesize,
},
- cli.IntFlag{
+ cli.BoolFlag{
Name: "sync-delay",
- Value: 5,
- Usage: "duration of delay in seconds to wait for content to be synced",
+ Usage: "wait for content to be synced",
Destination: &syncDelay,
},
cli.IntFlag{
@@ -101,7 +100,7 @@ func main() {
},
cli.IntFlag{
Name: "timeout",
- Value: 120,
+ Value: 180,
Usage: "timeout in seconds after which kill the process",
Destination: &timeout,
},
@@ -110,11 +109,10 @@ func main() {
Usage: "whether to fetch content from a single node or from all nodes",
Destination: &single,
},
- cli.IntFlag{
- Name: "track-timeout",
- Value: 5,
- Usage: "timeout in seconds to wait for GetAllReferences to return",
- Destination: &trackTimeout,
+ cli.BoolFlag{
+ Name: "only-upload",
+ Usage: "whether to only upload content to a single node without fetching",
+ Destination: &onlyUpload,
},
}
diff --git a/cmd/swarm/swarm-smoke/sliding_window.go b/cmd/swarm/swarm-smoke/sliding_window.go
index d589124bd..6ca3d3947 100644
--- a/cmd/swarm/swarm-smoke/sliding_window.go
+++ b/cmd/swarm/swarm-smoke/sliding_window.go
@@ -35,11 +35,11 @@ type uploadResult struct {
digest []byte
}
-func slidingWindowCmd(ctx *cli.Context, tuid string) error {
+func slidingWindowCmd(ctx *cli.Context) error {
errc := make(chan error)
go func() {
- errc <- slidingWindow(ctx, tuid)
+ errc <- slidingWindow(ctx)
}()
err := <-errc
@@ -49,10 +49,10 @@ func slidingWindowCmd(ctx *cli.Context, tuid string) error {
return err
}
-func slidingWindow(ctx *cli.Context, tuid string) error {
+func slidingWindow(ctx *cli.Context) error {
var hashes []uploadResult //swarm hashes of the uploads
nodes := len(hosts)
- log.Info("sliding window test started", "tuid", tuid, "nodes", nodes, "filesize(kb)", filesize, "timeout", timeout)
+ log.Info("sliding window test started", "nodes", nodes, "filesize(kb)", filesize, "timeout", timeout)
uploadedBytes := 0
networkDepth := 0
errored := false
@@ -81,9 +81,13 @@ outer:
return err
}
- log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash), "sleeping", syncDelay)
+ log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash), "wait for sync", syncDelay)
hashes = append(hashes, uploadResult{hash: hash, digest: fhash})
- time.Sleep(time.Duration(syncDelay) * time.Second)
+
+ if syncDelay {
+ waitToSync()
+ }
+
uploadedBytes += filesize * 1000
q := make(chan struct{}, 1)
d := make(chan struct{})
@@ -107,7 +111,7 @@ outer:
start = time.Now()
// fetch hangs when swarm dies out, so we have to jump through a bit more hoops to actually
// catch the timeout, but also allow this retry logic
- err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid, "")
+ err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid)
if err != nil {
log.Error("error fetching hash", "err", err)
continue
diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go
index 6c20a4fa6..7338e3473 100644
--- a/cmd/swarm/swarm-smoke/upload_and_sync.go
+++ b/cmd/swarm/swarm-smoke/upload_and_sync.go
@@ -19,26 +19,27 @@ package main
import (
"bytes"
"context"
+ "encoding/hex"
"fmt"
"io/ioutil"
"math/rand"
"os"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/testutil"
- "github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
-func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
+func uploadAndSyncCmd(ctx *cli.Context) error {
// use input seed if it has been set
if inputSeed != 0 {
seed = inputSeed
@@ -49,7 +50,7 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
errc := make(chan error)
go func() {
- errc <- uploadAndSync(ctx, randomBytes, tuid)
+ errc <- uploadAndSync(ctx, randomBytes)
}()
var err error
@@ -65,7 +66,7 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
}
// trigger debug functionality on randomBytes
- e := trackChunks(randomBytes[:])
+ e := trackChunks(randomBytes[:], true)
if e != nil {
log.Error(e.Error())
}
@@ -73,51 +74,180 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
return err
}
-func trackChunks(testData []byte) error {
+func trackChunks(testData []byte, submitMetrics bool) error {
addrs, err := getAllRefs(testData)
if err != nil {
return err
}
for i, ref := range addrs {
- log.Trace(fmt.Sprintf("ref %d", i), "ref", ref)
+ log.Debug(fmt.Sprintf("ref %d", i), "ref", ref)
}
+ var globalYes, globalNo int
+ var globalMu sync.Mutex
+ var hasErr bool
+
+ var wg sync.WaitGroup
+ wg.Add(len(hosts))
+
+ var mu sync.Mutex // mutex protecting the allHostChunks and bzzAddrs maps
+ allHostChunks := map[string]string{} // host->bitvector of presence for chunks
+ bzzAddrs := map[string]string{} // host->bzzAddr
+
for _, host := range hosts {
- httpHost := fmt.Sprintf("ws://%s:%d", host, 8546)
+ host := host
+ go func() {
+ defer wg.Done()
+ httpHost := fmt.Sprintf("ws://%s:%d", host, 8546)
- hostChunks := []string{}
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ defer cancel()
- rpcClient, err := rpc.Dial(httpHost)
- if err != nil {
- log.Error("error dialing host", "err", err, "host", httpHost)
- continue
- }
+ rpcClient, err := rpc.DialContext(ctx, httpHost)
+ if rpcClient != nil {
+ defer rpcClient.Close()
+ }
+ if err != nil {
+ log.Error("error dialing host", "err", err, "host", httpHost)
+ hasErr = true
+ return
+ }
- var hasInfo []api.HasInfo
- err = rpcClient.Call(&hasInfo, "bzz_has", addrs)
- if err != nil {
- log.Error("error calling rpc client", "err", err, "host", httpHost)
- continue
- }
+ hostChunks, err := getChunksBitVectorFromHost(rpcClient, addrs)
+ if err != nil {
+ log.Error("error getting chunks bit vector from host", "err", err, "host", httpHost)
+ hasErr = true
+ return
+ }
+
+ bzzAddr, err := getBzzAddrFromHost(rpcClient)
+ if err != nil {
+ log.Error("error getting bzz addrs from host", "err", err, "host", httpHost)
+ hasErr = true
+ return
+ }
+
+ mu.Lock()
+ allHostChunks[host] = hostChunks
+ bzzAddrs[host] = bzzAddr
+ mu.Unlock()
+
+ yes, no := 0, 0
+ for _, val := range hostChunks {
+ if val == '1' {
+ yes++
+ } else {
+ no++
+ }
+ }
+
+ if no == 0 {
+ log.Info("host reported to have all chunks", "host", host)
+ }
- count := 0
- for _, info := range hasInfo {
- if info.Has {
- hostChunks = append(hostChunks, "1")
- } else {
- hostChunks = append(hostChunks, "0")
- count++
+ log.Debug("chunks", "chunks", hostChunks, "yes", yes, "no", no, "host", host)
+
+ if submitMetrics {
+ globalMu.Lock()
+ globalYes += yes
+ globalNo += no
+ globalMu.Unlock()
+ }
+ }()
+ }
+
+ wg.Wait()
+
+ checkChunksVsMostProxHosts(addrs, allHostChunks, bzzAddrs)
+
+ if !hasErr && submitMetrics {
+ // remove the chunks stored on the uploader node
+ globalYes -= len(addrs)
+
+ metrics.GetOrRegisterCounter("deployment.chunks.yes", nil).Inc(int64(globalYes))
+ metrics.GetOrRegisterCounter("deployment.chunks.no", nil).Inc(int64(globalNo))
+ metrics.GetOrRegisterCounter("deployment.chunks.refs", nil).Inc(int64(len(addrs)))
+ }
+
+ return nil
+}
+
+// getChunksBitVectorFromHost returns a bit vector of presence for a given slice of chunks from a given host
+func getChunksBitVectorFromHost(client *rpc.Client, addrs []storage.Address) (string, error) {
+ var hostChunks string
+
+ err := client.Call(&hostChunks, "bzz_has", addrs)
+ if err != nil {
+ return "", err
+ }
+
+ return hostChunks, nil
+}
+
+// getBzzAddrFromHost returns the bzzAddr for a given host
+func getBzzAddrFromHost(client *rpc.Client) (string, error) {
+ var hive string
+
+ err := client.Call(&hive, "bzz_hive")
+ if err != nil {
+ return "", err
+ }
+
+ // we make an ugly assumption about the output format of the hive.String() method
+ // ideally we should replace this with an API call that returns the bzz addr for a given host,
+ // but this also works for now (provided we don't change the hive.String() method, which we haven't in some time)
+ ss := strings.Split(strings.Split(hive, "\n")[3], " ")
+ return ss[len(ss)-1], nil
+}
+
+// checkChunksVsMostProxHosts checks:
+// 1. whether a chunk has been found at fewer than 2 hosts. Considering our NN size, this should not happen.
+// 2. whether a chunk is not found at its closest node. This should also not happen.
+// Together with the --only-upload flag, we could run this smoke test and make sure that our syncing
+// functionality is correct (without even trying to retrieve the content).
+//
+// addrs - a slice with all uploaded chunk refs
+// allHostChunks - host->bit vector, showing what chunks are present on what hosts
+// bzzAddrs - host->bzz address, used when determining the most proximate host for a given chunk
+func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[string]string, bzzAddrs map[string]string) {
+ for k, v := range bzzAddrs {
+ log.Trace("bzzAddr", "bzz", v, "host", k)
+ }
+
+ for i := range addrs {
+ var foundAt int
+ maxProx := -1
+ var maxProxHost string
+ for host := range allHostChunks {
+ if allHostChunks[host][i] == '1' {
+ foundAt++
+ }
+
+ ba, err := hex.DecodeString(bzzAddrs[host])
+ if err != nil {
+ panic(err)
+ }
+
+ // track the host closest to this chunk
+ prox := chunk.Proximity(addrs[i], ba)
+ if prox > maxProx {
+ maxProx = prox
+ maxProxHost = host
}
}
- if count == 0 {
- log.Info("host reported to have all chunks", "host", host)
+ if allHostChunks[maxProxHost][i] == '0' {
+ log.Error("chunk not found at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
+ } else {
+ log.Trace("chunk present at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
}
- log.Trace("chunks", "chunks", strings.Join(hostChunks, ""), "host", host)
+ // if chunk found at less than 2 hosts
+ if foundAt < 2 {
+ log.Error("chunk found at less than two hosts", "foundAt", foundAt, "ref", addrs[i])
+ }
}
- return nil
}
func getAllRefs(testData []byte) (storage.AddressCollection, error) {
@@ -126,19 +256,17 @@ func getAllRefs(testData []byte) (storage.AddressCollection, error) {
return nil, fmt.Errorf("unable to create temp dir: %v", err)
}
defer os.RemoveAll(datadir)
- fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
+ fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), chunk.NewTags())
if err != nil {
return nil, err
}
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(trackTimeout)*time.Second)
- defer cancel()
reader := bytes.NewReader(testData)
- return fileStore.GetAllReferences(ctx, reader, false)
+ return fileStore.GetAllReferences(context.Background(), reader, false)
}
-func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
- log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "tuid", tuid, "seed", seed)
+func uploadAndSync(c *cli.Context, randomBytes []byte) error {
+ log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed)
t1 := time.Now()
hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
@@ -155,53 +283,94 @@ func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
return err
}
- log.Info("uploaded successfully", "tuid", tuid, "hash", hash, "took", t2, "digest", fmt.Sprintf("%x", fhash))
+ log.Info("uploaded successfully", "hash", hash, "took", t2, "digest", fmt.Sprintf("%x", fhash))
- time.Sleep(time.Duration(syncDelay) * time.Second)
+ // wait to sync and log chunks before fetch attempt, only if syncDelay is set to true
+ if syncDelay {
+ waitToSync()
- wg := sync.WaitGroup{}
- if single {
- randIndex := 1 + rand.Intn(len(hosts)-1)
- ruid := uuid.New()[:8]
- wg.Add(1)
- go func(endpoint string, ruid string) {
- for {
- start := time.Now()
- err := fetch(hash, endpoint, fhash, ruid, tuid)
- if err != nil {
- continue
- }
- ended := time.Since(start)
+ log.Debug("chunks before fetch attempt", "hash", hash)
- metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
- log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
- wg.Done()
- return
- }
- }(httpEndpoint(hosts[randIndex]), ruid)
- } else {
- for _, endpoint := range hosts[1:] {
- ruid := uuid.New()[:8]
- wg.Add(1)
- go func(endpoint string, ruid string) {
- for {
- start := time.Now()
- err := fetch(hash, endpoint, fhash, ruid, tuid)
- if err != nil {
- continue
- }
- ended := time.Since(start)
-
- metrics.GetOrRegisterResettingTimer("upload-and-sync.each.fetch-time", nil).Update(ended)
- log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
- wg.Done()
- return
- }
- }(httpEndpoint(endpoint), ruid)
+ err = trackChunks(randomBytes, false)
+ if err != nil {
+ log.Error(err.Error())
}
}
- wg.Wait()
- log.Info("all hosts synced random file successfully")
+
+ if onlyUpload {
+ log.Debug("only-upload is true, stoppping test", "hash", hash)
+ return nil
+ }
+
+ randIndex := 1 + rand.Intn(len(hosts)-1)
+
+ for {
+ start := time.Now()
+ err := fetch(hash, httpEndpoint(hosts[randIndex]), fhash, "")
+ if err != nil {
+ time.Sleep(2 * time.Second)
+ continue
+ }
+ ended := time.Since(start)
+
+ metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
+ log.Info("fetch successful", "took", ended, "endpoint", httpEndpoint(hosts[randIndex]))
+ break
+ }
return nil
}
+
+func isSyncing(wsHost string) (bool, error) {
+ rpcClient, err := rpc.Dial(wsHost)
+ if rpcClient != nil {
+ defer rpcClient.Close()
+ }
+
+ if err != nil {
+ log.Error("error dialing host", "err", err)
+ return false, err
+ }
+
+ var isSyncing bool
+ err = rpcClient.Call(&isSyncing, "bzz_isSyncing")
+ if err != nil {
+ log.Error("error calling host for isSyncing", "err", err)
+ return false, err
+ }
+
+ log.Debug("isSyncing result", "host", wsHost, "isSyncing", isSyncing)
+
+ return isSyncing, nil
+}
+
+func waitToSync() {
+ t1 := time.Now()
+
+ ns := uint64(1)
+
+ for ns > 0 {
+ time.Sleep(3 * time.Second)
+
+ notSynced := uint64(0)
+ var wg sync.WaitGroup
+ wg.Add(len(hosts))
+ for i := 0; i < len(hosts); i++ {
+ i := i
+ go func(idx int) {
+ stillSyncing, err := isSyncing(wsEndpoint(hosts[idx]))
+
+ if stillSyncing || err != nil {
+ atomic.AddUint64(&notSynced, 1)
+ }
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+
+ ns = atomic.LoadUint64(&notSynced)
+ }
+
+ t2 := time.Since(t1)
+ metrics.GetOrRegisterResettingTimer("upload-and-sync.single.wait-for-sync.deployment", nil).Update(t2)
+}
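For background, a minimal sketch (not part of this diff) of the proximity-order notion that getDataKey and checkChunksVsMostProxHosts rely on via chunk.Proximity: the number of leading bits two addresses have in common. The real implementation additionally caps the order at a maximum PO, which this sketch omits.

package main

import "fmt"

// proximity returns the number of leading bits a and b share, i.e. the
// position (counting from the most significant bit) of the first differing bit.
func proximity(a, b []byte) int {
	for i := 0; i < len(a) && i < len(b); i++ {
		x := a[i] ^ b[i]
		if x == 0 {
			continue
		}
		po := i * 8
		for mask := byte(0x80); mask > 0; mask >>= 1 {
			if x&mask != 0 {
				return po
			}
			po++
		}
	}
	return len(a) * 8 // identical within the compared length
}

func main() {
	base := []byte{0xa9, 0xf2} // e.g. leading bytes of a base key
	addr := []byte{0xa8, 0x00} // hypothetical chunk address prefix
	fmt.Println(proximity(base, addr)) // 7: the first seven bits match
}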
diff --git a/cmd/swarm/swarm-smoke/upload_speed.go b/cmd/swarm/swarm-smoke/upload_speed.go
index 20bf7b86c..047ea0092 100644
--- a/cmd/swarm/swarm-smoke/upload_speed.go
+++ b/cmd/swarm/swarm-smoke/upload_speed.go
@@ -28,14 +28,14 @@ import (
cli "gopkg.in/urfave/cli.v1"
)
-func uploadSpeedCmd(ctx *cli.Context, tuid string) error {
- log.Info("uploading to "+hosts[0], "tuid", tuid, "seed", seed)
+func uploadSpeedCmd(ctx *cli.Context) error {
+ log.Info("uploading to "+hosts[0], "seed", seed)
randomBytes := testutil.RandomBytes(seed, filesize*1000)
errc := make(chan error)
go func() {
- errc <- uploadSpeed(ctx, tuid, randomBytes)
+ errc <- uploadSpeed(ctx, randomBytes)
}()
select {
@@ -53,7 +53,7 @@ func uploadSpeedCmd(ctx *cli.Context, tuid string) error {
}
}
-func uploadSpeed(c *cli.Context, tuid string, data []byte) error {
+func uploadSpeed(c *cli.Context, data []byte) error {
t1 := time.Now()
hash, err := upload(data, hosts[0])
if err != nil {
diff --git a/cmd/swarm/swarm-smoke/util.go b/cmd/swarm/swarm-smoke/util.go
index 87abb44b0..b95f993e8 100644
--- a/cmd/swarm/swarm-smoke/util.go
+++ b/cmd/swarm/swarm-smoke/util.go
@@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/spancontext"
opentracing "github.com/opentracing/opentracing-go"
- "github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
@@ -59,28 +58,25 @@ func wsEndpoint(host string) string {
return fmt.Sprintf("ws://%s:%d", host, wsPort)
}
-func wrapCliCommand(name string, command func(*cli.Context, string) error) func(*cli.Context) error {
+func wrapCliCommand(name string, command func(*cli.Context) error) func(*cli.Context) error {
return func(ctx *cli.Context) error {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(false))))
- // test uuid
- tuid := uuid.New()[:8]
-
commandName = name
hosts = strings.Split(allhosts, ",")
defer func(now time.Time) {
totalTime := time.Since(now)
- log.Info("total time", "tuid", tuid, "time", totalTime, "kb", filesize)
+ log.Info("total time", "time", totalTime, "kb", filesize)
metrics.GetOrRegisterResettingTimer(name+".total-time", nil).Update(totalTime)
}(time.Now())
- log.Info("smoke test starting", "tuid", tuid, "task", name, "timeout", timeout)
+ log.Info("smoke test starting", "task", name, "timeout", timeout)
metrics.GetOrRegisterCounter(name, nil).Inc(1)
- return command(ctx, tuid)
+ return command(ctx)
}
}
@@ -142,11 +138,11 @@ func fetchFeed(topic string, user string, endpoint string, original []byte, ruid
}
// fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file
-func fetch(hash string, endpoint string, original []byte, ruid string, tuid string) error {
+func fetch(hash string, endpoint string, original []byte, ruid string) error {
ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
defer sp.Finish()
- log.Info("http get request", "tuid", tuid, "ruid", ruid, "endpoint", endpoint, "hash", hash)
+ log.Info("http get request", "ruid", ruid, "endpoint", endpoint, "hash", hash)
var tn time.Time
reqUri := endpoint + "/bzz:/" + hash + "/"
@@ -170,7 +166,7 @@ func fetch(hash string, endpoint string, original []byte, ruid string, tuid stri
log.Error(err.Error(), "ruid", ruid)
return err
}
- log.Info("http get response", "tuid", tuid, "ruid", ruid, "endpoint", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
+ log.Info("http get response", "ruid", ruid, "endpoint", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
if res.StatusCode != 200 {
err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
diff --git a/cmd/swarm/swarm-snapshot/create_test.go b/cmd/swarm/swarm-snapshot/create_test.go
index b2e30c201..4cd78f35a 100644
--- a/cmd/swarm/swarm-snapshot/create_test.go
+++ b/cmd/swarm/swarm-snapshot/create_test.go
@@ -21,7 +21,6 @@ import (
"fmt"
"io/ioutil"
"os"
- "runtime"
"sort"
"strconv"
"strings"
@@ -34,9 +33,7 @@ import (
// It runs a few "create" commands with different flag values and loads generated
// snapshot files to validate their content.
func TestSnapshotCreate(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip()
- }
+ t.Skip("test is flaky. disabling until underlying problem is addressed")
for _, v := range []struct {
name string
diff --git a/cmd/swarm/testdata/datastore_fixture.go b/cmd/swarm/testdata/datastore_fixture.go
new file mode 100644
index 000000000..6a147a6a4
--- /dev/null
+++ b/cmd/swarm/testdata/datastore_fixture.go
@@ -0,0 +1,1390 @@
+package testdata
+
+const DATADIR_MIGRATION_FIXTURE = `H4sIAJSqh1wAA+zbBVQcXbow6saCBZcAwd2hm4bGCe4eIFhIA427SyAhaNDgFtwhQII7wS24BQuu
+AQKEQPC/Pp3vrHvWmZlz78xdd93ZWQ/VsmvXfmtX7ap6V9oW4e3q5uiC4AH96wovLy+Ml5f216UA
+/69LXgj0t+XvhRbMB4WCYRB+XgEILS+YD3gJouX/F/bpz+Lu6gZ3AbpiA3dwdP0f6gHVLCz+h+9/
+j+PP5f9Hiu0f46/zWJqLC8ILFuLi5eMCQx6DIVxQAS4oLzcUJsTHxw/EZMDFxQuHg/mFeCFCFnAw
+n4UFXBBYmglYWFgIIEzNEBZwM4SQkLkAHPpftwGsK/A/jT+Mn+8v4w+m5YXwQ2C8IFref8cO+P/5
++PvSw83NXRCurvTC9P/o6NJz0pu5eDu5OdIL+9KbWTtZIVyAteEIV+C4EeQyc3P5pcKvH7shvNx+
+adgcAbWAAW1C+PnNzfihvKYIfkFBPgic31xQyByGMEMIwvkFwQIWMIQ5FCogAAbeQc0QpkJm5uZC
+AgjeP9tzgrvA7V1/2ay1B9AuxEJASABuBhWCmEIhCFNzU0FzmIAprwA/P1wI2JwAvR8nva25BVDT
+9dcO0//69m+NmNvaIRzohfkgnPTAAiIAAUOhnPRO9MJgTnogJEFOele43W8BwAQsIGAE2MICZiEE
+NhUQhID5gQUYCoMCJ4eQBZQXym8qADMTRJib8cLNITAYUBMCBvOaCQrxw/h/6Yg93AxoCWYuaAaD
+geGCEAteCB+fqaAAHwJqagqBWAhBBXj5EEJQYO/D4IIwM6i5AAIqiIDz8fHCTKFwXqggL4zP7JeW
+rM1/iZ3PHG4KR1hwWQiCTbmgplBBLiFzPjMuoD9gC/Nft2sGxOuBcHG1dvwlSL//dvxdPeEu9v/K
+yR/02/zPz/+Pzf9goCIvGAKF8f1n/v93lN/G38HRHOH6LzsK/vnx5wNOhf+M/7+j/Db+KurSyv+6
+bfxy/YdC/8H7P9iv4w/m4//P9f/fUX4bf1Mfn3/43u6fnyb+6fMfuLhCwf85//8d5W/zP/Ak8C/a
+xt+7/wf/X85/KC8f9D/n/7+jmAsB+10IYiYkBEcIAHtfUFAQbgrcrwvwQYC7WLApAoiaDwyHAXf2
+gjCIgClMAHiUFzIV5Dfnh/MKCoD/3+7/f8r/vfK/mP+BM8ENwfVr1oDb3PQfuBz88/M/lA8G+8/8
+/+8o/4vxN7Nyd7D9Z54W/vnx5/t1/v/P+P/ry/9+/H+7dHPbOVr+vW38j/f/ED5+Pr6/jL8A7Jfz
+/z/5v39Tce5INScFISH9/vaXJRISxj0ndxdrN+8Jnj0qMlQk5D++RPoFCsafKyOj/wIJgAxAAaAC
+0AD3AOgADAAmAAuADbgPwAHgAvAA+AACACGACEAMIAGQAh4AyADkAArAQwAlgApADaAB0ALoAPQA
+BgAjgAnADGABsALYAOwADgAngAvADeAB8ALAAAiADwAF8AMEADCAIEAIIAwQAYgCxADiAAnAI4Ak
+QAogDZAByALkAPIABYAiQAmgDFABqALUAOoADYAmQAugDXgM0AHoAvQATwD6AAOAIcAIYAx4CjAB
+PAPAAaYAM4A5AAGwAFgCrADWABuALcAOYA9wADgCnADOABeAK8AN4A7wAHgCvADeAB+AL+A5wA/g
+D3gBeAkIALwCBAKCAMGAEEAoIAzwGhAOiABEAqIA0YAYwBtALCAOEA9IACQCkgDJgBRAKiANkA54
+C8gAZAKyANmAHEAuIA+QDygAFAKKAMWAEkApoAzwDlAOqABUAt4DPgCqANWAGkAtoA5QD2gANAKa
+AM2AFkAroA3QDvgI6AB0AroA3YAeQC+gD9APGAAMAoYAnwDDgBHAKGAMMA6YAEwCpgDTgBnALOAz
+YA4wD1gALAKWAF8Ay4AVwCpgDbAO2ABsArYA24AdwC5gD/AVsA84ABwCvgGOAMeAE8B3wCngB+AM
+cA74CbgAXAKuANeAG8At4C6Wl5iFHISEgvRf5g5MK7idnaMnAuFQ6FHNNwZCQv39e/Rfvse698ds
+AmKPMFPaYz7s0n6HO7RPO6AuM/NznEZwSi9chdYnqE/tHPxHTV8/JGCG+XPaQaID/b01UTpfvkTC
+xPxjDbY/t/n3VvzL9IaEhIT6l9d/bh/piqENJw+EhP3XsP7y9T/WPSTQ37r3t54i/T/aU2Sb9Y8j
+o/hIhP91AP7cbDEtH8K9Stuqt+iFnI6DmbIBkV1z0qwxl/lGA9VCybJxTCEI/7eqd5/vVkD4SnKK
+cr9eBBSAf6C7VVCkrJc1cE1UVQWxA5UwQGhIhCCUX4+EX64UFCC0348KJaSHf77WQmL9vQ4Qaqg1
+CPX3zw1+75XC7+9/W6JkI/2txWzkP2qjgrJR/niNJPBrDw9AghpWjm6OrlaOTrR83LwgQSlFVVTU
+3xr99TXTry/xp6gi52JAVai7LzFT9n9K+d91gggwkARQQShIwFWEAAmFAOmuG0QDNIyG9Gv5vWMo
+yKhIaKB76BiYWNhAhS584MgHPkNBvYeKfg8V454rcJ1EIUAlpEMDE9Hj35PUhBDDnRnQA2hjpUhy
+tPg+MJrykna4jEvHQV9hrGo/YjJ7wH8U75qrIVMVeMz8WMA8wa3z2URQHpnTGossDFGt456f2BV8
+Minn8XJdXteiBhOLnIKVjV1QSFhBUUnvib6BpZW1jaeXt09IaNjr8DdJySmpadkFhUXFJaXva+vq
+GxqbPnb39Pb1DwyOTU3PzH6em1/Z2Nza3tnd+3764+z858XlL1ECexfpj/LbQQEEgYr2tyiBizMy
+ECcKMgoaMhoyKmooMG4EKPiEdKi0YEkiNF56iCbxo3t8DHApEmctaIAGY2yO9AdSdFN+7aNxpmcd
+Mqsuxw9emQk4Mcs+jmPJnQCaJfs1DhgQiJw8EIqO7i/BmCMsfonH1c3d45eYXgYGBf8WV3xC4m+h
+5eX/HlxVdc3v8XV2/RHh5B8hrq3/HuTJn1EugKRBaCioqKgoaMAfNDQguPsY6EDBw8bGvE+AR0hI
+gEdAQERK+YCImIKEgICMjoyCioqGhoboAT0jPTUjJTUN9S+NIAGrot9Dx8XAwKUmJiCm/qfL3Tzo
+Psqvu48A2M/Hi3ZN6A81nE+REa04xlmrvA22X+D1hLgTrapCwqfbRYwn+y9PajJaaeSgLheZjRJf
+L3Dbk7PV4F6H39JdwW4d3xUrmKojC3IU87IDtWqZFAbHEwvsqx+SW23yGz7Y731V/a46KGBSpXDY
+OMKIwHTgNgxtc5teJygwqvdVSzEkaUNDwYeQll9Ubq93S1u0mV/KSJmNmXBe1LaYWXc4jqEqkAVG
+4uTG9lgDTppbN1ie+2T2CXOckFLCQfjekLDqieqBzmE4gxrxRjZhvgtbVLCIqcDn8UJ2Frhp5LAK
+IYZ9kVm+9ceDAkN79jkDjZ9T9qeZzCnTfETKHsNrCQd5zXcgiVNxtRA7zhSGhc51KooU3DBSIy1C
+mfdIJMxRQsqstD9Gi6QfvM1I+G6rWqHLarMq3a3CoJLz6I25ItZEgTkJtvWCtpoYjsrHK0vbp7uZ
+hIaNNdU5tbveHooGW87s5hoaGTMtwwPkDk6pa8YKLTdi7E0kh2521AEfiXW6Xz+2GOns0BCUE16f
+hvWNcvjBbJOTWco3hHxQNTvBpZw/z5tgpw/CTt4WxL9TVIGMrblhJpSoBBYyF2rlCnx+Pc5oT6RZ
+F7RfILjurB1REkQuZMVZXDxvEcz8uEQ1DWUzjWXn2riuXJg1Oq1Ruwaunuc9mn/8A2JmPkA9xA0e
+zwtIwgiWRpUklILUsnDIphih2O1jvN6em0B9ZmOU1jjxVa2n93FcItY0+8S7V9mRhJr4OrJ5rqWn
+B1k1274cQxxZ8qEpnp4i69vIbfAtH/4PNk+mG148kRq7YNQL6X6MqFES0mgYJ5QTEQN5BLEfPEpB
++1yja1HAWrEcIPD688JFXkb17k/tmCyLwljmaK1EOlLGvIBFEDSYNclMMQkSITP5XtiMgI+w00yS
+iC3FOtJ6nF5rwhwhlExW63IFui/04Smkr7TYIf0Qnz/gULiU/PKLs0y+TdTZvGWRTQaRssv0YN08
+mnjGlnhJ3+TT3q6k0ac9H1vRbPxNPl+fPxeCvn1+gxxc8MptWO4tNm/Dvbn2wcXL/OI0MdkuLK+6
+kdjsLt66MUR/EttIwl7ndHP/q+nXz24zhi0cP887jdRv79jOMcC8hM+sywdZH3K4Y2487Kt5QMh6
+9nIdyfiCnRvkKuzjVBksL754WITkH6jB+nBaQ/VpWZZG5XbGY2GuURkfsmxKYaKT5Mh6dMzvGNgv
+kd8WWGZrwasKXVIV0KNqZawDZHaeFc8F7XwSmCdiS0tktqpIE+058GEfSU4nfMZ/5hU1POf6lMZX
+IEXMsP9xFHom/6Hi0lBnfCukPdG+FRcHXGrBz9Vlye6h2YFeeYH+ztmt8Y3Qxo7OHUiIsxXG/oS+
+QYDWOo0Kp8sbOmneoiF688IkBToanxRZnd8lXn0a9eGktNLwuw5/r5PRD0y4KRQpnzopNts/+Lml
+VcE4vjE+5Tflvu+HqcXsezsoL8cLcNgh1oay9qx0ijKJXyt2fLAZVEauf+yyXpyvgnR9GhXmJSEL
+P0qFgt1PFXWZlxR+YNQWTRuhYTopj/iccl0FCHdjtgbyw986fvCQ9P8UQMppDE9WGf6wreVkM04B
+JcrjzSB7nbBWOpCWyKCpNINUYzPOgUBTY5EybYnEyqHg6B/UiQVd+C8snNTNF9kxCTpsEI98rJwR
+zZpoaLLL0MpK9pyvyl8yOqIeKDRqaFrl76vfN3wYG/32vtCNC5oFqtWuoZIli0FvLQ8Xq4LkvJBF
+QfZFT7MRGMTMrIjCrFnqhzd8w8mRV3wQQc3XtOY8HwrT3rN0v3mqoy/M/NzemFRa2kxtK6seK+Wx
+3ZrUeIVfluKI+5MutK6h9ExH8wEf0cu6icpcre5HC9Vrvu8+vroF3984yjfKMh5wQ5JTiy4rNXyW
+Edjx41roO6fvrQFxUiTXh53NxohMs/Guxq8/h6LlTg3a2Ioc6T9vWM7W2Ou6SWOOo7LLy+y+aITb
+Z5k05EDORRJ3Tlw7H8fE5b8Gg6koGPIYycNeDqopNCSeVviZwIIfJmdPJPiFtzOrtepb9zs0amy0
+kqZ9x8necx0tV+efOckQdG3XuS77oZf3UTIZkkfweAkxBbGnSfDxiThUKzM1vcj4OegaARvVYN63
+S7F7K6zAHSKWaKId94Er8F0M2cRIvad66YVJZW6rq0/998Z28R8SPFv765tE7UpX4hxVj9so5rbj
+g4e5vyKGtVpqK1XMF61TZsUnK9d8IuXl1KxHvIRKHsnLP9C2156hzWFl+/rzcKeIz0rbjPP1vhC3
+SeTpKPSbXL/GyFB12XO9Wm/NRtu6H43kC/NuDJ+rlDBiOz1GlTzCS2QadFXeFG+EjoeaR5HtiZvk
+bQtD8DEyDyUHarAnXW4kmA91+b87CrGhaQRpZuNjPtMitw7iGKGULVhi0dnz9td3VbRPWrwD5Xh/
+M6A4zLm8qF+5xA5VzwjbrBe/idHfkFS5Wuby055MxJuW4L6R3T22F5qUGzMIKk6nl7xYaNlsquRo
+reWrE23V8ZvNOsevEcHpq1TtEoFPxuDSo3yI+Xxs/MVKd4nJppII7+I2RL3QOyYHUSRjU0/5qU05
+fwvaKBQ2nrx7azJV6i7mST6yGpPnfKi/I/RqVp9Fh/4ZFbSi/PB6KVR9wG1xXkhbikIqiG7T+lDo
+piMcOUTdvsutSCSHIOteDMeSntxoalPh/nDYif7MQfyAz77glnXI0DJfxtuvacTxFzc7CegqlI/4
+cvLJAleKy8V4NPVmR8dp0banE6laxXdY75+fEKKW8mj3u7fMi2iRDIyzJQTGsxXPI4WNQ3O10gof
+cr1szoSN+9HAOg5sDh4ocexxBxQ3W9yBcLgG9PQGJicCgw7OehQMrNzrlfe3Vm5zLup3qY6YI2UG
+aV+pKHCoVeJsF1hTyxfWRX8FT5hotZeh8Ikj7JYE4NvTniRt0Tw+I/JlHdG3JXCb0qXqSUnPa5oc
+CyO2pW9iQxMeb984sEEyQ0zkR0yK9xqfFkUaNUFcZoNckk49pW2eUQnz29bf29nc8Ym2zP1+SZXJ
+edFDJP/DsUjok9t1Vll3PU5gE/l32tIz8NPt0d0P5X5PKi4b+Y29n6UYoDSX+SVleLHEfvxBl2c9
+mZULLfQgKe+jmXS51HmzVVQWmC0r5JL1IHFHdeT07ZVDpv87lUULe0Tg4nJWaaOJhZtTBI4zonlE
+yDjUbMgtNXSvAjFaOwsb1/7w/LC0h8nc3DooH7JAgl1O5BDu25h1iffwswRPSIMI8nR9jSf7WaV3
+s4Px7p7b2ffKsedbc9yfWjwdLpKT9gw9Xb6byP60VE/KDFnovTiVUC56ZU+j+IVLFS+HraU5TO9r
+70xgcuFiy9eok52PmRIRRVNo2z5U1dz6veEnjZVy+/372DwqnSEDyiqfoxkKtrM2222texvWxIoy
+zzK9QQLVfpMF/OWypcbqb88oi+RF3UVFMiC1vsNt63sDI3IrHwrt57pmT2/1xk/cfV4ofrykiQ+l
+kRw7WXCfdBXDwSsf/BTVRP2WDbIYfrDJoziUndpUZGx8X21BbrDSNEPGiOGFREkJCj6Pdxz/8gPH
+r450w27PqvS2K8bOI+ZKqJp8rkOkS/mAGw2JPchyuGbg7Db7l4AvVDzpOXVhXUUI8zRw9qm6KEOp
+WLJkyC2Nob3mbO8JxTyBobfbrcE25WS0Q8SN3JiErJLAkrdgY4GenFif3NId6F6b+4CPunBejMvU
+ZZO9TTwbSlCfMCXBIkFwsjpXHgwnw3aHa+fiDG5l+6m2qMxP1ZjJ/oSKbc/HZ1MsGeY8ud2eLK86
+mhCeskfFOr3PmXKxGYwIUR9srSl/voAFkn21d4nmE6mfeTzI7uWHCnl4OwqNKrfXLOmw+hyQITVq
+bPSjmePBPvXXJPi8i3VBwFwsVTW0T14ksCbBlybf3/T8qT2bl62lEXWo4PxM8sRK+Q7oKO1larsj
+6sLYZ4vz76N16/siGdp1HnJ4T9EQJF2RIt4+h5/FREVNfSzSLGWe9kUTpcvJqHKVdc/Gz1rozhh/
+4fd+W5M8fqGs9GagIaflYGXm+bAJTD27ppFFvOUFbPGirszuMHRS2yEgVHJVyBa9UsKEJP8lW5GD
+Iy/f5m78lgOf7x6D8By7cY13aZuhL4l4u+aYNwtpd6nNmE4Rprs7DjftNLcrg3t3Wl/td4dzO3uN
+uaTUJiLmsDXvUQaq5kP5yYb7wrYUQnjyFDxCQ0anB8pDzbSJJC/zP89C3X88dQnMoc+w72bBVLm3
+gz+2Ql76INSY/dXF4uGb+xSzFd+rt/wI60cndqc0IWqpc1gdudUIOm7mL42Fi9/2XKwZ778fFCx9
+8KZQ16EoZqJod+dByKeyioeX/iLYAoNIyOigvyY4/l7eAKUTCflvWQ1ktj9TDn9vxb9kNZD/ktVA
+/ktWA6VgVattFB+J/L9mNf54TEcupt0h3GP+ilJCqHmM56Dq+/m5ZBlRs79Dx1VgHh/zXMezj39k
+NUi+iT0NIb1nbdsuITE2k5VcohVV+Ox90YukGVVHIc76vVR+v0oR/6z0/XDd+lMfFffvMzfqPLlR
+iWGvy2dkdYuFaXmXYMnA4/XhpbfiJ7eWOfJ+X1FhwQ7jqRTjZaxaR6XvVk6aJeGhuPal3HC15cp1
+aZA5La3H0v364XT3Jh5+Z/XnUeMTwvBtj4l7HX3qEoI15OGXuA7fBFTpGZ5jRMZhLx5nfK/kMTQz
+yANVGljoRfp8UC4dXy0PJOHvU51YGmh9dOsrtbhYrI2Sh/rqnm8FUaOx0IxuLTwKw+1z7Ddhfoft
+ZNM3ytP1H/oK3yKkdh/SiobdKA6F1v0wWBJqtHQTuPjBV9K+VZOO6HPMxHBaYFZQIxFjEC7SwPgE
+Gvp2fV77XSwd3d4nqGHvy3ns/ndnfViu4JaljrFfRN9IcW2wu/8inLZEUfMb+z7ZdEBnUf8AuTfs
+q7xZp3fG02RxnsnL69ZRvOkf+MeGEobH1z7bUhdXZWSpYrdKlULneDE5PLH3k+spJbwG+lzFuz4i
+poGHhStTPhntuLo2RPnuwVvJUOw9I1ob4mZEZt4s9qx4RC5w/N64qsG5W5lIAnkkahJnF+sugg5G
+Lpamgtfs9TMC8yI9arsrc4Zwl6kfDvhFBKIn7l3cFhVxw1M/6nOZzLraYynqtNIPb1phFzh0Nrq8
+RY1UZrUFp9bDqumdFrcSin0vVM/GMvge+scnfvUWE35EFW+RU6CSGbgIfm+kNb3FsV3jnzzQsEEO
+91xpyZBVYcNmFPy8A1PVfRMi29NuPeTWQt3VNvTqAMLTbfpte+z4NgmmzkTzbqTxSPeGrHqifsTP
+5mPWPT1FnfGRIDGrtIX7atCP5YkZj1ero42v1Pn2GvypkjZzW9WsfUp9vnFIfR05128xTGXvcZjN
+Cy//GkF10TS3ny4hgvL4h8CQmQ1sa724VUjqBHe0Uhtr24FYjzzx0XxX7APhdMuWdTvPU44X1Ht1
+GztWYsKb3y8ui3bbRUfzejOFHKPrjZRc8znR4AJwATFaM0f1QoWGwh2PzR2/9M2JoUOjrLRc8YFP
+TiODmeNluG/TwCkbu5MEMmoBpribAQ8NdGONLY4rxF/qapfVpcft9uhGFD8XLPhBHZXZhph05wZP
+JIx6CXKkCxbZNSY9yKhM4eHbB2+TUFRWXEyTCb+PLeTXWWVTJGbCfxTbpk6E3dMvNSqhxF3KedGc
+POPyYE84rzrK68sWR5EgO4fR7iyvQ2yXJWf2ejNC2Ctc5FCvjBT2mEpcjMQixPDZXm9eznfhvYv0
+eepIHbbMd9+etFZtHlSIwATPWtyNVhkkF3bD9D3EHSd7dv1YQkQX0ncetKG7hc6zjBxi/Sgt/fB8
+kOQ9F3EGL3c7u2wMQQxiYUS3+Z2b7rSAoXOStHsliW09sdOhTcPudz/8iRXXpCt7Z6KZ6NwtRK/I
+x3dS2VzUkx7OJfM0iGwRauWcWergsjPD1a7v59CyCpX9mmBLYkyPGgu1hHo9RGkU4UBphosvpwO3
++XHd64UjJs17LMVed6CC9YZ3c1OrZd/4k/bfk0vyEeS+S32VAHaMRSY+tfjQYB9fDk++p6E7KMmK
+MZE/ov5MrsLbJ/IgL9O/dk7HLdtNGblmK6kCGYN0qap2SEDaTaETXTmCWvbQp83+Q4OWOGLPuzSI
+wbyu1vTrVmu4j8N64uGG+YeG/cGR29zlT2cUVHj3aqOoddyOjdhbbUaE0FUuGzB2U+t7BiFzBnN6
+9QOXvv7ESn4hCx3L2F8Wf7ru+MhjGBrVfa+jThly/pj+njSdNzOmcPf7daHfSO9Qx5eWiiP/qhQm
+efmkCVOPrTh+MGnMw/atxVGNVVlpUqVlhgbs2qB0BEnpzgWMyuibUN4sd9jrN4iPnB0WmdE7T/3X
+px1xNmFLn7CqJuCK7O/G8FKFK2wrHNJOxRzEKDnODqp2W7cSjVwS4LsJufJgiAURGN9Q/pGbzU2A
+G17ZDHuw1KniY7ZplASLjbct8ybTn82qA+thMK3maKLq4MGuaLmxXNB8RzXWsX5haZpxgarIEEu+
+xqzxYxxhf7yx4UqJiLLqDWHwJJkeTNTmp2eFcPxMK5shzadzHZv5Ln8HsxivyeahaiZI4onHbIYy
+ZWqHIY2MCAyj0ILC1Xyby6pCv1ErT3p/UZlat9aLHcdPXaXtaAFl3Y+MBteghp/0u3hI/f1Wf0PG
+Utjmw/dpAofzEptzC1EyFwb1FQ9M+MdtpqKcBj56n9qpyzUsS9B47rExBDa1qRgKy4AaVFSKZbBl
+GCG5vMnDp7zqtN+zg5VQ56c3Trl6medK8VtdlF3qOZ2VZGrpHpeoEQbJJD7L5bCbxKKSKVFl4VQh
+yNVIyDZIaLZYJb/82Oaf6/78hRz3+2x62dgBbMIxqRlKqT4Ic3k5wvAtfwLveyd+AeutpkcPB/r2
+qD3bOOPCxo0KFuEf9sOj1TUOORZ2I9R8s7OXE4RZ/K0WdMd59T9aau23kryZiTTaNt8ZfivoUETJ
+MbDJJmNcP8DczZUlLvVBS0ikShu73Cypb1KgOrlIRM8uq2pBxOFrfM60Tg9pAz6J2pXQ048pFPYX
+Q5Qe51pHS51ItFYpxfi73SiaK1i5OWCSAl5MRhes9z84WLC8DtsTOYahrIvwIe91XFPnSimBN3OK
+r3XfpsAGqecqmdy5aVl3OSyDIMp15kqxjMohvXrvUxaX+OxVTlrRrLjxclx0IAVk7AYKqZSvbSlK
+NPEl6ZhQzaUwXBYkKbHxZDcybEgj1dVsPH96FoXt/RypFCO3Ws4kTUUTfhaVoxFfLxKoZClDpExK
+XlOOpbXVmTuuNFQvPhxyIlkkWS7sZObuqvZuhDDTRFKTrfQkyoc2Ka5e5EEUlfDpKHX71sLbFPyn
+MvLojqL4lq8vKdLb/IQd0rNaFCVF/JC1c/niGZXelI7HMTGbIpTMNXNN83eCID7ToZjGY98domJ5
+oVGElrZSr8JJU+PBOfE1qPR4MtqFFSWqGfz3YapUb5qusI6tUhAZ2Q+9lS58+sD3xcBDes/n2Zkx
+5umT48NSxpg2BrDCegcVkvW4VEg/WfcPyURJD+xEXoQqFdva2ppIeivObic2v7SOw+AP85Xm1tLB
+pO3/WI01Tjrww+8eQZX1kx/UKAHi9/LmtUypQQbMHR/T0aXv5kEYSEggJDTknrWnTGtHXqQMPG+K
+BrQ3Hgxoq02yqSyYf6j9rJDg/zotK1f5oYbVAp3yYk40eNke2rwmNj8nRwZ/LUunSD8Hg+H3cDpJ
+p+7TJCd/UWpbOWtNvVIaaiDR0lTkaq3x6/EF34HCu64KazZSSOLYN87gN5+WSjd1UK17cJ5+kmgI
+PBfbuplSUKyrKnl9xbHii/fluMEy4Co5QsVlbtC8hRGZsGoioehQ76LbyTL0MkD0eXPfVfMXIv/t
+E+ixp0fnKsdCEY5HMiaCHc/hDvTwQM/bKDx3s8uShY10sYBe9Ym/iS+zSs0o2Rhwd1+TBD9VFGIa
+7hYOIqLdMvQ/ZjDwyGNQ4K8OPKfK283IJL0DNTac+SpXLtbdgR4Enht/ZYy7jWB7BSUjoqjGIlF1
+UVhZd74DRVaNLi7GFQwgzGvYaFlajV9kyuP9IDRJdvN90PZpuYmm2Yq2su3d+7DO5qUrjxfrivwu
+ORwqxWzOA1LkK6Nu4e22H1ZeOGVgN90M1dx0pS+IcvsVxH4jLRBdPbgZUj8WlHCdVyUdgvQnMcUq
+7VcuR59lrR35f2uoa0j01OS6z5zuRaxMSdTRbSSxjjf6fuok4aOSsa/ejTQNuvoPDH/jyf0vV4S3
+uxKNToURtxbfotp/DpLIkeS+1EgkSyEdveeLTvjaQCx58w6ElIYbQaIiv3OQOK3b7qBSFQ2lK7TV
+o2rRWp7cbU5kFOGyW9MkmJGPPFQopBP4TlpmLS3yhNHeucbw1XApRZDSE9nqzrlVu4mGCSz7TIHR
+h5aOxISqhETEnh67DdEcT+gjvKu62figwVNcdFuhEt/qumauP5bG2ucY4Mcp3oE0o8dxTyey/LRl
+kpq/mt+cVp8T7S/775ks5Z3jCm9fDy8EXiMC5Y1JkePy35jpaoZ4ncWcl6wu3CJ/6Ok9uAP10pwv
+cOiM1ByER2Nja1QJfGZw6XBiOfeeC0A62Z/XdqDWkijyGvlka/Oqgj1sSt2taoLsm2hplHFuvdEh
+bvDVKXGWS91rWSs8HwOxxV6KXGcrKIKVJx7Nm9X/oFgjkk2mSp/zRXGgghUu5qsvXwrUkN00zD31
+s7OcGrbSS5IuwjcrQ78oSziL8oSOKe7L33KXU8YmUFhf96wHXlu+hx5AIHJwdhxV4WLBdp9MqtCf
+Rb4Tsr2TVxt97GkB0etWSmiVvinfExZ6uzVDQAeu36iP51SOlIaJa8HF7vylaY87M9VZ4gc2xtke
+YSHlmr4qQCNeIs4DqwSWSWUaR6B6sJvPNgF3HsUoClbK2NHkXPvwlybSgvX4bKyJxLaWNS0sdyBK
+vKuJ1qyWrAay1Re3nLlxKfLHVrkj+mdCpI1rj7VegD07aY6jd1eS2UFG7CNM4P2GO9CvHc3SRxDG
+H55a19djRSuJDtjOrA2uJ8R4qzGv6jQMyoQzocG3qB1haakGqUwlcavTaHmf/e31nAv8bx6en0q8
+wFtebi+4uqRpz7k6VV829b8gOm8wae0cnuL5Andi49jPQjwXyVyPuXwJbXbT6bEnh3PmC5l5SkVL
+Dbi7ODUBvVZ63K3ctTSTcx66SClr7Z84YGNfhLukeO6X9AWMsrzjqMAdefO13fSXvq9RHg9wHukw
+OZ409G/jmtG+DwKnd6y8UWXld+92fTzVXqTS+zpRHbI3C1W0aa27f6h7qruT5KlIvujwGpbMbpPp
+cqKplTl8yzi+L/HpsL1XHn17o9JaRWJ4i8qNs52res7qTK+rlvwwk/ZFMYr0hzwsJUKHZ7jGnXM4
+vhml8a4upI8Llct0KxfX+w06KeBhvOjLlL5F8zQunadSEUMjJRqkBTKfbBAcbrHjDuEHeYvJyn5t
+OtxbWU42miSdQQt+S836z4IZ5bBzFDZfTWPINneI7qclzvMgzORfwJhN/BLTh0JKtrQZEyazna8r
+v0Odr5cz1BF3IGKrM/KzjOWfYy/ir/ZqXJxNaPgOjjTOXrCtFIpFVUfEzJlUz0dJZvZTPQnJr77+
+qUPBrUS1tDGJ581UsWkjawyUF2+p4nvdUBOuVJzN0zaeqiTpmzc1hT6d5kxCpqJHYHg9eJXZA4o7
+CfiiiF8XWzBdRiKa+dimMZhL7yiktUPNsDkY6kvgy3729PhqrI/UL6b/DsSghz/+KP9K//5M/qzp
+HYj0ly5GtEn0uWqXZukIvxaR7WtSXL3KjrWhaWK6SqM4iKZSIRmLU6u0Y4Hhj5sVZrxye4IIdQjJ
+aONNXdGIMru4PO/gBElMMbSVUy+ok/pVnJpDkuSX1Gk4IwampzsguY/TE06YZUvU1M2NupEr30z7
+MRhPISGjI/0tw0AH+nt5A5ROZJS/ZTVQ2P5MOfy9Ff+S1UD5S1YD5S9ZDdTKpAzwKD4S7X//n2VQ
+imljXAmKGswkeGzCmMMuf5wF5YyRlXdfmlbkCORJHFWuU/+R1SBQCSqCv7bg1AbRdyMPBljZ10P7
+j9YHvSKSLD7HH5ysHx0iqy//XJrhimVI5uzYRFXcreqjBbcZJeEsNJDe62q29nxSdzBU1/CMczKe
+Coy7QkEoF3Dq1+blEy39sqHlfRaThCcnQs1oUUNSWm1i3CqEj16S0tzFHPxC058o6XFkEb3loDle
+Ep+z6QBDG0NocWKaTAxlFbf2Wb5fXWS7JyefTBpiPWjjND5a6kv+zb53L3m03XtQvNpSlwsfOTpl
+GDSlV7ZrZqVTQMgSiae601WoyuFoHbqS8GHUy/n5s4WBrVQzAr8v5J2M4GGZV32H69JhPqLfQFV9
+F5CTOYwB2Wh5L9cfjP6kVDKjefFHyTRv1iU/G3fQJQfx3qdVzCm4tD5Eliirn05IobZfRMrN2s03
+NY/Uui98sRb9kYiBwWHuQm37UFxZ8FZFtJGWbvVckVqbQ+A15J5ZKKeMKYizQOtkSuBoUzwa2SEe
+gpAkv4+NFbs5RABj9GBICHe+OBkt7adrJDWu2RpumDJ61eA0eqCIu7b6sMByfDSuhXaWs6BF5F5c
+Bn8jgyPGA8JoDOddRVG+o5dRn5hqFsTS3YJenryN9esqOHieU+3AoFlHtJhAOzSd0OgRZ3K/5Wjp
+zM2P/BPMBE8zbG01MS6a3O+TnSIh+CgZx2FziJ1b/5CTgDH/s3PU4j2NBLdRXPvOEE26oDO+pLWk
+8SFZ+s/dPdzzOIZRNRlMOUmf5zzJwaucpojVOS4USshQt1EKImYkv2Fdsy5+NaGVkG2XDw/tK+Sj
+18k8YUBRd5/zG/9HQ1/oGC4l7EVKGDSWioYmaRpHsTZP0qljZGXi+l9RZmqvBtX7iMlsYiyiJTu9
+maqj9Ncy7iAqlKGRkxWnVCwxtjHtbtVJeUl2qzAhshRcSD+MKsdbLDvIE9QdWClL9knT5SYuf0D4
+6Yj2+cOJ8ZFEx54X3ObJldtMu3QOnWG3Dk+6rlhp0tnzj4vUf37JfZomJyPY7XFTMfzu2CE+mbzX
+qk1fnqb2xq0q3ue+kMBhBXUWTbMb99uUIDsFL3O6HbfdD28bUZ/an5peORcJCHb7lRy9OcDtb9hP
+32eOTWLadb5pW3XdOtup3rOEOY/9tIuW5owb2NYfAXNuN4fURr+iFOMKLcmdNI4LY0hA2VmLZORW
+k8S3Ydo129u6tlSLL1yEKRJuJpS/zv0hN1wnJl1Dg9lrAK+73aZ+7nURJHTb0iqvKfr5+kMSdwiM
+7GaTnHcjcsSk5g70tppKIoD+iqDsC03Y6yTD69SLCfSETCKW44fqIfBkbjrN/awQwQ42Wrm3k5Gr
+BuuoJ0T+rY34FOVm+Tih5EWcDfaEXmwoClVRuITYBwvRebjkfVcGbu8ZaDh3ICmnnNJpeow72I2r
+2b1ubMN8cbrRgZwFdZn5B0mTNJTg2NOIsXVwX9NT5VhWP3JUrFVxqjYpaSr7NoKuAemDdOfTlQv5
+z5RVSwu0fSEyGWaFDDvdVdtVQ5siKbeovfOFQ/enO5QJBUJcBTaSdg2/pZUridzLkNkMLSIQL0de
+7VVZnpZ3/IH46Rb2Y5KzLtSZtf/5gwhovwkCOpMw5HTUJXgNWU1SuANZNnzVPZB7c7Jsb32IP/go
+GrpgHWVN09jtP8+tH+E6jdv1mSB/Q2EzJpnSVl2VYVtl/P00LSmXpnVhM4cWfky+W0wGuj1/NoW9
+X9Mz/yOjCvt7RQq7NMLakHXK6PHNmq/4o7uKVJuYYzcq/k2ovGFfhoc3Wad2RHiMexZNfn2IQSZA
+kkCqW/AUeTtXgFUolC/l87rt9A4Eo5Y0qsPOOmaM64S+9jGxFyGXS/F2Qf2wGv58FzWn6IrLVoLi
+auxNtmYX7frJ5klAb0pLNf3PiaWlcfVxYfenkaIso55n2jQa3Ycr+ZpTx/lpmwPUVO8KaoNeixPl
+U8goqx5N1n7uN/zQMembqe/GmIarwnwvgnYNV/KcvzeAOf0SlF8/VyU/+X53+pL3KItr2fHMregY
+Rz2KKecaR7BJYVDgidsYcjyBPYNxlzSLkye6k0ze84bvV/5kP0Knh5IXE/OjsIw/qXO2BWUstv1k
+Djx0edXN20dxNAufLD11/gmP3EgUei9P6XVAE/qEgk1JHre3HIyJSReqzRqupnXoSTrkoyXKVHEi
+aDo6EnA/dhl2T8JTYEFF2KCCMuXRoIwoOBxZ+A2yT3u/B5Vw9SpatDQKXEglhjXe51vO4y8ply9c
+013NyEYpDMwPlW8y8NyJL+SDLjySbUz1FaSPxa/JHgcHyaXiklVzRhAeFmqbfTFzrH5gLq1qrQo0
+98EtLI8gNvs2nuHSKKwj/sSv8VZtwF9xYvzNJRUbpnUyUnttTLKVhcdkPGd3GMYAZG2gx/4OJEyw
+CTxAIjWPNYJTw/F2zf1MxrOGQyQcSHKO5pYkh9oIF3iHO+77T19WUzZ+cunG6E2Ri333DLF6I5o1
+STOcKVbYFoS7kvrSo9El8nnyD4KyF9roAgojJeegkxVgYv8BuSLUVyRyTBAy49nPQ93cRG5w+Qmf
+HHlyLni92xPhuhVCtNr3Ru1hxlnodIpMAVINHqeZeh4XXdymun5Lvxz1E2Ra1JacCqqWBda5qrce
+vcQvo+QwrSe9Z2EJ56vc8PPQUlJBeDOJA67KcYJ1pBd8Uu5Q6ksyej2w5be9bzVLF39o9pmY0lpH
+gpIXVpH2MKMh0kEpvSIpg3NBa9V2cgmcYgrvXskY0jQ/CAKxdGEsvrSMcGS77yxzwT0Drgl1XHDA
+yLhulfHfQs5GvtewguGB7iICHt0XmJwMz2y8UktXjhjK6ndpVVlnPeojzudkIegNxWVTTip7n+b6
+yj6SxShEc4Kpj67ubde+M17n7KJRSPzUAXEf84GY9FRvF6VLiHCc/OLH8TotiAuRder157jT3HXE
+UPSFGTUh6JP1J/GNR2+mXmCzzuJymvobPQlbnTdVbZUjomlKWX8dpX1AsDkQY716dt534jFVUsot
+Yv5kcLGlGRPeJXgyk1lxKwT/+XYxwtX/AbMwUqRHuMs639HKbmMjI4TCgcK4upeJMNP5RWpPkPqg
+sLhdXmHwyZUpXg17xH1fr/u5m+AF+SHOh+8J0XnU8TbmMHI2wzZDk5mpxlaT1eNEkxgSzNqk04vW
+nVxMM346u1CS0qnso3fknx5HlicgbTbKOGVmMd5u8cbSeCZTUzh32ca9bbakKhiYpvewPmuUCeN8
+b0zWLKO625IbiYm7vtfXJZmAdP/9rXqd8oRbbNa6y2nK6saHa6Pm61ejXazfMKIDR+IR3ND5n37V
+txY3ZF2Z5HJxmKQEN27McfpuC5sIkIx2M9nqQwnYwel9pouqo/XUrOSF3LAxBVcFs9csJz4apkL3
+ZWNw+jON4KMEupFS79q+rIysfoRzvWNq7lRisttLWb33OI2yOcAZnuPifLLVd/7E+xGudJ2oYE7D
+gkIU6QSZ2SLZEHxzqWXO3EbhgV26o4plUqdH1cMGeNPFhCZLJ2qWnTiRy1i2U1bvUNDNp9wr51Mz
+JAX6mhiGpLI+S+2pSKMH5ESbYdmZT4z0NWPYwpJSHR1xBQ9xBe2JN1aYOU3QUiuUnYXhyXmJnKrV
+IVv4BCQFLCFj1m+DUgp1NbsEJJFrMM2oJrIP8I9GE6x9vhGYic1rMahJW9QiLTvYuff7GGmlbnc+
+Q5YLyrkAO+OpdiMzvoA6MgmO0wuKJSr4dFzXZ++pjNazvAkXr9R+jopM5iUNrrlw1pTwlu3Oq2YJ
+QrdWEzHF+OHcNx2T5E9gpqF1msBon7XVv6PgafVUndXONmUq5YRiCuea1o4k537DxO7X6BWQ0y0V
+HVfBebzEQIiUaP0FuFle2D30QvbnJ5BDZgjoAvHs5LCOKLvXMOTl9wIPRF+Cl4xLz8MbuVTLOtRs
+EDS+YSHa/ELaB/OBmIzGPvKgc7+Q6IGMPdlHa0jh+I0mAQ1OvuDRoOniE7/2hvz6QsmvOcncDk/u
+LewHfVLHXd3bgKAfmLLoSp+Qi0ruoplSWA4b3ZNEUaFsmzvlGhSXgtw7ZQkREso3h5AwMyTImcTf
+Wm7UddFQv3PLikJZ0X7+gZP1Abjxq51oER1eqWBSo4L86UHXJp/n0+rj/D7Oz/fiSUtQ2/TxBoLa
+oQrg0fdCA5mmyoG8X5z9DSwTR0NYA35gOvdZSY71fKYq2OTdGmc3fcFMyP8znm7vaTXrgHDMy76R
+YKYlRdhw/dChFyvbWM0Ss8EDL8u0FomoII66exHcTwJoMCZ8EPdfibDCFOazOM1NA63DXE/ptc9p
+rbLzXXrpU8Lx6ngehRiHPkq/v63p12ZCOPGUcDXkgp6AOvKDCdg7x85c2u9sYGmUNbLVN6S55Auh
+W1ZI/GorV6uAIdlsBmyS/HDCxFaix07S2eTZPtsEQ9Lze1qtuMANfVEi6aTKmJ21j4nTJYq6WNxx
+DqXHgk+o6+Eyar7fOyPpGmtw/h0I2/79/eHPB8mrSmL4SYiO7V28bKLiy+DkDCxGFfz145wZYeXe
+GgqKMSuJ35OZ9ySQr66l12jwf4bggXH4Y6nkIsugO18/Sd8c3YHQbq9uNn+Q8PwQPFKgNnEWFmZF
+ppYj1tvYC3sTuIeSQ4KnEnajc9ES3tT6fE5qS6lTHWn7XGLo1tn6lJvf5/ScKi4gdtKAV8ZaoSx5
+SyKg7Er+kCZe/YxCovpzwSuvI7HEE+sL6B3IzlPrM1XMQN4VFw1PQ2U8UGNnWNWnWa9/x3+j6MJk
+ap2G77xDoqo3YZdnfecymmFMDP1qA6iaFR9x9WJq9iYpK3lRXRfqY+o/+w6xh2F1FVtYhGIQfuxW
+9JGpjA2Lo0M8pjTHX53k+ES8dfSs7bzjlIXn9codiGWlc9cksHg/0X8m8jLqiV6lS97GhE6PKkb3
+nlG7mclZzJL8GYW48ZSEFWJJwpZmgWosS8cPeIKv6lt8LEnoFxD/jVW94lFIzsSHp2zaxWhvJiq+
+3s692PD72SnJnKmeUmvqhzJ58x0P6NxAz01de7z60AfqLE8NaqOa2UP5s/cv4nw11Un28JCPTT77
+cmyfl3p54/bkXWdu3I60vjydbNL+WhaxtSauY/szQD0i/uqFh8R+zXVzb+Wm9vG3cJ1HvcosT3dv
+B67CLiSAO2q8RaXc10pFFPxMj0LpNvNnqZSeoIQi8KSXTOYlOqdvAs6dqeTHxAvMjvDyX7QFnFwF
+bqzffra9gD5vZr28A/HTzPedqftQSXfvxX9sITkjbW88sjmdyHKm2ThnmC7a2KCh3si/outt5LoI
+eGoTf5ki4/FzdpzuXSDPOA3/RWN77Y2Jqw7SWcOtCksy04uqO5Azy96LJoWLkj0ziunjML/K87Br
+KpWioxcthjd7dyDtm42sMD9F9qKO2683q7fbvxlDRj8zdrqRvbhlupJ5kWCgwaRbmLlAkPfSK1P8
+nRyDIg1Bz+vsHoZPmkeRN+Wyz3BEI9ZusmdvCtuTnz9ivETX1/nrb2VQkOhAfy9vgNKJgvq3rAbq
+n79AQfl7K/4lq4H6l6wG6l+yGmgH6NXjo/hILP/9L1BQi2kN8/UZ9UPs0ip1q7aR3Tvao/V/kAYT
+w2wjIk+U+oXD+//IauyhJhJ8zKBYNGkOssqZlG/1v4B6o6dkB9tECVQ94xwifSnh8cjXYxJnTVTa
+DC1hihyfj1Kvz/RmyOBC9/r4k8ZUaIO1v6B3NE2q66fiyO8JyHvGyiUMVyurW37llecl9+kQ2CHm
+/PTRUWy7S5V+0xa3a4p0U/ZhxJjErfKyfbW3P7tO9mPso/m5lHQNgzdm1k97+gkOJJ39wzgPCKur
+F/ntzu32Z9/tnx8YGv+06DyFDlDroHrgRt7LZPekaaIKpWGswaq14rtfsnC7R7yTf5rQeiChKL/l
+l867Reh6ET6UUqg0hBl7yKaleJZ4UBTesE/TrODURRfrsdfdaaaUuXs7RfTZ2RkvLy/HjafkB7//
+tYf4zLx/5rKOoVQylI2tVY7d+fZb9+t3A+EqK2KYbbzdnqmfLC43FTkqv0OWudhhk9WeTsPdm6mM
+PA46TIq3/vOdvsFGT5Qd8nCokXYuHO+x751aU9k501VGH3y8Il1e+zpww3EpP6vUv1eIeeAUU/j5
+DtRD9ow3OQJF56jpZmBFObrUgDaP4OVHpCX/O1Btyjt66mfn/gYF4TH+NMqbW34ZVSa51xvs7apS
+JRM4H30HqyRSn877FRwDk+n7V+uNAbj2nUtsSVvkUkNL7X2TfQPiC4ky9scKVWfubApNN9M1cjLl
+ZEI1Td8towIu3QZe3zC4JHN/hYbFHSo+VSVyU3011xM3lH1r3tSYQORUoq8V5sY9qvimbykvL298
+d0FeT+MMRbmdnDkZn4kj0WPJikSg7oMi13BUFUKA9NFVcICZba227iThExw2i3lZJtNKj5VK0+Xr
+pxUV7T4HNL11MjeYOKKEFFrHcM69wxeuLCSCeJP00DYNq0eYrerHFQs698FFUigNhSJ3oG8tLaMf
+6grDGlapmmbWw7rPxw3tHh+Wcj2fPmCY16MKOq3B0QuQgyIOl9cHzu5A7H71jgSZXwz9ZergtKNX
+Ga8Y08X2RLkr7kC97Xzc/TcSi2UDG1+vh5fObHQWy1J0wxGLd6DIOHIxsfKY4wRS4hMe+r0i7S+B
+3/bnVdehfqjCapUOsDoVuuZFej6qZWoFX7yfUy2EZ90ytS45hV/vW/uOvveLeHHsJ+IdLcZD92VL
+Yj2T6sOoQ1MM2lOjh7Zujsoeg1syOQXvjHq+zLMke1+lCpZ0XZ08XsNydl7UrVF1dr6fNDIole61
+Gatl9EN0VnVbX8RbkvShaFPpOBjjnNpwMYoz1kmpnmYpk2MsOJ/NyIP+xLsl1ZDp8A60brYgil5n
+3HjLJWrtNM73qTJXaVphgLV9Yvt7defk+U14bYlZ1EBoAvHXc6i78gAP7Yln0nvgMYBxNC6VqaI2
+6xqeZe7ktOsSTeN1BwLXWok+fVuXf3ualClwgqoWsaPHTHr+4Ta6zmPGqYOxbajE1qgmulhtU+sB
+AX7HLc/y8Nbek7JGmq9RNScdZpwRmk0vFxdwCi3tT5229dICjeLZ9CQak+e5eZY/mhEtq1vzLtAT
+qbfL0C7FpRXMZ60dlyRC3p+r6iwM2RsMIngYlIUmahyIBfDxGZfYXq87dxhpuc0JfujjzlsA+kam
++dTdbIGNQ0THLOR86avDi0jlq4QMrgdpnjzL79ZsS87s+bNY4ll2Rfyf9BeqWl5EVC4ZLhQK5Uo+
+cimh0zNp4/d577bnTLOOsXq9tvWaCVspODrp6tI7nXbNL9SkIDTXPuUOVL5ylhxqNr8luvgDM/D8
+XqVeGdLa8X4uYeoKZJjYpvOJMZaZSni5Ae675qxFY6eI+ZjzydbWuUFN5r1qpmv/fo/2tf6xPVns
+xY5vm3VOJrVI70S8L5XvQMcYP5dv94/f6owlUfen0QnWFEIuNDXhSMo8Pij3LaTigit00ProI5K0
+yQOzmNKIc1zBorXUEbgPs74xpbisCvsrS58Oa/XGqU/QH5IooyoavgkKK6J32mpMibCtMJ2fSWBm
+y0ucHnV1H31lWjzlkmZMjOPLvYSNwIpSyXDmBE9CBD0SveQKHMjmVCzrryY3B+vfZ1q8afZNIYpe
+8Ih2mG/FvJn42SXtzujeWCXfvLBo1If6oiBW3a9pi85ai506imBro/61unOQ3Ood6EG2G3GuiSoa
+qJ/X5px/9VnmE9JkE0XuTXNSh3HaoScbA9lf9fScr1J7lDTlYr4sHlUziiB0fb8dN0SEng+ML8Q/
+pk34pBO9dQeCxS9gs+xybagzf7lhXjs/V4R6NnUaLBKQ6/C3xEblXevjIukNCS7i1h5/ae8h4lHI
+sAabq2pg8ntgxy9N3vJLHJqT5/KfEDCmcg+wxVnnGDnw65BuxcVtBSEID83CDYM2zdxHo7VP/c+H
+PCZzvQ6i5XboGQv3YnMfq61oRLuUCm1iJTMVKvOXYfecDIeX2SljXeofkNbEyj4hZnbvozzcmzf0
+JnCoZTVPFuukk/JE1WNMMCyFpDiYYn8zabRXUsh/+FJiRkT7LUxzlQLH1KTW9t3XN8wrrA8aVYeT
+orByZJEptHXzAtMzy1I5P4bjLRotStviLvs5+pHoc5ZvTdgcbtW1vNrbWzpwnV80Vmlf5/30eK9n
+s8FIeeZ2x1DarNHSnvskmL8/mNXVVkmhtUM5aUR+ql54YxuLE78hZAA68TC93SOw8ypqc7/5tHdi
+GrZ1MFHFWs7ha5elLBqMY4z9Dlc/VhQZh7vdxSCjaSKYnLiobpAWg/ujeSmWSaS5/qzPHYgonNoc
+O7K75jv+LK22bsi01JMLJWYZC2GtmZSpYPsbwt6wVx9YjYySH9TrMYYFv4iz02FKlO1r3XTyOrDd
+7DlNyKAT8WSCE8rlk8VFMqe8FkhXtnPkLJRdamAmnyZ+XZVEXGAgboyz0YttdWDh/DlHsbnVuElp
+o6lh4WzodcZSY128s45DWgUjrzRbmB6DrTu92Ss4vaRN7CnbwEQJd8JamlIfBX2IjNlqef21tvdu
+c9jp0NWQCgf7REn8OP5sfWxbQ+HcN76lzmwd5nLxT0uK8881kTmfa7YWOesu+PKXzx3LuGX15EKX
+f1ZsbY2aL/McMIszLSLnZHh4VncxDQRwBLosRhZeTsiyI3xR271TmLTYPSnCNWPAHKo4Ca+jJ3bd
+E1kzNeIazHXWiBxKlKbjZLy5rfaDkqq+Za354y82PvBnawhcKjd/GfEkmWvpplSKej6z7ELKePOx
+pMd3stEW5TiHyGJuqEV6AtmUrlJSgZ5tU85CvbJXPZZ1C4VS7jnDpzohGSZ9g/LSfDlk7goN4rME
+VrYNmziNwR4COu5Dl0Rma4qZAhzp6E3E9xJvZ5dGX0MV0zirg2evBjGKVGayHsWGE0ilu9Oy3zin
+s85oOTyIrd2YFPx0tsHMNO+qmgQecKZl+aIxOhmkGjhC6Cn7RL3ekIOci7G5Frh7JmLwULihZVfP
+CGJ//L4jrphBSldr0qm0dFXR/6SmscIlhDobf1AjqGJH8b297m7mrGctxIglcrlNMvuk7k0J4djg
+E5eVlZhFgYNUOemn+W/1kic4w6bZSBbWhgmJHm4R0dubScWncTNFSea2dBcrHJ7UtLS0NiZq0mm2
+e2kp/dCyn4X6f3N2YFR9Zx1N7bHMUlcwzm+jo5/wuPCDXt0CxUyq3qCnJ3ZD4uIis5s8M6QLHGJb
+ouOUn88qz4uRyKy/kSg7vbBOfzPjkq0AVWcTrXd7l/R1nELnJZLLggxt9TKjZkOiHqKLYpfepJnt
+c/6si5yeohh/WfQxIVkY/cNmd0wpmeN+rGTMxqzTx4fQ7AKnYtYiwvfvvhTVGUjT54Id4hPHmVIf
+h6SSxjKGJ4ZEFyHQ3tfLxOWksuf7os7/H8LeO57N9+3jjipaWkGt+iJVe5fSmok9YsQesUqp1VpF
+UaSoqhlbaYkdo6hVo0gVtao1q/ZWYidGSMTjfp7nvu/vH/fr9Tuv/HmdryvHeZzH5/yc71xXLp7p
+wlGz6lFZWZtrpqEqjfer1DwzqhSV27Ie3nMVe/kb+/YpDhurzl9x9WbrwbVsxdJLW6H77Ie9lXC2
+GPMJP//9vtbWL8bqOx9LS3GcoQ8/gY469tAhc2JOSccGzqQSCDQjnYXN4vUhl21d9qM/McOVMx/M
+f0D1+1P8tN6NPjigkiCcFjZLZIjSL2rbIb1zzOem4ND3uY23dZONFbZt/CTNOe/NaN6NftFszDmj
+PFq1/CUtp4S31F5SW7ausOmrTIK5exgzg9f9inWNFDP4LHRl2it9pEHryvFM5AVAe9bo+R1VO3vN
+vmS05eOsdt07A4slpnBbHEqJn++YqsCAtn/HppvpadMLVb/Ht9lGe5jCtjRhFCYuGkJ5aoUG7IE+
+q19a9uNGWcRnv77L8eD7bJSk8Yu5nPL2ZFzctn694KFjEsJnFU4lYA7vvdN7PWw2xh0ktGZu+3GZ
+0/fbszSvN0YN7x8riLnECjPyWCdS/bVrTByo8jINflow9Nl0/tbTqpbKqSmW243NjDNWtoYuGQXf
+bYcjM6nLRcP3P/G0yCbftbx748mfBHG+7y818M5zxuT6p9K99DPPg/f+Sdzu3vW8tIX8DqqqLFay
+OrIyPtB7f3rE+UNNBzRd0mSgPlrD8k0vqxcKH69JaP4ozOcAmhmp3+WdrfSK7554mb9uB2OOvV5Q
+j8kbsrBBtqs321e9EK4cSrOfcWbFFz5dzpzWlSdWbPkAPWW0WoqNoFWuUs1eBSkicM4snzuzXrHA
+BXSey3FlJWt9til/VVHmDi30CwuT3EkVu4yJJtTMLgfXMMtQ8Fzt1O+Lhh5j3k9dzVm6V8VKn0LK
+1ZzivSahZ8o5YoXGtEn/RA3lbJTf4bjKL+kayS+/lHc5UTkXS55+9bHl0VdhGndj+mxuFYFVS+SV
+NavwmLjRwG829BQj6p7i6r0sPk/3RTqVI7W4QWZcjsXqoc4tquHr3ZVt0st9dLoJBZdub0jTRrd1
+ObHQqdoCoetXs2JCxuOSrquiJU1pozpVPk5+/vmxqiRz7sa8zf1gyhzCoxbm+lRcc1tt3vQG+r1I
+oyVl0b05dXfqiS/yH72usL7uLi1x2+b77v28id4lgMqy0X5BPvqFpY8OTwrnzk85nMjoPFNtsg7P
+qj1cmEYIm9ohtSkpKfooslGM+WPjQKPWuJVrfOq8zhiHR8V4lhdDk7dHhLdf3DUv5yZok02d82yN
+Lxvb/aIs/wlfeVfpMbXt4bi7cVm1Mo+rF1ybV2e9esBrdo5A5nqjYyqVYsO3r4N+RnszDvlNMG+4
+m1okZfhNOaVRUAGKRRSkioZUJwWTHYuvODZBj7OfukqZJQ3C09NZ6VqUvr06eqksTxb8rG+gshJv
++GqsQbSny6vI42257zMzCTJObyu+xP/WhP+1+y8R8yZcdUaPW7MZMoQ+RjM3jpS+1Zp7Wp7YR5fG
+aK6l4Z3sJqx+t+ttfvRwmUiB4fKU8MzOg/a6l8++2jwjOm6/3NlMPZuwaKJrNUyqMvytfqPpVi+F
+4W50L8LHNfeayYRIsJBCYCkayVw5pWZRQudlsl1WftVYi7+RSSSohX8kb4CFXji5zEv+kZO12/Hn
+nhUXrlYvPkWDFqadQJ+Wj2M/4nrY57Q0AieNv7K0aus6aaAl3iENXtpyvxZJ1i5ch6/Gco97VfHP
+/Jb3Mv/3EyhXKXgA/4kbUH67SvW/VIPqf55AufqfOv6LalD9i2pQ/YtqUEetA/G/GCgk/m+qQVV2
+x+NKRvCmXfX9tLDHY6UnU0kv19oVkxQ2BzdXWsYSr/3zP1TD94p409tiX11vHxbOzSfrQUnYjF9B
+93VLqCiFdG8ZJ919t5Ml9W2ldSZ90tX1/nvXenFF/44t/vYNNp+fUbNsFtwZ+0NfowIX7tbb4Rod
+az6t1O0+UAxrA/NlupfUfUr6G2D3ocBrY1TUCqcYI5n2kL2Z987YQ4k/533GP+wSpKpokYSp4eGU
+Js0fzR+e/JRjKY2wymYb5ULFmCpxvssIVNKUmOFewiws2KyV6NU4ljckP0tafsyjwMZkpBEv6hDW
+qGPXuIjlmWz64DskWv+Hz6ax7nPDPz01bRrM1u/fvf6RHGth0dFtnkunwMyqkJNo/Xi6Xkaz8KBV
+94VUodX6dIGPXdy8nZCk2oZ6QmfGtFiJloZMFd5t1d6AVwNZzGLCu5hEo8Wh3CCS7J2Z0vlQ2T3Q
+pvGHe/2CUdPVz5YS66O/UjIYYhnQ/Cr2pbl3C76FtGNZCq3Fv91pbRv4dsOup9E8HDq9LLyn3bl9
+fOlDRPZzu2balFumvNiIfIXrz1rPePk81JfLPsblbKcNGtQWPo8t93A0gCtGrzcXSaxZ6wlefSn4
+9Iv5qzLtsMJ5xdzvEOCOcand559CTXRCfPtn47qBxrgqCcRLo9tQoUJs1/76jLRliHhVtdGDrb/t
+z5Tiw/yUt5euJXBEE0UWU1ete3efcTjM+xfAO25CA8KZ1Vpkghq1uLec3q2vlkgsXtU9YZa+CU9j
+13vnt7Lnuc23okA3UU7160ruXPE39Ru/qnolnLhDOGcS50hsgXXTVa9Tvw4TfXOZ2JfuJrlL8B6O
+fPzn5WC07axigFNLqmPeP1MU5hPfqia+PuF8Ur3MCeXouVnol+Nw80/Zn22GGZjf+3+Ctk2v9gk/
+n7b5KfFA1Jqzg42qqMtSfmeGKrf+5B+15Uihr/RJJ74N9Q/teVL6W3dFUsU3hDshb9CM24oa5a37
+Hzz9gmx8EgfrvN56NjQnvvAzsuk1FV7rsm8kXj/mh48fAJiv0HLzlOqbLxEndhW9VjyV+uYfXnsG
++jKzIO8EZxMPG8daVtq11XYHNX+DXgCg09+zTxoYz+yu3d/FizE58zHoRSqtZdpWrSwJ7oYemMQV
+Wc6aQO3niwuXt6rlU8vmVVOX1EreezdpVhXVeFbq2UTvBR5n/i4GbJ243OQSxuepi3Uupa0eaVSx
+2pUP3KF5XCkU+pmBg5aVDSDR6w1Ned4000+/2QDz6IC/FtdVGq9I4GJFV9hrThNf1Pkdd3QlLScl
+UcvX69tKuem84DIeSOEouWaipMz6spxeZ7V0kc9meuWNqOzj6aSkpNz1lqaIWe8vTy2G7eG7Hh2s
+xj3+VxmHEmA5MfWdj8rFT8QbC9wcvNltmUux0Pf306VtdkpHamLWpMpnekfLHNTlfn/nsYvxN3jz
+V5kklLQX2NScCCrkMXu/HTD7UM6L9e3PB0aqz7yDn4tP9D76XcIkJ4Ot2DTtU3qsLSxo45Oa9cFF
+KNGF2GKax1zKHycKtbayG+D5UySw4NkQaVPprOsbrKTCPJE8ypW59Pzp9ougv0+W8On2mnsP1ZYX
+zwda68tSzE17NXhL090ndBWEd+Kr6sywWMWR+TvFMz9iIxKcWcTZKmcDf7ezV3xwrfjHnTX6Sqyw
+Vr3q0wSeuQySZfuS7ox4VUtE0F+Od26uMxnUwa6qq3TZVQZvLgDSSzFBEYb/VAar3QOFWC/H9jfb
+xCDAa2JG4tTet2zUXaknmnUTf5sMBfi0pT2TGRGZvip3zyT4fTXzHG+U1/wk+4HziEHwzWLPfyKd
+oKbd+2K0DRtHARLGmnxMiBcUsuYoQh/LZw0PUyELtdE1InROon0d0Rx0/ElNv8lCT8+g9V3CHcOQ
+KgOuxraga1/d6RPiEtFemuc9n8duFqWYm195rJOiHdDxkOmxStEJU4V8jbNdoVc1A3PjQIlloxUk
+wFbbsRMdF8R0HOCv0PbOKqCSYsVYmu3G59HYwuUD8WtqyFixFSZdF1+Ox2nXw3bvSEhopFznYPnQ
+xviFO+ngOz/Hcg7e72rkKPpURcfcpGw9jV8DblHcXN3H+r6AWnzZZ3neUUJ0y53dSKJaTy+YD2oU
+ITwW/EaeJZz17XC1/t0Al2fN323vVxT4xU7cfZbzUKzIrheO1pMJTHt8Y0AUHlbi47vvOFKm6EwT
+LKGpJI4I43RN4btGPz0vozxg8hqqQ7NLo62tlRAVh9NovHRgVQMeK79FXSy0ghK57Oee7/7zG9si
+EEcXUIDWDg5zpzMXKTqp4NxAvtMB3hnI3rO0X8g+7a3gsNpplEiL0SquwxfdSh4J5Bpf1lD1abbG
+fDGtx0jdLpWuPtNPTYXSeJhnNZX0hUgwuXKllTcZn2TTq7Jr1ZWxet1du/c5ykK5kphkS+vmONGX
+8I3VaCRYVGstvml2xWe3iaK3rbwo4LPpyftgzSOM1G7mqN9fS21naINcExXOstXQoZ0lHaDTaJKK
+vi0ntVNezi+S+GpAhrGxNMlXKXAT6655lbFS+sOzxS+FTkZwb/HTZAOOFWKU6eBketMAxWHz7Afj
+ct/pWVtF8TjbhzZCnwnktWn5wt8uL+u8GAoQIe+GC8ftrIkD9sXH+s5xgTWzHOkHSo4SVTek7apS
+GUaMZF3EdTx1d21j75SZn6ryqLcdm/Iw2sMHn6IrTWuPBY4DzShZdlqr0D7EQCla0UjDei0JWsn2
+ac0h5ix17Vyu8eyBLt3t1HUDOtoyOBSuq6e5NsD58nVz0fOEZu9CMdsqNdiLlMFnn69aW8ElWKys
+157q635OmnZlnS+5//mcEweFxmt/vgB0hAezDcHZ4u5WXeN/LT7DT20wgF0+s+bVuq6jt2XabO4U
+YTiv+RWDSHSf49dPsdX6UCvJmqbJb2Z5ry1+YiZa2OfpCV4RAeQRWPBqiI6YeRE1/3nLBX8Hl4pl
+lqnYXftToci+1N5mO/laW6z7VoY1Ed3WMflBR8dPrDGQa1D7auN+wtG9Lok/vylOWDOL7//ztS7y
+d+SYF59ZYIbFktfbZ72f0rVlS9c+6z09kAR8R/LHfJ6/O/dQKfZZ7U0OAcYEebmBrLvj/C8Lkjwt
+b0zckoiF+HDPqqUVaRvdjflZzGBSaOQWV/I+e/QG1iYJOj1jQMv2DCFiq9j+NbDpw/helv1WY+p4
+LwvRYYGm8bRohV1CqgIa1T/XI+KSKJ0y6DMTXe1GTtJhyIyLUOPszS83LFksPChI+ol1Ze1pFu9c
+6qUfauoe4eCl2Q1mkm7WP54snV5Wd2xSy+hLLxFdvZ4WrABf4y/LSJ3o3X3D1XWDzaFZUfKVu+ur
+R+P0xOCqsgCxQLHWKwN0wpJRtDeZrpzE+aVsdxLjlH895d2NLhfiuQCEJAZ/G6N30oimm6CaO66Z
+u3GPIFr33X0HxH4dSMFr5hE0Mn0X8oKKqUKVTy8J61+4EDbr+Hxh3l5QYoQo6nGMpPg+lS8jjBbh
+d/tILfr2o5EEm5hSdkE0/TV/adG5wrf/2MgMpOZWk1eLul6I+BB8rzPusF7Tf7CvfoKcmwvce5n5
+c/6c3tNf572679X3uODv6/1SGky3OGOM/+RhnMM2wzIV3U/MuvRwCnKNOgE79Tlfwbti9N+RDPAC
+Z3GJIxedJg1DphtP2wc9b7UwNU9OV3GgOe52H4p9CXbz5j/jK73jzN5/EsWVsS3a4FEAvzJ3zc13
+v9IsTpw/WvhHXn6xgSOtx+t8/Jdut3K5yvhXFi+1Xq0Y+SbjTNZOWk7HynrkU1+2uKa4P3jo+1xi
+8+zhk8Y4USaYR2tr63GNxzn+zyu15OKW369FPHtmJcw/BKX2Pza3wITuj0wc7KbXZfqFTP6QtpYA
+tdSLGvX62dnZwUI6JGzcVn2nC8T5KJJdHDPYgl1Yxl98roeLv9KLddewWt1+WpFtVMxtd2UdeV9M
+9YdaCbEzqbNGjd1NUvMmnMaAS5GvsaZKx/BBhpDlLIyl7nuiQHyD+02Fe3RmkffvDpftZJj1MsbO
+8juW2zbevl87vaafKToy8f+3yT3LXUJKmAI/33qpnjEibXAvT3lY9A+o+G7Nj0DJAW7ejvEK2wvA
+w1W9SqrJTzpcJVAXA/L8s3SmIdpnR4GjIz/z63lb5H/N5wruJuHplyInfqf80yNWkC7rtqILX460
+Hvogrf3ZcpcbrG/aiGJuuHfC8dxAuiEr3blVy3R2UBQ80SSl7t6aKczqIYLtD7hs2y8td1EtHZqW
+EpWUatbKGd1mrS9qZh384zAi2LCGFppZXNTM3pM2L0r0LH3EWONL7OaUMJ9IebBStqk1LdtnVRq+
+kt48+LhixkjqgBt7gDLSpzec91ejreAniUZEii48sbHkR5LbzQzM5nXBGfJyM9L5oDQuMPTZjnNG
+9ahVbi9do4CMkNGxjlvsnzv8xt3R7wMsZH0l2itPRLZOkqob+bN8kpt3n53ya73K2wzl6NhjhRMC
+YXWkyR5Otq5fn9yHTxJGdRoTwrAnNR6tx0nmVD8q+uzsrJVrxJmoMq5IEP4evhA5k/GokaVlzQkU
+e8EV477BltbXmsn66bQO/BT5nne9X2c9U/bmpC9wvlqEzyp0CvRAeygFz4f4f2/moPyvmzqnuarH
+NRTV45ld1jLZIxMYwt6F6x/gvzmpx+znAwbUdJFVgTPtr3fE8+szl9PKR+fWMC3zARwRIVebf91A
+KsC+yZR+QY5y+JM/ko4TIQ3fHd5L2b7Z0/PWyXnhk1Ag3U6BUcgbZKva9HpKnlao1RcQEHMXEHxz
+KB72kivKEDWBSpe1w0yPLnx7qZUxwJmr8bpM+pf0s4TmZ+2eKkwc7n3Sln8Gr3/QugCItTr09yIf
+FbvJCp1RVDMmw+V/B/M/y+tEw7D9H461ZuVL7ms3kMVdljhm3BRMla5ojby9qvrH4WnHTL1ajGeD
+onqDol31eElS9GV37BOV/Lc/zpQfLBHePzsWKyrlbr/8LpO6KjrGjylANwOf1T9zTwP/5JS/0atg
+yq43/oWjOHhY4wWTDNh4iQnt94QmU5ppfxRWCIVabZh8e4TumBvKPPpMpM6FVECfAJqOGMII4EPI
+w1nkemwvXqsUUFCaPLNGHvNaTuu69dgAHbDrDmYaUBtZUcmmWDuJWSWu4DRO9NTW8r4uDrWnCesF
+yFNI5XddLRJtOIaqgR2CwMTbf85ug6dM7njMGs0TqY6dlyOQ6d0tCNqXIYie/Oscu3+z5ihwaaqZ
+z72Eb9+UJjz+h9sU1PpOrThrBDeITlpV0fB5fd+FW/wWQ3QW58dpaxlgplRC6E1x5Zh7Y5BE381n
+n6NK2rWX6SW/Hm7efdH4RlhjWi85O6SF/3hMYtYYnIBcXbpPHrvbzSCvq72s+0Vi/mqyXeG2QMNg
+nxbET/bp4zqzyNXMP6krAy5dLfK0znbjkX3vn4RPMi7R7D7LzOZTfZG+IjP45vCzMfH98/CbjtMy
+td4mS0py83nSNJmmCaFFAqstfho+eS8BTWxFrZD3Iauxs3Tnpa6D58lJ1r3uGkHt0uFs/6YaVBQ8
+gP/EDSi/UVH/L9Wg/h+qQfWfOv6LalD/i2pQ/4tq0BjMuyX9YqCQ/b+pBnXZndD4hNHXQTp7Rzeb
+be86PzYwm/vw6+NkGPxsYfdbZDBA47+pRljtoqmdg3FFTplwjS/MwKovTOTI8oQjHeF2AaBxQnZG
+HRR4MJcA2Szq1w+d2kTGi7eModxAFoFXreOsqbfsC2DxJqqnzeslbh59yhT2uvgA/FGoJz4g/4bI
+tfE6GSOg/LMJZC481VIlJCbbzfZOcnq4Quu99H2uhm2XGkrDaVm/MVM7vTNVNe6KTU7uRFmBMhW1
+cNVI0MMK9GB/QoaKUk4cXzWvaezI69JX6F/R8PV8nSJCYA/cf6MPTXMQ8KuiTejMogIVzSVa6L8T
+GyDOo81RdmXlNWckA1WYdTcRDLMV3uA4TbfMS1KRqek54qBh1SvjAMYbw6aP/P1NoGZK00nC5jRF
+uJuqIhvVSRHLGfyCy5d2AdmZorKprTbuy2tpaXRTEc8QcEOQsXLEDMLG7wFMKzRTHycqv3KHpwJe
+WzQeHjmXHZV5eGK/mX4NXLd1vul/t0Y0YHLiQKxMrHJ9PJSUS5vLt3xmHodw268p0pjUSqWM4E7o
+YqQ2d8ftpK0/NleYnQSJc0lDIhDtOiH6MTtaVgyaV98ance1svOqEgRyO4DMdUiVRVA9ByICPY4I
+mEV4nmU/wF9hhgQYmIx3kvUIwhvqZcSXrg71DZ7KKkVHjWHtdZzqQxA358UKy5FcuUwT1f4QYITr
+p8FEUJYjv4j2ltOO39slJWWzAzhzhS+2aGBKm7miuXwsgkyDHW+JXeOFAqVPfbQYUDGPmjeuNEdg
++0w8cnwIdVAjsOrhp5S13r8z8Vt2rJ8UwhTfjjPf9Ewrg9aCaHrOOZlAdciaq2GNYXrwCQJYquM6
+CZ5i9rpSU+DVGj4EgEpbJisnMyWQmYPqjGScmaOpfcsOl05Ehmz9DO4w8cvVrN82UjKwU6k00hbr
+U0rPeSNStgXf4I63It7+rtIIEgPut54dV0oLbZwKKaDc16pBXfvuWxVIxs0paaFhvolDDpSAlhlv
+vFxejQCfJa0H5VacnVK6NE/6zNdPRZ1QnD+fUUcD5UFoy00hNwfWj815VAeLB1Uvzcj49V/W4YOc
+3efMubJ8Acqy5SFxgZZdFtmmXXigEZbuNyoqK5OJWXbpT2gOdcqqnKhZeYVeCXOBshltDVCrRaoU
+KpCXc8LLEYlHDr7dBunhwmCWNcC7EyYfY+rI7NgjjsziM8CkDLYHQJbQS/AZ7D/AFYmFvqtJDiht
+owMEf8qhl61B+TJlasrJ06Orc/FgreqYh9vpMX2ZcGeRSGX1dWDjX+hYFBbeMcQdW35UuTUOvDuw
+cRoieFMoj/lRVcjIByc+S5NMn3KRZTt4C5TZkqHUspBZxshpeqGybwvalgmwRKNn4cWJIimC+BDp
+iJut7t9SGv7GRUDqO8dGG/cOWx7vH/kH6Luh+w8KCB08WAc1uIm/LrGAh3sSlXZ2I4Nf+J3YOeNA
+epV9biPk5ZTkUInDY5NxGeMQKhDOYTeLTNdvXGapwrLe42sAvimfvrU5o45HMJ0CgKgQoPSarf/f
+K4AagLItLqPs7UZyjclYwchipKb9m4TN8uB+d+bY6tdvPhZZKKmza2ZV3aqc4Jf9/Tpw3QWO6mr0
+nGUgVW79UbBIdJAgmR+zqZWEKy8aW8Ti3RsS5UUKoHb+XK0RXBVyHU8g0AwfHE8KaDfzygq5/1GG
+qfaokOIwUSLGqJ+F54i2uAGaYS5h6BSpa4hi2qcpVmibInKAZmbsuGPFzIxJitxP3zEWmp57V7v1
+Tm8IwJYP0xW0ihzSsNYStZM8ZUVqVLNK32RvB9e9zcgqx/nvXS3QEUZN6chK8rpKnprB9rNeZz14
+0E2e66vBeiRKXweAhxfDHZ4zNfKLDo9M0pLI2MNq1SiUZZqqydeJCv23/kDeQnshZvr2M2cj2DGK
+Y9KwXzCKCchb2ZOex/tJVRkSJKyK/eXCH/7BwRDD6m7hUwKWNHQgSLQNy+J0nPXkrxuPaE9Ex6az
+QG/8LDiCCRJg/OvhASGsT9IPeQcnyh8ZWapunghY4lIPcKaUEfAuo1Nl+xzlACAlj1ZxBpUHOU6T
+BSE8flU4v5Qhg/9IA51kdsTVKc03ObJxYDTc2m4lKRY+rWxB03dcYARi+q08R9vMFTpeiZvARwO7
+fjVf3VCkgQeODMFxE2zmVL6yzJD4xmsKfj/zchpCvxjFnaDTKlJVTlnz/Smh9Z5C1B1D9q4llFOw
+7TdqQYGqYG9Scm9n5SKHh1K2hsxAa+VxuGHFxAaWSln9V1xbB4j1HeNd05FMPRdBWw+a7lYOaV98
+iT2S/xAJe6CSjwfmb61bXT+aqGzzSKSlYlwvGcPLlT54m8V35UYu4tuuYNw8s98KnzFAwZdYIgQP
+jlTIqaVmL8vx8PffzQYnoVVRNOkPbTe3yvUfZ46lC9HhD11sb61yC+kaE2S6NmSJJZlMKalIgCz/
+SkAqS29mxMT511O3aKnvVDBjY7FXKM94k2ahLDVVMCpMXsgiqYzSTiaYYmy28IM1NF1TmbsUtp9r
+dl0X7Mew+ZQRWRIEKYWy1mz/Rha6jdnzlX6M1uc/LIadiIhVzz6qkbsAtJz3wnp3y0EN2tjH69+1
+mvedz7tggj+brE2CymksD98SjS4T5QMr2uyBVfTTbHvmMixlMZPsmNNOArabvage2aOh2E3OYWAM
+jelVUcLIJkfKeYaUvtEVljjkOqIdijH9aSR3ClqXHy1+99pZbLSBOqfZ3Kn5lGlySoHyHGbITCwj
+C7D5wMwqOWk6S4V3VhirnOIO6TX775kFTwjVpKmJR8gsF5WAYYDBvk9ConYciE+KHCSr0orefWKj
+EhVXdyrICZx+lrpPuyky3AHUDHW3rLkawGqSKn1Y+VU05hZ2A8sV9lLAO8iYlqVQZaTH2cM4Bdo4
+jvFJ6hdl8KcSgXUdyXYKRTNWxpmgtpzPpyJZY+sBZrQ3byvEbc3Em6weM4iEgyMyRJGz+ndW7qS2
+couhvryWqtAMAWwFSFAem5aT7HjeCq6sO0hDFnhjcDFFy/b24c+gYP1yW9jsJIb2k1KqYox2L/Kp
+s4FqGJNO8qmpOW2VrEciTQzT+aURAHYelTvlIeV2TbIRGaWwxaNQLuEpyMP1X792RVeHYdNjMiLX
+9DFpP6TKR8ahOtzQiaSnTqiGKMvRsnT4j7c6hgqX9SSRkzS+U0n9gR26kmB6HkOeszJoEKLFw1Wp
+Xmsq/Jqx4G+1Kp+EPSh55d5TIaNNOAjZXIjCjq13G+fKVdn6hrGlCJGM1N8svmyLVWs8M+T91T+r
+6+380Vcukdbcnjks8CEv+ZYgATTKo0Me1Qk3UOpZSa6h+eAHirZvhDzezCe+O0YXaR9zWLet0cM4
+624gW7xvVHyX7WMqsnzbWDOk0fguHxpiO9mxB2orqryXw0WnYLqzY7rTPw6Pwi4SJncFNp34evkQ
+npSzzpnm3ELDrDStxq6rzKefj4xozMb6SR1/23TJOuTPYP0XvSumJ4+JVpa9u7/4ZUQAYT9d29QB
+af0evZZjAS4XAC6K5GwmTByLMam3yXSdw/g0YzwcC+vF0NiawNPTb1kSM8j00SIkdlBRBr/MBJ53
+ZiclqV0LU477dVdGFPpayUIGZ7T40rIvEOad197sPrONEHCxRAGzvKOMdEvkwBWiBKtYk/4YTt0Q
+5QD6pI7q3CJV0iB9rYTVr5+Yb2Mi6BQXgXJcUeGzlAzRVwCl87GsGiqj4zMrI8y7MBE4mFN7K4yJ
+d7/o2ub184pPJehswZ4LQLwmma7Yt4RyUXFFJhdRl2Ky+QwAw2V6nuWFH2MyqCA0eoN0Ninram5W
++plln2VIfd3ABqWqgQNIrRTPb9HNsX0jqnPOT4OEki1Ph+2cTwKRVlvYvClWvUok8YZMftnKgQK1
+Co8ZHhh/HDWhOgG2FRpuob/PW+rwnnvvgeDPmrnd7Wr+/ONKDVS8SK1MGpKP6LBxntDLo3ZtxbyZ
+b3iNfZEM9p5yz4QVQXtMsdUrvBqTFrc4+O74jCZ9KDY9hSI03vGSu5+ei8UcKbzuoPmku/R3hpSp
+6G/WDvXczMQ8LzMuedLKCjgKSdpMlCmXGW9eMmzAH0GeyxfSmgvDMYVxqWIjAmhYfLkscSI4ZH+7
+HLiu3IxxozHst+zEgv/SdKYkMsvFFBQx5ykAe95NHGYccSglpza3h7EbQsBTwYFAhl5zh7p+sIF9
+xSYxWY/ohl66ANBjt7ZOAIRW40lAhuDYD7nQg4ov/rrqB4+gmrUcVOnrqJ7XsctZv/j2Q0Dr4Bwz
+wXV4ya0n2tcIwjDCXI4RwkPag6P4je4hKQ55NuuZpt8L1JJW0lRf/jbxpZcW40LXqa+W+O02M+RP
+hmSJCumLppucaJWDK/2E3YDjpH5Uv036QBrH603osfaxnaLOdfJxRejC+wn/CwD/hucx2BVNndpo
+cluBjxDC9kMt5S4itfRxzqgsekQvWGBA7E1+dQCHwpK7g2mlNEtPhhx/uSveh4Kc2mboPsh+1/Kk
+h4RI5UBnB4tfSsuE9sEuxZKFP4dUakdvEYCot9xH6KhLdz+Nv367hshz+D099Cz+TLPcwXOD/ZjD
+OMmWDzhtkXB7vMV8I5dSo7bnd1RJkDITrwhsymhAOHsH/ejd6gYoHcvNhL4OMQ6hn3Uw8NU2CEO8
+IwC/dDUTwnbFYCDGnv3Ctq4hvgKHBu66C0BbjLkZaSpzNnTk/OeSZD8Y3ubC2UgumlkmyN3C6hIz
+i36Ie1VQJ/DRGgUrvADmow/cxxbtfXSMBcseDfAK/uo/KtE99y41AzVupCOz5q9YXxH9Vv1WJMX+
+7Y13Ravov5VmD8cq+PpPSbmtjhYuUFlLFDWvVNFV8hP90PrkJcuEEVWFlC4+H9/KuW3+pHTd7jiF
+f17CMmmINRs7z/y41VjlmWZKBI9NE0GKh0nM+LSSogXXN1aT21UKoENbkVh3K0ZfmvG2YhailRpT
+rvJtV0zNc5UNOcyr7UTl0I4bqLULgGkfLyxtzCE5+J9gX/Igw1me7NJBGIdaxGvdQx3yI/NzjjpY
+zxGYo4G0cGoW0FvmFu4d46cMX4GP30BkgL1qbWjJk0zd7g4/2ilkJlbztacPDEwiDpaPiN1PVBcd
+DLqjSGhr1pLQwWPMOC2sd2+4tQh3+qn2uJ27NXROvox401ZancuszqQm/mcsOJ81Y0wE4lXOewCa
+A05VoKgZkUMbOzE44P55H05Z6DeScJuY4Thiy5Eudws92AQdz/otivNPWkRGhKnSIXSiBY86slMP
+lVtStbO50doa/MKn9BMlN0NzGOGF7RAayxSFWq7Rq+U4wbdO08IKCOHQaKxqqAVSDfWgAVcGu/5N
+hfcg+EljqMWmw+Sf8SRUPN8F4L4JN++rnWPQort+DuUdR2FmWFoZx1kbRWkD/8dhn3xISPuh5G0F
+FPfXRtLUBcBJENIUsdPb9pDDqSOahCQ6FjxYRjWtnk3+ia6UJfc5R/w9+TfVoKbgAfwnbkD5jZrm
+f6kGzf9QDer/1PFfVIPmX1SD5l9U41rdWxWBXwwUSv831aApu+NlCDpvDaprLuM9OsK6+q5xSiTF
+6crLiw0B7j2Lrvi7+99U44gFuIGlzL8AGKUfLxLCfhZPHBbGc96WMq3sJ4YoSFZ3uKyrqZJ13Ky5
+3BVAUUpyyuFLDpWHnvAgdXZiNLdVLS6Ao6yOjaFmC8i3WhzSDrJ8SjHhEFgUvwnv7WtnBh7n8r1F
+pm18yCcDddXI3tV3wjWPnFG1hz+yjM5zhifGVmrCmB2kXEQzREegIU5xZ+BG4H5ucBzv6ZWanPRq
+KTs/rkTwGvM1COMcBLz0hP+uLJEW3YhzPqWfDmEVdfA+yzEXBEIUFEoCaEaqZLEiFbqnsiWPok58
+qwxAaeMnAkjDBXXaiYNSJKG7gAVnWDIjIbaZJ8I3iZxj5LUG1qV0N3dfysR1LJq2tThynKHgBy4U
+kY0OReLNiIXqJpj3ywQUxjKZaCd0YO42rF1pBPBgF4immxDhOkIwFslODJTQrhVJke2FwUVN/bHI
+BoSl12tUJ7m7661Yl7SZMHZygL14f8cCNt/no1xrST/Hh0wbNQn3NqNi/I4sF4Fthd0a2yk6/oYa
+nSnQP7+teqpUDGdSNhviQ29v7kjd7IlLj9ju54Sfd9QU/oR9ZgIRGtxAAzfOhaHlpyYcoy1x3+2Q
+PrjJmOxK5t4V4ymZyXIs3Zk5rjeFd+itBHMach3IDYz/rrDqz+xoVliHBbGahM4Zue+HJDB/B9cU
+7Ids9NyG+L2l0h65W10qxQ7ZjF6T7YRcpVUovDo5Ac/Da/6Fotc52PYD/GHWBEiBlvARaM4ztW0q
+o0jnXkXMiBgzp7U1HgFsd/hizqA3FoNX380nQbIXsSIgaG8/2btJK4FKIduRTscEGkCVzVuxdO5+
+ejyxWXQ8uNaXSrNDbfEpo1f/ixiJA8BtF6kNiv9u/LpoynRd7PUQKwo8bjx5GqeQxPXJ8s/mdJH2
+79wwE6xCGpY7B5yBajyS0d7iIdOMd9TEb3xo9zUt18s0q2QZfXvarki5kMid2zjW8fEg5PgreSAz
+vLcmBRjNZVkmuUie7DkIpSNgCj9nOKwqP9ZAzNicvaypvU7eOv917wIgNjEqw095yrtnVGbxQeTv
+5zdiZ3Wwr2w25VdoCsgrsAf8ywWjjdJCgNKaDwm64McEq48fuDUOQvZ3YO2GXecaqHVSJfY8h40L
+4lkHqf8z9PTSCZH/zplQbFoy/iHpQTu4Uhvc8s+iUtSjEe/RKXvJK3/nP6ZgyUjk9gXAd4wqHGhC
+CW8bFuGm5aHDgbXXBwzdjBTeXEs///i6QrLxhgjWCaP6lQbtnTU/sXFEH/3I3Q4ea0GrWwKozGEZ
+UlPOlRX7FgDWXvxrLAJSKSDRm60ZlY3TDFW0SVVq7zB+SzTGsYlYyGwoKBDLXwdT0Uzy6Zx7b0pM
+ktVUgnA/UDPmDiPqHGBd8lOoAJw1RWNY9vqv+3YiNYKaxaCI5W+0CZ9JQmHFzeLDjcYBCuFPpAR5
+Dh6VhQ1gkGfFSCQ+JFAF/PFJEUxBZtjdQDH7USEfnhvqQqcLjUkhNnjACna0YZbFj/c9QKD1ynXw
+mlb01olyCaipUVrGsZyJQ+9aBXBH8Dj8l8Q4xP9ERaxdr0RRZJnTpHWsXDagOVEQypQC7fCTsygz
+woWuRPq6fx0tKs/3ST5VVmq1ff6IuU7uDFF2FMClWmrPnmstXKbOIoHxw4R2UO5ORg2f40+YUX+D
+TFeP4PFoepId30g4lFP9u3HBsZkc97SSL5U9utYaDGPq7E9nAYRLWt8iNAr5CJ62Ih5iudTH+n+j
+XmzvAouUCW0WAEucQ+P6BG2qWYZZTCo9uLU1JyB4ojhqnAOSIZqUZ8cXAjoSMxKM+4kRhPGchHsv
+kdbbwEvkJ3kYrDpnLK3qMl1fwPcgkHZlvuNw7ZTryoko6adDAIQeElrKPlDLYq6Us24GvS4NYNbu
+2hPcFW2qeVRbnS7Rct9EFi8pbhGuQFM+NlZnIjz6MZc1L/pgYvJcl3mhvgg9B8IWo46ok2J2mcvM
+QJYfAoQOD0MUiqXWpjE0FkB8OrE4cKzRToUvlPL7/G0+40NDVLxBqbqCq6zSOKKZ/fPdWF1S+P5p
+/RREwEEXNu1gknbMmj4Jg8SvpXQrhP2ULE+Bulugus51TILYdlzINbChYhIElb9I/tXmgiz9oO34
+53we9PMgRAjz9L+O1if2Oim6kCsXgF7BC8B18ikifkST6PL/HReAi27AfYb/eu/Vlf9qlP/1ZqYr
+dBSMAB4pVWNH34iUgtrO/RGGpYM791SMHvm8Ss7/9HV48b//lRHCg0olxu+KkTLt9bZFuPWS8WkX
+gDtVGfK8aXMTCYxgkSuMyrOcmBxtzPrpx55fBDzclDfmXAxR+/XvBaDz6w2ksouTX6ver/dD/Ty8
+rBaRhlp7/HtHRodtgHDGuXDD+xhCAcC4vAZyMBRsmmNKfvBUvl9HV/gCIMUqA/4HzKq3hzxly/qG
+j7/BoLP3/YEHKHN/RAfRs957Lk80Bq+Q7EJhyyUPpYNCZXU7e6cczbIaTfFbscPrJElv1ejddFnc
+iRqy7wsCtoy6AFy9hcdChAPT35bUkC43A+KJ+ohd8q0D7U5EJVnZHkG+bjMv+NKF4RodYM1HvhWT
+Hj7z/Rvu+Hyq3bomMfsymsGlWq0OMcTp5Rgnv1xhm9wik9+gDoKNT1E9wG6ymDEm5rRc5Xa7t7DC
+usP3hfklZvLecnqR8u8WITmnfK2kWw6qZtc/ZMc0nZuscKtHbA05a4HpIScOBITD2QWgBPJX7468
+TQeixETnnVyG0U0p5Iswgsbrp8YrkeRgUZEgkMAoXcQDaczv7s3YCgEBlOdt69suzKdFOMmJgSZa
+VktYD4QPwRx+M55txCn03QXAsvJHCOabPYzgej72B2IDI7ybI+kR7pM+QzasdCIfwMQffPfTPWnw
+/eV/f471CvsocbVj4D0+CLE+sZ4Dztqpt8QHQ8g04N9Al3+SkrSQNGvZNvcvAAzWR8EXAPwV5HyP
+PmIVcUpzAUBmIOBHILwbiRZBQ3AjLSyT3xNqdfSLEOJ6rMpTrNeLVgsB3Ay10DECooRi/m74wJ28
+p1cIL0UxfaDDcRryAgxDcIg5N30/gMJnHjj5rCyenl4AIsNFIP1YDCbHNCsdVInscmhY6CUrj+FH
+CZBv79yW16mvZPycXslzftsrZRjOduTUE5eN0L4AKIgOEfxRVyATW2RQKl23CQNv/AWA+5/OfVQ9
+uf4XyRt3XhleZiqIA45PnTEGO6yTfjj+3mFilxZFPEABCZf102pc6OVgibOuocVfAOR4l/jbi8GW
+eNf9PPsFzFmoR80F4INVG+aBaDwLhMQvDLsAgBT+fr0AtJsSxi8AuevIfrLkBiKeBb7Dc3wBuLQ4
+iXqskFWHKTIbCVN5uE0qrtI31MLEXl9AzxlzI17UvCeijwy9FR+GLk7UC6gB2aL2LmPPrSGAoZlu
+SunvNzItgrvCJSH9T+Yq7gvR8OPTedeLkR9cmG0jUqtHePi2jnrZQEtLJr2k8yG1/Zs/7jyBPAgF
+byAC7CFfCUU4av6lHKtXL9LOjSHRE3GiE9FZB8w7FwC2nigiZL9KQaX8MfYCcGh2Adh22K/QwL+y
+NmsWbXbwqSHrtsTaNjyNitynLGF0omyaJqygQGaQWdAE8zIF+paOhukF4MlhWhbeVNMKj4fLjAl+
+1bPkDH64h57w3eQmh07yI/5SXQaCB10ArkmARFiFtS5Fo6vnvN8IQ5ZbCwoFkSkI4wsglDZhzqqt
+a/Gg8QKQchmE+wVAnpRBhk2SXvBT8iKXq2+bp94b2ZuzYJfHIc9y3lmT1ecgX0gXgJ/lZ5AUG9Sp
+FBnqycuYXqKbGJy+l1B8W+mr9bbWUGY7rc1d2XjXrANOTYe4Y8kdTDoraHVvGd++GoN/9OrcmmRh
+G7ZW9xwPOrH79ZuWEo/adFr7JkKPJVaaL+m5C5LzKfuRhDgYMDTR+XFC/1evpgR+wqeUVwLqC0nu
+d3m7R2mdGbjjIcAla8W4eSsL25jV4t4ftF52e94TNkRtvMtVw17JoaR6DALsGchUFDyPIV3OzHTj
+CwDnZKR76LQs4Gvnw8vROUIR8GwFLofFDRot8I+pj+9X/HZ3JV4ACKf2oEMhkubl5ki4CHcphZQV
+XUoEpXlLXb69bl+N23ozFwATDBm0egEQgQwdXKbibJfka+ItONOv8bcTZifjG8z8NeDFadkFQPTz
+5Xbs8lLRF4B/Lk9HypZcmReUq7un9y73/aDpIM1ub2yVLs1cWJ171wqZwfECIBNQdgdijmjeinFg
+ppewl257g/2J+PFUVj1TqJ9nvWuVZB2PLYlWuwBk3FgJe0gqvYxrHHHiuHZau092S3ggxy/3NL24
+ZN5mO0cV9oPDbX7tR3uU2LX7cadioWIwT9DN2wL5HUw/A3RWPC+nkCHmUoU3aD06a5PXng73ihcF
+ld5jnPGDPiV7R2uTYCR6MGytuTcLZ6wEITRbhIMkLwBv2yp7L8utLQ7B8vZHXLFUNei94Km1I1FL
+qUyZQTMR+zRNnx+acQHY1yZBUkg1pygH8s0zuQtAjCEv5FKqvN/JnLbFgafjEMBTb0GCN0lSjE3p
+1hxq/vYFoILapPeW31Qoo7/J6hrxo0rnlmSJr+y8GzTLaJCJtcPW4tN8/O1GbprylQuAl2/3c8R+
+fKI2+njqSHsIZPljQY5YfqIi2zay+wFT8rIW835urmpJB5LfITlgj/ljtbpl07srearJVnSZL10e
+kuQEqXIn3PLyqheAWNpwdTIH6ITZkuz757JoBn/mihXWtsI7zJT0zMeNgh98wOzI7MUvwR633jix
+HPK2rxS9RU23cJnO3CACYl7y/TVUNWQu4HP+6bjP+UdW5jfcYmaa21GTJOA2zU/ChL3Q5XA0kTCe
+Z5gusjapBxfPGNKZt71wR5BnQcA+/eNHBiHBoGm+LAmKIUru1wtzgb9wF4A5xisQigfeeGOSw2Z0
+pHl6Vu4ts0WWxIHY3hlHP5CF6XvTmdMistmldDSUnN+MST6iSqMkzjKZTuhBjMTqqJxEoM+rDW4E
+Tl2e0POPKW5+A6l+GWhwsNvemuuhl3bPRjkP/gnT5SqvfVwDfgDK/csCW8aFVwxerrLfHU16LtMR
+hzjtCciAtme5sIb77g9dzbRnexRyr4w/M3AJzhBbIfjEO/T0YQTCTInyAtCXB2abY8TktwEJ4qCH
+T1xFnISHYleiCDWU7tZbGgUbYX/P7OMvZWMXQ7z0SFEXAHP6RMe9fQNrxB0jZ56FIcgdH0LdgeNl
+au4mGQ5KmVFeywuPG18Q0sWhs4HScTZCIfwqFwBmufyr9RSw1X8yba79cmI9UBsT+3u3cKcnAUXQ
+Pam1YHw+g/3gUFcPuJ2jC12A4uO2PWP+eSxZKR820YQakgv3Sgle6LSMIQz2iaJMybRZ/6YaNBQ8
+gP/EDSi/0Vz7X6px7X+oBs1/6vgvqnHtX1Tj2r+oxnUPSc7HalevqP/fVONa2Z1rrW+4bp6MR9DN
+oKZ9r7FE3X1LkdUWP8CwdKw5MUl2sP1vqoHEmZ5PvnYHIMqR+DATvrCamZo+xD2wg09oDuQzR4cB
+iGfKnOwhiTqSuQA4n/+A0UQ2n7K2HuCJ785LKlfBy5B+Yhx3nDhiiqvjd5v2e50yPrDj3VeNImJ1
+4Ao/p3QjlPRRxvVT/qM50QMKbIkkqdBBsxayCn4nBObfT4A8o1zNIk6EA811EAOPEDLJ73A1XEEv
+HMQsb6ltrWeEf87VNcEI6fJ3pxo4gz9wV+gQHUSHtGriMN/CRy+XRRWCslHasTIv5VrRiB7I83wT
+3b/8ivJyXS8fX0Wv3oSWohBZWYs/9ZS4/MBjsMHTCBwxAk109zi9FJg6RVpRc/fCPMyPHjy5FLWN
++oJukFM4FQobOGsMjSK+RK+CV2PLS+DNo3lIHUuC8dHmcWuBeb85xFc7p9Wyr13V8sfExmsy18qa
+0LVL43FEbDXd9Q2FhQOjFgmBcuOWiXTDvgHQ9LR9iUosuCR9hJbmOM3ScNaa+AExocsU7sfEEclA
+bT/Zs+LC9Lx/RN2hA7ONweUg6klfiRFRpFqS2Ih6VDyRpwgtHnSvQULGaJLVqFYTuO5EnjySCUXO
+ol2iuvpL7ItqSYqgNCLMFxcmJbozMtzPATLDvAr2SzXHBoQEmofZy4xEjeGoUumENeq1WHbC+K0M
+gsCSQ/uhNF6SscSiXSGGqpuguiZiI2ROmLxBljzxRHyxOWYlSO6HshAgQwSw4H74JPbyg2i7kZ6D
+PPIRTRThjdYGrocvbHlsKnEJG4T+4Y46jTiBBJYavZ20U8r7VENiYzcp8+W0QmkK7Su3IpwtTy/N
+beO5Ach7/noilTO6v4KfVlhkGxyWhMlI5sUv621l8y5mERsliTBcCbaRrVB9ZaE/NcyBfvZlgE7E
+MWaBEhfyJLx3jLZj9DAWEk+KImURR8Gqwz8wPeQDh/jxLLLxtiL3KOJZJhdHeFh6F7r3ExF9Ducy
+y6fpIR9JmluunlnW9G+e6Fag6i0LKZdIX3fBiBeZtei0M5bqjgK4dIkFevnQpHf6AhBUM35eQABf
+k/wDW74AvCCwbYuTHfF67GVmwsVCUb17GuKpbekVbHxjfI72d/bDEVHMdhCbcXHEtAwwjgFMccQE
+f6Krhf7D+fK5Z5Xwyc+MEH4eov8pzcdKkn2F4PpL+q5dtuNjyDQxpgEz4/oJwX/vV/+bcMfLNaCU
+7oYscD0KpIAoa/CHJ/H2J1J+pagBHTFss6aN1phWgsBdu/uhkOzyWNAiZm5s1T7OKkQsrGBfl+gw
+itYoLWrIZvBf7PU1hnMkzbodJStfQf454yXpHR+cUoMdggKVYxz/tBuA2OAxO9SYnfOuaUfYjy2V
+g9GkRhaMKTMp8cZVEE358pjfR58SzzQROSPgzOOy+nGHBgftLdeoZRkCRFDLc/achKQoDyW36Aho
+RPy1uG4oOHau3bkBGVf/VO5+mnS592WNNoqJdi/FsJl4XNrUIBDUzAEKcN0UxoVpoFBzsMHdg1B1
+POl20WQxyapiahxzaG0pL9SNFVrRMHExNt131VIMS3K4tLuWLrpq7hvLhnRnqvact0+Ps/6e28X0
+lyPfM2QIfZcTaFWinUTSOp9nbcdsx0BlQ6cQ3o+njL+b8IO/gp+Fzl7RYd/6OytTlJViVEmp8UYR
+dTKKerfXcyBOddUT+wtKnyhyEh0Xtw/UQ53ElHyDfEV0vD2PJhqhGjoYEW8uAIFROJtLaRFcVf/9
+G9ZJPoDED0+eRi1su5xXo2jUeyqHcjkgdbd3a5KJKTkh8rSamg6fQFeTpl0IMi4OTXUvEanrlxUa
+2CdZmjd0G7uy8Q0aV74elDdIVORd/d07hCyAdkieXCc57KYQMHPog/BvCLbtkrJQ673jY91uF+zZ
+K6NdVNpr0jUyTSZ1gjSjLRJVVAodSIpLTjkguxVryYBrcMBaMv5I+YnNb80LwOuJmqyzVwL3ritx
+30/haKc77jrGmK7MIGdhP8K+jeA9TtlAx+qbMqEml9ZyEfQJwrYd3rR9nAb7WbPkSV7A0zRRA/Kr
+kVOV5zP4m3bpK+S+J3w1mfUT8GDZAAlDj60ujWPLY22/g8Ihf4KsCScyu/+/fuY8Lt/kpIgsf6QZ
+Dqw4k8LR8z8PjoJz97tcAC6DH95mMgiHTTksYtMISQOn1HCOd6d1qmg9B3g7ylIAKrgOh4frF59e
+pm3vuJ29sIE7YkQErreFfgQyotqgCQcN1UCxh8whqS99/AkI2W4wJJDmCvmqkvlZnlAaSQD1Lqg/
+i2DAeCSY3akJq9AUXEcX/yU9HFO43Ohu/cw6CqyscOZI1tRO61fiAD9zmJKAbMd87aDah4QgTjwu
+AIs9A6g1y70N4OKgcgo+fBiRS7xKmo0QPhz2/NOMqQMvkbvEpFfA5c9j8sUfJfF04snobVTnTNQG
+IqbOGrHa0vFz+pWdvPseWWf7NsJTX8WsjEKrbICJWUbG8zrz0mqX2mF1QZrx+O1dyLjVd5c4lDj8
+8ZulR1wiFb4h2JfV7c0vQIKdREjVPrD/IHwkFLYyE7PwioSomqk5RUxMiYsJ60C/50fL1ZrTmhud
+tX883EbUzpq9Oi9ui3xTBZkml9ZsZ12mExdu10hsAu2uXQA641OgNXsiEGr1dX4e+MjWU8g3dzvh
+5Jqn9vaO5Xl95DH2rPFV8lgz4zgL382bHKacd+/HeUiR5kPqltHbMeY6+aNq4zohZVlkPTHpbb7S
+EtD4GpEdC1v+TqcL9hcXPc+kVgqXkGz6dFo/DjqVcKjfA24nLXh7N6JXRzXKzTX9xR+JtPPWuRUV
+YWWJUbs0PVuWh27dZ5N7vsB4fCjdMXz8Xcgm2ni502qTGwESJEctEzLDHZ7Ha59F3iKVZoR/Gzv/
+jkpTRXj221Iaxi/CqLNlhXG48H6SvVuVu5biMMOmBeq9scyxHpLIcwEovACMhCJsVsiHBHLTGqED
+GNWqo1bGb26+1rPEEedR0Vq+mEFReY7lYAiNVvQvQ69iW+gjP8Czt1HvfPEwPeRcVqaSv4eqTMWH
+2sQQE4SpSaU+lftWjdtl3v+ein0aVBHhoyhKR/CPC7XJm5kEVbTZdyTKLiX7jr35KPUH4TKI49qa
+A02FUZy319IWrem0HrmN9KUpKzR4kL+SuTtriPqhedoxs4sEqVlkFAdNJyQGLxdVkciRZG4YWocS
+UYLiQJltLz3ywb8hdQSMe0y3jOdf0hHLXfUKM7pohwEJxCpa8muD1PF9PGLnTA4rOlxw9gHhvX8B
+uCdbZITrbkA4Qx6uX3piNrtLXb71p1zrla+nyXVh+Hg198eCxxF7vbs1q5NTkMfqNlm31OhSoOPD
+shcA1zOi9nkydvOP+9of6dHJ8zeXRSHRtb5ymsm+Damb5CFNg4ER4QMYNn2IzeIIv2wV++Yd0XFZ
+p8iaZEdRZhmE5zUYN0RAooSF1vg2OMcja9//5uPlMVxom4cVN8ov2p1mvXcI1xrZVsFdVwB5UM3J
+4WSiCbyi6MqJ0MY6iIeGfnEuUjljsR+ggBoAAWO590sLkKvI37bj58mYBoVjeIJ7Of4RN7dd2RHz
+NnIH2Fwy2hzXEeiP/3qvkZabZq8S6JGjMCEEYlWI4WDzqjEXJdj2nZfB9iG4dx6nmTkNw2MMY4zm
+7vvBFehh6vBYlVOl04r/h7C3imqz69a/QymFYoHiGtwLxYqTYsXdKe7WIi0tUCDF3SkOwb1oBSse
+3KWFtlhwJ1AggZD8eQ7evZ9vfHuM9yA5utfIPVbmuuY1f2uOtcBALkrZIUh4MdWNxW5QUiJoCfgb
+HF+herfg0DafITQ7K04mBUK8WTZyjk54tGVMfp/4ENuRHg3qrHCCMeuHpWvIYBg6Nxq2kNxUXJPk
+wr34E6GAIZu5bTh4c9QTAoxXkovXBC+66JckWhjOf9DmNyHRHWsGfzZz2P5BkRjCuemqIgtFWcXX
+ggT7b7Sl1EqTUx9bVlOm1Iv5zbdrpJ+y/qgb9IgyB8zaoLSi+TSSuibo5u9i3KQ4TTKIzmFrkSBd
+0e+Tf+fmvm6KrJmZql7pPrvwJp2rWUnn3mlA6VFDOs92Uek4YGa0Wl/W7D4V+cD3QFfav+u/Sg8D
+jBYWgjRq+naBmeWl65VOLAEHl2I1Ig+ZVeAqRHTc1jgVxkMqJDzW/Dw/9RX3jctt9OYWNrt7E6qV
+dv2lf1Yk823ibjKHgYHb9wJqxpn83Ev8KmmcG7aEMMQzu2V3qwXiZIahFz73wMy4ooQNYUKqY+iq
+owbg03v+pEx/3XuEDA9U2bbM5Tgykw+jnAv1Ikg5Mp2+plTzGpO8Kd24XgX9QHfTbP3yMHOJU+et
+uffFPUGZAqOUrz22P0SZJcHzhtNa6YP/ZeXT6B+5wlZ00lQYo4Uxjr1KofGA5tBXnbo5d374Lom9
+9UGBe8v4ZA2t5CwzKozWTY1y+SdqBeW9Epm0/Xc3t9VMUjWMt3lTZ8Rfq3pnZHCyRN6fO7RXVs2R
+xjGN2yCwCKbDWfs8kxkB4chKM0wdCjdji0CJ1URQ5k6LZYCs5EBf6SGfh5ycrPxFac+BqcIP9cWZ
+4ktBXl5n4TVU1XAmqksahVI8uApINTPDwULaJNmCRpvWLMUoiK5XrPrMv8pI3MOoEC/8V+Who9on
+uq0bwc/iDzm3FkY1zAfoZT1qevXmZiIBrkG/VD9/RoCfknJFwutzv/NURqfSS2vk7y9Q8vL/tQ1o
+Fxru0pMztssxCN0zMYJ67s6WY95tZZVq4dFjZs4dqy70Q0941VDilbJDqe7MBW3iexhqN432dIM+
+YncrJjdFEoIjg9BLORJ5T1RwQzFbolFf+cd6BcMdU/sGg1JkFmEX6bp8RfenVHRF95FYrWVVX23/
+80ey4T+7pDiCwik/vmHSBD9QPfvsFLapW6MZxVCm0zbLo/FsYYhS2I7R+buluo/2Q2ODT8+kybev
+PbMwtLbxjyDrOqRqxd+o98GrFI/VbrWt3AsI6SuM0y+/uIxYGFdIzibx2kkPuMd90RbSyKoJU83x
+Xp9/Tek7em9Qo6rosdSjNy5biyxajak9Im2mxpxiJBb5uumEpgCxo8wyVGfpuVuRGr6u2wLHJu3D
+5PquO79u0KBrwqb240vAj6LdoumGpdT6h8ZqjjqqOvdlIJX0F1y5m/R9Boasxn5/3TVIgLVE28Q9
+5+eHOfynbnjWlsyBhaNicCZfocziTx4m9INJyWCJVPFa8mor7gthz+6pipBn7Z8veen3wZFlqluw
++Sx6Ex7dvF6jaVRzkT8QVu1kdFa6YVKsAWQ35eRRmpYXykztneNDLwPxYRFCNvHblFs6OHPRntFj
+h7aDJgGlOhVlTmy6PQifElYutfrP8L5gOrxc+ttPxnCdZMXponW9j8a8+5W1GiqkipFb6JL5Jm7Q
+qEVGDZCLZ1cbdObe54i/NmNdoED01/fcQERtbnufwvB5nt/zXIP2xk4Vn/X9na0bqQB5pi+6G6XS
+BtAHpbtM7nj+jYQ9zJ/fEN55TQPg4i7ew42Vz9VvL7uOu0tVT4CEaEZnlU/qt7m7xwah85H0oght
+C8y/4QYBDivgv+ED3D6Ch/8LNx7+D9wg+G8D/wU3Hv4Lbjz8F9wgJH63rTxFhqPxf1/w+rCKBVtp
+R44/yhp00BZdmx7k/endiYKOxdjrZjlH7biTSkra/8CNpCvVO5eUI7PpAUPQyBtet3KHnclBYLfp
+JoWjB6f1ss9rQVEHa5bBDCNXoI7kVPvYG8MOal4okohqNe1DLOeHK4eSboztDBaQZ3VEv917bq/K
+Rnd8uvFAoNDzn1aYWc+lFhcsYGjy4rQpocks+pkN9Ka7HuPaZbHiTkmWGyjQaclrKKvXdB4Mi84T
+I6VPfrLUDMw2xwIahS4fePzhx3whJKUDlDzG/dDUrTU/Bv/+8Vx22hBtTuJjlhOPCKi4dsZErRuH
+vW+MJfzcBYWBvmDc9bttdjtroVFTYPEQmRE3mGwyEO0t/2ru5LqZAPiWUI9CVHzA3tm5n41xgQuS
+DV4bmkKD0zHUEkCwSjor16wQBYxmjbw8gv5PI/p5T2+1hAMpmIpbFT586QBzhK5XvM7+venSHz8O
+6ylK/w0b2OaGubjbwGEF/yB8N0FNJrGXxGfcJZmPXikPguJC5z/67Q/8xAJGBZZasAAShZp9V1qq
+FheMdt5Tz7vpQZx/DwLD65+R2Zxau0E8MK8NMSqQg9LqO1uyrOvBG2dhzXugdFwrKKo0aiaMBeDZ
+W5xbqm8fpM98Bu96AMMzPRu7aEsCdOYsNWcXm+4qytrf8JMNLIDa9Bzi39pId3BdRfjhyjZesU1z
+QVWh8OGP5gfECrDY6VNDxbkLB6XH/KnUX2XPTV7bfQiSxAJEPrG8RYhGpSOwgGQbIqReWahUuGsv
+HAjMuvUOMYdoor3OdcfBW0yQQf1vA9BeRzwSI9WY2nVdlGVrLMIadFc84KPBFZ1yWICJu/u+Ie12
+47Ch1DuB5XqGL/sJS3qTTmREgZ3hE0Ylbqx4/GQ+NlgA2z/RseCJBfzEdCMa0C+euv+zVTCdevcG
+kN4ALCAxavEG45p1bhyVK1uVEwNlMh9KYrb/PrWIBbz73mVz81233PrbdHftvM236yIMDiR17KZs
+6rA7vi+EKTV8bfHCYYESC/hgnw/eNlMBh10E1RcfXHfehYmegp+fXUJdSv4DqhVz0l/c6ISGS6tV
+brTms4GiJFK3Kfg5emkpwrcMvTTPiWrAuNeO/LWO3z80FpVl+2SzZW2TfIwFSB/AsYB+SdkQLEBU
+T4klffjSiDX5hLaCIRnh854Zup0spDnQ0r0Fw3NUqMy9/gyDYeQsIbDrJreV2D8S4HO61Q60Cfez
+e70fdxMFiB2kteTdPnntCQA0B1FVyEdUIQKGyKZOYgZzuYgPL2/rnVQfvwBjqL8ZNn786XPTvQyJ
+7u3+TdDZc2MVt3qCBZBjAZ8gB3cTiosFSPIHBrQucjGVPRWKg0nz8fR/lK8m/JCg+25RQ+Nuob/D
+AkjlIWsMHlsQGnRWPIYBRhS952CjY01Z5n4XwstwJtXl/RQe29arIuJ74GIs4AHkDz8V+HTeNqCW
+2PfspyRhrCS0Y72bJubxDhIjpA0JhUwwGhlHifdpp5Y/2y8wOrAtg/DfQraVW3HLWOu8PeaoBYus
+7yK9mYLpCFLDwgzaqmBfi8dTewJagETe/sAC0rexAL7uM+ky3yV1q74zkAM+ONfOkJmQrx/CesIw
+h/58a3MKUVFUI04Ma6ha/ogvw//aAVKT8ejzuB4WMEDKd3VhLS+g/veTBP/EhuujrKjXZSHHgytx
+i1r+/Pp6cFdGoQYkb/s49xRa4G7BnbYv8hntc2ifSZwJeAr3B8eVX65AMfeNPOuOpTzuqqcQwujc
+GyN/N7jXMPTKgMBpVpcklOC5OlsZsxoq3yDTAZ3xhNeQfLuAUy+ctAlk/NeoN+HbLaCNB6m7P0j6
+Coz8bkJKzBCp5qniMred88a6OjkPbXPKLIQ6ocy7pckPiXsFVsUC4nF758JEb/Hi8iHT9ua/c3vO
++qDDlkGJRxg/+28fK0vcNm3w9IJjrby4cfsXvJ27B/tWz3iCBHTimVHtSJtl0AgWoJTSK57hdoAU
+3Oc9NcqNgEX5DhPExmte2yx3r6G6Jxvxv103z3LgR7WGnryFyWABsqs7E5s3GK+zLyF8iL9FrsWy
+c/akMNKmecUclJADHSj3OYtgiGDI+5XvaMiYy0LBnVhxY7gkjMtrsYB1MHyYF5EMfcuc98AAKVmW
+ateQKzLZ0zt3XYYFbKXz5Vv/xmWvqnyABUhMYwGhSBtTLCDj0/NXutCJvwLM4A1rvqXutVsh5uOQ
+F7eWYKqXm5gGlwgPMzki58lYqHhvM8et6HXjVO09IEIierv5FUnhi4GlSwNHeaO0ZPLbBpQuDdo0
+7iAx0PMNhB+H8XZRGmSoLCrIfNKN6aRpI4DLbpYvUrrr4MUB4d0XmuEE/sW21kaO3ttzzBEYr2OJ
+my9DAxUaFGIcX7K4JnLMfMQ98lpImbt4X3rjRvO8lj3Kkac2wwLOKhunuU0nhUX1kofF1TEBXsbL
+DPYWDN/H6xn6bwUl1S8/9aT0qXVINjpMmk1EUmFsOnoRZh9jqovANybyJsDzcywAsN+6Xu7U2QAe
+Xz276QTxo98lz17Jecrx0IPEsjjqgn/GHxeyVl3YaOxkFgRFuu3sY7b0/95JpeePD4kNd9Luwifv
+GdHorovmtGtcRNYEoH9uB9GgudEEWMCFdY0mATLYmDOB957fIe+O9CbGA/phYbizm1pXIJlK9KNW
+0u11pxEW8B0yeHonBCaJql1YAOdVHHrqstbdq1mQdea2aoLDBha1tulPk+eLzodMPEtd9EBnvsgS
+b6VIKEpb7Ba6/jHsBjJFZ8VV7L0WLY/FAn5Us2epqHOGmDB8uF14HL2sEnNoewx+F9GqCgtpRrWY
+Hb+4e8a8iAMRWPfWYfSXOSP976ceNx+xgMfGNLY6K69i7rN1A9qDF4QBNMsOiyA2yhP0Mezb3Vpn
+em3HCUd97LmTzxtztN8MLREs7w9oge21ldgex1bD9aKVb3J2+2oqqnbpMYaw8QoLeChvbLOGajGi
+asr1lYwf9ZjYhBRUb+JL6T9cqmlCBKRc19DaVDQmb5UwYMh//bPhvjr81O4HELYVaIgTXBKa2mcM
+zYXg2Jx6YEiMHLqRr62uMpURl4Y9XXgq21tT+Og8MTeLyXu8U93z8/T3E9VGbk12JaZlhfzg/Zmn
+s4uJ5WtoU8ZuqvZu8SzVhTBzzrq5LCyg50cnZHAi96sz8GU895xqe+vEXvEh2torMuQgeQL9g2gR
+M6PqrYhqmID0MPTfxQcIqUlNKxYLOZVAX8VxQr8AMZmyrvEcgXm/UV66O1hA9AeDwvhpHXxKqFgd
+Ny+bV028QmkAoaCSyendmrox5uBHZ5wfGTmMOJ3j8f0pHkP4hyTvmaKuNOogZ9UEpo3bbhKU45kd
+vyFfMNJ3ecDvENQfcgl+eTd326Mg8Jw1BjrxG9LHgZ8nzB5nLy7AESqrkpWFqm+Btrs2Ng7oJd1D
+D2OK3RNnCwpPmvmen8UhHHxzbUS3XNLEyb99wRGFezFH+0jkpj1bsegz691IEuKLWrUtFPheJw6x
+HwgBbn9iQczpc9L/OB2MRn38QEnU5TFA2raKHID16i7nAI5lWhymxmeyNyZeESMBwy0sCJK/Qvvg
+5O3tRAUMc9x9RmnTqG/NID48Dh9Za5GozkVHhR+LKrMHN3xNq6jFUNzWaltkWOyImWRqrcnEnUv4
+FcchF3ru4BhyuXrqkne6dFtFRfcQSnrnCmzWl7EApu1U/8qb5lNmZhfe4a97pCcizji1Lo9E3ZVv
+mjRLbYT0zbcfityAYDbcCHN17o9xacwHPDe5TiIihcC4fsr9chgoS9Fr60+ojYxZZMGFUuQD7/dB
+4Dcba7xvjyXF+G03ySLwN7weu3CD4nBvM6KVW5G1F0775fBrfc04DveNrW+oVXgT2YQW/SGVQM6d
+2uPsd+1iAfeg96LjQ7mXnrp6+xsw+fg1PIY4+QetdElcMEi0Qt40q4z7fJPvcVoNyZMfYBfYhqP4
+sQAc2E5QXWfvPA0l7Sj8/NrYHKo5Y9alEmK8t4YF+NE5W9BCKQ2ehsV2HbuvWC7vms+mi0wUbnP4
+CFM4+qVwfUeDc2WJOxdHq7IEedY0OS5je7zFjxxcyt2CObEAjuPtP5aEhvU0Uj1Grxw14SUqLwkm
+q1kQxMtc8b0YIm4bgDvChVaZX+6legp1ELOAN3XiSkYoyG+65zaeIjfZ7F4Kftj8jdt2VdXB/Ymf
+bElCCUWE66rx6ytBtWWcBlxBRmouHAirSnUhyrPyNS8+elHH6ZkP/vWCphpS4k98/5xve41WKz2W
+r8EpIh7Z78aPlKxZavlY3WLiWE1LRazu7/2oOHTiWiIcJUHr2C1q42hzzuwOPbouf+ITLKpFyd5n
+HJHm8q0byT4mK8v5YPqZHGQEz6xf1ni1dY0vnGBoAQuggyPCfKs/McKRkPV7cicfJDTYb+u34tmG
+wn32U5Xx01SPP3oTmJWdvnvL7DAzgzBOQ2iczNGH/LAei67IdD5TDJ6FCz73u128f18BCktlZzEl
+3o+jaZ3REB60GQ2n/iIg58emDlx4IR8UZyvoovvWljIcnIs78kD/0/idZ3IYeseSZt3wuBIuYqdk
+IlpNXrKlqT4sAymHdw6jDWcVYpWnW7U/nosZyLH4js/wjfC6p9NnQ/BeY3SEgB99iLa/PPvuh3Ci
+rnuswEU8fGfrPyUFWdcKGPCrvCgZeRB1zF9/Tjj0qpfy1e5suHPSXo532n6d5FJq5xjeCQO3bKGO
+oHFlWqNeP2UEacDaBFkz2Twl7oQwiZ7Gk6Um3P7MxhNpdQJTWHn66wQWeh/efOb3nkYzI7sVpyWL
+0eTgqG4CPPJHjmA+5yZRyqehorKqT91BT2G5mw/klkKyq2H5n+YTWro5lGb9nOYmyFX2vR++kOuw
+5/G7wkdtf8nHuVxgz/Ftnuqg8JZ85/tK2J7ZtUBSDh+voNzly0ecgeNmtDKYwjkc/vpO8ajUSBNf
+DLcF5M1PP0Ega8qymSyfp6mFUDpw3ZjeY60WCZsKsWtwlUUpDhm/PTWMwgLs9Xh7OLopkZsgziE4
+hG3te216ilLr+5Lj9fwmU5/JciFuG+TL8DjGA3vFRZFA1S4eQ4fLl1Ifgel6oU8oJsbpA6OB3Pp6
+6XP3z6TXogpOaXRSJX332bnSnQMeQQhlKDkI3wqj9g+inuGuNSfX5semp67TNX2meKV2LJsJ5HiO
+qzqxsrayAjFUerJLvIYREmfPWvAV/bVXsKjnj09Yc+89IUsFMSJoOt5X32nIKDz778fgQB4R59U8
+dv2HswWLRtLqyqry0LGJp44/eJEjryLOs2r5sgHShSG1u/b9K5yhZRSVO3BwWCR7gk2I6ouJj5Rd
+emUpeFl48MFAzQWmSk5ilto4hdSJfn+2O4GP8eR4v+3TgDPNnoDbWStvVIF730AkfWK2xDLrl7O7
+/3DWR/05emaepevq4Ab48Vx0bUbSbqSCTG2EhsyT/MWluuqn2Wu/ZFRQvP++4PUhDivgv3ED3L6H
+hP9LNQj/54LXh/9t4L+oBuG/qAbhv6gG0c9ndL5TZDj6/3fLBmEVC7MGAZmfIpmgPITLo00KWJpD
+PxV1GOigX/u58U1WPib6P1RjXKPcDn+x3t1Yop0c0OGEbmvUU6Rq86TsM1J0kjgLrTYbR7INdGd1
+9Kpq95XmvPabD48IaDvdEQh6yiZGZ4fhfTgHI2Scb61Q4nnflKB5JOoydXiumnNPyvIX2yP8SAsD
+Ed/n8/FicmOqJr7eeiyh0RPWr57rwazxKT1Ep2fcWfP1f8zo2C5ZtO0WPIoBfEO4UKQluSyfk8ku
+U7oSPBcnexQsdaGMIxSXJHluyCTn7soGF4EcGYg4VfTG2V6FrBlqXVuDbRCAJyWgZFMv2gUznCr7
+WxoQ1zgh8Fdx8RCNYXKtzO6OG6NxaC8Q5YOfcvvRMdHDhKx4FPnpGTPe+oROpLQ8QZSGHu7WbTC/
+qcjQdFM1sMLD88gM4IwXlNgiFA0PgzerVcltzXgKDMEL41Knlqy9JQe/s4aP+tcrddJtrG6vbRAN
+Ud9ffoIFCFrk0x63tw7Iyyzwpo0z0sganSVD48mZHYl1iX1MuXG9FQhrXAo5dQU265Keob///BbP
+ItUmcdsbZDbD7A+RsrNyM43gMlAXi/Ecq8YHiQ+l+AQ8uP0L/Vq8BFzW5Hk4S9Uryli6HF2iVbqF
+0A+gwmPGJ8iCPmZ58IDGMm129+nfPLocUdFkGFcrfUka7clNS93pXEnVIVm3nwje6cyzWO/jAZ4k
+SWrGGmXxc9w8wFz4r3JR3Y+5be7Z+v1ZkfER8L4ddUL89I8RlvRXFiLIGuINR9Q+9PHbGlP1zhpP
+Zkn6KGs3OGLmdCU6JsmVYWh9nt/oc89TeLPFn0UzceFs/cr7XYI+IddT5spTGKOpvsvyrYdZVGSq
+hhNsGviMxraZ4mHuYBqi++iqRo1Ay2a0Ub1KBJmiRoji50hp6aDE7YrrCloDNacD72i5z2eyrw2c
+BnnJCNX8ZuPJqcmTwPJuyQRX5/WpknqPXFXUpQvff9pWFLPHAtywgPr4oWHQRUAbsjN5lOhpTX7/
+UsY03p/RbbpfI0Jls4FgscD3BFbACa3ghkvMsegiRMOOpRexd3hyfHRzpw5VzK80Xscuc4/Pwy++
+20fF7vDQ3B/QUkQMKM1KvTL2DyQa59WCvFtUzLA2VkMg+Ug5dcKTJ5zzkYg7nwqlAvTpcDVm8RWI
+Ss5XS+aw3lIs48IAHhf+dTg7p+cSqkLjf1sGA5zyW9P6JyhmtHD9fu9MZOdSobXPbuo++jatIkz8
+eH94vjowr0wMOHKUIZQHTjxA24cWXKn7edmjyq9sl0rhjLn7FD/OlQLo8rsf66XepJ+aerJLkyRJ
+dmRWcta7TvHQZf/tKG0ntfRUITg67vOvLtVlM/9tED8lXz3Mjd/t54w7oc7srwVc8Pv1lFmqjvgq
+HnBbh2sr2Kq7nFqSwQhI12KpLqdhvcsx24PIXjNRvJFbweE3FSz7JY4HzwVomU905fxljTlwEt5+
++ilHSlgpO2kVx1onQaxfyE7ZkkBk5PCHunwyoPKIwpn+RXeLGD6JaodYZh/Tojsz/YWov2b4gHnV
+pOWfBKCf/uuRt+Zd8nzvqR9VwuEAoe37WpiGNtsh/Li+yogl5c+2W8+n0kTPYGIxwWZNvMercf3k
+6y3fKrCAcsm4Z+ohpBRT7TVi72tSdE7I9WYHOtNBHUK/fVwyKdRyB051E84VMYCSnXHNZ3g7TWOY
+Ku7eK9iTzb9XDu1eI7qEUawJ52CqN8dYgIYHPicA//rUsvPpZRLwTUJiqlSMIMk9xAWmXnUDKcSu
+PFi0v21SpBRxIytBh1S5NmVm1I0LX+uL/YP7saNI4scXOw67YJvVbZS0hGcK9wOEfWUBLkyLzNba
+rcSWnMHnEkLpraQR04Uf2yn43c7f7ZAvytVxzgsoKiC5wmjLCsIxEsWBmvgS9YT416XjxQn+DqkK
+fUOZfivGTTEDJt+fqu9QhQc2GP5SisvKrc2T31tYdUzdSBYPNy0VnUjscQH287JsBRWCTXSRkn3g
+GTu5ykG0yGzbqRFxaZjRqaH54kgzqXMkAgFbae0uv+T7gJccutXeedocdKIKY47ViyBo0RruvX+B
+BZBT0syYscC9fi8vPBwQeLyx39Sf3m18RBEhGtf0fe+blB3cyY/YZy6I8auvL26pbrb8z46bUqJo
+XYmUfbODfFXom0ZjYIwHNIfx55NzUwuOI+pxoto4JetH8TSzdcUPkGh9G4lRXRIzjpXx1sauZ8Hz
+crz3Np6VMPp83rwrLhziSfG2mcdEt7JD2A7tdfK/yBtdxy4IOSxnM/9clhQk5nFaWMrqF2Q7rBhL
+coh7twAWdCdc89FJlmBHHDd0HI/B7GKP7IibhiL7bmZDu30SoEsXAUkAUi+zkJ3jiWz3t1hAZOZK
+NxV53nK4st2E3KxCFsgUoJX3l8r3nmaAvxF+b/dWIO/Edj7KecEntUIZqv6Ma4pPhTv7vX9z0foQ
+emE8pegKpbt/dTFw/l3/4kXGNV9poTXlqMLANJHvitLCe1ZYnZtGoFDxlR0kfMHQI5tiwQSy3DXZ
+QBo9hozrrqSD4tRz94KnBGsOs+fvBObxQ0lk2Ueumu38TrPCuh+JxA3zn0mSt3RzpMjFVPJ3D7uq
+GF8PQN7F/nwcXCI3P0kSWVCagRHi4LYjVzR+l+muw/hFOUUGL5QuH+lnT7zimlz21JLRNJ60Mrch
+fnbLyrLkTMlb8Qrec07iwYrz3VpCFJe9SdU1b6W3WyXT15Pw9D4WIO8pBSU2SrPHAUa8u4B5j3BT
++NpJ1ygnwRLVP7zjPmri8jL8lnlVeFqWJwzkW+qs5QzUHaMq23HpeKTIIpPYrCrSfyX2fmdC6bp2
+HkQ9594IKmMW6H39nW2iMLBex6zSJa6bYlg73Xu8uGFhlj3IjPYTAS/RKl+7euNGTXHysh6z50qh
+tABt5YfOomh4bAoaj9xO3dZDEt4nSiIEPD5lE/ILleVGj/w4XBBQsLyyvVJjHR316fgwDcQXYlp0
+TMdXdJTz0K7MsDqdk0gpbaThe0/3TW5KWAXfvXz9uLH019DCdOvO1KWeL/D49jKHEdyxuiUkP77a
+lPiGKtpo9QUrd+W3jicuW13sLkALH/+iXGvoBn9jxBztzolnPkt/aXKdq14ZQi+fQE6P6m9DYssm
+FmCKf5/luA8YIyj05HsmC+LxQED92wiR0DPzVViuxGO9aLfGTZwmuptyHBECG/B5pHazIeBNkl0i
+QuIDN0eIUYfvk9n2gIrMeyEhq0bsacqq9ZzNs75WnYVVknhhGDcvgrjSvZax5f0xPiyAMtKZpum+
+NWuyMd8pHpHgs1BblM1he2yan4hJ3KDlzYWTOefHSodQv3ap36MgpMEg9LzGNV7ZMR/6a4r/CguI
+7p80ZAmuko64MTvHR/v4WJPg3v6YmnKr6f2u7+2PrOgvH2iB5aQvsh8Xh30vM+4fEeKGnl34X42W
+b0NB4POnoExj3M556hn2Avwazz1nLRsndUeXc++z3meyBbd6sckxaLNvO/37HfN7PHiv29dGytA0
+yCIAmLJ31U/nVVEjj6z5Q6Q5x0WGogPIMJ6+fxK03q1v57n6B3//44MnXHr9t2ZGWADxXCt3Rt5d
+lelfSrKuUz056fgdCHMlfDP9iFSd5IDra0hDpsLW1yeBZMYpqNoPjdoiGggJVnncoYs31eXx21yN
+mWEy3t/nUk+s/hL8prj8/HAfC0hzin08flhjOHW1qOyvkUjs0bOBBRhrghHfORXSfAaDDMbtp/i4
+D26a7AhQImMjSHh8UOQkuIP6rpQ/tSwlVEtHleJDLwjvme4PdcgfzjhV/aaUC2o5t+0xywYMQim5
+eotZ15kzJfA5hUVHLuBNXkJGhvYj+/UzD2WLXaYLejmnZOfrGpSeezGJDY22Jv/BAn5hKsosAuGT
+M7pIzmJJuasRCmRRytuBMdiWM1fd71rcX1jAVpjEOyxA2JnnBDo8C4sGpd/yihH1sBhInZhXa0pk
+xZMyBq1wa6iKDVumeZ8uqp0LnEkeLLwwY0G2VfQxCRvwn9E/Cu1/44WZURvjZgp3y9gbs2siUeOo
+0lephg69YzBqVB2TutIhugWLV4L9GCmuTETwNLZ5fMUv/HaPtCVNuXz8rZUCY8lPG86oSbt0bZYX
+C9Ym4tL+gI57uWTx4XvILEjcBqs/nfILQO5AWVAGj2COqYNLrjbv/QqoQInQNm9zwafsuSucc3FS
+tbT09QWReH0uGNwhyqYPAf5y09jFwO7KzI0wfEVRAtoxoUG6HqdIspR7K63NDybzV5HlKwZM8vcr
+FYC+pPRhAXhCz8rhdGUcazoZcnE4O4i30b1x6gSyw+YMFHvdmT1j1uLn8IFJlYAu3bMZIhVdWh/S
+M+dCTbmfUJQ4w30yh52Lq9d9okK623XEWgMnlqs0pmuyUyrW93NpXiP0BsA4P/cRnVorocFSAjVC
+D8IWpDYwIqkczsTSqlf+TAmjF00uSGIz0GsJU7iJrsB2mC/KIEfqnX/j4rq8u7rGHkSiwHkz4zd1
+Jpc/FoBU1jsHDWal0Fq+fhDtsdZgfbh3TfaS6eUc41z7bJK18qyj1E5OySNFCK6Ox2sMCdgO5hMT
+R3bPJM3Xn7jm6r48affmUnNEDmWQlZr6cqSVzahBrMq4KuUJSeMEykHNJ0YSo2G07qh5KnFumEJx
+hta0nwzAKAYbl4FFBl/Xhh17AfYv8+knFNxB7cnUXjVeQj0d5lq9iOmWlNlH4hM6aRqyfooPSJn5
+Qp5XTM/Z7RqtxlNqDLfU/JFxGOjV1fzWiZf6s7HhYH84mXgt28B2pOFbfnILZTl8IIShbQAJodLu
+SulGh9Sb/8K5k7yH9UjTlG6268IomhBey5RYMIpwT/QIDLxypAOfb+pxreAjXDFVPTEx4SOwmy52
+B2Rfeab+dPK0ARSRnGu9CrrlPaLUVN2J0/+YzdsyZaoSkIkFZB3VCGgRNUI5zCiAxcV/hI02zqY9
+P8wG9skF/+m5x/Z6aunzlpo6l9v2m/55T0P4im7x0NhpcUURGYZ+fIKoz15gxVijZYeQSQcoaWCY
+6S0bnew3PyXGmUG8yQ0x5mBkuW8CpcO0TwgPsaOaTetUpDfPlmNsHPPww7MG6pLvra99NwKnJW88
+X0ekB0bP3oJ33lmT7j9Lg3ud1pfGSfE31KakThy/kFVf8H6dTaU3aDPuedPYokzV+wSWtZuaLG2v
+eNaNeOo6892mXXwjLk0uvWuPlrjxzZsQ8Jo0bHHkBEMT27jj90eoV/uet5lNdF1K9rFABNQ9Gjyd
+HahRFkAA+dWq9/rb2O18KGHuqwABa5aCJbFGhKMYTUuNMiiWn0rqfV+dRMJJIcTmtB4PRds1z0P6
+3JOCYFNv1H8/7Cx+4G04K7eI4/xwYwOzoOziIwXuHsjZxsTsxQPNBpYsa5/KP+Zj7TbRmbEy1OC1
+Zsqic7HO9r9uyCKJyEdywHPrGhmh9oe1jJVd6ePg7/WiVSdntQSqNFG9hKHrccgOEYqtkZo4NTgW
+IPHvXg1CHFbAf+MGuH2ERP9LNYj+p1eD8L8N/BfVIPoX1SD6F9Ugpidf954iwzH7v3s1iKpYhk4K
+j0P1wdLaLy4EmT2Y+Ok+RYkbvUCsl2oYVmA5fV/+h2pcMkNRf9xXHJXOu43lGWJ3njs3KmdzRzNq
+mZ8k1AqR3S6l7tGksll9miDtjbR2tjGiHLkUsy2d5Jultezw6AJV3tL87RL4yKasX7+EBcSLpDbn
+4l2uiKIjU5eZ47l3PhIZeZR03CYLHX0RMUTKMtdiAU/mZ0N+cLR6iD5mhVnwrZTDz54PsMRLtKx1
+7zvJmvvGjCaEJtEPM8pyb9X6WjWghQ48cE8trR6YCQwz7rnYjGyPKR4kn0BMyDy+3c5LPgq3aVIY
+V4u2OQbHE/+R6r3S0NK/jJM7qaEPBg1eXNjJEtJhAdp0P5Dk1vsRW8VvC7z4KifynuLxkclas/cY
+XOogWym3Cyiffdle4srklqcSYDcrkbKEN88/n7XNuaGpyh0iQXeHuJIlt9NVNdHXluVHI9FmgphD
+lVk+uUhYyxIz6dfR5FCXRbFt3G1X0anTCtGyXOiOAbgbCZn620HWPEhTyA1H4FDxEJ0bB0ykn5t1
+c26T6+KemZlAwmHC/XzCZ7JgExyJBeAc4K58i//SHVO3qpn24uyFX4uRQPplofyrUbt2YrpUZVp6
+6hc472lzzkktBZsiQdD9M60+onntvvll7uMwXF9xzEmMO2cg7PcoFw+4+tbA7pMaamCoBeFCadXf
+i9ZrBrZCjFrn7kne5J52VwcEm7CEeDLK1iXRfjJKAb5Z1Oh4EI76JN0WxPr3bWsdFrD+LtdXg5bo
+TED0g0a/oIAP2Y18ZOy67UYsagcL6J/AAqQq8111/4hVB8sulYFkLhCl3xKhj1Sv3jxQ7aZdIcJQ
+w1bum+28tTCdeoFeIUN377yS1clSjRdBJ3wlVC3JIvwTbIAF1PjrqhzEnaDp45z4FmJif1ShFzDu
+rgmKJzDfLbsAd1Ih8ZxOg4mduEnPrQR5fli+p8bN4e4B5Qv+3Po9fHHZxaelia7J3sjGZ7L4Fccc
+aNNcEIcx/aQ6tYuWIvcjcFh15MN0zzDc7HO/xYGAU3kefw7Zro6bT30b2zDktvEctK0Y3w7UjYb0
+5d90crqfvdgd1f+5/8GLpibr3cM0e8xVmYCLmxMFMDOkjsPkqZwUsl27YHUUCtHm+YCMfxTELGAC
+JSIRcZR7CnzjpaCFYTRLDVKwfGfiumdJ8OS9Kg2lGqijBSqz1gMc/PkV7gehc3YjZZK/vNaslS/0
+QMnhyfYjOwgpbCE3V2aE3D3xUcrmi0wUoL16rW+559aYOs8ktvJJ9dgShMtes02sog38XNkgNe92
+rI9IZp5COeqJgeJvkWEaX60Q3kYt0rdnUCzgwW/JUzeV12xeCQVXBsPX7T/kH7wy1pHGwQBB52Hs
++xHC0lcaEx2qRRgnlxaXDIY2mf3+TnMtofdX8NLxR7ovHBvEsYCWZ7q2AU2rIT/+ECIrWvbrq4S1
+uiKWfPz5Epn5NYDm9j3uo4hnxyOdnp01AZbXzWYrsNK3SOHnLBNaI4tLgqy1do2d7xm13W0PTtFz
+APSTkF3vhPSPI0Htb2zPDoveC5DT/mGgCzCEJIbK8rgNB5FiAeDTknvHTCCnjdOUEqo/R54ARjhv
+mqKO/8Xbquu1jowfWvNBVN5Mxl+ceLpGjr+GZKP1hvpibmeiv/DyzA1Getjqhvt3/eL/BfbFCLEG
+C5gZTDVfctjz23Gsm5dycPw8w2fWFJEQrpCFiKsEjyj/UJjKc2Lbs2KmomAO4KgRiZkV/oneLvfm
+tBbQK42gN5PmvZxA0GukstyaP2p8J55eWCU/igMKSxk9nufRzBq35NIL/hEJWqrUkaLy/3aoDDbX
+6KdF3zmfezg6YmGN7+CJ2clvNin0vM34qSgahW1FoMZX0R2PiNaBI2OS/tw3kkWygiZDBxrnIfxY
+wOftthHEolRpWJycwnJ0OrLFTo1Zbibn512l8iDrtkDnBPY2n3Ikw3p7YqN4ZrvdjdvHScOkzead
+McwoNbiBrzSOKaOaYCdUWQFKojK1jQWE3VmwUPLfEibsUtvF9sVrsAVT880aozQjYSmhhG92F3st
++SGHc0zGfnFtmtGmprEgbuRP55qwnCyL9nITOR4SOu9Rl5o83Ahy8Q5hXOhbVypO5Oqyl1ulewuQ
+phsV2mLriwWcK2ABkcN/g4xYZ9bP/T5expPjbfgdXagXetpsFF/XKEMp4fh/xFzkLixEfHFdjNXn
+2M3Cgc+6mwuddruizwQ4dtCxAhwBthhCOi02VvAbQj8K8gRIH+AmNzWPU8raKbuwdnhF/Dbvvma/
+Bnm6/ZgmWa+j7V9LnvslhZTDQBkHVJXCtX6k5uDSA2e+4ylC2k0VdSLJqbzfG4FgziT7a3Ow2GOt
+2LJXckYy+pDm5KafE2cUNpzWNefO3y5p1lrU97LByp/7wCoo5woeyI02qOaZmSYW4C0EZQpaFOvp
+6LPyQS+BHaaNDD8Lw2WvHJd7yXAnejqg4hPGOku8fR89dFHTfpBoTIAMjxZJF3QOtq460vVufwxG
+A5M7sCSBuWDIOAXdR2O6ZqjUtOkRCs1+KvtZ7RfWL0FgNoM5ZjMveDHZUM5zwRfSU7XXGe+ykVSr
+TpA0uSU588n5zpPexhRyCa+Mpe79/va7WdY1HQ82Q7pa0rI/dfFjIFUu6VFGYQG55u+rwpfhnVuy
+vbfL4GvQNYm4XiawSdaUj0Peml9ks1Oxt73tEaJ0PUSHXmxgFX9kwWEKDsMMZiSGCL4Z91KoKIlQ
+eJQMoya64gwDTRoBkiva3E8rbCjTjH6GIOPEjzvrQH0vwvYc+7jcv+jf2PDbrnHWWFCrtW0wv5Ao
+rCIoQhuC3/oeBw4OwB5aP6SIqo5ApdsXvY9+iksndrxgPUF11Qa7T7T7zlrsJiXEjNdXtQRyf2YL
+PDTxXOvV6gU8C0FMQ9mS6VlLwFwRrZ/laPLt8VAqJsgmkChrd35im41/kFFf5Nd9OmtVM2tmPmEh
+AiKwvKUq+uEg4dW6ar1Fhy5q4czmQQIEgWdgHoQF9F1D5pJ/oukqHWzAZy5vdH4jxhvA6zyrqMHB
+fJzaQJ6OUkG+mWeSRy4VqjIGKmBUpQ0lTiFN5B95pZ/df5sdnCfsP0vKafPkoV9GSPn5I3TE20eC
+59abJqqCndyM4G0W5qV4NqfrDT2OqOomZHnExun5eywg56/p68+uOCuQdWSX2uUBzdXj1OXu+9oR
+uvGFDcC8tAdpj8prV/lvKEDL8RbjLv2ExRiqlVfFKw1SrVC09GHh50aPxs7aWaURi6xswYYv3orR
+gvQsAQItPgU7n2/K34wB298GaomSpd5ahVkyZ2+naV7qdsRkpIfoI5DVG4xkEInXh6vTjbApVmhb
+RWoyy93fTAnaF8ACfkwY2iCfurpMEO90vS9mzVSObs0bQ8GSXTBmjIci8oX9/aZrtgpzWMAX8VWL
+SDqtlh+E5/ao6keX3+cwoLNX0E+ljjeNufMRbWbMbhLuX5rI3wdB+hVV4P7ucUaranO8oFN33gAd
+SJ9Q7nfppPb6EUPeWEz9pEsHnO++LhBvrdjDWvv17d2ryirposSbGP4s3Dv7lvaWGn8H490rVJ5E
+fat4F7znWACzGxYQYIoFEPeOcAZD7/JNihdmK+ceQ1iVjUTStBgzDRZAwxhKLpvl8Lkkw6lDEKN+
+Q/Pi1cNUsClUpa9+Bx2Kc4oFjP+5UPogdFNDOmmu3tSzsxDNaOp8DIgK8S1xBj2jDnRN/Wcjc+xJ
++alIQ7cx6ld/2XjVs0v8HXdShvhT2Ebu8HovzWithVtFJYYGw3Fu0nArNHrlMOiF6aM6iXpHphpH
+rjYUd8+J9LpoGAvI/hpbGf6C5cYFzevfviLEkF8mEPiTiluGh9xpkF9Ll9mVWYBHYO6jj53AwN2L
+vZ18+YLJsqbleb8LfPKmGnQut1XIZK1S+oUmIpmymsXb5jwuRLz7gMf2GRYgXJ60AB7Y2P/kQORg
+KkUqL9TsDCdea3itw9OGH2AnFkGIaM5f6sUCXtx97qJ9p3YwVlYwBuG9+lLb8sKAjQ69sMlZYG4g
+QHL/O9lSzvY44/yyJJDOlG+p9TtY4uueuPaW085FzdxMXImr1uI4vyQ9/ozMM8+eN5PyVRRwBy0s
+oMAER++YFYmpHz7jVJDjCJ5WS/lL9MWt4lfwIWR3fqnFeQotaP6PWZ/O/87DECkh3WnVvT6UcAXf
+EI20rqd7h5kIaFW9fTucT9hVCls+Hryy42DtrEKYoqVcGpC53+BqpZE8XZ4zGxitu5R2byvOUZDy
+LkNlmdBUNHF17R/BRcMFSIxLkRJ7oDjaKGTLbiEWsP1E+PkrLzfNWxf0AeaR9/FZA+gvFlBY7T/U
+r+V8ZZUk75rNdwZuqCrznSbh8vRqQOUlquzx2+N/rA5WpFi3sDD/Hb9zJOSHM7a4umFEOfVFCFJ7
+FhPFzDWa2T5Ku5oCiwLEYAGKcNW1jrf3vBVhsUvJph2bU36VGHwsoAfXcMCad8sI8fyTuxCJxZKw
+r81ymj4tFpA/sVXzbLdRwQ30Lskzaw7jgwWwirpg1OK3yJms5mIhA9HtWwKzmUApe/AmDNW1HxYB
+6fvQEDaMbLkLeAp1z0DzgMaAVz6Zqk5T6BG1CkfaH8BhG9TYMcg0JCBrYeDXcUJOI52PgZDLq1j5
+YMjkC3sWD3/D5GvHj/uJCqZ+rMk2p9/lPQYKD45KZ6uVMqUESHfM2QbOvUIvp5ZfT2oi7mMBdwKs
+5OgOOaB5+qUYVW2HRxn0zuUQul8N5gQvQO+ny8ZWcebgQDFEbk/futsUj/Oo9CvsNy05Fkxwff5q
+4mZy2owFJD/1XGpTciq3OcUZVnrhv0ID7UbNnaDzu4wGLuBVIiXzyagn5ne/hI8W5EFoJW60k4BO
+hfL6PpHmSuT+UC237jF/bt19uFT+OGc/BT6F1sjTl+gVmD8jzJO++GTi0zQxMndhq1aIbMxm2xL+
+kpwUw183nNiMjx6RBaNZg/buQhyfKn7U8tR0iuZPJ9tN9+0NFrB8970BzjtKVlH+EG/M38AvXlZU
+WdB9y4AFzHTfvsAClrqxvwDEuDiAe+S45AAAWdwJcqRufo2oEnZOCi/oF6u7uqUAUVZRKHeBbF6B
+kWV1js3RzkwNVtJOS3jEKjtP3Xfz+rpLuWQkZbzdxGwzNiKMnz91l7r17nkhd/X5Uf/trcRTA6NR
+2rUw8CtR9Kc4F3YS8bfu9cRIh3Mz8sBO0ww7h1Ul3zjh5FmPqAK37MXvTtoepxUKraZt1G3uNsqC
+05ePLGm+J8lD1PgSvwQMhmjJ9G81xtil4L3/aULYP6W4/IDmE7AleA5fJFJUlzSQ3fLBWIDRN/1K
+RP1o5ZORNtFI/eFpeJPfkkU4FjAcKv3H+fdtblNzhmlCu8GmiA5lcsoWnkrblF8fkOiLeYF9uvwz
+H1DnXwUB8/bT9UmG8v0A3ZGp+zItqfsRjRCiT3g3oj+pchahVMSMR0tTTc9VTdKJ8MaF+C/uqjnq
+f/dqEOGwAv4bN8DtIyL+X6pB/D+9GkT/beC/qAbxv6gG8b+oBgngtenCFBmOzf+XauD+59Eqlm+f
+j5omrDct91mrwJY6RlvVtExfXDJmeP5SLhK8epwa+h+qsSsK6rKKlH16yjjs6SW4au7PDZKskuVL
+sHWlErgZbCP9cTtQ+3acfrWdF/nZlaW+T5CqJTsrJI1lMqpdMyrseD+4EaX6Jau6MMFJyjNO3NiG
+075DJe4yme3b7Rte2qcJeE9daGW+kiMiru6x04Gr728+yFXm43hew7Eg3nfGjFodEOe26td8t98s
+F8J+SrC+6qcu739UAft5pjrtWmGFUR7Fc7M+2DXgighzmzDM2j2eumAneGb8ivQXZ8FGec+9gjS6
+gmj31rxwofIAlunNJQg5Qv7XxdcKOuAWUIQ2lkzCeYQPn68XJWCn9dIdh3+Y48yKSDqFPuq8algw
+5ij5CHIoVCQn3RP05Cw+nYbghkXm6zhsp1muSPMERTnu+qY2NvvgMc7Yn5gghXS8JA5m+Tr2KUGG
+X1YvLxNV4r5ONLS0CNW1UkTySZn/jJV9M7P6fJgP9DmFvu6cvZBoNyz9xAX+jAhfhsgzJry9yHfs
+kciT4ZDP9+MI9+mV1czY92g0Mzz+lJwh13l8crNVw56c23h8/U3dGx1c84RcZUjCgFjDpVnpLcen
+CGq0lN3O+LQ3/Ep4JtMzujPDMpodbmVYJcI3w5A+vvw+c01YMBD3s2czS5a7Y9pkFkmboKDdRYZ4
+MsSjjYzQFN9Ck+HBA45gIoR2S45tmujp9w0oYUHtHGmTU0sJs6hGa7wwA1u55lo7AAvQmwRATZSf
+4LPi+7f+0p32PVOGyDDc6xmCTwfTez4P/ibG1Gv40oT+DefW2l5gwa3EUknx2qmjTQwtoRYVt24r
+HujDdkLeDoT5zpsbFoMdfbSK9QNjud7yl1uLH9kefapXyonO/ln5khIOsfpuTuf5IG6wgWrzb/QD
+WFry439OFSTHwfnnXEGce3h4/5wViPMQB0DOKkymoG/H4nOaGlrcNNO7jvj/nSr4z41aZAlmHzdv
+hEMdNj9sCxfDFslQ2q6PshnTvStJUgbsRWfwGVIZWZmcZteizH/TvvkaeVP08ufLDtSfEXWxLIqN
+aYZdFVLnhfigVVXyNX/lm7nJFXK4NIIAX5/SkOuubATMCpJXTjA4LTfRPn+0Y5BFB2T+/Lm1MRss
+slGoykAtSmKGpxNS+JShPKWgix0uDtDqDvdMIxBhjRreggbqfhiW/NiLFmAISLwhX1/95NCsvFPU
+xA0sq1qV/RT5Jd1b8+uRxu7q91OW1wVeF3BGiYnetW8cyG423Tr/jN/y8syMbpkEuOpZRGpLSlkT
+aPynTYcZyO4PsgxS5AQir0RKCduAkl9iPxGpecUfKWUwv20nA/U8EBiJsnr1zbs9wAWW1edd8yLS
+PlBnaf3xxy7N0zctiVEDY3BlrocG5tQBNMC8PB0Sx5g2O+UVD/3NyL8m94dMzt0iY7ePj9tZYMnl
+/QK/rd67NYbUGtZqtVs5DKmLCco//or69lDXd0pAO7oXFOh/ivBMs3mCBYh0iciDOiPXm0cea0dQ
+K9DhjpfA90HKthqc1+Yef9tRhe9V54An7GwqCWICA6oDZcqeQEOCF9q/mMxag6aIEvzYDr8qPM60
+0RuVz+J82UUsj9A1fC3ZpvILRHGJBbx3jdltre1uuzr1QRd9Rjz2JfgC9nmFf+AqIvk7iplA5vnL
+VhrFo4ijWWd3vQBPykPN6jNPBWrCcKHx8oSsB6sEOnJTD7UmKoH41MxclopmiJhSFi/9wcIShY4H
+5HWcTwV2Y4Bl9fl/GJwZnDdgOaAMvHbSd07MXbQBg0/GBJPDrluSnkgmxXQKmQ48yrzO0YaY24Oo
+ax4TC8aaPGuUcqQb+kF9NDnTbMqJKW4MDx62+J3Oxc8nwEr4Xl7WG9W0FjU4vn7zjePrriXvBRZg
+kF30ShPGnrptpvYBlVocmOFN/sBcOToypdhCVi0AiuufJTQqvL3C4Ku8v1foLCwo3X9vWsa8cZuG
+WsFhW9eJ3tttykaNg93bk3Xm2pzON/IUXZ5VejEvgZ8TUOkiYw9gwKufvpdyCJs2UxVImPjzVX+S
+IzzoEBZyctWoX6jdFZFR9btlaPtratuYHa3rRM4AdBMLeF2hQysVgbOttiK6J1O8P+9y71VKBPf2
+TZZANIo6NMmxgP1hADehZDQpx8mWE2y4n3bU2z8t5i+9Y0yLoMZhWNY9Hy8DxcgVZkl011lK5fLL
+2AEgmy9z2wffF0ljlTPDMGvNPcRdBt997XOalZ8WBw+QT9AlYxfcmlaa47iiBp6nzqF09a57p6+t
+ljCwYkkib82HNGHPGRcvPHOb/rqWZvlyC/i1zF3kZK/3LeF6ajedh1/fmmeNRHgrBSVWWsroF8TR
+TMX8zvcfmgxatpAZmfM34xxrbWWK2WLHBxvHWDRhBJTz1NbaQMmoLC2nt+xMxPLQ+rqyN8Bw0af3
+1JMpG5imRvBAb01zBjaGxhmZqH2u/xQjfGxGJzo87O4r7/dXa+6lu54zanov3fp427lfSft4jJxG
+sRn1NZvt/UHC5ypju173j9Z5kgp97yYfK/R2xAK8KjS489Po+J9PcXeQMztQvXN7xftkquzG4gW+
+ZpZIbGcwh/nrtwMdHPV0UFHJLNYRP8/nRMGyMQ6eclOJLbEClouDh/FdorWHL0CU+GadXBZtB3mO
+14HvSYbVpDzrzJjNVMJOYQzLV37hhXorThu5SnCukNWVP8ySmBm0Kjy6J3IzqzcqZa7LTE9KEtW6
+d+spViolKCUfIG/pqdSEKnQjzT3nMmcRj9orfNzy5JXWt10rFqG4DRlyuChHS2yWpqABEtDUE6XJ
+OQnOL3jnQpH1mmJ6MuRQGsJRNNk58rhYPs/AYtzvoWvR8KkcAlhexahbl0NqEwjOqOMBCjSu/fk5
+OfRctzhpU9I5vQyd0FaoJU/E6LB64hzAMdGxejwE9u+DpMtLHFzeDAO8V79e4GIBmla9XaJojKpA
+tXl89pnZ0ydD2agsWUapuCwTlyLjDzMxKihIjhlD5WXh1tTozELoiyjBg4TL8Yj3sUzLrBFaCLTW
+sxF3tCZlmyYHQmKK0ntWrTOLk5ptp1EBIYu42Cbhptb0yw/pWE/ODec5BkvIL4/AQypv8fGvovkB
+m0j7wOgsvaGLgQSPHOjIefL5nPeb1QzRa5nbDRvjAyZIJTK3IfoKTtdQd+HIvTAk28iQNq2LE24m
+MsSzyrSKS0a+dI1ZQQzp6z2YegryYrps9u/wt19k97YUeHw6RJy4GWnH+ErH9p5IMOY+OAK3+Ovb
+kxNbD28btRdp0dRdL4Oz5rJUOXtPyYvxQ4QlvrJsFeYkCcn6nHutLtv3Q3F+BPkgrqJxg+rp0jwp
+h5MJnqykHgJOfZ0pOajKOuNb8HOmLcMy7RGXqMTQGeanL4tZvXzbRyR0ZXbuw9X5AnDloY/eCA4Z
+NRNt9xamSsZGm/OeDLgSJ9PJyjuzZ26QSmtNlmHuzc2LBrJKY9SOKzN3b6AXGM+mma1Ck4H3mumC
+IniwSctesoiR+XibE5lkmzY0+KAtnyAmr3a3yyt+V4M1Opl30DhthC6avCegiVHARdDl7WJPkBZz
+nxqBj+gpUQVX4PKr55skMfHSty+z9J2bvc1U/h9hZx0Uhdv1/SWXbpBeeinpFli6u1GkEUSUlpDu
+ku5aupVSBES6G5WUXBoRWRakNl6fd977vn/vzDNz/3+dmWu+c536zJlzVbcO5+Xv9c0hAWzRo9Z0
+HmLUJNNzJJRjD1xFxjuZuKdvcwPVczMfTt4O2VOiwBd22B4haKpNrsgEDp+9ZJSV79jB7JO3tAtZ
+9FLE9ceUwO3j3d2hsVm0oujpGbgLSQ4VfXARyD7sHzve7RCAxP/tWYh6GRpCd/9po+//1gzY/1Mz
+LDaaJBvbmci8awbe6U4v+LJoWLG3Whv89oI5cr5tn14vFe4qFCyOJuXMF5gO0g7ZmZmLI5hASTnv
+ssWndt3pMCe+8KicfFTEa2erT0Wov7XtMl2d63p+jaj6efGmgybnTYd7Cv0t1wCCPU22iyApu9jG
+QPqiXYj48/bZ4/6DpTH1th8zt6fWbh+6+zsAIGsFDpvZ9Tl2OdlPiG3THWCB83qpwq+C1Kg0zeT7
+2tGQMh8OBdkmZPJ1YrfUCeJTMaw4vu9hMyPKQUAEkZwpG+9E9vtL6NQvuw/HNy0fmXxYBYvXFJ7u
+hXya1357HqgMTBTs5bA4s2/qeEi/i4u1jZD/tbO14fTZ3gt4MeC6zRRCRc5KyUB4NueWSKfS4poY
+LtiXBqpbu/vx3oC5+/1X7CG5zp9cYoO3EfY/hFykudv38mwJ0kw29V5gTZEH3Ekj11OQ1ioDK6JZ
+wYrSxS2opdyBgxUItcXUn/OEMCHFNhavrU25h1+3M1UuHxceTABsXpbYtHYOvH5Z3OThzZwWu/pQ
++BFKRJR09oJNIpwutG3Ru+Nsn6arq+mlaxur2BvcMJItoNChyCtiRZ9wih93sWP7huXQL2ljV5Hr
+9xNCy79wZbknynex9Xy8d+K9vkXxeuZSyklNI9sxgP42m9bKz2fhn0zCa+8+Bk4POPCxeO6DlhG4
+eqV7P3YgYPszOQxAIDX4i5IVg4x1ytE1vwhsq+UWppzCFhXAtlEVsklQ6pJrDTv6gZhWXiCT9f3T
++x460um95qzqfx3CrpIrtT4ag2+EfSfm25cKLupSkjhZ0jDY4Qqwhbi9ynFyHkj3YcYAKJ4yaGV1
+HBZByuLJfx4EShSIYQCJFCcqx3VOt8jBfRGsjSl+1EbKhq6MMh17YHee+C/vLtIxn48fS0o+lnRS
+HVTTR1uAzmyARVoPJlFmc2tHrJBPayGB2rDV3/CrGwzgE970bbqij6hHZyhx3/JUZ4HDz9ndviVW
+2aG5M42s1yKoJuPMb76HaZ8oWXECg1sSsG7kzmZIdlGlRlakM9SdSgxkIO0BiCwQDNF+a+wDjyWN
+Jt/Udc9b4CLHm8vnIDSyACZ2KebNRWd6ZA24iuFXj6SPpmOvf9sM/ywPp3/hSA2m6FT8aW8lYVPo
+ujewftDv0vaORgvU98ce8MRZD84vEzW39bfsiQNJMY6OQ1xRWiq/CX7STwd4OADobagoonNSuDM+
+ga4VF+sEPz5CTZUKZkQfIrq50J9T2xPEjsbJ2GCngt+C0yXeOq1P7zGpnMKUvYt22WZ0LIfkZzkO
+dU9g4nB75Ud45RmEuYevi3WYV/hw3WXvDphC+75y3wnqwJ9XfuYsPq0hO3XlTnKYaRkt+9bXDk2D
+cfuG9c3AmVq6HeoMmFJsUGcmzMlMAaw8ZgEC79Cc0SJkLOkfNrIh7z3fvw9d3+PNskauv5N9M71r
+t0Y4vA9zg+XpyYCORUJN3lwY9jqx1Dt9ZrZOKJ6mpsJ2YuCx5soLqCLODNIMVfdDRTHdUqneGO2w
+hovRqzY/MDJccp0CWOB0kYtS+f/CqTVmZhn3LJZmbZ/2eJDveHf5qJx9fziey+uPlYxjGk+6N7iG
+g1uuO3I+LByIwJJVRHj+uILjR1HnPrvmQJuwU37LlFJUfb9HmhRTvLSmjUjYJpmt7At6+MpHudfV
+J3f+URdYLT6O6InUuc0BwlVezHLiYSlU9+1dnokyFjbwH7SBHfDfuAHOIAnpf6gGKd+/kcN/M/wH
+1SD9B9Ug/QfVIJOqengxR4H17H+f1SCtY0PJGKQmNX3/CO92pz71XLVXNJlNf9Od2PUSjAYNpE3M
+/Ytq4NRJdpPkTd/LOqoR4ghtDLG//jXx7o8WZ7T888bEbzDlpeDDxizroTyR6wwOQkJgw1Tgj84Q
+2ZbulxFK9BuUsHVZakCJ+kxsO4JOWxMf8tXGmYJ5lrNQhDLubOgBjLp6QDoZP8pvtaDkuVOJyKEg
+k7BrxOy2hkjR+6lSV+FCuyz5HjfmFrFnQSRE+ufEujksVi6FK3L91gscNzzEO94WlJedbbobyO5B
+sjMEPh3t0h0WMElIC1812f8lOAz0PkSMHPJVG7rshSPq5yErO5UieasqANz/ZOLNfi44VpQ+pbG9
+8mbSp8jAUpgOx/YXgsDT9MW290PrPizPlHv2VYem7vpD7pGNUhEu2eSUg5Gq29U+PYS7szBE6o2y
+/QHHviCEOgTSlHlZEj25wOcdiIvdJwQ604eBY7jCclZGysl8qOU+R9I/ntZogthJE6aVJM78qWFT
+nvkK03OMf+Tz0qlHq2QrcpxbFJyv33b/HSdKiG5ZVzdaK+HEdgJ9sVjT+64bKVc5aP+a52HCRayv
+dR8dvc7ueyXkj+u86xB1xNrfQOdKKREK7qBRsVd4jFSWeb9oY8Psh+YhIngQf1dBnjl1dGcnEke6
+xT77jFqk2QIBiy04o2/6kad3Kz+OL+xDBoZEXtryBEcvrviwQA2ZvZbuMiQ+9CQQ7vJeFD6XA3Jv
+Em1ZwqFhsugOlay8l1khoOnbZ+djPXlfW48/HeIbkqgW2Dh1DsGb98OPBkMSuhH2x4QvRyJ87mxE
+HhK6/8CK0FjTumGGiazeELcdQoI6Pr22Pos52HsWyGIIlXui5UDRH3LjoB966g/mZNQ9fej1za5v
++eLZ4bNN1KuoOaEdWNdQUCIsUMrTdCLjp8NE68uLIcXxSFhILTbhgL9IvCoW1uyw7+HLDutcUR+a
+s4WBtEvPknY2JDMLoqR1IuZkAVFiMMQuNUz4eo4zUbG6ZrRb87bGBtokTSw5/rvqdBMcJ5tKSTcA
+01ksegTHW3nZKStWf9UlP05nG5Ze+lp1N+Rlx4/DcDGeH4ITz8UbTljOf37uYmVe9d+2o2uqX48u
+ir0cbhC/7v3ZlXg15OP5dkmnw4aJ4I5O7fLJ66fyD6gm2nQmPp0rKga0L4X+3IbqXYfYQWJ6hQ4z
+J1YOPnmyd4+VVP74fGrn+LOR+Jed+7+/dKDAAErwos77LhJEfdTsE8y3LO11l+ZgtdVHixjAVGuJ
+bHxaL6/9IM7hVViCh1iAgtn6E1Ilw+TFTJ4A8Eic2mi0Q62psoxTfjjH4QBuxO2vAGkjaoLF/dBP
+HOGz/K8C0MflkEMpTl7xqUxD/89FX+P3KvjSRVRZOxtt48yqHA2j8vkHOF2pk/62KsMHjiGGsSts
+fnlKMsHrC2cDVtM3MnlZ3wITW/CxsGDlWTmEn3K2w0HXasgHco1oX/66R+W0l/6O6ZojrB7i2RoT
+DH+gn/Pn9l1uOc/ii9/JkD+rjreAf3LG8tNSBSXtEwNI5hiYFJ8rn6L4Io9Aa2VlJhMH4Tj+e/1d
+YKc7O0QIa3T6gkKKxJEW5ZDqFVdp4Z85Vl4dR1DNnAKokXpg5MKcuPDdsXObnv6kfWiHZRBxu+UP
+aSHBsT2IPq6yMLxJfRuaUu/k7rGydaOugjtIox/Yar6nbcOYY8hy44pa/WM9h39BMp1Tfxl9afwj
+KyztT5ckAWvjzwVa2Lv8phBObDNv1MoC18eVQ7ir5Lqsnd2PnFUF411kIHJ+C0qe8eIxlH/IgniB
+PLdRx+7upci78N3qo9aN+AP0q+Jmh1R+q2rijahYgtSjStdd9CGKjmNeNvFw9ENpq9wgRPPPQiYQ
+R2/uqEFzCXJ4aYQBJA88uyzUAu0QgQ68mr/OoDXOjO3e0CTqLdsxaQl/DX9tTj0Rr1izg0Qhm2/s
++AFpN0J0rIw3K1b2UUVMJsi3L7cD7hBOIDaFV/qnygizFJekC5Ch+B9TTnpKeOAlMngo+K75GK2D
+Z5Ipa270sqhBRaeM78Eg1xaV7qU6hzaLQlDWTeW+TVmTTPHvbmM9CBlXYFtrRwgNBvA1Tgw4qzI4
+KTaX6bFhpb1LB+EUENju103gHAAummpGkUSvr/M29Hejxpfbmhsu6RxSMYARv+c768k/UU9zos1K
+QUcYgOqNGZE82BL+WpHIadRug7lrJnTDDj8sGTmYeG3AIRjJnGi/cowyxWNO226AwLwftE5JaUV3
+w2maXqfSF8t6eC86vBHdZo+EvX2lYXKHcBnlAQ59uxwpjFFVBEK63pv0LKJlKRo8i27CF+OL2FjV
+fTZ/YgBYgXyLJPkfyNmGHeWnBu6brQwqND8uNcaFz07amb+KdHn8oeobmtWxDyFwhQHICMYs2VMT
+pR98+r4yJ2nBcaWbocPsAtntfRa3+X4OWYiEdQ5sqWEAL29ldzEAyMZ0sEz4NNJ2tfU3BtA3koxs
+EoImKXu4DqHcTyKOCC7fgaSF/S/biuNPviTeVlD2cvVIwNXum6vJYcnlxygTCYV8Mwf6vZ26qiy2
+wVvWIno8JQrFeKStIZ1X2xAGcP4mABGwTJP2OFxuImSBiD+94rapp/g6EO4pE5+xdd5qxDJ30223
+6/ZMLxLFy3Ng11F0V+i1iyTHAF7NZYeWodQD4NpkEWheDvbbDUi+WbpYp2HcK58nUfnea7rzNP1J
+GsNZrLr5nHf8HzzdbAXPHnEkFllBh42ndjAAbFqtJvpezYumkIpEUFxncpmo+CM6X1Qkfa4ZBmAJ
+ovZLhchDzu7ABxMx0EJ59tta8Yowujg1PdroSUnqT7toJRlzo1Etat9Z/egIDkLtQx1D4eSRQGlu
+IxkG8tKjeWwyygdvKZkUC0AHxSk7XxQ9dPtr3bX7rvgT1qpCZ0x63fQJhDKfyY9pdhCx79Mhm8nr
+b8sibfDC1N9Rj9Ip5SXTEf7ZNO7S8cUAFCFHIwefXqgkedWSSaKMv1iUXmnT+4fScowV5Yrdmm9E
+q7/6trHaKXH7+RN4doH6eurKiU1mJe0namEVHIpAofmz5OJckRhAABjJ9llP85J40cYlEAPQubd6
+uMIC6gqq98MAJrMpVbLnLcx+HvXAPRFP2LThcLhSCbMCVCK+2IsKIXUB3XbamnnFd+gXL2yoAA0o
+UYA2hLuzv/mRUuNRpnN0lpNvNK4qf+/oFja3V25aWTPaWzfxCoUuqlXpc3lNpTmuhT4poQSXoSeT
+xlF15vTsKnEVNvJGbfAQgqP4wFeRXyt0qQQn/XWHuDLEnCPcNCE051RWt+8TYVHxMgu163kXrUo8
+aqvcLWwNHD6/oi5sA+3DjlsJ7xSVreBO+yI+P9PoWONYH50LfUNuvbSyt/p8WxiEq0me1vAieYFo
+h0zfbkD1Cbu7rZr1ZvjuInWb4Tks8RvyixHqXJC1XQAu8i55aqywCPoYfUJ+qzkjE9hvazXx4q38
+ktUj260A3qyRsoswQZcU/CgejQ4yyWL2QPud7yHm6X/63vIHOxFLvlCdkXML65MkC3uYMvtdI58B
+23StqZwBxqfv0QYC2t5UsbRp3mwRKp442cTh7ft8eYck9He2Kmio8De31g0xla1m9A2SxQDaJapj
+rH8sEnH3haZTI2mCP7/Plmu1kmtGnhTlKgsAP3/7rQpJyjDTXoDIWz+f+bkhivZo1foSkHcEv8cA
+zAflFSHeA9eWQcvQ8N83tfSXg+HQkfH/+fMkL23vFefWeYcLBsBr7eS6MX5EVbmoB7uAZW34O6YV
+CdOuBf/tYl+kXbyeEhVvn8n2hBZGPo3ZPQ02lB4TUQGTt+4hjlv4ItQo9oKDer7YvoS7KKzcmIDI
+NW9hechNT/bdWxqp4Qq52tf9YwI6rD5TRRtf/eZ+3NfwPiQ5qmZWDQtyCFSqIcCBNlB4YnGSswmA
+/VRsCJT34Nlkae+W/HxCV4wH+Ng4iNtnigpP0N7Ak8jlbeJLfYZ38lSK2eYJh1iocmMSAvKvcaMF
+kLQ7AXrZA02EPo54+KKlcEY6JNZLlMo64tpKSGHhLciSALxYFKHosT9KHlYvOmD/V1Qz7t8I+7Nn
+fTMRA+yKt7uLeqt1dsh2BLA4mis41bYu34D+Kfs97xk7Txm+ygCv7kHVZGQiI0WgbEs0HqU47LEn
+ArvQIMjz8CcquXKFHGJjRU+iZG98d+NBH68DSeLOywrnEpL+3heVboSgZAUcTpvzh+Lm5UzetsWE
+sIDMqw7OC17vr13MNqlKNJvVogys26yvn8ga3n4WcG6mVkExFtMUaLKETQmsUsVmn8pLFpNxphGb
+2SRFjhD4Owrh5bytFKPnwqa9P3kXJXGN0/HoXU3SU6aUz6sPzFet1yBSz3LV2seQzsJU5PC/D2C+
+OG35Du15+NfPH/Wh9DEA1qN6wO6dlOnfgO/xe+tSsInPvsuWqFw8CuDKmPGompCagCBv2t+k73zx
+basUR41xquyX/qUinYuLFguC7HdMFCMm52krYbAjAtmkeHJDxLPsUVMYLCmD1COpK1y1Ek6nF/T4
+7QQGsPymItEQVJEoYEMz7pHZl5PISMumsY1Pkn6Q/lAG1ZWdHWavJ6NButrBrifKHBLeTxHhR/mg
+j47TlMj5XCXcL20wYMfjmSXFPBuwjChYtniBTUk5BGsvKvgHweAtrC88WWKyEkC5ZqVDzfyeC0JD
+3S72hBgvFWiLWlx+fq0ezpGNQM6P8pNcVE9P5rfwAwuLu368UXkk8EM8v5b/yyeOYXd1ssLIxAuh
+m3Z1bIsoMtvU3pB5bA4cwDZZ9Mg9tHB2npBFuKKaUbksaUQVlS0W61qYxRAEN3FJU2f53gOcSbSj
+2UV3YgCxjLXJUiFLkL193ofiuRrulbwwssTsyPs25n3CO9na2V6PLRFH0Tey/Tm1HKCeug2aKCGN
+Fp96iMS8a5JqEkkUS9O0kWBmv1Em5IupCq9L37lggOaMeFcfUdd6OcIbHWpKFhjGiQqXGL7EAKCH
+0lWn62NjN+VGhOb9sZuz9LYNDkwoOQr6394x9214NENIxtllm6Fuz4aLoBqF747D3k4yNQ0WuRnP
+Luwa3sKGrepdZhuoIElx2MwDlSATlPV6hGr/MPKtcbx5lWhCmBX6SIZE0PZnfCUG0DEghSXE0kxr
+MKKchRNxcDMxb7gbrsN/se98bxJOu52+tWb+QvFV5pa4RnwSDQTv9eudxOdsbf2+8RaC+fkiHlD0
+R/rQVxmL1EOFGoGsPfoeke0VKZHzcqoNezcuGpE0zeEZ7x09rR7kmlvBNQvcJSBYIbYhlul3mfVB
+dpIwzSkrLmX681VKmjdhdPi0htWgbmClqAtaB1urtjCIxBdNqo1/M3Zf2ZIXdFLLwAq9empYDvnc
+Vsb8+/6rdsEX6EGWGmsG+nfjz/j77mQE/wfBVP7wwb4kMbPw+KTeMkU19HsxD23Z99Zkt10gPwWr
+ikQMIAJucpMXmwIrXk7eQkItHGdXfyieXj/Lazw0aNqhI6D1++esBikWO+C/cQOcQVKy/1ANsn/P
+apD+N8N/UA2yf1ANsn9QDfL+iJQuJiysl/871SDzn2jg8xJ6ZMHxpYWp+9VQGndqv9k5tcTkdlZr
+fyeof7FG//+dlIQvmjzIm4kr1D82xSNuv9Ij7MLBc5ww3Ks7D+6b9A3XMyOnjRxAhs9OPHYGMxLQ
+ZjRfefWokfC/vwpN4297x7qFNYWWZ4UcdK9/w6z+UyGyvwr9t0vgDJKR/0ch8n8rRPbfDP+hEPk/
+FCL/h0IUEZZeOHMUWH7/++YR8jq2wG/bzBfBMlP2ABpj3JcbMjcdPbdpRpwYpadNJr5xpZP/4j7I
+wbkf2aQzFBGDBl4NYGYan62vT8uGowdc8QaBEZP137CweN042RE2iSRqqS7kLIsHPnpAmHRcaeoM
+MQ55Fzf8mZx3x5jh53FrvRiZ2dsmfVk16EFcL5VSxu7Jh4o51p77d1mkTyi12FMf1TkvFyk2chvd
+pvIZcDIPGfNRE00zDzzZOT4aT1uUTx8fZrXaL/1d2r+9HJQtU0uj2mQQgloc8yoOO/51ykxdcjHP
+6vFoOBlvINSsn6Tr0TwBR2ChmD8VC+XsrCKvYSWgp5X6sYowKbxLNX+9RCdtEcdlncp8pdRYGFYq
+h0fuqNhQrLVEESHK+HbYkIh8y6yJOOeA4H72phZxxS/5DWpK8Wweq9GZ/ysJNYiwrW5HhKXPVTO/
+nE6Z/7TX9rSFoXYOMjK9nQwUjaf+PWIeuKBoowt3TSCrjD9/N2kYhz6u1jvMDtOhQVaim6lJKFfU
+KdygYQJqt2ZQnX28aUreX5wPMo2zCsbozCpt2EvNb6of4ve+qfwqJlqJHu3UixX4II91fzKhTS/m
+V6pN4k3W8LVA+KiBtf1CykXJTW90ZVJFFHnbpDXeppVTIA+CjDyJb1BSFlUVeS2BbQ/JcBYKp4ge
+hdry6yjcHcGvnFc4I8rxiewdA9uJYypFv61aBfInZgnBaxFm2JzWWMLbYplJGVhmYK85ax0DlfN9
+WGY+Inh9lAzW9n1cJa2Zzkj5jQpvKJ43jvL9Wo9Z8rCWw3dT0YiXRdD0mHVL4cbpO0Xv8V61VfEG
+2/EGfOZSMGrBKzsmQ/yH3omZnQwjNr97a/qc6myFraXAD3vBkBnzzPToLCBFwZ83DYzHsonWqyTw
+5e53spSc5B1RlAbvods31UXA6FZ+eWYQeeFHujcOfV/70U3ykaP0ZLrVmVgNJr1sNSRx2ggjNiPx
+L+PT56rbJ3OmfzKDnOatB/AA/uCKD4wFD4BVjU+/2A2DZg4GleX3GEtzqg+fczUPm0Zyp2PTZp9H
+pabw00Zp9cRq44JoZlWex7FglWIAElqqBe/UMp+0kVOmYADd4d1cm1nHB5HxnN/ubXxWSTypRVa7
+YenqtgeZt5/xP9m44xuThBjBLh6mk4i9MD64re4dBPYbFvGTsnY8L2uxeKYTtF08wKXJIwWycP7q
+PLJqafzdmi6ijz2PAXx+nh4vT0tUpEL1eoAQaGkmx6pYz/JoxbhxG9JtQuD2eTL0/grh8OTgMJOc
+v7Z5JIarhzVb/vNhYct8uaGFxGCN2OsuxmKq3lLI67DG3GbeOYlx3TSqnmtlvkSv50aB4Blbx1jO
+r6rHjd6L6kHOd+q2nIknVhgAN+Nj9yjVqlUuj4nN7yaiojtjOxWZahzghnbeqKwapLzNnVIxb4Tk
+T/ufi7iaDe7ez4Hx0RIJFgxxCvpBTKe8UMkdO2x6l/SdNh7Rni1xY7IHudZQdeGmJnmkoRQuAavg
+WNRdF0q/hHuhpLZ3zmG/HpdDD37TGPFeXfPKBgPoyZaUOS0snx3O7vUGria0DwEjCvyeDb3xtkKR
+DtKoxzQmBehmz2kX9JIYNlaR+1oC5hSfc5XJejYEkLW/SdydWJbME2sgnPJFSAR5MZLo6ddYE3fE
+ETSlT4Ik2fV07BXA0u5sfuRh9Cf1UUoF04/tJd5atLFNZGyastXkPd/prBMMzDu3YdjKtXyYZzny
+VhuQ14Ed3FgyU+FcHtHp3Gl91ygTwYAL3oMky8XE7guyiivqEh5SBKmi63hdH7wFgmOGfWYocgnk
+yOksnmuwXSFOPnzf1SYsllQf2gANercQot6R8A7Z01sVUn+qKFLC5nICzxUOgIdS5G+IJa0lxRCE
+H36VbUfUvdFUaPqwr0VfOePzSCxnQXVM9JnKy37Fvs9/6Cd+yrSfOKdXpZS8PUyqSqKqrZbZPJs2
+WIkBeClTBNaKyNFMgvKTlumPpAtUwqRKH2IHZfIEOYmPSXAqir/w4CT+zM4lFm5ewJ9C/1VfI91Q
+XJ/VCracH99Tk4GryC1iaSduZp63oVqGtGYUnlL/MbiVkfQQ91NmGf9jwgL4hcRXmdGnPpE0nxKb
+fEzzvDgQLTy8SbofsIsImdUYmAPOzaIfbB23DLGo3AcVnn6gPrhytuJWZ668r7c50XVIvOkerJCL
+vTISx9t7Zn3WwoXLEWKuS4jjOsO4F7rgqXbfYRCBFqxS4APjchEymz7D5i2/UN2SUhpYNxGcUT7g
+G0zNiJw68B2ZyGv91eCQqL6UTWJW/FxnbIOgn5W2uZZiw3UvyVZkn57zkIgjBI40dlauSp84+lZw
+ElXyaphZqcY/UnyEfYDJyK2yWr2Wu0olQCxhDtxG66aXz0OJXH0g2rtYVZrIye562td41nDrsRZ9
+9VoPT042rbM9WVFGT4tCWFGuLBaXT8kZweNFuqTIG0eGAXjrpUQnhNy31AnJPkigVxspVRu4Mkiy
+MVXDevvWK+760ngiYpiXPfcp/3jMHx7OuSTiDXO8I7hYqI1fWuGsvoyo6BtxRXclO06TNBPqogjA
+uRGVVtMbovHEjAvx3wZ8mUorhiaI2zasyAyC3OAJ7KFz06rxQ3wi1kXcGq13RXFEcUqu6Wt7TTCJ
+Simnk9NemR8H6jG1UaGtuSYyNDY0enIsEFpHVDoVKKArgoq7FZ/B6RLkL1+jJIp4kmiXwE8PpJrr
+Np7hk56q0LqAeltGQcUcPWt/3K8MRsm6fZYHfoLr2Jl7TijvJEkkRTadXyBz9WhZxNIIP4hLErAj
+RHdnUrO61tMhkt750Rzdde10DBPRn/j3HP964t5kBw1NlIJR+6DvKfDGLeGe8jmLDzQ4vCEyM48s
+Ie3sI90DHnHU4pMDbE2UhbMBpSLt5gtLZrre6RYwPZQV/VhEbMB6ENuRKtU9+s9ZN4kOo+rhwWxU
+YOeAoxarOorOzZQEGh2IxTO4bBp5UdrRpqOmQx9U0Ih/RXL6J0xQtGGYETLb/Cprpx2fiWq79Ft3
+8v7aMBYhH5ZQbWFHwGS4HoRO6Y6tdx3Y6Z7x2IHFLvJt4o4gJMH4Iml4R3ENdsfiK7+pYjZ+sPeN
+pznkw5Egc65VIzt0Mr+ght3GL4Q4WhPf3nK70Hk9uewp1+7sJ2MDxTGqr83Xh7NXy7j6JjFtpKHv
+PDplpc+D3Bmcp/pceqLwXQ52shUe+lVzWhJuEG8x1/fj7B1MfOV7/lbyvppBLVF+Q1lHLcHwbFJO
+Tb7Osi6/KabfbKD1Qz9x4g6fWx2kkeDFj6/sjxSfXUUhv8dAqrkAva9qHSY5RAgtwfLGisYjBxN4
+DHhvuXhwsRToXhokao+wap5Nlu9JNHs4vFZlow8wfCCmoxeswxaE/fF2Y0ELJ8r5WorkmXAsgPlR
+nGS1HRdJgg9VwqprYOEka//OeZSqai0fPSiizChehrh53CpaRnyihrlauychVUsR/0vp8WRGEYAf
+aVU6wTTcN/hBWxxYTnFLfXOtSt+JxdV5+ZCK/siBPbwWNk1rorVzi3BkeyMDHSHE2WczFN1QSFI3
+pcxySBuRbU58L6RJPYpcIGdQwgDO6F5hd7DKBGsrPhliyklbWohpLFNOE3qR4zBjIETdSBWVbqW9
+3HQyQzled6vMsIN4ymj63vnaMHWCcSCDstCM5zRJTX64EyIlRrwKvt2IFgHmgU8KgYza3/vLz8Qy
+DpmjuAbFZNcvTMQCh4H2GVNOvt2z+izcBdCQT4/KZmkisQH9J52RgtRjGICgeJlhqtkALut0NStz
+xeXVDJfvD+r8Qb9M7O24ISyF/bFDGX/KvLdV8blV84mGI1hR4+L51yY6fZCT3Xj+wYC9hFDpwWIt
+zo/owAZBeI1eWE60yraq/f3wbd2SjvnfgMGIYpiSxSldTMcfwHKcSEGSX+uRoyhdoneQ/BL0lOVv
+CO0K3IsSmv4IAb149aUZ3KvosFayM05zbt80EJGnah7OcAW5Am7lyVO76wTXBCNXfpnIyzZ8pKHA
+q+MvZYXSscbmSPA+eh20ZvKyRAwKvK/ipimJjbkm3wDg68gaSpQZ6YSwaP7ZODkmLVZ6dFMnJ/6J
+sOwSUlg5MJd9HpLzh0RB019UHiShVoP/+CWpkVU2Jy55hUwjgVaDKV9A1DUWkJxjmIZ6RTIsqD5r
+2ewPXCdiPOy9K1eX8ljsyvEUUzqloGCN0tiVlksJWKsirCkiqIF7PvtKSWF43gGe4tpLBCFxBWud
+t4PaIlXURgIp+l5JVWA9w9qacLmJ+kRLMQYLof6RAyAiT5gIC08myWAZnhsJdE3IQwRFvjLO6qCq
+N3KVgyxSYlEN1Qj03VizRj/uUqC/4X5J/jxOoidNw5SKzbdf4SWd+3Ne6NfPsOTKhZvKD9vM8p80
+LGLF/OE66hqBTdjD1Dapl2nczN+YPTw4KcdVJLu8tikWkQ9i/5DKo+0aqkY7NTEA3aUya2dIkoEK
+QQqU9/v+DI/hipDoNy5BLJKwt8NiQ9REDU0r550i0QAEn6ms4/1uO82YYXZIgLNcnC6rA0L+p5EQ
+pwCqKdSo+DvVaR54S3jh+6kPjizYyIXJ+L07i1W7sVgF7mc0HbBymxHyMuiAeVQT4m3ZocLa4JpS
+Dc5l74lcmissdwwqYhdMRFl9dcbKmLr8rvLF1URBWhUOK4iGfuAHDS9TONeTF7zMFt95orYqiwHE
+DeiIAdJFsEiCHnQXOwGzNLwCbB/HhDZZmSTMDWw2aqQPZ6nVZQ8d1AU5CZkx5rtu+OD2gyJINwXa
+hCeUxZDRYTphTUWMavZKk4eq2Y0DG/yQbrWWfGRAlHe4RIwyJ8I0EU77wViozfJK60S+NHwCZZ7B
+DX46X1mxGLeTiJ3o+HhkA46rgAUEEM091JaJ2KAUva9je+ur9N1QcoKS8eO4o60Hi3lJgM4Ql9+U
+FtHCpi7Mo1hP7aW0SQLxJNsJGZs6U7VBDMEgp6QRDFzl6LO/wVfWWWUEl4ObUA9l1Q+d1d7AQIvy
+zcYK+gP4aXJioSveOTGBarq/ozu6wSFfFtZvG9yblRmdCcvLwOcQ/4cZ9uylQXRVLgNy1N/fmMLN
+vgPfax3el8HT24DzMsGgNwYagNYSxYNwWqnklmfOXhVihrQvf8eZ8v9hYUHopfM4zGiVfE34SIBT
+vlSmzKsVVHeV78ZJMCmdXWepaa2iwn5911ZGpiRYNGX6cVpGbxi7OEMh5V2XG23JSLE6CLj5S2E9
+xU1Bh/q+WR4DaNu2CE0Qu53n9JnQjqLmawtdrch5nR7+a5K75DOO4nEwq6phMl8MufK8n7VYbubz
+tTCBadeDtvyYPRVWezJYgphurwD+dmsENb3kH6oqlitpDsgw56fIXyozrBsOO2qhwIO5D2SBLO87
+xKvFS9VL758q5lYwOKlTBxpmpZlz2uNuhyAl7lvHXiz4LSTKBPJzZbkFsvKNs/ejFXmpnh9ThbnD
+NDIKfUKMin/JKN/05EuGcedd7Evae6EfaJ9NV5r+fN1WljrL0NSi4T9v1/2tLrqgms7gn5tHyLHY
+Af+NG+AMklP8h2pQ/HvzCPl/M/wH1aD4B9Wg+AfVoHR4AX4GIcV+8/9TDdx/Ha1jKw/L7h0HTL4W
+s//ODard2TL4JC4+mvEcl11Bixq3fGP1X1TDvpxdBBwfVJtzURhz9NWIfYiGCt8o7+Ba/9y6d5ly
+zj6kvps7izvGYpovoYBzyuF1jNbx0TUGANhFQ/pufDEAqbcn4dBbA4LDGwxAnrZbTALULcPFDLKt
+h3LRDZe0kff2RQ0IZ0ayE3y1/0yhF8IagObFclO0uY+pCLIdRJqdf09m+T7FAIGPXzbLnKPzPg/W
+1r2XPkm3JfXelDXM09zgjCQSxdW6sD0C7/iCfcLiijGAKSo3wNMguHnkKJhmVgctWCOMFc4HaciX
+U9PcwQDw5jwiyX9G9UVHQhKs9gh/gC9++eCHzmNrK09wjeZhB8VWRhSIlpgRYu29gOEsPoK68+XI
+H5gfgGNRy1V1F2juA6cdtOOexLDqleTMh7J3KDNfNtwMxaWIeTnOfLgFK7PTzRrP83a3+3dleQJD
+Z/uchmtvFT1yqdNYf/J7tiNXDqXEvEBTXk4i4JNhP+Bp8xnxpXTfMPQ3BiBUkGQ3aCdgbzhlNKqF
+0lrQEoUWCuouhyZqe70mPG2IE0n86Z5CZu8GHl47eRSebZfYN44B+FdCHjLdCr4k1CH4PPIVKbz2
+ifSgnlu/fsKWH+229XkZKVtdgAHEWury7iQVVDzrLa7ghVKtvHbuER4xZChpCWrOjWbGAAKyqZ8V
+3FRcdJuUYPdyjVPsQx9k3zI5JLyCLTjBIjCAeHnb31tI2ZZaBvnwObRx5Dbo8ObpTuaRNAagkgcj
+h2Wq42yks8tuLpG4lTGezj1dzmAn6QkcvFBhDTFnqLsrfr9JqCB9Wwim8YusuwEdjmBz2e0fzM1V
++tHBjrfGrfyMt1XPPxEq8oTBeNdl2LJwZp5hEfCE0Uq5XBnJhgmGmn5MxI0EF5ZRikSkAXBzXC8W
+JijodfsJ9laSJGUQt1pJvHkmvpkcKbkIlBJKSjKjzTLCCnzSr8gK6Y/qfEDFIYU1QA13eXJbZyOe
+jSMf1HpuddfYD/JjPXNDnZJHvNDay5EdGFelHVh7HRQmtKRKiJwxoUwJe0r5BGjar/qrjIv0+YGA
+24xhT5P+VyYJbHZGLFN/rWDgyRfPC/fGGC1a9TU/ly+Jt4KQIVQekZMdZEZTV+0s3oTaLbQSQn6t
+OQ68coZvQ43tqQ7vsnGF0lYtFBdsAlfDpVxS55FPczWPLI5ZiCr/FIXqgQ8xAPc4hEBtFC1l+qBi
+tk4QGs+l1VyBv+233QwtW8HTPP8pohkB4HgOv0HGXKx1ayYGIKnpc97+5JGre/XB9MKTRxapGMA3
+BO8F+4gPH72STJgbzGnCWtwJzEvVsqsAYQ2sT91YsTE222+c+ZsdV8OH1irRXRiALJ/ts74Ks3R2
+kR4957v89k3eOjYOQIYMrmBxXuw3pLvROctyzI72jyp1E5HgXve7XqNFs7QZmcXbmnw/BfCiCwag
+c2EUBjyInrjtKQ+ETurD53QMp60o0eLfLG8SQKvBA+eQ2rtmPgQuWvctV9n8o5WC3ZHIWFwKc+2X
+sey48u/Us/es8AD7QsGRJ4Hh/uy4IYuxPoJRb1nOi24EHcTRhMnsjDDNIwzgNS2XI6WU7DPkTvWN
+dQap4Xy8RCulGvQDNLPrTYKFzmeik8+jKpDX5j2bhiBkN9CF9TelXVp5/TH5UQ0Ltgut5w92SmsZ
+L135FRbGzZ55ZZGaBQWrKBm7mZRNd6jQTYB2vUzpk8BrFf1znfyPRNS+ZscI8zK/p1JchQbqrSyE
+rKFG5awkqiSa4KIuoBoJrQszq32L3WtXa7Nx30D1F65joYp1HSk9aQ3vaXSiizW7q9hjIFkSkaQx
+fsiF544Ek+FQCy8qkhtj0Zw8YlbWJqtz0ExsA50zWZOToK4zrbtDi3cFmmh5yFmL/ibizes/Sbbc
+/qcNu4do5ib1OVr31fZWl4YZWzdhgdnGpNre58wWXA8WI3GO6hC2y78sGmcsC0dz75c3pzGAub4b
+76CrsOCbmjYG2AgdbzC/DwaA5ZAOLbojYtInFUoyNDNNnsvJo41PUnmJM5KXcW0zIrvx2DsUeJI4
+UdaNrg+mnWSOvEudknAvaP+dFhdULqNhMzazrCQLoeEfWGte3Luexrk2OvNxvZmcttIDWt+ivtd7
+i12WnvJij2YNoH9mNf0KNy82oAhVB8FubF0o0R9q56LPaneeDa+yBq4MCZmZKKTn8PjO5GjZV/mp
+6wQLEwSTS+hScueHWFBMboJqPCEK/CoRnCx9x0rtXwdUtOmPx7gnbjoptKfkcmhiOLSPKkk205L8
+AR6oDt5ZDGCGnuyhnsPHyJDxhzI18C8DsM+vZp4/YQjfMNX7fU/7E43dEyLuSP3jdlUmlSIirfo3
+zgHaWbORtq5x00znvP2HEDYD6gHpSR55ktFW3hkGEIMBFHdFJVKqLTjBXWfdvsvYxckvGXkswY7b
+RfYgjaPQdFzCAxdBAboxpxCkuaKaz6KWUoLes2SPYVmk5cQCdybLFQYgTFtUnAMTcN04E5R9Xxey
+sioPPqlTHtn2YJIuGnxh6/4FAxhsPPibj4gQoTkYQJZhoSmbJ3zRyhD3rMJHfP0b7Fc3+m3Yc06b
+klV6yAsChBl+bl4k13Mt3+keapabcnxGK7aB0t/XpWi8HDQX/CW0Ez1lJhWnizCy7kJ8uniQW54U
+q0D+N4XFa477z9DT7oFWnfN7iXj3r11WMIBohJHwa2Hcb+S7KCkKLNYvZmfD2drnyOWd2U8y4xAd
+OkcZgxnHKmXRrC/wXC2xtHteynMcl4a/htud+dG+57Z6Xw0nrAp+jyh3ERow4ffM9ror1sItNonv
+y0b3Y4rjbhgH9RMpRI5gFapUSu0kIezoMwSyD+9NfZjryeTdKr9Xi/cluqFYHmLu8gvZeIkBfK17
+kna3FOLriMepTpexFZNx1jFLIcGpwnV4khVGGBUQkdeJPW0sXRP+Ltzyglcz60rU6g66uDdnuO89
+r+UycPITCbyrMnbgVWWJSoDfYgAnsUJ/1NG8B7IIuC+5vn8oJKlMETph4koI0Q5EIXwz6c0c6YMn
+ZRzZB7cNyRCaEbGT+gpWffsuueb5EmszPPRqwboKVy7V9I/yuMcd3qjOZKXBcMftZCGFDhiAI29p
+beGCSwgGMFLfXrzaaGY2a+zyzMQhMUKg62b5oozmzvDC1jqEfJu2AIEAIVxODc1tMIDFdIbCRoNX
+GMAC7yEy0LqRnNbdfWR+nuz8y8C3janHo9GDHjzy1tQo0H15FPt43Slh3oiB8dmG9p2ZK3GEkQa4
+xnCENCGs0FEHyH4MG5I+/32Ds4QBWPLblGEAbzcxgCJ4yzq2+MPjOi3tHcG5a/uHepGJWc27jbg6
+bVqcDRrCopdpSRp8spkMXXdrEjiT/DoKtjpXwOiKFhuC31XM1rHqyWDuqxWmjRjRIZV+3YGzTwIU
+E0vGJ6fnARbY375jkc8EXVDB0ijijtBnSqWdvN/1igxepMWjFqY+Gz56clnrBe6nDtEVvT6p973Y
+xtceKO55h20uRczYFPrxRP7ocEP4+0rAw4X0AHunZJJhJUemwwSZNu0la8MN1WlmOdrPvFlhibTL
+OcDwF+oFe+z0ojh9sK68DFPwjbpNUdpF4+RNi8+aFd7DtBqv28Bm82I8LRLPh7wH7whqyhx8DtEc
+4ZYW2AhHSIDDHrrhsBku0bSkVOi13kalTV7qAXgmwKE5kw4uT3IIGWpvdc6QSHuEWofknewuuUNc
+VlMy7oUrHvzBALoDEj/S8xh+LsQALKgdcH5kDcx6EAid4ly5bPKxi2RnvU+pRrkRlT5/2GwSIjVx
+0DTFT9je4Zp8/eCsTG+jq4mtpTyL3EklooMHpWZDQeZGY7xxMgcasvaw2+PPrJZgIjRfdfG2OLnF
+r4xg/cFUfVnVYphkoHxtDPYLzWGOP+vJxZP+iVpQSqEwTnw00syd91D7rIkzm4QqkZ23lDmaPhV9
+vOqPQzY28nRHYVpb1FitjNsQSLyP/Xtx58Ew5MJsL2TUn2veL6E2iQCoZonjTJEMjKgcoMFS6fNc
+zFUXqdt91Csd/yt8MGiZ3vDuDyDpjTtohSM645fxX2eBK+Hc4bwbxnumAlqE0r1O5nJwTSkfmlHG
+tuYnYZfEd008TIMNHaF1MIABX68Q6I4jBLT6VE5vSHixCMoOKWJ2np5B/jRytt8LHnoFiCPxwAoA
+7ddQgm++iwdE+Fuu/HT8qJXH6mERDg+HcSoO8EOjPiwoNPHR4WPz6rlppr9e5efhSsWfbtok9Del
+DqNlyzGKqSFWeBF3jWb8HByeY40BKAhEgs4Ef0AkhbhTUudm9qajCN4kHDSWtw2cvUtsNoTMeJ5B
+MqExkIAzINw2NFyS61qLivldaPA0iKs4BzpCXyiK2LAJJvvCbaOhrBefy3S8meCDNHozgKIdREga
+zi82Hwyymt9DdhqLl+Q/Js6wqTcyok9Rq/f1lvZGbQzpOLgzjx/l2t2UrPcEDiwVO/ePz0nEyJ+J
+QPNdixgvKYYrrU6q2aqsjDKPgmg8wE27LlaKJjXeTfDFfB6nZW76EwoKDKDAnvlLRS34f5Yv7gp/
+bDhHmqJPhvtvPELvKNBNPbVreXJh5kdIWjKsc5OeBnYMgNMMWYwCbXA25Z1gZy0VoFXGXa1QfYtG
++igx6XgMgBJo+lXkraXnV3bi6btCOxrAvkp4lu68b/4hY/tacDVamGbU9oi6sL89CPAM+qsBqhs1
+ZSYnGvLzDrWFYPXHhaYtZcP0n7sQRUoXd4pQhtmVChSUm2i64SzpErjz8tiK/o0pHiy5imZotxE5
+EQqlW27fcaMOHqv6hsAlWfwjDrl89IvZHfEKx9PiCRcBBof445rHTToDhGgi7r+dXx+sZ5E/PIZO
+LYvwvvXPc8CmYL6yMJH3nlZLYFtHQ/w0b2jiwhrXJ5J+0rKVfXFRCcJD4K86I8hSmZU4L09gT02d
+kXTV291CpYzp5Pu6MbPiea9rdB369Z6JRv+NCh7SSoZND/5XUnM/3m4gk6PDAoVefoUXqcgsg9M0
+zYR2mLB4vuaxxQfO0eTzlSjKxKB9ZQd2dIOIIfi8jNIoYrPiPc9lsYU+bRA1iv9hH2SyDoJ3/x7U
+bgY5/0GjtWnQUm19FAuWZ8KrpopMl+ROjH80ZW2tI5j0NJ6n5+1jTzxe08q/yT0Hbe7xQfsX0nrL
+sUua5asQV0ppY0XANh7fuptGOK2QMi+UcVG1hHfOz6D5fesiiC8w9qrs/MuCAD4xbHoHbYQ+y4dm
+2cjGT5OBg1xH0nmtxtUyGmRgY+XW+Vuq991DNzh33Xv3S9xPi38XrpJDCivC0HnZSRW8OvLKWt+q
+JC4alleD+qTcD0dkQj62IIKEJwfnzH1qQEUfv7qT/q2GMIDGpEmy8Ch+CJfmLgYgQOGQ0ushTh+W
+JEaTzx3ZabZsP5TyBVGt+YzQgCg/bEpCaThgvLe/LL39Dfm5Ued7dXBSML1aBlcazd3CtAjE/NVC
+/+tJLGzgP0ADO+C/4QOcQQrK/8ANSr5/k4f/ZvgPuEH5D7hB+Q+4QUWonPd6jgIr6v+HG0T/OlrH
+Vv/NKMHtJfZKbP7AFcFgIL7U4IagHQsT0jxHEfzicVH1v+DGBMVGXo4DJ+j4Gx/H05Jkqp7hvsGh
+ne+XJjiFzrroJqUjL5u+d+VOOflk4c3SaomxEIQlDmuAbrJblk1KEyk++FWZIDY9tvg3lCUXcjA+
+BW42GlylNTcPd0na2S5nsAGb1mlxkCcPSRq2/Jbk78ALm4qUC5+R3zrnJxzd6jgAPul3XmD3qKLj
+TF/32GFj0HpwFZSYTQI9GSIhpI4sLXM9QURPoN5vh2/XKRtGINS8cwUCyp8wBqN/7KC9XZ86LerF
+cZSvg5PUWb/9RjdzzlqSrDs2sXHYH+DfEAPkWxWbTG2AokQRxRylQcJ7f2vMfthRiJlu46eNlC/Q
+bjMG42hs4ZFo68jxm3qLj22LlAc45w3me9r5v6nNefOwdg6E5rJMNPisBVis0/J1KgjxDp8JLi05
+BJBKaTbTbEgjQTNvLrOf+Fl6Gw0NEGAAfmM5qOzvphTPwY+UuWpAliqBkCF76Sjk4ZU9FxIDQJiK
+ohX5ARS7odLOz6baZgqnLyF0ZlZ7wbdtlNRD/L8gJGh+vswwj/CDNhbNRQEpzcKmhNct3AvnDxYj
+1HjIBqg+r/3yAbSgI3cOOFwWz6ZbtnEcM9TJ80Mt882msmdw6MzwVBg1kqmrpaMzhcehGyulDcl6
+zKu15FOW3Y0bphuai/7WAUFJ7HMNFQ8c9ONqp9q/Qxdx+XMueDcNmO2y6xWq7iyCw2crXkZAdnzp
+6OXiByDcc6XAP53fpj5hAC2lxNRW2Af0VzSC0azghfDJmy26Xvww93GHv8HA/O1eCPmVxsMrMNxi
+26WY4a7tsk+4GANIMnqphNt4pVzNxaFH3KnO+4HLKuH5SU0123QCR8KJ0IRdzYea6W8Sgd15p+1m
+tnZTLrFbDxNd0w49PBWgC+psAeiT0TqKLMnRCG2jxMe343fuOtPMc/u6E3QoPkdtyTh+Cffz1XOr
+kECbX4FHsKu+STRTyWkD44uychWSke8m2Bfh5tIJ6iBN8jvLI/IbT+lv9h5N5CP6bAlqsNZXJRRW
+Q0kejesz567vTCR3o8LNsU2/Z93Va+hvIuCmoXBJyv5s7V4gc/F862kLjoPCgpsiiXcx7VKCdi4r
+7wU+ZesGdAWntx+hgCjQtbJXseISs7IRqsOK9FomQ1rG/mTC1/7irbFjzIU7AvScy4f7nvTljmMA
+8dR8RxemRkrKKOsHjXrvB+FU9qQ8VoC5RcW6Y6btUsWTPQr4u+Dv2NhpwkW4XhwEvLLHLZIRhFWx
+HY4lbNsvBq80YoI4az8zCGSs8V7BEy9/gSRnxYKxxvRLw07n6gPweQMxAH9HHc/4ByuMJOeMARhA
+LHMBpXyitB03LDmLPQ9lzVD1NUon+KZugJhc4yl7hiIfRQaPxvx6073dtS2//f8E31DSPXi068bX
+0udKrdrjanVk9lR2s3eNbQkmSpmBpU5o2UeLMb4qHT9IqrQd6REH6cWZI3+ioR86o9K3rcB/zOPD
+axo4NqGd5Bv4g3yD5SijP1gk8L9vAeBU+6d8r8AnuOsWwe+IAdAflNCJq/Lt1DoAr2njgrq5fULE
+SeKHv7pwLcRvk7V7BBvrtrn3nW6hDmJxfcYV6w2iol6cljRJDRyPI5QrEHWBS3VcV41E9L9soH62
+WjCJno9UtHk/fZOmja1M+h4SAXb6rpwZnBi5XXns8dajfKzfVqp7UBiMUpC0ThLKYVfvsstV1vJh
+AdXiKuNqc0Vi4jkjwbkJweJtLVw+iWj+9OKmVMgLdvSHVFm3pG/i+z0wjp9ngijhPQ/GX/H9L3OG
+83drZ4pESsduNkgiOJeeU/I8OAUN6ZBzgx6nA/pFaB9HnUMmL8qG/mbKgAi1fhYLVo/r7UaQex/Z
+rO33qPAdiTUvjUq/7jjud7zA4QkvskzyR3ofEGlRK7DXIw6zaImKNFB/T9pL6nNeo1rnFO8ihw/n
+NbbD4htWN+EjpW/qQsbyRnAqaaDMU791VMrkR8P4qvoz1iXkDQI0ADyFJKUYAKn1Nvvk3Mf9yulZ
+DEBmYgO8aKVSZfMu5oY1sw68OGbLNZEHPhvs5bOqu7N47moE+FZsSaXCBQpdCKl5y5And6USc+KG
+C+/tyJEc7hWsRSp9yVXdoLD4memVl9NUHT9OjT8GkrD1JSW7gosiLCzEjUrLrbPaP9c1J4pmacXg
+phOUXwRXpcY3MOSVGojylk7k/O3Qzht+apmQiF3IltjKsQAwgI5xAxWrFL4Dn09DfDIakG9X2jQO
+/F9H3Q3lGjAAPGt0DRREsx3zgZOCxH841KqgWj2WpDcu7H0KtTNDG+s5vwx+46KxR0qbdT4rj/ut
+6yQH99Gjw8WsyDiTNCm5bLYJXxsGRGCACvCx11AQmxo4UYMz/641UCIzYVydKyXwRnFWZ9aIaUkO
+A3jN/DpSuySb4/g7f0ZqwGLUKZ3DMSTpio6SYghHftCXCm71XLTGjrb7fSOjsoxWbWY0YzrnB85w
+CTobLz8bu1O4fnVS2CXhOEi5TUTG1hsiYhIeFTaAXgNzYQDuPQVvLyF/Yg+B3QJXksCWCrUX6wbR
+DstnEIllgWuj/kE+nIoQO50BAlekf1VC7fSwhNG0hJVam+MwVd9T6x8rJC/9c8I8LZEzEX+GZo5+
+2eF3hNBrKfnhUlKeJhFOcFc2CFgdL9PiCVPxohuZsQ09T+rVDKnCJVKNcupMSIo1N7AsW/UyaI/N
+aqiFYk7paxA24Rydh8ZfYcZais34316Sq/SyclNGcq540w5Ks7jR+8RMPQheH6eMeON/WjeToCR/
+mdbNXJ9DpKvZKZL49sL4JKuanfNN3VYmi4xAGhVzkBv+FQE/+TYsMUejCvzpQcNUpKk0PKA6mkzD
+MkGDkUOkVPB6usY6M0QpdNGBbY3JoxYnxlyorQrM5sy1w17nvUq8b+RF2I5rG6S8wsG9PByDXNVR
+ImeXR9cd/Wl5cK7mhZq3qXpU6lAxH2xtBpKbxw5zAMT5ZsYfU9JKAMn6FumzoAIzNSy9a2yzOEoI
+YSfZ0AV1Fyi2BtqT5TOBUmyCiTNCSkzh2jtoCalqYewo+L4s8q7t1Fc9rfkw7awgkDlKD+X5eYG3
+ljW9YZ9hzQVxU5ehGFnas1K4safPos2l7IB8U8+e+H/a++64prZ07UBoQVpIqCKEXoU0QkIPHaR3
+FJEWmihNpErvvUlXqkgTECyIqID0Ik0UVJQOoiBVioAfzpk545yZezhnZn7nm3vH54/snb3eVZJ3
+P2uv510rK4IcMVV8sBm1T1P5jHWZsoKQXnz5qsygjkQZS4JzVyjFQP+B59klRP98OQFKlfv06aed
+aor0wWsGra6CQPv+/Uz26IuDwcfnOMxHFCdPN/dduEjCC8ycmZeVNmbuUiaQsfuAplrleUBmzZrS
+hq8kRY1TqYLwOfpoosj7Nx4EeOeaS1n7bJpezxdU4+0YDZ2UexiuPMgxO06iyPw5/nqisaUE+Al/
+Yg63RunpjZGoRccxEWVKrxmEQXlnblryvtbihEWaxM5jut6O6FIEMQUe+WGp2dPYfSXpZjPCeCD3
+LeglciBRElizfD9T03JUke6RHPe4fe+xD+z3t/DuFFocj9aXC8gi7XMYqjyuECN2KitlwXb+QU9a
+2qmWih/tPX8zoqJR3MW3XMyn4tbHD+MUEiXmgNxQqiKaDa1BWte/6k6mSaKFQEzdsETqKVltXBKV
+i19CMoR6oP55scmULTVNs9ullyj2bq5XWy41QXj39Bja94TO3RZLjN79CphUMjl3+wYVhRUUsOw1
+dbc/ny54IT3MsnmL+nQGYdYkYM55WhXTGy9piUjzLP80cvpOOmEM6c0l5VucmR+pSj68p+8fZJ0Y
+En1bfiEqXZNEYIirqcXUeYDd2F2MnpKtuy64zt05BOTA0rryNPkyUi3gJNmsvJrg+sHdqGL9dhEN
+9bK0HGsHt/thLVShVJv0VTg59YlFYp+sTauRTJKgEB4lPJ+FUCH6FgWR5bHW29J3eZuaKTJF/MoX
+7RXJ5QeDPuVdNuDxssnwUoIUrIy2SxHAUMt+9sCXJgj2vWPkzx6aVAbJc3K25kG06HnAERpbbcMd
+uWcFzjELJaVtFJ6ehQvamn+wG3VWhbVaK1b0KlDjWwP7qTnnaoJRYml1aQimZJo+eeqYzrD5LQLg
+pcWDnOaburLTvTfXjSVbGxP4/ccLLicl2FMUSftnmG6r4IPHKMzuuQKZOwCQKCMdD+WX4om+n7IL
+3xse0xMTORvQbbMmpAAXu162S8cWE7I/dKU1JwiXI9SDGrci6YqKXHjXZzNrR05bzlmbQBSlnW2e
+Psab09m/OleU00JMekIY3w0CT38YiVWwyHi5qf96ZtBTDGj5QceaxeBN2IqAlyb1wugZ5rxJNTtL
+JtAtkD2oi2240Jc4Ze2er+5LXb08+rXkFYPqmGKrj3yaHqlPFeWYWbVPafmRI1WU92azgNwqqHEq
+dTX6AO3CZJuFIsHAfmOTfJ2AuQpeLuKR3BiCvdru+bYIH/oDJzonkk8j1gKBd4PlgIvXBCvUQsjf
+G+4Up2ZzTk9u2svGPNp+pSufm3s7EHgXktZL0nrizlZNN5aTRl02O8e9fGZIJyO7hEox3q+7BsWS
+vN1Q/R6AJL6RkDFMh9fbFpw6pwSazEeQcIOT/LMHyhjoScn9rg0+LMjFfQELRfLaZwl80Rcd4ZO3
+wLOt35DSjQZ18S/LlgPreJOlEqHyBflR2w0Fpfx6yQcL2DU+6fbNvm6tpzjI9M25kun1WAr0OfO+
+6JlZSLvNpqv8FKF5xow5LRXCA3o184L4JSyMbULm7mGPTpR+C69CEr96Wp6UDCTVtdoWalgGTiCd
+LLku4sUZkOiJCmbUeGGfXXzPlmIfhJHnIJNzuhPvM89eOyFF2tRA433gHFtKvUqqRTHffFuO6Z0L
+RUaIeKG7za0gkOrVxLlo2mhL1kiRyOUi+xMfBbda/M+WXOkM+gqA5m8HNuv2jh8O48IFPjaHLdMq
+WRjtk22oVa1wh9jcCzJsGtokG2zSnKDT2LkeyMiWNhWS24HfqxgHhQ4FBQorBDD0cMLMGqX2BjMt
+7ydYt8sDMy77v0CuqyPuH4raGS6DDb2SE2E22iJCgDfqkRx5I9fIDOmLePir3dD5GgUw5uHHx+iK
+dNVXZmFdNHS0H6e+FKEZhatp3iqnT/Dq63M/jxnCN35++7KETbUNtgI9EU+0M1c3qY3fx1afBEho
+JxolCWLX1DaivtSrIV3PcyoW321ls2pNrDfw18nmm02KoqRp8a605WZUZFdaROalPxOORzXjBeEd
+qh1bCnjmZyFPrcCDKcGrZLnkZ+IoS+U9TIKwpQC82HXFqcIDmMgmLbRmYHrymdQ52YpjoJnuxp6D
+RxbH0u/weHd+nDO66UfpVsuxPqJfkZTCOTHCfe/azgJvL0V6zkBvZ7Mm89qmuSd3OI64fy77NJUG
+iN0zMxEjYwjAVpqpT4m4s6biSAuPZ3awkKzlDxcCX7ypusC8j90t7WtwszIz9W3cfTA7G5jYdhW2
+t5wleQKj1UuR55BVC5rO9ZXJt5PRfRIWRAfrsqkQeg9+SvMqWa+Z/C2aLQ5Mwts4YkKkyh/h72FA
+Q0RMTvnXCAMn4Ki4AbAFTP/XqAa94M8hh6MyfhfVoP8uqkH/XVQDov6Slamfjij6H/9Uh74U1sV7
+Y6A7u2cDFSKjT/m27lHbfdUsbQP917703RZLgPiSv0Q19FS7FdcVmW6aubP7vJMeVG/irNSA5PmN
+22Zu234FGPLgM2cebwnAHzioUpKvCE48F5VDsBfNOGrY7szso7Zca1YCy4tBc+GwLPuGYDZoMJdj
+dhOzCDDxsmW4eSDxOiOrU1e930XL65+yyR98ya8dZfa+nupRlQg0WF8dwuVaCmLor6yjn2GYQ7Ny
+r0hEDHIBu7dWJa4UmZPPKhhKURz29yFKm66nNWdtTgMMg2OdlXxSboKoHk71JnOBR6yipiAmDRbR
+cIQcrdaCF64z+sznzPHoG17hV8gsk6WbI0vr1grI10lDI0I07szGPD0+w6Gczf20sDjSc3VHsd4n
+kb1yI27hBSnx47WgT5veEy5iTCC1CdcvNemDiAldT1MQpFrLz549cJZj1b57WP6xjRp/5iua4ef0
+Ps8G6JPHaRZCpE907ldFP0ekTp1y5tlorm52n3jQaTrSvMlixNMZhrrGsn6KIkluObD8TQ4gkO95
+Ce+zVEPsrWMJZ8Rtr22YKiFSH5grgbfMB7YhUJ3XhLgFNBOFmpLaddj529HppzsYeNhiYLv1zupx
+JOGhhQVi3A1QSVnpq7Nm1+heySpiCnshUkWcIU9PnEwrrcGQZl7v1hXQgkgtrZ5VY8e7BcBuXi1N
+OHHqU+BrPBcj+R61wae8hXyYmtRcEJ82eSwvkBWY8CAGhgtRNxcss9R+RpVrnZ8uLxFJquomnrZI
+4K5DwK3drc82upWxbbWBsW4LxXi6dbSWbpVGs1bq7H6Ak2/sqe5R1ckbnIqwg0pPwMfFBkklMxH8
+thhvel1k9VwOAUxRFz+Aj+RXPD21JKh1pvnNAMPzh6v2s2U6hlHktAJP6hjCivdN7Ex3zieRPBAe
+BFK4SQb5bTXeCjrWtdP2cupeHInJQuBd+Zq7L6VUAycUaab3TISSrMo+AE6MBsUGk0dfc3pM+omn
+be8ki82mKdW6RChV+5ACDzhA4OR0eotakE++jWaqy9UTFwKYHbKvru2nCq7ONGqohMSmy+UCm3nR
+vMEUYCSp9kSXmmZNAN2O6KYCNsyKiP+ppnR2Uq6/4v1POfwXyOWReRNbprbwuuVik1BaiBfnCVZb
+8DV2v0IuzUoRWQNem2byCZEsJIewcaI8PAIsnGhm4XwtaFsu72pNEkUy6GopOdHLHQbwWq7QyKeO
+kdh7mYUB2gelhfLFtAprHdExYTc4GEAgXye/28iarsVGZ9dzaMIpkANet7AJL8W5q2XF5xoI8BgB
+xCdw+VxKKtTkiorRx696nbhMTOmubTQVnSpC59vL76JivWqTuRa27XOj3Pz2NplsYRYtjnbjDr0J
+l1/CW/093Z7RjxwGzQuxzTrhgbwf9UV1mgywtDGcqHQFWI1bpmVzQpnPGv5zls26rkpac8FHtf0v
+hXAl8R2KLwW1yRRw054RPJsd9e4+cdeordgHpSsvAylF8nI888byHgRU3k8QaF9N2l9KO20n80Kz
+Qqw/tqs1Xq/uBUHkWmFrlin/5zGSelgFSJNtEeIBz21muxoEc9itdKCFuxg2UluH8DmXRGBh0eNY
+ZZVM2kwqn/NTs6EpGpyDsCHu24s1Bc1RRKU67szbBXT06dIfeGS71nIgZMdBM8lvYe63eG7lM7DX
+2vWyqMGZml+fDmLUPuw93MtZVi68MwJ2caGhYY9PyxIJ8KfcsMTPpBikQAXo3JO3CwkqwXshteaz
+G8hqIjeBg4ISVaF+md72pWPYa5n7A7WoiLQoyvvxef0cl4QoguSpyKeEshrsP9a2+uxXLUVdoNO3
+ZuYY3FLpN1A7IPkKQEft5K8s3e5FEdM2nxnicY85iGOChYvduoWI7KEIbKhh8qj9bBphAycX262D
+omCY0Bp6Qe+MonHWOd7x+mmhlzzV6Amj2ytQ2bPEWjaQe8Uc1EFpjMxkNM8fa4qE7xbYQXdq1gNP
+paNFHMznv5g9nUS9zNYsJmUHCn7WmSuc8KwPHQAuLl0XIjk5LwQDsYhXqpUtxLSvoM5aNz2JKbZV
+TauVHlxyTHP2Ve1Ts1+CsusQ0p3LqvNshhVOraYcOA7ZqeukiMerkvjCTJZ4zOV87V/U6/R0jcvz
+KaV6PTwZZVNQdqyfs8Nzcj1drAm/48DFUCC3b6jLOW6zOE8ljdMcNEyIEGqhR1LYnzsvOSPXZjZV
+G6naeIsazO9EsIWF7bAIP6aUUq3/Uobrmp169FYEe2dTr4RYmKSNz+Ruj3jJfAb+i2B2dwKP4o7D
+IMUV8SD5znBpBO3oc+q5BlVtGk/291a5rprMfB/6p3rpNbU5no0t63bQb9q250VOPVX6uF383qvr
+Hr2haAnI0oV4UFGtt5GonekeSSM0+isAcOUrgN4fHXwJx8MZnQNsfi1kHd9ERQvPczYBDIrEJR2U
+g8yGEsdUPfWDRpUDjzV7fuTmpapuQLalPKVtw3NN2ympkc8Ehtq2LLrfUwGTYLgw7jzrEnfj+WA6
+KFyeZxJSX+99jKPfVwCBrlsEvG7hwCy5NxpfdDJqaeg5C74YVh6lSuw4T59qPS/83r1JKpg5NK9s
+b2wQbIwzlenscJxUb9KxGdKaEZTv6Oryw2SMFeCTseCP+UgI/bm76SWVce3eNpcfEHsznyh6uzp1
+B3zqIcy1cBtcw9Be1HyNnKgJKydudgvq7DZcGDnlGLSB108V2i4Bgy+beM5Gh9nqE6np8n2Q7vU6
+xeLfPxIUtXLTpn/5S41AVLDIbU1OaVod1oMq32B+jtWJW+/4mlexe2bzIQGpzcoCHM5+2+0Tzd2+
+gZ91yuMw0QJyuzVVQ2tfAXQZL1/1XYQBH6v7++yd1SikoT/x1v6S1eLOsD5J0ZR2wGUqzQInG5n7
+bdiByop9IyWZXMj+IA4Yg/oKcDixAqAlHnzPwHlmu/OsRd6Nqjcyw0mz7SZhSnMFTy5Bb0CwwYM8
+tG6yokMhO86l4otJEmLCktA8UjIuB5/u9jA9ADDuVrpkOc4ilj+Hu82n4srSLKUWKzwKsVPiQN5Y
+RmcmWKr9sZ53y5L+HcZ74POYLidbD8Sao8WUFM3fzG+vFSy/Nu4nfZUlmxE8gZURt3S2hAxeG5t2
+R6lPYff1c2wvSD+Z/NDmoMYVxQkOS8jqzAfOqmLko3lUOfYK57vl7tBN+T8HqQTxybeX0yeZSrwZ
+YqUrZtFl7z9RUb1UzBNGt84svpLsDXk4Dg3sc2rn6GIL0MCzYILpv3hn14Ncm3JFwzyX7AdL8LNt
+tOT0MgbMQ6ngpPlyZpkONhRkLS9GfiQJMUYRte3Ee9FHLNXCOU4neMdzt77kHu6dFuVZH4IcaWrG
+k0sUyTHK3bI+JXFOvsW9UYu0Ds4exy3W9MXeQU+CpRXX6f2J1nUiyDQr+ZJG08IuqJ9v9xOYsacp
+S5Ynkrt3OfxexEirUppcanZWE6a5lu5wuB/1FeCEBfHt3uoN4zRcI18sl/R5bq0KGvOQ82Pi5J99
+kOCycQztz/WsoiKZpAsaWywDKrWOUkpVcc1rgHpPRa+K90A1LGBPqC/eeHYtcfEBeeTeQDRLHjBm
+HtfaqEXm7iZXcez+1F0jHbE2DccQ70K7tZc8n7/4mzeWJZmu4iHHS/VcTQtJUsfkR2Ic5N6NmDwE
+aOSB6Uc0CEJobXew5FVC50Lralen2Ih7okph8KzjbIp3w5T5h+WMh027JXTG+HV5N7svBYsN8kwR
+uBJgHl1gwxlQ1FQz8Sx/46upmkzc8Y7i57vVrGkLJqpjaH22rwBsDat9ggA5i+qjY5NF9mm4oNpM
+omlLRxsafpKzPWLGomkbugsrwtvHqCytJl1Y++Kq4QKqVI+FE7d5Kj+48WuOzwkcp7Jy8N7EN1Fe
+nSlKZ+eLo2ABh9Xu4AE7qZofGzijAXm6KVVMUZlvT3XIxfaLPWdQKvRzTLLuKSYK93tIoLvEMDOc
+a7ImQN6yItw428YWpoUIiaBs3b6t12epQv6wS4sUVnSV7PQ9d3NyC8GbLhfCQ98YGlOT1ybGyHrE
+zry/1Xn9wx6mNywy/INY6oEhtAPevGJ6SqhmPoAuDySjC0GT0rSASpBmveCOyXJiFvLOdeHIWqlq
+PgfXSPJgKsX6G+cARHINziT8ikQvzO9x04SI1Bh4RVMcGF8Z51js6miRzHf1IVKTRTk54emwxNPa
+CyJ0/MzjxauXNbijnHniVTNX1YgsrNf6MQ52eDjOUeWkKEiusA2mPVPnwn5tv7imKv/uFsdiD7t0
+7eMNaM5n+8XekOzQDZfFrMqvgDa02XE7xU1dP7+iPTVaHZFxv9gPH4t2K1napDhmQqywy+9zlM1I
+pMEjKbLvpb8CtlSGWbzv2a4AqZueyB177HANnQ8+kWToqXtbw/hse9wN4guU1fVK7Gr1VOSPATxd
+1+5GzTWd5AKD2pVZ1xyLGI1p8wPLDWLsVAMWEozvzEdFtPsoPG41MzzX1Te5Ixw279cY3DcUV401
+ouKezs5nIwpk/+SzbyB5EBLDIcJel8Oa8kklf9wwQ4y7WQPGGRXC7PAps9CZMwoY0tTRxervfXNo
+Acc/DAo4Jem9jbtiOyIc1mjcuP4VQCJLfR7ECt8pNTNouDOuroqnJJaX2Rh24o9Q2dM3r2AIf7lR
+z8en75nGHpcaZH0lcnmaUGiDF1ZiDzcf4wRDUgoecrldPRe6Ka7RSLJS0GaQu/dGYN+NlrycKiKh
+w6nMX7QLQmmzpOFG/xwa+bzNsA4jnfUYulNEjtaRNqVPygie/gpwNpSIZ1inZQAryvoPNV59CjCa
+SNmaqqfraDjLw5RGXteWNOBjXQWNWDXgFUlNxjtXh7ZlNFsjTtdwbckbV+jsBbDa3zCd3XVWt5SK
+yg8cq16l9stcddVaNNaLNYSQjl7lZ1YFTOfNZTNrk8iwd3vyqGm+UTH4cnOFk+f68D0jF6cgTTb6
+8DQlelCWal+COxEfN7vmDSqaSykp9b2gp+uemk5XjOMKbTbqPdiX214FJ7qfk35cRaWdoR3wEVQ+
+s1NCkstvivriMNMnmX4/fYq/KSEGVyZqsAh+faM1wWZNZzamLOkxK5tdsLQh4GaamBj/yQ46mAgg
+xvm8uhjD6Hycpqz0y6j7UYGF2WdwuQHku1UncyPnRs/cQinnRQpO9RMxfYyIQJy+um0XRXo9eEuR
++KJjAGsVobNW95kDsbfXpvhM0sQa9RpTsGIMsWka/LFqOS7INJM0wEnOPk7mnvpnpRGLrIz5MzCW
+6DwBTtUXzCX00s+mZHPyXQnXE3uL4iP7PaeIMymDmgD01UFLZa+JkLv4smhIfXp16nzHxAMBALDs
+eBLiVdmMZBP7qwQKkExFuvNnR95Wt+BP7xomVLhXvdSkW3tIXsBTQ239vHgTmW2TpPvdlqNF1GJI
+k80zeWjvC0ntRDLPSPMfPgCtM90rGQFw5DTvTlajKRPRUz35PD2w+1dAbTMje1kxWbvYQ2EEsoT+
+adplrMiwjWxXmiyrnmI6w9ZBvG/7RCRqIDgEdAMXUAlrYqr5fnsN+m/baxwRNwC20EP+GtWA/Ly9
+Bv1RGb+LakC+i2pAvotqQOtVMNr9dERJ/ziqASmFTQJmHZexjVqL7SuemcQxqxbn38U/L1B6Ricj
+k1SHeDPxl6hGafh8scvyLrcTYG4RClil6gnTCLbESZbSha1DLiUr0lTb84rR9NhPfsjBphoZXZWu
+eHVfQvgT1P44SuV6QcFMMWIXQ5/rCAaWVt3u6jH77P/SPVrAZddY1pmxfVRaKy3+4tqpRPxO9f2v
+gPAVGUcvUX2RsC3oqaHr5hl3qtjLh4iyaBaAO85wvTCma/z3becDzpOSDyrzvUkuo8UcjsAn1ei8
+ltMFiGUS8/brYKDxme2KlHckPfumqueCu/UpYBzlZCC6npFtf+lsL2b8/Yj6WcCU5PlcznTslo6H
+WtfE+z1DcGtdsCJQPIu+bjGeaYB+fuzpxpbh1rq1HuwAauarDY7+/KQCGYgp1JshiPVAzOka+Cyv
+Xxn7XJ9MbJmgGngZxYrWhnt2H3sgaRh8DWOeC2wzmzWUOzGK76ap0HxcTRR/04Kyc1UpcUamZ86b
+htTjlfZXgFEQd75cbaCnqnzYE71G6b0+Ifkt2o+6BRl3dZ7K69w4eOv8yZbzK6DiUV84x1z2hxU+
+OlkDm5E011DREDIzafbtm2eaBk9w7tJvLxcaN4pvEdnA0Mmy4QfCiQp4RezJyDQFnmAAd2zfPZBb
+6ORXwBLJOsUcodOvnBpgpDaN3eyQ0f9EVQTQpD/V74nHGkm00l7hUpJ+aTpwLkeMm87q4T1QGjI5
+IwSARXT0cXEkgumEEXGZos1NAsfbHuAqA6FiQicYloWA5C6xJFmFQeGR6h3reo3QTfLErryYezJ5
+sZc+1vLAbMWqD85Iom17pc1dP4/JXnyed9xyAarTwnxBooy2/8C4V4iqbqcaxj/Cw3kQyngrHH84
+/u+0zhBkeRRRXStGTJbzGbyJJwcCJmroNFdNoXdHON4wafX6mx50BysmVXJc6gApcm8zRXJyp3ZW
+0oacF/PyJEk5zTsaLb9YF4NLpUpB2i/GXOZjzXMzUnj1wLiilcyvJqfDqB/SJ1/XZ1BnUvrgo1iX
+aWsxgl6WA4pPu2wUkhrkZdPKwqizdZ5AJeG5Lddg9GbIzsuQT3AqOOp6Qim1YZYL5C5lYx/dlHGc
+YIuuRwLHVLCHIxP85YLsMvAeG6/JG7Mg2VJPy67qFL6YTDcFE6O4pJIZ3jhe/vieIKWgT10f5x2S
+mu1VscPe1UXlAEtkRHurbw3lAMGv4sliFqR3Xy9bzTp0JRGEby5kxK989N7PvJ/W6v9xDMbbmSkS
+PIyCrKu9S7LlfzP4nlIwame+3XZIdDWVTj6iOpas9HNWomGnAuT96baJAivo1Z1yVpv4dAjH+Nll
+mPAJwW2c8GQuV1Cm14xvIuS0FrYzLbrm1PD+oHz35Lp++daTNT7JIljGcok7Z+8LNvzTp+rNAf0P
+dECpN7sMak58NEwMi/ggqnOJRo0TpEMf1867vqMUMaGzmxE2sr5zE/r20+TZUqWg0P0BdcFJqLW/
+VEv5xikOcUZu3qiMz4JBemHWX3KY33Qc362M9PQxpbVrAa7168mYMdCeR60vz9VRxz7P3DOF+AU+
+DJNygZl2Inxl/F/geTLzidGau68uknv0AXp9s4O8vhw4CdZc4jKtFqfUEAgjmYlxTqVg2N8fK6l3
+h6HYng8lUOMTLSyd3EPPA2/XhGT6iG/3x5TzhebLjO1kP8jF749kBUKYyE1Ud4XpbUmus78jDeYz
+T97A5WSl60rMymGUTIJknu6VcVJFqezFHRw7GaYyvZTYxldEnDdIqBLucG+tohkk57PtJwTZxtJP
+FIypP15IDs9U6ipTZt1IgYwNvxq4i66xcAUn60Cgp9AUenmkr+QZ1zifr+n0GRrjxWjeueEduQiy
+ByJL8hNYyu0mVyPVgcSEZb1dIRFfp93Hn+COVhPe/ob9kGHfmW6zvnAgqU13G9GhK2fKfGStmxAQ
+25hNRchY7dSz3FkFztEehkayPs0aNp6yS0Uc59YOztucYf0KAIuvYWKlp9UWXt2k32O0tg6oHAK2
+nWPQkXpHPSVABFwW6gmS5/HdHAci9Wkf5b5dNOMyWra5WE85La3a8H6Ys8Vv7XogLVUQUlrRij12
+qS01M9/OOoDR+/3WO086KgyAJZ49pFcPIAmhX5t0bTHSA9Ki3/BGZareXqwSrMtW3Dm25fs8Ti+M
+hn717LChndYnwRmv4XMMJMSSsfy55RYPGXlU+K6wd2/Yn89MDpo5sTfko/kZYyQRIkyt2v2kkPMa
+pOWybwmF/8S5C5NLN/WVk2EZgAMNfUV3sQVBq2fDrMNLEhO3CVPrgZ7e08MjszsK76jkaK251OKZ
+GaxA23Or14nP7q3eilHspficevfcZc2780UWesdbx+3pZK+8uyvwFcC/bkn/nC9C26dAJ2mERwPg
+b/pU8XVb2/ZA4YQ69vhrVDPtOfu5mK20JCnJqHitHFw2yTFoQgY+D5vO0xpOS8QeDZ4fIFrRDn++
+MsuahjuXFfm5wNXXIBXY8URWcIyHyNGQhUMdodcbLaqkJD+212NXpOYQCHfwS3uVhJsP3eDoO/us
+SezBR9FuFUgjZKZGUvJAuoHzXJ9NlfOo0pgb4irU09DVfKp49QorEROariwMNEur+kAzzVRwvkIs
+s5IxhlO1hSNLXy84CKZ74nHTTR+qO8fa1GQAVGsyIvosNJTcXSf6z++WX4FxCto3RS9MCCQv3dJ8
+NHYx97FgDMu5onJLl7Thy8tOZNs1lh+Ln/Uqh1RxtIQSk/c5w3bqpSWtrz7FpenwmGJPGfBjSpUF
+z59zugaykzahmI1NcwKOx6YyKeZNvR4g7EwttfUzcuT0cpaz5a+n8TxfZ2T7YMKsEytTsN0jcVnN
+nsBeEOHCdYOihd0/yqX3BY9S3COgig6GM73nDJPV+r6zH1YZur2xddDFnl4eKB7cvjYc6XOLstbN
+lyx0bX6x4RGLb+CGIyWbcKrp9YVwGlMU+EPf8dQ0mzzCe9jN1qJNVe3jm7YKCd1cFdqEW/h3e2SQ
+ccWDq5IBgvxsNTVZFPQaqT5zPcQc1Rlf6gSYwirfMFQV8d7Delie4AR+HmaUl6rs7CQDDc9szC62
+EPwSD8bm+xbAL9NBd+i632SWRqY2XFRgHXoA0Hz0pAGbbkKeGimi3Ew+xjHCw8FLpwx8Si2jjMUr
+cDRUjU7ybeN4zltf91GUM9Ki6wRQSrvrNMi9pQIKUumbTucMLuGJq0XitsUGwlJp7J8wuzpD1GlD
+oq6K8WzP4erm0vURw5GQPu1n8W85d4VCvPLig7Bwjdw+q8ov3TJ3Ej7uGaZlM7yZeWF+wYJPePDV
+TppvbePg9UX7bCbzx9JizFXT9kwpdTxnA70CL/POxsVELmWbSYF5uAd71t4eEzGXFGm7XPnoPruL
+vvPuql0V4uDqy7hGO5/HL6z6WOXJLi8DeTnHz0B8om/TAQcNU5WOjXysXGPnTx1JLxQ59a68FY0G
+b4mr6W77PqK4YMuuIjI5PdXGV72Yf+pBGR0C/uJzcWBKYu32mdSNEQiV33J03ckzx13fzQUD0nu3
+/UcNDkc/B+BLV1+pZmazO2WOUkq9pkaoC8q1WYV5KlxR0y9xXWHStnFS0XMlE+8VzMBnBGvce4Ta
+5pgT5KHLWE0MJTS2hL1QX+0d1XvUHDk+5UE40atYzacTIIjegbwmsLT4f3LZzQd1S/OXF+mPBqTR
+tgbpS1o4Vd5uJn1IRHIZf/aUFKPN5rkeG2u5qa8AjT3drHU9OnjH+Eqs4nT57NLTqeLTBXrE3Bk2
+Sq/pQbS3Z+ZXNXgZWiuWM9zK62uG2yYmT6CyQwEDZqxTjHhv2+lNMyFGLenoRZG0KTeTbL5W9ife
+wzR6TdwRd29zf/4K8JSPlN9/0ZXabuB/54ULCDqf0pVJz8WU8bp/nFmNuo0swTunoijUcQOyerqA
+C38KF+dfvIpvXcPTIKd76a6Y5G43sOWWPhC4fC1wic1NxbVo8oD+7rMY0b6VsxX9ElolVBV10xy8
+QRyy78Vv4WJ4ekJ6LndsNM4/jLfh00gl23LlXusfyZPv5CBnmNVkbUev6rDLjJg2ArqNJnQFSni2
+CIivgLsdKdAADmjeZctVM6OVOt3BYkbLlFanfuMrFtZsu3K9BZlktKTyTLcvJ7DJXqwtuCYj43tL
+XELCa7uBnH9jhMVX1slojJS6aNDEhhrc02qhg1Jg0rqWgum7LNjflufgmgMNyX+oKo4SGwkKvlpf
+p+seIBQLJZEygF5HDtPchj3ZGu99cI3adcci8vFWqrpqbPCmexRsw/dLQUpB03vcfrr54KBd/MJV
+YmZ+DTZmogN24RUOxnPhn+bb8HCPGsaRyHVt3Fzro3VDyEgvxP54X5A6Qt8bFK5LU6WBvu36NrDG
+pQ9AqfJshZaYzs8vpyyjIQmKu2gSAC2jOD7O0/WI3SklS0KumhGv1kRM6CSJO/XomF1vUiRERqjW
+K3fWaNOjh/c8yKdBsc/CXtwuIAellfD+ul0aCKu65hDWteqWf4kWWsU9q86rrpC/RsJ2UJ4k8IKD
+EEqFE5QkRoZQ8cCu03mA36wr9yKpgLY0ensDr8kLEWzDN0m6qoN2tj5M9bdT7fh8qRE4qH5c3Dlq
+4VP9FcCx9mXEunsvQ2WWjLBhJMPU43qMqE9zgHXUWmmn3ohcQhNlEMT8JLDPQ/rEV4Ax6WvnU9vZ
+740hWzrmyoacQIpuycj3+hqswhUMZAEv7MRITUu6V8zl1hlbC0JNRtwBWN2xdMnbAU91u+ZPVQxH
+jqw+iRWQqtHYCCiLimtYt/8KGDlXHs9Iphgsl7fYCnlq6P7WuG+XrCiCe8w2aK4i2VigP1h1lOMy
+d9wwUtr79nMu6S07DIdnaFB/7DOBz33ItvqMHC46OQKvZs4n1dvQyVYELUjRgTDHrjz/WVY4l94v
+4GPnTOe4vvsFCsox13Uldc3nile/SJn2UfmY1bRysZIDwqKk5trIRyYaSDcvfQiXdx+B06IpxCiJ
+RVFXoXxWzFWs0xPlNOzGRELXRSA6F57yNxGe64bqNwSSEG+xGT+kk/8Stp9Q0VsshMtLAZBTTjnd
+Fj6VQOIdxracWXsrhfUA+n5uD/oVAG2RfpJ9LKOsAJh+7l7fdPLekLUs/4LhaxLNQvawQAxDwYck
+Eh2VTzkS6pREHOX9guyY8/wxD8+S8gy5QEUWx0iJZlDJcWB9RRqSbrD0SyFSdsPmff2n0ktrFaqe
+Co7k51YKRI8xpJWGiRop9DxUK2C40TdBb/a+h/HwUarFH3NsdDgmmpyKu0TsId3TB2dosDH1e+Y8
+cx8Z69SUGPo4d9wkrDRCLVykNy2zFKOm7nM/duJL+ETtrg9hXRvmD9W8TmkIajS+O2YLStU6FwOL
+cTv56r64aorCQzEjcK9+POPm3guV+W2oAMz+YdN1/cb1W6bO01MPMkvfp3OdzReJkB6Aw4p9a9Wu
+FYKnyh6pDC30zSgulu2Qs1DNcqLOcguTi+0UwZDeFUq0UrMrV9xWDawzNTFFPNJtucEkV+52eG4t
+Nr2lLgOyqM2Xq6Cm6lJPu9oGfh/VgBBxAo6KGwBbINC/RjWgP0c1IEdl/C6qAf0uqgH9LqrBEFR1
+fEMMSJTxjzcNhUaR8qMR1RUpz+/NXLLzeCG8foa2QgB6bFLKizrQ7MuF9RA9LdhPWVEEz1oDh/ab
+gSpGF21OnYY4P0x/efak7Uz9iddl784mlizQL/J9AJbR663SXtTyG/XHV0AeBlxs/hJahOIba7Zs
+OlNsxmMW4ZxdbVw7T+zZ/CTBbJMxHCpxPjZuTaNTMqbTifiqz/uzVajUKzbDJVtjCX6zj6QSJN/3
+vJ9+MBxPcbwz0QN8s95GTswpii9qd/NzWMEA863WXeuqAkyR3Er1NDtFQ8QJ6q3nwcde571yo2AI
+5YoiymiM7aab/Kw88vLgnLl/bNxQmLfap03qenMuWxstw/Hc/sqXV07vvltuCfEBKF3U5thv8K6t
+L+Xe3Fy0d5tlE02IUcfhRPoA8Avh5fPLX29agcm7Of0/PIisSPV3veX1Ca99usejVsZWK+bTTSgz
+uwYFnacCnahsIN/5BzjaoizW/oiPvjZ6FXdqLmXkHkQe9X/GR/0z0FF77B71c6WjFv4ctZfLUSG2
+o27WI3fA/W5TGugha466L4EtUIa/sobh501poEdl/I41DN+xhuE71jC2nvjoHEJEdP0fs4bhNZF9
+qWXIMk1B45XRMaOH7NABHauR03PFdX4yxOlFZRbKGnV/NvXjIly85O5I8OCSPOPH5WDl4cAlyYWU
+QKFRCCs7K2sc0hZtjSGgJdAYjATKFoO0tkOJW8MPU5BYBBpuTUCgrCUk4LYSduI4O2uMrR0KixRH
+cIlw2bhcvHRYsqGPK+GwQMcLVvYEMSdXgv1h0gUX28NraCRchMvD0ffwVAKFhYv/6fq5S44Xvtkj
+4QjcSTjqJAJpiEBJohCScLQw/PAVznXl7JXvHcFw6IijPiqwhYHxr45g/NkRDEdl/M4RjN85gvE7
+RzABfuA3wcPLyv2CmLWv70m4lRVCHAdH4uysECg7Oyvs4dEGY2dnhyFY2xDsrGwIOJwtxgotZuPg
+efG8h5gWXltdRdnA8CT8T/iVOg5TMWg07NtRAiP+pyMc+dP7n4DBwBAoNBohcXiPiovDDhMl4GgA
+7NfK/LfB0+OSlfthU5ysLrp4/IrdoZmd3a+k//RJYD8f/5egHm6owgUgImJ1JlwmONtaiyr4XCJ4
+OXoQFF0uuFq5W11ycScGAIlIACXJHRRkACJiIiAxydHF/sD/Evzz/NfUUTz12+o4kv+HfPlb/qPQ
+EuI/+P9H4J/3v6KRvr6ytuFvqONI/yN/6X80XALxw/9/BH7xFKc8OscP/F/Cv9L/q/7GOo7iPwaD
+/pn/SDj6T/0//Af//xDI/C1gWlbuMARSBPZN6cEEFJUNBWG/MKE8VH5oCUk0QlQCi8CKI2HOLvby
+zgR7wkVbmErLfW3PCyqOzgSYQcv9b0eDQyUJ0265r3yoZn1gii33FaxsfzpX+NO5grOLzXnYKULL
+/VMEH2V3dxd3mFLLfSV3F1dXwp8NNVvua34bnML0Wu4bENwOK4AZttw3PFSkys5Wrh4E2+9ahEOg
+JJAwW2t5F1fCRdi3F8eL9t+nix8KZthlgruHo8tF+UOfXvrW5jNnvzUXrvDtaPPt7fc5cDgs+luJ
+hzeI4+Fo+Js9EqZ6aP5XIywcgUFif67W1uUi4VsTcaISaLQ4DnHB40+m2EO1fHgFh0FIfDO1cXbx
+IMC+vf6liT8ZiMPh4vC/GvylMDRSQhSNQbTc9fi39tH/BP+/fWuEkx6H3wVB1NZa7KcnuOjhXfA/
+1XHk8x8l8Qv+i2MQmB/8/yPgtipFy3yo//789tuRiNSVcEgQkouezs7/P5v2A38A/nX+H60Df4/+
+g0t8G/+Lw9E/nv9/CP51/x+tA3+//hNHo37E//4Q/K3+Q/3Qf/9l+Hf0/0fpwCP4j8CgUb8Y/6El
+0Mgf/P8j8E/qP4ykOEoUdehPJO4/RP/91CIEShyD+4f678/p4uLiv1X//ZQDeTgigf/P+u/PRkgM
+Cv0P9N83aYPDHOq/f0lmwxFo+H+WzIYjvhH598jowxy4XxPmcARWAg5zcvF0v2jlLO9OsHE5LN3n
+W6mI762QSBz2763+fHJYIkz+e3OUuATq96l9LAKFRByh9r8ZYSX+ztsSOFHxbwERzN/IfXE4Cgf/
+dbmPwf0DuY/EYUSREuL/brn/d/jX+/9fjCD+QR1Hjv/EEb8Y/2GQ6B/jvz8EHo9EoEfN/xIDSX5M
++f4fxb9N/4laW53/H+r4/foPg5CA/+D/H4Ef83//3fiJ/xddbAkeP0XyEb8Wyf/ncBT/Ed/F///E
+f4Q4HCnxg/9/BIavYxiYfhH/J//ziJmIXr/zHUwSQET8farQoZSwcpYsOPuMGO5i3bVHdCDX0wr6
+QHS+D9PPOd/2SqL9q20oMFvSg+D2c7E/8J+K7/n/21d0/j4c+fz/5fpPhDhaHPWD/38Efqz//O/G
+9/z/7Ss6fx+O5P8v138i0D/0/x+E7/3/21d0/j78bv2HQGPQP/TfH4If+u+/G3/b///WFZ2/D0fx
+H3N48W/m/xBohPiP9R9/CP6ViSksHI1AYP6TJqawCLQE/FemmbAIHAaO+j0zQlgkCidx1IwQFoXB
+YP9+/ScSLopEYXEY9N/OCCEO2fCrM0IIBA739zNCCBRSFIX+ty8A/YEf+IH/Wvw/TzqIjQCQAQA=`
diff --git a/p2p/protocols/protocol.go b/p2p/protocols/protocol.go
index 1600a11f9..a9a00984d 100644
--- a/p2p/protocols/protocol.go
+++ b/p2p/protocols/protocol.go
@@ -243,7 +243,7 @@ func (p *Peer) Run(handler func(ctx context.Context, msg interface{}) error) err
// Drop disconnects a peer.
// TODO: may need to implement protocol drop only? don't want to kick off the peer
// if they are useful for other protocols
-func (p *Peer) Drop(err error) {
+func (p *Peer) Drop() {
p.Disconnect(p2p.DiscSubprotocolError)
}
@@ -254,6 +254,7 @@ func (p *Peer) Drop(err error) {
func (p *Peer) Send(ctx context.Context, msg interface{}) error {
defer metrics.GetOrRegisterResettingTimer("peer.send_t", nil).UpdateSince(time.Now())
metrics.GetOrRegisterCounter("peer.send", nil).Inc(1)
+ metrics.GetOrRegisterCounter(fmt.Sprintf("peer.send.%T", msg), nil).Inc(1)
var b bytes.Buffer
if tracing.Enabled {
@@ -291,7 +292,7 @@ func (p *Peer) Send(ctx context.Context, msg interface{}) error {
if p.spec.Hook != nil {
err := p.spec.Hook.Send(p, wmsg.Size, msg)
if err != nil {
- p.Drop(err)
+ p.Drop()
return err
}
}
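The counter added to Send above uses Go's %T verb, so besides the aggregate peer.send counter every concrete message type gets its own metric name. A minimal sketch of the naming behaviour, assuming a hypothetical kill message type standing in for a real protocol message:

package main

import "fmt"

type kill struct{ C uint64 }

func main() {
	var msg interface{} = &kill{C: 42}
	// %T expands to the dynamic type of msg, so the counter registered in
	// Send would be named "peer.send.*main.kill" for this message type
	fmt.Println(fmt.Sprintf("peer.send.%T", msg))
}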
diff --git a/p2p/protocols/protocol_test.go b/p2p/protocols/protocol_test.go
index 9ac76ea2f..00526b97a 100644
--- a/p2p/protocols/protocol_test.go
+++ b/p2p/protocols/protocol_test.go
@@ -126,7 +126,7 @@ func newProtocol(pp *p2ptest.TestPeerPool) func(*p2p.Peer, p2p.MsgReadWriter) er
case *kill:
// demonstrates use of peerPool, killing another peer connection as a response to a message
id := msg.C
- pp.Get(id).Drop(errors.New("killed"))
+ pp.Get(id).Drop()
return nil
case *drop:
@@ -269,6 +269,7 @@ func TestProtocolHook(t *testing.T) {
panic(err)
}
tester := p2ptest.NewProtocolTester(prvkey, 2, runFunc)
+ defer tester.Stop()
err = tester.TestExchanges(p2ptest.Exchange{
Expects: []p2ptest.Expect{
{
diff --git a/p2p/testing/peerpool.go b/p2p/testing/peerpool.go
index 01ccce67e..09db4b246 100644
--- a/p2p/testing/peerpool.go
+++ b/p2p/testing/peerpool.go
@@ -26,7 +26,7 @@ import (
type TestPeer interface {
ID() enode.ID
- Drop(error)
+ Drop()
}
// TestPeerPool is an example peerPool to demonstrate registration of peer connections
diff --git a/swarm/api/api.go b/swarm/api/api.go
index 86c111923..96fb86e1c 100644
--- a/swarm/api/api.go
+++ b/swarm/api/api.go
@@ -41,6 +41,7 @@ import (
"github.com/ethereum/go-ethereum/contracts/ens"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage"
@@ -53,8 +54,6 @@ import (
var (
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
- apiPutCount = metrics.NewRegisteredCounter("api.put.count", nil)
- apiPutFail = metrics.NewRegisteredCounter("api.put.fail", nil)
apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil)
apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil)
apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil)
@@ -188,15 +187,17 @@ type API struct {
feed *feed.Handler
fileStore *storage.FileStore
dns Resolver
+ Tags *chunk.Tags
Decryptor func(context.Context, string) DecryptFunc
}
// NewAPI the api constructor initialises a new API instance.
-func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey) (self *API) {
+func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey, tags *chunk.Tags) (self *API) {
self = &API{
fileStore: fileStore,
dns: dns,
feed: feedHandler,
+ Tags: tags,
Decryptor: func(ctx context.Context, credentials string) DecryptFunc {
return self.doDecrypt(ctx, credentials, pk)
},
@@ -297,31 +298,6 @@ func (a *API) ResolveURI(ctx context.Context, uri *URI, credentials string) (sto
return addr, nil
}
-// Put provides singleton manifest creation on top of FileStore store
-func (a *API) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
- apiPutCount.Inc(1)
- r := strings.NewReader(content)
- key, waitContent, err := a.fileStore.Store(ctx, r, int64(len(content)), toEncrypt)
- if err != nil {
- apiPutFail.Inc(1)
- return nil, nil, err
- }
- manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
- r = strings.NewReader(manifest)
- key, waitManifest, err := a.fileStore.Store(ctx, r, int64(len(manifest)), toEncrypt)
- if err != nil {
- apiPutFail.Inc(1)
- return nil, nil, err
- }
- return key, func(ctx context.Context) error {
- err := waitContent(ctx)
- if err != nil {
- return err
- }
- return waitManifest(ctx)
- }, nil
-}
-
// Get uses iterative manifest retrieval and prefix matching
// to resolve basePath to content using FileStore retrieve
// it returns a section reader, mimeType, status, the key of the actual content and an error
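The API struct above now carries a *chunk.Tags registry that the rest of this patch threads through every upload. A minimal sketch of the tag lifecycle as it is used here; the tag name and call sequence are illustrative, and DoneSplit would normally be invoked by the splitter with the root address:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/chunk"
)

func main() {
	// the registry handed to api.NewAPI in this patch
	tags := chunk.NewTags()

	// an estimated total of 0 means the chunk count is unknown up front
	tag, err := tags.New("my-upload", 0)
	if err != nil {
		panic(err)
	}

	// per-state counters are incremented while the upload runs; once the
	// splitter finishes, tag.DoneSplit(rootAddr) fixes the total
	fmt.Println("chunks split so far:", tag.Get(chunk.StateSplit))
	fmt.Println("total:", tag.Total())
	fmt.Println("registered tags:", len(tags.All()))
}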
diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go
index eb896f32a..4a5f92362 100644
--- a/swarm/api/api_test.go
+++ b/swarm/api/api_test.go
@@ -19,6 +19,7 @@ package api
import (
"bytes"
"context"
+ crand "crypto/rand"
"errors"
"flag"
"fmt"
@@ -26,13 +27,16 @@ import (
"io/ioutil"
"math/big"
"os"
+ "strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
)
func init() {
@@ -41,19 +45,21 @@ func init() {
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
}
-func testAPI(t *testing.T, f func(*API, bool)) {
- datadir, err := ioutil.TempDir("", "bzz-test")
- if err != nil {
- t.Fatalf("unable to create temp dir: %v", err)
- }
- defer os.RemoveAll(datadir)
- fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
- if err != nil {
- return
+func testAPI(t *testing.T, f func(*API, *chunk.Tags, bool)) {
+ for _, v := range []bool{true, false} {
+ datadir, err := ioutil.TempDir("", "bzz-test")
+ if err != nil {
+ t.Fatalf("unable to create temp dir: %v", err)
+ }
+ defer os.RemoveAll(datadir)
+ tags := chunk.NewTags()
+ fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), tags)
+ if err != nil {
+ return
+ }
+ api := NewAPI(fileStore, nil, nil, nil, tags)
+ f(api, tags, v)
}
- api := NewAPI(fileStore, nil, nil, nil)
- f(api, false)
- f(api, true)
}
type testResponse struct {
@@ -61,6 +67,13 @@ type testResponse struct {
*Response
}
+type Response struct {
+ MimeType string
+ Status int
+ Size int64
+ Content string
+}
+
func checkResponse(t *testing.T, resp *testResponse, exp *Response) {
if resp.MimeType != exp.MimeType {
@@ -111,15 +124,14 @@ func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse {
}
reader.Seek(0, 0)
return &testResponse{reader, &Response{mimeType, status, size, string(s)}}
- // return &testResponse{reader, &Response{mimeType, status, reader.Size(), nil}}
}
func TestApiPut(t *testing.T) {
- testAPI(t, func(api *API, toEncrypt bool) {
+ testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) {
content := "hello"
exp := expResponse(content, "text/plain", 0)
ctx := context.TODO()
- addr, wait, err := api.Put(ctx, content, exp.MimeType, toEncrypt)
+ addr, wait, err := putString(ctx, api, content, exp.MimeType, toEncrypt)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -129,6 +141,40 @@ func TestApiPut(t *testing.T) {
}
resp := testGet(t, api, addr.Hex(), "")
checkResponse(t, resp, exp)
+ tag := tags.All()[0]
+ testutil.CheckTag(t, tag, 2, 2, 0, 2) // 1 chunk data, 1 chunk manifest
+ })
+}
+
+// TestApiTagLarge tests that the number of chunks counted is larger for a larger input
+func TestApiTagLarge(t *testing.T) {
+ const contentLength = 4096 * 4095
+ testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) {
+ randomContentReader := io.LimitReader(crand.Reader, int64(contentLength))
+ tag, err := api.Tags.New("unnamed-tag", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ctx := sctx.SetTag(context.Background(), tag.Uid)
+ key, waitContent, err := api.Store(ctx, randomContentReader, int64(contentLength), toEncrypt)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = waitContent(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tag.DoneSplit(key)
+
+ if toEncrypt {
+ tag := tags.All()[0]
+ expect := int64(4095 + 64 + 1)
+ testutil.CheckTag(t, tag, expect, expect, 0, expect)
+ } else {
+ tag := tags.All()[0]
+ expect := int64(4095 + 32 + 1)
+ testutil.CheckTag(t, tag, expect, expect, 0, expect)
+ }
})
}
@@ -391,7 +437,7 @@ func TestDecryptOriginForbidden(t *testing.T) {
Access: &AccessEntry{Type: AccessTypePass},
}
- api := NewAPI(nil, nil, nil, nil)
+ api := NewAPI(nil, nil, nil, nil, chunk.NewTags())
f := api.Decryptor(ctx, "")
err := f(me)
@@ -425,7 +471,7 @@ func TestDecryptOrigin(t *testing.T) {
Access: &AccessEntry{Type: AccessTypePass},
}
- api := NewAPI(nil, nil, nil, nil)
+ api := NewAPI(nil, nil, nil, nil, chunk.NewTags())
f := api.Decryptor(ctx, "")
err := f(me)
@@ -500,3 +546,31 @@ func TestDetectContentType(t *testing.T) {
})
}
}
+
+// putString provides singleton manifest creation on top of api.API
+func putString(ctx context.Context, a *API, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
+ r := strings.NewReader(content)
+ tag, err := a.Tags.New("unnamed-tag", 0)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ log.Trace("created new tag", "uid", tag.Uid)
+
+ cCtx := sctx.SetTag(ctx, tag.Uid)
+ key, waitContent, err := a.Store(cCtx, r, int64(len(content)), toEncrypt)
+ if err != nil {
+ return nil, nil, err
+ }
+ manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
+ r = strings.NewReader(manifest)
+ key, waitManifest, err := a.Store(cCtx, r, int64(len(manifest)), toEncrypt)
+ if err != nil {
+ return nil, nil, err
+ }
+ tag.DoneSplit(key)
+ return key, func(ctx context.Context) error {
+ err := waitContent(ctx)
+ if err != nil {
+ return err
+ }
+ return waitManifest(ctx)
+ }, nil
+}
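TestApiTagLarge above expects 4095 + 32 + 1 chunks without encryption and 4095 + 64 + 1 with it. That follows from 4096-byte chunks addressed by 32-byte (plain) or 64-byte (encrypted) references, i.e. branching factors of 128 and 64. A rough sketch of the arithmetic; expectedChunks is an illustrative helper, not part of the patch, and only holds while a single root level is enough:

package main

import "fmt"

func expectedChunks(contentLength int64, encrypted bool) int64 {
	const chunkSize = 4096
	branches := int64(128) // 4096 / 32-byte references
	if encrypted {
		branches = 64 // 4096 / 64-byte references
	}
	dataChunks := (contentLength + chunkSize - 1) / chunkSize
	// one level of intermediate chunks plus a single root chunk; valid
	// only while the intermediate level itself fits into one root
	intermediate := (dataChunks + branches - 1) / branches
	return dataChunks + intermediate + 1
}

func main() {
	fmt.Println(expectedChunks(4096*4095, false)) // 4128 = 4095 + 32 + 1
	fmt.Println(expectedChunks(4096*4095, true))  // 4160 = 4095 + 64 + 1
}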
diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go
index 5e293cca7..9ad0948f4 100644
--- a/swarm/api/client/client.go
+++ b/swarm/api/client/client.go
@@ -40,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
+ swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/pborman/uuid"
@@ -75,6 +76,8 @@ func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, err
return "", err
}
req.ContentLength = size
+ req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("raw_upload_%d", time.Now().Unix()))
+
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
@@ -111,6 +114,7 @@ func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) {
type File struct {
io.ReadCloser
api.ManifestEntry
+ Tag string
}
// Open opens a local file which can then be passed to client.Upload to upload
@@ -139,6 +143,7 @@ func Open(path string) (*File, error) {
Size: stat.Size(),
ModTime: stat.ModTime(),
},
+ Tag: filepath.Base(path),
}, nil
}
@@ -422,6 +427,7 @@ func (c *Client) List(hash, prefix, credentials string) (*api.ManifestList, erro
// Uploader uploads files to swarm using a provided UploadFn
type Uploader interface {
Upload(UploadFn) error
+ Tag() string
}
type UploaderFunc func(UploadFn) error
@@ -430,12 +436,23 @@ func (u UploaderFunc) Upload(upload UploadFn) error {
return u(upload)
}
+func (u UploaderFunc) Tag() string {
+ return fmt.Sprintf("multipart_upload_%d", time.Now().Unix())
+}
+
+// DirectoryUploader implements Uploader
+var _ Uploader = &DirectoryUploader{}
+
// DirectoryUploader uploads all files in a directory, optionally uploading
// a file to the default path
type DirectoryUploader struct {
Dir string
}
+func (d *DirectoryUploader) Tag() string {
+ return filepath.Base(d.Dir)
+}
+
// Upload performs the upload of the directory and default path
func (d *DirectoryUploader) Upload(upload UploadFn) error {
return filepath.Walk(d.Dir, func(path string, f os.FileInfo, err error) error {
@@ -458,11 +475,17 @@ func (d *DirectoryUploader) Upload(upload UploadFn) error {
})
}
+var _ Uploader = &FileUploader{}
+
// FileUploader uploads a single file
type FileUploader struct {
File *File
}
+func (f *FileUploader) Tag() string {
+ return f.File.Tag
+}
+
// Upload performs the upload of the file
func (f *FileUploader) Upload(upload UploadFn) error {
return upload(f.File)
@@ -509,6 +532,14 @@ func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, t
req.URL.RawQuery = q.Encode()
}
+ tag := uploader.Tag()
+ if tag == "" {
+ tag = fmt.Sprintf("unnamed_tag_%d", time.Now().Unix())
+ }
+ log.Trace("setting upload tag", "tag", tag)
+
+ req.Header.Set(swarmhttp.SwarmTagHeaderName, tag)
+
// use 'Expect: 100-continue' so we don't send the request body if
// the server refuses the request
req.Header.Set("Expect", "100-continue")
@@ -574,6 +605,7 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error)
mw := multipart.NewWriter(reqW)
req.Header.Set("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%q", mw.Boundary()))
+ req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("multipart_upload_%d", time.Now().Unix()))
// define an UploadFn which adds files to the multipart form
uploadFn := func(file *File) error {
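The client changes above always attach a tag name via the x-swarm-tag header (SwarmTagHeaderName in the server hunk further down). The same can be done against the HTTP gateway directly; a minimal sketch using net/http, assuming a local node serving the bzz-raw endpoint on localhost:8500:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	body := bytes.NewReader([]byte("hello swarm"))
	req, err := http.NewRequest("POST", "http://localhost:8500/bzz-raw:/", body)
	if err != nil {
		panic(err)
	}
	// name the upload tag explicitly; without the header the proxy falls
	// back to an unnamed_tag_<timestamp> name (see InitUploadTag below)
	req.Header.Set("x-swarm-tag", "my_raw_upload")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	hash, _ := ioutil.ReadAll(res.Body)
	// the response body should contain the root hash of the upload
	fmt.Println(res.Status, string(hash))
}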
diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go
index 9c9bde5d6..92489849c 100644
--- a/swarm/api/client/client_test.go
+++ b/swarm/api/client/client_test.go
@@ -25,16 +25,14 @@ import (
"sort"
"testing"
- "github.com/ethereum/go-ethereum/swarm/testutil"
-
- "github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/swarm/api"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
+ "github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
+ "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
)
func serverFunc(api *api.API) swarmhttp.TestServer {
@@ -68,6 +66,10 @@ func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) {
t.Fatal(err)
}
+ // check the tag was created successfully
+ tag := srv.Tags.All()[0]
+ testutil.CheckTag(t, tag, 1, 1, 0, 1)
+
// check we can download the same data
res, isEncrypted, err := client.DownloadRaw(hash)
if err != nil {
@@ -209,6 +211,10 @@ func TestClientUploadDownloadDirectory(t *testing.T) {
t.Fatalf("error uploading directory: %s", err)
}
+ // check the tag was created successfully
+ tag := srv.Tags.All()[0]
+ testutil.CheckTag(t, tag, 9, 9, 0, 9)
+
// check we can download the individual files
checkDownloadFile := func(path string, expected []byte) {
file, err := client.Download(hash, path)
@@ -323,6 +329,7 @@ func TestClientMultipartUpload(t *testing.T) {
defer srv.Close()
// define an uploader which uploads testDirFiles with some data
+ // note: this test should result in SEEN chunks; assert accordingly
data := []byte("some-data")
uploader := UploaderFunc(func(upload UploadFn) error {
for _, name := range testDirFiles {
@@ -348,6 +355,10 @@ func TestClientMultipartUpload(t *testing.T) {
t.Fatal(err)
}
+ // check the tag was created successfully
+ tag := srv.Tags.All()[0]
+ testutil.CheckTag(t, tag, 9, 9, 7, 9)
+
// check we can download the individual files
checkDownloadFile := func(path string) {
file, err := client.Download(hash, path)
diff --git a/swarm/api/config.go b/swarm/api/config.go
index 0a7100c57..3a87488cc 100644
--- a/swarm/api/config.go
+++ b/swarm/api/config.go
@@ -45,7 +45,13 @@ const (
type Config struct {
// serialised/persisted fields
*storage.FileStoreParams
- *storage.LocalStoreParams
+
+ // LocalStore
+ ChunkDbPath string
+ DbCapacity uint64
+ CacheCapacity uint
+ BaseKey []byte
+
*network.HiveParams
Swap *swap.LocalProfile
Pss *pss.PssParams
@@ -78,7 +84,6 @@ type Config struct {
func NewConfig() (c *Config) {
c = &Config{
- LocalStoreParams: storage.NewDefaultLocalStoreParams(),
FileStoreParams: storage.NewFileStoreParams(),
HiveParams: network.NewHiveParams(),
Swap: swap.NewDefaultSwapParams(),
@@ -130,8 +135,9 @@ func (c *Config) Init(prvKey *ecdsa.PrivateKey, nodeKey *ecdsa.PrivateKey) error
c.Swap.Init(c.Contract, prvKey)
}
- c.LocalStoreParams.Init(c.Path)
- c.LocalStoreParams.BaseKey = common.FromHex(c.BzzKey)
+ c.privateKey = prvKey
+ c.ChunkDbPath = filepath.Join(c.Path, "chunks")
+ c.BaseKey = common.FromHex(c.BzzKey)
c.Pss = c.Pss.WithPrivateKey(c.privateKey)
return nil
diff --git a/swarm/api/config_test.go b/swarm/api/config_test.go
index a55da6f7b..82f29f8ea 100644
--- a/swarm/api/config_test.go
+++ b/swarm/api/config_test.go
@@ -41,7 +41,6 @@ func TestConfig(t *testing.T) {
one := NewConfig()
two := NewConfig()
- one.LocalStoreParams = two.LocalStoreParams
if equal := reflect.DeepEqual(one, two); !equal {
t.Fatal("Two default configs are not equal")
}
diff --git a/swarm/api/filesystem_test.go b/swarm/api/filesystem_test.go
index 02f5bff65..b8f37fdd5 100644
--- a/swarm/api/filesystem_test.go
+++ b/swarm/api/filesystem_test.go
@@ -25,13 +25,14 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
)
var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test")
func testFileSystem(t *testing.T, f func(*FileSystem, bool)) {
- testAPI(t, func(api *API, toEncrypt bool) {
+ testAPI(t, func(api *API, _ *chunk.Tags, toEncrypt bool) {
f(NewFileSystem(api), toEncrypt)
})
}
diff --git a/swarm/api/http/middleware.go b/swarm/api/http/middleware.go
index 320da3046..e6e263f4c 100644
--- a/swarm/api/http/middleware.go
+++ b/swarm/api/http/middleware.go
@@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/spancontext"
@@ -86,6 +87,54 @@ func InitLoggingResponseWriter(h http.Handler) http.Handler {
})
}
+// InitUploadTag creates a new tag for an upload to the local HTTP proxy.
+// If a tag is not named via the SwarmTagHeaderName header, a fallback name is used.
+// When the Content-Length header is set, an ETA on chunking is available, since the
+// number of chunks to be split is known in advance (not including enclosing manifest chunks).
+// The tag can later be accessed using the appropriate identifier in the request context.
+func InitUploadTag(h http.Handler, tags *chunk.Tags) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var (
+ tagName string
+ err error
+ estimatedTotal int64 = 0
+ contentType = r.Header.Get("Content-Type")
+ headerTag = r.Header.Get(SwarmTagHeaderName)
+ )
+ if headerTag != "" {
+ tagName = headerTag
+ log.Trace("got tag name from http header", "tagName", tagName)
+ } else {
+ tagName = fmt.Sprintf("unnamed_tag_%d", time.Now().Unix())
+ }
+
+ if !strings.Contains(contentType, "multipart") && r.ContentLength > 0 {
+ log.Trace("calculating tag size", "contentType", contentType, "contentLength", r.ContentLength)
+ uri := GetURI(r.Context())
+ if uri != nil {
+ log.Debug("got uri from context")
+ if uri.Addr == "encrypt" {
+ estimatedTotal = calculateNumberOfChunks(r.ContentLength, true)
+ } else {
+ estimatedTotal = calculateNumberOfChunks(r.ContentLength, false)
+ }
+ }
+ }
+
+ log.Trace("creating tag", "tagName", tagName, "estimatedTotal", estimatedTotal)
+
+ t, err := tags.New(tagName, estimatedTotal)
+ if err != nil {
+ log.Error("error creating tag", "err", err, "tagName", tagName)
+ }
+
+ log.Trace("setting tag id to context", "uid", t.Uid)
+ ctx := sctx.SetTag(r.Context(), t.Uid)
+
+ h.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
func InstrumentOpenTracing(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
uri := GetURI(r.Context())
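InitUploadTag above stores only the tag uid in the request context; handlers resolve it back through the registry before calling DoneSplit, as HandlePostRaw and HandlePostFiles do below. A minimal sketch of that round trip with the sctx helpers used in this patch; the tag name is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/sctx"
)

func main() {
	tags := chunk.NewTags()
	t, err := tags.New("example-upload", 0)
	if err != nil {
		panic(err)
	}

	// middleware side: stash only the uid in the request context
	ctx := sctx.SetTag(context.Background(), t.Uid)

	// handler side: read the uid back and resolve the full tag
	uid := sctx.GetTag(ctx)
	tag, err := tags.Get(uid)
	if err != nil {
		panic(err)
	}
	fmt.Println("resolved tag uid:", tag.Uid)
}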
diff --git a/swarm/api/http/response.go b/swarm/api/http/response.go
index d4e81d7f6..c851a3992 100644
--- a/swarm/api/http/response.go
+++ b/swarm/api/http/response.go
@@ -79,7 +79,7 @@ func respondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg s
}
func respondError(w http.ResponseWriter, r *http.Request, msg string, code int) {
- log.Info("respondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code)
+ log.Info("respondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code, "msg", msg)
respondTemplate(w, r, "error", msg, code)
}
diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go
index 3c6735a73..a336bd82f 100644
--- a/swarm/api/http/server.go
+++ b/swarm/api/http/server.go
@@ -26,6 +26,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math"
"mime"
"mime/multipart"
"net/http"
@@ -38,7 +39,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/rs/cors"
@@ -60,6 +63,8 @@ var (
getListFail = metrics.NewRegisteredCounter("api.http.get.list.fail", nil)
)
+const SwarmTagHeaderName = "x-swarm-tag"
+
type methodHandler map[string]http.Handler
func (m methodHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
@@ -94,6 +99,12 @@ func NewServer(api *api.API, corsString string) *Server {
InstrumentOpenTracing,
}
+ tagAdapter := Adapter(func(h http.Handler) http.Handler {
+ return InitUploadTag(h, api.Tags)
+ })
+
+ defaultPostMiddlewares := append(defaultMiddlewares, tagAdapter)
+
mux := http.NewServeMux()
mux.Handle("/bzz:/", methodHandler{
"GET": Adapt(
@@ -102,7 +113,7 @@ func NewServer(api *api.API, corsString string) *Server {
),
"POST": Adapt(
http.HandlerFunc(server.HandlePostFiles),
- defaultMiddlewares...,
+ defaultPostMiddlewares...,
),
"DELETE": Adapt(
http.HandlerFunc(server.HandleDelete),
@@ -116,7 +127,7 @@ func NewServer(api *api.API, corsString string) *Server {
),
"POST": Adapt(
http.HandlerFunc(server.HandlePostRaw),
- defaultMiddlewares...,
+ defaultPostMiddlewares...,
),
})
mux.Handle("/bzz-immutable:/", methodHandler{
@@ -230,6 +241,12 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
log.Debug("handle.post.raw", "ruid", ruid)
+ tagUid := sctx.GetTag(r.Context())
+ tag, err := s.api.Tags.Get(tagUid)
+ if err != nil {
+ log.Error("handle post raw got an error retrieving tag for DoneSplit", "tagUid", tagUid, "err", err)
+ }
+
postRawCount.Inc(1)
toEncrypt := false
@@ -256,13 +273,16 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) {
return
}
- addr, _, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt)
+ addr, wait, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt)
if err != nil {
postRawFail.Inc(1)
respondError(w, r, err.Error(), http.StatusInternalServerError)
return
}
+ wait(r.Context())
+ tag.DoneSplit(addr)
+
log.Debug("stored content", "ruid", ruid, "key", addr)
w.Header().Set("Content-Type", "text/plain")
@@ -311,7 +331,6 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) {
}
log.Debug("new manifest", "ruid", ruid, "key", addr)
}
-
newAddr, err := s.api.UpdateManifest(r.Context(), addr, func(mw *api.ManifestWriter) error {
switch contentType {
case "application/x-tar":
@@ -334,6 +353,15 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) {
return
}
+ tagUid := sctx.GetTag(r.Context())
+ tag, err := s.api.Tags.Get(tagUid)
+ if err != nil {
+ log.Error("got an error retrieving tag for DoneSplit", "tagUid", tagUid, "err", err)
+ }
+
+ log.Debug("done splitting, setting tag total", "SPLIT", tag.Get(chunk.StateSplit), "TOTAL", tag.Total())
+ tag.DoneSplit(newAddr)
+
log.Debug("stored content", "ruid", ruid, "key", newAddr)
w.Header().Set("Content-Type", "text/plain")
@@ -342,7 +370,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) {
}
func (s *Server) handleTarUpload(r *http.Request, mw *api.ManifestWriter) (storage.Address, error) {
- log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context()))
+ log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context()), "tag", sctx.GetTag(r.Context()))
defaultPath := r.URL.Query().Get("defaultpath")
@@ -837,6 +865,28 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) {
http.ServeContent(w, r, fileName, time.Now(), newBufferedReadSeeker(reader, getFileBufferSize))
}
+// calculateNumberOfChunks calculates the total number of chunks (data, intermediate and root) produced for an arbitrary content length
+func calculateNumberOfChunks(contentLength int64, isEncrypted bool) int64 {
+ if contentLength < 4096 {
+ return 1
+ }
+ branchingFactor := 128
+ if isEncrypted {
+ branchingFactor = 64
+ }
+
+ dataChunks := math.Ceil(float64(contentLength) / float64(4096))
+ totalChunks := dataChunks
+ intermediate := dataChunks / float64(branchingFactor)
+
+ for intermediate > 1 {
+ totalChunks += math.Ceil(intermediate)
+ intermediate = intermediate / float64(branchingFactor)
+ }
+
+ return int64(totalChunks) + 1
+}
+
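A quick worked check of the estimate above, matching the expectations in TestCalculateNumberOfChunks further down: 1,000,000 bytes unencrypted gives ceil(1000000/4096) = 245 data chunks, ceil(245/128) = 2 intermediate chunks, plus one root chunk, i.e. 248; with encryption the branching factor of 64 gives 245 + 4 + 1 = 250. As a snippet (assumed to live in the same http package so it can reach the unexported function):

func exampleChunkEstimate() {
	// unencrypted: 245 data + 2 intermediate + 1 root
	fmt.Println(calculateNumberOfChunks(1000000, false)) // 248
	// encrypted (branching factor 64): 245 data + 4 intermediate + 1 root
	fmt.Println(calculateNumberOfChunks(1000000, true)) // 250
}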
// The size of buffer used for bufio.Reader on LazyChunkReader passed to
// http.ServeContent in HandleGetFile.
// Warning: This value influences the number of chunk requests and chunker join goroutines
diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go
index e82762ce0..1de41d18d 100644
--- a/swarm/api/http/server_test.go
+++ b/swarm/api/http/server_test.go
@@ -44,7 +44,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
- swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/testutil"
@@ -755,6 +754,7 @@ func testBzzTar(encrypted bool, t *testing.T) {
t.Fatal(err)
}
req.Header.Add("Content-Type", "application/x-tar")
+ req.Header.Add(SwarmTagHeaderName, "test-upload")
client := &http.Client{}
resp2, err := client.Do(req)
if err != nil {
@@ -763,6 +763,11 @@ func testBzzTar(encrypted bool, t *testing.T) {
if resp2.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp2.Status)
}
+
+ // check that the tag was written correctly
+ tag := srv.Tags.All()[0]
+ testutil.CheckTag(t, tag, 4, 4, 0, 4)
+
swarmHash, err := ioutil.ReadAll(resp2.Body)
resp2.Body.Close()
if err != nil {
@@ -834,6 +839,75 @@ func testBzzTar(encrypted bool, t *testing.T) {
t.Fatalf("file %s did not pass content assertion", hdr.Name)
}
}
+
+ // now check the tags endpoint
+}
+
+// TestBzzCorrectTagEstimate checks that the HTTP middleware sets the total number of chunks
+// in the tag according to an estimate from the HTTP request Content-Length header divided
+// by the chunk size (4096). The estimate must be checked BEFORE chunking is done, so the
+// request body is streamed slowly through a pipe to keep the upload in flight while the tag is inspected
+func TestBzzCorrectTagEstimate(t *testing.T) {
+ srv := NewTestSwarmServer(t, serverFunc, nil)
+ defer srv.Close()
+
+ for _, v := range []struct {
+ toEncrypt bool
+ expChunks int64
+ }{
+ {toEncrypt: false, expChunks: 248},
+ {toEncrypt: true, expChunks: 250},
+ } {
+ pr, pw := io.Pipe()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ addr := ""
+ if v.toEncrypt {
+ addr = "encrypt"
+ }
+ req, err := http.NewRequest("POST", srv.URL+"/bzz:/"+addr, pr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req = req.WithContext(ctx)
+ req.ContentLength = 1000000
+ req.Header.Add(SwarmTagHeaderName, "1000000")
+
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-time.After(1 * time.Millisecond):
+ _, err := pw.Write([]byte{0})
+ if err != nil {
+ t.Error(err)
+ }
+ }
+ }
+ }()
+ go func() {
+ transport := http.DefaultTransport
+ _, err := transport.RoundTrip(req)
+ if err != nil {
+ t.Error(err)
+ }
+ }()
+ done := false
+ for !done {
+ switch len(srv.Tags.All()) {
+ case 0:
+ <-time.After(10 * time.Millisecond)
+ case 1:
+ tag := srv.Tags.All()[0]
+ testutil.CheckTag(t, tag, 0, 0, 0, v.expChunks)
+ srv.Tags.Delete(tag.Uid)
+ done = true
+ }
+ }
+ }
}
// TestBzzRootRedirect tests that getting the root path of a manifest without
@@ -851,19 +925,11 @@ func testBzzRootRedirect(toEncrypt bool, t *testing.T) {
defer srv.Close()
// create a manifest with some data at the root path
- client := swarm.NewClient(srv.URL)
data := []byte("data")
- file := &swarm.File{
- ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
- ManifestEntry: api.ManifestEntry{
- Path: "",
- ContentType: "text/plain",
- Size: int64(len(data)),
- },
- }
- hash, err := client.Upload(file, "", toEncrypt)
- if err != nil {
- t.Fatal(err)
+ headers := map[string]string{"Content-Type": "text/plain"}
+ res, hash := httpDo("POST", srv.URL+"/bzz:/", bytes.NewReader(data), headers, false, t)
+ if res.StatusCode != http.StatusOK {
+ t.Fatalf("unexpected status code from server %d want %d", res.StatusCode, http.StatusOK)
}
// define a CheckRedirect hook which ensures there is only a single
@@ -1046,21 +1112,10 @@ func TestGet(t *testing.T) {
func TestModify(t *testing.T) {
srv := NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
-
- swarmClient := swarm.NewClient(srv.URL)
- data := []byte("data")
- file := &swarm.File{
- ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
- ManifestEntry: api.ManifestEntry{
- Path: "",
- ContentType: "text/plain",
- Size: int64(len(data)),
- },
- }
-
- hash, err := swarmClient.Upload(file, "", false)
- if err != nil {
- t.Fatal(err)
+ headers := map[string]string{"Content-Type": "text/plain"}
+ res, hash := httpDo("POST", srv.URL+"/bzz:/", bytes.NewReader([]byte("data")), headers, false, t)
+ if res.StatusCode != http.StatusOK {
+ t.Fatalf("unexpected status code from server %d want %d", res.StatusCode, http.StatusOK)
}
for _, testCase := range []struct {
@@ -1283,6 +1338,46 @@ func TestBzzGetFileWithResolver(t *testing.T) {
}
}
+// TestCalculateNumberOfChunks is a unit test for the chunk-number-according-to-content-length
+// calculation
+func TestCalculateNumberOfChunks(t *testing.T) {
+
+ //test cases:
+ for _, tc := range []struct{ len, chunks int64 }{
+ {len: 1000, chunks: 1},
+ {len: 5000, chunks: 3},
+ {len: 10000, chunks: 4},
+ {len: 100000, chunks: 26},
+ {len: 1000000, chunks: 248},
+ {len: 325839339210, chunks: 79550620 + 621490 + 4856 + 38 + 1},
+ } {
+ res := calculateNumberOfChunks(tc.len, false)
+ if res != tc.chunks {
+ t.Fatalf("expected result for %d bytes to be %d got %d", tc.len, tc.chunks, res)
+ }
+ }
+}
+
+// TestCalculateNumberOfChunksEncrypted is a unit test for the chunk-number-according-to-content-length
+// calculation with encryption (branching factor=64)
+func TestCalculateNumberOfChunksEncrypted(t *testing.T) {
+
+ //test cases:
+ for _, tc := range []struct{ len, chunks int64 }{
+ {len: 1000, chunks: 1},
+ {len: 5000, chunks: 3},
+ {len: 10000, chunks: 4},
+ {len: 100000, chunks: 26},
+ {len: 1000000, chunks: 245 + 4 + 1},
+ {len: 325839339210, chunks: 79550620 + 1242979 + 19422 + 304 + 5 + 1},
+ } {
+ res := calculateNumberOfChunks(tc.len, true)
+ if res != tc.chunks {
+ t.Fatalf("expected result for %d bytes to be %d got %d", tc.len, tc.chunks, res)
+ }
+ }
+}
+
// testResolver implements the Resolver interface and either returns the given
// hash if it is set, or returns a "name not found" error
type testResolveValidator struct {
@@ -1308,6 +1403,7 @@ func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) {
func (t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) {
return
}
+
func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) {
return
}
diff --git a/swarm/api/http/test_server.go b/swarm/api/http/test_server.go
index 97fdf0d8a..a3be01e99 100644
--- a/swarm/api/http/test_server.go
+++ b/swarm/api/http/test_server.go
@@ -24,8 +24,10 @@ import (
"testing"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)
type TestServer interface {
@@ -37,17 +39,15 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso
if err != nil {
t.Fatal(err)
}
-
- storeParams := storage.NewDefaultLocalStoreParams()
- storeParams.DbCapacity = 5000000
- storeParams.CacheCapacity = 5000
- storeParams.Init(swarmDir)
- localStore, err := storage.NewLocalStore(storeParams, nil)
+ localStore, err := localstore.New(swarmDir, make([]byte, 32), nil)
if err != nil {
os.RemoveAll(swarmDir)
t.Fatal(err)
}
- fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams())
+
+ tags := chunk.NewTags()
+ fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams(), tags)
+
// Swarm feeds test setup
feedsDir, err := ioutil.TempDir("", "swarm-feeds-test")
if err != nil {
@@ -59,12 +59,13 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso
t.Fatal(err)
}
- swarmApi := api.NewAPI(fileStore, resolver, feeds.Handler, nil)
+ swarmApi := api.NewAPI(fileStore, resolver, feeds.Handler, nil, tags)
apiServer := httptest.NewServer(serverFunc(swarmApi))
tss := &TestSwarmServer{
Server: apiServer,
FileStore: fileStore,
+ Tags: tags,
dir: swarmDir,
Hasher: storage.MakeHashFunc(storage.DefaultHash)(),
cleanup: func() {
@@ -84,6 +85,7 @@ type TestSwarmServer struct {
*httptest.Server
Hasher storage.SwarmHash
FileStore *storage.FileStore
+ Tags *chunk.Tags
dir string
cleanup func()
CurrentTime uint64
diff --git a/swarm/api/inspector.go b/swarm/api/inspector.go
index ea3c4c049..c4151bf20 100644
--- a/swarm/api/inspector.go
+++ b/swarm/api/inspector.go
@@ -19,7 +19,11 @@ package api
import (
"context"
"fmt"
+ "strings"
+ "time"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/storage"
)
@@ -47,21 +51,34 @@ func (inspector *Inspector) ListKnown() []string {
return res
}
-type HasInfo struct {
- Addr string `json:"address"`
- Has bool `json:"has"`
+func (inspector *Inspector) IsSyncing() bool {
+ lastReceivedChunksMsg := metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)
+
+ // last received chunks msg time
+ lrct := time.Unix(0, lastReceivedChunksMsg.Value())
+
+ // if last received chunks msg time is after now-15sec. (i.e. within the last 15sec.) then we say that the node is still syncing
+ // technically this is not correct, because this might have been a retrieve request, but for the time being it works for our purposes
+ // because we know we are not making retrieve requests on the node while checking this
+ return lrct.After(time.Now().Add(-15 * time.Second))
}
// Has checks whether each chunk address is present in the underlying datastore.
// The returned string is a bitstring with one character per queried address, in input
// order: "1" if the underlying datastore has the chunk, "0" if it does not
-func (inspector *Inspector) Has(chunkAddresses []storage.Address) []HasInfo {
- results := make([]HasInfo, 0)
+func (inspector *Inspector) Has(chunkAddresses []storage.Address) string {
+ hostChunks := []string{}
for _, addr := range chunkAddresses {
- res := HasInfo{}
- res.Addr = addr.String()
- res.Has = inspector.netStore.Has(context.Background(), addr)
- results = append(results, res)
+ has, err := inspector.netStore.Has(context.Background(), addr)
+ if err != nil {
+ log.Error(err.Error())
+ }
+ if has {
+ hostChunks = append(hostChunks, "1")
+ } else {
+ hostChunks = append(hostChunks, "0")
+ }
}
- return results
+
+ return strings.Join(hostChunks, "")
}
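Illustrative only: the inspector's Has now returns a compact bitstring instead of a slice of HasInfo structs. The helper and addresses below are hypothetical.

// printHas prints the presence bitstring for the queried addresses; for three
// addresses where the first and last are stored locally it prints "101".
func printHas(i *Inspector, addrs []storage.Address) {
	fmt.Println(i.Has(addrs))
}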
diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go
index 890ed88bd..d753b3f2e 100644
--- a/swarm/api/manifest.go
+++ b/swarm/api/manifest.go
@@ -235,7 +235,6 @@ func loadManifest(ctx context.Context, fileStore *storage.FileStore, addr storag
}
func readManifest(mr storage.LazySectionReader, addr storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
-
// TODO check size for oversized manifests
size, err := mr.Size(mr.Context(), quitC)
if err != nil { // size == 0
diff --git a/swarm/api/manifest_test.go b/swarm/api/manifest_test.go
index 1c8e53c43..c193ebcb4 100644
--- a/swarm/api/manifest_test.go
+++ b/swarm/api/manifest_test.go
@@ -25,6 +25,7 @@ import (
"strings"
"testing"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
)
@@ -42,7 +43,7 @@ func manifest(paths ...string) (manifestReader storage.LazySectionReader) {
func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...string) *manifestTrie {
quitC := make(chan bool)
- fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+ fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC, NOOPDecrypt)
if err != nil {
@@ -99,7 +100,7 @@ func TestGetEntry(t *testing.T) {
func TestExactMatch(t *testing.T) {
quitC := make(chan bool)
mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map")
- fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+ fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(mf, ref, fileStore, false, quitC, nil)
if err != nil {
@@ -132,7 +133,7 @@ func TestAddFileWithManifestPath(t *testing.T) {
reader := &storage.LazyTestSectionReader{
SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))),
}
- fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+ fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(reader, ref, fileStore, false, nil, NOOPDecrypt)
if err != nil {
diff --git a/swarm/api/storage.go b/swarm/api/storage.go
deleted file mode 100644
index 254375b77..000000000
--- a/swarm/api/storage.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package api
-
-import (
- "context"
- "path"
-
- "github.com/ethereum/go-ethereum/swarm/storage"
-)
-
-type Response struct {
- MimeType string
- Status int
- Size int64
- // Content []byte
- Content string
-}
-
-// implements a service
-//
-// DEPRECATED: Use the HTTP API instead
-type Storage struct {
- api *API
-}
-
-func NewStorage(api *API) *Storage {
- return &Storage{api}
-}
-
-// Put uploads the content to the swarm with a simple manifest speficying
-// its content type
-//
-// DEPRECATED: Use the HTTP API instead
-func (s *Storage) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (storage.Address, func(context.Context) error, error) {
- return s.api.Put(ctx, content, contentType, toEncrypt)
-}
-
-// Get retrieves the content from bzzpath and reads the response in full
-// It returns the Response object, which serialises containing the
-// response body as the value of the Content field
-// NOTE: if error is non-nil, sResponse may still have partial content
-// the actual size of which is given in len(resp.Content), while the expected
-// size is resp.Size
-//
-// DEPRECATED: Use the HTTP API instead
-func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) {
- uri, err := Parse(path.Join("bzz:/", bzzpath))
- if err != nil {
- return nil, err
- }
- addr, err := s.api.Resolve(ctx, uri.Addr)
- if err != nil {
- return nil, err
- }
- reader, mimeType, status, _, err := s.api.Get(ctx, nil, addr, uri.Path)
- if err != nil {
- return nil, err
- }
- quitC := make(chan bool)
- expsize, err := reader.Size(ctx, quitC)
- if err != nil {
- return nil, err
- }
- body := make([]byte, expsize)
- size, err := reader.Read(body)
- if int64(size) == expsize {
- err = nil
- }
- return &Response{mimeType, status, expsize, string(body[:size])}, err
-}
diff --git a/swarm/api/storage_test.go b/swarm/api/storage_test.go
deleted file mode 100644
index ef96972b6..000000000
--- a/swarm/api/storage_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package api
-
-import (
- "context"
- "testing"
-)
-
-func testStorage(t *testing.T, f func(*Storage, bool)) {
- testAPI(t, func(api *API, toEncrypt bool) {
- f(NewStorage(api), toEncrypt)
- })
-}
-
-func TestStoragePutGet(t *testing.T) {
- testStorage(t, func(api *Storage, toEncrypt bool) {
- content := "hello"
- exp := expResponse(content, "text/plain", 0)
- // exp := expResponse([]byte(content), "text/plain", 0)
- ctx := context.TODO()
- bzzkey, wait, err := api.Put(ctx, content, exp.MimeType, toEncrypt)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- err = wait(ctx)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- bzzhash := bzzkey.Hex()
- // to check put against the API#Get
- resp0 := testGet(t, api.api, bzzhash, "")
- checkResponse(t, resp0, exp)
-
- // check storage#Get
- resp, err := api.Get(context.TODO(), bzzhash)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- checkResponse(t, &testResponse{nil, resp}, exp)
- })
-}
diff --git a/swarm/chunk/chunk.go b/swarm/chunk/chunk.go
index 7540af8ce..c44292bb9 100644
--- a/swarm/chunk/chunk.go
+++ b/swarm/chunk/chunk.go
@@ -1,6 +1,23 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package chunk
import (
+ "context"
"errors"
"fmt"
@@ -28,7 +45,7 @@ type chunk struct {
sdata []byte
}
-func NewChunk(addr Address, data []byte) *chunk {
+func NewChunk(addr Address, data []byte) Chunk {
return &chunk{
addr: addr,
sdata: data,
@@ -107,3 +124,138 @@ func Proximity(one, other []byte) (ret int) {
}
return MaxPO
}
+
+// ModeGet enumerates different Getter modes.
+type ModeGet int
+
+func (m ModeGet) String() string {
+ switch m {
+ case ModeGetRequest:
+ return "Request"
+ case ModeGetSync:
+ return "Sync"
+ case ModeGetLookup:
+ return "Lookup"
+ default:
+ return "Unknown"
+ }
+}
+
+// Getter modes.
+const (
+ // ModeGetRequest: when accessed for retrieval
+ ModeGetRequest ModeGet = iota
+ // ModeGetSync: when accessed for syncing or proof of custody request
+ ModeGetSync
+ // ModeGetLookup: when accessed to look up a chunk in feeds or other places
+ ModeGetLookup
+)
+
+// ModePut enumerates different Putter modes.
+type ModePut int
+
+func (m ModePut) String() string {
+ switch m {
+ case ModePutRequest:
+ return "Request"
+ case ModePutSync:
+ return "Sync"
+ case ModePutUpload:
+ return "Upload"
+ default:
+ return "Unknown"
+ }
+}
+
+// Putter modes.
+const (
+ // ModePutRequest: when a chunk is received as a result of retrieve request and delivery
+ ModePutRequest ModePut = iota
+ // ModePutSync: when a chunk is received via syncing
+ ModePutSync
+ // ModePutUpload: when a chunk is created by local upload
+ ModePutUpload
+)
+
+// ModeSet enumerates different Setter modes.
+type ModeSet int
+
+func (m ModeSet) String() string {
+ switch m {
+ case ModeSetAccess:
+ return "Access"
+ case ModeSetSync:
+ return "Sync"
+ case ModeSetRemove:
+ return "Remove"
+ default:
+ return "Unknown"
+ }
+}
+
+// Setter modes.
+const (
+ // ModeSetAccess: when an update request is received for a chunk or chunk is retrieved for delivery
+ ModeSetAccess ModeSet = iota
+ // ModeSetSync: when a chunk is added to a pull sync batch or when a push sync receipt is received
+ ModeSetSync
+ // ModeSetRemove: when a chunk is removed
+ ModeSetRemove
+)
+
+// Descriptor holds information required for Pull syncing. This struct
+// is provided by subscribing to pull index.
+type Descriptor struct {
+ Address Address
+ BinID uint64
+}
+
+func (d *Descriptor) String() string {
+ if d == nil {
+ return ""
+ }
+ return fmt.Sprintf("%s bin id %v", d.Address.Hex(), d.BinID)
+}
+
+type Store interface {
+ Get(ctx context.Context, mode ModeGet, addr Address) (ch Chunk, err error)
+ Put(ctx context.Context, mode ModePut, ch Chunk) (exists bool, err error)
+ Has(ctx context.Context, addr Address) (yes bool, err error)
+ Set(ctx context.Context, mode ModeSet, addr Address) (err error)
+ LastPullSubscriptionBinID(bin uint8) (id uint64, err error)
+ SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, stop func())
+ Close() (err error)
+}
+
+// Validator validates a chunk.
+type Validator interface {
+ Validate(ch Chunk) bool
+}
+
+// ValidatorStore encapsulates Store by decorating the Put method
+// with a validators check.
+type ValidatorStore struct {
+ Store
+ validators []Validator
+}
+
+// NewValidatorStore returns a new ValidatorStore which uses
+// provided validators to validate chunks on Put.
+func NewValidatorStore(store Store, validators ...Validator) (s *ValidatorStore) {
+ return &ValidatorStore{
+ Store: store,
+ validators: validators,
+ }
+}
+
+// Put overrides the Store Put method with a validators check. If one of the validators
+// returns true, the chunk is considered valid and the Store Put method is called.
+// If all validators return false, ErrChunkInvalid is returned.
+func (s *ValidatorStore) Put(ctx context.Context, mode ModePut, ch Chunk) (exists bool, err error) {
+ for _, v := range s.validators {
+ if v.Validate(ch) {
+ return s.Store.Put(ctx, mode, ch)
+ }
+ }
+ return false, ErrChunkInvalid
+}
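A minimal sketch of plugging a custom Validator into the new ValidatorStore. The size bound is purely illustrative, and it assumes the Chunk interface exposes Data() as elsewhere in this package.

// contentSizeValidator accepts only chunks whose payload fits a fixed bound.
type contentSizeValidator struct {
	max int
}

func (v contentSizeValidator) Validate(ch Chunk) bool {
	return len(ch.Data()) <= v.max
}

// newBoundedStore wraps an existing Store so that Put rejects oversized
// chunks with ErrChunkInvalid.
func newBoundedStore(s Store) *ValidatorStore {
	return NewValidatorStore(s, contentSizeValidator{max: 4096 + 8})
}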
diff --git a/swarm/chunk/tag.go b/swarm/chunk/tag.go
new file mode 100644
index 000000000..ee700d22b
--- /dev/null
+++ b/swarm/chunk/tag.go
@@ -0,0 +1,218 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package chunk
+
+import (
+ "encoding/binary"
+ "errors"
+ "sync/atomic"
+ "time"
+)
+
+var (
+ errExists = errors.New("already exists")
+ errNA = errors.New("not available yet")
+ errNoETA = errors.New("unable to calculate ETA")
+ errTagNotFound = errors.New("tag not found")
+)
+
+// State is the enum type for chunk states
+type State = uint32
+
+const (
+ StateSplit State = iota // chunk has been processed by filehasher/swarm safe call
+ StateStored // chunk stored locally
+ StateSeen // chunk previously seen
+ StateSent // chunk sent to neighbourhood
+ StateSynced // proof is received; chunk removed from sync db; chunk is available everywhere
+)
+
+// Tag represents info on the status of new chunks
+type Tag struct {
+ Uid uint32 // a unique identifier for this tag
+ Name string // a name tag for this tag
+ Address Address // the associated swarm hash for this tag
+ total int64 // total chunks belonging to a tag
+ split int64 // number of chunks already processed by splitter for hashing
+ seen int64 // number of chunks already seen
+ stored int64 // number of chunks already stored locally
+ sent int64 // number of chunks sent for push syncing
+ synced int64 // number of chunks synced with proof
+ startedAt time.Time // tag started to calculate ETA
+}
+
+// NewTag creates a new tag with the given uid, name and expected total chunk count,
+// recording the current time as the start time used for ETA calculations
+func NewTag(uid uint32, s string, total int64) *Tag {
+ t := &Tag{
+ Uid: uid,
+ Name: s,
+ startedAt: time.Now(),
+ total: total,
+ }
+ return t
+}
+
+// Inc increments the count for a state
+func (t *Tag) Inc(state State) {
+ var v *int64
+ switch state {
+ case StateSplit:
+ v = &t.split
+ case StateStored:
+ v = &t.stored
+ case StateSeen:
+ v = &t.seen
+ case StateSent:
+ v = &t.sent
+ case StateSynced:
+ v = &t.synced
+ }
+ atomic.AddInt64(v, 1)
+}
+
+// Get returns the count for a state on a tag
+func (t *Tag) Get(state State) int64 {
+ var v *int64
+ switch state {
+ case StateSplit:
+ v = &t.split
+ case StateStored:
+ v = &t.stored
+ case StateSeen:
+ v = &t.seen
+ case StateSent:
+ v = &t.sent
+ case StateSynced:
+ v = &t.synced
+ }
+ return atomic.LoadInt64(v)
+}
+
+// Total returns the total count
+func (t *Tag) Total() int64 {
+ return atomic.LoadInt64(&t.total)
+}
+
+// DoneSplit sets the total count to the SPLIT count and sets the associated swarm hash for this tag.
+// It is meant to be called when the splitter finishes, for input streams of unknown size
+func (t *Tag) DoneSplit(address Address) int64 {
+ total := atomic.LoadInt64(&t.split)
+ atomic.StoreInt64(&t.total, total)
+ t.Address = address
+ return total
+}
+
+// Status returns the value of state and the total count
+func (t *Tag) Status(state State) (int64, int64, error) {
+ count, seen, total := t.Get(state), atomic.LoadInt64(&t.seen), atomic.LoadInt64(&t.total)
+ if total == 0 {
+ return count, total, errNA
+ }
+ switch state {
+ case StateSplit, StateStored, StateSeen:
+ return count, total, nil
+ case StateSent, StateSynced:
+ stored := atomic.LoadInt64(&t.stored)
+ if stored < total {
+ return count, total - seen, errNA
+ }
+ return count, total - seen, nil
+ }
+ return count, total, errNA
+}
+
+// ETA returns the time of completion estimated based on time passed and rate of completion
+func (t *Tag) ETA(state State) (time.Time, error) {
+ cnt, total, err := t.Status(state)
+ if err != nil {
+ return time.Time{}, err
+ }
+ if cnt == 0 || total == 0 {
+ return time.Time{}, errNoETA
+ }
+ diff := time.Since(t.startedAt)
+ dur := time.Duration(total) * diff / time.Duration(cnt)
+ return t.startedAt.Add(dur), nil
+}
+
+// MarshalBinary marshals the tag into a byte slice
+func (tag *Tag) MarshalBinary() (data []byte, err error) {
+ buffer := make([]byte, 4)
+ binary.BigEndian.PutUint32(buffer, tag.Uid)
+ encodeInt64Append(&buffer, tag.total)
+ encodeInt64Append(&buffer, tag.split)
+ encodeInt64Append(&buffer, tag.seen)
+ encodeInt64Append(&buffer, tag.stored)
+ encodeInt64Append(&buffer, tag.sent)
+ encodeInt64Append(&buffer, tag.synced)
+
+ intBuffer := make([]byte, 8)
+
+ n := binary.PutVarint(intBuffer, tag.startedAt.Unix())
+ buffer = append(buffer, intBuffer[:n]...)
+
+ n = binary.PutVarint(intBuffer, int64(len(tag.Address)))
+ buffer = append(buffer, intBuffer[:n]...)
+
+ buffer = append(buffer, tag.Address[:]...)
+
+ buffer = append(buffer, []byte(tag.Name)...)
+
+ return buffer, nil
+}
+
+// UnmarshalBinary unmarshals a byte slice into a tag
+func (tag *Tag) UnmarshalBinary(buffer []byte) error {
+ if len(buffer) < 13 {
+ return errors.New("buffer too short")
+ }
+ tag.Uid = binary.BigEndian.Uint32(buffer)
+ buffer = buffer[4:]
+
+ tag.total = decodeInt64Splice(&buffer)
+ tag.split = decodeInt64Splice(&buffer)
+ tag.seen = decodeInt64Splice(&buffer)
+ tag.stored = decodeInt64Splice(&buffer)
+ tag.sent = decodeInt64Splice(&buffer)
+ tag.synced = decodeInt64Splice(&buffer)
+
+ t, n := binary.Varint(buffer)
+ tag.startedAt = time.Unix(t, 0)
+ buffer = buffer[n:]
+
+ t, n = binary.Varint(buffer)
+ buffer = buffer[n:]
+ if t > 0 {
+ tag.Address = buffer[:t]
+ }
+ tag.Name = string(buffer[t:])
+
+ return nil
+}
+
+func encodeInt64Append(buffer *[]byte, val int64) {
+ intBuffer := make([]byte, 8)
+ n := binary.PutVarint(intBuffer, val)
+ *buffer = append(*buffer, intBuffer[:n]...)
+}
+
+func decodeInt64Splice(buffer *[]byte) int64 {
+ val, n := binary.Varint((*buffer))
+ *buffer = (*buffer)[n:]
+ return val
+}
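Rough same-package sketch of the Tag lifecycle for an upload of known size: counters are bumped per chunk as the splitter and local store progress, then Status and ETA report completion (assumes a fmt import; the uid, name and count are illustrative).

func exampleTagLifecycle() {
	tag := NewTag(42, "example-upload", 100)
	for i := 0; i < 100; i++ {
		tag.Inc(StateSplit)
		tag.Inc(StateStored)
	}
	if count, total, err := tag.Status(StateStored); err == nil {
		fmt.Printf("stored %d of %d chunks\n", count, total)
	}
	if eta, err := tag.ETA(StateStored); err == nil {
		fmt.Println("estimated completion:", eta)
	}
}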
diff --git a/swarm/chunk/tag_test.go b/swarm/chunk/tag_test.go
new file mode 100644
index 000000000..e6acfb185
--- /dev/null
+++ b/swarm/chunk/tag_test.go
@@ -0,0 +1,273 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package chunk
+
+import (
+ "bytes"
+ "sync"
+ "testing"
+ "time"
+)
+
+var (
+ allStates = []State{StateSplit, StateStored, StateSeen, StateSent, StateSynced}
+)
+
+// TestTagSingleIncrements tests if Inc increments the tag state value
+func TestTagSingleIncrements(t *testing.T) {
+ tg := &Tag{total: 10}
+
+ tc := []struct {
+ state uint32
+ inc int
+ expcount int64
+ exptotal int64
+ }{
+ {state: StateSplit, inc: 10, expcount: 10, exptotal: 10},
+ {state: StateStored, inc: 9, expcount: 9, exptotal: 9},
+ {state: StateSeen, inc: 1, expcount: 1, exptotal: 10},
+ {state: StateSent, inc: 9, expcount: 9, exptotal: 9},
+ {state: StateSynced, inc: 9, expcount: 9, exptotal: 9},
+ }
+
+ for _, tc := range tc {
+ for i := 0; i < tc.inc; i++ {
+ tg.Inc(tc.state)
+ }
+ }
+
+ for _, tc := range tc {
+ if tg.Get(tc.state) != tc.expcount {
+ t.Fatalf("not incremented")
+ }
+ }
+}
+
+// TestTagStatus is a unit test to cover Tag.Status method functionality
+func TestTagStatus(t *testing.T) {
+ tg := &Tag{total: 10}
+ tg.Inc(StateSeen)
+ tg.Inc(StateSent)
+ tg.Inc(StateSynced)
+
+ for i := 0; i < 10; i++ {
+ tg.Inc(StateSplit)
+ tg.Inc(StateStored)
+ }
+ for _, v := range []struct {
+ state State
+ expVal int64
+ expTotal int64
+ }{
+ {state: StateStored, expVal: 10, expTotal: 10},
+ {state: StateSplit, expVal: 10, expTotal: 10},
+ {state: StateSeen, expVal: 1, expTotal: 10},
+ {state: StateSent, expVal: 1, expTotal: 9},
+ {state: StateSynced, expVal: 1, expTotal: 9},
+ } {
+ val, total, err := tg.Status(v.state)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if val != v.expVal {
+ t.Fatalf("should be %d, got %d", v.expVal, val)
+ }
+ if total != v.expTotal {
+ t.Fatalf("expected total to be %d, got %d", v.expTotal, total)
+ }
+ }
+}
+
+// tests ETA is precise
+func TestTagETA(t *testing.T) {
+ now := time.Now()
+ maxDiff := 100000 // 100 microsecond
+ tg := &Tag{total: 10, startedAt: now}
+ time.Sleep(100 * time.Millisecond)
+ tg.Inc(StateSplit)
+ eta, err := tg.ETA(StateSplit)
+ if err != nil {
+ t.Fatal(err)
+ }
+ diff := time.Until(eta) - 9*time.Since(now)
+ if int(diff) > maxDiff {
+ t.Fatalf("ETA is not precise, got diff %v > .1ms", diff)
+ }
+}
+
+// TestTagConcurrentIncrements tests Inc calls concurrently
+func TestTagConcurrentIncrements(t *testing.T) {
+ tg := &Tag{}
+ n := 1000
+ wg := sync.WaitGroup{}
+ wg.Add(5 * n)
+ for _, f := range allStates {
+ go func(f State) {
+ for j := 0; j < n; j++ {
+ go func() {
+ tg.Inc(f)
+ wg.Done()
+ }()
+ }
+ }(f)
+ }
+ wg.Wait()
+ for _, f := range allStates {
+ v := tg.Get(f)
+ if v != int64(n) {
+ t.Fatalf("expected state %v to be %v, got %v", f, n, v)
+ }
+ }
+}
+
+// TestTagsMultipleConcurrentIncrementsSyncMap tests concurrent Inc calls across multiple tags
+func TestTagsMultipleConcurrentIncrementsSyncMap(t *testing.T) {
+ ts := NewTags()
+ n := 100
+ wg := sync.WaitGroup{}
+ wg.Add(10 * 5 * n)
+ for i := 0; i < 10; i++ {
+ s := string([]byte{uint8(i)})
+ tag, err := ts.New(s, int64(n))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, f := range allStates {
+ go func(tag *Tag, f State) {
+ for j := 0; j < n; j++ {
+ go func() {
+ tag.Inc(f)
+ wg.Done()
+ }()
+ }
+ }(tag, f)
+ }
+ }
+ wg.Wait()
+ i := 0
+ ts.Range(func(k, v interface{}) bool {
+ i++
+ uid := k.(uint32)
+ for _, f := range allStates {
+ tag, err := ts.Get(uid)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stateVal := tag.Get(f)
+ if stateVal != int64(n) {
+ t.Fatalf("expected tag %v state %v to be %v, got %v", uid, f, n, v)
+ }
+ }
+ return true
+
+ })
+ if i != 10 {
+ t.Fatal("not enough tagz")
+ }
+}
+
+// TestMarshallingWithAddr tests that marshalling and unmarshalling are done correctly when the
+// tag Address (byte slice) contains some arbitrary value
+func TestMarshallingWithAddr(t *testing.T) {
+ tg := NewTag(111, "test/tag", 10)
+ tg.Address = []byte{0, 1, 2, 3, 4, 5, 6}
+
+ for _, f := range allStates {
+ tg.Inc(f)
+ }
+
+ b, err := tg.MarshalBinary()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ unmarshalledTag := &Tag{}
+ err = unmarshalledTag.UnmarshalBinary(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if unmarshalledTag.Uid != tg.Uid {
+ t.Fatalf("tag uids not equal. want %d got %d", tg.Uid, unmarshalledTag.Uid)
+ }
+
+ if unmarshalledTag.Name != tg.Name {
+ t.Fatalf("tag names not equal. want %s got %s", tg.Name, unmarshalledTag.Name)
+ }
+
+ for _, state := range allStates {
+ uv, tv := unmarshalledTag.Get(state), tg.Get(state)
+ if uv != tv {
+ t.Fatalf("state %d inconsistent. expected %d to equal %d", state, uv, tv)
+ }
+ }
+
+ if unmarshalledTag.Total() != tg.Total() {
+ t.Fatalf("tag names not equal. want %d got %d", tg.Total(), unmarshalledTag.Total())
+ }
+
+ if len(unmarshalledTag.Address) != len(tg.Address) {
+ t.Fatalf("tag addresses length mismatch, want %d, got %d", len(tg.Address), len(unmarshalledTag.Address))
+ }
+
+ if !bytes.Equal(unmarshalledTag.Address, tg.Address) {
+ t.Fatalf("expected tag address to be %v got %v", unmarshalledTag.Address, tg.Address)
+ }
+}
+
+// TestMarshallingNoAddr tests that marshalling and unmarshalling are done correctly
+// when the tag Address (byte slice) is empty
+func TestMarshallingNoAddr(t *testing.T) {
+ tg := NewTag(111, "test/tag", 10)
+ for _, f := range allStates {
+ tg.Inc(f)
+ }
+
+ b, err := tg.MarshalBinary()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ unmarshalledTag := &Tag{}
+ err = unmarshalledTag.UnmarshalBinary(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if unmarshalledTag.Uid != tg.Uid {
+ t.Fatalf("tag uids not equal. want %d got %d", tg.Uid, unmarshalledTag.Uid)
+ }
+
+ if unmarshalledTag.Name != tg.Name {
+ t.Fatalf("tag names not equal. want %s got %s", tg.Name, unmarshalledTag.Name)
+ }
+
+ for _, state := range allStates {
+ uv, tv := unmarshalledTag.Get(state), tg.Get(state)
+ if uv != tv {
+ t.Fatalf("state %d inconsistent. expected %d to equal %d", state, uv, tv)
+ }
+ }
+
+ if unmarshalledTag.Total() != tg.Total() {
+ t.Fatalf("tag names not equal. want %d got %d", tg.Total(), unmarshalledTag.Total())
+ }
+
+ if len(unmarshalledTag.Address) != len(tg.Address) {
+ t.Fatalf("expected tag addresses to be equal length")
+ }
+}
diff --git a/swarm/chunk/tags.go b/swarm/chunk/tags.go
new file mode 100644
index 000000000..435f5d706
--- /dev/null
+++ b/swarm/chunk/tags.go
@@ -0,0 +1,96 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package chunk
+
+import (
+ "context"
+ "errors"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/swarm/sctx"
+)
+
+// Tags holds tag information indexed by a unique random uint32
+type Tags struct {
+ tags *sync.Map
+ rng *rand.Rand
+}
+
+// NewTags creates a tags object
+func NewTags() *Tags {
+ return &Tags{
+ tags: &sync.Map{},
+ rng: rand.New(rand.NewSource(time.Now().Unix())),
+ }
+}
+
+// New creates a new tag with the given name, stores it under a random unique uid and returns it.
+// It returns an error if a tag with the generated uid already exists
+func (ts *Tags) New(s string, total int64) (*Tag, error) {
+ t := &Tag{
+ Uid: ts.rng.Uint32(),
+ Name: s,
+ startedAt: time.Now(),
+ total: total,
+ }
+ if _, loaded := ts.tags.LoadOrStore(t.Uid, t); loaded {
+ return nil, errExists
+ }
+ return t, nil
+}
+
+// All returns all existing tags in Tags' sync.Map
+// Note that tags are returned in no particular order
+func (ts *Tags) All() (t []*Tag) {
+ ts.tags.Range(func(k, v interface{}) bool {
+ t = append(t, v.(*Tag))
+
+ return true
+ })
+
+ return t
+}
+
+// Get returns the underlying tag for the uid or an error if not found
+func (ts *Tags) Get(uid uint32) (*Tag, error) {
+ t, ok := ts.tags.Load(uid)
+ if !ok {
+ return nil, errors.New("tag not found")
+ }
+ return t.(*Tag), nil
+}
+
+// GetFromContext gets a tag from the tag uid stored in the context
+func (ts *Tags) GetFromContext(ctx context.Context) (*Tag, error) {
+ uid := sctx.GetTag(ctx)
+ t, ok := ts.tags.Load(uid)
+ if !ok {
+ return nil, errTagNotFound
+ }
+ return t.(*Tag), nil
+}
+
+// Range exposes sync.Map's iterator
+func (ts *Tags) Range(fn func(k, v interface{}) bool) {
+ ts.tags.Range(fn)
+}
+
+func (ts *Tags) Delete(k interface{}) {
+ ts.tags.Delete(k)
+}
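Sketch of how a tag travels with a request, mirroring the HTTP middleware above: create it in the Tags registry, stash its uid in the context with sctx.SetTag, and recover it downstream with GetFromContext. Same-package sketch; names are illustrative.

func exampleTagThroughContext(ctx context.Context, ts *Tags) error {
	t, err := ts.New("example", 0) // total 0: size unknown, DoneSplit will set it later
	if err != nil {
		return err
	}
	ctx = sctx.SetTag(ctx, t.Uid)

	// ... later, in code that only receives the context:
	same, err := ts.GetFromContext(ctx)
	if err != nil {
		return err
	}
	same.Inc(StateSplit)
	return nil
}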
diff --git a/swarm/chunk/tags_test.go b/swarm/chunk/tags_test.go
new file mode 100644
index 000000000..f818c4c5c
--- /dev/null
+++ b/swarm/chunk/tags_test.go
@@ -0,0 +1,48 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package chunk
+
+import "testing"
+
+func TestAll(t *testing.T) {
+ ts := NewTags()
+
+ ts.New("1", 1)
+ ts.New("2", 1)
+
+ all := ts.All()
+
+ if len(all) != 2 {
+ t.Fatalf("expected length to be 2 got %d", len(all))
+ }
+
+ if n := all[0].Total(); n != 1 {
+ t.Fatalf("expected tag 0 total to be 1 got %d", n)
+ }
+
+ if n := all[1].Total(); n != 1 {
+ t.Fatalf("expected tag 1 total to be 1 got %d", n)
+ }
+
+ ts.New("3", 1)
+ all = ts.All()
+
+ if len(all) != 3 {
+ t.Fatalf("expected length to be 3 got %d", len(all))
+ }
+
+}
diff --git a/swarm/fuse/swarmfs_test.go b/swarm/fuse/swarmfs_test.go
index 460e31c4e..77573f0fc 100644
--- a/swarm/fuse/swarmfs_test.go
+++ b/swarm/fuse/swarmfs_test.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/testutil"
colorable "github.com/mattn/go-colorable"
@@ -1614,11 +1615,11 @@ func TestFUSE(t *testing.T) {
}
defer os.RemoveAll(datadir)
- fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
+ fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), chunk.NewTags())
if err != nil {
t.Fatal(err)
}
- ta := &testAPI{api: api.NewAPI(fileStore, nil, nil, nil)}
+ ta := &testAPI{api: api.NewAPI(fileStore, nil, nil, nil, chunk.NewTags())}
//run a short suite of tests
//approx time: 28s
diff --git a/swarm/network/hive.go b/swarm/network/hive.go
index 2eb521f1d..ad51b29c2 100644
--- a/swarm/network/hive.go
+++ b/swarm/network/hive.go
@@ -116,7 +116,7 @@ func (h *Hive) Stop() error {
log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
h.EachConn(nil, 255, func(p *Peer, _ int) bool {
log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
- p.Drop(nil)
+ p.Drop()
return true
})
diff --git a/swarm/network/hive_test.go b/swarm/network/hive_test.go
index d03db42bc..3e9732216 100644
--- a/swarm/network/hive_test.go
+++ b/swarm/network/hive_test.go
@@ -117,7 +117,7 @@ func TestHiveStatePersistance(t *testing.T) {
const peersCount = 5
- startHive := func(t *testing.T, dir string) (h *Hive) {
+ startHive := func(t *testing.T, dir string) (h *Hive, cleanupFunc func()) {
store, err := state.NewDBStore(dir)
if err != nil {
t.Fatal(err)
@@ -137,27 +137,30 @@ func TestHiveStatePersistance(t *testing.T) {
if err := h.Start(s.Server); err != nil {
t.Fatal(err)
}
- return h
+
+ cleanupFunc = func() {
+ err := h.Stop()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s.Stop()
+ }
+ return h, cleanupFunc
}
- h1 := startHive(t, dir)
+ h1, cleanup1 := startHive(t, dir)
peers := make(map[string]bool)
for i := 0; i < peersCount; i++ {
raddr := RandomAddr()
h1.Register(raddr)
peers[raddr.String()] = true
}
- if err = h1.Stop(); err != nil {
- t.Fatal(err)
- }
+ cleanup1()
// start the hive and check that we know of all expected peers
- h2 := startHive(t, dir)
- defer func() {
- if err = h2.Stop(); err != nil {
- t.Fatal(err)
- }
- }()
+ h2, cleanup2 := startHive(t, dir)
+ cleanup2()
i := 0
h2.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int) bool {
diff --git a/swarm/network/kademlia.go b/swarm/network/kademlia.go
index dd6de44fd..90491ab31 100644
--- a/swarm/network/kademlia.go
+++ b/swarm/network/kademlia.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/pot"
sv "github.com/ethereum/go-ethereum/swarm/version"
@@ -82,14 +83,14 @@ func NewKadParams() *KadParams {
// Kademlia is a table of live peers and a db of known peers (node records)
type Kademlia struct {
lock sync.RWMutex
- *KadParams // Kademlia configuration parameters
- base []byte // immutable baseaddress of the table
- addrs *pot.Pot // pots container for known peer addresses
- conns *pot.Pot // pots container for live peer connections
- depth uint8 // stores the last current depth of saturation
- nDepth int // stores the last neighbourhood depth
- nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
- addrCountC chan int // returned by AddrCountC function to signal peer count change
+ *KadParams // Kademlia configuration parameters
+ base []byte // immutable baseaddress of the table
+ addrs *pot.Pot // pots container for known peer addresses
+ conns *pot.Pot // pots container for live peer connections
+ depth uint8 // stores the last current depth of saturation
+ nDepth int // stores the last neighbourhood depth
+ nDepthMu sync.RWMutex // protects neighbourhood depth nDepth
+ nDepthSig []chan struct{} // signals when neighbourhood depth nDepth is changed
}
// NewKademlia creates a Kademlia table for base address addr
@@ -138,6 +139,9 @@ func (e *entry) Hex() string {
func (k *Kademlia) Register(peers ...*BzzAddr) error {
k.lock.Lock()
defer k.lock.Unlock()
+
+ metrics.GetOrRegisterCounter("kad.register", nil).Inc(1)
+
var known, size int
for _, p := range peers {
log.Trace("kademlia trying to register", "addr", p)
@@ -164,8 +168,6 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
return newEntry(p)
}
- log.Trace("found among known peers, underlay addr is same, do nothing", "new", p, "old", e.BzzAddr)
-
return v
})
if found {
@@ -173,12 +175,8 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
}
size++
}
- // send new address count value only if there are new addresses
- if k.addrCountC != nil && size-known > 0 {
- k.addrCountC <- k.addrs.Size()
- }
- k.sendNeighbourhoodDepthChange()
+ k.setNeighbourhoodDepth()
return nil
}
@@ -186,6 +184,9 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
func (k *Kademlia) SuggestPeer() (suggestedPeer *BzzAddr, saturationDepth int, changed bool) {
k.lock.Lock()
defer k.lock.Unlock()
+
+ metrics.GetOrRegisterCounter("kad.suggestpeer", nil).Inc(1)
+
radius := neighbourhoodRadiusForPot(k.conns, k.NeighbourhoodSize, k.base)
// collect undersaturated bins in ascending order of number of connected peers
// and from shallow to deep (ascending order of PO)
@@ -297,6 +298,9 @@ func (k *Kademlia) SuggestPeer() (suggestedPeer *BzzAddr, saturationDepth int, c
func (k *Kademlia) On(p *Peer) (uint8, bool) {
k.lock.Lock()
defer k.lock.Unlock()
+
+ metrics.GetOrRegisterCounter("kad.on", nil).Inc(1)
+
var ins bool
k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(v pot.Val) pot.Val {
// if not found live
@@ -315,12 +319,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
return a
})
- // send new address count value only if the peer is inserted
- if k.addrCountC != nil {
- k.addrCountC <- k.addrs.Size()
- }
}
- log.Trace(k.string())
// calculate if depth of saturation changed
depth := uint8(k.saturation())
var changed bool
@@ -328,75 +327,72 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
changed = true
k.depth = depth
}
- k.sendNeighbourhoodDepthChange()
+ k.setNeighbourhoodDepth()
return k.depth, changed
}
-// NeighbourhoodDepthC returns the channel that sends a new kademlia
-// neighbourhood depth on each change.
-// Not receiving from the returned channel will block On function
-// when the neighbourhood depth is changed.
-// TODO: Why is this exported, and if it should be; why can't we have more subscribers than one?
-func (k *Kademlia) NeighbourhoodDepthC() <-chan int {
- k.lock.Lock()
- defer k.lock.Unlock()
- if k.nDepthC == nil {
- k.nDepthC = make(chan int)
+// setNeighbourhoodDepth calculates neighbourhood depth with depthForPot,
+// sets it to the nDepth and sends a signal to every nDepthSig channel.
+func (k *Kademlia) setNeighbourhoodDepth() {
+ nDepth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
+ var changed bool
+ k.nDepthMu.Lock()
+ if nDepth != k.nDepth {
+ k.nDepth = nDepth
+ changed = true
}
- return k.nDepthC
-}
+ k.nDepthMu.Unlock()
-// CloseNeighbourhoodDepthC closes the channel returned by
-// NeighbourhoodDepthC and stops sending neighbourhood change.
-func (k *Kademlia) CloseNeighbourhoodDepthC() {
- k.lock.Lock()
- defer k.lock.Unlock()
-
- if k.nDepthC != nil {
- close(k.nDepthC)
- k.nDepthC = nil
+ if len(k.nDepthSig) > 0 && changed {
+ for _, c := range k.nDepthSig {
+ // Every nDepthSig channel has a buffer capacity of 1,
+ // so every receiver will get the signal even if the
+ // select statement has the default case to avoid blocking.
+ select {
+ case c <- struct{}{}:
+ default:
+ }
+ }
}
}
-// sendNeighbourhoodDepthChange sends new neighbourhood depth to k.nDepth channel
-// if it is initialized.
-func (k *Kademlia) sendNeighbourhoodDepthChange() {
- // nDepthC is initialized when NeighbourhoodDepthC is called and returned by it.
- // It provides signaling of neighbourhood depth change.
- // This part of the code is sending new neighbourhood depth to nDepthC if that condition is met.
- if k.nDepthC != nil {
- nDepth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
- if nDepth != k.nDepth {
- k.nDepth = nDepth
- k.nDepthC <- nDepth
- }
- }
+// NeighbourhoodDepth returns the value calculated by depthForPot function
+// in setNeighbourhoodDepth method.
+func (k *Kademlia) NeighbourhoodDepth() int {
+ k.nDepthMu.RLock()
+ defer k.nDepthMu.RUnlock()
+ return k.nDepth
}
-// AddrCountC returns the channel that sends a new
-// address count value on each change.
-// Not receiving from the returned channel will block Register function
-// when address count value changes.
-func (k *Kademlia) AddrCountC() <-chan int {
+// SubscribeToNeighbourhoodDepthChange returns the channel that signals
+// when neighbourhood depth value is changed. The current neighbourhood depth
+// is returned by NeighbourhoodDepth method. Returned function unsubscribes
+// the channel from signaling and releases the resources. Returned function is safe
+// to be called multiple times.
+func (k *Kademlia) SubscribeToNeighbourhoodDepthChange() (c <-chan struct{}, unsubscribe func()) {
+ channel := make(chan struct{}, 1)
+ var closeOnce sync.Once
+
k.lock.Lock()
defer k.lock.Unlock()
- if k.addrCountC == nil {
- k.addrCountC = make(chan int)
- }
- return k.addrCountC
-}
+ k.nDepthSig = append(k.nDepthSig, channel)
-// CloseAddrCountC closes the channel returned by
-// AddrCountC and stops sending address count change.
-func (k *Kademlia) CloseAddrCountC() {
- k.lock.Lock()
- defer k.lock.Unlock()
+ unsubscribe = func() {
+ k.lock.Lock()
+ defer k.lock.Unlock()
- if k.addrCountC != nil {
- close(k.addrCountC)
- k.addrCountC = nil
+ for i, c := range k.nDepthSig {
+ if c == channel {
+ k.nDepthSig = append(k.nDepthSig[:i], k.nDepthSig[i+1:]...)
+ break
+ }
+ }
+
+ closeOnce.Do(func() { close(channel) })
}
+
+ return channel, unsubscribe
}
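Sketch of a consumer of the new subscription API that replaces NeighbourhoodDepthC: the subscriber is only signalled that the depth changed and re-reads the current value via NeighbourhoodDepth. The quit channel and helper name are illustrative.

func watchNeighbourhoodDepth(k *Kademlia, quit <-chan struct{}) {
	c, unsubscribe := k.SubscribeToNeighbourhoodDepthChange()
	defer unsubscribe()
	for {
		select {
		case _, ok := <-c:
			if !ok {
				return // channel closed by unsubscribe
			}
			log.Debug("neighbourhood depth changed", "depth", k.NeighbourhoodDepth())
		case <-quit:
			return
		}
	}
}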
// Off removes a peer from among live peers
@@ -422,11 +418,7 @@ func (k *Kademlia) Off(p *Peer) {
// v cannot be nil, but no need to check
return nil
})
- // send new address count value only if the peer is deleted
- if k.addrCountC != nil {
- k.addrCountC <- k.addrs.Size()
- }
- k.sendNeighbourhoodDepthChange()
+ k.setNeighbourhoodDepth()
}
}
@@ -484,13 +476,6 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
})
}
-// NeighbourhoodDepth returns the depth for the pot, see depthForPot
-func (k *Kademlia) NeighbourhoodDepth() (depth int) {
- k.lock.RLock()
- defer k.lock.RUnlock()
- return depthForPot(k.conns, k.NeighbourhoodSize, k.base)
-}
-
// neighbourhoodRadiusForPot returns the neighbourhood radius of the kademlia
// neighbourhood radius encloses the nearest neighbour set with size >= neighbourhoodSize
// i.e., neighbourhood radius is the deepest PO such that all bins not shallower altogether
@@ -608,7 +593,7 @@ func (k *Kademlia) string() string {
if len(sv.GitCommit) > 0 {
rows = append(rows, fmt.Sprintf("commit hash: %s", sv.GitCommit))
}
- rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()[:3]))
+ rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()))
rows = append(rows, fmt.Sprintf("population: %d (%d), NeighbourhoodSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.NeighbourhoodSize, k.MinBinSize, k.MaxBinSize))
liverows := make([]string, k.MaxProxDisplay)
diff --git a/swarm/network/kademlia_test.go b/swarm/network/kademlia_test.go
index b4663eee5..035879cd3 100644
--- a/swarm/network/kademlia_test.go
+++ b/swarm/network/kademlia_test.go
@@ -541,7 +541,7 @@ func TestKademliaHiveString(t *testing.T) {
tk.Register("10000000", "10000001")
tk.MaxProxDisplay = 8
h := tk.String()
- expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
+ expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 0000000000000000000000000000000000000000000000000000000000000000\npopulation: 2 (4), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
if expH[104:] != h[104:] {
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
}
@@ -560,3 +560,113 @@ func newTestDiscoveryPeer(addr pot.Address, kad *Kademlia) *Peer {
}
return NewPeer(bp, kad)
}
+
+// TestKademlia_SubscribeToNeighbourhoodDepthChange checks that correct
+// signaling over SubscribeToNeighbourhoodDepthChange channels is made
+// when neighbourhood depth is changed.
+func TestKademlia_SubscribeToNeighbourhoodDepthChange(t *testing.T) {
+
+ testSignal := func(t *testing.T, k *testKademlia, prevDepth int, c <-chan struct{}) (newDepth int) {
+ t.Helper()
+
+ select {
+ case _, ok := <-c:
+ if !ok {
+ t.Error("closed signal channel")
+ }
+ newDepth = k.NeighbourhoodDepth()
+ if prevDepth == newDepth {
+ t.Error("depth not changed")
+ }
+ return newDepth
+ case <-time.After(2 * time.Second):
+ t.Error("timeout")
+ }
+ return newDepth
+ }
+
+ t.Run("single subscription", func(t *testing.T) {
+ k := newTestKademlia(t, "00000000")
+
+ c, u := k.SubscribeToNeighbourhoodDepthChange()
+ defer u()
+
+ depth := k.NeighbourhoodDepth()
+
+ k.On("11111101", "01000000", "10000000", "00000010")
+
+ testSignal(t, k, depth, c)
+ })
+
+ t.Run("multiple subscriptions", func(t *testing.T) {
+ k := newTestKademlia(t, "00000000")
+
+ c1, u1 := k.SubscribeToNeighbourhoodDepthChange()
+ defer u1()
+
+ c2, u2 := k.SubscribeToNeighbourhoodDepthChange()
+ defer u2()
+
+ depth := k.NeighbourhoodDepth()
+
+ k.On("11111101", "01000000", "10000000", "00000010")
+
+ testSignal(t, k, depth, c1)
+
+ testSignal(t, k, depth, c2)
+ })
+
+ t.Run("multiple changes", func(t *testing.T) {
+ k := newTestKademlia(t, "00000000")
+
+ c, u := k.SubscribeToNeighbourhoodDepthChange()
+ defer u()
+
+ depth := k.NeighbourhoodDepth()
+
+ k.On("11111101", "01000000", "10000000", "00000010")
+
+ depth = testSignal(t, k, depth, c)
+
+ k.On("11111101", "01000010", "10000010", "00000110")
+
+ testSignal(t, k, depth, c)
+ })
+
+ t.Run("no depth change", func(t *testing.T) {
+ k := newTestKademlia(t, "00000000")
+
+ c, u := k.SubscribeToNeighbourhoodDepthChange()
+ defer u()
+
+ // does not trigger the depth change
+ k.On("11111101")
+
+ select {
+ case _, ok := <-c:
+ if !ok {
+ t.Error("closed signal channel")
+ }
+ t.Error("signal received")
+ case <-time.After(1 * time.Second):
+ // all fine
+ }
+ })
+
+ t.Run("no new peers", func(t *testing.T) {
+ k := newTestKademlia(t, "00000000")
+
+ changeC, unsubscribe := k.SubscribeToNeighbourhoodDepthChange()
+ defer unsubscribe()
+
+ select {
+ case _, ok := <-changeC:
+ if !ok {
+ t.Error("closed signal channel")
+ }
+ t.Error("signal received")
+ case <-time.After(1 * time.Second):
+ // all fine
+ }
+ })
+}
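
The tests above cover the new subscription API end to end. As a minimal consumer sketch (not part of this change; the quit channel and the helper name are illustrative), a long-running goroutine can react to depth changes like this, mirroring how runUpdateSyncing in swarm/network/stream/peer.go uses the same API later in this patch:

import (
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
)

// consumeDepthChanges reacts to neighbourhood depth changes until quit is closed.
func consumeDepthChanges(kad *network.Kademlia, quit chan struct{}) {
	c, unsubscribe := kad.SubscribeToNeighbourhoodDepthChange()
	defer unsubscribe()

	for {
		select {
		case _, ok := <-c:
			if !ok {
				return // channel closed by unsubscribe
			}
			// the signal carries no payload; read the new depth explicitly
			log.Debug("neighbourhood depth changed", "depth", kad.NeighbourhoodDepth())
		case <-quit:
			return
		}
	}
}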
diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go
index 2207ba308..737ad0784 100644
--- a/swarm/network/protocol_test.go
+++ b/swarm/network/protocol_test.go
@@ -235,6 +235,7 @@ func TestBzzHandshakeNetworkIDMismatch(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ defer s.Stop()
node := s.Nodes[0]
err = s.testHandshake(
@@ -258,6 +259,7 @@ func TestBzzHandshakeVersionMismatch(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ defer s.Stop()
node := s.Nodes[0]
err = s.testHandshake(
@@ -281,6 +283,7 @@ func TestBzzHandshakeSuccess(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ defer s.Stop()
node := s.Nodes[0]
err = s.testHandshake(
@@ -312,6 +315,7 @@ func TestBzzHandshakeLightNode(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ defer pt.Stop()
node := pt.Nodes[0]
addr := NewAddr(node)
diff --git a/swarm/network/simulation/kademlia_test.go b/swarm/network/simulation/kademlia_test.go
index 0ac1e7803..4d7dc6240 100644
--- a/swarm/network/simulation/kademlia_test.go
+++ b/swarm/network/simulation/kademlia_test.go
@@ -156,6 +156,7 @@ func createSimServiceMap(discovery bool) map[string]ServiceFunc {
// Call WaitTillSnapshotRecreated() function and wait until it returns
// Iterate the nodes and check if all the connections are successfully recreated
func TestWaitTillSnapshotRecreated(t *testing.T) {
+ t.Skip("test is flaky. disabling until underlying problem is addressed")
var err error
sim := New(createSimServiceMap(true))
_, err = sim.AddNodesAndConnectRing(16)
diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go
index 917c440d2..615b3b68f 100644
--- a/swarm/network/stream/common_test.go
+++ b/swarm/network/stream/common_test.go
@@ -30,16 +30,19 @@ import (
"sync/atomic"
"time"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
+ "github.com/ethereum/go-ethereum/swarm/storage/mock"
"github.com/ethereum/go-ethereum/swarm/testutil"
colorable "github.com/mattn/go-colorable"
)
@@ -51,7 +54,6 @@ var (
useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)")
longrunning = flag.Bool("longrunning", false, "do run long-running tests")
- bucketKeyDB = simulation.BucketKey("db")
bucketKeyStore = simulation.BucketKey("store")
bucketKeyFileStore = simulation.BucketKey("filestore")
bucketKeyNetStore = simulation.BucketKey("netstore")
@@ -113,26 +115,24 @@ func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket
func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map, addr *network.BzzAddr) (*storage.NetStore, *Delivery, func(), error) {
n := ctx.Config.Node()
- store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
- if *useMockStore {
- store, datadir, err = createMockStore(mockmem.NewGlobalStore(), n.ID(), addr)
- }
+ localStore, localStoreCleanup, err := newTestLocalStore(n.ID(), addr, nil)
if err != nil {
return nil, nil, nil, err
}
- localStore := store.(*storage.LocalStore)
+
netStore, err := storage.NewNetStore(localStore, nil)
if err != nil {
+ localStore.Close()
+ localStoreCleanup()
return nil, nil, nil, err
}
- fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
+ fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams(), chunk.NewTags())
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
delivery := NewDelivery(kad, netStore)
- bucket.Store(bucketKeyStore, store)
- bucket.Store(bucketKeyDB, netStore)
+ bucket.Store(bucketKeyStore, localStore)
bucket.Store(bucketKeyDelivery, delivery)
bucket.Store(bucketKeyFileStore, fileStore)
// for the kademlia object, we use the global key from the simulation package,
@@ -141,13 +141,13 @@ func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map,
cleanup := func() {
netStore.Close()
- os.RemoveAll(datadir)
+ localStoreCleanup()
}
return netStore, delivery, cleanup, nil
}
-func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
+func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *localstore.DB, func(), error) {
// setup
addr := network.RandomAddr() // tested peers peer address
to := network.NewKademlia(addr.OAddr, network.NewKadParams())
@@ -161,11 +161,7 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
os.RemoveAll(datadir)
}
- params := storage.NewDefaultLocalStoreParams()
- params.Init(datadir)
- params.BaseKey = addr.Over()
-
- localStore, err := storage.NewTestLocalStoreForAddr(params)
+ localStore, err := localstore.New(datadir, addr.Over(), nil)
if err != nil {
removeDataDir()
return nil, nil, nil, nil, err
@@ -173,17 +169,16 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
netStore, err := storage.NewNetStore(localStore, nil)
if err != nil {
+ localStore.Close()
removeDataDir()
return nil, nil, nil, nil, err
}
delivery := NewDelivery(to, netStore)
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
- streamer := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), registryOptions, nil)
- teardown := func() {
- streamer.Close()
- removeDataDir()
- }
+ intervalsStore := state.NewInmemoryStore()
+ streamer := NewRegistry(addr.ID(), delivery, netStore, intervalsStore, registryOptions, nil)
+
prvkey, err := crypto.GenerateKey()
if err != nil {
removeDataDir()
@@ -191,7 +186,13 @@ func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTeste
}
protocolTester := p2ptest.NewProtocolTester(prvkey, 1, streamer.runProtocol)
-
+ teardown := func() {
+ protocolTester.Stop()
+ streamer.Close()
+ intervalsStore.Close()
+ netStore.Close()
+ removeDataDir()
+ }
err = waitForPeers(streamer, 10*time.Second, 1)
if err != nil {
teardown()
@@ -228,24 +229,37 @@ func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
}
// not used in this context, only to fulfill ChunkStore interface
-func (rrs *roundRobinStore) Has(ctx context.Context, addr storage.Address) bool {
- panic("RoundRobinStor doesn't support HasChunk")
+func (rrs *roundRobinStore) Has(_ context.Context, _ storage.Address) (bool, error) {
+ return false, errors.New("roundRobinStore doesn't support Has")
}
-func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (storage.Chunk, error) {
- return nil, errors.New("get not well defined on round robin store")
+func (rrs *roundRobinStore) Get(_ context.Context, _ chunk.ModeGet, _ storage.Address) (storage.Chunk, error) {
+ return nil, errors.New("roundRobinStore doesn't support Get")
}
-func (rrs *roundRobinStore) Put(ctx context.Context, chunk storage.Chunk) error {
+func (rrs *roundRobinStore) Put(ctx context.Context, mode chunk.ModePut, ch storage.Chunk) (bool, error) {
i := atomic.AddUint32(&rrs.index, 1)
idx := int(i) % len(rrs.stores)
- return rrs.stores[idx].Put(ctx, chunk)
+ return rrs.stores[idx].Put(ctx, mode, ch)
+}
+
+func (rrs *roundRobinStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
+ return errors.New("roundRobinStore doesn't support Set")
+}
+
+func (rrs *roundRobinStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
+ return 0, errors.New("roundRobinStore doesn't support LastPullSubscriptionBinID")
+}
+
+func (rrs *roundRobinStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
+ return nil, nil
}
-func (rrs *roundRobinStore) Close() {
+func (rrs *roundRobinStore) Close() error {
for _, store := range rrs.stores {
store.Close()
}
+ return nil
}
func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
@@ -311,24 +325,28 @@ func generateRandomFile() (string, error) {
return string(b), nil
}
-//create a local store for the given node
-func createTestLocalStorageForID(id enode.ID, addr *network.BzzAddr) (storage.ChunkStore, string, error) {
- var datadir string
- var err error
- datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
+func newTestLocalStore(id enode.ID, addr *network.BzzAddr, globalStore mock.GlobalStorer) (localStore *localstore.DB, cleanup func(), err error) {
+ dir, err := ioutil.TempDir("", "swarm-stream-")
if err != nil {
- return nil, "", err
+ return nil, nil, err
+ }
+ cleanup = func() {
+ os.RemoveAll(dir)
+ }
+
+ var mockStore *mock.NodeStore
+ if globalStore != nil {
+ mockStore = globalStore.NewNodeStore(common.BytesToAddress(id.Bytes()))
}
- var store storage.ChunkStore
- params := storage.NewDefaultLocalStoreParams()
- params.ChunkDbPath = datadir
- params.BaseKey = addr.Over()
- store, err = storage.NewTestLocalStoreForAddr(params)
+
+ localStore, err = localstore.New(dir, addr.Over(), &localstore.Options{
+ MockStore: mockStore,
+ })
if err != nil {
- os.RemoveAll(datadir)
- return nil, "", err
+ cleanup()
+ return nil, nil, err
}
- return store, datadir, nil
+ return localStore, cleanup, nil
}
// watchDisconnections receives simulation peer events in a new goroutine and sets atomic value
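
The newTestLocalStore helper above replaces the old LocalStore constructors in these tests. A hedged usage sketch, assuming nodeID and bzzAddr come from the surrounding test exactly as in netStoreAndDeliveryWithAddr:

localStore, cleanup, err := newTestLocalStore(nodeID, bzzAddr, nil) // nil: no mock global store
if err != nil {
	t.Fatal(err)
}
defer cleanup()

netStore, err := storage.NewNetStore(localStore, nil)
if err != nil {
	t.Fatal(err)
}
// deferred calls run last-in first-out, so the NetStore is closed before
// the cleanup removes the temporary data directory, as in the cleanup above
defer netStore.Close()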
diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go
index bc4f1f665..1b4a14ea2 100644
--- a/swarm/network/stream/delivery.go
+++ b/swarm/network/stream/delivery.go
@@ -20,9 +20,11 @@ import (
"context"
"errors"
"fmt"
+ "time"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/spancontext"
@@ -32,11 +34,6 @@ import (
olog "github.com/opentracing/opentracing-go/log"
)
-const (
- swarmChunkServerStreamName = "RETRIEVE_REQUEST"
- deliveryCap = 32
-)
-
var (
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
@@ -44,93 +41,25 @@ var (
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
+
+ lastReceivedChunksMsg = metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)
)
type Delivery struct {
- chunkStore storage.SyncChunkStore
- kad *network.Kademlia
- getPeer func(enode.ID) *Peer
+ netStore *storage.NetStore
+ kad *network.Kademlia
+ getPeer func(enode.ID) *Peer
+ quit chan struct{}
}
-func NewDelivery(kad *network.Kademlia, chunkStore storage.SyncChunkStore) *Delivery {
+func NewDelivery(kad *network.Kademlia, netStore *storage.NetStore) *Delivery {
return &Delivery{
- chunkStore: chunkStore,
- kad: kad,
+ netStore: netStore,
+ kad: kad,
+ quit: make(chan struct{}),
}
}
-// SwarmChunkServer implements Server
-type SwarmChunkServer struct {
- deliveryC chan []byte
- batchC chan []byte
- chunkStore storage.ChunkStore
- currentLen uint64
- quit chan struct{}
-}
-
-// NewSwarmChunkServer is SwarmChunkServer constructor
-func NewSwarmChunkServer(chunkStore storage.ChunkStore) *SwarmChunkServer {
- s := &SwarmChunkServer{
- deliveryC: make(chan []byte, deliveryCap),
- batchC: make(chan []byte),
- chunkStore: chunkStore,
- quit: make(chan struct{}),
- }
- go s.processDeliveries()
- return s
-}
-
-// processDeliveries handles delivered chunk hashes
-func (s *SwarmChunkServer) processDeliveries() {
- var hashes []byte
- var batchC chan []byte
- for {
- select {
- case <-s.quit:
- return
- case hash := <-s.deliveryC:
- hashes = append(hashes, hash...)
- batchC = s.batchC
- case batchC <- hashes:
- hashes = nil
- batchC = nil
- }
- }
-}
-
-// SessionIndex returns zero in all cases for SwarmChunkServer.
-func (s *SwarmChunkServer) SessionIndex() (uint64, error) {
- return 0, nil
-}
-
-// SetNextBatch
-func (s *SwarmChunkServer) SetNextBatch(_, _ uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) {
- select {
- case hashes = <-s.batchC:
- case <-s.quit:
- return
- }
-
- from = s.currentLen
- s.currentLen += uint64(len(hashes))
- to = s.currentLen
- return
-}
-
-// Close needs to be called on a stream server
-func (s *SwarmChunkServer) Close() {
- close(s.quit)
-}
-
-// GetData retrives chunk data from db store
-func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
- chunk, err := s.chunkStore.Get(ctx, storage.Address(key))
- if err != nil {
- return nil, err
- }
- return chunk.Data(), nil
-}
-
// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
type RetrieveRequestMsg struct {
Addr storage.Address
@@ -149,12 +78,6 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
osp.LogFields(olog.String("ref", req.Addr.String()))
- s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", true))
- if err != nil {
- return err
- }
- streamer := s.Server.(*SwarmChunkServer)
-
var cancel func()
// TODO: do something with this hardcoded timeout, maybe use TTL in the future
ctx = context.WithValue(ctx, "peer", sp.ID().String())
@@ -164,36 +87,26 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
go func() {
select {
case <-ctx.Done():
- case <-streamer.quit:
+ case <-d.quit:
}
cancel()
}()
go func() {
defer osp.Finish()
- chunk, err := d.chunkStore.Get(ctx, req.Addr)
+ ch, err := d.netStore.Get(ctx, chunk.ModeGetRequest, req.Addr)
if err != nil {
retrieveChunkFail.Inc(1)
log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
return
}
- if req.SkipCheck {
- syncing := false
- osp.LogFields(olog.Bool("skipCheck", true))
+ syncing := false
- err = sp.Deliver(ctx, chunk, s.priority, syncing)
- if err != nil {
- log.Warn("ERROR in handleRetrieveRequestMsg", "err", err)
- }
- osp.LogFields(olog.Bool("delivered", true))
- return
- }
- osp.LogFields(olog.Bool("skipCheck", false))
- select {
- case streamer.deliveryC <- chunk.Address()[:]:
- case <-streamer.quit:
+ err = sp.Deliver(ctx, ch, Top, syncing)
+ if err != nil {
+ log.Warn("ERROR in handleRetrieveRequestMsg", "err", err)
}
-
+ osp.LogFields(olog.Bool("delivered", true))
}()
return nil
@@ -216,7 +129,7 @@ type ChunkDeliveryMsgRetrieval ChunkDeliveryMsg
type ChunkDeliveryMsgSyncing ChunkDeliveryMsg
// chunk delivery msg is response to retrieverequest msg
-func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error {
+func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req interface{}) error {
var osp opentracing.Span
ctx, osp = spancontext.StartSpan(
ctx,
@@ -224,36 +137,58 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
processReceivedChunksCount.Inc(1)
- // retrieve the span for the originating retrieverequest
- spanId := fmt.Sprintf("stream.send.request.%v.%v", sp.ID(), req.Addr)
- span := tracing.ShiftSpanByKey(spanId)
+ // record the last time we received a chunk delivery message
+ lastReceivedChunksMsg.Update(time.Now().UnixNano())
+
+ var msg *ChunkDeliveryMsg
+ var mode chunk.ModePut
+ switch r := req.(type) {
+ case *ChunkDeliveryMsgRetrieval:
+ msg = (*ChunkDeliveryMsg)(r)
+ peerPO := chunk.Proximity(sp.BzzAddr.Over(), msg.Addr)
+ po := chunk.Proximity(d.kad.BaseAddr(), msg.Addr)
+ depth := d.kad.NeighbourhoodDepth()
+ // chunks within the area of responsibility should always sync
+ // https://github.com/ethersphere/go-ethereum/pull/1282#discussion_r269406125
+ if po >= depth || peerPO < po {
+ mode = chunk.ModePutSync
+ } else {
+ // do not sync if the peer that is sending us a chunk is closer to the chunk than we are
+ mode = chunk.ModePutRequest
+ }
+ case *ChunkDeliveryMsgSyncing:
+ msg = (*ChunkDeliveryMsg)(r)
+ mode = chunk.ModePutSync
+ case *ChunkDeliveryMsg:
+ msg = r
+ mode = chunk.ModePutSync
+ }
- log.Trace("handle.chunk.delivery", "ref", req.Addr, "from peer", sp.ID())
+ log.Trace("handle.chunk.delivery", "ref", msg.Addr, "from peer", sp.ID())
go func() {
defer osp.Finish()
- if span != nil {
- span.LogFields(olog.String("finish", "from handleChunkDeliveryMsg"))
- defer span.Finish()
- }
-
- req.peer = sp
- log.Trace("handle.chunk.delivery", "put", req.Addr)
- err := d.chunkStore.Put(ctx, storage.NewChunk(req.Addr, req.SData))
+ msg.peer = sp
+ log.Trace("handle.chunk.delivery", "put", msg.Addr)
+ _, err := d.netStore.Put(ctx, mode, storage.NewChunk(msg.Addr, msg.SData))
if err != nil {
if err == storage.ErrChunkInvalid {
// we removed this log because it spams the logs
// TODO: Enable this log line
- // log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", req.Addr, )
- req.peer.Drop(err)
+ // log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", msg.Addr, )
+ msg.peer.Drop()
}
}
- log.Trace("handle.chunk.delivery", "done put", req.Addr, "err", err)
+ log.Trace("handle.chunk.delivery", "done put", msg.Addr, "err", err)
}()
return nil
}
+func (d *Delivery) Close() {
+ close(d.quit)
+}
+
// RequestFromPeers sends a chunk retrieve request to a peer
// The most eligible peer that hasn't already been sent to is chosen
// TODO: define "eligible"
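
The reworked handleChunkDeliveryMsg above selects a chunk.ModePut per delivery based on proximity. A standalone sketch of just that decision; the helper name and its arguments are illustrative and not part of the patch:

import (
	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/network"
)

// putModeForRetrieval mirrors the decision made for ChunkDeliveryMsgRetrieval above.
func putModeForRetrieval(kad *network.Kademlia, peerAddr, ref []byte) chunk.ModePut {
	peerPO := chunk.Proximity(peerAddr, ref)   // how close the sending peer is to the chunk
	po := chunk.Proximity(kad.BaseAddr(), ref) // how close we are to the chunk
	depth := kad.NeighbourhoodDepth()

	// chunks within our area of responsibility, or chunks we are closer to than
	// the sender, are stored as synced content; everything else is a request-only put
	if po >= depth || peerPO < po {
		return chunk.ModePutSync
	}
	return chunk.ModePutRequest
}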
diff --git a/swarm/network/stream/delivery_test.go b/swarm/network/stream/delivery_test.go
index 50b788150..fc0f9d5df 100644
--- a/swarm/network/stream/delivery_test.go
+++ b/swarm/network/stream/delivery_test.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/protocols"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
@@ -40,64 +41,11 @@ import (
"github.com/ethereum/go-ethereum/swarm/testutil"
)
-//Tests initializing a retrieve request
-func TestStreamerRetrieveRequest(t *testing.T) {
- regOpts := &RegistryOptions{
- Retrieval: RetrievalClientOnly,
- Syncing: SyncingDisabled,
- }
- tester, streamer, _, teardown, err := newStreamerTester(regOpts)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- node := tester.Nodes[0]
-
- ctx := context.Background()
- req := network.NewRequest(
- storage.Address(hash0[:]),
- true,
- &sync.Map{},
- )
- streamer.delivery.RequestFromPeers(ctx, req)
-
- stream := NewStream(swarmChunkServerStreamName, "", true)
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "RetrieveRequestMsg",
- Expects: []p2ptest.Expect{
- { //start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- { //expect a retrieve request message for the given hash
- Code: 5,
- Msg: &RetrieveRequestMsg{
- Addr: hash0[:],
- SkipCheck: true,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-}
-
//Test requesting a chunk from a peer then issuing a "empty" OfferedHashesMsg (no hashes available yet)
//Should time out as the peer does not have the chunk (no syncing happened previously)
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
- Retrieval: RetrievalEnabled,
- Syncing: SyncingDisabled, //do no syncing
+ tester, _, _, teardown, err := newStreamerTester(&RegistryOptions{
+ Syncing: SyncingDisabled, //do no syncing
})
if err != nil {
t.Fatal(err)
@@ -108,30 +56,8 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
- peer := streamer.getPeer(node.ID())
-
- stream := NewStream(swarmChunkServerStreamName, "", true)
- //simulate pre-subscription to RETRIEVE_REQUEST stream on peer
- peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- })
-
//test the exchange
err = tester.TestExchanges(p2ptest.Exchange{
- Expects: []p2ptest.Expect{
- { //first expect a subscription to the RETRIEVE_REQUEST stream
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- }, p2ptest.Exchange{
Label: "RetrieveRequestMsg",
Triggers: []p2ptest.Trigger{
{ //then the actual RETRIEVE_REQUEST....
@@ -158,7 +84,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
//should fail with a timeout as the peer we are requesting
//the chunk from does not have the chunk
- expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
+ expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
if err == nil || err.Error() != expectedError {
t.Fatalf("Expected error %v, got %v", expectedError, err)
}
@@ -167,9 +93,8 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
// upstream request server receives a retrieve Request and responds with
// offered hashes or delivery if skipHash is set to true
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
- tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
- Retrieval: RetrievalEnabled,
- Syncing: SyncingDisabled,
+ tester, _, localStore, teardown, err := newStreamerTester(&RegistryOptions{
+ Syncing: SyncingDisabled,
})
if err != nil {
t.Fatal(err)
@@ -178,36 +103,14 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
node := tester.Nodes[0]
- peer := streamer.getPeer(node.ID())
-
- stream := NewStream(swarmChunkServerStreamName, "", true)
-
- peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- })
-
- hash := storage.Address(hash0[:])
- chunk := storage.NewChunk(hash, hash)
- err = localStore.Put(context.TODO(), chunk)
+ hash := storage.Address(hash1[:])
+ ch := storage.NewChunk(hash, hash1[:])
+ _, err = localStore.Put(context.TODO(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatalf("Expected no err got %v", err)
}
err = tester.TestExchanges(p2ptest.Exchange{
- Expects: []p2ptest.Expect{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- }, p2ptest.Exchange{
Label: "RetrieveRequestMsg",
Triggers: []p2ptest.Trigger{
{
@@ -220,51 +123,10 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
},
Expects: []p2ptest.Expect{
{
- Code: 1,
- Msg: &OfferedHashesMsg{
- HandoverProof: &HandoverProof{
- Handover: &Handover{},
- },
- Hashes: hash,
- From: 0,
- // TODO: why is this 32???
- To: 32,
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-
- hash = storage.Address(hash1[:])
- chunk = storage.NewChunk(hash, hash1[:])
- err = localStore.Put(context.TODO(), chunk)
- if err != nil {
- t.Fatalf("Expected no err got %v", err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "RetrieveRequestMsg",
- Triggers: []p2ptest.Trigger{
- {
- Code: 5,
- Msg: &RetrieveRequestMsg{
- Addr: hash,
- SkipCheck: true,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
Code: 6,
Msg: &ChunkDeliveryMsg{
- Addr: hash,
- SData: hash,
+ Addr: ch.Address(),
+ SData: ch.Data(),
},
Peer: node.ID(),
},
@@ -294,7 +156,7 @@ func TestRequestFromPeers(t *testing.T) {
// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
sp := &Peer{
- Peer: protocolsPeer,
+ BzzPeer: &network.BzzPeer{Peer: protocolsPeer, BzzAddr: addr},
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
streamer: r,
}
@@ -334,7 +196,7 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
// an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
sp := &Peer{
- Peer: protocolsPeer,
+ BzzPeer: &network.BzzPeer{Peer: protocolsPeer, BzzAddr: addr},
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
streamer: r,
}
@@ -358,8 +220,7 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingDisabled,
+ Syncing: SyncingDisabled,
})
if err != nil {
t.Fatal(err)
@@ -420,14 +281,14 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
defer cancel()
// wait for the chunk to get stored
- storedChunk, err := localStore.Get(ctx, chunkKey)
+ storedChunk, err := localStore.Get(ctx, chunk.ModeGetRequest, chunkKey)
for err != nil {
select {
case <-ctx.Done():
t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
default:
}
- storedChunk, err = localStore.Get(ctx, chunkKey)
+ storedChunk, err = localStore.Get(ctx, chunk.ModeGetRequest, chunkKey)
time.Sleep(50 * time.Millisecond)
}
@@ -471,7 +332,6 @@ func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool)
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
SkipCheck: skipCheck,
Syncing: SyncingDisabled,
- Retrieval: RetrievalEnabled,
}, nil)
bucket.Store(bucketKeyRegistry, r)
@@ -520,7 +380,7 @@ func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool)
i++
}
//...which then gets passed to the round-robin file store
- roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
+ roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams(), chunk.NewTags())
//now we can actually upload a (random) file to the round-robin store
size := chunkCount * chunkSize
log.Debug("Storing data to file store")
@@ -622,7 +482,6 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
SkipCheck: skipCheck,
Syncing: SyncingDisabled,
- Retrieval: RetrievalDisabled,
SyncUpdateDelay: 0,
}, nil)
bucket.Store(bucketKeyRegistry, r)
@@ -700,7 +559,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
errs := make(chan error)
for _, hash := range hashes {
go func(h storage.Address) {
- _, err := netStore.Get(ctx, h)
+ _, err := netStore.Get(ctx, chunk.ModeGetRequest, h)
log.Warn("test check netstore get", "hash", h, "err", err)
errs <- err
}(hash)
diff --git a/swarm/network/stream/intervals_test.go b/swarm/network/stream/intervals_test.go
index 009a941ef..660954857 100644
--- a/swarm/network/stream/intervals_test.go
+++ b/swarm/network/stream/intervals_test.go
@@ -66,7 +66,6 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
}
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Retrieval: RetrievalDisabled,
Syncing: SyncingRegisterOnly,
SkipCheck: skipCheck,
}, nil)
@@ -287,20 +286,20 @@ func enableNotifications(r *Registry, peerID enode.ID, s Stream) error {
type testExternalClient struct {
hashes chan []byte
- store storage.SyncChunkStore
+ netStore *storage.NetStore
enableNotificationsC chan struct{}
}
-func newTestExternalClient(store storage.SyncChunkStore) *testExternalClient {
+func newTestExternalClient(netStore *storage.NetStore) *testExternalClient {
return &testExternalClient{
hashes: make(chan []byte),
- store: store,
+ netStore: netStore,
enableNotificationsC: make(chan struct{}),
}
}
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
- wait := c.store.FetchFunc(ctx, storage.Address(hash))
+ wait := c.netStore.FetchFunc(ctx, storage.Address(hash))
if wait == nil {
return nil
}
diff --git a/swarm/network/stream/lightnode_test.go b/swarm/network/stream/lightnode_test.go
index 501660fab..eb4e73d47 100644
--- a/swarm/network/stream/lightnode_test.go
+++ b/swarm/network/stream/lightnode_test.go
@@ -22,94 +22,10 @@ import (
)
// This test checks the default behavior of the server, that is
-// when it is serving Retrieve requests.
-func TestLigthnodeRetrieveRequestWithRetrieve(t *testing.T) {
- registryOptions := &RegistryOptions{
- Retrieval: RetrievalClientOnly,
- Syncing: SyncingDisabled,
- }
- tester, _, _, teardown, err := newStreamerTester(registryOptions)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- node := tester.Nodes[0]
-
- stream := NewStream(swarmChunkServerStreamName, "", false)
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "SubscribeMsg",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
- if err != nil {
- t.Fatalf("Got %v", err)
- }
-
- err = tester.TestDisconnected(&p2ptest.Disconnect{Peer: node.ID()})
- if err == nil || err.Error() != "timed out waiting for peers to disconnect" {
- t.Fatalf("Expected no disconnect, got %v", err)
- }
-}
-
-// This test checks the Lightnode behavior of server, when serving Retrieve
-// requests are disabled
-func TestLigthnodeRetrieveRequestWithoutRetrieve(t *testing.T) {
- registryOptions := &RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingDisabled,
- }
- tester, _, _, teardown, err := newStreamerTester(registryOptions)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- node := tester.Nodes[0]
-
- stream := NewStream(swarmChunkServerStreamName, "", false)
-
- err = tester.TestExchanges(
- p2ptest.Exchange{
- Label: "SubscribeMsg",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 7,
- Msg: &SubscribeErrorMsg{
- Error: "stream RETRIEVE_REQUEST not registered",
- },
- Peer: node.ID(),
- },
- },
- })
- if err != nil {
- t.Fatalf("Got %v", err)
- }
-}
-
-// This test checks the default behavior of the server, that is
// when syncing is enabled.
func TestLigthnodeRequestSubscriptionWithSync(t *testing.T) {
registryOptions := &RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingRegisterOnly,
+ Syncing: SyncingRegisterOnly,
}
tester, _, _, teardown, err := newStreamerTester(registryOptions)
if err != nil {
@@ -153,8 +69,7 @@ func TestLigthnodeRequestSubscriptionWithSync(t *testing.T) {
// when syncing is disabled.
func TestLigthnodeRequestSubscriptionWithoutSync(t *testing.T) {
registryOptions := &RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingDisabled,
+ Syncing: SyncingDisabled,
}
tester, _, _, teardown, err := newStreamerTester(registryOptions)
if err != nil {
diff --git a/swarm/network/stream/messages.go b/swarm/network/stream/messages.go
index b293724cc..339101b88 100644
--- a/swarm/network/stream/messages.go
+++ b/swarm/network/stream/messages.go
@@ -24,9 +24,7 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/log"
bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
- "github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/opentracing/opentracing-go"
)
var syncBatchTimeout = 30 * time.Second
@@ -175,7 +173,11 @@ type QuitMsg struct {
}
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
- return p.removeClient(req.Stream)
+ err := p.removeClient(req.Stream)
+ if _, ok := err.(*notFoundError); ok {
+ return nil
+ }
+ return err
}
// OfferedHashesMsg is the protocol msg for offering to hand over a
@@ -197,12 +199,6 @@ func (m OfferedHashesMsg) String() string {
func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)
- var sp opentracing.Span
- ctx, sp = spancontext.StartSpan(
- ctx,
- "handle.offered.hashes")
- defer sp.Finish()
-
c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
if err != nil {
return err
@@ -219,6 +215,9 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
return fmt.Errorf("error initiaising bitvector of length %v: %v", lenHashes/HashSize, err)
}
+ var wantDelaySet bool
+ var wantDelay time.Time
+
ctr := 0
errC := make(chan error)
ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)
@@ -230,6 +229,13 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
if wait := c.NeedData(ctx, hash); wait != nil {
ctr++
want.Set(i/HashSize, true)
+
+ // measure how long it takes before we mark chunks for retrieval, and actually send the request
+ if !wantDelaySet {
+ wantDelaySet = true
+ wantDelay = time.Now()
+ }
+
// create request and wait until the chunk data arrives and is stored
go func(w func(context.Context) error) {
select {
@@ -247,7 +253,7 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
case err := <-errC:
if err != nil {
log.Debug("client.handleOfferedHashesMsg() error waiting for chunk, dropping peer", "peer", p.ID(), "err", err)
- p.Drop(err)
+ p.Drop()
return
}
case <-ctx.Done():
@@ -283,28 +289,34 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
From: from,
To: to,
}
- go func() {
- log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
- select {
- case err := <-c.next:
- if err != nil {
- log.Warn("c.next error dropping peer", "err", err)
- p.Drop(err)
- return
- }
- case <-c.quit:
- log.Debug("client.handleOfferedHashesMsg() quit")
- return
- case <-ctx.Done():
- log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
- return
- }
- log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
- err := p.SendPriority(ctx, msg, c.priority)
+
+ log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
+ select {
+ case err := <-c.next:
if err != nil {
- log.Warn("SendPriority error", "err", err)
+ log.Warn("c.next error dropping peer", "err", err)
+ p.Drop()
+ return err
}
- }()
+ case <-c.quit:
+ log.Debug("client.handleOfferedHashesMsg() quit")
+ return nil
+ case <-ctx.Done():
+ log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
+ return nil
+ }
+ log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
+
+ // record want delay
+ if wantDelaySet {
+ metrics.GetOrRegisterResettingTimer("handleoffered.wantdelay", nil).UpdateSince(wantDelay)
+ }
+
+ err = p.SendPriority(ctx, msg, c.priority)
+ if err != nil {
+ log.Warn("SendPriority error", "err", err)
+ }
+
return nil
}
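
handleOfferedHashesMsg now measures the delay between marking the first wanted chunk and sending the want batch. A minimal sketch of that record-once-then-UpdateSince pattern, with wants as an assumed placeholder slice and the metric name taken from the handler above:

start := time.Time{}
for _, wanted := range wants { // wants is an assumed []bool marking which offered hashes we need
	if wanted && start.IsZero() {
		start = time.Now() // remember when the first hash was marked as wanted
	}
}
if !start.IsZero() {
	// a resetting timer reports the time elapsed since the recorded instant
	metrics.GetOrRegisterResettingTimer("handleoffered.wantdelay", nil).UpdateSince(start)
}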
diff --git a/swarm/network/stream/peer.go b/swarm/network/stream/peer.go
index 152814bd4..28fd06e4d 100644
--- a/swarm/network/stream/peer.go
+++ b/swarm/network/stream/peer.go
@@ -24,8 +24,10 @@ import (
"time"
"github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/p2p/protocols"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/network"
pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
"github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
"github.com/ethereum/go-ethereum/swarm/spancontext"
@@ -54,7 +56,7 @@ var ErrMaxPeerServers = errors.New("max peer servers")
// Peer is the Peer extension for the streaming protocol
type Peer struct {
- *protocols.Peer
+ *network.BzzPeer
streamer *Registry
pq *pq.PriorityQueue
serverMu sync.RWMutex
@@ -74,9 +76,9 @@ type WrappedPriorityMsg struct {
}
// NewPeer is the constructor for Peer
-func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
+func NewPeer(peer *network.BzzPeer, streamer *Registry) *Peer {
p := &Peer{
- Peer: peer,
+ BzzPeer: peer,
pq: pq.New(int(PriorityQueue), PriorityQueueCap),
streamer: streamer,
servers: make(map[Stream]*server),
@@ -90,7 +92,7 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
err := p.Send(wmsg.Context, wmsg.Msg)
if err != nil {
log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
- p.Drop(err)
+ p.Drop()
}
})
@@ -134,7 +136,7 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8, syncing bool) error {
var msg interface{}
- spanName := "send.chunk.delivery"
+ metrics.GetOrRegisterCounter("peer.deliver", nil).Inc(1)
//we send different types of messages if delivery is for syncing or retrievals,
//even if handling and content of the message are the same,
@@ -144,16 +146,13 @@ func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8,
Addr: chunk.Address(),
SData: chunk.Data(),
}
- spanName += ".syncing"
} else {
msg = &ChunkDeliveryMsgRetrieval{
Addr: chunk.Address(),
SData: chunk.Data(),
}
- spanName += ".retrieval"
}
- ctx = context.WithValue(ctx, "stream_send_tag", nil)
return p.SendPriority(ctx, msg, priority)
}
@@ -416,7 +415,174 @@ func (p *Peer) removeClientParams(s Stream) error {
}
func (p *Peer) close() {
+ p.serverMu.Lock()
+ defer p.serverMu.Unlock()
+
for _, s := range p.servers {
s.Close()
}
+
+ p.servers = nil
+}
+
+// runUpdateSyncing is a long-running function that creates the initial
+// syncing subscriptions to the peer and then waits for neighbourhood depth
+// changes, creating new subscriptions or quitting existing ones based on the
+// new depth and on whether the peer enters or leaves the nearest neighbourhood,
+// using the syncSubscriptionsDiff and updateSyncSubscriptions functions.
+func (p *Peer) runUpdateSyncing() {
+ timer := time.NewTimer(p.streamer.syncUpdateDelay)
+ defer timer.Stop()
+
+ select {
+ case <-timer.C:
+ case <-p.streamer.quit:
+ return
+ }
+
+ kad := p.streamer.delivery.kad
+ po := chunk.Proximity(p.BzzAddr.Over(), kad.BaseAddr())
+
+ depth := kad.NeighbourhoodDepth()
+
+ log.Debug("update syncing subscriptions: initial", "peer", p.ID(), "po", po, "depth", depth)
+
+ // initial subscriptions
+ p.updateSyncSubscriptions(syncSubscriptionsDiff(po, -1, depth, kad.MaxProxDisplay))
+
+ depthChangeSignal, unsubscribeDepthChangeSignal := kad.SubscribeToNeighbourhoodDepthChange()
+ defer unsubscribeDepthChangeSignal()
+
+ prevDepth := depth
+ for {
+ select {
+ case _, ok := <-depthChangeSignal:
+ if !ok {
+ return
+ }
+ // update subscriptions for this peer when depth changes
+ depth := kad.NeighbourhoodDepth()
+ log.Debug("update syncing subscriptions", "peer", p.ID(), "po", po, "depth", depth)
+ p.updateSyncSubscriptions(syncSubscriptionsDiff(po, prevDepth, depth, kad.MaxProxDisplay))
+ prevDepth = depth
+ case <-p.streamer.quit:
+ return
+ }
+ }
+ log.Debug("update syncing subscriptions: exiting", "peer", p.ID())
+}
+
+// updateSyncSubscriptions accepts two slices of integers, the first one
+// representing proximity order bins for required syncing subscriptions
+// and the second one representing bins for syncing subscriptions that
+// need to be removed. This function sends subscription request
+// messages and quit messages for the provided bins.
+func (p *Peer) updateSyncSubscriptions(subBins, quitBins []int) {
+ if p.streamer.getPeer(p.ID()) == nil {
+ log.Debug("update syncing subscriptions", "peer not found", p.ID())
+ return
+ }
+ log.Debug("update syncing subscriptions", "peer", p.ID(), "subscribe", subBins, "quit", quitBins)
+ for _, po := range subBins {
+ p.subscribeSync(po)
+ }
+ for _, po := range quitBins {
+ p.quitSync(po)
+ }
+}
+
+// subscribeSync sends the request for syncing subscriptions to the peer
+// using subscriptionFunc. This function is used to request syncing subscriptions
+// when a new peer is added to the registry and on neighbourhood depth change.
+func (p *Peer) subscribeSync(po int) {
+ err := subscriptionFunc(p.streamer, p.ID(), uint8(po))
+ if err != nil {
+ log.Error("subscription", "err", err)
+ }
+}
+
+// quitSync sends the quit message for live and history syncing streams to the peer.
+// This function is used by runUpdateSyncing, indirectly through updateSyncSubscriptions,
+// to remove unneeded syncing subscriptions on neighbourhood depth change.
+func (p *Peer) quitSync(po int) {
+ live := NewStream("SYNC", FormatSyncBinKey(uint8(po)), true)
+ history := getHistoryStream(live)
+ err := p.streamer.Quit(p.ID(), live)
+ if err != nil && err != p2p.ErrShuttingDown {
+ log.Error("quit", "err", err, "peer", p.ID(), "stream", live)
+ }
+ err = p.streamer.Quit(p.ID(), history)
+ if err != nil && err != p2p.ErrShuttingDown {
+ log.Error("quit", "err", err, "peer", p.ID(), "stream", history)
+ }
+
+ err = p.removeServer(live)
+ if err != nil {
+ log.Error("remove server", "err", err, "peer", p.ID(), "stream", live)
+ }
+ err = p.removeServer(history)
+ if err != nil {
+ log.Error("remove server", "err", err, "peer", p.ID(), "stream", live)
+ }
+}
+
+// syncSubscriptionsDiff calculates to which proximity order bins a peer
+// (with po peerPO) needs to be subscribed after kademlia neighbourhood depth
+// change from prevDepth to newDepth. Max argument limits the number of
+// proximity order bins. Returned values are slices of integers which represent
+// proximity order bins, the first one to which additional subscriptions need to
+// be requested and the second one from which subscriptions need to be quit. Argument
+// prevDepth with a value less than 0 represents no previous depth, used for
+// initial syncing subscriptions.
+func syncSubscriptionsDiff(peerPO, prevDepth, newDepth, max int) (subBins, quitBins []int) {
+ newStart, newEnd := syncBins(peerPO, newDepth, max)
+ if prevDepth < 0 {
+ // no previous depth, return the complete range
+ // for subscriptions requests and nothing for quitting
+ return intRange(newStart, newEnd), nil
+ }
+
+ prevStart, prevEnd := syncBins(peerPO, prevDepth, max)
+
+ if newStart < prevStart {
+ subBins = append(subBins, intRange(newStart, prevStart)...)
+ }
+
+ if prevStart < newStart {
+ quitBins = append(quitBins, intRange(prevStart, newStart)...)
+ }
+
+ if newEnd < prevEnd {
+ quitBins = append(quitBins, intRange(newEnd, prevEnd)...)
+ }
+
+ if prevEnd < newEnd {
+ subBins = append(subBins, intRange(prevEnd, newEnd)...)
+ }
+
+ return subBins, quitBins
+}
+
+// syncBins returns the range of proximity order bins to which syncing
+// subscriptions need to be requested, based on peer proximity and
+// kademlia neighbourhood depth. Returned range is [start,end), inclusive for
+// start and exclusive for end.
+func syncBins(peerPO, depth, max int) (start, end int) {
+ if peerPO < depth {
+ // subscribe only to peerPO bin if it is not
+ // in the nearest neighbourhood
+ return peerPO, peerPO + 1
+ }
+ // subscribe from depth to max bin if the peer
+ // is in the nearest neighbourhood
+ return depth, max + 1
+}
+
+// intRange returns the slice of integers [start,end). The start
+// is inclusive and the end is not.
+func intRange(start, end int) (r []int) {
+ for i := start; i < end; i++ {
+ r = append(r, i)
+ }
+ return r
}
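
syncSubscriptionsDiff and syncBins carry the bin-range arithmetic for subscription updates. A worked example, with values mirroring the test table in the new peer_test.go below and MaxProxDisplay assumed to be 16:

// peer at proximity order 4, depth moving from 0 to 4
subBins, quitBins := syncSubscriptionsDiff(4, 0, 4, 16)
fmt.Println(subBins)  // []
fmt.Println(quitBins) // [0 1 2 3]

// with no previous depth (prevDepth < 0) the whole new range is subscribed
subBins, quitBins = syncSubscriptionsDiff(4, -1, 4, 16)
fmt.Println(subBins)  // [4 5 6 7 8 9 10 11 12 13 14 15 16]
fmt.Println(quitBins) // []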
diff --git a/swarm/network/stream/peer_test.go b/swarm/network/stream/peer_test.go
new file mode 100644
index 000000000..98c5cc010
--- /dev/null
+++ b/swarm/network/stream/peer_test.go
@@ -0,0 +1,309 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package stream
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+ "github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/network/simulation"
+ "github.com/ethereum/go-ethereum/swarm/state"
+)
+
+// TestSyncSubscriptionsDiff validates the output of syncSubscriptionsDiff
+// function for various arguments.
+func TestSyncSubscriptionsDiff(t *testing.T) {
+ max := network.NewKadParams().MaxProxDisplay
+ for _, tc := range []struct {
+ po, prevDepth, newDepth int
+ subBins, quitBins []int
+ }{
+ {
+ po: 0, prevDepth: -1, newDepth: 0,
+ subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 1, prevDepth: -1, newDepth: 0,
+ subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 2, prevDepth: -1, newDepth: 0,
+ subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 0, prevDepth: -1, newDepth: 1,
+ subBins: []int{0},
+ },
+ {
+ po: 1, prevDepth: -1, newDepth: 1,
+ subBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 2, prevDepth: -1, newDepth: 2,
+ subBins: []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 3, prevDepth: -1, newDepth: 2,
+ subBins: []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 1, prevDepth: -1, newDepth: 2,
+ subBins: []int{1},
+ },
+ {
+ po: 0, prevDepth: 0, newDepth: 0, // 0-16 -> 0-16
+ },
+ {
+ po: 1, prevDepth: 0, newDepth: 0, // 0-16 -> 0-16
+ },
+ {
+ po: 0, prevDepth: 0, newDepth: 1, // 0-16 -> 0
+ quitBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 0, prevDepth: 0, newDepth: 2, // 0-16 -> 0
+ quitBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 1, prevDepth: 0, newDepth: 1, // 0-16 -> 1-16
+ quitBins: []int{0},
+ },
+ {
+ po: 1, prevDepth: 1, newDepth: 0, // 1-16 -> 0-16
+ subBins: []int{0},
+ },
+ {
+ po: 4, prevDepth: 0, newDepth: 1, // 0-16 -> 1-16
+ quitBins: []int{0},
+ },
+ {
+ po: 4, prevDepth: 0, newDepth: 4, // 0-16 -> 4-16
+ quitBins: []int{0, 1, 2, 3},
+ },
+ {
+ po: 4, prevDepth: 0, newDepth: 5, // 0-16 -> 4
+ quitBins: []int{0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 4, prevDepth: 5, newDepth: 0, // 4 -> 0-16
+ subBins: []int{0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ },
+ {
+ po: 4, prevDepth: 5, newDepth: 6, // 4 -> 4
+ },
+ } {
+ subBins, quitBins := syncSubscriptionsDiff(tc.po, tc.prevDepth, tc.newDepth, max)
+ if fmt.Sprint(subBins) != fmt.Sprint(tc.subBins) {
+ t.Errorf("po: %v, prevDepth: %v, newDepth: %v: got subBins %v, want %v", tc.po, tc.prevDepth, tc.newDepth, subBins, tc.subBins)
+ }
+ if fmt.Sprint(quitBins) != fmt.Sprint(tc.quitBins) {
+ t.Errorf("po: %v, prevDepth: %v, newDepth: %v: got quitBins %v, want %v", tc.po, tc.prevDepth, tc.newDepth, quitBins, tc.quitBins)
+ }
+ }
+}
+
+// TestUpdateSyncingSubscriptions validates that syncing subscriptions are correctly
+// made on initial node connections and that subscriptions are correctly changed
+// when kademlia neighbourhood depth is changed by connecting more nodes.
+func TestUpdateSyncingSubscriptions(t *testing.T) {
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
+ if err != nil {
+ return nil, nil, err
+ }
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ SyncUpdateDelay: 100 * time.Millisecond,
+ Syncing: SyncingAutoSubscribe,
+ }, nil)
+ cleanup = func() {
+ r.Close()
+ clean()
+ }
+ bucket.Store("bzz-address", addr)
+ return r, cleanup, nil
+ },
+ })
+ defer sim.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
+ defer cancel()
+
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
+ // initial nodes, first one as pivot center of the star
+ ids, err := sim.AddNodesAndConnectStar(10)
+ if err != nil {
+ return err
+ }
+
+ // pivot values
+ pivotRegistryID := ids[0]
+ pivotRegistry := sim.Service("streamer", pivotRegistryID).(*Registry)
+ pivotKademlia := pivotRegistry.delivery.kad
+ // nodes proximities from the pivot node
+ nodeProximities := make(map[string]int)
+ for _, id := range ids[1:] {
+ bzzAddr, ok := sim.NodeItem(id, "bzz-address")
+ if !ok {
+ t.Fatal("no bzz address for node")
+ }
+ nodeProximities[id.String()] = chunk.Proximity(pivotKademlia.BaseAddr(), bzzAddr.(*network.BzzAddr).Over())
+ }
+ // wait until sync subscriptions are done for all nodes
+ waitForSubscriptions(t, pivotRegistry, ids[1:]...)
+
+ // check initial sync streams
+ err = checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
+ if err != nil {
+ return err
+ }
+
+ // add more nodes until the depth is changed
+ prevDepth := pivotKademlia.NeighbourhoodDepth()
+ var noDepthChangeChecked bool // true if there was a check while the depth was unchanged
+ for {
+ ids, err := sim.AddNodes(5)
+ if err != nil {
+ return err
+ }
+ // add new nodes to sync subscriptions check
+ for _, id := range ids {
+ bzzAddr, ok := sim.NodeItem(id, "bzz-address")
+ if !ok {
+ t.Fatal("no bzz address for node")
+ }
+ nodeProximities[id.String()] = chunk.Proximity(pivotKademlia.BaseAddr(), bzzAddr.(*network.BzzAddr).Over())
+ }
+ err = sim.Net.ConnectNodesStar(ids, pivotRegistryID)
+ if err != nil {
+ return err
+ }
+ waitForSubscriptions(t, pivotRegistry, ids...)
+
+ newDepth := pivotKademlia.NeighbourhoodDepth()
+ // depth is not changed, check if streams are still correct
+ if newDepth == prevDepth {
+ err = checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
+ if err != nil {
+ return err
+ }
+ noDepthChangeChecked = true
+ }
+ // do the final check when depth is changed and
+ // there has been at least one check
+ // for the case when depth is not changed
+ if newDepth != prevDepth && noDepthChangeChecked {
+ // check sync streams for changed depth
+ return checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
+ }
+ prevDepth = newDepth
+ }
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+}
+
+// waitForSubscriptions is a test helper function that blocks until
+// stream server subscriptions are established on the provided registry
+// to the nodes with provided IDs.
+func waitForSubscriptions(t *testing.T, r *Registry, ids ...enode.ID) {
+ t.Helper()
+
+ for retries := 0; retries < 100; retries++ {
+ subs := r.api.GetPeerServerSubscriptions()
+ if allSubscribed(subs, ids) {
+ return
+ }
+ time.Sleep(50 * time.Millisecond)
+ }
+ t.Fatalf("missing subscriptions")
+}
+
+// allSubscribed returns true if nodes with ids have subscriptions
+// in provided subs map.
+func allSubscribed(subs map[string][]string, ids []enode.ID) bool {
+ for _, id := range ids {
+ if s, ok := subs[id.String()]; !ok || len(s) == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// checkSyncStreamsWithRetry calls checkSyncStreams with retries.
+func checkSyncStreamsWithRetry(r *Registry, nodeProximities map[string]int) (err error) {
+ for retries := 0; retries < 5; retries++ {
+ err = checkSyncStreams(r, nodeProximities)
+ if err == nil {
+ return nil
+ }
+ time.Sleep(500 * time.Millisecond)
+ }
+ return err
+}
+
+// checkSyncStreams validates that the registry contains the expected sync
+// subscriptions to nodes whose proximities are given in the nodeProximities map.
+func checkSyncStreams(r *Registry, nodeProximities map[string]int) error {
+ depth := r.delivery.kad.NeighbourhoodDepth()
+ maxPO := r.delivery.kad.MaxProxDisplay
+ for id, po := range nodeProximities {
+ wantStreams := syncStreams(po, depth, maxPO)
+ gotStreams := nodeStreams(r, id)
+
+ if r.getPeer(enode.HexID(id)) == nil {
+ // ignore removed peer
+ continue
+ }
+
+ if !reflect.DeepEqual(gotStreams, wantStreams) {
+ return fmt.Errorf("node %s got streams %v, want %v", id, gotStreams, wantStreams)
+ }
+ }
+ return nil
+}
+
+// syncStreams returns expected sync streams that need to be
+// established between a node with kademlia neighbourhood depth
+// and a node with proximity order po.
+func syncStreams(po, depth, maxPO int) (streams []string) {
+ start, end := syncBins(po, depth, maxPO)
+ for bin := start; bin < end; bin++ {
+ streams = append(streams, NewStream("SYNC", FormatSyncBinKey(uint8(bin)), false).String())
+ streams = append(streams, NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true).String())
+ }
+ return streams
+}
+
+// nodeStreams returns stream server subscriptions on a registry
+// to the peer with provided id.
+func nodeStreams(r *Registry, id string) []string {
+ streams := r.api.GetPeerServerSubscriptions()[id]
+ sort.Strings(streams)
+ return streams
+}
diff --git a/swarm/network/stream/snapshot_retrieval_test.go b/swarm/network/stream/snapshot_retrieval_test.go
index 2957999f8..e34f87951 100644
--- a/swarm/network/stream/snapshot_retrieval_test.go
+++ b/swarm/network/stream/snapshot_retrieval_test.go
@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
@@ -118,7 +119,6 @@ var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
}
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Retrieval: RetrievalEnabled,
Syncing: SyncingAutoSubscribe,
SyncUpdateDelay: syncUpdateDelay,
}, nil)
@@ -278,8 +278,8 @@ func runRetrievalTest(t *testing.T, chunkCount int, nodeCount int) error {
if !ok {
return fmt.Errorf("No localstore")
}
- lstore := item.(*storage.LocalStore)
- conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
+ store := item.(chunk.Store)
+ conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, store)
if err != nil {
return err
}
diff --git a/swarm/network/stream/snapshot_sync_test.go b/swarm/network/stream/snapshot_sync_test.go
index ce1e69db2..da4ff673b 100644
--- a/swarm/network/stream/snapshot_sync_test.go
+++ b/swarm/network/stream/snapshot_sync_test.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/pot"
@@ -117,7 +118,6 @@ var simServiceMap = map[string]simulation.ServiceFunc{
store := state.NewInmemoryStore()
r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
- Retrieval: RetrievalDisabled,
Syncing: SyncingAutoSubscribe,
SyncUpdateDelay: 3 * time.Second,
}, nil)
@@ -190,10 +190,10 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
node := sim.Net.GetRandomUpNode()
item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
if !ok {
- return fmt.Errorf("No localstore")
+ return errors.New("no store in simulation bucket")
}
- lstore := item.(*storage.LocalStore)
- hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
+ store := item.(chunk.Store)
+ hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, store)
if err != nil {
return err
}
@@ -221,25 +221,25 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
localChunks := conf.idToChunksMap[id]
for _, ch := range localChunks {
//get the real chunk by the index in the index array
- chunk := conf.hashes[ch]
- log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
+ ch := conf.hashes[ch]
+ log.Trace("node has chunk", "address", ch)
//check if the expected chunk is indeed in the localstore
var err error
if *useMockStore {
//use the globalStore if the mockStore should be used; in that case,
//the complete localStore stack is bypassed for getting the chunk
- _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
+ _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), ch)
} else {
//use the actual localstore
item, ok := sim.NodeItem(id, bucketKeyStore)
if !ok {
- return fmt.Errorf("Error accessing localstore")
+ return errors.New("no store in simulation bucket")
}
- lstore := item.(*storage.LocalStore)
- _, err = lstore.Get(ctx, chunk)
+ store := item.(chunk.Store)
+ _, err = store.Get(ctx, chunk.ModeGetLookup, ch)
}
if err != nil {
- log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
+ log.Debug("chunk not found", "address", ch.Hex(), "node", id)
// Do not get crazy with logging the warn message
time.Sleep(500 * time.Millisecond)
continue REPEAT
@@ -247,10 +247,10 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
evt := &simulations.Event{
Type: EventTypeChunkArrived,
Node: sim.Net.GetNode(id),
- Data: chunk.String(),
+ Data: ch.String(),
}
sim.Net.Events().Send(evt)
- log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
+ log.Trace("chunk found", "address", ch.Hex(), "node", id)
}
}
return nil
@@ -296,9 +296,9 @@ func mapKeysToNodes(conf *synctestConfig) {
}
//upload a file(chunks) to a single local node store
-func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
+func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, store chunk.Store) ([]storage.Address, error) {
log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
- fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
+ fileStore := storage.NewFileStore(store, storage.NewFileStoreParams(), chunk.NewTags())
size := chunkSize
var rootAddrs []storage.Address
for i := 0; i < chunkCount; i++ {
diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go
index 1038e52d0..9cdf5c04b 100644
--- a/swarm/network/stream/stream.go
+++ b/swarm/network/stream/stream.go
@@ -18,7 +18,6 @@ package stream
import (
"context"
- "errors"
"fmt"
"math"
"reflect"
@@ -49,7 +48,6 @@ const (
// Enumerate options for syncing and retrieval
type SyncingOption int
-type RetrievalOption int
// Syncing options
const (
@@ -61,17 +59,6 @@ const (
SyncingAutoSubscribe
)
-const (
- // Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only)
- RetrievalDisabled RetrievalOption = iota
- // Only the client side of the retrieve request is registered.
- // (light nodes do not serve retrieve requests)
- // once the client is registered, subscription to retrieve request stream is always sent
- RetrievalClientOnly
- // Both client and server funcs are registered, subscribe sent automatically
- RetrievalEnabled
-)
-
// subscriptionFunc is used to determine what to do in order to perform subscriptions
// usually we would start to really subscribe to nodes, but for tests other functionality may be needed
// (see TestRequestPeerSubscriptions in streamer_test.go)
@@ -79,59 +66,58 @@ var subscriptionFunc = doRequestSubscription
// Registry registry for outgoing and incoming streamer constructors
type Registry struct {
- addr enode.ID
- api *API
- skipCheck bool
- clientMu sync.RWMutex
- serverMu sync.RWMutex
- peersMu sync.RWMutex
- serverFuncs map[string]func(*Peer, string, bool) (Server, error)
- clientFuncs map[string]func(*Peer, string, bool) (Client, error)
- peers map[enode.ID]*Peer
- delivery *Delivery
- intervalsStore state.Store
- autoRetrieval bool // automatically subscribe to retrieve request stream
- maxPeerServers int
- spec *protocols.Spec //this protocol's spec
- balance protocols.Balance //implements protocols.Balance, for accounting
- prices protocols.Prices //implements protocols.Prices, provides prices to accounting
- quit chan struct{} // terminates registry goroutines
+ addr enode.ID
+ api *API
+ skipCheck bool
+ clientMu sync.RWMutex
+ serverMu sync.RWMutex
+ peersMu sync.RWMutex
+ serverFuncs map[string]func(*Peer, string, bool) (Server, error)
+ clientFuncs map[string]func(*Peer, string, bool) (Client, error)
+ peers map[enode.ID]*Peer
+ delivery *Delivery
+ intervalsStore state.Store
+ maxPeerServers int
+ spec *protocols.Spec //this protocol's spec
+ balance protocols.Balance //implements protocols.Balance, for accounting
+ prices protocols.Prices //implements protocols.Prices, provides prices to accounting
+ quit chan struct{} // terminates registry goroutines
+ syncMode SyncingOption
+ syncUpdateDelay time.Duration
}
// RegistryOptions holds optional values for NewRegistry constructor.
type RegistryOptions struct {
SkipCheck bool
- Syncing SyncingOption // Defines syncing behavior
- Retrieval RetrievalOption // Defines retrieval behavior
+ Syncing SyncingOption // Defines syncing behavior
SyncUpdateDelay time.Duration
MaxPeerServers int // The limit of servers for each peer in registry
}
// NewRegistry is Streamer constructor
-func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
+func NewRegistry(localID enode.ID, delivery *Delivery, netStore *storage.NetStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
if options == nil {
options = &RegistryOptions{}
}
if options.SyncUpdateDelay <= 0 {
options.SyncUpdateDelay = 15 * time.Second
}
- // check if retrieval has been disabled
- retrieval := options.Retrieval != RetrievalDisabled
quit := make(chan struct{})
streamer := &Registry{
- addr: localID,
- skipCheck: options.SkipCheck,
- serverFuncs: make(map[string]func(*Peer, string, bool) (Server, error)),
- clientFuncs: make(map[string]func(*Peer, string, bool) (Client, error)),
- peers: make(map[enode.ID]*Peer),
- delivery: delivery,
- intervalsStore: intervalsStore,
- autoRetrieval: retrieval,
- maxPeerServers: options.MaxPeerServers,
- balance: balance,
- quit: quit,
+ addr: localID,
+ skipCheck: options.SkipCheck,
+ serverFuncs: make(map[string]func(*Peer, string, bool) (Server, error)),
+ clientFuncs: make(map[string]func(*Peer, string, bool) (Client, error)),
+ peers: make(map[enode.ID]*Peer),
+ delivery: delivery,
+ intervalsStore: intervalsStore,
+ maxPeerServers: options.MaxPeerServers,
+ balance: balance,
+ quit: quit,
+ syncUpdateDelay: options.SyncUpdateDelay,
+ syncMode: options.Syncing,
}
streamer.setupSpec()
@@ -139,124 +125,10 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy
streamer.api = NewAPI(streamer)
delivery.getPeer = streamer.getPeer
- // if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only)
- if options.Retrieval == RetrievalEnabled {
- streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) {
- if !live {
- return nil, errors.New("only live retrieval requests supported")
- }
- return NewSwarmChunkServer(delivery.chunkStore), nil
- })
- }
-
- // if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests)
- if options.Retrieval != RetrievalDisabled {
- streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
- return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live))
- })
- }
-
// If syncing is not disabled, the syncing functions are registered (both client and server)
if options.Syncing != SyncingDisabled {
- RegisterSwarmSyncerServer(streamer, syncChunkStore)
- RegisterSwarmSyncerClient(streamer, syncChunkStore)
- }
-
- // if syncing is set to automatically subscribe to the syncing stream, start the subscription process
- if options.Syncing == SyncingAutoSubscribe {
- // latestIntC function ensures that
- // - receiving from the in chan is not blocked by processing inside the for loop
- // - the latest int value is delivered to the loop after the processing is done
- // In context of NeighbourhoodDepthC:
- // after the syncing is done updating inside the loop, we do not need to update on the intermediate
- // depth changes, only to the latest one
- latestIntC := func(in <-chan int) <-chan int {
- out := make(chan int, 1)
-
- go func() {
- defer close(out)
-
- for {
- select {
- case i, ok := <-in:
- if !ok {
- return
- }
- select {
- case <-out:
- default:
- }
- out <- i
- case <-quit:
- return
- }
- }
- }()
-
- return out
- }
-
- kad := streamer.delivery.kad
- // get notification channels from Kademlia before returning
- // from this function to avoid race with Close method and
- // the goroutine created below
- depthC := latestIntC(kad.NeighbourhoodDepthC())
- addressBookSizeC := latestIntC(kad.AddrCountC())
-
- go func() {
- // wait for kademlia table to be healthy
- // but return if Registry is closed before
- select {
- case <-time.After(options.SyncUpdateDelay):
- case <-quit:
- return
- }
-
- // initial requests for syncing subscription to peers
- streamer.updateSyncing()
-
- for depth := range depthC {
- log.Debug("Kademlia neighbourhood depth change", "depth", depth)
-
- // Prevent too early sync subscriptions by waiting until there are no
- // new peers connecting. Sync streams updating will be done after no
- // peers are connected for at least SyncUpdateDelay period.
- timer := time.NewTimer(options.SyncUpdateDelay)
- // Hard limit to sync update delay, preventing long delays
- // on a very dynamic network
- maxTimer := time.NewTimer(3 * time.Minute)
- loop:
- for {
- select {
- case <-maxTimer.C:
- // force syncing update when a hard timeout is reached
- log.Trace("Sync subscriptions update on hard timeout")
- // request for syncing subscription to new peers
- streamer.updateSyncing()
- break loop
- case <-timer.C:
- // start syncing as no new peers has been added to kademlia
- // for some time
- log.Trace("Sync subscriptions update")
- // request for syncing subscription to new peers
- streamer.updateSyncing()
- break loop
- case size := <-addressBookSizeC:
- log.Trace("Kademlia address book size changed on depth change", "size", size)
- // new peers has been added to kademlia,
- // reset the timer to prevent early sync subscriptions
- if !timer.Stop() {
- <-timer.C
- }
- timer.Reset(options.SyncUpdateDelay)
- case <-quit:
- break loop
- }
- }
- timer.Stop()
- maxTimer.Stop()
- }
- }()
+ RegisterSwarmSyncerServer(streamer, netStore)
+ RegisterSwarmSyncerClient(streamer, netStore)
}
return streamer
@@ -381,7 +253,7 @@ func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8
}
log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)
- return peer.SendPriority(context.TODO(), msg, priority)
+ return peer.Send(context.TODO(), msg)
}
func (r *Registry) Unsubscribe(peerId enode.ID, s Stream) error {
@@ -422,8 +294,7 @@ func (r *Registry) Quit(peerId enode.ID, s Stream) error {
func (r *Registry) Close() error {
// Stop sending neighborhood depth change and address count
// change from Kademlia that were initiated in NewRegistry constructor.
- r.delivery.kad.CloseNeighbourhoodDepthC()
- r.delivery.kad.CloseAddrCountC()
+ r.delivery.Close()
close(r.quit)
return r.intervalsStore.Close()
}
@@ -438,6 +309,7 @@ func (r *Registry) getPeer(peerId enode.ID) *Peer {
func (r *Registry) setPeer(peer *Peer) {
r.peersMu.Lock()
r.peers[peer.ID()] = peer
+ metrics.GetOrRegisterCounter("registry.setpeer", nil).Inc(1)
metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
r.peersMu.Unlock()
}
@@ -445,6 +317,7 @@ func (r *Registry) setPeer(peer *Peer) {
func (r *Registry) deletePeer(peer *Peer) {
r.peersMu.Lock()
delete(r.peers, peer.ID())
+ metrics.GetOrRegisterCounter("registry.deletepeer", nil).Inc(1)
metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
r.peersMu.Unlock()
}
@@ -458,132 +331,31 @@ func (r *Registry) peersCount() (c int) {
// Run protocol run function
func (r *Registry) Run(p *network.BzzPeer) error {
- sp := NewPeer(p.Peer, r)
+ sp := NewPeer(p, r)
r.setPeer(sp)
+
+ if r.syncMode == SyncingAutoSubscribe {
+ go sp.runUpdateSyncing()
+ }
+
defer r.deletePeer(sp)
defer close(sp.quit)
defer sp.close()
- if r.autoRetrieval && !p.LightNode {
- err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", true), nil, Top)
- if err != nil {
- return err
- }
- }
-
return sp.Run(sp.HandleMsg)
}
-// updateSyncing subscribes to SYNC streams by iterating over the
-// kademlia connections and bins. If there are existing SYNC streams
-// and they are no longer required after iteration, request to Quit
-// them will be send to appropriate peers.
-func (r *Registry) updateSyncing() {
- kad := r.delivery.kad
- // map of all SYNC streams for all peers
- // used at the and of the function to remove servers
- // that are not needed anymore
- subs := make(map[enode.ID]map[Stream]struct{})
- r.peersMu.RLock()
- for id, peer := range r.peers {
- peer.serverMu.RLock()
- for stream := range peer.servers {
- if stream.Name == "SYNC" {
- if _, ok := subs[id]; !ok {
- subs[id] = make(map[Stream]struct{})
- }
- subs[id][stream] = struct{}{}
- }
- }
- peer.serverMu.RUnlock()
- }
- r.peersMu.RUnlock()
-
- // start requesting subscriptions from peers
- r.requestPeerSubscriptions(kad, subs)
-
- // remove SYNC servers that do not need to be subscribed
- for id, streams := range subs {
- if len(streams) == 0 {
- continue
- }
- peer := r.getPeer(id)
- if peer == nil {
- continue
- }
- for stream := range streams {
- log.Debug("Remove sync server", "peer", id, "stream", stream)
- err := r.Quit(peer.ID(), stream)
- if err != nil && err != p2p.ErrShuttingDown {
- log.Error("quit", "err", err, "peer", peer.ID(), "stream", stream)
- }
- }
- }
-}
-
-// requestPeerSubscriptions calls on each live peer in the kademlia table
-// and sends a `RequestSubscription` to peers according to their bin
-// and their relationship with kademlia's depth.
-// Also check `TestRequestPeerSubscriptions` in order to understand the
-// expected behavior.
-// The function expects:
-// * the kademlia
-// * a map of subscriptions
-// * the actual function to subscribe
-// (in case of the test, it doesn't do real subscriptions)
-func (r *Registry) requestPeerSubscriptions(kad *network.Kademlia, subs map[enode.ID]map[Stream]struct{}) {
-
- var startPo int
- var endPo int
- var ok bool
-
- // kademlia's depth
- kadDepth := kad.NeighbourhoodDepth()
- // request subscriptions for all nodes and bins
- // nil as base takes the node's base; we need to pass 255 as `EachConn` runs
- // from deepest bins backwards
- kad.EachConn(nil, 255, func(p *network.Peer, po int) bool {
- // nodes that do not provide stream protocol
- // should not be subscribed, e.g. bootnodes
- if !p.HasCap("stream") {
- return true
- }
- //if the peer's bin is shallower than the kademlia depth,
- //only the peer's bin should be subscribed
- if po < kadDepth {
- startPo = po
- endPo = po
- } else {
- //if the peer's bin is equal or deeper than the kademlia depth,
- //each bin from the depth up to k.MaxProxDisplay should be subscribed
- startPo = kadDepth
- endPo = kad.MaxProxDisplay
- }
-
- for bin := startPo; bin <= endPo; bin++ {
- //do the actual subscription
- ok = subscriptionFunc(r, p, uint8(bin), subs)
- }
- return ok
- })
-}
-
// doRequestSubscription sends the actual RequestSubscription to the peer
-func doRequestSubscription(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
- log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", p.ID(), "bin", bin)
+func doRequestSubscription(r *Registry, id enode.ID, bin uint8) error {
+ log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", id, "bin", bin)
// bin is always less then 256 and it is safe to convert it to type uint8
stream := NewStream("SYNC", FormatSyncBinKey(bin), true)
- if streams, ok := subs[p.ID()]; ok {
- // delete live and history streams from the map, so that it won't be removed with a Quit request
- delete(streams, stream)
- delete(streams, getHistoryStream(stream))
- }
- err := r.RequestSubscription(p.ID(), stream, NewRange(0, 0), High)
+ err := r.RequestSubscription(id, stream, NewRange(0, 0), High)
if err != nil {
- log.Debug("Request subscription", "err", err, "peer", p.ID(), "stream", stream)
- return false
+ log.Debug("Request subscription", "err", err, "peer", id, "stream", stream)
+ return err
}
- return true
+ return nil
}
func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
@@ -619,24 +391,66 @@ func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
return p.handleUnsubscribeMsg(msg)
case *OfferedHashesMsg:
- return p.handleOfferedHashesMsg(ctx, msg)
+ go func() {
+ err := p.handleOfferedHashesMsg(ctx, msg)
+ if err != nil {
+ log.Error(err.Error())
+ p.Drop()
+ }
+ }()
+ return nil
case *TakeoverProofMsg:
- return p.handleTakeoverProofMsg(ctx, msg)
+ go func() {
+ err := p.handleTakeoverProofMsg(ctx, msg)
+ if err != nil {
+ log.Error(err.Error())
+ p.Drop()
+ }
+ }()
+ return nil
case *WantedHashesMsg:
- return p.handleWantedHashesMsg(ctx, msg)
+ go func() {
+ err := p.handleWantedHashesMsg(ctx, msg)
+ if err != nil {
+ log.Error(err.Error())
+ p.Drop()
+ }
+ }()
+ return nil
case *ChunkDeliveryMsgRetrieval:
// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
- return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+ go func() {
+ err := p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+ if err != nil {
+ log.Error(err.Error())
+ p.Drop()
+ }
+ }()
+ return nil
case *ChunkDeliveryMsgSyncing:
// handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
- return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+ go func() {
+ err := p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg)))
+ if err != nil {
+ log.Error(err.Error())
+ p.Drop()
+ }
+ }()
+ return nil
case *RetrieveRequestMsg:
- return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)
+ go func() {
+ err := p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)
+ if err != nil {
+ log.Error(err.Error())
+ p.Drop()
+ }
+ }()
+ return nil
case *RequestSubscriptionMsg:
return p.handleRequestSubscription(ctx, msg)
@@ -767,7 +581,7 @@ func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error
return err
}
- if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil {
+ if err := p.Send(context.TODO(), tp); err != nil {
return err
}
if c.to > 0 && tp.Takeover.End >= c.to {
@@ -969,15 +783,13 @@ func (api *API) UnsubscribeStream(peerId enode.ID, s Stream) error {
}
/*
-GetPeerSubscriptions is a API function which allows to query a peer for stream subscriptions it has.
+GetPeerServerSubscriptions is an API function that allows querying a peer for the stream subscriptions it has.
It can be called via RPC.
It returns a map of node IDs with an array of string representations of Stream objects.
*/
-func (api *API) GetPeerSubscriptions() map[string][]string {
- //create the empty map
+func (api *API) GetPeerServerSubscriptions() map[string][]string {
pstreams := make(map[string][]string)
- //iterate all streamer peers
api.streamer.peersMu.RLock()
defer api.streamer.peersMu.RUnlock()
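
The HandleMsg change above moves stream message handling into per-message goroutines and drops the peer when a handler returns an error. A minimal sketch of that pattern; the types and names here are stand-ins, not the stream package's API:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// toyPeer is a stand-in for the stream Peer type.
type toyPeer struct {
	id      string
	dropped bool
}

func (p *toyPeer) Drop() { p.dropped = true }

// handleAsync runs a message handler in its own goroutine and drops the
// peer if the handler fails, mirroring the new HandleMsg flow.
func handleAsync(ctx context.Context, p *toyPeer, wg *sync.WaitGroup, handler func(context.Context) error) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := handler(ctx); err != nil {
			fmt.Println("handler error:", err)
			p.Drop()
		}
	}()
}

func main() {
	var wg sync.WaitGroup
	p := &toyPeer{id: "node-1"}
	handleAsync(context.Background(), p, &wg, func(ctx context.Context) error {
		return errors.New("invalid hashes length")
	})
	wg.Wait()
	fmt.Println("peer dropped:", p.dropped) // true
}
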
diff --git a/swarm/network/stream/streamer_test.go b/swarm/network/stream/streamer_test.go
index bdd3087bb..767112b2b 100644
--- a/swarm/network/stream/streamer_test.go
+++ b/swarm/network/stream/streamer_test.go
@@ -28,9 +28,6 @@ import (
"testing"
"time"
- "github.com/ethereum/go-ethereum/swarm/testutil"
-
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -39,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
+ "github.com/ethereum/go-ethereum/swarm/testutil"
"golang.org/x/crypto/sha3"
)
@@ -539,7 +537,7 @@ func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) {
t.Fatal(err)
}
- expectedError := errors.New("Message handler error: (msg code 1): error invalid hashes length (len: 40)")
+ expectedError := errors.New("subprotocol error")
if err := tester.TestDisconnected(&p2ptest.Disconnect{Peer: node.ID(), Error: expectedError}); err != nil {
t.Fatal(err)
}
@@ -779,7 +777,6 @@ func TestStreamerRequestSubscriptionQuitMsgExchange(t *testing.T) {
func TestMaxPeerServersWithUnsubscribe(t *testing.T) {
var maxPeerServers = 6
tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
- Retrieval: RetrievalDisabled,
Syncing: SyncingDisabled,
MaxPeerServers: maxPeerServers,
})
@@ -940,8 +937,7 @@ func TestMaxPeerServersWithoutUnsubscribe(t *testing.T) {
//`Price` interface implementation
func TestHasPriceImplementation(t *testing.T) {
_, r, _, teardown, err := newStreamerTester(&RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingDisabled,
+ Syncing: SyncingDisabled,
})
if err != nil {
t.Fatal(err)
@@ -967,164 +963,8 @@ func TestHasPriceImplementation(t *testing.T) {
}
}
-/*
-TestRequestPeerSubscriptions is a unit test for stream's pull sync subscriptions.
-
-The test does:
- * assign each connected peer to a bin map
- * build up a known kademlia in advance
- * run the EachConn function, which returns supposed subscription bins
- * store all supposed bins per peer in a map
- * check that all peers have the expected subscriptions
-
-This kad table and its peers are copied from network.TestKademliaCase1,
-it represents an edge case but for the purpose of testing the
-syncing subscriptions it is just fine.
-
-Addresses used in this test are discovered as part of the simulation network
-in higher level tests for streaming. They were generated randomly.
-
-The resulting kademlia looks like this:
-=========================================================================
-Fri Dec 21 20:02:39 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
-population: 12 (12), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
-000 2 8196 835f | 2 8196 (0) 835f (0)
-001 2 2690 28f0 | 2 2690 (0) 28f0 (0)
-002 2 4d72 4a45 | 2 4d72 (0) 4a45 (0)
-003 1 646e | 1 646e (0)
-004 3 769c 76d1 7656 | 3 769c (0) 76d1 (0) 7656 (0)
-============ DEPTH: 5 ==========================================
-005 1 7a48 | 1 7a48 (0)
-006 1 7cbd | 1 7cbd (0)
-007 0 | 0
-008 0 | 0
-009 0 | 0
-010 0 | 0
-011 0 | 0
-012 0 | 0
-013 0 | 0
-014 0 | 0
-015 0 | 0
-=========================================================================
-*/
-func TestRequestPeerSubscriptions(t *testing.T) {
- // the pivot address; this is the actual kademlia node
- pivotAddr := "7efef1c41d77f843ad167be95f6660567eb8a4a59f39240000cce2e0d65baf8e"
-
- // a map of bin number to addresses from the given kademlia
- binMap := make(map[int][]string)
- binMap[0] = []string{
- "835fbbf1d16ba7347b6e2fc552d6e982148d29c624ea20383850df3c810fa8fc",
- "81968a2d8fb39114342ee1da85254ec51e0608d7f0f6997c2a8354c260a71009",
- }
- binMap[1] = []string{
- "28f0bc1b44658548d6e05dd16d4c2fe77f1da5d48b6774bc4263b045725d0c19",
- "2690a910c33ee37b91eb6c4e0731d1d345e2dc3b46d308503a6e85bbc242c69e",
- }
- binMap[2] = []string{
- "4a45f1fc63e1a9cb9dfa44c98da2f3d20c2923e5d75ff60b2db9d1bdb0c54d51",
- "4d72a04ddeb851a68cd197ef9a92a3e2ff01fbbff638e64929dd1a9c2e150112",
- }
- binMap[3] = []string{
- "646e9540c84f6a2f9cf6585d45a4c219573b4fd1b64a3c9a1386fc5cf98c0d4d",
- }
- binMap[4] = []string{
- "7656caccdc79cd8d7ce66d415cc96a718e8271c62fb35746bfc2b49faf3eebf3",
- "76d1e83c71ca246d042e37ff1db181f2776265fbcfdc890ce230bfa617c9c2f0",
- "769ce86aa90b518b7ed382f9fdacfbed93574e18dc98fe6c342e4f9f409c2d5a",
- }
- binMap[5] = []string{
- "7a48f75f8ca60487ae42d6f92b785581b40b91f2da551ae73d5eae46640e02e8",
- }
- binMap[6] = []string{
- "7cbd42350bde8e18ae5b955b5450f8e2cef3419f92fbf5598160c60fd78619f0",
- }
-
- // create the pivot's kademlia
- addr := common.FromHex(pivotAddr)
- k := network.NewKademlia(addr, network.NewKadParams())
-
- // construct the peers and the kademlia
- for _, binaddrs := range binMap {
- for _, a := range binaddrs {
- addr := common.FromHex(a)
- k.On(network.NewPeer(&network.BzzPeer{BzzAddr: &network.BzzAddr{OAddr: addr}}, k))
- }
- }
-
- // TODO: check kad table is same
- // currently k.String() prints date so it will never be the same :)
- // --> implement JSON representation of kad table
- log.Debug(k.String())
-
- // simulate that we would do subscriptions: just store the bin numbers
- fakeSubscriptions := make(map[string][]int)
- //after the test, we need to reset the subscriptionFunc to the default
- defer func() { subscriptionFunc = doRequestSubscription }()
- // define the function which should run for each connection
- // instead of doing real subscriptions, we just store the bin numbers
- subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
- // get the peer ID
- peerstr := fmt.Sprintf("%x", p.Over())
- // create the array of bins per peer
- if _, ok := fakeSubscriptions[peerstr]; !ok {
- fakeSubscriptions[peerstr] = make([]int, 0)
- }
- // store the (fake) bin subscription
- log.Debug(fmt.Sprintf("Adding fake subscription for peer %s with bin %d", peerstr, bin))
- fakeSubscriptions[peerstr] = append(fakeSubscriptions[peerstr], int(bin))
- return true
- }
- // create just a simple Registry object in order to be able to call...
- r := &Registry{}
- r.requestPeerSubscriptions(k, nil)
- // calculate the kademlia depth
- kdepth := k.NeighbourhoodDepth()
-
- // now, check that all peers have the expected (fake) subscriptions
- // iterate the bin map
- for bin, peers := range binMap {
- // for every peer...
- for _, peer := range peers {
- // ...get its (fake) subscriptions
- fakeSubsForPeer := fakeSubscriptions[peer]
- // if the peer's bin is shallower than the kademlia depth...
- if bin < kdepth {
- // (iterate all (fake) subscriptions)
- for _, subbin := range fakeSubsForPeer {
- // ...only the peer's bin should be "subscribed"
- // (and thus have only one subscription)
- if subbin != bin || len(fakeSubsForPeer) != 1 {
- t.Fatalf("Did not get expected subscription for bin < depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
- }
- }
- } else { //if the peer's bin is equal or higher than the kademlia depth...
- // (iterate all (fake) subscriptions)
- for i, subbin := range fakeSubsForPeer {
- // ...each bin from the peer's bin number up to k.MaxProxDisplay should be "subscribed"
- // as we start from depth we can use the iteration index to check
- if subbin != i+kdepth {
- t.Fatalf("Did not get expected subscription for bin > depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
- }
- // the last "subscription" should be k.MaxProxDisplay
- if i == len(fakeSubsForPeer)-1 && subbin != k.MaxProxDisplay {
- t.Fatalf("Expected last subscription to be: %d, but is: %d", k.MaxProxDisplay, subbin)
- }
- }
- }
- }
- }
- // print some output
- for p, subs := range fakeSubscriptions {
- log.Debug(fmt.Sprintf("Peer %s has the following fake subscriptions: ", p))
- for _, bin := range subs {
- log.Debug(fmt.Sprintf("%d,", bin))
- }
- }
-}
-
-// TestGetSubscriptions is a unit test for the api.GetPeerSubscriptions() function
-func TestGetSubscriptions(t *testing.T) {
+// TestGetServerSubscriptions is a unit test for the api.GetPeerServerSubscriptions() function
+func TestGetServerSubscriptions(t *testing.T) {
// create an amount of dummy peers
testPeerCount := 8
// every peer will have this amount of dummy servers
@@ -1135,7 +975,7 @@ func TestGetSubscriptions(t *testing.T) {
r := &Registry{}
api := NewAPI(r)
// call once, at this point should be empty
- regs := api.GetPeerSubscriptions()
+ regs := api.GetPeerServerSubscriptions()
if len(regs) != 0 {
t.Fatal("Expected subscription count to be 0, but it is not")
}
@@ -1159,7 +999,7 @@ func TestGetSubscriptions(t *testing.T) {
r.peers = peerMap
// call the subscriptions again
- regs = api.GetPeerSubscriptions()
+ regs = api.GetPeerServerSubscriptions()
// count how many (fake) subscriptions there are
cnt := 0
for _, reg := range regs {
@@ -1175,11 +1015,11 @@ func TestGetSubscriptions(t *testing.T) {
}
/*
-TestGetSubscriptionsRPC sets up a simulation network of `nodeCount` nodes,
+TestGetServerSubscriptionsRPC sets up a simulation network of `nodeCount` nodes,
starts the simulation, waits for SyncUpdateDelay in order to kick off
stream registration, then tests that there are subscriptions.
*/
-func TestGetSubscriptionsRPC(t *testing.T) {
+func TestGetServerSubscriptionsRPC(t *testing.T) {
if testutil.RaceEnabled && os.Getenv("TRAVIS") == "true" {
t.Skip("flaky with -race on Travis")
@@ -1206,15 +1046,13 @@ func TestGetSubscriptionsRPC(t *testing.T) {
defer func() { subscriptionFunc = doRequestSubscription }()
// we use this subscriptionFunc for this test: just increases count and calls the actual subscription
- subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
+ subscriptionFunc = func(r *Registry, id enode.ID, bin uint8) error {
// syncing starts after syncUpdateDelay and loops after that Duration; we only want to count at the first iteration
// in the first iteration, subs will be empty (no existing subscriptions), thus we can use this check
// this avoids flakyness
- if len(subs) == 0 {
- expectedMsgCount.inc()
- }
- doRequestSubscription(r, p, bin, subs)
- return true
+ expectedMsgCount.inc()
+ doRequestSubscription(r, id, bin)
+ return nil
}
// create a standard sim
sim := simulation.New(map[string]simulation.ServiceFunc{
@@ -1226,7 +1064,6 @@ func TestGetSubscriptionsRPC(t *testing.T) {
// configure so that sync registrations actually happen
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Retrieval: RetrievalEnabled,
Syncing: SyncingAutoSubscribe, //enable sync registrations
SyncUpdateDelay: syncUpdateDelay,
}, nil)
@@ -1321,7 +1158,7 @@ func TestGetSubscriptionsRPC(t *testing.T) {
//ask it for subscriptions
pstreams := make(map[string][]string)
- err = client.Call(&pstreams, "stream_getPeerSubscriptions")
+ err = client.Call(&pstreams, "stream_getPeerServerSubscriptions")
if err != nil {
return fmt.Errorf("client call stream_getPeerSubscriptions: %v", err)
}
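
For reference, a minimal sketch of querying the renamed endpoint from a Go RPC client, as the test above does over the simulation's RPC connection; the IPC path here is only illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("/tmp/bzzd.ipc") // hypothetical IPC endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// map of peer ID -> list of stream server subscriptions, as returned
	// by GetPeerServerSubscriptions
	pstreams := make(map[string][]string)
	if err := client.Call(&pstreams, "stream_getPeerServerSubscriptions"); err != nil {
		log.Fatal(err)
	}
	for id, streams := range pstreams {
		fmt.Println(id, streams)
	}
}
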
diff --git a/swarm/network/stream/syncer.go b/swarm/network/stream/syncer.go
index 5f03dcff7..9bde39550 100644
--- a/swarm/network/stream/syncer.go
+++ b/swarm/network/stream/syncer.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage"
)
@@ -35,27 +36,29 @@ const (
// * live request delivery with or without checkback
// * (live/non-live historical) chunk syncing per proximity bin
type SwarmSyncerServer struct {
- po uint8
- store storage.SyncChunkStore
- quit chan struct{}
+ correlateId string //used for logging
+ po uint8
+ netStore *storage.NetStore
+ quit chan struct{}
}
// NewSwarmSyncerServer is constructor for SwarmSyncerServer
-func NewSwarmSyncerServer(po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) {
+func NewSwarmSyncerServer(po uint8, netStore *storage.NetStore, correlateId string) (*SwarmSyncerServer, error) {
return &SwarmSyncerServer{
- po: po,
- store: syncChunkStore,
- quit: make(chan struct{}),
+ correlateId: correlateId,
+ po: po,
+ netStore: netStore,
+ quit: make(chan struct{}),
}, nil
}
-func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) {
- streamer.RegisterServerFunc("SYNC", func(_ *Peer, t string, _ bool) (Server, error) {
+func RegisterSwarmSyncerServer(streamer *Registry, netStore *storage.NetStore) {
+ streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, _ bool) (Server, error) {
po, err := ParseSyncBinKey(t)
if err != nil {
return nil, err
}
- return NewSwarmSyncerServer(po, syncChunkStore)
+ return NewSwarmSyncerServer(po, netStore, p.ID().String()+"|"+string(po))
})
// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
// return NewOutgoingProvableSwarmSyncer(po, db)
@@ -69,130 +72,138 @@ func (s *SwarmSyncerServer) Close() {
// GetData retrieves the actual chunk from netstore
func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
- chunk, err := s.store.Get(ctx, storage.Address(key))
+ ch, err := s.netStore.Get(ctx, chunk.ModeGetSync, storage.Address(key))
if err != nil {
return nil, err
}
- return chunk.Data(), nil
+ return ch.Data(), nil
}
// SessionIndex returns current storage bin (po) index.
func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
- return s.store.BinIndex(s.po), nil
+ return s.netStore.LastPullSubscriptionBinID(s.po)
}
-// GetBatch retrieves the next batch of hashes from the dbstore
+// SetNextBatch retrieves the next batch of hashes from the localstore.
+// It expects a range of bin IDs, both ends inclusive, and returns a
+// concatenated byte slice of chunk addresses together with the bin IDs of the
+// first and the last chunk in that slice. The batch may contain up to
+// BatchSize chunk addresses. If at least one chunk has been added to the
+// batch and no new chunks arrive within the batchTimeout period, the batch is
+// returned. This function blocks until new chunks are received from the
+// localstore pull subscription.
func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
- var batch []byte
- i := 0
-
- var ticker *time.Ticker
- defer func() {
- if ticker != nil {
- ticker.Stop()
+ //TODO: maybe add unit test for intervals usage in netstore/localstore together with SwarmSyncerServer?
+ if from > 0 {
+ from--
+ }
+ batchStart := time.Now()
+ descriptors, stop := s.netStore.SubscribePull(context.Background(), s.po, from, to)
+ defer stop()
+
+ const batchTimeout = 2 * time.Second
+
+ var (
+ batch []byte
+ batchSize int
+ batchStartID *uint64
+ batchEndID uint64
+ timer *time.Timer
+ timerC <-chan time.Time
+ )
+
+ defer func(start time.Time) {
+ metrics.GetOrRegisterResettingTimer("syncer.set-next-batch.total-time", nil).UpdateSince(start)
+ metrics.GetOrRegisterCounter("syncer.set-next-batch.batch-size", nil).Inc(int64(batchSize))
+ if timer != nil {
+ timer.Stop()
}
- }()
- var wait bool
- for {
- if wait {
- if ticker == nil {
- ticker = time.NewTicker(1000 * time.Millisecond)
+ }(batchStart)
+
+ for iterate := true; iterate; {
+ select {
+ case d, ok := <-descriptors:
+ if !ok {
+ iterate = false
+ break
}
- select {
- case <-ticker.C:
- case <-s.quit:
- return nil, 0, 0, nil, nil
+ batch = append(batch, d.Address[:]...)
+ // This is the most naive approach to labelling the chunk as synced,
+ // allowing it to be garbage collected. A proper approach requires
+ // validating that the chunk has been successfully stored by the peer.
+ err := s.netStore.Set(context.Background(), chunk.ModeSetSync, d.Address)
+ if err != nil {
+ metrics.GetOrRegisterCounter("syncer.set-next-batch.set-sync-err", nil).Inc(1)
+ log.Debug("syncer pull subscription - err setting chunk as synced", "correlateId", s.correlateId, "err", err)
+ return nil, 0, 0, nil, err
}
- }
-
- metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
- err := s.store.Iterator(from, to, s.po, func(key storage.Address, idx uint64) bool {
- select {
- case <-s.quit:
- return false
- default:
+ batchSize++
+ if batchStartID == nil {
+ // set batch start id only if
+ // this is the first iteration
+ batchStartID = &d.BinID
}
- batch = append(batch, key[:]...)
- i++
- to = idx
- return i < BatchSize
- })
- if err != nil {
- return nil, 0, 0, nil, err
- }
- if len(batch) > 0 {
- break
+ batchEndID = d.BinID
+ if batchSize >= BatchSize {
+ iterate = false
+ metrics.GetOrRegisterCounter("syncer.set-next-batch.full-batch", nil).Inc(1)
+ log.Debug("syncer pull subscription - batch size reached", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
+ }
+ if timer == nil {
+ timer = time.NewTimer(batchTimeout)
+ } else {
+ log.Debug("syncer pull subscription - stopping timer", "correlateId", s.correlateId)
+ if !timer.Stop() {
+ <-timer.C
+ }
+ log.Debug("syncer pull subscription - channel drained, resetting timer", "correlateId", s.correlateId)
+ timer.Reset(batchTimeout)
+ }
+ timerC = timer.C
+ case <-timerC:
+ // return batch if new chunks are not
+ // received after some time
+ iterate = false
+ metrics.GetOrRegisterCounter("syncer.set-next-batch.timer-expire", nil).Inc(1)
+ log.Debug("syncer pull subscription timer expired", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
+ case <-s.quit:
+ iterate = false
+ log.Debug("syncer pull subscription - quit received", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
}
- wait = true
}
-
- log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.store.BinIndex(s.po))
- return batch, from, to, nil, nil
+ if batchStartID == nil {
+ // if batch start id is not set, return 0
+ batchStartID = new(uint64)
+ }
+ return batch, *batchStartID, batchEndID, nil, nil
}
// SwarmSyncerClient
type SwarmSyncerClient struct {
- store storage.SyncChunkStore
- peer *Peer
- stream Stream
+ netStore *storage.NetStore
+ peer *Peer
+ stream Stream
}
// NewSwarmSyncerClient is a contructor for provable data exchange syncer
-func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream) (*SwarmSyncerClient, error) {
+func NewSwarmSyncerClient(p *Peer, netStore *storage.NetStore, stream Stream) (*SwarmSyncerClient, error) {
return &SwarmSyncerClient{
- store: store,
- peer: p,
- stream: stream,
+ netStore: netStore,
+ peer: p,
+ stream: stream,
}, nil
}
-// // NewIncomingProvableSwarmSyncer is a contructor for provable data exchange syncer
-// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Address, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
-// retrieveC := make(storage.Chunk, chunksCap)
-// RunChunkRequestor(p, retrieveC)
-// storeC := make(storage.Chunk, chunksCap)
-// RunChunkStorer(store, storeC)
-// s := &SwarmSyncerClient{
-// po: po,
-// priority: priority,
-// sessionAt: sessionAt,
-// start: index,
-// end: index,
-// nextC: make(chan struct{}, 1),
-// intervals: intervals,
-// sessionRoot: sessionRoot,
-// sessionReader: chunker.Join(sessionRoot, retrieveC),
-// retrieveC: retrieveC,
-// storeC: storeC,
-// }
-// return s
-// }
-
-// // StartSyncing is called on the Peer to start the syncing process
-// // the idea is that it is called only after kademlia is close to healthy
-// func StartSyncing(s *Streamer, peerId enode.ID, po uint8, nn bool) {
-// lastPO := po
-// if nn {
-// lastPO = maxPO
-// }
-//
-// for i := po; i <= lastPO; i++ {
-// s.Subscribe(peerId, "SYNC", newSyncLabel("LIVE", po), 0, 0, High, true)
-// s.Subscribe(peerId, "SYNC", newSyncLabel("HISTORY", po), 0, 0, Mid, false)
-// }
-// }
-
// RegisterSwarmSyncerClient registers the client constructor function for
// to handle incoming sync streams
-func RegisterSwarmSyncerClient(streamer *Registry, store storage.SyncChunkStore) {
+func RegisterSwarmSyncerClient(streamer *Registry, netStore *storage.NetStore) {
streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
- return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live))
+ return NewSwarmSyncerClient(p, netStore, NewStream("SYNC", t, live))
})
}
// NeedData
func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
- return s.store.FetchFunc(ctx, key)
+ return s.netStore.FetchFunc(ctx, key)
}
// BatchDone
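
The rewritten SetNextBatch above gathers addresses from the pull subscription until either BatchSize is reached or no new chunk arrives for batchTimeout. A stripped-down sketch of that collect-until-full-or-idle loop, using illustrative types rather than the syncer's actual signatures:

package main

import (
	"fmt"
	"time"
)

// collectBatch appends items from in until batchSize items have been
// collected, the channel is closed, or no new item arrives within
// batchTimeout of the previous one.
func collectBatch(in <-chan []byte, batchSize int, batchTimeout time.Duration) [][]byte {
	var (
		batch  [][]byte
		timer  *time.Timer
		timerC <-chan time.Time
	)
	for iterate := true; iterate; {
		select {
		case item, ok := <-in:
			if !ok {
				iterate = false
				break
			}
			batch = append(batch, item)
			if len(batch) >= batchSize {
				iterate = false
			}
			// (re)start the idle timer after every received item
			if timer == nil {
				timer = time.NewTimer(batchTimeout)
			} else {
				if !timer.Stop() {
					<-timer.C
				}
				timer.Reset(batchTimeout)
			}
			timerC = timer.C
		case <-timerC:
			// no new items for batchTimeout: return what we have
			iterate = false
		}
	}
	if timer != nil {
		timer.Stop()
	}
	return batch
}

func main() {
	in := make(chan []byte, 3)
	in <- []byte("a")
	in <- []byte("b")
	close(in)
	fmt.Println(len(collectBatch(in, 128, 2*time.Second))) // 2
}
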
diff --git a/swarm/network/stream/syncer_test.go b/swarm/network/stream/syncer_test.go
index 07586714e..b787c7bb8 100644
--- a/swarm/network/stream/syncer_test.go
+++ b/swarm/network/stream/syncer_test.go
@@ -21,22 +21,20 @@ import (
"errors"
"fmt"
"io/ioutil"
- "math"
"os"
"sync"
"testing"
"time"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/ethereum/go-ethereum/swarm/storage/mock"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -55,24 +53,6 @@ func TestSyncerSimulation(t *testing.T) {
}
}
-func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
- address := common.BytesToAddress(id.Bytes())
- mockStore := globalStore.NewNodeStore(address)
- params := storage.NewDefaultLocalStoreParams()
-
- datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
- if err != nil {
- return nil, "", err
- }
- params.Init(datadir)
- params.BaseKey = addr.Over()
- lstore, err = storage.NewLocalStore(params, mockStore)
- if err != nil {
- return nil, "", err
- }
- return lstore, datadir, nil
-}
-
func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
sim := simulation.New(map[string]simulation.ServiceFunc{
@@ -103,7 +83,6 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
}
r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
- Retrieval: RetrievalDisabled,
Syncing: SyncingAutoSubscribe,
SkipCheck: skipCheck,
}, nil)
@@ -181,17 +160,32 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
if i < nodes-1 {
hashCounts[i] = hashCounts[i+1]
}
- item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
+ item, ok := sim.NodeItem(nodeIDs[i], bucketKeyStore)
if !ok {
return fmt.Errorf("No DB")
}
- netStore := item.(*storage.NetStore)
- netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
- hashes[i] = append(hashes[i], addr)
- totalHashes++
- hashCounts[i]++
- return true
- })
+ store := item.(chunk.Store)
+ until, err := store.LastPullSubscriptionBinID(po)
+ if err != nil {
+ return err
+ }
+ if until > 0 {
+ c, _ := store.SubscribePull(ctx, po, 0, until)
+ for iterate := true; iterate; {
+ select {
+ case cd, ok := <-c:
+ if !ok {
+ iterate = false
+ break
+ }
+ hashes[i] = append(hashes[i], cd.Address)
+ totalHashes++
+ hashCounts[i]++
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ }
}
var total, found int
for _, node := range nodeIDs {
@@ -200,12 +194,12 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
for j := i; j < nodes; j++ {
total += len(hashes[j])
for _, key := range hashes[j] {
- item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
+ item, ok := sim.NodeItem(nodeIDs[j], bucketKeyStore)
if !ok {
return fmt.Errorf("No DB")
}
- db := item.(*storage.NetStore)
- _, err := db.Get(ctx, key)
+ db := item.(chunk.Store)
+ _, err := db.Get(ctx, chunk.ModeGetRequest, key)
if err == nil {
found++
}
@@ -216,7 +210,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, p
if total == found && total > 0 {
return nil
}
- return fmt.Errorf("Total not equallying found: total is %d", total)
+ return fmt.Errorf("Total not equallying found %v: total is %d", found, total)
})
if result.Error != nil {
@@ -237,8 +231,7 @@ func TestSameVersionID(t *testing.T) {
}
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingAutoSubscribe,
+ Syncing: SyncingAutoSubscribe,
}, nil)
bucket.Store(bucketKeyRegistry, r)
@@ -301,8 +294,7 @@ func TestDifferentVersionID(t *testing.T) {
}
r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingAutoSubscribe,
+ Syncing: SyncingAutoSubscribe,
}, nil)
bucket.Store(bucketKeyRegistry, r)
diff --git a/swarm/network_test.go b/swarm/network_test.go
index 97bdd07b1..1a8c992a3 100644
--- a/swarm/network_test.go
+++ b/swarm/network_test.go
@@ -23,11 +23,13 @@ import (
"io/ioutil"
"math/rand"
"os"
+ "strings"
"sync"
"sync/atomic"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/testutil"
"github.com/ethereum/go-ethereum/crypto"
@@ -416,7 +418,7 @@ func uploadFile(swarm *Swarm) (storage.Address, string, error) {
// uniqueness is very certain.
data := fmt.Sprintf("test content %s %x", time.Now().Round(0), b)
ctx := context.TODO()
- k, wait, err := swarm.api.Put(ctx, data, "text/plain", false)
+ k, wait, err := putString(ctx, swarm.api, data, "text/plain", false)
if err != nil {
return nil, "", err
}
@@ -530,3 +532,31 @@ func retrieve(
return uint64(totalCheckCount) - atomic.LoadUint64(totalFoundCount)
}
+
+// putString provides singleton manifest creation on top of api.API
+func putString(ctx context.Context, a *api.API, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
+ r := strings.NewReader(content)
+ tag, err := a.Tags.New("unnamed-tag", 0)
+
+ log.Trace("created new tag", "uid", tag.Uid)
+
+ cCtx := sctx.SetTag(ctx, tag.Uid)
+ key, waitContent, err := a.Store(cCtx, r, int64(len(content)), toEncrypt)
+ if err != nil {
+ return nil, nil, err
+ }
+ manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
+ r = strings.NewReader(manifest)
+ key, waitManifest, err := a.Store(cCtx, r, int64(len(manifest)), toEncrypt)
+ if err != nil {
+ return nil, nil, err
+ }
+ tag.DoneSplit(key)
+ return key, func(ctx context.Context) error {
+ err := waitContent(ctx)
+ if err != nil {
+ return err
+ }
+ return waitManifest(ctx)
+ }, nil
+}
diff --git a/swarm/pss/handshake_test.go b/swarm/pss/handshake_test.go
index f4effc022..e47e4be19 100644
--- a/swarm/pss/handshake_test.go
+++ b/swarm/pss/handshake_test.go
@@ -28,6 +28,7 @@ import (
// asymmetrical key exchange between two directly connected peers
// full address, partial address (8 bytes) and empty address
func TestHandshake(t *testing.T) {
+ t.Skip("Handshakes have not been maintained for a longer period, and have started to fail. They should be reviewed and possible removed.")
t.Run("32", testHandshake)
t.Run("8", testHandshake)
t.Run("0", testHandshake)
diff --git a/swarm/sctx/sctx.go b/swarm/sctx/sctx.go
index fb7d35b00..adc8c7dab 100644
--- a/swarm/sctx/sctx.go
+++ b/swarm/sctx/sctx.go
@@ -5,12 +5,15 @@ import "context"
type (
HTTPRequestIDKey struct{}
requestHostKey struct{}
+ tagKey struct{}
)
+// SetHost sets the http request host in the context
func SetHost(ctx context.Context, domain string) context.Context {
return context.WithValue(ctx, requestHostKey{}, domain)
}
+// GetHost gets the request host from the context
func GetHost(ctx context.Context) string {
v, ok := ctx.Value(requestHostKey{}).(string)
if ok {
@@ -18,3 +21,17 @@ func GetHost(ctx context.Context) string {
}
return ""
}
+
+// SetTag sets the tag unique identifier in the context
+func SetTag(ctx context.Context, tagId uint32) context.Context {
+ return context.WithValue(ctx, tagKey{}, tagId)
+}
+
+// GetTag gets the tag unique identifier from the context
+func GetTag(ctx context.Context) uint32 {
+ v, ok := ctx.Value(tagKey{}).(uint32)
+ if ok {
+ return v
+ }
+ return 0
+}
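
A small usage sketch of the new tag helpers added above: the upload tag id is attached to a context and read back further down the call chain (the tag id value is arbitrary):

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/sctx"
)

func main() {
	ctx := sctx.SetTag(context.Background(), 42)
	fmt.Println(sctx.GetTag(ctx)) // 42
	// a context without a tag yields the zero value
	fmt.Println(sctx.GetTag(context.Background())) // 0
}
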
diff --git a/swarm/shed/db.go b/swarm/shed/db.go
index 8c11bf48b..6fc520866 100644
--- a/swarm/shed/db.go
+++ b/swarm/shed/db.go
@@ -45,16 +45,7 @@ const (
// It provides a schema functionality to store fields and indexes
// information about naming and types.
type DB struct {
- ldb *leveldb.DB
-
- compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
- compReadMeter metrics.Meter // Meter for measuring the data read during compaction
- compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
- writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
- writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction
- diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read
- diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
-
+ ldb *leveldb.DB
quit chan struct{} // Quit channel to stop the metrics collection before closing the database
}
@@ -86,13 +77,10 @@ func NewDB(path string, metricsPrefix string) (db *DB, err error) {
}
}
- // Configure meters for DB
- db.configure(metricsPrefix)
-
// Create a quit channel for the periodic metrics collector and run it
db.quit = make(chan struct{})
- go db.meter(10 * time.Second)
+ go db.meter(metricsPrefix, 10*time.Second)
return db, nil
}
@@ -169,19 +157,22 @@ func (db *DB) Close() (err error) {
return db.ldb.Close()
}
-// Configure configures the database metrics collectors
-func (db *DB) configure(prefix string) {
- // Initialize all the metrics collector at the requested prefix
- db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
- db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
- db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
- db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
- db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
- db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
- db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
-}
+func (db *DB) meter(prefix string, refresh time.Duration) {
+ // Meter for measuring the total time spent in database compaction
+ compTimeMeter := metrics.NewRegisteredMeter(prefix+"compact/time", nil)
+ // Meter for measuring the data read during compaction
+ compReadMeter := metrics.NewRegisteredMeter(prefix+"compact/input", nil)
+ // Meter for measuring the data written during compaction
+ compWriteMeter := metrics.NewRegisteredMeter(prefix+"compact/output", nil)
+ // Meter for measuring the write delay number due to database compaction
+ writeDelayMeter := metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
+ // Meter for measuring the write delay duration due to database compaction
+ writeDelayNMeter := metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
+ // Meter for measuring the effective amount of data read
+ diskReadMeter := metrics.NewRegisteredMeter(prefix+"disk/read", nil)
+ // Meter for measuring the effective amount of data written
+ diskWriteMeter := metrics.NewRegisteredMeter(prefix+"disk/write", nil)
-func (db *DB) meter(refresh time.Duration) {
// Create the counters to store current and previous compaction values
compactions := make([][]float64, 2)
for i := 0; i < 2; i++ {
@@ -234,14 +225,14 @@ func (db *DB) meter(refresh time.Duration) {
}
}
// Update all the requested meters
- if db.compTimeMeter != nil {
- db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
+ if compTimeMeter != nil {
+ compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
}
- if db.compReadMeter != nil {
- db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
+ if compReadMeter != nil {
+ compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
}
- if db.compWriteMeter != nil {
- db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
+ if compWriteMeter != nil {
+ compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
}
// Retrieve the write delay statistic
@@ -265,11 +256,11 @@ func (db *DB) meter(refresh time.Duration) {
log.Error("Failed to parse delay duration", "err", err)
continue
}
- if db.writeDelayNMeter != nil {
- db.writeDelayNMeter.Mark(delayN - delaystats[0])
+ if writeDelayNMeter != nil {
+ writeDelayNMeter.Mark(delayN - delaystats[0])
}
- if db.writeDelayMeter != nil {
- db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
+ if writeDelayMeter != nil {
+ writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
}
// If a warning that db is performing compaction has been displayed, any subsequent
// warnings will be withheld for one minute not to overwhelm the user.
@@ -300,11 +291,11 @@ func (db *DB) meter(refresh time.Duration) {
log.Error("Bad syntax of write entry", "entry", parts[1])
continue
}
- if db.diskReadMeter != nil {
- db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
+ if diskReadMeter != nil {
+ diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
}
- if db.diskWriteMeter != nil {
- db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
+ if diskWriteMeter != nil {
+ diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
}
iostats[0], iostats[1] = nRead, nWrite
diff --git a/swarm/shed/index.go b/swarm/shed/index.go
index 6be018d20..38afbce4c 100644
--- a/swarm/shed/index.go
+++ b/swarm/shed/index.go
@@ -40,9 +40,7 @@ type Item struct {
Data []byte
AccessTimestamp int64
StoreTimestamp int64
- // UseMockStore is a pointer to identify
- // an unset state of the field in Join function.
- UseMockStore *bool
+ BinID uint64
}
// Merge is a helper method to construct a new
@@ -61,8 +59,8 @@ func (i Item) Merge(i2 Item) (new Item) {
if i.StoreTimestamp == 0 {
i.StoreTimestamp = i2.StoreTimestamp
}
- if i.UseMockStore == nil {
- i.UseMockStore = i2.UseMockStore
+ if i.BinID == 0 {
+ i.BinID = i2.BinID
}
return i
}
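For context, a minimal sketch (not part of this patch) of how the reworked Merge treats the new BinID field: a zero value counts as unset, so the joined item's value wins. The field values below are made up.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/shed"
)

func main() {
	// An item that is missing the new BinID field (zero value means unset)...
	a := shed.Item{StoreTimestamp: 42}
	// ...and a second item, as joined from another index, that carries it.
	b := shed.Item{BinID: 7, StoreTimestamp: 100}

	// Merge fills only the unset fields of a from b, so BinID is taken from b
	// while the already-set StoreTimestamp is kept.
	merged := a.Merge(b)
	fmt.Println(merged.BinID, merged.StoreTimestamp) // 7 42
}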
diff --git a/swarm/shed/schema.go b/swarm/shed/schema.go
index cfb7c6d64..557d951fb 100644
--- a/swarm/shed/schema.go
+++ b/swarm/shed/schema.go
@@ -52,7 +52,7 @@ type indexSpec struct {
Name string `json:"name"`
}
-// schemaFieldKey retrives the complete LevelDB key for
+// schemaFieldKey retrieves the complete LevelDB key for
// a particular field from the schema definition.
func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
if name == "" {
diff --git a/swarm/storage/chunker_test.go b/swarm/storage/chunker_test.go
index 9a1259444..a0fe2e769 100644
--- a/swarm/storage/chunker_test.go
+++ b/swarm/storage/chunker_test.go
@@ -24,6 +24,7 @@ import (
"io"
"testing"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/testutil"
"golang.org/x/crypto/sha3"
)
@@ -42,8 +43,10 @@ type chunkerTester struct {
t test
}
+var mockTag = chunk.NewTag(0, "mock-tag", 0)
+
func newTestHasherStore(store ChunkStore, hash string) *hasherStore {
- return NewHasherStore(store, MakeHashFunc(hash), false)
+ return NewHasherStore(store, MakeHashFunc(hash), false, chunk.NewTag(0, "test-tag", 0))
}
func testRandomBrokenData(n int, tester *chunkerTester) {
@@ -91,7 +94,7 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester)
var err error
ctx := context.TODO()
if usePyramid {
- addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter)
+ addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
} else {
addr, wait, err = TreeSplit(ctx, data, int64(n), putGetter)
}
@@ -188,7 +191,7 @@ func TestDataAppend(t *testing.T) {
putGetter := newTestHasherStore(store, SHA3Hash)
ctx := context.TODO()
- addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
+ addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
if err != nil {
tester.t.Fatalf(err.Error())
}
@@ -208,7 +211,7 @@ func TestDataAppend(t *testing.T) {
}
putGetter = newTestHasherStore(store, SHA3Hash)
- newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter)
+ newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter, mockTag)
if err != nil {
tester.t.Fatalf(err.Error())
}
@@ -278,7 +281,7 @@ func benchmarkSplitJoin(n int, t *testing.B) {
putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)
ctx := context.TODO()
- key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
+ key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
if err != nil {
t.Fatalf(err.Error())
}
@@ -335,7 +338,7 @@ func benchmarkSplitPyramidBMT(n int, t *testing.B) {
putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)
ctx := context.Background()
- _, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
+ _, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
if err != nil {
t.Fatalf(err.Error())
}
@@ -353,7 +356,7 @@ func benchmarkSplitPyramidSHA3(n int, t *testing.B) {
putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)
ctx := context.Background()
- _, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
+ _, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
if err != nil {
t.Fatalf(err.Error())
}
@@ -374,7 +377,7 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
putGetter := newTestHasherStore(store, SHA3Hash)
ctx := context.Background()
- key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
+ key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter, mockTag)
if err != nil {
t.Fatalf(err.Error())
}
@@ -384,7 +387,7 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
}
putGetter = newTestHasherStore(store, SHA3Hash)
- _, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter)
+ _, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter, mockTag)
if err != nil {
t.Fatalf(err.Error())
}
diff --git a/swarm/storage/common_test.go b/swarm/storage/common_test.go
index c4d187b62..100e778a3 100644
--- a/swarm/storage/common_test.go
+++ b/swarm/storage/common_test.go
@@ -22,8 +22,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
- "os"
"sync"
"testing"
"time"
@@ -59,30 +57,6 @@ func brokenLimitReader(data io.Reader, size int, errAt int) *brokenLimitedReader
}
}
-func newLDBStore(t *testing.T) (*LDBStore, func()) {
- dir, err := ioutil.TempDir("", "bzz-storage-test")
- if err != nil {
- t.Fatal(err)
- }
- log.Trace("memstore.tempdir", "dir", dir)
-
- ldbparams := NewLDBStoreParams(NewDefaultStoreParams(), dir)
- db, err := NewLDBStore(ldbparams)
- if err != nil {
- t.Fatal(err)
- }
-
- cleanup := func() {
- db.Close()
- err := os.RemoveAll(dir)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- return db, cleanup
-}
-
func mputRandomChunks(store ChunkStore, n int) ([]Chunk, error) {
return mput(store, n, GenerateRandomChunk)
}
@@ -94,14 +68,15 @@ func mput(store ChunkStore, n int, f func(i int64) Chunk) (hs []Chunk, err error
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
for i := int64(0); i < int64(n); i++ {
- chunk := f(chunk.DefaultSize)
+ ch := f(chunk.DefaultSize)
go func() {
+ _, err := store.Put(ctx, chunk.ModePutUpload, ch)
select {
- case errc <- store.Put(ctx, chunk):
+ case errc <- err:
case <-ctx.Done():
}
}()
- hs = append(hs, chunk)
+ hs = append(hs, ch)
}
// wait for all chunks to be stored
@@ -123,13 +98,13 @@ func mget(store ChunkStore, hs []Address, f func(h Address, chunk Chunk) error)
go func(h Address) {
defer wg.Done()
// TODO: write timeout with context
- chunk, err := store.Get(context.TODO(), h)
+ ch, err := store.Get(context.TODO(), chunk.ModeGetRequest, h)
if err != nil {
errc <- err
return
}
if f != nil {
- err = f(h, chunk)
+ err = f(h, ch)
if err != nil {
errc <- err
return
@@ -250,14 +225,15 @@ func NewMapChunkStore() *MapChunkStore {
}
}
-func (m *MapChunkStore) Put(_ context.Context, ch Chunk) error {
+func (m *MapChunkStore) Put(_ context.Context, _ chunk.ModePut, ch Chunk) (bool, error) {
m.mu.Lock()
defer m.mu.Unlock()
+ _, exists := m.chunks[ch.Address().Hex()]
m.chunks[ch.Address().Hex()] = ch
- return nil
+ return exists, nil
}
-func (m *MapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
+func (m *MapChunkStore) Get(_ context.Context, _ chunk.ModeGet, ref Address) (Chunk, error) {
m.mu.RLock()
defer m.mu.RUnlock()
chunk := m.chunks[ref.Hex()]
@@ -268,15 +244,28 @@ func (m *MapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
}
// Need to implement Has from SyncChunkStore
-func (m *MapChunkStore) Has(ctx context.Context, ref Address) bool {
+func (m *MapChunkStore) Has(ctx context.Context, ref Address) (has bool, err error) {
m.mu.RLock()
defer m.mu.RUnlock()
- _, has := m.chunks[ref.Hex()]
- return has
+ _, has = m.chunks[ref.Hex()]
+ return has, nil
+}
+
+func (m *MapChunkStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
+ return nil
+}
+
+func (m *MapChunkStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
+ return 0, nil
}
-func (m *MapChunkStore) Close() {
+func (m *MapChunkStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
+ return nil, nil
+}
+
+func (m *MapChunkStore) Close() error {
+ return nil
}
func chunkAddresses(chunks []Chunk) []Address {
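A rough sketch (not part of this patch) of the ChunkStore calling convention that MapChunkStore is updated to satisfy above: Put and Get now take an explicit mode, and Put reports whether the chunk was already known. It assumes, as elsewhere in this changeset, that storage.Chunk aliases chunk.Chunk; the temp-dir path is a placeholder.

package main

import (
	"context"
	"io/ioutil"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

func main() {
	dir, err := ioutil.TempDir("", "swarm-localstore-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// The new localstore replaces the deleted LDBStore.
	store, err := localstore.New(dir, make([]byte, 32), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	ch := storage.GenerateRandomChunk(chunk.DefaultSize)

	// Put carries an upload mode and reports whether the chunk was already seen.
	seen, err := store.Put(context.Background(), chunk.ModePutUpload, ch)
	if err != nil {
		log.Fatal(err)
	}
	_ = seen // false on the first upload of this chunk

	// Get takes a retrieval mode such as ModeGetRequest.
	if _, err := store.Get(context.Background(), chunk.ModeGetRequest, ch.Address()); err != nil {
		log.Fatal(err)
	}
}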
diff --git a/swarm/storage/database.go b/swarm/storage/database.go
deleted file mode 100644
index 12367b905..000000000
--- a/swarm/storage/database.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-// this is a clone of an earlier state of the ethereum ethdb/database
-// no need for queueing/caching
-
-import (
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/syndtr/goleveldb/leveldb"
- "github.com/syndtr/goleveldb/leveldb/iterator"
- "github.com/syndtr/goleveldb/leveldb/opt"
-)
-
-const openFileLimit = 128
-
-type LDBDatabase struct {
- db *leveldb.DB
-}
-
-func NewLDBDatabase(file string) (*LDBDatabase, error) {
- // Open the db
- db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: openFileLimit})
- if err != nil {
- return nil, err
- }
-
- database := &LDBDatabase{db: db}
-
- return database, nil
-}
-
-func (db *LDBDatabase) Put(key []byte, value []byte) error {
- metrics.GetOrRegisterCounter("ldbdatabase.put", nil).Inc(1)
-
- return db.db.Put(key, value, nil)
-}
-
-func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
- metrics.GetOrRegisterCounter("ldbdatabase.get", nil).Inc(1)
-
- dat, err := db.db.Get(key, nil)
- if err != nil {
- return nil, err
- }
- return dat, nil
-}
-
-func (db *LDBDatabase) Delete(key []byte) error {
- return db.db.Delete(key, nil)
-}
-
-func (db *LDBDatabase) NewIterator() iterator.Iterator {
- metrics.GetOrRegisterCounter("ldbdatabase.newiterator", nil).Inc(1)
-
- return db.db.NewIterator(nil, nil)
-}
-
-func (db *LDBDatabase) Write(batch *leveldb.Batch) error {
- metrics.GetOrRegisterCounter("ldbdatabase.write", nil).Inc(1)
-
- return db.db.Write(batch, nil)
-}
-
-func (db *LDBDatabase) Close() {
- // Close the leveldb database
- db.db.Close()
-}
diff --git a/swarm/storage/feed/handler.go b/swarm/storage/feed/handler.go
index 61124e2db..0f6f2ba34 100644
--- a/swarm/storage/feed/handler.go
+++ b/swarm/storage/feed/handler.go
@@ -24,6 +24,8 @@ import (
"fmt"
"sync"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
"github.com/ethereum/go-ethereum/swarm/log"
@@ -189,7 +191,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout)
defer cancel()
- chunk, err := h.chunkStore.Get(ctx, id.Addr())
+ ch, err := h.chunkStore.Get(ctx, chunk.ModeGetLookup, id.Addr())
if err != nil {
if err == context.DeadlineExceeded { // chunk not found
return nil, nil
@@ -198,7 +200,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
}
var request Request
- if err := request.fromChunk(chunk); err != nil {
+ if err := request.fromChunk(ch); err != nil {
return nil, nil
}
if request.Time <= timeLimit {
@@ -257,14 +259,14 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad
return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist")
}
- chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
+ ch, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
if err != nil {
return nil, err
}
// send the chunk
- h.chunkStore.Put(ctx, chunk)
- log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data())
+ h.chunkStore.Put(ctx, chunk.ModePutUpload, ch)
+ log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", ch.Data())
// update our feed updates map cache entry if the new update is older than the one we have, if we have it.
if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) {
feedUpdate.Epoch = r.Epoch
diff --git a/swarm/storage/feed/handler_test.go b/swarm/storage/feed/handler_test.go
index 2f8a52453..c4f6fe689 100644
--- a/swarm/storage/feed/handler_test.go
+++ b/swarm/storage/feed/handler_test.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)
var (
@@ -400,9 +401,7 @@ func TestValidatorInStore(t *testing.T) {
}
defer os.RemoveAll(datadir)
- handlerParams := storage.NewDefaultLocalStoreParams()
- handlerParams.Init(datadir)
- store, err := storage.NewLocalStore(handlerParams, nil)
+ localstore, err := localstore.New(datadir, make([]byte, 32), nil)
if err != nil {
t.Fatal(err)
}
@@ -410,7 +409,7 @@ func TestValidatorInStore(t *testing.T) {
// set up Swarm feeds handler and add is as a validator to the localstore
fhParams := &HandlerParams{}
fh := NewHandler(fhParams)
- store.Validators = append(store.Validators, fh)
+ store := chunk.NewValidatorStore(localstore, fh)
// create content addressed chunks, one good, one faulty
chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2)
@@ -447,15 +446,15 @@ func TestValidatorInStore(t *testing.T) {
}
// put the chunks in the store and check their error status
- err = store.Put(context.Background(), goodChunk)
+ _, err = store.Put(context.Background(), chunk.ModePutUpload, goodChunk)
if err == nil {
t.Fatal("expected error on good content address chunk with feed update validator only, but got nil")
}
- err = store.Put(context.Background(), badChunk)
+ _, err = store.Put(context.Background(), chunk.ModePutUpload, badChunk)
if err == nil {
t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil")
}
- err = store.Put(context.Background(), uglyChunk)
+ _, err = store.Put(context.Background(), chunk.ModePutUpload, uglyChunk)
if err != nil {
t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err)
}
diff --git a/swarm/storage/feed/testutil.go b/swarm/storage/feed/testutil.go
index caa39d9ff..db2d989e1 100644
--- a/swarm/storage/feed/testutil.go
+++ b/swarm/storage/feed/testutil.go
@@ -18,12 +18,13 @@ package feed
import (
"context"
- "fmt"
"path/filepath"
"sync"
"github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)
const (
@@ -53,14 +54,14 @@ func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetF
func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
path := filepath.Join(datadir, testDbDirName)
fh := NewHandler(params)
- localstoreparams := storage.NewDefaultLocalStoreParams()
- localstoreparams.Init(path)
- localStore, err := storage.NewLocalStore(localstoreparams, nil)
+
+ db, err := localstore.New(filepath.Join(path, "chunks"), make([]byte, 32), nil)
if err != nil {
- return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
+ return nil, err
}
- localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm)))
- localStore.Validators = append(localStore.Validators, fh)
+
+ localStore := chunk.NewValidatorStore(db, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm)), fh)
+
netStore, err := storage.NewNetStore(localStore, nil)
if err != nil {
return nil, err
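A hedged sketch (not part of this patch) of the validator wiring used above: instead of appending validators to a field on the local store, the store is wrapped with chunk.NewValidatorStore so every Put is checked. The directory is a placeholder.

package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

func main() {
	dir, err := ioutil.TempDir("", "swarm-validatorstore-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	db, err := localstore.New(dir, make([]byte, 32), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Wrap the store with the validators it should enforce on Put, here the
	// content-address validator used by NewLocalFileStore in this patch.
	store := chunk.NewValidatorStore(db, storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)))
	_ = store
}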
diff --git a/swarm/storage/filestore.go b/swarm/storage/filestore.go
index 0bad944ee..dc096e56c 100644
--- a/swarm/storage/filestore.go
+++ b/swarm/storage/filestore.go
@@ -21,6 +21,9 @@ import (
"io"
"sort"
"sync"
+
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)
/*
@@ -44,6 +47,7 @@ const (
type FileStore struct {
ChunkStore
hashFunc SwarmHasher
+ tags *chunk.Tags
}
type FileStoreParams struct {
@@ -57,22 +61,20 @@ func NewFileStoreParams() *FileStoreParams {
}
// for testing locally
-func NewLocalFileStore(datadir string, basekey []byte) (*FileStore, error) {
- params := NewDefaultLocalStoreParams()
- params.Init(datadir)
- localStore, err := NewLocalStore(params, nil)
+func NewLocalFileStore(datadir string, basekey []byte, tags *chunk.Tags) (*FileStore, error) {
+ localStore, err := localstore.New(datadir, basekey, nil)
if err != nil {
return nil, err
}
- localStore.Validators = append(localStore.Validators, NewContentAddressValidator(MakeHashFunc(DefaultHash)))
- return NewFileStore(localStore, NewFileStoreParams()), nil
+ return NewFileStore(chunk.NewValidatorStore(localStore, NewContentAddressValidator(MakeHashFunc(DefaultHash))), NewFileStoreParams(), tags), nil
}
-func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore {
+func NewFileStore(store ChunkStore, params *FileStoreParams, tags *chunk.Tags) *FileStore {
hashFunc := MakeHashFunc(params.Hash)
return &FileStore{
ChunkStore: store,
hashFunc: hashFunc,
+ tags: tags,
}
}
@@ -83,7 +85,11 @@ func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore {
// It returns a reader with the chunk data and whether the content was encrypted
func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool) {
isEncrypted = len(addr) > f.hashFunc().Size()
- getter := NewHasherStore(f.ChunkStore, f.hashFunc, isEncrypted)
+ tag, err := f.tags.GetFromContext(ctx)
+ if err != nil {
+ tag = chunk.NewTag(0, "ephemeral-retrieval-tag", 0)
+ }
+ getter := NewHasherStore(f.ChunkStore, f.hashFunc, isEncrypted, tag)
reader = TreeJoin(ctx, addr, getter, 0)
return
}
@@ -91,8 +97,17 @@ func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChu
// Store is a public API. Main entry point for document storage directly. Used by the
// FS-aware API and httpaccess
func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(context.Context) error, err error) {
- putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt)
- return PyramidSplit(ctx, data, putter, putter)
+ tag, err := f.tags.GetFromContext(ctx)
+ if err != nil {
+ // Some parts of the codebase, namely the manifest trie, do not store the context
+ // of the original request nor the tag with the trie; recalculating the trie hence
+ // loses the tag uid. Thus we create an ephemeral tag here for that purpose.
+
+ tag = chunk.NewTag(0, "", 0)
+ //return nil, nil, err
+ }
+ putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt, tag)
+ return PyramidSplit(ctx, data, putter, putter, tag)
}
func (f *FileStore) HashSize() int {
@@ -101,12 +116,14 @@ func (f *FileStore) HashSize() int {
// GetAllReferences is a public API. This endpoint returns all chunk hashes (only) for a given file
func (f *FileStore) GetAllReferences(ctx context.Context, data io.Reader, toEncrypt bool) (addrs AddressCollection, err error) {
+ tag := chunk.NewTag(0, "ephemeral-tag", 0) //this tag is just a mock ephemeral tag since we don't want to save these results
+
// create a special kind of putter, which only will store the references
putter := &hashExplorer{
- hasherStore: NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt),
+ hasherStore: NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt, tag),
}
// do the actual splitting anyway, no way around it
- _, wait, err := PyramidSplit(ctx, data, putter, putter)
+ _, wait, err := PyramidSplit(ctx, data, putter, putter, tag)
if err != nil {
return nil, err
}
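Illustrative usage sketch (not part of this patch) of the reworked FileStore API above: NewLocalFileStore now takes a base key and a chunk.Tags registry, and Store resolves the upload tag from the request context, falling back to an ephemeral tag when none is present. The directory and data are placeholders.

package main

import (
	"bytes"
	"context"
	"io/ioutil"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	dir, err := ioutil.TempDir("", "swarm-filestore-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// The new signature requires a base key and a tags registry.
	fileStore, err := storage.NewLocalFileStore(dir, make([]byte, 32), chunk.NewTags())
	if err != nil {
		log.Fatal(err)
	}

	data := make([]byte, 8192)
	// No tag in this context, so Store falls back to an ephemeral tag internally.
	addr, wait, err := fileStore.Store(context.Background(), bytes.NewReader(data), int64(len(data)), false)
	if err != nil {
		log.Fatal(err)
	}
	if err := wait(context.Background()); err != nil {
		log.Fatal(err)
	}
	_ = addr
}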
diff --git a/swarm/storage/filestore_test.go b/swarm/storage/filestore_test.go
index 06c4be1d7..d0a167a24 100644
--- a/swarm/storage/filestore_test.go
+++ b/swarm/storage/filestore_test.go
@@ -22,8 +22,11 @@ import (
"io"
"io/ioutil"
"os"
+ "path/filepath"
"testing"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -35,21 +38,18 @@ func TestFileStorerandom(t *testing.T) {
}
func testFileStoreRandom(toEncrypt bool, t *testing.T) {
- tdb, cleanup, err := newTestDbStore(false, false)
- defer cleanup()
+ dir, err := ioutil.TempDir("", "swarm-storage-")
if err != nil {
- t.Fatalf("init dbStore failed: %v", err)
+ t.Fatal(err)
}
- db := tdb.LDBStore
- db.setCapacity(50000)
- memStore := NewMemStore(NewDefaultStoreParams(), db)
- localStore := &LocalStore{
- memStore: memStore,
- DbStore: db,
+ defer os.RemoveAll(dir)
+ localStore, err := localstore.New(dir, make([]byte, 32), nil)
+ if err != nil {
+ t.Fatal(err)
}
+ defer localStore.Close()
- fileStore := NewFileStore(localStore, NewFileStoreParams())
- defer os.RemoveAll("/tmp/bzz")
+ fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
slice := testutil.RandomBytes(1, testDataSize)
ctx := context.TODO()
@@ -76,9 +76,8 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
if !bytes.Equal(slice, resultSlice) {
t.Fatalf("Comparison error.")
}
- ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666)
- ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666)
- localStore.memStore = NewMemStore(NewDefaultStoreParams(), db)
+ ioutil.WriteFile(filepath.Join(dir, "slice.bzz.16M"), slice, 0666)
+ ioutil.WriteFile(filepath.Join(dir, "result.bzz.16M"), resultSlice, 0666)
resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
if isEncrypted != toEncrypt {
t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
@@ -104,18 +103,18 @@ func TestFileStoreCapacity(t *testing.T) {
}
func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
- tdb, cleanup, err := newTestDbStore(false, false)
- defer cleanup()
+ dir, err := ioutil.TempDir("", "swarm-storage-")
if err != nil {
- t.Fatalf("init dbStore failed: %v", err)
+ t.Fatal(err)
}
- db := tdb.LDBStore
- memStore := NewMemStore(NewDefaultStoreParams(), db)
- localStore := &LocalStore{
- memStore: memStore,
- DbStore: db,
+ defer os.RemoveAll(dir)
+ localStore, err := localstore.New(dir, make([]byte, 32), nil)
+ if err != nil {
+ t.Fatal(err)
}
- fileStore := NewFileStore(localStore, NewFileStoreParams())
+ defer localStore.Close()
+
+ fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
slice := testutil.RandomBytes(1, testDataSize)
ctx := context.TODO()
key, wait, err := fileStore.Store(ctx, bytes.NewReader(slice), testDataSize, toEncrypt)
@@ -141,10 +140,6 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
if !bytes.Equal(slice, resultSlice) {
t.Fatalf("Comparison error.")
}
- // Clear memStore
- memStore.setCapacity(0)
- // check whether it is, indeed, empty
- fileStore.ChunkStore = memStore
resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
if isEncrypted != toEncrypt {
t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
@@ -177,18 +172,18 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
// TestGetAllReferences only tests that GetAllReferences returns an expected
// number of references for a given file
func TestGetAllReferences(t *testing.T) {
- tdb, cleanup, err := newTestDbStore(false, false)
- defer cleanup()
+ dir, err := ioutil.TempDir("", "swarm-storage-")
if err != nil {
- t.Fatalf("init dbStore failed: %v", err)
+ t.Fatal(err)
}
- db := tdb.LDBStore
- memStore := NewMemStore(NewDefaultStoreParams(), db)
- localStore := &LocalStore{
- memStore: memStore,
- DbStore: db,
+ defer os.RemoveAll(dir)
+ localStore, err := localstore.New(dir, make([]byte, 32), nil)
+ if err != nil {
+ t.Fatal(err)
}
- fileStore := NewFileStore(localStore, NewFileStoreParams())
+ defer localStore.Close()
+
+ fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
// testRuns[i] and expectedLen[i] are dataSize and expected length respectively
testRuns := []int{1024, 8192, 16000, 30000, 1000000}
diff --git a/swarm/storage/hasherstore.go b/swarm/storage/hasherstore.go
index 345ce7430..1e702f11a 100644
--- a/swarm/storage/hasherstore.go
+++ b/swarm/storage/hasherstore.go
@@ -28,6 +28,7 @@ import (
type hasherStore struct {
store ChunkStore
+ tag *chunk.Tag
toEncrypt bool
hashFunc SwarmHasher
hashSize int // content hash size
@@ -44,7 +45,7 @@ type hasherStore struct {
// NewHasherStore creates a hasherStore object, which implements Putter and Getter interfaces.
// With the HasherStore you can put and get chunk data (which is just []byte) into a ChunkStore
// and the hasherStore will take care of encryption/decryption of data if necessary
-func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *hasherStore {
+func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool, tag *chunk.Tag) *hasherStore {
hashSize := hashFunc().Size()
refSize := int64(hashSize)
if toEncrypt {
@@ -53,6 +54,7 @@ func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *has
h := &hasherStore{
store: store,
+ tag: tag,
toEncrypt: toEncrypt,
hashFunc: hashFunc,
hashSize: hashSize,
@@ -93,7 +95,7 @@ func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error)
return nil, err
}
- chunk, err := h.store.Get(ctx, addr)
+ chunk, err := h.store.Get(ctx, chunk.ModeGetRequest, addr)
if err != nil {
return nil, err
}
@@ -239,11 +241,16 @@ func (h *hasherStore) newDataEncryption(key encryption.Key) encryption.Encryptio
return encryption.New(key, int(chunk.DefaultSize), 0, sha3.NewLegacyKeccak256)
}
-func (h *hasherStore) storeChunk(ctx context.Context, chunk Chunk) {
+func (h *hasherStore) storeChunk(ctx context.Context, ch Chunk) {
atomic.AddUint64(&h.nrChunks, 1)
go func() {
+ seen, err := h.store.Put(ctx, chunk.ModePutUpload, ch)
+ h.tag.Inc(chunk.StateStored)
+ if seen {
+ h.tag.Inc(chunk.StateSeen)
+ }
select {
- case h.errC <- h.store.Put(ctx, chunk):
+ case h.errC <- err:
case <-h.quitC:
}
}()
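A minimal sketch (not part of this patch) of the tag bookkeeping that storeChunk performs above: every stored chunk increments StateStored, and chunks the store already knows also increment StateSeen. The constructor arguments (uid, name, total) follow how the tests in this changeset call chunk.NewTag; the name is made up.

package main

import "github.com/ethereum/go-ethereum/swarm/chunk"

func main() {
	// A tag tracks the chunks of one upload as they move through states.
	tag := chunk.NewTag(0, "example-upload", 0)

	// Mirror what storeChunk does after a successful Put of a chunk that the
	// underlying store reports as already seen.
	tag.Inc(chunk.StateStored)
	tag.Inc(chunk.StateSeen)
}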
diff --git a/swarm/storage/hasherstore_test.go b/swarm/storage/hasherstore_test.go
index 22cf98d0e..9dfd7ab1d 100644
--- a/swarm/storage/hasherstore_test.go
+++ b/swarm/storage/hasherstore_test.go
@@ -21,9 +21,9 @@ import (
"context"
"testing"
- "github.com/ethereum/go-ethereum/swarm/storage/encryption"
-
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+ "github.com/ethereum/go-ethereum/swarm/storage/encryption"
)
func TestHasherStore(t *testing.T) {
@@ -43,7 +43,7 @@ func TestHasherStore(t *testing.T) {
for _, tt := range tests {
chunkStore := NewMapChunkStore()
- hasherStore := NewHasherStore(chunkStore, MakeHashFunc(DefaultHash), tt.toEncrypt)
+ hasherStore := NewHasherStore(chunkStore, MakeHashFunc(DefaultHash), tt.toEncrypt, chunk.NewTag(0, "test-tag", 0))
// Put two random chunks into the hasherStore
chunkData1 := GenerateRandomChunk(int64(tt.chunkLength)).Data()
@@ -107,7 +107,7 @@ func TestHasherStore(t *testing.T) {
}
// Check if chunk data in store is encrypted or not
- chunkInStore, err := chunkStore.Get(ctx, hash1)
+ chunkInStore, err := chunkStore.Get(ctx, chunk.ModeGetRequest, hash1)
if err != nil {
t.Fatalf("Expected no error got \"%v\"", err)
}
diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go
deleted file mode 100644
index fd5ec9e30..000000000
--- a/swarm/storage/ldbstore.go
+++ /dev/null
@@ -1,1082 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// disk storage layer for the package bzz
-// DbStore implements the ChunkStore interface and is used by the FileStore as
-// persistent storage of chunks
-// it implements purging based on access count allowing for external control of
-// max capacity
-
-package storage
-
-import (
- "archive/tar"
- "bytes"
- "context"
- "encoding/binary"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "sync"
-
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/storage/mock"
- "github.com/syndtr/goleveldb/leveldb"
-)
-
-const (
- defaultGCRatio = 10
- defaultMaxGCRound = 10000
- defaultMaxGCBatch = 5000
-
- wEntryCnt = 1 << 0
- wIndexCnt = 1 << 1
- wAccessCnt = 1 << 2
-)
-
-var (
- dbEntryCount = metrics.NewRegisteredCounter("ldbstore.entryCnt", nil)
-)
-
-var (
- keyIndex = byte(0)
- keyAccessCnt = []byte{2}
- keyEntryCnt = []byte{3}
- keyDataIdx = []byte{4}
- keyData = byte(6)
- keyDistanceCnt = byte(7)
- keySchema = []byte{8}
- keyGCIdx = byte(9) // access to chunk data index, used by garbage collection in ascending order from first entry
-)
-
-var (
- ErrDBClosed = errors.New("LDBStore closed")
-)
-
-type LDBStoreParams struct {
- *StoreParams
- Path string
- Po func(Address) uint8
-}
-
-// NewLDBStoreParams constructs LDBStoreParams with the specified values.
-func NewLDBStoreParams(storeparams *StoreParams, path string) *LDBStoreParams {
- return &LDBStoreParams{
- StoreParams: storeparams,
- Path: path,
- Po: func(k Address) (ret uint8) { return uint8(Proximity(storeparams.BaseKey, k[:])) },
- }
-}
-
-type garbage struct {
- maxRound int // maximum number of chunks to delete in one garbage collection round
- maxBatch int // maximum number of chunks to delete in one db request batch
- ratio int // 1/x ratio to calculate the number of chunks to gc on a low capacity db
- count int // number of chunks deleted in running round
- target int // number of chunks to delete in running round
- batch *dbBatch // the delete batch
- runC chan struct{} // struct in chan means gc is NOT running
-}
-
-type LDBStore struct {
- db *LDBDatabase
-
- // this should be stored in db, accessed transactionally
- entryCnt uint64 // number of items in the LevelDB
- accessCnt uint64 // ever-accumulating number increased every time we read/access an entry
- dataIdx uint64 // similar to entryCnt, but we only increment it
- capacity uint64
- bucketCnt []uint64
-
- hashfunc SwarmHasher
- po func(Address) uint8
-
- batchesC chan struct{}
- closed bool
- batch *dbBatch
- lock sync.RWMutex
- quit chan struct{}
- gc *garbage
-
- // Functions encodeDataFunc is used to bypass
- // the default functionality of DbStore with
- // mock.NodeStore for testing purposes.
- encodeDataFunc func(chunk Chunk) []byte
- // If getDataFunc is defined, it will be used for
- // retrieving the chunk data instead from the local
- // LevelDB database.
- getDataFunc func(key Address) (data []byte, err error)
-}
-
-type dbBatch struct {
- *leveldb.Batch
- err error
- c chan struct{}
-}
-
-func newBatch() *dbBatch {
- return &dbBatch{Batch: new(leveldb.Batch), c: make(chan struct{})}
-}
-
-// TODO: Instead of passing the distance function, just pass the address from which distances are calculated
-// to avoid the appearance of a pluggable distance metric and opportunities of bugs associated with providing
-// a function different from the one that is actually used.
-func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
- s = new(LDBStore)
- s.hashfunc = params.Hash
- s.quit = make(chan struct{})
-
- s.batchesC = make(chan struct{}, 1)
- go s.writeBatches()
- s.batch = newBatch()
- // associate encodeData with default functionality
- s.encodeDataFunc = encodeData
-
- s.db, err = NewLDBDatabase(params.Path)
- if err != nil {
- return nil, err
- }
-
- s.po = params.Po
- s.setCapacity(params.DbCapacity)
-
- s.bucketCnt = make([]uint64, 0x100)
- for i := 0; i < 0x100; i++ {
- k := make([]byte, 2)
- k[0] = keyDistanceCnt
- k[1] = uint8(i)
- cnt, _ := s.db.Get(k)
- s.bucketCnt[i] = BytesToU64(cnt)
- }
- data, _ := s.db.Get(keyEntryCnt)
- s.entryCnt = BytesToU64(data)
- data, _ = s.db.Get(keyAccessCnt)
- s.accessCnt = BytesToU64(data)
- data, _ = s.db.Get(keyDataIdx)
- s.dataIdx = BytesToU64(data)
-
- // set up garbage collection
- s.gc = &garbage{
- maxBatch: defaultMaxGCBatch,
- maxRound: defaultMaxGCRound,
- ratio: defaultGCRatio,
- }
-
- s.gc.runC = make(chan struct{}, 1)
- s.gc.runC <- struct{}{}
-
- return s, nil
-}
-
-// MarkAccessed increments the access counter as a best effort for a chunk, so
-// the chunk won't get garbage collected.
-func (s *LDBStore) MarkAccessed(addr Address) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if s.closed {
- return
- }
-
- proximity := s.po(addr)
- s.tryAccessIdx(addr, proximity)
-}
-
-// initialize and set values for processing of gc round
-func (s *LDBStore) startGC(c int) {
-
- s.gc.count = 0
- // calculate the target number of deletions
- if c >= s.gc.maxRound {
- s.gc.target = s.gc.maxRound
- } else {
- s.gc.target = c / s.gc.ratio
- }
- s.gc.batch = newBatch()
- log.Debug("startgc", "requested", c, "target", s.gc.target)
-}
-
-// NewMockDbStore creates a new instance of DbStore with
-// mockStore set to a provided value. If mockStore argument is nil,
-// this function behaves exactly as NewDbStore.
-func NewMockDbStore(params *LDBStoreParams, mockStore *mock.NodeStore) (s *LDBStore, err error) {
- s, err = NewLDBStore(params)
- if err != nil {
- return nil, err
- }
-
- // replace put and get with mock store functionality
- if mockStore != nil {
- s.encodeDataFunc = newMockEncodeDataFunc(mockStore)
- s.getDataFunc = newMockGetDataFunc(mockStore)
- }
- return
-}
-
-type dpaDBIndex struct {
- Idx uint64
- Access uint64
-}
-
-func BytesToU64(data []byte) uint64 {
- if len(data) < 8 {
- return 0
- }
- return binary.BigEndian.Uint64(data)
-}
-
-func U64ToBytes(val uint64) []byte {
- data := make([]byte, 8)
- binary.BigEndian.PutUint64(data, val)
- return data
-}
-
-func getIndexKey(hash Address) []byte {
- hashSize := len(hash)
- key := make([]byte, hashSize+1)
- key[0] = keyIndex
- copy(key[1:], hash[:])
- return key
-}
-
-func getDataKey(idx uint64, po uint8) []byte {
- key := make([]byte, 10)
- key[0] = keyData
- key[1] = po
- binary.BigEndian.PutUint64(key[2:], idx)
-
- return key
-}
-
-func getGCIdxKey(index *dpaDBIndex) []byte {
- key := make([]byte, 9)
- key[0] = keyGCIdx
- binary.BigEndian.PutUint64(key[1:], index.Access)
- return key
-}
-
-func getGCIdxValue(index *dpaDBIndex, po uint8, addr Address) []byte {
- val := make([]byte, 41) // po = 1, index.Index = 8, Address = 32
- val[0] = po
- binary.BigEndian.PutUint64(val[1:], index.Idx)
- copy(val[9:], addr)
- return val
-}
-
-func parseIdxKey(key []byte) (byte, []byte) {
- return key[0], key[1:]
-}
-
-func parseGCIdxEntry(accessCnt []byte, val []byte) (index *dpaDBIndex, po uint8, addr Address) {
- index = &dpaDBIndex{
- Idx: binary.BigEndian.Uint64(val[1:]),
- Access: binary.BigEndian.Uint64(accessCnt),
- }
- po = val[0]
- addr = val[9:]
- return
-}
-
-func encodeIndex(index *dpaDBIndex) []byte {
- data, _ := rlp.EncodeToBytes(index)
- return data
-}
-
-func encodeData(chunk Chunk) []byte {
- // Always create a new underlying array for the returned byte slice.
- // The chunk.Address array may be used in the returned slice which
- // may be changed later in the code or by the LevelDB, resulting
- // that the Address is changed as well.
- return append(append([]byte{}, chunk.Address()[:]...), chunk.Data()...)
-}
-
-func decodeIndex(data []byte, index *dpaDBIndex) error {
- dec := rlp.NewStream(bytes.NewReader(data), 0)
- return dec.Decode(index)
-}
-
-func decodeData(addr Address, data []byte) (Chunk, error) {
- return NewChunk(addr, data[32:]), nil
-}
-
-func (s *LDBStore) collectGarbage() error {
- // prevent duplicate gc from starting when one is already running
- select {
- case <-s.gc.runC:
- default:
- return nil
- }
-
- s.lock.Lock()
- entryCnt := s.entryCnt
- s.lock.Unlock()
-
- metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1)
-
- // calculate the amount of chunks to collect and reset counter
- s.startGC(int(entryCnt))
- log.Debug("collectGarbage", "target", s.gc.target, "entryCnt", entryCnt)
-
- for s.gc.count < s.gc.target {
- it := s.db.NewIterator()
- ok := it.Seek([]byte{keyGCIdx})
- var singleIterationCount int
-
- // every batch needs a lock so we avoid entries changing accessidx in the meantime
- s.lock.Lock()
- for ; ok && (singleIterationCount < s.gc.maxBatch); ok = it.Next() {
-
- // quit if no more access index keys
- itkey := it.Key()
- if (itkey == nil) || (itkey[0] != keyGCIdx) {
- break
- }
-
- // get chunk data entry from access index
- val := it.Value()
- index, po, hash := parseGCIdxEntry(itkey[1:], val)
- keyIdx := make([]byte, 33)
- keyIdx[0] = keyIndex
- copy(keyIdx[1:], hash)
-
- // add delete operation to batch
- s.delete(s.gc.batch.Batch, index, keyIdx, po)
- singleIterationCount++
- s.gc.count++
- log.Trace("garbage collect enqueued chunk for deletion", "key", hash)
-
- // break if target is not on max garbage batch boundary
- if s.gc.count >= s.gc.target {
- break
- }
- }
-
- s.writeBatch(s.gc.batch, wEntryCnt)
- log.Trace("garbage collect batch done", "batch", singleIterationCount, "total", s.gc.count)
- s.lock.Unlock()
- it.Release()
- }
-
- metrics.GetOrRegisterCounter("ldbstore.collectgarbage.delete", nil).Inc(int64(s.gc.count))
- log.Debug("garbage collect done", "c", s.gc.count)
- s.gc.runC <- struct{}{}
-
- return nil
-}
-
-// Export writes all chunks from the store to a tar archive, returning the
-// number of chunks written.
-func (s *LDBStore) Export(out io.Writer) (int64, error) {
- tw := tar.NewWriter(out)
- defer tw.Close()
-
- it := s.db.NewIterator()
- defer it.Release()
- var count int64
- for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
- key := it.Key()
- if (key == nil) || (key[0] != keyIndex) {
- break
- }
-
- var index dpaDBIndex
-
- hash := key[1:]
- decodeIndex(it.Value(), &index)
- po := s.po(hash)
- datakey := getDataKey(index.Idx, po)
- log.Trace("store.export", "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po)
- data, err := s.db.Get(datakey)
- if err != nil {
- log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key, err))
- continue
- }
-
- hdr := &tar.Header{
- Name: hex.EncodeToString(hash),
- Mode: 0644,
- Size: int64(len(data)),
- }
- if err := tw.WriteHeader(hdr); err != nil {
- return count, err
- }
- if _, err := tw.Write(data); err != nil {
- return count, err
- }
- count++
- }
-
- return count, nil
-}
-
-// Import reads chunks into the store from a tar archive, returning the number
-// of chunks read.
-func (s *LDBStore) Import(in io.Reader) (int64, error) {
- tr := tar.NewReader(in)
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- countC := make(chan int64)
- errC := make(chan error)
- var count int64
- go func() {
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- select {
- case errC <- err:
- case <-ctx.Done():
- }
- }
-
- if len(hdr.Name) != 64 {
- log.Warn("ignoring non-chunk file", "name", hdr.Name)
- continue
- }
-
- keybytes, err := hex.DecodeString(hdr.Name)
- if err != nil {
- log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
- continue
- }
-
- data, err := ioutil.ReadAll(tr)
- if err != nil {
- select {
- case errC <- err:
- case <-ctx.Done():
- }
- }
- key := Address(keybytes)
- chunk := NewChunk(key, data[32:])
-
- go func() {
- select {
- case errC <- s.Put(ctx, chunk):
- case <-ctx.Done():
- }
- }()
-
- count++
- }
- countC <- count
- }()
-
- // wait for all chunks to be stored
- i := int64(0)
- var total int64
- for {
- select {
- case err := <-errC:
- if err != nil {
- return count, err
- }
- i++
- case total = <-countC:
- case <-ctx.Done():
- return i, ctx.Err()
- }
- if total > 0 && i == total {
- return total, nil
- }
- }
-}
-
-// Cleanup iterates over the database and deletes chunks if they pass the `f` condition
-func (s *LDBStore) Cleanup(f func(Chunk) bool) {
- var errorsFound, removed, total int
-
- it := s.db.NewIterator()
- defer it.Release()
- for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
- key := it.Key()
- if (key == nil) || (key[0] != keyIndex) {
- break
- }
- total++
- var index dpaDBIndex
- err := decodeIndex(it.Value(), &index)
- if err != nil {
- log.Warn("Cannot decode")
- errorsFound++
- continue
- }
- hash := key[1:]
- po := s.po(hash)
- datakey := getDataKey(index.Idx, po)
- data, err := s.db.Get(datakey)
- if err != nil {
- found := false
-
- // The highest possible proximity is 255, so exit loop upon overflow.
- for po = uint8(1); po != 0; po++ {
- datakey = getDataKey(index.Idx, po)
- data, err = s.db.Get(datakey)
- if err == nil {
- found = true
- break
- }
- }
-
- if !found {
- log.Warn(fmt.Sprintf("Chunk %x found but count not be accessed with any po", key))
- errorsFound++
- continue
- }
- }
-
- ck := data[:32]
- c, err := decodeData(ck, data)
- if err != nil {
- log.Error("decodeData error", "err", err)
- continue
- }
-
- sdata := c.Data()
-
- cs := int64(binary.LittleEndian.Uint64(sdata[:8]))
- log.Trace("chunk", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(sdata), "size", cs)
-
- // if chunk is to be removed
- if f(c) {
- log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(sdata), "size", cs)
- s.deleteNow(&index, getIndexKey(key[1:]), po)
- removed++
- errorsFound++
- }
- }
-
- log.Warn(fmt.Sprintf("Found %v errors out of %v entries. Removed %v chunks.", errorsFound, total, removed))
-}
-
-// CleanGCIndex rebuilds the garbage collector index from scratch, while
-// removing inconsistent elements, e.g., indices with missing data chunks.
-// WARN: it's a pretty heavy, long running function.
-func (s *LDBStore) CleanGCIndex() error {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- batch := leveldb.Batch{}
-
- var okEntryCount uint64
- var totalEntryCount uint64
-
- // throw out all gc indices, we will rebuild from cleaned index
- it := s.db.NewIterator()
- it.Seek([]byte{keyGCIdx})
- var gcDeletes int
- for it.Valid() {
- rowType, _ := parseIdxKey(it.Key())
- if rowType != keyGCIdx {
- break
- }
- batch.Delete(it.Key())
- gcDeletes++
- it.Next()
- }
- log.Debug("gc", "deletes", gcDeletes)
- if err := s.db.Write(&batch); err != nil {
- return err
- }
- batch.Reset()
-
- it.Release()
-
- // corrected po index pointer values
- var poPtrs [256]uint64
-
- // set to true if chunk count not on 4096 iteration boundary
- var doneIterating bool
-
- // last key index in previous iteration
- lastIdxKey := []byte{keyIndex}
-
- // counter for debug output
- var cleanBatchCount int
-
- // go through all key index entries
- for !doneIterating {
- cleanBatchCount++
- var idxs []dpaDBIndex
- var chunkHashes [][]byte
- var pos []uint8
- it := s.db.NewIterator()
-
- it.Seek(lastIdxKey)
-
- // 4096 is just a nice number, don't look for any hidden meaning here...
- var i int
- for i = 0; i < 4096; i++ {
-
- // this really shouldn't happen unless database is empty
- // but let's keep it to be safe
- if !it.Valid() {
- doneIterating = true
- break
- }
-
- // if it's not keyindex anymore we're done iterating
- rowType, chunkHash := parseIdxKey(it.Key())
- if rowType != keyIndex {
- doneIterating = true
- break
- }
-
- // decode the retrieved index
- var idx dpaDBIndex
- err := decodeIndex(it.Value(), &idx)
- if err != nil {
- return fmt.Errorf("corrupt index: %v", err)
- }
- po := s.po(chunkHash)
- lastIdxKey = it.Key()
-
- // if we don't find the data key, remove the entry
- // if we find it, add to the array of new gc indices to create
- dataKey := getDataKey(idx.Idx, po)
- _, err = s.db.Get(dataKey)
- if err != nil {
- log.Warn("deleting inconsistent index (missing data)", "key", chunkHash)
- batch.Delete(it.Key())
- } else {
- idxs = append(idxs, idx)
- chunkHashes = append(chunkHashes, chunkHash)
- pos = append(pos, po)
- okEntryCount++
- if idx.Idx > poPtrs[po] {
- poPtrs[po] = idx.Idx
- }
- }
- totalEntryCount++
- it.Next()
- }
- it.Release()
-
- // flush the key index corrections
- err := s.db.Write(&batch)
- if err != nil {
- return err
- }
- batch.Reset()
-
- // add correct gc indices
- for i, okIdx := range idxs {
- gcIdxKey := getGCIdxKey(&okIdx)
- gcIdxData := getGCIdxValue(&okIdx, pos[i], chunkHashes[i])
- batch.Put(gcIdxKey, gcIdxData)
- log.Trace("clean ok", "key", chunkHashes[i], "gcKey", gcIdxKey, "gcData", gcIdxData)
- }
-
- // flush them
- err = s.db.Write(&batch)
- if err != nil {
- return err
- }
- batch.Reset()
-
- log.Debug("clean gc index pass", "batch", cleanBatchCount, "checked", i, "kept", len(idxs))
- }
-
- log.Debug("gc cleanup entries", "ok", okEntryCount, "total", totalEntryCount, "batchlen", batch.Len())
-
- // lastly add updated entry count
- var entryCount [8]byte
- binary.BigEndian.PutUint64(entryCount[:], okEntryCount)
- batch.Put(keyEntryCnt, entryCount[:])
-
- // and add the new po index pointers
- var poKey [2]byte
- poKey[0] = keyDistanceCnt
- for i, poPtr := range poPtrs {
- poKey[1] = uint8(i)
- if poPtr == 0 {
- batch.Delete(poKey[:])
- } else {
- var idxCount [8]byte
- binary.BigEndian.PutUint64(idxCount[:], poPtr)
- batch.Put(poKey[:], idxCount[:])
- }
- }
-
- // if you made it this far your harddisk has survived. Congratulations
- return s.db.Write(&batch)
-}
-
-// Delete removes a chunk and updates indices.
-// Is thread safe
-func (s *LDBStore) Delete(addr Address) error {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- ikey := getIndexKey(addr)
-
- idata, err := s.db.Get(ikey)
- if err != nil {
- return err
- }
-
- var idx dpaDBIndex
- decodeIndex(idata, &idx)
- proximity := s.po(addr)
- return s.deleteNow(&idx, ikey, proximity)
-}
-
-// executes one delete operation immediately
-// see *LDBStore.delete
-func (s *LDBStore) deleteNow(idx *dpaDBIndex, idxKey []byte, po uint8) error {
- batch := new(leveldb.Batch)
- s.delete(batch, idx, idxKey, po)
- return s.db.Write(batch)
-}
-
-// adds a delete chunk operation to the provided batch
-// if called directly, decrements entrycount regardless if the chunk exists upon deletion. Risk of wrap to max uint64
-func (s *LDBStore) delete(batch *leveldb.Batch, idx *dpaDBIndex, idxKey []byte, po uint8) {
- metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)
-
- gcIdxKey := getGCIdxKey(idx)
- batch.Delete(gcIdxKey)
- dataKey := getDataKey(idx.Idx, po)
- batch.Delete(dataKey)
- batch.Delete(idxKey)
- s.entryCnt--
- dbEntryCount.Dec(1)
- cntKey := make([]byte, 2)
- cntKey[0] = keyDistanceCnt
- cntKey[1] = po
- batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
- batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
-}
-
-func (s *LDBStore) BinIndex(po uint8) uint64 {
- s.lock.RLock()
- defer s.lock.RUnlock()
- return s.bucketCnt[po]
-}
-
-// Put adds a chunk to the database, adding indices and incrementing global counters.
-// If it already exists, it merely increments the access count of the existing entry.
-// Is thread safe
-func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
- metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
- log.Trace("ldbstore.put", "key", chunk.Address())
-
- ikey := getIndexKey(chunk.Address())
- var index dpaDBIndex
-
- po := s.po(chunk.Address())
-
- s.lock.Lock()
-
- if s.closed {
- s.lock.Unlock()
- return ErrDBClosed
- }
- batch := s.batch
-
- log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
- _, err := s.db.Get(ikey)
- if err != nil {
- s.doPut(chunk, &index, po)
- }
- idata := encodeIndex(&index)
- s.batch.Put(ikey, idata)
-
- // add the access-chunkindex index for garbage collection
- gcIdxKey := getGCIdxKey(&index)
- gcIdxData := getGCIdxValue(&index, po, chunk.Address())
- s.batch.Put(gcIdxKey, gcIdxData)
- s.lock.Unlock()
-
- select {
- case s.batchesC <- struct{}{}:
- default:
- }
-
- select {
- case <-batch.c:
- return batch.err
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-// force putting into db, does not check or update necessary indices
-func (s *LDBStore) doPut(chunk Chunk, index *dpaDBIndex, po uint8) {
- data := s.encodeDataFunc(chunk)
- dkey := getDataKey(s.dataIdx, po)
- s.batch.Put(dkey, data)
- index.Idx = s.dataIdx
- s.bucketCnt[po] = s.dataIdx
- s.entryCnt++
- dbEntryCount.Inc(1)
- s.dataIdx++
- index.Access = s.accessCnt
- s.accessCnt++
- cntKey := make([]byte, 2)
- cntKey[0] = keyDistanceCnt
- cntKey[1] = po
- s.batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
-}
-
-func (s *LDBStore) writeBatches() {
- for {
- select {
- case <-s.quit:
- log.Debug("DbStore: quit batch write loop")
- return
- case <-s.batchesC:
- err := s.writeCurrentBatch()
- if err != nil {
- log.Debug("DbStore: quit batch write loop", "err", err.Error())
- return
- }
- }
- }
-
-}
-
-func (s *LDBStore) writeCurrentBatch() error {
- s.lock.Lock()
- defer s.lock.Unlock()
- b := s.batch
- l := b.Len()
- if l == 0 {
- return nil
- }
- s.batch = newBatch()
- b.err = s.writeBatch(b, wEntryCnt|wAccessCnt|wIndexCnt)
- close(b.c)
- if s.entryCnt >= s.capacity {
- go s.collectGarbage()
- }
- return nil
-}
-
-// must be called non concurrently
-func (s *LDBStore) writeBatch(b *dbBatch, wFlag uint8) error {
- if wFlag&wEntryCnt > 0 {
- b.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
- }
- if wFlag&wIndexCnt > 0 {
- b.Put(keyDataIdx, U64ToBytes(s.dataIdx))
- }
- if wFlag&wAccessCnt > 0 {
- b.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
- }
- l := b.Len()
- if err := s.db.Write(b.Batch); err != nil {
- return fmt.Errorf("unable to write batch: %v", err)
- }
- log.Trace(fmt.Sprintf("batch write (%d entries)", l))
- return nil
-}
-
-// newMockEncodeDataFunc returns a function that stores the chunk data
-// to a mock store to bypass the default functionality encodeData.
-// The constructed function always returns the nil data, as DbStore does
-// not need to store the data, but still needs to create the index.
-func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk Chunk) []byte {
- return func(chunk Chunk) []byte {
- if err := mockStore.Put(chunk.Address(), encodeData(chunk)); err != nil {
- log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Address().Log(), err))
- }
- return chunk.Address()[:]
- }
-}
-
-// tryAccessIdx tries to find index entry. If found then increments the access
-// count for garbage collection and returns the index entry and true for found,
-// otherwise returns nil and false.
-func (s *LDBStore) tryAccessIdx(addr Address, po uint8) (*dpaDBIndex, bool) {
- ikey := getIndexKey(addr)
- idata, err := s.db.Get(ikey)
- if err != nil {
- return nil, false
- }
-
- index := new(dpaDBIndex)
- decodeIndex(idata, index)
- oldGCIdxKey := getGCIdxKey(index)
- s.batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
- index.Access = s.accessCnt
- idata = encodeIndex(index)
- s.accessCnt++
- s.batch.Put(ikey, idata)
- newGCIdxKey := getGCIdxKey(index)
- newGCIdxData := getGCIdxValue(index, po, ikey[1:])
- s.batch.Delete(oldGCIdxKey)
- s.batch.Put(newGCIdxKey, newGCIdxData)
- select {
- case s.batchesC <- struct{}{}:
- default:
- }
- return index, true
-}
-
-// GetSchema is returning the current named schema of the datastore as read from LevelDB
-func (s *LDBStore) GetSchema() (string, error) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- data, err := s.db.Get(keySchema)
- if err != nil {
- if err == leveldb.ErrNotFound {
- return DbSchemaNone, nil
- }
- return "", err
- }
-
- return string(data), nil
-}
-
-// PutSchema is saving a named schema to the LevelDB datastore
-func (s *LDBStore) PutSchema(schema string) error {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- return s.db.Put(keySchema, []byte(schema))
-}
-
-// Get retrieves the chunk matching the provided key from the database.
-// If the chunk entry does not exist, it returns an error
-// Updates access count and is thread safe
-func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) {
- metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
- log.Trace("ldbstore.get", "key", addr)
-
- s.lock.Lock()
- defer s.lock.Unlock()
- return s.get(addr)
-}
-
-// Has queries the underlying DB if a chunk with the given address is stored
-// Returns true if the chunk is found, false if not
-func (s *LDBStore) Has(_ context.Context, addr Address) bool {
- s.lock.RLock()
- defer s.lock.RUnlock()
-
- ikey := getIndexKey(addr)
- _, err := s.db.Get(ikey)
-
- return err == nil
-}
-
-// TODO: To conform with other private methods of this object indices should not be updated
-func (s *LDBStore) get(addr Address) (chunk Chunk, err error) {
- if s.closed {
- return nil, ErrDBClosed
- }
- proximity := s.po(addr)
- index, found := s.tryAccessIdx(addr, proximity)
- if found {
- var data []byte
- if s.getDataFunc != nil {
- // if getDataFunc is defined, use it to retrieve the chunk data
- log.Trace("ldbstore.get retrieve with getDataFunc", "key", addr)
- data, err = s.getDataFunc(addr)
- if err != nil {
- return
- }
- } else {
- // default DbStore functionality to retrieve chunk data
- datakey := getDataKey(index.Idx, proximity)
- data, err = s.db.Get(datakey)
- log.Trace("ldbstore.get retrieve", "key", addr, "indexkey", index.Idx, "datakey", fmt.Sprintf("%x", datakey), "proximity", proximity)
- if err != nil {
- log.Trace("ldbstore.get chunk found but could not be accessed", "key", addr, "err", err)
- s.deleteNow(index, getIndexKey(addr), s.po(addr))
- if err == leveldb.ErrNotFound {
- return nil, ErrChunkNotFound
- }
- return nil, err
- }
- }
-
- return decodeData(addr, data)
- } else {
- err = ErrChunkNotFound
- }
-
- return
-}
-
-// newMockGetDataFunc returns a function that reads chunk data from
-// the mock database, which is used as the value for DbStore.getFunc
-// to bypass the default functionality of DbStore with a mock store.
-func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {
- return func(addr Address) (data []byte, err error) {
- data, err = mockStore.Get(addr)
- if err == mock.ErrNotFound {
- // preserve ErrChunkNotFound error
- err = ErrChunkNotFound
- }
- return data, err
- }
-}
-
-func (s *LDBStore) setCapacity(c uint64) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- s.capacity = c
-
- for s.entryCnt > c {
- s.collectGarbage()
- }
-}
-
-func (s *LDBStore) Close() {
- close(s.quit)
- s.lock.Lock()
- s.closed = true
- s.lock.Unlock()
- // force writing out current batch
- s.writeCurrentBatch()
- s.db.Close()
-}
-
-// SyncIterator(start, stop, po, f) calls f on each hash of a bin po from start to stop
-func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Address, uint64) bool) error {
- metrics.GetOrRegisterCounter("ldbstore.synciterator", nil).Inc(1)
-
- sincekey := getDataKey(since, po)
- untilkey := getDataKey(until, po)
- it := s.db.NewIterator()
- defer it.Release()
-
- for ok := it.Seek(sincekey); ok; ok = it.Next() {
- metrics.GetOrRegisterCounter("ldbstore.synciterator.seek", nil).Inc(1)
-
- dbkey := it.Key()
- if dbkey[0] != keyData || dbkey[1] != po || bytes.Compare(untilkey, dbkey) < 0 {
- break
- }
- key := make([]byte, 32)
- val := it.Value()
- copy(key, val[:32])
- if !f(Address(key), binary.BigEndian.Uint64(dbkey[2:])) {
- break
- }
- }
- return it.Error()
-}
diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go
deleted file mode 100644
index 1cd4947be..000000000
--- a/swarm/storage/ldbstore_test.go
+++ /dev/null
@@ -1,788 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "fmt"
- "io/ioutil"
- "os"
- "strconv"
- "strings"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/swarm/chunk"
- "github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
- "github.com/ethereum/go-ethereum/swarm/testutil"
- ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
-)
-
-type testDbStore struct {
- *LDBStore
- dir string
-}
-
-func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
- dir, err := ioutil.TempDir("", "bzz-storage-test")
- if err != nil {
- return nil, func() {}, err
- }
-
- var db *LDBStore
- storeparams := NewDefaultStoreParams()
- params := NewLDBStoreParams(storeparams, dir)
- params.Po = testPoFunc
-
- if mock {
- globalStore := mem.NewGlobalStore()
- addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
- mockStore := globalStore.NewNodeStore(addr)
-
- db, err = NewMockDbStore(params, mockStore)
- } else {
- db, err = NewLDBStore(params)
- }
-
- cleanup := func() {
- if db != nil {
- db.Close()
- }
- err = os.RemoveAll(dir)
- if err != nil {
- panic(fmt.Sprintf("db cleanup failed: %v", err))
- }
- }
-
- return &testDbStore{db, dir}, cleanup, err
-}
-
-func testPoFunc(k Address) (ret uint8) {
- basekey := make([]byte, 32)
- return uint8(Proximity(basekey, k[:]))
-}
-
-func testDbStoreRandom(n int, mock bool, t *testing.T) {
- db, cleanup, err := newTestDbStore(mock, true)
- defer cleanup()
- if err != nil {
- t.Fatalf("init dbStore failed: %v", err)
- }
- testStoreRandom(db, n, t)
-}
-
-func testDbStoreCorrect(n int, mock bool, t *testing.T) {
- db, cleanup, err := newTestDbStore(mock, false)
- defer cleanup()
- if err != nil {
- t.Fatalf("init dbStore failed: %v", err)
- }
- testStoreCorrect(db, n, t)
-}
-
-func TestMarkAccessed(t *testing.T) {
- db, cleanup, err := newTestDbStore(false, true)
- defer cleanup()
- if err != nil {
- t.Fatalf("init dbStore failed: %v", err)
- }
-
- h := GenerateRandomChunk(chunk.DefaultSize)
-
- db.Put(context.Background(), h)
-
- var index dpaDBIndex
- addr := h.Address()
- idxk := getIndexKey(addr)
-
- idata, err := db.db.Get(idxk)
- if err != nil {
- t.Fatal(err)
- }
- decodeIndex(idata, &index)
-
- if index.Access != 0 {
- t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
- }
-
- db.MarkAccessed(addr)
- db.writeCurrentBatch()
-
- idata, err = db.db.Get(idxk)
- if err != nil {
- t.Fatal(err)
- }
- decodeIndex(idata, &index)
-
- if index.Access != 1 {
- t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
- }
-
-}
-
-func TestDbStoreRandom_1(t *testing.T) {
- testDbStoreRandom(1, false, t)
-}
-
-func TestDbStoreCorrect_1(t *testing.T) {
- testDbStoreCorrect(1, false, t)
-}
-
-func TestDbStoreRandom_1k(t *testing.T) {
- testDbStoreRandom(1000, false, t)
-}
-
-func TestDbStoreCorrect_1k(t *testing.T) {
- testDbStoreCorrect(1000, false, t)
-}
-
-func TestMockDbStoreRandom_1(t *testing.T) {
- testDbStoreRandom(1, true, t)
-}
-
-func TestMockDbStoreCorrect_1(t *testing.T) {
- testDbStoreCorrect(1, true, t)
-}
-
-func TestMockDbStoreRandom_1k(t *testing.T) {
- testDbStoreRandom(1000, true, t)
-}
-
-func TestMockDbStoreCorrect_1k(t *testing.T) {
- testDbStoreCorrect(1000, true, t)
-}
-
-func testDbStoreNotFound(t *testing.T, mock bool) {
- db, cleanup, err := newTestDbStore(mock, false)
- defer cleanup()
- if err != nil {
- t.Fatalf("init dbStore failed: %v", err)
- }
-
- _, err = db.Get(context.TODO(), ZeroAddr)
- if err != ErrChunkNotFound {
- t.Errorf("Expected ErrChunkNotFound, got %v", err)
- }
-}
-
-func TestDbStoreNotFound(t *testing.T) {
- testDbStoreNotFound(t, false)
-}
-func TestMockDbStoreNotFound(t *testing.T) {
- testDbStoreNotFound(t, true)
-}
-
-func testIterator(t *testing.T, mock bool) {
- var i int
- var poc uint
- chunkcount := 32
- chunkkeys := NewAddressCollection(chunkcount)
- chunkkeysResults := NewAddressCollection(chunkcount)
-
- db, cleanup, err := newTestDbStore(mock, false)
- defer cleanup()
- if err != nil {
- t.Fatalf("init dbStore failed: %v", err)
- }
-
- chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount)
-
- for i = 0; i < len(chunks); i++ {
- chunkkeys[i] = chunks[i].Address()
- err := db.Put(context.TODO(), chunks[i])
- if err != nil {
- t.Fatalf("dbStore.Put failed: %v", err)
- }
- }
-
- for i = 0; i < len(chunkkeys); i++ {
- log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
- }
- i = 0
- for poc = 0; poc <= 255; poc++ {
- err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
- log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
- chunkkeysResults[n] = k
- i++
- return true
- })
- if err != nil {
- t.Fatalf("Iterator call failed: %v", err)
- }
- }
-
- for i = 0; i < chunkcount; i++ {
- if !bytes.Equal(chunkkeys[i], chunkkeysResults[i]) {
- t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeysResults[i])
- }
- }
-
-}
-
-func TestIterator(t *testing.T) {
- testIterator(t, false)
-}
-func TestMockIterator(t *testing.T) {
- testIterator(t, true)
-}
-
-func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
- db, cleanup, err := newTestDbStore(mock, true)
- defer cleanup()
- if err != nil {
- b.Fatalf("init dbStore failed: %v", err)
- }
- benchmarkStorePut(db, n, b)
-}
-
-func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
- db, cleanup, err := newTestDbStore(mock, true)
- defer cleanup()
- if err != nil {
- b.Fatalf("init dbStore failed: %v", err)
- }
- benchmarkStoreGet(db, n, b)
-}
-
-func BenchmarkDbStorePut_500(b *testing.B) {
- benchmarkDbStorePut(500, false, b)
-}
-
-func BenchmarkDbStoreGet_500(b *testing.B) {
- benchmarkDbStoreGet(500, false, b)
-}
-
-func BenchmarkMockDbStorePut_500(b *testing.B) {
- benchmarkDbStorePut(500, true, b)
-}
-
-func BenchmarkMockDbStoreGet_500(b *testing.B) {
- benchmarkDbStoreGet(500, true, b)
-}
-
-// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
-// retrieve them, provided we don't hit the garbage collection
-func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
- capacity := 50
- n := 10
-
- ldb, cleanup := newLDBStore(t)
- ldb.setCapacity(uint64(capacity))
- defer cleanup()
-
- chunks, err := mputRandomChunks(ldb, n)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-
- for _, ch := range chunks {
- ret, err := ldb.Get(context.TODO(), ch.Address())
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(ret.Data(), ch.Data()) {
- t.Fatal("expected to get the same data back, but got smth else")
- }
- }
-
- if ldb.entryCnt != uint64(n) {
- t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
- }
-
- if ldb.accessCnt != uint64(2*n) {
- t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
- }
-}
-
-// TestLDBStoreCollectGarbage tests that we can put more chunks than LevelDB's capacity, and
-// retrieve only some of them, because garbage collection must have partially cleared the store
-// Also tests that we can delete chunks and that we can trigger garbage collection
-func TestLDBStoreCollectGarbage(t *testing.T) {
-
-	// below max round
- initialCap := defaultMaxGCRound / 100
- cap := initialCap / 2
- t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
-
- if testutil.RaceEnabled {
-		t.Skip("only the simplest case is run, as the others are flaky with race")
- // Note: some tests fail consistently and even locally with `-race`
- }
-
- t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
-
- // at max round
- cap = initialCap
- t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
- t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
-
-	// more than max round, not on threshold
- cap = initialCap + 500
- t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
- t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
-
-}
-
-func testLDBStoreCollectGarbage(t *testing.T) {
- params := strings.Split(t.Name(), "/")
- capacity, err := strconv.Atoi(params[2])
- if err != nil {
- t.Fatal(err)
- }
- n, err := strconv.Atoi(params[3])
- if err != nil {
- t.Fatal(err)
- }
-
- ldb, cleanup := newLDBStore(t)
- ldb.setCapacity(uint64(capacity))
- defer cleanup()
-
- // retrieve the gc round target count for the db capacity
- ldb.startGC(capacity)
- roundTarget := ldb.gc.target
-
- // split put counts to gc target count threshold, and wait for gc to finish in between
- var allChunks []Chunk
- remaining := n
- for remaining > 0 {
- var putCount int
- if remaining < roundTarget {
- putCount = remaining
- } else {
- putCount = roundTarget
- }
- remaining -= putCount
- chunks, err := mputRandomChunks(ldb, putCount)
- if err != nil {
- t.Fatal(err.Error())
- }
- allChunks = append(allChunks, chunks...)
- ldb.lock.RLock()
- log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
- ldb.lock.RUnlock()
-
- waitGc(ldb)
- }
-
- // attempt gets on all put chunks
- var missing int
- for _, ch := range allChunks {
- ret, err := ldb.Get(context.TODO(), ch.Address())
- if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
- missing++
- continue
- }
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(ret.Data(), ch.Data()) {
- t.Fatal("expected to get the same data back, but got smth else")
- }
-
- log.Trace("got back chunk", "chunk", ret)
- }
-
- // all surplus chunks should be missing
- expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
- if missing != expectMissing {
- t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
- }
-
- log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-}
-
-// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
-func TestLDBStoreAddRemove(t *testing.T) {
- ldb, cleanup := newLDBStore(t)
- ldb.setCapacity(200)
- defer cleanup()
-
- n := 100
- chunks, err := mputRandomChunks(ldb, n)
- if err != nil {
- t.Fatalf(err.Error())
- }
-
- for i := 0; i < n; i++ {
- // delete all even index chunks
- if i%2 == 0 {
- ldb.Delete(chunks[i].Address())
- }
- }
-
- log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-
- for i := 0; i < n; i++ {
- ret, err := ldb.Get(context.TODO(), chunks[i].Address())
-
- if i%2 == 0 {
- // expect even chunks to be missing
- if err == nil {
- t.Fatal("expected chunk to be missing, but got no error")
- }
- } else {
- // expect odd chunks to be retrieved successfully
- if err != nil {
- t.Fatalf("expected no error, but got %s", err)
- }
-
- if !bytes.Equal(ret.Data(), chunks[i].Data()) {
- t.Fatal("expected to get the same data back, but got smth else")
- }
- }
- }
-}
-
-func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
- t.Skip("flaky with -race flag")
-
- params := strings.Split(t.Name(), "/")
- capacity, err := strconv.Atoi(params[2])
- if err != nil {
- t.Fatal(err)
- }
- n, err := strconv.Atoi(params[3])
- if err != nil {
- t.Fatal(err)
- }
-
- ldb, cleanup := newLDBStore(t)
- defer cleanup()
- ldb.setCapacity(uint64(capacity))
-
-	// put n chunks (more than the store capacity)
- chunks := make([]Chunk, n)
- for i := 0; i < n; i++ {
- c := GenerateRandomChunk(chunk.DefaultSize)
- chunks[i] = c
- log.Trace("generate random chunk", "idx", i, "chunk", c)
- }
-
- for i := 0; i < n; i++ {
- err := ldb.Put(context.TODO(), chunks[i])
- if err != nil {
- t.Fatal(err)
- }
- }
-
- waitGc(ldb)
-
- // delete all chunks
- // (only count the ones actually deleted, the rest will have been gc'd)
- deletes := 0
- for i := 0; i < n; i++ {
- if ldb.Delete(chunks[i].Address()) == nil {
- deletes++
- }
- }
-
- log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-
- if ldb.entryCnt != 0 {
-		t.Fatalf("ldb.entryCnt expected 0, got %v", ldb.entryCnt)
- }
-
- // the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
- expAccessCnt := uint64(n)
- if ldb.accessCnt != expAccessCnt {
- t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.accessCnt)
- }
-
- // retrieve the gc round target count for the db capacity
- ldb.startGC(capacity)
- roundTarget := ldb.gc.target
-
- remaining := n
- var puts int
- for remaining > 0 {
- var putCount int
- if remaining < roundTarget {
- putCount = remaining
- } else {
- putCount = roundTarget
- }
- remaining -= putCount
- for putCount > 0 {
- ldb.Put(context.TODO(), chunks[puts])
- ldb.lock.RLock()
- log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
- ldb.lock.RUnlock()
- puts++
- putCount--
- }
-
- waitGc(ldb)
- }
-
- // expect first surplus chunks to be missing, because they have the smallest access value
- expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
- for i := 0; i < expectMissing; i++ {
- _, err := ldb.Get(context.TODO(), chunks[i].Address())
- if err == nil {
- t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
- }
- }
-
- // expect last chunks to be present, as they have the largest access value
- for i := expectMissing; i < n; i++ {
- ret, err := ldb.Get(context.TODO(), chunks[i].Address())
- if err != nil {
- t.Fatalf("chunk %v: expected no error, but got %s", i, err)
- }
- if !bytes.Equal(ret.Data(), chunks[i].Data()) {
- t.Fatal("expected to get the same data back, but got smth else")
- }
- }
-}
-
-// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where accesscount differs from indexcount
-func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
-
- capacity := defaultMaxGCRound / 100 * 2
- n := capacity - 1
-
- ldb, cleanup := newLDBStore(t)
- ldb.setCapacity(uint64(capacity))
- defer cleanup()
-
- chunks, err := mputRandomChunks(ldb, n)
- if err != nil {
- t.Fatal(err.Error())
- }
- log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-
- // set first added capacity/2 chunks to highest accesscount
- for i := 0; i < capacity/2; i++ {
- _, err := ldb.Get(context.TODO(), chunks[i].Address())
- if err != nil {
-			t.Fatalf("fail get chunk #%d - %s: %v", i, chunks[i].Address(), err)
- }
- }
- _, err = mputRandomChunks(ldb, 2)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- // wait for garbage collection to kick in on the responsible actor
- waitGc(ldb)
-
- var missing int
- for i, ch := range chunks[2 : capacity/2] {
- ret, err := ldb.Get(context.TODO(), ch.Address())
- if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
- t.Fatalf("fail find chunk #%d - %s: %v", i, ch.Address(), err)
- }
-
- if !bytes.Equal(ret.Data(), ch.Data()) {
- t.Fatal("expected to get the same data back, but got smth else")
- }
- log.Trace("got back chunk", "chunk", ret)
- }
-
- log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
-}
-
-func TestCleanIndex(t *testing.T) {
- if testutil.RaceEnabled {
- t.Skip("disabled because it times out with race detector")
- }
-
- capacity := 5000
- n := 3
-
- ldb, cleanup := newLDBStore(t)
- ldb.setCapacity(uint64(capacity))
- defer cleanup()
-
- chunks, err := mputRandomChunks(ldb, n)
- if err != nil {
- t.Fatal(err)
- }
-
- // remove the data of the first chunk
- po := ldb.po(chunks[0].Address()[:])
- dataKey := make([]byte, 10)
- dataKey[0] = keyData
- dataKey[1] = byte(po)
- // dataKey[2:10] = first chunk has storageIdx 0 on [2:10]
- if _, err := ldb.db.Get(dataKey); err != nil {
- t.Fatal(err)
- }
- if err := ldb.db.Delete(dataKey); err != nil {
- t.Fatal(err)
- }
-
- // remove the gc index row for the first chunk
- gcFirstCorrectKey := make([]byte, 9)
- gcFirstCorrectKey[0] = keyGCIdx
- if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
- t.Fatal(err)
- }
-
- // warp the gc data of the second chunk
- // this data should be correct again after the clean
- gcSecondCorrectKey := make([]byte, 9)
- gcSecondCorrectKey[0] = keyGCIdx
- binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
- gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
- if err != nil {
- t.Fatal(err)
- }
- warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
- copy(warpedGCVal[1:], gcSecondCorrectVal)
- if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
- t.Fatal(err)
- }
- if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
- t.Fatal(err)
- }
-
- if err := ldb.CleanGCIndex(); err != nil {
- t.Fatal(err)
- }
-
- // the index without corresponding data should have been deleted
- idxKey := make([]byte, 33)
- idxKey[0] = keyIndex
- copy(idxKey[1:], chunks[0].Address())
- if _, err := ldb.db.Get(idxKey); err == nil {
- t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
- }
-
- // the two other indices should be present
- copy(idxKey[1:], chunks[1].Address())
- if _, err := ldb.db.Get(idxKey); err != nil {
- t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
- }
-
- copy(idxKey[1:], chunks[2].Address())
- if _, err := ldb.db.Get(idxKey); err != nil {
- t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
- }
-
- // first gc index should still be gone
- if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
- t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
- }
-
- // second gc index should still be fixed
- if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
- t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
- }
-
- // third gc index should be unchanged
- binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
- if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
- t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
- }
-
- c, err := ldb.db.Get(keyEntryCnt)
- if err != nil {
-		t.Fatalf("expected entry count to be present: %v", err)
- }
-
- // entrycount should now be one less
- entryCount := binary.BigEndian.Uint64(c)
- if entryCount != 2 {
-		t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
- }
-
-	// the chunks might accidentally be in the same bin
-	// if so, that bin counter will now be 2 (the highest added index);
-	// if not, the totals of the two bins will sum to 3
- poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
- if poBins[0] == poBins[1] {
- poBins = poBins[:1]
- }
-
- var binTotal uint64
- var currentBin [2]byte
- currentBin[0] = keyDistanceCnt
- if len(poBins) == 1 {
- currentBin[1] = poBins[0]
- c, err := ldb.db.Get(currentBin[:])
- if err != nil {
-			t.Fatalf("expected bin count to be present: %v", err)
- }
- binCount := binary.BigEndian.Uint64(c)
- if binCount != 2 {
-			t.Fatalf("expected bin count to be 2, was %d", binCount)
- }
- } else {
- for _, bin := range poBins {
- currentBin[1] = bin
- c, err := ldb.db.Get(currentBin[:])
- if err != nil {
-				t.Fatalf("expected bin count to be present: %v", err)
- }
- binCount := binary.BigEndian.Uint64(c)
- binTotal += binCount
-
- }
- if binTotal != 3 {
- t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
- }
- }
-
- // check that the iterator quits properly
- chunks, err = mputRandomChunks(ldb, 4100)
- if err != nil {
- t.Fatal(err)
- }
-
- po = ldb.po(chunks[4099].Address()[:])
- dataKey = make([]byte, 10)
- dataKey[0] = keyData
- dataKey[1] = byte(po)
- binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
- if _, err := ldb.db.Get(dataKey); err != nil {
- t.Fatal(err)
- }
- if err := ldb.db.Delete(dataKey); err != nil {
- t.Fatal(err)
- }
-
- if err := ldb.CleanGCIndex(); err != nil {
- t.Fatal(err)
- }
-
-	// entrycount should now be one less than the number of added chunks
- c, err = ldb.db.Get(keyEntryCnt)
- if err != nil {
-		t.Fatalf("expected entry count to be present: %v", err)
- }
- entryCount = binary.BigEndian.Uint64(c)
- if entryCount != 4099+2 {
-		t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
- }
-}
-
-// Note: waitGc does not guarantee that we wait 1 GC round; it only
-// guarantees that if the GC is running we wait for that run to finish
-// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
-func waitGc(ldb *LDBStore) {
- <-ldb.gc.runC
- ldb.gc.runC <- struct{}{}
-}
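
The waitGc helper above rests on a one-slot token channel: assuming gc.runC is created as a buffered channel of size 1 and seeded with a token, as in the deleted ldbstore.go, the GC worker holds the token while a round is in flight, so receiving and immediately re-sending it blocks callers only for the duration of an active run. A generic sketch of that pattern:

runC := make(chan struct{}, 1)
runC <- struct{}{} // idle token

// worker: holds the token for the duration of one garbage collection round
go func() {
	for {
		<-runC
		// ... run one GC round ...
		runC <- struct{}{}
	}
}()

// waiter (what waitGc does): blocks only while a round is running
<-runC
runC <- struct{}{}
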
diff --git a/swarm/storage/localstore.go b/swarm/storage/localstore.go
deleted file mode 100644
index a8f6f037f..000000000
--- a/swarm/storage/localstore.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-import (
- "context"
- "path/filepath"
- "sync"
-
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/storage/mock"
-)
-
-type LocalStoreParams struct {
- *StoreParams
- ChunkDbPath string
- Validators []ChunkValidator `toml:"-"`
-}
-
-func NewDefaultLocalStoreParams() *LocalStoreParams {
- return &LocalStoreParams{
- StoreParams: NewDefaultStoreParams(),
- }
-}
-
-// this can only be set once all config options (file, cmd line, env vars)
-// have been evaluated
-func (p *LocalStoreParams) Init(path string) {
- if p.ChunkDbPath == "" {
- p.ChunkDbPath = filepath.Join(path, "chunks")
- }
-}
-
-// LocalStore is a combination of inmemory db over a disk persisted db
-// implements a Get/Put with fallback (caching) logic using any 2 ChunkStores
-type LocalStore struct {
- Validators []ChunkValidator
- memStore *MemStore
- DbStore *LDBStore
- mu sync.Mutex
-}
-
-// This constructor uses MemStore and DbStore as components
-func NewLocalStore(params *LocalStoreParams, mockStore *mock.NodeStore) (*LocalStore, error) {
- ldbparams := NewLDBStoreParams(params.StoreParams, params.ChunkDbPath)
- dbStore, err := NewMockDbStore(ldbparams, mockStore)
- if err != nil {
- return nil, err
- }
- return &LocalStore{
- memStore: NewMemStore(params.StoreParams, dbStore),
- DbStore: dbStore,
- Validators: params.Validators,
- }, nil
-}
-
-func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) {
- ldbparams := NewLDBStoreParams(params.StoreParams, params.ChunkDbPath)
- dbStore, err := NewLDBStore(ldbparams)
- if err != nil {
- return nil, err
- }
- localStore := &LocalStore{
- memStore: NewMemStore(params.StoreParams, dbStore),
- DbStore: dbStore,
- Validators: params.Validators,
- }
- return localStore, nil
-}
-
-// isValid returns true if chunk passes any of the LocalStore Validators.
-// isValid also returns true if LocalStore has no Validators.
-func (ls *LocalStore) isValid(chunk Chunk) bool {
- // by default chunks are valid. if we have 0 validators, then all chunks are valid.
- valid := true
-
- // ls.Validators contains a list of one validator per chunk type.
- // if one validator succeeds, then the chunk is valid
- for _, v := range ls.Validators {
- if valid = v.Validate(chunk); valid {
- break
- }
- }
- return valid
-}
-
-// Put is responsible for doing validation and storage of the chunk
-// by using configured ChunkValidators, MemStore and LDBStore.
-// If the chunk is not valid, its GetErrored function will
-// return ErrChunkInvalid.
-// This method will check if the chunk is already in the MemStore
-// and it will return it if it is. If there is an error from
-// the MemStore.Get, it will be returned by calling GetErrored
-// on the chunk.
-// This method is responsible for closing Chunk.ReqC channel
-// when the chunk is stored in memstore.
-// After the LDBStore.Put, it is ensured that the MemStore
-// contains the chunk with the same data, but nil ReqC channel.
-func (ls *LocalStore) Put(ctx context.Context, chunk Chunk) error {
- if !ls.isValid(chunk) {
- return ErrChunkInvalid
- }
-
- log.Trace("localstore.put", "key", chunk.Address())
- ls.mu.Lock()
- defer ls.mu.Unlock()
-
- _, err := ls.memStore.Get(ctx, chunk.Address())
- if err == nil {
- return nil
- }
- if err != nil && err != ErrChunkNotFound {
- return err
- }
- ls.memStore.Put(ctx, chunk)
- err = ls.DbStore.Put(ctx, chunk)
- return err
-}
-
-// Has queries the underlying DbStore if a chunk with the given address
-// is being stored there.
-// Returns true if it is stored, false if not
-func (ls *LocalStore) Has(ctx context.Context, addr Address) bool {
- return ls.DbStore.Has(ctx, addr)
-}
-
-// Get(chunk *Chunk) looks up a chunk in the local stores
-// This method is blocking until the chunk is retrieved
-// so additional timeout may be needed to wrap this call if
-// ChunkStores are remote and can have long latency
-func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk Chunk, err error) {
- ls.mu.Lock()
- defer ls.mu.Unlock()
-
- return ls.get(ctx, addr)
-}
-
-func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk Chunk, err error) {
- chunk, err = ls.memStore.Get(ctx, addr)
-
- if err != nil && err != ErrChunkNotFound {
- metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
- return nil, err
- }
-
- if err == nil {
- metrics.GetOrRegisterCounter("localstore.get.cachehit", nil).Inc(1)
- go ls.DbStore.MarkAccessed(addr)
- return chunk, nil
- }
-
- metrics.GetOrRegisterCounter("localstore.get.cachemiss", nil).Inc(1)
- chunk, err = ls.DbStore.Get(ctx, addr)
- if err != nil {
- metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
- return nil, err
- }
-
- ls.memStore.Put(ctx, chunk)
- return chunk, nil
-}
-
-func (ls *LocalStore) FetchFunc(ctx context.Context, addr Address) func(context.Context) error {
- ls.mu.Lock()
- defer ls.mu.Unlock()
-
- _, err := ls.get(ctx, addr)
- if err == nil {
- return nil
- }
- return func(context.Context) error {
- return err
- }
-}
-
-func (ls *LocalStore) BinIndex(po uint8) uint64 {
- return ls.DbStore.BinIndex(po)
-}
-
-func (ls *LocalStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
- return ls.DbStore.SyncIterator(from, to, po, f)
-}
-
-// Close the local store
-func (ls *LocalStore) Close() {
- ls.DbStore.Close()
-}
-
-// Migrate checks the datastore schema vs the runtime schema and runs
-// migrations if they don't match
-func (ls *LocalStore) Migrate() error {
- actualDbSchema, err := ls.DbStore.GetSchema()
- if err != nil {
- log.Error(err.Error())
- return err
- }
-
- if actualDbSchema == CurrentDbSchema {
- return nil
- }
-
- log.Debug("running migrations for", "schema", actualDbSchema, "runtime-schema", CurrentDbSchema)
-
- if actualDbSchema == DbSchemaNone {
- ls.migrateFromNoneToPurity()
- actualDbSchema = DbSchemaPurity
- }
-
- if err := ls.DbStore.PutSchema(actualDbSchema); err != nil {
- return err
- }
-
- if actualDbSchema == DbSchemaPurity {
- if err := ls.migrateFromPurityToHalloween(); err != nil {
- return err
- }
- actualDbSchema = DbSchemaHalloween
- }
-
- if err := ls.DbStore.PutSchema(actualDbSchema); err != nil {
- return err
- }
- return nil
-}
-
-func (ls *LocalStore) migrateFromNoneToPurity() {
- // delete chunks that are not valid, i.e. chunks that do not pass
- // any of the ls.Validators
- ls.DbStore.Cleanup(func(c Chunk) bool {
- return !ls.isValid(c)
- })
-}
-
-func (ls *LocalStore) migrateFromPurityToHalloween() error {
- return ls.DbStore.CleanGCIndex()
-}
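
As a reference for the API removed in this file, a minimal usage sketch of the old LocalStore; the constructor and method signatures are as in the deleted code, while ctx, ch, the data directory and the error handling are illustrative:

params := NewDefaultLocalStoreParams()
params.Init("/path/to/datadir") // hypothetical data directory
ls, err := NewTestLocalStoreForAddr(params)
if err != nil {
	return err
}
defer ls.Close()

// Put validates the chunk, caches it in the MemStore and persists it in the LDBStore
if err := ls.Put(ctx, ch); err != nil {
	return err
}

// Get is served from the MemStore when possible and falls back to the LDBStore,
// marking the chunk as accessed for garbage collection ordering
retrieved, err := ls.Get(ctx, ch.Address())
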
diff --git a/swarm/storage/localstore/export.go b/swarm/storage/localstore/export.go
new file mode 100644
index 000000000..411392b4e
--- /dev/null
+++ b/swarm/storage/localstore/export.go
@@ -0,0 +1,204 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package localstore
+
+import (
+ "archive/tar"
+ "context"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+ "github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/shed"
+)
+
+const (
+ // filename in tar archive that holds the information
+ // about exported data format version
+ exportVersionFilename = ".swarm-export-version"
+ // legacy version for previous LDBStore
+ legacyExportVersion = "1"
+ // current export format version
+ currentExportVersion = "2"
+)
+
+// Export writes all chunks in the retrieval data index
+// to the writer as a tar archive. It returns the
+// number of chunks exported.
+func (db *DB) Export(w io.Writer) (count int64, err error) {
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+
+ if err := tw.WriteHeader(&tar.Header{
+ Name: exportVersionFilename,
+ Mode: 0644,
+ Size: int64(len(currentExportVersion)),
+ }); err != nil {
+ return 0, err
+ }
+ if _, err := tw.Write([]byte(currentExportVersion)); err != nil {
+ return 0, err
+ }
+
+ err = db.retrievalDataIndex.Iterate(func(item shed.Item) (stop bool, err error) {
+ hdr := &tar.Header{
+ Name: hex.EncodeToString(item.Address),
+ Mode: 0644,
+ Size: int64(len(item.Data)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return false, err
+ }
+ if _, err := tw.Write(item.Data); err != nil {
+ return false, err
+ }
+ count++
+ return false, nil
+ }, nil)
+
+ return count, err
+}
+
+// Import reads tar archived chunk data from the reader and
+// stores the chunks in the database. It returns the number of
+// chunks imported.
+func (db *DB) Import(r io.Reader, legacy bool) (count int64, err error) {
+ tr := tar.NewReader(r)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ errC := make(chan error)
+ doneC := make(chan struct{})
+ tokenPool := make(chan struct{}, 100)
+ var wg sync.WaitGroup
+ go func() {
+ var (
+ firstFile = true
+ // if exportVersionFilename file is not present
+ // assume legacy version
+ version = legacyExportVersion
+ )
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ select {
+ case errC <- err:
+ case <-ctx.Done():
+ }
+ }
+ if firstFile {
+ firstFile = false
+ if hdr.Name == exportVersionFilename {
+ data, err := ioutil.ReadAll(tr)
+ if err != nil {
+ select {
+ case errC <- err:
+ case <-ctx.Done():
+ }
+ }
+ version = string(data)
+ continue
+ }
+ }
+
+ if len(hdr.Name) != 64 {
+ log.Warn("ignoring non-chunk file", "name", hdr.Name)
+ continue
+ }
+
+ keybytes, err := hex.DecodeString(hdr.Name)
+ if err != nil {
+ log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
+ continue
+ }
+
+ data, err := ioutil.ReadAll(tr)
+ if err != nil {
+ select {
+ case errC <- err:
+ case <-ctx.Done():
+ }
+ }
+ key := chunk.Address(keybytes)
+
+ var ch chunk.Chunk
+ switch version {
+ case legacyExportVersion:
+ // LDBStore Export exported chunk data prefixed with the chunk key.
+ // That is not necessary, as the key is in the chunk filename,
+ // but backward compatibility needs to be preserved.
+ ch = chunk.NewChunk(key, data[32:])
+ case currentExportVersion:
+ ch = chunk.NewChunk(key, data)
+ default:
+ select {
+ case errC <- fmt.Errorf("unsupported export data version %q", version):
+ case <-ctx.Done():
+ }
+ }
+ tokenPool <- struct{}{}
+ wg.Add(1)
+
+ go func() {
+ _, err := db.Put(ctx, chunk.ModePutUpload, ch)
+ select {
+ case errC <- err:
+ case <-ctx.Done():
+ wg.Done()
+ <-tokenPool
+ default:
+ _, err := db.Put(ctx, chunk.ModePutUpload, ch)
+ if err != nil {
+ errC <- err
+ }
+ wg.Done()
+ <-tokenPool
+ }
+ }()
+
+ count++
+ }
+ wg.Wait()
+ close(doneC)
+ }()
+
+ // wait for all chunks to be stored
+ for {
+ select {
+ case err := <-errC:
+ if err != nil {
+ return count, err
+ }
+ case <-ctx.Done():
+ return count, ctx.Err()
+ default:
+ select {
+ case <-doneC:
+ return count, nil
+ default:
+ }
+ }
+ }
+}
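
A minimal round trip through the new Export/Import API, essentially what TestExportImport in the next file exercises; db1, db2 and the error handling are illustrative:

var buf bytes.Buffer

// write every chunk of db1 as a tar entry, preceded by the .swarm-export-version marker
exported, err := db1.Export(&buf)
if err != nil {
	return err
}

// read the stream into db2; legacy is false because the stream carries the current version marker
imported, err := db2.Import(&buf, false)
if err != nil {
	return err
}
log.Info("export/import round trip", "exported", exported, "imported", imported)
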
diff --git a/swarm/storage/localstore/export_test.go b/swarm/storage/localstore/export_test.go
new file mode 100644
index 000000000..d7f848f80
--- /dev/null
+++ b/swarm/storage/localstore/export_test.go
@@ -0,0 +1,80 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package localstore
+
+import (
+ "bytes"
+ "context"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+)
+
+// TestExportImport constructs two databases, one to put and export
+// chunks and another one to import and validate that all chunks are
+// imported.
+func TestExportImport(t *testing.T) {
+ db1, cleanup1 := newTestDB(t, nil)
+ defer cleanup1()
+
+ var chunkCount = 100
+
+ chunks := make(map[string][]byte, chunkCount)
+ for i := 0; i < chunkCount; i++ {
+ ch := generateTestRandomChunk()
+
+ _, err := db1.Put(context.Background(), chunk.ModePutUpload, ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ chunks[string(ch.Address())] = ch.Data()
+ }
+
+ var buf bytes.Buffer
+
+ c, err := db1.Export(&buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantChunksCount := int64(len(chunks))
+ if c != wantChunksCount {
+ t.Errorf("got export count %v, want %v", c, wantChunksCount)
+ }
+
+ db2, cleanup2 := newTestDB(t, nil)
+ defer cleanup2()
+
+ c, err = db2.Import(&buf, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if c != wantChunksCount {
+ t.Errorf("got import count %v, want %v", c, wantChunksCount)
+ }
+
+ for a, want := range chunks {
+ addr := chunk.Address([]byte(a))
+ ch, err := db2.Get(context.Background(), chunk.ModeGetRequest, addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := ch.Data()
+ if !bytes.Equal(got, want) {
+ t.Fatalf("chunk %s: got data %x, want %x", addr.Hex(), got, want)
+ }
+ }
+}
diff --git a/swarm/storage/localstore/gc.go b/swarm/storage/localstore/gc.go
index 84c4f596d..748e0d663 100644
--- a/swarm/storage/localstore/gc.go
+++ b/swarm/storage/localstore/gc.go
@@ -17,7 +17,10 @@
package localstore
import (
+ "time"
+
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
@@ -75,6 +78,15 @@ func (db *DB) collectGarbageWorker() {
// the rest of the garbage as the batch size limit is reached.
// This function is called in collectGarbageWorker.
func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
+ metricName := "localstore.gc"
+ metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+ defer totalTimeMetric(metricName, time.Now())
+ defer func() {
+ if err != nil {
+ metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
+ }
+ }()
+
batch := new(leveldb.Batch)
target := db.gcTarget()
@@ -86,12 +98,17 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
if err != nil {
return 0, true, err
}
+ metrics.GetOrRegisterGauge(metricName+".gcsize", nil).Update(int64(gcSize))
done = true
err = db.gcIndex.Iterate(func(item shed.Item) (stop bool, err error) {
if gcSize-collectedCount <= target {
return true, nil
}
+
+ metrics.GetOrRegisterGauge(metricName+".storets", nil).Update(item.StoreTimestamp)
+ metrics.GetOrRegisterGauge(metricName+".accessts", nil).Update(item.AccessTimestamp)
+
// delete from retrieve, pull, gc
db.retrievalDataIndex.DeleteInBatch(batch, item)
db.retrievalAccessIndex.DeleteInBatch(batch, item)
@@ -109,11 +126,13 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
if err != nil {
return 0, false, err
}
+ metrics.GetOrRegisterCounter(metricName+".collected-count", nil).Inc(int64(collectedCount))
db.gcSize.PutInBatch(batch, gcSize-collectedCount)
err = db.shed.WriteBatch(batch)
if err != nil {
+ metrics.GetOrRegisterCounter(metricName+".writebatch.err", nil).Inc(1)
return 0, false, err
}
return collectedCount, done, nil
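
The instrumentation added to collectGarbage above follows a small reusable pattern: increment a call counter on entry, time the whole call with a deferred resetting timer, and count errors in a deferred closure over the named return value. A stand-alone sketch, where the method and metric names are illustrative and totalTimeMetric is the helper added further down in localstore.go:

func (db *DB) exampleOperation() (err error) {
	metricName := "localstore.example"
	metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
	defer totalTimeMetric(metricName, time.Now())
	defer func() {
		if err != nil {
			metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
		}
	}()

	// ... actual work goes here ...
	return nil
}
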
diff --git a/swarm/storage/localstore/gc_test.go b/swarm/storage/localstore/gc_test.go
index 081e0af80..4a6e0a5f4 100644
--- a/swarm/storage/localstore/gc_test.go
+++ b/swarm/storage/localstore/gc_test.go
@@ -17,6 +17,7 @@
package localstore
import (
+ "context"
"io/ioutil"
"math/rand"
"os"
@@ -63,26 +64,23 @@ func testDB_collectGarbageWorker(t *testing.T) {
})()
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
- syncer := db.NewSetter(ModeSetSync)
-
addrs := make([]chunk.Address, 0)
// upload random chunks
for i := 0; i < chunkCount; i++ {
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := uploader.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- err = syncer.Set(chunk.Address())
+ err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
- addrs = append(addrs, chunk.Address())
+ addrs = append(addrs, ch.Address())
}
gcTarget := db.gcTarget()
@@ -110,7 +108,7 @@ func testDB_collectGarbageWorker(t *testing.T) {
// the first synced chunk should be removed
t.Run("get the first synced chunk", func(t *testing.T) {
- _, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
if err != chunk.ErrChunkNotFound {
t.Errorf("got error %v, want %v", err, chunk.ErrChunkNotFound)
}
@@ -118,7 +116,7 @@ func testDB_collectGarbageWorker(t *testing.T) {
// last synced chunk should not be removed
t.Run("get most recent synced chunk", func(t *testing.T) {
- _, err := db.NewGetter(ModeGetRequest).Get(addrs[len(addrs)-1])
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[len(addrs)-1])
if err != nil {
t.Fatal(err)
}
@@ -134,9 +132,6 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
})
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
- syncer := db.NewSetter(ModeSetSync)
-
testHookCollectGarbageChan := make(chan uint64)
defer setTestHookCollectGarbage(func(collectedCount uint64) {
testHookCollectGarbageChan <- collectedCount
@@ -146,19 +141,19 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
// upload random chunks just up to the capacity
for i := 0; i < int(db.capacity)-1; i++ {
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := uploader.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- err = syncer.Set(chunk.Address())
+ err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
- addrs = append(addrs, chunk.Address())
+ addrs = append(addrs, ch.Address())
}
// set update gc test hook to signal when
@@ -172,7 +167,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
// request the latest synced chunk
// to prioritize it in the gc index
// not to be collected
- _, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
if err != nil {
t.Fatal(err)
}
@@ -191,11 +186,11 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
// upload and sync another chunk to trigger
// garbage collection
ch := generateTestRandomChunk()
- err = uploader.Put(ch)
+ _, err = db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- err = syncer.Set(ch.Address())
+ err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
@@ -235,7 +230,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
// requested chunk should not be removed
t.Run("get requested chunk", func(t *testing.T) {
- _, err := db.NewGetter(ModeGetRequest).Get(addrs[0])
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[0])
if err != nil {
t.Fatal(err)
}
@@ -243,7 +238,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
// the second synced chunk should be removed
t.Run("get gc-ed chunk", func(t *testing.T) {
- _, err := db.NewGetter(ModeGetRequest).Get(addrs[1])
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[1])
if err != chunk.ErrChunkNotFound {
t.Errorf("got error %v, want %v", err, chunk.ErrChunkNotFound)
}
@@ -251,7 +246,7 @@ func TestDB_collectGarbageWorker_withRequests(t *testing.T) {
// last synced chunk should not be removed
t.Run("get most recent synced chunk", func(t *testing.T) {
- _, err := db.NewGetter(ModeGetRequest).Get(addrs[len(addrs)-1])
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, addrs[len(addrs)-1])
if err != nil {
t.Fatal(err)
}
@@ -275,20 +270,17 @@ func TestDB_gcSize(t *testing.T) {
t.Fatal(err)
}
- uploader := db.NewPutter(ModePutUpload)
- syncer := db.NewSetter(ModeSetSync)
-
count := 100
for i := 0; i < count; i++ {
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := uploader.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- err = syncer.Set(chunk.Address())
+ err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
diff --git a/swarm/storage/localstore/index_test.go b/swarm/storage/localstore/index_test.go
index cf19e4f6c..0f23aa10a 100644
--- a/swarm/storage/localstore/index_test.go
+++ b/swarm/storage/localstore/index_test.go
@@ -18,6 +18,7 @@ package localstore
import (
"bytes"
+ "context"
"math/rand"
"testing"
@@ -35,29 +36,22 @@ func TestDB_pullIndex(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
chunkCount := 50
chunks := make([]testIndexChunk, chunkCount)
// upload random chunks
for i := 0; i < chunkCount; i++ {
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := uploader.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
chunks[i] = testIndexChunk{
- Chunk: chunk,
- // this timestamp is not the same as in
- // the index, but given that uploads
- // are sequential and that only ordering
- // of events matter, this information is
- // sufficient
- storeTimestamp: now(),
+ Chunk: ch,
+ binID: uint64(i),
}
}
@@ -70,10 +64,10 @@ func TestDB_pullIndex(t *testing.T) {
if poi > poj {
return false
}
- if chunks[i].storeTimestamp < chunks[j].storeTimestamp {
+ if chunks[i].binID < chunks[j].binID {
return true
}
- if chunks[i].storeTimestamp > chunks[j].storeTimestamp {
+ if chunks[i].binID > chunks[j].binID {
return false
}
return bytes.Compare(chunks[i].Address(), chunks[j].Address()) == -1
@@ -87,23 +81,21 @@ func TestDB_gcIndex(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
chunkCount := 50
chunks := make([]testIndexChunk, chunkCount)
// upload random chunks
for i := 0; i < chunkCount; i++ {
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := uploader.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
chunks[i] = testIndexChunk{
- Chunk: chunk,
+ Chunk: ch,
}
}
@@ -123,9 +115,9 @@ func TestDB_gcIndex(t *testing.T) {
})()
t.Run("request unsynced", func(t *testing.T) {
- chunk := chunks[1]
+ ch := chunks[1]
- _, err := db.NewGetter(ModeGetRequest).Get(chunk.Address())
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
@@ -140,9 +132,9 @@ func TestDB_gcIndex(t *testing.T) {
})
t.Run("sync one chunk", func(t *testing.T) {
- chunk := chunks[0]
+ ch := chunks[0]
- err := db.NewSetter(ModeSetSync).Set(chunk.Address())
+ err := db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
@@ -154,10 +146,8 @@ func TestDB_gcIndex(t *testing.T) {
})
t.Run("sync all chunks", func(t *testing.T) {
- setter := db.NewSetter(ModeSetSync)
-
for i := range chunks {
- err := setter.Set(chunks[i].Address())
+ err := db.Set(context.Background(), chunk.ModeSetSync, chunks[i].Address())
if err != nil {
t.Fatal(err)
}
@@ -171,7 +161,7 @@ func TestDB_gcIndex(t *testing.T) {
t.Run("request one chunk", func(t *testing.T) {
i := 6
- _, err := db.NewGetter(ModeGetRequest).Get(chunks[i].Address())
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, chunks[i].Address())
if err != nil {
t.Fatal(err)
}
@@ -189,14 +179,13 @@ func TestDB_gcIndex(t *testing.T) {
})
t.Run("random chunk request", func(t *testing.T) {
- requester := db.NewGetter(ModeGetRequest)
rand.Shuffle(len(chunks), func(i, j int) {
chunks[i], chunks[j] = chunks[j], chunks[i]
})
- for _, chunk := range chunks {
- _, err := requester.Get(chunk.Address())
+ for _, ch := range chunks {
+ _, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
@@ -212,7 +201,7 @@ func TestDB_gcIndex(t *testing.T) {
t.Run("remove one chunk", func(t *testing.T) {
i := 3
- err := db.NewSetter(modeSetRemove).Set(chunks[i].Address())
+ err := db.Set(context.Background(), chunk.ModeSetRemove, chunks[i].Address())
if err != nil {
t.Fatal(err)
}
diff --git a/swarm/storage/localstore/localstore.go b/swarm/storage/localstore/localstore.go
index 98d4c7881..3b0bd8a93 100644
--- a/swarm/storage/localstore/localstore.go
+++ b/swarm/storage/localstore/localstore.go
@@ -23,11 +23,15 @@ import (
"time"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/shed"
"github.com/ethereum/go-ethereum/swarm/storage/mock"
)
+// DB implements chunk.Store.
+var _ chunk.Store = &DB{}
+
var (
	// ErrInvalidMode is returned when an unknown Mode
// is provided to the function.
@@ -69,6 +73,10 @@ type DB struct {
pullTriggers map[uint8][]chan struct{}
pullTriggersMu sync.RWMutex
+ // binIDs stores the latest chunk serial ID for every
+ // proximity order bin
+ binIDs shed.Uint64Vector
+
// garbage collection index
gcIndex shed.Index
@@ -124,7 +132,10 @@ type Options struct {
// One goroutine for writing batches is created.
func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
if o == nil {
- o = new(Options)
+ // default options
+ o = &Options{
+ Capacity: 5000000,
+ }
}
db = &DB{
capacity: o.Capacity,
@@ -148,11 +159,23 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
if err != nil {
return nil, err
}
+
// Identify current storage schema by arbitrary name.
db.schemaName, err = db.shed.NewStringField("schema-name")
if err != nil {
return nil, err
}
+ schemaName, err := db.schemaName.Get()
+ if err != nil {
+ return nil, err
+ }
+ if schemaName == "" {
+ // initial new localstore run
+ err := db.schemaName.Put(DbSchemaSanctuary)
+ if err != nil {
+ return nil, err
+ }
+ }
// Persist gc size.
db.gcSize, err = db.shed.NewUint64Field("gc-size")
if err != nil {
@@ -165,8 +188,9 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
)
if o.MockStore != nil {
encodeValueFunc = func(fields shed.Item) (value []byte, err error) {
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+ b := make([]byte, 16)
+ binary.BigEndian.PutUint64(b[:8], fields.BinID)
+ binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
err = o.MockStore.Put(fields.Address, fields.Data)
if err != nil {
return nil, err
@@ -174,25 +198,28 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
return b, nil
}
decodeValueFunc = func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
- e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
+ e.BinID = binary.BigEndian.Uint64(value[:8])
e.Data, err = o.MockStore.Get(keyItem.Address)
return e, err
}
} else {
encodeValueFunc = func(fields shed.Item) (value []byte, err error) {
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+ b := make([]byte, 16)
+ binary.BigEndian.PutUint64(b[:8], fields.BinID)
+ binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
value = append(b, fields.Data...)
return value, nil
}
decodeValueFunc = func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
- e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
- e.Data = value[8:]
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
+ e.BinID = binary.BigEndian.Uint64(value[:8])
+ e.Data = value[16:]
return e, nil
}
}
- // Index storing actual chunk address, data and store timestamp.
- db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|Data", shed.IndexFuncs{
+ // Index storing actual chunk address, data and bin id.
+ db.retrievalDataIndex, err = db.shed.NewIndex("Address->StoreTimestamp|BinID|Data", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
return fields.Address, nil
},
@@ -230,33 +257,37 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
return nil, err
}
// pull index allows history and live syncing per po bin
- db.pullIndex, err = db.shed.NewIndex("PO|StoredTimestamp|Hash->nil", shed.IndexFuncs{
+ db.pullIndex, err = db.shed.NewIndex("PO|BinID->Hash", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
key = make([]byte, 41)
key[0] = db.po(fields.Address)
- binary.BigEndian.PutUint64(key[1:9], uint64(fields.StoreTimestamp))
- copy(key[9:], fields.Address[:])
+ binary.BigEndian.PutUint64(key[1:9], fields.BinID)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
- e.Address = key[9:]
- e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[1:9]))
+ e.BinID = binary.BigEndian.Uint64(key[1:9])
return e, nil
},
EncodeValue: func(fields shed.Item) (value []byte, err error) {
- return nil, nil
+ return fields.Address, nil
},
DecodeValue: func(keyItem shed.Item, value []byte) (e shed.Item, err error) {
+ e.Address = value
return e, nil
},
})
if err != nil {
return nil, err
}
+ // create a vector for bin IDs
+ db.binIDs, err = db.shed.NewUint64Vector("bin-ids")
+ if err != nil {
+ return nil, err
+ }
// create a pull syncing triggers used by SubscribePull function
db.pullTriggers = make(map[uint8][]chan struct{})
// push index contains as yet unsynced chunks
- db.pushIndex, err = db.shed.NewIndex("StoredTimestamp|Hash->nil", shed.IndexFuncs{
+ db.pushIndex, err = db.shed.NewIndex("StoreTimestamp|Hash->Tags", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
key = make([]byte, 40)
binary.BigEndian.PutUint64(key[:8], uint64(fields.StoreTimestamp))
@@ -281,17 +312,17 @@ func New(path string, baseKey []byte, o *Options) (db *DB, err error) {
// create a push syncing triggers used by SubscribePush function
db.pushTriggers = make([]chan struct{}, 0)
// gc index for removable chunk ordered by ascending last access time
- db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|StoredTimestamp|Hash->nil", shed.IndexFuncs{
+ db.gcIndex, err = db.shed.NewIndex("AccessTimestamp|BinID|Hash->nil", shed.IndexFuncs{
EncodeKey: func(fields shed.Item) (key []byte, err error) {
b := make([]byte, 16, 16+len(fields.Address))
binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
- binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
+ binary.BigEndian.PutUint64(b[8:16], fields.BinID)
key = append(b, fields.Address...)
return key, nil
},
DecodeKey: func(key []byte) (e shed.Item, err error) {
e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
- e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[8:16]))
+ e.BinID = binary.BigEndian.Uint64(key[8:16])
e.Address = key[16:]
return e, nil
},
@@ -358,3 +389,12 @@ func init() {
return time.Now().UTC().UnixNano()
}
}
+
+// totalTimeMetric logs a message about time between provided start time
+// and the time when the function is called and sends a resetting timer metric
+// with provided name appended with ".total-time".
+func totalTimeMetric(name string, start time.Time) {
+ totalTime := time.Since(start)
+ log.Trace(name+" total time", "time", totalTime)
+ metrics.GetOrRegisterResettingTimer(name+".total-time", nil).Update(totalTime)
+}
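
To make the new retrieval data value layout concrete: the first 8 bytes hold the bin ID, the next 8 the store timestamp, and the rest the chunk payload, mirroring the encode/decode closures above. A sketch with hypothetical helper names:

// encode: BinID (8 bytes, big endian) | StoreTimestamp (8 bytes) | chunk data
func encodeRetrievalValue(binID uint64, storeTimestamp int64, data []byte) []byte {
	b := make([]byte, 16)
	binary.BigEndian.PutUint64(b[:8], binID)
	binary.BigEndian.PutUint64(b[8:16], uint64(storeTimestamp))
	return append(b, data...)
}

// decode reverses the framing above
func decodeRetrievalValue(value []byte) (binID uint64, storeTimestamp int64, data []byte) {
	binID = binary.BigEndian.Uint64(value[:8])
	storeTimestamp = int64(binary.BigEndian.Uint64(value[8:16]))
	return binID, storeTimestamp, value[16:]
}
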
diff --git a/swarm/storage/localstore/localstore_test.go b/swarm/storage/localstore/localstore_test.go
index 42e762587..6dbc4b7ad 100644
--- a/swarm/storage/localstore/localstore_test.go
+++ b/swarm/storage/localstore/localstore_test.go
@@ -18,6 +18,7 @@ package localstore
import (
"bytes"
+ "context"
"fmt"
"io/ioutil"
"math/rand"
@@ -59,23 +60,23 @@ func TestDB(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := db.NewPutter(ModePutUpload).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- got, err := db.NewGetter(ModeGetRequest).Get(chunk.Address())
+ got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(got.Address(), chunk.Address()) {
- t.Errorf("got address %x, want %x", got.Address(), chunk.Address())
+ if !bytes.Equal(got.Address(), ch.Address()) {
+ t.Errorf("got address %x, want %x", got.Address(), ch.Address())
}
- if !bytes.Equal(got.Data(), chunk.Data()) {
- t.Errorf("got data %x, want %x", got.Data(), chunk.Data())
+ if !bytes.Equal(got.Data(), ch.Data()) {
+ t.Errorf("got data %x, want %x", got.Data(), ch.Data())
}
}
@@ -113,19 +114,17 @@ func TestDB_updateGCSem(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := db.NewPutter(ModePutUpload).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- getter := db.NewGetter(ModeGetRequest)
-
// get more chunks then maxParallelUpdateGC
// in time shorter then updateGCSleep
for i := 0; i < 5; i++ {
- _, err = getter.Get(chunk.Address())
+ _, err = db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
@@ -237,71 +236,71 @@ func newRetrieveIndexesTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTim
// newRetrieveIndexesTestWithAccess returns a test function that validates if the right
// chunk values are in the retrieval indexes when access time must be stored.
-func newRetrieveIndexesTestWithAccess(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
+func newRetrieveIndexesTestWithAccess(db *DB, ch chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
return func(t *testing.T) {
- item, err := db.retrievalDataIndex.Get(addressToItem(chunk.Address()))
+ item, err := db.retrievalDataIndex.Get(addressToItem(ch.Address()))
if err != nil {
t.Fatal(err)
}
- validateItem(t, item, chunk.Address(), chunk.Data(), storeTimestamp, 0)
+ validateItem(t, item, ch.Address(), ch.Data(), storeTimestamp, 0)
if accessTimestamp > 0 {
- item, err = db.retrievalAccessIndex.Get(addressToItem(chunk.Address()))
+ item, err = db.retrievalAccessIndex.Get(addressToItem(ch.Address()))
if err != nil {
t.Fatal(err)
}
- validateItem(t, item, chunk.Address(), nil, 0, accessTimestamp)
+ validateItem(t, item, ch.Address(), nil, 0, accessTimestamp)
}
}
}
// newPullIndexTest returns a test function that validates if the right
// chunk values are in the pull index.
-func newPullIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
+func newPullIndexTest(db *DB, ch chunk.Chunk, binID uint64, wantError error) func(t *testing.T) {
return func(t *testing.T) {
item, err := db.pullIndex.Get(shed.Item{
- Address: chunk.Address(),
- StoreTimestamp: storeTimestamp,
+ Address: ch.Address(),
+ BinID: binID,
})
if err != wantError {
t.Errorf("got error %v, want %v", err, wantError)
}
if err == nil {
- validateItem(t, item, chunk.Address(), nil, storeTimestamp, 0)
+ validateItem(t, item, ch.Address(), nil, 0, 0)
}
}
}
// newPushIndexTest returns a test function that validates if the right
// chunk values are in the push index.
-func newPushIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
+func newPushIndexTest(db *DB, ch chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
return func(t *testing.T) {
item, err := db.pushIndex.Get(shed.Item{
- Address: chunk.Address(),
+ Address: ch.Address(),
StoreTimestamp: storeTimestamp,
})
if err != wantError {
t.Errorf("got error %v, want %v", err, wantError)
}
if err == nil {
- validateItem(t, item, chunk.Address(), nil, storeTimestamp, 0)
+ validateItem(t, item, ch.Address(), nil, storeTimestamp, 0)
}
}
}
// newGCIndexTest returns a test function that validates if the right
// chunk values are in the gc index.
-func newGCIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
+func newGCIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64, binID uint64) func(t *testing.T) {
return func(t *testing.T) {
item, err := db.gcIndex.Get(shed.Item{
Address: chunk.Address(),
- StoreTimestamp: storeTimestamp,
+ BinID: binID,
AccessTimestamp: accessTimestamp,
})
if err != nil {
t.Fatal(err)
}
- validateItem(t, item, chunk.Address(), nil, storeTimestamp, accessTimestamp)
+ validateItem(t, item, chunk.Address(), nil, 0, accessTimestamp)
}
}
@@ -349,7 +348,7 @@ func newIndexGCSizeTest(db *DB) func(t *testing.T) {
// in database. It is used for index values validations.
type testIndexChunk struct {
chunk.Chunk
- storeTimestamp int64
+ binID uint64
}
// testItemsOrder tests the order of chunks in the index. If sortFunc is not nil,
diff --git a/swarm/storage/localstore/mode_get.go b/swarm/storage/localstore/mode_get.go
index a6353e141..efef82858 100644
--- a/swarm/storage/localstore/mode_get.go
+++ b/swarm/storage/localstore/mode_get.go
@@ -17,45 +17,35 @@
package localstore
import (
+ "context"
+ "fmt"
+ "time"
+
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
-// ModeGet enumerates different Getter modes.
-type ModeGet int
-
-// Getter modes.
-const (
- // ModeGetRequest: when accessed for retrieval
- ModeGetRequest ModeGet = iota
- // ModeGetSync: when accessed for syncing or proof of custody request
- ModeGetSync
-)
-
-// Getter provides Get method to retrieve Chunks
-// from database.
-type Getter struct {
- db *DB
- mode ModeGet
-}
-
-// NewGetter returns a new Getter on database
-// with a specific Mode.
-func (db *DB) NewGetter(mode ModeGet) *Getter {
- return &Getter{
- mode: mode,
- db: db,
- }
-}
-
// Get returns a chunk from the database. If the chunk is
// not found chunk.ErrChunkNotFound will be returned.
// All indexes will be updated as required by the
-// Getter Mode.
-func (g *Getter) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
- out, err := g.db.get(g.mode, addr)
+// Getter Mode. Get is required to implement chunk.Store
+// interface.
+func (db *DB) Get(ctx context.Context, mode chunk.ModeGet, addr chunk.Address) (ch chunk.Chunk, err error) {
+ metricName := fmt.Sprintf("localstore.Get.%s", mode)
+
+ metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+ defer totalTimeMetric(metricName, time.Now())
+
+ defer func() {
+ if err != nil {
+ metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
+ }
+ }()
+
+ out, err := db.get(mode, addr)
if err != nil {
if err == leveldb.ErrNotFound {
return nil, chunk.ErrChunkNotFound
@@ -67,7 +57,7 @@ func (g *Getter) Get(addr chunk.Address) (ch chunk.Chunk, err error) {
// get returns Item from the retrieval index
// and updates other indexes.
-func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
+func (db *DB) get(mode chunk.ModeGet, addr chunk.Address) (out shed.Item, err error) {
item := addressToItem(addr)
out, err = db.retrievalDataIndex.Get(item)
@@ -76,7 +66,7 @@ func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
}
switch mode {
// update the access timestamp and gc index
- case ModeGetRequest:
+ case chunk.ModeGetRequest:
if db.updateGCSem != nil {
// wait before creating new goroutines
// if updateGCSem buffer is full
@@ -90,8 +80,14 @@ func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
// for a new goroutine
defer func() { <-db.updateGCSem }()
}
+
+ metricName := "localstore.updateGC"
+ metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+ defer totalTimeMetric(metricName, time.Now())
+
err := db.updateGC(out)
if err != nil {
+ metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
log.Error("localstore update gc", "err", err)
}
// if gc update hook is defined, call it
@@ -101,7 +97,8 @@ func (db *DB) get(mode ModeGet, addr chunk.Address) (out shed.Item, err error) {
}()
// no updates to indexes
- case ModeGetSync:
+ case chunk.ModeGetSync:
+ case chunk.ModeGetLookup:
default:
return out, ErrInvalidMode
}
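
For reference (not part of the patch), a minimal sketch of calling the new flat Get API from outside the package; fetchChunk and the package name are hypothetical, and an already opened *localstore.DB is assumed.

package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// fetchChunk retrieves a chunk for a retrieve request. ModeGetRequest also
// schedules an access timestamp and gc index update in the background, so
// frequently requested chunks stay furthest from garbage collection.
func fetchChunk(ctx context.Context, db *localstore.DB, addr chunk.Address) (chunk.Chunk, error) {
	ch, err := db.Get(ctx, chunk.ModeGetRequest, addr)
	if err == chunk.ErrChunkNotFound {
		return nil, fmt.Errorf("chunk %s is not stored locally", addr.Hex())
	}
	if err != nil {
		return nil, err
	}
	return ch, nil
}
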
diff --git a/swarm/storage/localstore/mode_get_test.go b/swarm/storage/localstore/mode_get_test.go
index 28a70ee0c..217fa5d2d 100644
--- a/swarm/storage/localstore/mode_get_test.go
+++ b/swarm/storage/localstore/mode_get_test.go
@@ -18,8 +18,11 @@ package localstore
import (
"bytes"
+ "context"
"testing"
"time"
+
+ "github.com/ethereum/go-ethereum/swarm/chunk"
)
// TestModeGetRequest validates ModeGetRequest index values on the provided DB.
@@ -32,15 +35,13 @@ func TestModeGetRequest(t *testing.T) {
return uploadTimestamp
})()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := db.NewPutter(ModePutUpload).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- requester := db.NewGetter(ModeGetRequest)
-
// set update gc test hook to signal when
// update gc goroutine is done by sending to
// testHookUpdateGCChan channel, which is
@@ -52,22 +53,22 @@ func TestModeGetRequest(t *testing.T) {
})()
t.Run("get unsynced", func(t *testing.T) {
- got, err := requester.Get(chunk.Address())
+ got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
// wait for update gc goroutine to be done
<-testHookUpdateGCChan
- if !bytes.Equal(got.Address(), chunk.Address()) {
- t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
+ if !bytes.Equal(got.Address(), ch.Address()) {
+ t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
}
- if !bytes.Equal(got.Data(), chunk.Data()) {
- t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
+ if !bytes.Equal(got.Data(), ch.Data()) {
+ t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
}
- t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, 0))
+ t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, 0))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
@@ -75,30 +76,30 @@ func TestModeGetRequest(t *testing.T) {
})
// set chunk to synced state
- err = db.NewSetter(ModeSetSync).Set(chunk.Address())
+ err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
t.Run("first get", func(t *testing.T) {
- got, err := requester.Get(chunk.Address())
+ got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
// wait for update gc goroutine to be done
<-testHookUpdateGCChan
- if !bytes.Equal(got.Address(), chunk.Address()) {
- t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
+ if !bytes.Equal(got.Address(), ch.Address()) {
+ t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
}
- if !bytes.Equal(got.Data(), chunk.Data()) {
- t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
+ if !bytes.Equal(got.Data(), ch.Data()) {
+ t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
}
- t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, uploadTimestamp))
+ t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, uploadTimestamp))
- t.Run("gc index", newGCIndexTest(db, chunk, uploadTimestamp, uploadTimestamp))
+ t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, uploadTimestamp, 1))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
@@ -111,24 +112,24 @@ func TestModeGetRequest(t *testing.T) {
return accessTimestamp
})()
- got, err := requester.Get(chunk.Address())
+ got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
// wait for update gc goroutine to be done
<-testHookUpdateGCChan
- if !bytes.Equal(got.Address(), chunk.Address()) {
- t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
+ if !bytes.Equal(got.Address(), ch.Address()) {
+ t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
}
- if !bytes.Equal(got.Data(), chunk.Data()) {
- t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
+ if !bytes.Equal(got.Data(), ch.Data()) {
+ t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
}
- t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, accessTimestamp))
+ t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, accessTimestamp))
- t.Run("gc index", newGCIndexTest(db, chunk, uploadTimestamp, accessTimestamp))
+ t.Run("gc index", newGCIndexTest(db, ch, uploadTimestamp, accessTimestamp, 1))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
@@ -146,27 +147,27 @@ func TestModeGetSync(t *testing.T) {
return uploadTimestamp
})()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := db.NewPutter(ModePutUpload).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- got, err := db.NewGetter(ModeGetSync).Get(chunk.Address())
+ got, err := db.Get(context.Background(), chunk.ModeGetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(got.Address(), chunk.Address()) {
- t.Errorf("got chunk address %x, want %x", got.Address(), chunk.Address())
+ if !bytes.Equal(got.Address(), ch.Address()) {
+ t.Errorf("got chunk address %x, want %x", got.Address(), ch.Address())
}
- if !bytes.Equal(got.Data(), chunk.Data()) {
- t.Errorf("got chunk data %x, want %x", got.Data(), chunk.Data())
+ if !bytes.Equal(got.Data(), ch.Data()) {
+ t.Errorf("got chunk data %x, want %x", got.Data(), ch.Data())
}
- t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, uploadTimestamp, 0))
+ t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, uploadTimestamp, 0))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
diff --git a/swarm/storage/localstore/mode_has.go b/swarm/storage/localstore/mode_has.go
index 90feaceef..a70ee31b2 100644
--- a/swarm/storage/localstore/mode_has.go
+++ b/swarm/storage/localstore/mode_has.go
@@ -17,23 +17,23 @@
package localstore
import (
+ "context"
+ "time"
+
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/chunk"
)
-// Hasser provides Has method to retrieve Chunks
-// from database.
-type Hasser struct {
- db *DB
-}
+// Has returns true if the chunk is stored in database.
+func (db *DB) Has(ctx context.Context, addr chunk.Address) (bool, error) {
+ metricName := "localstore.Has"
-// NewHasser returns a new Hasser on database.
-func (db *DB) NewHasser() *Hasser {
- return &Hasser{
- db: db,
- }
-}
+ metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+ defer totalTimeMetric(metricName, time.Now())
-// Has returns true if the chunk is stored in database.
-func (h *Hasser) Has(addr chunk.Address) (bool, error) {
- return h.db.retrievalDataIndex.Has(addressToItem(addr))
+ has, err := db.retrievalDataIndex.Has(addressToItem(addr))
+ if err != nil {
+ metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
+ }
+ return has, err
}
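
Again only as a hedged illustration: Has can be used to skip redundant uploads without touching the gc index; putIfMissing is a hypothetical helper and an opened *localstore.DB is assumed.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// putIfMissing uploads ch only when it is not stored yet. Unlike a
// ModeGetRequest Get, Has does not update access time or gc index entries.
func putIfMissing(ctx context.Context, db *localstore.DB, ch chunk.Chunk) error {
	has, err := db.Has(ctx, ch.Address())
	if err != nil {
		return err
	}
	if has {
		return nil
	}
	_, err = db.Put(ctx, chunk.ModePutUpload, ch)
	return err
}
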
diff --git a/swarm/storage/localstore/mode_has_test.go b/swarm/storage/localstore/mode_has_test.go
index 332616ca2..043b21a2b 100644
--- a/swarm/storage/localstore/mode_has_test.go
+++ b/swarm/storage/localstore/mode_has_test.go
@@ -17,7 +17,10 @@
package localstore
import (
+ "context"
"testing"
+
+ "github.com/ethereum/go-ethereum/swarm/chunk"
)
// TestHas validates that Hasser is returning true for
@@ -26,16 +29,14 @@ func TestHas(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := db.NewPutter(ModePutUpload).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- hasser := db.NewHasser()
-
- has, err := hasser.Has(chunk.Address())
+ has, err := db.Has(context.Background(), ch.Address())
if err != nil {
t.Fatal(err)
}
@@ -45,7 +46,7 @@ func TestHas(t *testing.T) {
missingChunk := generateTestRandomChunk()
- has, err = hasser.Has(missingChunk.Address())
+ has, err = db.Has(context.Background(), missingChunk.Address())
if err != nil {
t.Fatal(err)
}
diff --git a/swarm/storage/localstore/mode_put.go b/swarm/storage/localstore/mode_put.go
index 1599ca8e3..a8e355ad0 100644
--- a/swarm/storage/localstore/mode_put.go
+++ b/swarm/storage/localstore/mode_put.go
@@ -17,44 +17,31 @@
package localstore
import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
-// ModePut enumerates different Putter modes.
-type ModePut int
-
-// Putter modes.
-const (
- // ModePutRequest: when a chunk is received as a result of retrieve request and delivery
- ModePutRequest ModePut = iota
- // ModePutSync: when a chunk is received via syncing
- ModePutSync
- // ModePutUpload: when a chunk is created by local upload
- ModePutUpload
-)
+// Put stores the Chunk to database and depending
+// on the Putter mode, it updates required indexes.
+// Put is required to implement chunk.Store
+// interface.
+func (db *DB) Put(ctx context.Context, mode chunk.ModePut, ch chunk.Chunk) (exists bool, err error) {
+ metricName := fmt.Sprintf("localstore.Put.%s", mode)
-// Putter provides Put method to store Chunks
-// to database.
-type Putter struct {
- db *DB
- mode ModePut
-}
+ metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+ defer totalTimeMetric(metricName, time.Now())
-// NewPutter returns a new Putter on database
-// with a specific Mode.
-func (db *DB) NewPutter(mode ModePut) *Putter {
- return &Putter{
- mode: mode,
- db: db,
+ exists, err = db.put(mode, chunkToItem(ch))
+ if err != nil {
+ metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
}
-}
-
-// Put stores the Chunk to database and depending
-// on the Putter mode, it updates required indexes.
-func (p *Putter) Put(ch chunk.Chunk) (err error) {
- return p.db.put(p.mode, chunkToItem(ch))
+ return exists, err
}
// put stores Item to database and updates other
@@ -62,7 +49,7 @@ func (p *Putter) Put(ch chunk.Chunk) (err error) {
// of this function for the same address in parallel.
// Item fields Address and Data must not
// be set to their nil (zero) values.
-func (db *DB) put(mode ModePut, item shed.Item) (err error) {
+func (db *DB) put(mode chunk.ModePut, item shed.Item) (exists bool, err error) {
// protect parallel updates
db.batchMu.Lock()
defer db.batchMu.Unlock()
@@ -76,7 +63,7 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
var triggerPushFeed bool // signal push feed subscriptions to iterate
switch mode {
- case ModePutRequest:
+ case chunk.ModePutRequest:
// put to indexes: retrieve, gc; it does not enter the syncpool
// check if the chunk already is in the database
@@ -84,20 +71,25 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
i, err := db.retrievalAccessIndex.Get(item)
switch err {
case nil:
+ exists = true
item.AccessTimestamp = i.AccessTimestamp
case leveldb.ErrNotFound:
+ exists = false
// no chunk accesses
default:
- return err
+ return false, err
}
i, err = db.retrievalDataIndex.Get(item)
switch err {
case nil:
+ exists = true
item.StoreTimestamp = i.StoreTimestamp
+ item.BinID = i.BinID
case leveldb.ErrNotFound:
// no chunk data stored
+ exists = false
default:
- return err
+ return false, err
}
if item.AccessTimestamp != 0 {
// delete current entry from the gc index
@@ -107,6 +99,12 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
if item.StoreTimestamp == 0 {
item.StoreTimestamp = now()
}
+ if item.BinID == 0 {
+ item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
+ if err != nil {
+ return false, err
+ }
+ }
// update access timestamp
item.AccessTimestamp = now()
// update retrieve access index
@@ -117,36 +115,56 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
db.retrievalDataIndex.PutInBatch(batch, item)
- case ModePutUpload:
+ case chunk.ModePutUpload:
// put to indexes: retrieve, push, pull
- item.StoreTimestamp = now()
- db.retrievalDataIndex.PutInBatch(batch, item)
- db.pullIndex.PutInBatch(batch, item)
- triggerPullFeed = true
- db.pushIndex.PutInBatch(batch, item)
- triggerPushFeed = true
+ exists, err = db.retrievalDataIndex.Has(item)
+ if err != nil {
+ return false, err
+ }
+ if !exists {
+ item.StoreTimestamp = now()
+ item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
+ if err != nil {
+ return false, err
+ }
+ db.retrievalDataIndex.PutInBatch(batch, item)
+ db.pullIndex.PutInBatch(batch, item)
+ triggerPullFeed = true
+ db.pushIndex.PutInBatch(batch, item)
+ triggerPushFeed = true
+ }
- case ModePutSync:
+ case chunk.ModePutSync:
// put to indexes: retrieve, pull
- item.StoreTimestamp = now()
- db.retrievalDataIndex.PutInBatch(batch, item)
- db.pullIndex.PutInBatch(batch, item)
- triggerPullFeed = true
+ exists, err = db.retrievalDataIndex.Has(item)
+ if err != nil {
+ return exists, err
+ }
+ if !exists {
+ item.StoreTimestamp = now()
+ item.BinID, err = db.binIDs.IncInBatch(batch, uint64(db.po(item.Address)))
+ if err != nil {
+ return false, err
+ }
+ db.retrievalDataIndex.PutInBatch(batch, item)
+ db.pullIndex.PutInBatch(batch, item)
+ triggerPullFeed = true
+ }
default:
- return ErrInvalidMode
+ return false, ErrInvalidMode
}
err = db.incGCSizeInBatch(batch, gcSizeChange)
if err != nil {
- return err
+ return false, err
}
err = db.shed.WriteBatch(batch)
if err != nil {
- return err
+ return false, err
}
if triggerPullFeed {
db.triggerPullSubscriptions(db.po(item.Address))
@@ -154,5 +172,5 @@ func (db *DB) put(mode ModePut, item shed.Item) (err error) {
if triggerPushFeed {
db.triggerPushSubscriptions()
}
- return nil
+ return exists, nil
}
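
A hedged sketch of the new Put signature and its exists return value, matching the idempotent behaviour exercised by TestModePut_sameChunk below; uploadChunks is a hypothetical caller and an opened *localstore.DB is assumed.

package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// uploadChunks stores locally created chunks with ModePutUpload and counts
// how many were already present. Put is idempotent: re-putting a stored chunk
// only reports exists=true instead of duplicating index entries.
func uploadChunks(ctx context.Context, db *localstore.DB, chunks []chunk.Chunk) error {
	var existing int
	for _, ch := range chunks {
		exists, err := db.Put(ctx, chunk.ModePutUpload, ch)
		if err != nil {
			return fmt.Errorf("put chunk %s: %v", ch.Address().Hex(), err)
		}
		if exists {
			existing++
		}
	}
	fmt.Printf("stored %d chunks, %d were already present\n", len(chunks), existing)
	return nil
}
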
diff --git a/swarm/storage/localstore/mode_put_test.go b/swarm/storage/localstore/mode_put_test.go
index 8ecae1d2e..5376aa8b3 100644
--- a/swarm/storage/localstore/mode_put_test.go
+++ b/swarm/storage/localstore/mode_put_test.go
@@ -18,6 +18,7 @@ package localstore
import (
"bytes"
+ "context"
"fmt"
"sync"
"testing"
@@ -31,9 +32,7 @@ func TestModePutRequest(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- putter := db.NewPutter(ModePutRequest)
-
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
// keep the record when the chunk is stored
var storeTimestamp int64
@@ -46,12 +45,12 @@ func TestModePutRequest(t *testing.T) {
storeTimestamp = wantTimestamp
- err := putter.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutRequest, ch)
if err != nil {
t.Fatal(err)
}
- t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, wantTimestamp, wantTimestamp))
+ t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, wantTimestamp, wantTimestamp))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
@@ -64,12 +63,12 @@ func TestModePutRequest(t *testing.T) {
return wantTimestamp
})()
- err := putter.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutRequest, ch)
if err != nil {
t.Fatal(err)
}
- t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, storeTimestamp, wantTimestamp))
+ t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, storeTimestamp, wantTimestamp))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
@@ -87,16 +86,16 @@ func TestModePutSync(t *testing.T) {
return wantTimestamp
})()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := db.NewPutter(ModePutSync).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutSync, ch)
if err != nil {
t.Fatal(err)
}
- t.Run("retrieve indexes", newRetrieveIndexesTest(db, chunk, wantTimestamp, 0))
+ t.Run("retrieve indexes", newRetrieveIndexesTest(db, ch, wantTimestamp, 0))
- t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
+ t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
}
// TestModePutUpload validates ModePutUpload index values on the provided DB.
@@ -109,18 +108,18 @@ func TestModePutUpload(t *testing.T) {
return wantTimestamp
})()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := db.NewPutter(ModePutUpload).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- t.Run("retrieve indexes", newRetrieveIndexesTest(db, chunk, wantTimestamp, 0))
+ t.Run("retrieve indexes", newRetrieveIndexesTest(db, ch, wantTimestamp, 0))
- t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
+ t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
- t.Run("push index", newPushIndexTest(db, chunk, wantTimestamp, nil))
+ t.Run("push index", newPushIndexTest(db, ch, wantTimestamp, nil))
}
// TestModePutUpload_parallel uploads chunks in parallel
@@ -140,14 +139,13 @@ func TestModePutUpload_parallel(t *testing.T) {
// start uploader workers
for i := 0; i < workerCount; i++ {
go func(i int) {
- uploader := db.NewPutter(ModePutUpload)
for {
select {
- case chunk, ok := <-chunkChan:
+ case ch, ok := <-chunkChan:
if !ok {
return
}
- err := uploader.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
select {
case errChan <- err:
case <-doneChan:
@@ -188,21 +186,85 @@ func TestModePutUpload_parallel(t *testing.T) {
}
// get every chunk and validate its data
- getter := db.NewGetter(ModeGetRequest)
-
chunksMu.Lock()
defer chunksMu.Unlock()
- for _, chunk := range chunks {
- got, err := getter.Get(chunk.Address())
+ for _, ch := range chunks {
+ got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(got.Data(), chunk.Data()) {
- t.Fatalf("got chunk %s data %x, want %x", chunk.Address().Hex(), got.Data(), chunk.Data())
+ if !bytes.Equal(got.Data(), ch.Data()) {
+ t.Fatalf("got chunk %s data %x, want %x", ch.Address().Hex(), got.Data(), ch.Data())
}
}
}
+// TestModePut_sameChunk puts the same chunk multiple times
+// and validates that all relevant indexes have only one item
+// in them.
+func TestModePut_sameChunk(t *testing.T) {
+ ch := generateTestRandomChunk()
+
+ for _, tc := range []struct {
+ name string
+ mode chunk.ModePut
+ pullIndex bool
+ pushIndex bool
+ }{
+ {
+ name: "ModePutRequest",
+ mode: chunk.ModePutRequest,
+ pullIndex: false,
+ pushIndex: false,
+ },
+ {
+ name: "ModePutUpload",
+ mode: chunk.ModePutUpload,
+ pullIndex: true,
+ pushIndex: true,
+ },
+ {
+ name: "ModePutSync",
+ mode: chunk.ModePutSync,
+ pullIndex: true,
+ pushIndex: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ db, cleanupFunc := newTestDB(t, nil)
+ defer cleanupFunc()
+
+ for i := 0; i < 10; i++ {
+ exists, err := db.Put(context.Background(), tc.mode, ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ switch exists {
+ case false:
+ if i != 0 {
+ t.Fatal("should not exist only on first Put")
+ }
+ case true:
+ if i == 0 {
+ t.Fatal("should exist on all cases other than the first one")
+ }
+ }
+
+ count := func(b bool) (c int) {
+ if b {
+ return 1
+ }
+ return 0
+ }
+
+ newItemsCountTest(db.retrievalDataIndex, 1)(t)
+ newItemsCountTest(db.pullIndex, count(tc.pullIndex))(t)
+ newItemsCountTest(db.pushIndex, count(tc.pushIndex))(t)
+ }
+ })
+ }
+}
+
// BenchmarkPutUpload runs a series of benchmarks that upload
// a specific number of chunks in parallel.
//
@@ -270,7 +332,6 @@ func benchmarkPutUpload(b *testing.B, o *Options, count, maxParallelUploads int)
db, cleanupFunc := newTestDB(b, o)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
chunks := make([]chunk.Chunk, count)
for i := 0; i < count; i++ {
chunks[i] = generateTestRandomChunk()
@@ -286,7 +347,8 @@ func benchmarkPutUpload(b *testing.B, o *Options, count, maxParallelUploads int)
go func(i int) {
defer func() { <-sem }()
- errs <- uploader.Put(chunks[i])
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, chunks[i])
+ errs <- err
}(i)
}
}()
diff --git a/swarm/storage/localstore/mode_set.go b/swarm/storage/localstore/mode_set.go
index 83fcbea52..14b48a22e 100644
--- a/swarm/storage/localstore/mode_set.go
+++ b/swarm/storage/localstore/mode_set.go
@@ -17,51 +17,37 @@
package localstore
import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/syndtr/goleveldb/leveldb"
)
-// ModeSet enumerates different Setter modes.
-type ModeSet int
-
-// Setter modes.
-const (
- // ModeSetAccess: when an update request is received for a chunk or chunk is retrieved for delivery
- ModeSetAccess ModeSet = iota
- // ModeSetSync: when push sync receipt is received
- ModeSetSync
- // modeSetRemove: when GC-d
- // unexported as no external packages should remove chunks from database
- modeSetRemove
-)
+// Set updates database indexes for a specific
+// chunk represented by the address.
+// Set is required to implement chunk.Store
+// interface.
+func (db *DB) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
+ metricName := fmt.Sprintf("localstore.Set.%s", mode)
-// Setter sets the state of a particular
-// Chunk in database by changing indexes.
-type Setter struct {
- db *DB
- mode ModeSet
-}
+ metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+ defer totalTimeMetric(metricName, time.Now())
-// NewSetter returns a new Setter on database
-// with a specific Mode.
-func (db *DB) NewSetter(mode ModeSet) *Setter {
- return &Setter{
- mode: mode,
- db: db,
+ err = db.set(mode, addr)
+ if err != nil {
+ metrics.GetOrRegisterCounter(metricName+".error", nil).Inc(1)
}
-}
-
-// Set updates database indexes for a specific
-// chunk represented by the address.
-func (s *Setter) Set(addr chunk.Address) (err error) {
- return s.db.set(s.mode, addr)
+ return err
}
// set updates database indexes for a specific
// chunk represented by the address.
// It acquires lockAddr to protect two calls
// of this function for the same address in parallel.
-func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
+func (db *DB) set(mode chunk.ModeSet, addr chunk.Address) (err error) {
// protect parallel updates
db.batchMu.Lock()
defer db.batchMu.Unlock()
@@ -76,7 +62,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
item := addressToItem(addr)
switch mode {
- case ModeSetAccess:
+ case chunk.ModeSetAccess:
// add to pull, insert to gc
// need to get access timestamp here as it is not
@@ -87,9 +73,14 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
switch err {
case nil:
item.StoreTimestamp = i.StoreTimestamp
+ item.BinID = i.BinID
case leveldb.ErrNotFound:
db.pushIndex.DeleteInBatch(batch, item)
item.StoreTimestamp = now()
+ item.BinID, err = db.binIDs.Inc(uint64(db.po(item.Address)))
+ if err != nil {
+ return err
+ }
default:
return err
}
@@ -112,7 +103,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
db.gcIndex.PutInBatch(batch, item)
gcSizeChange++
- case ModeSetSync:
+ case chunk.ModeSetSync:
// delete from push, insert to gc
// need to get access timestamp here as it is not
@@ -131,6 +122,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
return err
}
item.StoreTimestamp = i.StoreTimestamp
+ item.BinID = i.BinID
i, err = db.retrievalAccessIndex.Get(item)
switch err {
@@ -149,7 +141,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
db.gcIndex.PutInBatch(batch, item)
gcSizeChange++
- case modeSetRemove:
+ case chunk.ModeSetRemove:
// delete from retrieve, pull, gc
// need to get access timestamp here as it is not
@@ -169,6 +161,7 @@ func (db *DB) set(mode ModeSet, addr chunk.Address) (err error) {
return err
}
item.StoreTimestamp = i.StoreTimestamp
+ item.BinID = i.BinID
db.retrievalDataIndex.DeleteInBatch(batch, item)
db.retrievalAccessIndex.DeleteInBatch(batch, item)
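
A minimal sketch (not part of the patch) of the usual Set call after push syncing; markSynced is hypothetical and an opened *localstore.DB is assumed.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// markSynced is the typical follow-up to an upload: once a push sync receipt
// arrives, ModeSetSync removes the chunk from the push index and inserts it
// into the gc index, making it eligible for garbage collection later on.
func markSynced(ctx context.Context, db *localstore.DB, addrs []chunk.Address) error {
	for _, addr := range addrs {
		if err := db.Set(ctx, chunk.ModeSetSync, addr); err != nil {
			return err
		}
	}
	return nil
}
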
diff --git a/swarm/storage/localstore/mode_set_test.go b/swarm/storage/localstore/mode_set_test.go
index 674aaabec..9ba62cd20 100644
--- a/swarm/storage/localstore/mode_set_test.go
+++ b/swarm/storage/localstore/mode_set_test.go
@@ -17,9 +17,11 @@
package localstore
import (
+ "context"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/syndtr/goleveldb/leveldb"
)
@@ -28,23 +30,23 @@ func TestModeSetAccess(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
wantTimestamp := time.Now().UTC().UnixNano()
defer setNow(func() (t int64) {
return wantTimestamp
})()
- err := db.NewSetter(ModeSetAccess).Set(chunk.Address())
+ err := db.Set(context.Background(), chunk.ModeSetAccess, ch.Address())
if err != nil {
t.Fatal(err)
}
- t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
+ t.Run("pull index", newPullIndexTest(db, ch, 1, nil))
t.Run("pull index count", newItemsCountTest(db.pullIndex, 1))
- t.Run("gc index", newGCIndexTest(db, chunk, wantTimestamp, wantTimestamp))
+ t.Run("gc index", newGCIndexTest(db, ch, wantTimestamp, wantTimestamp, 1))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
@@ -56,28 +58,28 @@ func TestModeSetSync(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
wantTimestamp := time.Now().UTC().UnixNano()
defer setNow(func() (t int64) {
return wantTimestamp
})()
- err := db.NewPutter(ModePutUpload).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- err = db.NewSetter(ModeSetSync).Set(chunk.Address())
+ err = db.Set(context.Background(), chunk.ModeSetSync, ch.Address())
if err != nil {
t.Fatal(err)
}
- t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, wantTimestamp, wantTimestamp))
+ t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, ch, wantTimestamp, wantTimestamp))
- t.Run("push index", newPushIndexTest(db, chunk, wantTimestamp, leveldb.ErrNotFound))
+ t.Run("push index", newPushIndexTest(db, ch, wantTimestamp, leveldb.ErrNotFound))
- t.Run("gc index", newGCIndexTest(db, chunk, wantTimestamp, wantTimestamp))
+ t.Run("gc index", newGCIndexTest(db, ch, wantTimestamp, wantTimestamp, 1))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))
@@ -89,40 +91,39 @@ func TestModeSetRemove(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := db.NewPutter(ModePutUpload).Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- err = db.NewSetter(modeSetRemove).Set(chunk.Address())
+ err = db.Set(context.Background(), chunk.ModeSetRemove, ch.Address())
if err != nil {
t.Fatal(err)
}
t.Run("retrieve indexes", func(t *testing.T) {
wantErr := leveldb.ErrNotFound
- _, err := db.retrievalDataIndex.Get(addressToItem(chunk.Address()))
+ _, err := db.retrievalDataIndex.Get(addressToItem(ch.Address()))
if err != wantErr {
t.Errorf("got error %v, want %v", err, wantErr)
}
t.Run("retrieve data index count", newItemsCountTest(db.retrievalDataIndex, 0))
// access index should not be set
- _, err = db.retrievalAccessIndex.Get(addressToItem(chunk.Address()))
+ _, err = db.retrievalAccessIndex.Get(addressToItem(ch.Address()))
if err != wantErr {
t.Errorf("got error %v, want %v", err, wantErr)
}
t.Run("retrieve access index count", newItemsCountTest(db.retrievalAccessIndex, 0))
})
- t.Run("pull index", newPullIndexTest(db, chunk, 0, leveldb.ErrNotFound))
+ t.Run("pull index", newPullIndexTest(db, ch, 0, leveldb.ErrNotFound))
t.Run("pull index count", newItemsCountTest(db.pullIndex, 0))
t.Run("gc index count", newItemsCountTest(db.gcIndex, 0))
t.Run("gc size", newIndexGCSizeTest(db))
-
}
diff --git a/swarm/storage/localstore/retrieval_index_test.go b/swarm/storage/localstore/retrieval_index_test.go
index b08790124..4ca2e32e6 100644
--- a/swarm/storage/localstore/retrieval_index_test.go
+++ b/swarm/storage/localstore/retrieval_index_test.go
@@ -17,6 +17,7 @@
package localstore
import (
+ "context"
"strconv"
"testing"
@@ -61,17 +62,14 @@ func benchmarkRetrievalIndexes(b *testing.B, o *Options, count int) {
b.StopTimer()
db, cleanupFunc := newTestDB(b, o)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
- syncer := db.NewSetter(ModeSetSync)
- requester := db.NewGetter(ModeGetRequest)
addrs := make([]chunk.Address, count)
for i := 0; i < count; i++ {
- chunk := generateTestRandomChunk()
- err := uploader.Put(chunk)
+ ch := generateTestRandomChunk()
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
b.Fatal(err)
}
- addrs[i] = chunk.Address()
+ addrs[i] = ch.Address()
}
// set update gc test hook to signal when
// update gc goroutine is done by sending to
@@ -85,12 +83,12 @@ func benchmarkRetrievalIndexes(b *testing.B, o *Options, count int) {
b.StartTimer()
for i := 0; i < count; i++ {
- err := syncer.Set(addrs[i])
+ err := db.Set(context.Background(), chunk.ModeSetSync, addrs[i])
if err != nil {
b.Fatal(err)
}
- _, err = requester.Get(addrs[i])
+ _, err = db.Get(context.Background(), chunk.ModeGetRequest, addrs[i])
if err != nil {
b.Fatal(err)
}
@@ -133,7 +131,6 @@ func benchmarkUpload(b *testing.B, o *Options, count int) {
b.StopTimer()
db, cleanupFunc := newTestDB(b, o)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
chunks := make([]chunk.Chunk, count)
for i := 0; i < count; i++ {
chunk := generateTestRandomChunk()
@@ -142,7 +139,7 @@ func benchmarkUpload(b *testing.B, o *Options, count int) {
b.StartTimer()
for i := 0; i < count; i++ {
- err := uploader.Put(chunks[i])
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, chunks[i])
if err != nil {
b.Fatal(err)
}
diff --git a/swarm/storage/localstore/schema.go b/swarm/storage/localstore/schema.go
new file mode 100644
index 000000000..538c75d1f
--- /dev/null
+++ b/swarm/storage/localstore/schema.go
@@ -0,0 +1,52 @@
+package localstore
+
+import (
+ "github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+// The DB schema we want to use. The actual/current DB schema might differ
+// until migrations are run.
+const CurrentDbSchema = DbSchemaSanctuary
+
+// There was a time when we had no schema at all.
+const DbSchemaNone = ""
+
+// "purity" is the first formal schema of LevelDB we release together with Swarm 0.3.5
+const DbSchemaPurity = "purity"
+
+// "halloween" is here because we had a screw in the garbage collector index.
+// Because of that we had to rebuild the GC index to get rid of erroneous
+// entries and that takes a long time. This schema is used for bookkeeping,
+// so rebuild index will run just once.
+const DbSchemaHalloween = "halloween"
+
+const DbSchemaSanctuary = "sanctuary"
+
+// IsLegacyDatabase returns true if a legacy database is in the datadir
+func IsLegacyDatabase(datadir string) bool {
+
+ var (
+ legacyDbSchemaKey = []byte{8}
+ )
+
+ db, err := leveldb.OpenFile(datadir, &opt.Options{OpenFilesCacheCapacity: 128})
+ if err != nil {
+ log.Error("got an error while trying to open leveldb path", "path", datadir, "err", err)
+ return false
+ }
+ defer db.Close()
+
+ data, err := db.Get(legacyDbSchemaKey, nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ // if we haven't found anything under the legacy db schema key, we are not on a legacy database
+ return false
+ }
+
+ log.Error("got an unexpected error fetching legacy name from the database", "err", err)
+ }
+ log.Trace("checking if database scheme is legacy", "schema name", string(data))
+ return string(data) == DbSchemaHalloween || string(data) == DbSchemaPurity
+}
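
A hedged sketch of how a caller might consult IsLegacyDatabase before opening a store; checkDataDir is hypothetical and the wording of the error is an assumption, not a documented migration procedure.

package example

import (
	"errors"

	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// checkDataDir refuses to proceed when the data directory still holds a
// pre-"sanctuary" ("purity" or "halloween") store.
func checkDataDir(datadir string) error {
	if localstore.IsLegacyDatabase(datadir) {
		return errors.New("legacy localstore schema found in " + datadir + "; refusing to open without migration")
	}
	return nil
}
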
diff --git a/swarm/storage/localstore/subscription_pull.go b/swarm/storage/localstore/subscription_pull.go
index 0b96102e3..dd07add53 100644
--- a/swarm/storage/localstore/subscription_pull.go
+++ b/swarm/storage/localstore/subscription_pull.go
@@ -17,28 +17,34 @@
package localstore
import (
- "bytes"
"context"
"errors"
- "fmt"
"sync"
+ "time"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/shed"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
+ "github.com/opentracing/opentracing-go"
+ olog "github.com/opentracing/opentracing-go/log"
"github.com/syndtr/goleveldb/leveldb"
)
// SubscribePull returns a channel that provides chunk addresses and stored times from pull syncing index.
// Pull syncing index can be only subscribed to a particular proximity order bin. If since
-// is not nil, the iteration will start from the first item stored after that timestamp. If until is not nil,
-// only chunks stored up to this timestamp will be send to the channel, and the returned channel will be
-// closed. The since-until interval is open on the left and closed on the right (since,until]. Returned stop
+// is not 0, the iteration will start from the first item stored after that id. If until is not 0,
+// only chunks stored up to this id will be sent to the channel, and the returned channel will be
+// closed. The since-until interval is open on since side, and closed on until side: (since,until] <=> [since+1,until]. Returned stop
// function will terminate current and further iterations without errors, and also close the returned channel.
// Make sure that you check the second returned parameter from the channel to stop iteration when its value
// is false.
-func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkDescriptor) (c <-chan ChunkDescriptor, stop func()) {
- chunkDescriptors := make(chan ChunkDescriptor)
+func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
+ metricName := "localstore.SubscribePull"
+ metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+
+ chunkDescriptors := make(chan chunk.Descriptor)
trigger := make(chan struct{}, 1)
db.pullTriggersMu.Lock()
@@ -59,18 +65,20 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
var errStopSubscription = errors.New("stop subscription")
go func() {
- // close the returned ChunkDescriptor channel at the end to
+ defer metrics.GetOrRegisterCounter(metricName+".stop", nil).Inc(1)
+ // close the returned chunk.Descriptor channel at the end to
// signal that the subscription is done
defer close(chunkDescriptors)
// sinceItem is the Item from which the next iteration
// should start. The first iteration starts from the first Item.
var sinceItem *shed.Item
- if since != nil {
+ if since > 0 {
sinceItem = &shed.Item{
- Address: since.Address,
- StoreTimestamp: since.StoreTimestamp,
+ Address: db.addressInBin(bin),
+ BinID: since,
}
}
+ first := true // first iteration flag for SkipStartFromItem
for {
select {
case <-trigger:
@@ -78,17 +86,23 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
// - last index Item is reached
// - subscription stop is called
// - context is done
+ metrics.GetOrRegisterCounter(metricName+".iter", nil).Inc(1)
+
+ ctx, sp := spancontext.StartSpan(ctx, metricName+".iter")
+ sp.LogFields(olog.Int("bin", int(bin)), olog.Uint64("since", since), olog.Uint64("until", until))
+
+ iterStart := time.Now()
+ var count int
err := db.pullIndex.Iterate(func(item shed.Item) (stop bool, err error) {
select {
- case chunkDescriptors <- ChunkDescriptor{
- Address: item.Address,
- StoreTimestamp: item.StoreTimestamp,
+ case chunkDescriptors <- chunk.Descriptor{
+ Address: item.Address,
+ BinID: item.BinID,
}:
+ count++
// until chunk descriptor is sent
// break the iteration
- if until != nil &&
- (item.StoreTimestamp >= until.StoreTimestamp ||
- bytes.Equal(item.Address, until.Address)) {
+ if until > 0 && item.BinID >= until {
return true, errStopSubscription
}
// set next iteration start item
@@ -109,19 +123,34 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
}, &shed.IterateOptions{
StartFrom: sinceItem,
// sinceItem was sent as the last Address in the previous
- // iterator call, skip it in this one
- SkipStartFromItem: true,
+ // iterator call, skip it in this one, but not the item with
+ // the provided since bin id as it should be sent to a channel
+ SkipStartFromItem: !first,
Prefix: []byte{bin},
})
+
+ totalTimeMetric(metricName+".iter", iterStart)
+
+ sp.FinishWithOptions(opentracing.FinishOptions{
+ LogRecords: []opentracing.LogRecord{
+ {
+ Timestamp: time.Now(),
+ Fields: []olog.Field{olog.Int("count", count)},
+ },
+ },
+ })
+
if err != nil {
if err == errStopSubscription {
// stop subscription without any errors
// if until is reached
return
}
+ metrics.GetOrRegisterCounter(metricName+".iter.error", nil).Inc(1)
log.Error("localstore pull subscription iteration", "bin", bin, "since", since, "until", until, "err", err)
return
}
+ first = false
case <-stopChan:
// terminate the subscription
// on stop
@@ -159,35 +188,20 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until *ChunkD
return chunkDescriptors, stop
}
-// LastPullSubscriptionChunk returns ChunkDescriptor of the latest Chunk
+// LastPullSubscriptionBinID returns the bin id of the latest Chunk
// in pull syncing index for a provided bin. If there are no chunks in
-// that bin, chunk.ErrChunkNotFound is returned.
-func (db *DB) LastPullSubscriptionChunk(bin uint8) (c *ChunkDescriptor, err error) {
+// that bin, 0 value is returned.
+func (db *DB) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
+ metrics.GetOrRegisterCounter("localstore.LastPullSubscriptionBinID", nil).Inc(1)
+
item, err := db.pullIndex.Last([]byte{bin})
if err != nil {
if err == leveldb.ErrNotFound {
- return nil, chunk.ErrChunkNotFound
+ return 0, nil
}
- return nil, err
+ return 0, err
}
- return &ChunkDescriptor{
- Address: item.Address,
- StoreTimestamp: item.StoreTimestamp,
- }, nil
-}
-
-// ChunkDescriptor holds information required for Pull syncing. This struct
-// is provided by subscribing to pull index.
-type ChunkDescriptor struct {
- Address chunk.Address
- StoreTimestamp int64
-}
-
-func (c *ChunkDescriptor) String() string {
- if c == nil {
- return "none"
- }
- return fmt.Sprintf("%s stored at %v", c.Address.Hex(), c.StoreTimestamp)
+ return item.BinID, nil
}
// triggerPullSubscriptions is used internally for starting iterations
@@ -209,3 +223,12 @@ func (db *DB) triggerPullSubscriptions(bin uint8) {
}
}
}
+
+// addressInBin returns an address that is in a specific
+// proximity order bin, derived from the database base key.
+func (db *DB) addressInBin(bin uint8) (addr chunk.Address) {
+ addr = append([]byte(nil), db.baseKey...)
+ b := bin / 8
+ addr[b] = addr[b] ^ (1 << (7 - bin%8))
+ return addr
+}
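
To make the new bin-id based (since, until] semantics concrete, a hedged sketch of draining one bin up to its current head; drainBin is hypothetical, the zero-value handling follows the LastPullSubscriptionBinID contract above, and an opened *localstore.DB is assumed.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// drainBin reads the pull sync stream for one proximity order bin. The
// (since, until] window is expressed in per-bin sequence numbers (bin IDs)
// rather than store timestamps; passing 0 for both means "everything, keep
// streaming". Always call stop to release the subscription.
func drainBin(ctx context.Context, db *localstore.DB, bin uint8) ([]chunk.Descriptor, error) {
	// LastPullSubscriptionBinID returns 0 when the bin is empty, which would
	// otherwise mean "no upper bound", so guard for it explicitly.
	until, err := db.LastPullSubscriptionBinID(bin)
	if err != nil {
		return nil, err
	}
	if until == 0 {
		return nil, nil // nothing stored in this bin yet
	}

	descriptors, stop := db.SubscribePull(ctx, bin, 0, until)
	defer stop()

	var got []chunk.Descriptor
	for d := range descriptors {
		got = append(got, d) // d.Address and d.BinID identify the chunk in this bin
	}
	return got, nil
}
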
diff --git a/swarm/storage/localstore/subscription_pull_test.go b/swarm/storage/localstore/subscription_pull_test.go
index d5ddae02b..bf364ed44 100644
--- a/swarm/storage/localstore/subscription_pull_test.go
+++ b/swarm/storage/localstore/subscription_pull_test.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/swarm/chunk"
+ "github.com/ethereum/go-ethereum/swarm/shed"
)
// TestDB_SubscribePull uploads some chunks before and after
@@ -35,15 +36,13 @@ func TestDB_SubscribePull(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
addrs := make(map[uint8][]chunk.Address)
var addrsMu sync.Mutex
var wantedChunksCount int
// prepopulate database with some chunks
// before the subscription
- uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)
+ uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 10)
// set a timeout on subscription
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -54,22 +53,22 @@ func TestDB_SubscribePull(t *testing.T) {
errChan := make(chan error)
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
- ch, stop := db.SubscribePull(ctx, bin, nil, nil)
+ ch, stop := db.SubscribePull(ctx, bin, 0, 0)
defer stop()
// receive and validate addresses from the subscription
- go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+ go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
}
// upload some chunks just after subscribe
- uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)
+ uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 5)
time.Sleep(200 * time.Millisecond)
// upload some chunks after some short time
// to ensure that subscription will include them
// in a dynamic environment
- uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)
+ uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 3)
checkErrChan(ctx, t, errChan, wantedChunksCount)
}
@@ -82,15 +81,13 @@ func TestDB_SubscribePull_multiple(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
addrs := make(map[uint8][]chunk.Address)
var addrsMu sync.Mutex
var wantedChunksCount int
// prepopulate database with some chunks
// before the subscription
- uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)
+ uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 10)
// set a timeout on subscription
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -106,23 +103,23 @@ func TestDB_SubscribePull_multiple(t *testing.T) {
// that all of them will write every address error to errChan
for j := 0; j < subsCount; j++ {
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
- ch, stop := db.SubscribePull(ctx, bin, nil, nil)
+ ch, stop := db.SubscribePull(ctx, bin, 0, 0)
defer stop()
// receive and validate addresses from the subscription
- go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+ go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
}
}
// upload some chunks just after subscribe
- uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)
+ uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 5)
time.Sleep(200 * time.Millisecond)
// upload some chunks after some short time
// to ensure that subscription will include them
// in a dynamic environment
- uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)
+ uploadRandomChunksBin(t, db, addrs, &addrsMu, &wantedChunksCount, 3)
checkErrChan(ctx, t, errChan, wantedChunksCount*subsCount)
}
@@ -135,61 +132,52 @@ func TestDB_SubscribePull_since(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
addrs := make(map[uint8][]chunk.Address)
var addrsMu sync.Mutex
var wantedChunksCount int
- lastTimestamp := time.Now().UTC().UnixNano()
- var lastTimestampMu sync.RWMutex
- defer setNow(func() (t int64) {
- lastTimestampMu.Lock()
- defer lastTimestampMu.Unlock()
- lastTimestamp++
- return lastTimestamp
- })()
+ binIDCounter := make(map[uint8]uint64)
+ var binIDCounterMu sync.RWMutex
- uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
+ uploadRandomChunks := func(count int, wanted bool) (first map[uint8]uint64) {
addrsMu.Lock()
defer addrsMu.Unlock()
- last = make(map[uint8]ChunkDescriptor)
+ first = make(map[uint8]uint64)
for i := 0; i < count; i++ {
ch := generateTestRandomChunk()
- err := uploader.Put(ch)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
bin := db.po(ch.Address())
- if _, ok := addrs[bin]; !ok {
- addrs[bin] = make([]chunk.Address, 0)
- }
+ binIDCounterMu.RLock()
+ binIDCounter[bin]++
+ binIDCounterMu.RUnlock()
+
if wanted {
+ if _, ok := addrs[bin]; !ok {
+ addrs[bin] = make([]chunk.Address, 0)
+ }
addrs[bin] = append(addrs[bin], ch.Address())
wantedChunksCount++
- }
- lastTimestampMu.RLock()
- storeTimestamp := lastTimestamp
- lastTimestampMu.RUnlock()
-
- last[bin] = ChunkDescriptor{
- Address: ch.Address(),
- StoreTimestamp: storeTimestamp,
+ if _, ok := first[bin]; !ok {
+ first[bin] = binIDCounter[bin]
+ }
}
}
- return last
+ return first
}
// prepopulate database with some chunks
// before the subscription
- last := uploadRandomChunks(30, false)
+ uploadRandomChunks(30, false)
- uploadRandomChunks(25, true)
+ first := uploadRandomChunks(25, true)
// set a timeout on subscription
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@@ -200,21 +188,18 @@ func TestDB_SubscribePull_since(t *testing.T) {
errChan := make(chan error)
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
- var since *ChunkDescriptor
- if c, ok := last[bin]; ok {
- since = &c
+ since, ok := first[bin]
+ if !ok {
+ continue
}
- ch, stop := db.SubscribePull(ctx, bin, since, nil)
+ ch, stop := db.SubscribePull(ctx, bin, since, 0)
defer stop()
// receive and validate addresses from the subscription
- go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+ go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
}
- // upload some chunks just after subscribe
- uploadRandomChunks(15, true)
-
checkErrChan(ctx, t, errChan, wantedChunksCount)
}
@@ -226,30 +211,22 @@ func TestDB_SubscribePull_until(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
addrs := make(map[uint8][]chunk.Address)
var addrsMu sync.Mutex
var wantedChunksCount int
- lastTimestamp := time.Now().UTC().UnixNano()
- var lastTimestampMu sync.RWMutex
- defer setNow(func() (t int64) {
- lastTimestampMu.Lock()
- defer lastTimestampMu.Unlock()
- lastTimestamp++
- return lastTimestamp
- })()
+ binIDCounter := make(map[uint8]uint64)
+ var binIDCounterMu sync.RWMutex
- uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
+ uploadRandomChunks := func(count int, wanted bool) (last map[uint8]uint64) {
addrsMu.Lock()
defer addrsMu.Unlock()
- last = make(map[uint8]ChunkDescriptor)
+ last = make(map[uint8]uint64)
for i := 0; i < count; i++ {
ch := generateTestRandomChunk()
- err := uploader.Put(ch)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
@@ -264,14 +241,11 @@ func TestDB_SubscribePull_until(t *testing.T) {
wantedChunksCount++
}
- lastTimestampMu.RLock()
- storeTimestamp := lastTimestamp
- lastTimestampMu.RUnlock()
+ binIDCounterMu.RLock()
+ binIDCounter[bin]++
+ binIDCounterMu.RUnlock()
- last[bin] = ChunkDescriptor{
- Address: ch.Address(),
- StoreTimestamp: storeTimestamp,
- }
+ last[bin] = binIDCounter[bin]
}
return last
}
@@ -295,11 +269,11 @@ func TestDB_SubscribePull_until(t *testing.T) {
if !ok {
continue
}
- ch, stop := db.SubscribePull(ctx, bin, nil, &until)
+ ch, stop := db.SubscribePull(ctx, bin, 0, until)
defer stop()
// receive and validate addresses from the subscription
- go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+ go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
}
// upload some chunks just after subscribe
@@ -316,30 +290,22 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
addrs := make(map[uint8][]chunk.Address)
var addrsMu sync.Mutex
var wantedChunksCount int
- lastTimestamp := time.Now().UTC().UnixNano()
- var lastTimestampMu sync.RWMutex
- defer setNow(func() (t int64) {
- lastTimestampMu.Lock()
- defer lastTimestampMu.Unlock()
- lastTimestamp++
- return lastTimestamp
- })()
+ binIDCounter := make(map[uint8]uint64)
+ var binIDCounterMu sync.RWMutex
- uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
+ uploadRandomChunks := func(count int, wanted bool) (last map[uint8]uint64) {
addrsMu.Lock()
defer addrsMu.Unlock()
- last = make(map[uint8]ChunkDescriptor)
+ last = make(map[uint8]uint64)
for i := 0; i < count; i++ {
ch := generateTestRandomChunk()
- err := uploader.Put(ch)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
@@ -354,14 +320,11 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
wantedChunksCount++
}
- lastTimestampMu.RLock()
- storeTimestamp := lastTimestamp
- lastTimestampMu.RUnlock()
+ binIDCounterMu.RLock()
+ binIDCounter[bin]++
+ binIDCounterMu.RUnlock()
- last[bin] = ChunkDescriptor{
- Address: ch.Address(),
- StoreTimestamp: storeTimestamp,
- }
+ last[bin] = binIDCounter[bin]
}
return last
}
@@ -387,9 +350,10 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
errChan := make(chan error)
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
- var since *ChunkDescriptor
- if c, ok := upload1[bin]; ok {
- since = &c
+ since, ok := upload1[bin]
+ if ok {
+ // start from the next uploaded chunk
+ since++
}
until, ok := upload2[bin]
if !ok {
@@ -397,11 +361,11 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
// skip this bin from testing
continue
}
- ch, stop := db.SubscribePull(ctx, bin, since, &until)
+ ch, stop := db.SubscribePull(ctx, bin, since, until)
defer stop()
// receive and validate addresses from the subscription
- go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
+ go readPullSubscriptionBin(ctx, db, bin, ch, addrs, &addrsMu, errChan)
}
// upload some chunks just after subscribe
@@ -412,14 +376,14 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
// uploadRandomChunksBin uploads random chunks to database and adds them to
// the map of addresses per bin.
-func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
+func uploadRandomChunksBin(t *testing.T, db *DB, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
addrsMu.Lock()
defer addrsMu.Unlock()
for i := 0; i < count; i++ {
ch := generateTestRandomChunk()
- err := uploader.Put(ch)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
@@ -434,10 +398,10 @@ func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uin
}
}
-// readPullSubscriptionBin is a helper function that reads all ChunkDescriptors from a channel and
-// sends error to errChan, even if it is nil, to count the number of ChunkDescriptors
+// readPullSubscriptionBin is a helper function that reads all chunk.Descriptors from a channel and
+// sends error to errChan, even if it is nil, to count the number of chunk.Descriptors
// returned by the channel.
-func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDescriptor, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, errChan chan error) {
+func readPullSubscriptionBin(ctx context.Context, db *DB, bin uint8, ch <-chan chunk.Descriptor, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, errChan chan error) {
var i int // address index
for {
select {
@@ -450,9 +414,20 @@ func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDesc
if i+1 > len(addrs[bin]) {
err = fmt.Errorf("got more chunk addresses %v, then expected %v, for bin %v", i+1, len(addrs[bin]), bin)
} else {
- want := addrs[bin][i]
- if !bytes.Equal(got.Address, want) {
- err = fmt.Errorf("got chunk address %v in bin %v %s, want %s", i, bin, got.Address.Hex(), want)
+ addr := addrs[bin][i]
+ if !bytes.Equal(got.Address, addr) {
+ err = fmt.Errorf("got chunk bin id %v in bin %v %v, want %v", i, bin, got.Address.Hex(), addr.Hex())
+ } else {
+ want, getErr := db.retrievalDataIndex.Get(shed.Item{
+ Address: addr,
+ })
+ if getErr != nil {
+ err = fmt.Errorf("got chunk (bin id %v in bin %v) from retrieval index %s: %v", i, bin, addrs[bin][i].Hex(), getErr)
+ } else {
+ if got.BinID != want.BinID {
+ err = fmt.Errorf("got chunk %v in bin %v: got bin id %v, want %v", i, bin, got.BinID, want.BinID)
+ }
+ }
}
}
addrsMu.Unlock()
@@ -486,27 +461,19 @@ func checkErrChan(ctx context.Context, t *testing.T, errChan chan error, wantedC
}
}
-// TestDB_LastPullSubscriptionChunk validates that LastPullSubscriptionChunk
+// TestDB_LastPullSubscriptionBinID validates that LastPullSubscriptionBinID
// is returning the last bin id for proximity order bins by
// doing a few rounds of chunk uploads.
-func TestDB_LastPullSubscriptionChunk(t *testing.T) {
+func TestDB_LastPullSubscriptionBinID(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
addrs := make(map[uint8][]chunk.Address)
- lastTimestamp := time.Now().UTC().UnixNano()
- var lastTimestampMu sync.RWMutex
- defer setNow(func() (t int64) {
- lastTimestampMu.Lock()
- defer lastTimestampMu.Unlock()
- lastTimestamp++
- return lastTimestamp
- })()
+ binIDCounter := make(map[uint8]uint64)
+ var binIDCounterMu sync.RWMutex
- last := make(map[uint8]ChunkDescriptor)
+ last := make(map[uint8]uint64)
// do a few rounds of uploads and check if
// last pull subscription bin id is correct
@@ -516,7 +483,7 @@ func TestDB_LastPullSubscriptionChunk(t *testing.T) {
for i := 0; i < count; i++ {
ch := generateTestRandomChunk()
- err := uploader.Put(ch)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
@@ -528,32 +495,42 @@ func TestDB_LastPullSubscriptionChunk(t *testing.T) {
}
addrs[bin] = append(addrs[bin], ch.Address())
- lastTimestampMu.RLock()
- storeTimestamp := lastTimestamp
- lastTimestampMu.RUnlock()
+ binIDCounterMu.Lock()
+ binIDCounter[bin]++
+ binIDCounterMu.Unlock()
- last[bin] = ChunkDescriptor{
- Address: ch.Address(),
- StoreTimestamp: storeTimestamp,
- }
+ last[bin] = binIDCounter[bin]
}
// check
for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
want, ok := last[bin]
- got, err := db.LastPullSubscriptionChunk(bin)
+ got, err := db.LastPullSubscriptionBinID(bin)
if ok {
if err != nil {
t.Errorf("got unexpected error value %v", err)
}
- if !bytes.Equal(got.Address, want.Address) {
- t.Errorf("got last address %s, want %s", got.Address.Hex(), want.Address.Hex())
- }
- } else {
- if err != chunk.ErrChunkNotFound {
- t.Errorf("got unexpected error value %v, want %v", err, chunk.ErrChunkNotFound)
- }
}
+ if got != want {
+ t.Errorf("got last bin id %v, want %v", got, want)
+ }
+ }
+ }
+}
+
+// TestAddressInBin validates that function addressInBin
+// returns a valid address for every proximity order bin.
+func TestAddressInBin(t *testing.T) {
+ db, cleanupFunc := newTestDB(t, nil)
+ defer cleanupFunc()
+
+ for po := uint8(0); po < chunk.MaxPO; po++ {
+ addr := db.addressInBin(po)
+
+ got := db.po(addr)
+
+ if got != uint8(po) {
+ t.Errorf("got po %v, want %v", got, po)
}
}
}
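
For orientation, a minimal sketch of how a consumer might drain one proximity-order bin with the reworked pull subscription, assuming only the SubscribePull signature and the chunk.Descriptor fields visible in the hunks above; the drainBin helper and its output are illustrative, not part of this change:

package main

import (
    "context"
    "fmt"

    "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// drainBin reads pull-sync descriptors for one bin between the given bin IDs,
// then stops the subscription.
func drainBin(db *localstore.DB, bin uint8, since, until uint64) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    descriptors, stop := db.SubscribePull(ctx, bin, since, until)
    defer stop()

    for d := range descriptors {
        // each descriptor carries the chunk address and its per-bin sequence number
        fmt.Printf("bin %d: chunk %s, bin id %d\n", bin, d.Address.Hex(), d.BinID)
    }
}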
diff --git a/swarm/storage/localstore/subscription_push.go b/swarm/storage/localstore/subscription_push.go
index 5cbc2eb6f..f2463af2a 100644
--- a/swarm/storage/localstore/subscription_push.go
+++ b/swarm/storage/localstore/subscription_push.go
@@ -19,10 +19,15 @@ package localstore
import (
"context"
"sync"
+ "time"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/shed"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
+ "github.com/opentracing/opentracing-go"
+ olog "github.com/opentracing/opentracing-go/log"
)
// SubscribePush returns a channel that provides storage chunks with ordering from push syncing index.
@@ -30,6 +35,9 @@ import (
// the returned channel without any errors. Make sure that you check the second returned parameter
// from the channel to stop iteration when its value is false.
func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop func()) {
+ metricName := "localstore.SubscribePush"
+ metrics.GetOrRegisterCounter(metricName, nil).Inc(1)
+
chunks := make(chan chunk.Chunk)
trigger := make(chan struct{}, 1)
@@ -44,6 +52,7 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop fun
var stopChanOnce sync.Once
go func() {
+ defer metrics.GetOrRegisterCounter(metricName+".done", nil).Inc(1)
// close the returned chunkInfo channel at the end to
// signal that the subscription is done
defer close(chunks)
@@ -57,6 +66,12 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop fun
// - last index Item is reached
// - subscription stop is called
// - context is done
+ metrics.GetOrRegisterCounter(metricName+".iter", nil).Inc(1)
+
+ ctx, sp := spancontext.StartSpan(ctx, metricName+".iter")
+
+ iterStart := time.Now()
+ var count int
err := db.pushIndex.Iterate(func(item shed.Item) (stop bool, err error) {
// get chunk data
dataItem, err := db.retrievalDataIndex.Get(item)
@@ -66,6 +81,7 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop fun
select {
case chunks <- chunk.NewChunk(dataItem.Address, dataItem.Data):
+ count++
// set next iteration start item
// when its chunk is successfully sent to channel
sinceItem = &item
@@ -87,7 +103,20 @@ func (db *DB) SubscribePush(ctx context.Context) (c <-chan chunk.Chunk, stop fun
// iterator call, skip it in this one
SkipStartFromItem: true,
})
+
+ totalTimeMetric(metricName+".iter", iterStart)
+
+ sp.FinishWithOptions(opentracing.FinishOptions{
+ LogRecords: []opentracing.LogRecord{
+ {
+ Timestamp: time.Now(),
+ Fields: []olog.Field{olog.Int("count", count)},
+ },
+ },
+ })
+
if err != nil {
+ metrics.GetOrRegisterCounter(metricName+".iter.error", nil).Inc(1)
log.Error("localstore push subscription iteration", "err", err)
return
}
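
A corresponding sketch for the push side, assuming only the SubscribePush signature shown above; consumeForPushSync is hypothetical and stands in for whatever the syncer does with each chunk:

package main

import (
    "context"
    "fmt"

    "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// consumeForPushSync receives chunks in push-index order until the
// subscription channel is closed or the context is cancelled.
func consumeForPushSync(ctx context.Context, db *localstore.DB) {
    chunks, stop := db.SubscribePush(ctx)
    defer stop()

    for ch := range chunks {
        // a real node would hand the chunk to push syncing here
        fmt.Printf("push-sync %s (%d bytes)\n", ch.Address().Hex(), len(ch.Data()))
    }
}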
diff --git a/swarm/storage/localstore/subscription_push_test.go b/swarm/storage/localstore/subscription_push_test.go
index 30fb98eb2..6124a534b 100644
--- a/swarm/storage/localstore/subscription_push_test.go
+++ b/swarm/storage/localstore/subscription_push_test.go
@@ -34,8 +34,6 @@ func TestDB_SubscribePush(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
chunks := make([]chunk.Chunk, 0)
var chunksMu sync.Mutex
@@ -44,14 +42,14 @@ func TestDB_SubscribePush(t *testing.T) {
defer chunksMu.Unlock()
for i := 0; i < count; i++ {
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := uploader.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- chunks = append(chunks, chunk)
+ chunks = append(chunks, ch)
}
}
@@ -122,8 +120,6 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
- uploader := db.NewPutter(ModePutUpload)
-
addrs := make([]chunk.Address, 0)
var addrsMu sync.Mutex
@@ -132,14 +128,14 @@ func TestDB_SubscribePush_multiple(t *testing.T) {
defer addrsMu.Unlock()
for i := 0; i < count; i++ {
- chunk := generateTestRandomChunk()
+ ch := generateTestRandomChunk()
- err := uploader.Put(chunk)
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
- addrs = append(addrs, chunk.Address())
+ addrs = append(addrs, ch.Address())
}
}
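
The tests above all replace uploader.Put(ch) with the mode-aware localstore call. A minimal sketch of that call outside a test, assuming the (exists, error) return that the NetStore changes further down rely on; storeUpload is an illustrative helper, not part of the patch:

package main

import (
    "context"
    "log"

    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

// storeUpload writes one chunk in upload mode; the boolean reports whether
// the chunk was already present in the store.
func storeUpload(db *localstore.DB, ch chunk.Chunk) {
    exists, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
    if err != nil {
        log.Fatalf("put chunk %s: %v", ch.Address().Hex(), err)
    }
    if exists {
        log.Printf("chunk %s was already stored", ch.Address().Hex())
    }
}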
diff --git a/swarm/storage/localstore_test.go b/swarm/storage/localstore_test.go
deleted file mode 100644
index fcadcefa0..000000000
--- a/swarm/storage/localstore_test.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-import (
- "context"
- "io/ioutil"
- "os"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/swarm/chunk"
-)
-
-var (
- hashfunc = MakeHashFunc(DefaultHash)
-)
-
-// tests that the content address validator correctly checks the data
-// tests that feed update chunks are passed through content address validator
-// the test checking the resouce update validator internal correctness is found in storage/feeds/handler_test.go
-func TestValidator(t *testing.T) {
- // set up localstore
- datadir, err := ioutil.TempDir("", "storage-testvalidator")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(datadir)
-
- params := NewDefaultLocalStoreParams()
- params.Init(datadir)
- store, err := NewLocalStore(params, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // check puts with no validators, both succeed
- chunks := GenerateRandomChunks(259, 2)
- goodChunk := chunks[0]
- badChunk := chunks[1]
- copy(badChunk.Data(), goodChunk.Data())
-
- errs := putChunks(store, goodChunk, badChunk)
- if errs[0] != nil {
- t.Fatalf("expected no error on good content address chunk in spite of no validation, but got: %s", err)
- }
- if errs[1] != nil {
- t.Fatalf("expected no error on bad content address chunk in spite of no validation, but got: %s", err)
- }
-
- // add content address validator and check puts
- // bad should fail, good should pass
- store.Validators = append(store.Validators, NewContentAddressValidator(hashfunc))
- chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
- goodChunk = chunks[0]
- badChunk = chunks[1]
- copy(badChunk.Data(), goodChunk.Data())
-
- errs = putChunks(store, goodChunk, badChunk)
- if errs[0] != nil {
- t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
- }
- if errs[1] == nil {
- t.Fatal("expected error on bad content address chunk with content address validator only, but got nil")
- }
-
- // append a validator that always denies
- // bad should fail, good should pass,
- var negV boolTestValidator
- store.Validators = append(store.Validators, negV)
-
- chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
- goodChunk = chunks[0]
- badChunk = chunks[1]
- copy(badChunk.Data(), goodChunk.Data())
-
- errs = putChunks(store, goodChunk, badChunk)
- if errs[0] != nil {
- t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
- }
- if errs[1] == nil {
- t.Fatal("expected error on bad content address chunk with content address validator only, but got nil")
- }
-
- // append a validator that always approves
- // all shall pass
- var posV boolTestValidator = true
- store.Validators = append(store.Validators, posV)
-
- chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
- goodChunk = chunks[0]
- badChunk = chunks[1]
- copy(badChunk.Data(), goodChunk.Data())
-
- errs = putChunks(store, goodChunk, badChunk)
- if errs[0] != nil {
- t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
- }
- if errs[1] != nil {
- t.Fatalf("expected no error on bad content address chunk in spite of no validation, but got: %s", err)
- }
-
-}
-
-type boolTestValidator bool
-
-func (self boolTestValidator) Validate(chunk Chunk) bool {
- return bool(self)
-}
-
-// putChunks adds chunks to localstore
-// It waits for receive on the stored channel
-// It logs but does not fail on delivery error
-func putChunks(store *LocalStore, chunks ...Chunk) []error {
- i := 0
- f := func(n int64) Chunk {
- chunk := chunks[i]
- i++
- return chunk
- }
- _, errs := put(store, len(chunks), f)
- return errs
-}
-
-func put(store *LocalStore, n int, f func(i int64) Chunk) (hs []Address, errs []error) {
- for i := int64(0); i < int64(n); i++ {
- chunk := f(chunk.DefaultSize)
- err := store.Put(context.TODO(), chunk)
- errs = append(errs, err)
- hs = append(hs, chunk.Address())
- }
- return hs, errs
-}
-
-// TestGetFrequentlyAccessedChunkWontGetGarbageCollected tests that the most
-// frequently accessed chunk is not garbage collected from LDBStore, i.e.,
-// from disk when we are at the capacity and garbage collector runs. For that
-// we start putting random chunks into the DB while continuously accessing the
-// chunk we care about then check if we can still retrieve it from disk.
-func TestGetFrequentlyAccessedChunkWontGetGarbageCollected(t *testing.T) {
- ldbCap := defaultGCRatio
- store, cleanup := setupLocalStore(t, ldbCap)
- defer cleanup()
-
- var chunks []Chunk
- for i := 0; i < ldbCap; i++ {
- chunks = append(chunks, GenerateRandomChunk(chunk.DefaultSize))
- }
-
- mostAccessed := chunks[0].Address()
- for _, chunk := range chunks {
- if err := store.Put(context.Background(), chunk); err != nil {
- t.Fatal(err)
- }
-
- if _, err := store.Get(context.Background(), mostAccessed); err != nil {
- t.Fatal(err)
- }
- // Add time for MarkAccessed() to be able to finish in a separate Goroutine
- time.Sleep(1 * time.Millisecond)
- }
-
- store.DbStore.collectGarbage()
- if _, err := store.DbStore.Get(context.Background(), mostAccessed); err != nil {
- t.Logf("most frequntly accessed chunk not found on disk (key: %v)", mostAccessed)
- t.Fatal(err)
- }
-
-}
-
-func setupLocalStore(t *testing.T, ldbCap int) (ls *LocalStore, cleanup func()) {
- t.Helper()
-
- var err error
- datadir, err := ioutil.TempDir("", "storage")
- if err != nil {
- t.Fatal(err)
- }
-
- params := &LocalStoreParams{
- StoreParams: NewStoreParams(uint64(ldbCap), uint(ldbCap), nil, nil),
- }
- params.Init(datadir)
-
- store, err := NewLocalStore(params, nil)
- if err != nil {
- _ = os.RemoveAll(datadir)
- t.Fatal(err)
- }
-
- cleanup = func() {
- store.Close()
- _ = os.RemoveAll(datadir)
- }
-
- return store, cleanup
-}
-
-func TestHas(t *testing.T) {
- ldbCap := defaultGCRatio
- store, cleanup := setupLocalStore(t, ldbCap)
- defer cleanup()
-
- nonStoredAddr := GenerateRandomChunk(128).Address()
-
- has := store.Has(context.Background(), nonStoredAddr)
- if has {
- t.Fatal("Expected Has() to return false, but returned true!")
- }
-
- storeChunks := GenerateRandomChunks(128, 3)
- for _, ch := range storeChunks {
- err := store.Put(context.Background(), ch)
- if err != nil {
- t.Fatalf("Expected store to store chunk, but it failed: %v", err)
- }
-
- has := store.Has(context.Background(), ch.Address())
- if !has {
- t.Fatal("Expected Has() to return true, but returned false!")
- }
- }
-
- //let's be paranoic and test again that the non-existent chunk returns false
- has = store.Has(context.Background(), nonStoredAddr)
- if has {
- t.Fatal("Expected Has() to return false, but returned true!")
- }
-
-}
diff --git a/swarm/storage/memstore.go b/swarm/storage/memstore.go
deleted file mode 100644
index 611ac3bc5..000000000
--- a/swarm/storage/memstore.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// memory storage layer for the package blockhash
-
-package storage
-
-import (
- "context"
-
- lru "github.com/hashicorp/golang-lru"
-)
-
-type MemStore struct {
- cache *lru.Cache
- disabled bool
-}
-
-//NewMemStore is instantiating a MemStore cache keeping all frequently requested
-//chunks in the `cache` LRU cache.
-func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) {
- if params.CacheCapacity == 0 {
- return &MemStore{
- disabled: true,
- }
- }
-
- c, err := lru.New(int(params.CacheCapacity))
- if err != nil {
- panic(err)
- }
-
- return &MemStore{
- cache: c,
- }
-}
-
-// Has needed to implement SyncChunkStore
-func (m *MemStore) Has(_ context.Context, addr Address) bool {
- return m.cache.Contains(addr)
-}
-
-func (m *MemStore) Get(_ context.Context, addr Address) (Chunk, error) {
- if m.disabled {
- return nil, ErrChunkNotFound
- }
-
- c, ok := m.cache.Get(string(addr))
- if !ok {
- return nil, ErrChunkNotFound
- }
- return c.(Chunk), nil
-}
-
-func (m *MemStore) Put(_ context.Context, c Chunk) error {
- if m.disabled {
- return nil
- }
-
- m.cache.Add(string(c.Address()), c)
- return nil
-}
-
-func (m *MemStore) setCapacity(n int) {
- if n <= 0 {
- m.disabled = true
- } else {
- c, err := lru.New(n)
- if err != nil {
- panic(err)
- }
-
- *m = MemStore{
- cache: c,
- }
- }
-}
-
-func (s *MemStore) Close() {}
diff --git a/swarm/storage/memstore_test.go b/swarm/storage/memstore_test.go
deleted file mode 100644
index 8aaf486a7..000000000
--- a/swarm/storage/memstore_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package storage
-
-import (
- "context"
- "testing"
-
- "github.com/ethereum/go-ethereum/swarm/log"
-)
-
-func newTestMemStore() *MemStore {
- storeparams := NewDefaultStoreParams()
- return NewMemStore(storeparams, nil)
-}
-
-func testMemStoreRandom(n int, t *testing.T) {
- m := newTestMemStore()
- defer m.Close()
- testStoreRandom(m, n, t)
-}
-
-func testMemStoreCorrect(n int, t *testing.T) {
- m := newTestMemStore()
- defer m.Close()
- testStoreCorrect(m, n, t)
-}
-
-func TestMemStoreRandom_1(t *testing.T) {
- testMemStoreRandom(1, t)
-}
-
-func TestMemStoreCorrect_1(t *testing.T) {
- testMemStoreCorrect(1, t)
-}
-
-func TestMemStoreRandom_1k(t *testing.T) {
- testMemStoreRandom(1000, t)
-}
-
-func TestMemStoreCorrect_1k(t *testing.T) {
- testMemStoreCorrect(100, t)
-}
-
-func TestMemStoreNotFound(t *testing.T) {
- m := newTestMemStore()
- defer m.Close()
-
- _, err := m.Get(context.TODO(), ZeroAddr)
- if err != ErrChunkNotFound {
- t.Errorf("Expected ErrChunkNotFound, got %v", err)
- }
-}
-
-func benchmarkMemStorePut(n int, b *testing.B) {
- m := newTestMemStore()
- defer m.Close()
- benchmarkStorePut(m, n, b)
-}
-
-func benchmarkMemStoreGet(n int, b *testing.B) {
- m := newTestMemStore()
- defer m.Close()
- benchmarkStoreGet(m, n, b)
-}
-
-func BenchmarkMemStorePut_500(b *testing.B) {
- benchmarkMemStorePut(500, b)
-}
-
-func BenchmarkMemStoreGet_500(b *testing.B) {
- benchmarkMemStoreGet(500, b)
-}
-
-func TestMemStoreAndLDBStore(t *testing.T) {
- ldb, cleanup := newLDBStore(t)
- ldb.setCapacity(4000)
- defer cleanup()
-
- cacheCap := 200
- memStore := NewMemStore(NewStoreParams(4000, 200, nil, nil), nil)
-
- tests := []struct {
- n int // number of chunks to push to memStore
- chunkSize int64 // size of chunk (by default in Swarm - 4096)
- }{
- {
- n: 1,
- chunkSize: 4096,
- },
- {
- n: 101,
- chunkSize: 4096,
- },
- {
- n: 501,
- chunkSize: 4096,
- },
- {
- n: 1100,
- chunkSize: 4096,
- },
- }
-
- for i, tt := range tests {
- log.Info("running test", "idx", i, "tt", tt)
- var chunks []Chunk
-
- for i := 0; i < tt.n; i++ {
- c := GenerateRandomChunk(tt.chunkSize)
- chunks = append(chunks, c)
- }
-
- for i := 0; i < tt.n; i++ {
- err := ldb.Put(context.TODO(), chunks[i])
- if err != nil {
- t.Fatal(err)
- }
- err = memStore.Put(context.TODO(), chunks[i])
- if err != nil {
- t.Fatal(err)
- }
-
- if got := memStore.cache.Len(); got > cacheCap {
- t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got)
- }
-
- }
-
- for i := 0; i < tt.n; i++ {
- _, err := memStore.Get(context.TODO(), chunks[i].Address())
- if err != nil {
- if err == ErrChunkNotFound {
- _, err := ldb.Get(context.TODO(), chunks[i].Address())
- if err != nil {
- t.Fatalf("couldn't get chunk %v from ldb, got error: %v", i, err)
- }
- } else {
- t.Fatalf("got error from memstore: %v", err)
- }
- }
- }
- }
-}
diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go
index 7741b8f7b..b675384ce 100644
--- a/swarm/storage/netstore.go
+++ b/swarm/storage/netstore.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/opentracing/opentracing-go"
@@ -49,8 +50,8 @@ type NetFetcher interface {
// fetchers are unique to a chunk and are stored in fetchers LRU memory cache
// fetchFuncFactory is a factory object to create a fetch function for a specific chunk address
type NetStore struct {
+ chunk.Store
mu sync.Mutex
- store SyncChunkStore
fetchers *lru.Cache
NewNetFetcherFunc NewNetFetcherFunc
closeC chan struct{}
@@ -60,13 +61,13 @@ var fetcherTimeout = 2 * time.Minute // timeout to cancel the fetcher even if re
// NewNetStore creates a new NetStore object using the given local store. newFetchFunc is a
// constructor function that can create a fetch function for a specific chunk address.
-func NewNetStore(store SyncChunkStore, nnf NewNetFetcherFunc) (*NetStore, error) {
+func NewNetStore(store chunk.Store, nnf NewNetFetcherFunc) (*NetStore, error) {
fetchers, err := lru.New(defaultChunkRequestsCacheCapacity)
if err != nil {
return nil, err
}
return &NetStore{
- store: store,
+ Store: store,
fetchers: fetchers,
NewNetFetcherFunc: nnf,
closeC: make(chan struct{}),
@@ -75,14 +76,14 @@ func NewNetStore(store SyncChunkStore, nnf NewNetFetcherFunc) (*NetStore, error)
// Put stores a chunk in localstore, and delivers to all requestor peers using the fetcher stored in
// the fetchers cache
-func (n *NetStore) Put(ctx context.Context, ch Chunk) error {
+func (n *NetStore) Put(ctx context.Context, mode chunk.ModePut, ch Chunk) (bool, error) {
n.mu.Lock()
defer n.mu.Unlock()
// put the chunk to the store, there should be no error
- err := n.store.Put(ctx, ch)
+ exists, err := n.Store.Put(ctx, mode, ch)
if err != nil {
- return err
+ return exists, err
}
// if chunk is now put in the store, check if there was an active fetcher and call deliver on it
@@ -92,15 +93,15 @@ func (n *NetStore) Put(ctx context.Context, ch Chunk) error {
log.Trace("n.getFetcher deliver", "ref", ch.Address())
f.deliver(ctx, ch)
}
- return nil
+ return exists, nil
}
// Get retrieves the chunk from the NetStore DPA synchronously.
// It calls NetStore.get, and if the chunk is not in local Storage
// it calls fetch with the request, which blocks until the chunk
// arrives or the context is done
-func (n *NetStore) Get(rctx context.Context, ref Address) (Chunk, error) {
- chunk, fetch, err := n.get(rctx, ref)
+func (n *NetStore) Get(rctx context.Context, mode chunk.ModeGet, ref Address) (Chunk, error) {
+ chunk, fetch, err := n.get(rctx, mode, ref)
if err != nil {
return nil, err
}
@@ -118,18 +119,10 @@ func (n *NetStore) Get(rctx context.Context, ref Address) (Chunk, error) {
return fetch(rctx)
}
-func (n *NetStore) BinIndex(po uint8) uint64 {
- return n.store.BinIndex(po)
-}
-
-func (n *NetStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
- return n.store.Iterator(from, to, po, f)
-}
-
// FetchFunc returns nil if the store contains the given address. Otherwise it returns a wait function,
// which returns after the chunk is available or the context is done
func (n *NetStore) FetchFunc(ctx context.Context, ref Address) func(context.Context) error {
- chunk, fetch, _ := n.get(ctx, ref)
+ chunk, fetch, _ := n.get(ctx, chunk.ModeGetRequest, ref)
if chunk != nil {
return nil
}
@@ -140,9 +133,8 @@ func (n *NetStore) FetchFunc(ctx context.Context, ref Address) func(context.Cont
}
// Close chunk store
-func (n *NetStore) Close() {
+func (n *NetStore) Close() (err error) {
close(n.closeC)
- n.store.Close()
wg := sync.WaitGroup{}
for _, key := range n.fetchers.Keys() {
@@ -162,6 +154,8 @@ func (n *NetStore) Close() {
}
}
wg.Wait()
+
+ return n.Store.Close()
}
// get attempts to retrieve the chunk from LocalStore
@@ -172,11 +166,11 @@ func (n *NetStore) Close() {
// or all fetcher contexts are done.
// It returns a chunk, a fetcher function and an error
// If chunk is nil, the returned fetch function needs to be called with a context to return the chunk.
-func (n *NetStore) get(ctx context.Context, ref Address) (Chunk, func(context.Context) (Chunk, error), error) {
+func (n *NetStore) get(ctx context.Context, mode chunk.ModeGet, ref Address) (Chunk, func(context.Context) (Chunk, error), error) {
n.mu.Lock()
defer n.mu.Unlock()
- chunk, err := n.store.Get(ctx, ref)
+ chunk, err := n.Store.Get(ctx, mode, ref)
if err != nil {
// TODO: Fix comparison - we should be comparing against leveldb.ErrNotFound, this error should be wrapped.
if err != ErrChunkNotFound && err != leveldb.ErrNotFound {
@@ -192,13 +186,6 @@ func (n *NetStore) get(ctx context.Context, ref Address) (Chunk, func(context.Co
return chunk, nil, nil
}
-// Has is the storage layer entry point to query the underlying
-// database to return if it has a chunk or not.
-// Called from the DebugAPI
-func (n *NetStore) Has(ctx context.Context, ref Address) bool {
- return n.store.Has(ctx, ref)
-}
-
// getOrCreateFetcher attempts to retrieve an existing fetcher
// if none exists, creates one and saves it in the fetchers cache
// caller must hold the lock
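
From a caller's perspective, the NetStore now threads a get/put mode through to the wrapped chunk.Store. A small hypothetical helper, using only the signatures shown in this file; the 10-second deadline is an arbitrary example value:

package main

import (
    "context"
    "time"

    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/storage"
)

// fetchWithTimeout asks the NetStore for a chunk in request mode; Get blocks
// until the chunk is found locally, fetched from the network, or the context
// deadline expires.
func fetchWithTimeout(ns *storage.NetStore, ref storage.Address) (storage.Chunk, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    return ns.Get(ctx, chunk.ModeGetRequest, ref)
}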
diff --git a/swarm/storage/netstore_test.go b/swarm/storage/netstore_test.go
index 653877625..dc0727987 100644
--- a/swarm/storage/netstore_test.go
+++ b/swarm/storage/netstore_test.go
@@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"io/ioutil"
+ "os"
"sync"
"testing"
"time"
@@ -30,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/swarm/chunk"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
)
var sourcePeerID = enode.HexID("99d8594b52298567d2ca3f4c441a5ba0140ee9245e26460d01102a52773c73b9")
@@ -76,45 +78,43 @@ func (m *mockNetFetchFuncFactory) newMockNetFetcher(ctx context.Context, _ Addre
return m.fetcher
}
-func mustNewNetStore(t *testing.T) *NetStore {
- netStore, _ := mustNewNetStoreWithFetcher(t)
- return netStore
-}
-
-func mustNewNetStoreWithFetcher(t *testing.T) (*NetStore, *mockNetFetcher) {
+func newTestNetStore(t *testing.T) (netStore *NetStore, fetcher *mockNetFetcher, cleanup func()) {
t.Helper()
- datadir, err := ioutil.TempDir("", "netstore")
+ dir, err := ioutil.TempDir("", "swarm-storage-")
if err != nil {
t.Fatal(err)
}
- naddr := make([]byte, 32)
- params := NewDefaultLocalStoreParams()
- params.Init(datadir)
- params.BaseKey = naddr
- localStore, err := NewTestLocalStoreForAddr(params)
+ localStore, err := localstore.New(dir, make([]byte, 32), nil)
if err != nil {
+ os.RemoveAll(dir)
t.Fatal(err)
}
+ cleanup = func() {
+ localStore.Close()
+ os.RemoveAll(dir)
+ }
- fetcher := &mockNetFetcher{}
+ fetcher = new(mockNetFetcher)
mockNetFetchFuncFactory := &mockNetFetchFuncFactory{
fetcher: fetcher,
}
- netStore, err := NewNetStore(localStore, mockNetFetchFuncFactory.newMockNetFetcher)
+ netStore, err = NewNetStore(localStore, mockNetFetchFuncFactory.newMockNetFetcher)
if err != nil {
+ cleanup()
t.Fatal(err)
}
- return netStore, fetcher
+ return netStore, fetcher, cleanup
}
// TestNetStoreGetAndPut tests calling NetStore.Get which is blocked until the same chunk is Put.
// After the Put there should be no active fetchers, and the context created for the fetcher should
// be cancelled.
func TestNetStoreGetAndPut(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
@@ -126,12 +126,12 @@ func TestNetStoreGetAndPut(t *testing.T) {
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
- if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+ if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
putErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
return
}
- err := netStore.Put(ctx, chunk)
+ _, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
if err != nil {
putErrC <- fmt.Errorf("Expected no err got %v", err)
return
@@ -141,7 +141,7 @@ func TestNetStoreGetAndPut(t *testing.T) {
}()
close(c)
- recChunk, err := netStore.Get(ctx, chunk.Address()) // this is blocked until the Put above is done
+ recChunk, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address()) // this is blocked until the Put above is done
if err != nil {
t.Fatalf("Expected no err got %v", err)
}
@@ -150,7 +150,7 @@ func TestNetStoreGetAndPut(t *testing.T) {
t.Fatal(err)
}
// the retrieved chunk should be the same as what we Put
- if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
+ if !bytes.Equal(recChunk.Address(), ch.Address()) || !bytes.Equal(recChunk.Data(), ch.Data()) {
t.Fatalf("Different chunk received than what was put")
}
// the chunk is already available locally, so there should be no active fetchers waiting for it
@@ -172,26 +172,27 @@ func TestNetStoreGetAndPut(t *testing.T) {
// After the Put the chunk is available locally, so the Get can just retrieve it from LocalStore,
// there is no need to create fetchers.
func TestNetStoreGetAfterPut(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
// First we Put the chunk, so the chunk will be available locally
- err := netStore.Put(ctx, chunk)
+ _, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
if err != nil {
t.Fatalf("Expected no err got %v", err)
}
// Get should retrieve the chunk from LocalStore, without creating fetcher
- recChunk, err := netStore.Get(ctx, chunk.Address())
+ recChunk, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatalf("Expected no err got %v", err)
}
// the retrieved chunk should be the same as what we Put
- if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
+ if !bytes.Equal(recChunk.Address(), ch.Address()) || !bytes.Equal(recChunk.Data(), ch.Data()) {
t.Fatalf("Different chunk received than what was put")
}
// no fetcher offer or request should be created for a locally available chunk
@@ -207,9 +208,10 @@ func TestNetStoreGetAfterPut(t *testing.T) {
// TestNetStoreGetTimeout tests a Get call for an unavailable chunk and waits for timeout
func TestNetStoreGetTimeout(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
@@ -221,7 +223,7 @@ func TestNetStoreGetTimeout(t *testing.T) {
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
- if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+ if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
return
}
@@ -232,7 +234,7 @@ func TestNetStoreGetTimeout(t *testing.T) {
close(c)
// We call Get on this chunk, which is not in LocalStore. We don't Put it at all, so there will
// be a timeout
- _, err := netStore.Get(ctx, chunk.Address())
+ _, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
// Check if the timeout happened
if err != context.DeadlineExceeded {
@@ -259,9 +261,10 @@ func TestNetStoreGetTimeout(t *testing.T) {
// TestNetStoreGetCancel tests a Get call for an unavailable chunk, then cancels the context and checks
// the errors
func TestNetStoreGetCancel(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
@@ -271,7 +274,7 @@ func TestNetStoreGetCancel(t *testing.T) {
<-c // wait for the Get to be called
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
// check if netStore created a fetcher in the Get call for the unavailable chunk
- if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+ if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
return
}
@@ -283,7 +286,7 @@ func TestNetStoreGetCancel(t *testing.T) {
close(c)
// We call Get with an unavailable chunk, so it will create a fetcher and wait for delivery
- _, err := netStore.Get(ctx, chunk.Address())
+ _, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
if err := <-fetcherErrC; err != nil {
t.Fatal(err)
@@ -311,9 +314,10 @@ func TestNetStoreGetCancel(t *testing.T) {
// delivered with a Put, we have to make sure all Get calls return, and they use a single fetcher
// for the chunk retrieval
func TestNetStoreMultipleGetAndPut(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
@@ -327,7 +331,7 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
putErrC <- errors.New("Expected netStore to use one fetcher for all Get calls")
return
}
- err := netStore.Put(ctx, chunk)
+ _, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
if err != nil {
putErrC <- fmt.Errorf("Expected no err got %v", err)
return
@@ -340,11 +344,11 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
errC := make(chan error)
for i := 0; i < count; i++ {
go func() {
- recChunk, err := netStore.Get(ctx, chunk.Address())
+ recChunk, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
if err != nil {
errC <- fmt.Errorf("Expected no err got %v", err)
}
- if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
+ if !bytes.Equal(recChunk.Address(), ch.Address()) || !bytes.Equal(recChunk.Data(), ch.Data()) {
errC <- errors.New("Different chunk received than what was put")
}
errC <- nil
@@ -385,7 +389,8 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
// TestNetStoreFetchFuncTimeout tests a FetchFunc call for an unavailable chunk and waits for timeout
func TestNetStoreFetchFuncTimeout(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
chunk := GenerateRandomChunk(chunk.DefaultSize)
@@ -424,21 +429,22 @@ func TestNetStoreFetchFuncTimeout(t *testing.T) {
// TestNetStoreFetchFuncAfterPut tests that the FetchFunc should return nil for a locally available chunk
func TestNetStoreFetchFuncAfterPut(t *testing.T) {
- netStore := mustNewNetStore(t)
+ netStore, _, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
// We deliver the created chunk with a Put
- err := netStore.Put(ctx, chunk)
+ _, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
if err != nil {
t.Fatalf("Expected no err got %v", err)
}
// FetchFunc should return nil, because the chunk is available locally, no need to fetch it
- wait := netStore.FetchFunc(ctx, chunk.Address())
+ wait := netStore.FetchFunc(ctx, ch.Address())
if wait != nil {
t.Fatal("Expected wait to be nil")
}
@@ -451,16 +457,17 @@ func TestNetStoreFetchFuncAfterPut(t *testing.T) {
// TestNetStoreGetCallsRequest tests if Get created a request on the NetFetcher for an unavailable chunk
func TestNetStoreGetCallsRequest(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
ctx := context.WithValue(context.Background(), "hopcount", uint8(5))
ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
defer cancel()
// We call Get for an unavailable chunk; it will time out because the chunk is not delivered
- _, err := netStore.Get(ctx, chunk.Address())
+ _, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
if err != context.DeadlineExceeded {
t.Fatalf("Expected context.DeadlineExceeded err got %v", err)
@@ -479,9 +486,10 @@ func TestNetStoreGetCallsRequest(t *testing.T) {
// TestNetStoreGetCallsOffer tests if Get creates an offer on the NetFetcher for an unavailable chunk
// in case of a source peer provided in the context.
func TestNetStoreGetCallsOffer(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
// If a source peer is added to the context, NetStore will handle it as an offer
ctx := context.WithValue(context.Background(), "source", sourcePeerID.String())
@@ -489,7 +497,7 @@ func TestNetStoreGetCallsOffer(t *testing.T) {
defer cancel()
// We call Get for an unavailable chunk; it will time out because the chunk is not delivered
- _, err := netStore.Get(ctx, chunk.Address())
+ _, err := netStore.Get(ctx, chunk.ModeGetRequest, ch.Address())
if err != context.DeadlineExceeded {
t.Fatalf("Expect error %v got %v", context.DeadlineExceeded, err)
@@ -513,8 +521,8 @@ func TestNetStoreGetCallsOffer(t *testing.T) {
// TestNetStoreFetcherCountPeers tests multiple NetStore.Get calls with peer in the context.
// There is no Put call, so the Get calls timeout
func TestNetStoreFetcherCountPeers(t *testing.T) {
-
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
addr := randomAddr()
peers := []string{randomAddr().Hex(), randomAddr().Hex(), randomAddr().Hex()}
@@ -529,7 +537,7 @@ func TestNetStoreFetcherCountPeers(t *testing.T) {
peer := peers[i]
go func() {
ctx := context.WithValue(ctx, "peer", peer)
- _, err := netStore.Get(ctx, addr)
+ _, err := netStore.Get(ctx, chunk.ModeGetRequest, addr)
errC <- err
}()
}
@@ -565,21 +573,22 @@ func TestNetStoreFetcherCountPeers(t *testing.T) {
// and checks there is still exactly one fetcher for one chunk. After the chunk is delivered, it checks
// if the fetcher is closed.
func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
- chunk := GenerateRandomChunk(chunk.DefaultSize)
+ ch := GenerateRandomChunk(chunk.DefaultSize)
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
// FetchFunc should return a non-nil wait function, because the chunk is not available
- wait := netStore.FetchFunc(ctx, chunk.Address())
+ wait := netStore.FetchFunc(ctx, ch.Address())
if wait == nil {
t.Fatal("Expected wait function to be not nil")
}
// There should be exactly one fetcher for the chunk
- if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+ if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
}
@@ -596,12 +605,12 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
time.Sleep(100 * time.Millisecond)
// there should be still only one fetcher, because all wait calls are for the same chunk
- if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
+ if netStore.fetchers.Len() != 1 || netStore.getFetcher(ch.Address()) == nil {
t.Fatal("Expected netStore to have one fetcher for the requested chunk")
}
// Deliver the chunk with a Put
- err := netStore.Put(ctx, chunk)
+ _, err := netStore.Put(ctx, chunk.ModePutRequest, ch)
if err != nil {
t.Fatalf("Expected no err got %v", err)
}
@@ -630,7 +639,8 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
// TestNetStoreFetcherLifeCycleWithTimeout is similar to TestNetStoreFetchFuncCalledMultipleTimes,
// the only difference is that we don't deliver the chunk, just wait for the timeout
func TestNetStoreFetcherLifeCycleWithTimeout(t *testing.T) {
- netStore, fetcher := mustNewNetStoreWithFetcher(t)
+ netStore, fetcher, cleanup := newTestNetStore(t)
+ defer cleanup()
chunk := GenerateRandomChunk(chunk.DefaultSize)
diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go
index 281bbe9fe..9b0d5397b 100644
--- a/swarm/storage/pyramid.go
+++ b/swarm/storage/pyramid.go
@@ -96,12 +96,12 @@ func NewPyramidSplitterParams(addr Address, reader io.Reader, putter Putter, get
When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Address); the root hash of the entire content will fill this once processing finishes.
New chunks to store are stored using the putter which the caller provides.
*/
-func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) {
- return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, chunk.DefaultSize)).Split(ctx)
+func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter, tag *chunk.Tag) (Address, func(context.Context) error, error) {
+ return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, chunk.DefaultSize), tag).Split(ctx)
}
-func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) {
- return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, chunk.DefaultSize)).Append(ctx)
+func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter, tag *chunk.Tag) (Address, func(context.Context) error, error) {
+ return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, chunk.DefaultSize), tag).Append(ctx)
}
// Entry to create a tree node
@@ -142,6 +142,7 @@ type PyramidChunker struct {
putter Putter
getter Getter
key Address
+ tag *chunk.Tag
workerCount int64
workerLock sync.RWMutex
jobC chan *chunkJob
@@ -152,7 +153,7 @@ type PyramidChunker struct {
chunkLevel [][]*TreeEntry
}
-func NewPyramidSplitter(params *PyramidSplitterParams) (pc *PyramidChunker) {
+func NewPyramidSplitter(params *PyramidSplitterParams, tag *chunk.Tag) (pc *PyramidChunker) {
pc = &PyramidChunker{}
pc.reader = params.reader
pc.hashSize = params.hashSize
@@ -161,6 +162,7 @@ func NewPyramidSplitter(params *PyramidSplitterParams) (pc *PyramidChunker) {
pc.putter = params.putter
pc.getter = params.getter
pc.key = params.addr
+ pc.tag = tag
pc.workerCount = 0
pc.jobC = make(chan *chunkJob, 2*ChunkProcessors)
pc.wg = &sync.WaitGroup{}
@@ -273,6 +275,7 @@ func (pc *PyramidChunker) processor(ctx context.Context, id int64) {
return
}
pc.processChunk(ctx, id, job)
+ pc.tag.Inc(chunk.StateSplit)
case <-pc.quitC:
return
}
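
The splitter changes above only thread a *chunk.Tag through the chunker and bump it per processed chunk. A minimal sketch of that tag lifecycle on its own, using the Tags/Tag calls that appear elsewhere in this patch; the tag name "example-upload" is made up:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/swarm/chunk"
)

func main() {
    tags := chunk.NewTags()
    // total count 0: the number of chunks is not known up front
    tag, err := tags.New("example-upload", 0)
    if err != nil {
        panic(err)
    }
    // this is what PyramidChunker.processor now does for every processed chunk
    tag.Inc(chunk.StateSplit)
    fmt.Println("chunks split so far:", tag.Get(chunk.StateSplit))
}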
diff --git a/swarm/storage/schema.go b/swarm/storage/schema.go
deleted file mode 100644
index 91847ca0f..000000000
--- a/swarm/storage/schema.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package storage
-
-// The DB schema we want to use. The actual/current DB schema might differ
-// until migrations are run.
-const CurrentDbSchema = DbSchemaHalloween
-
-// There was a time when we had no schema at all.
-const DbSchemaNone = ""
-
-// "purity" is the first formal schema of LevelDB we release together with Swarm 0.3.5
-const DbSchemaPurity = "purity"
-
-// "halloween" is here because we had a screw in the garbage collector index.
-// Because of that we had to rebuild the GC index to get rid of erroneous
-// entries and that takes a long time. This schema is used for bookkeeping,
-// so rebuild index will run just once.
-const DbSchemaHalloween = "halloween"
diff --git a/swarm/storage/types.go b/swarm/storage/types.go
index 2f39685b4..d1d47dbe8 100644
--- a/swarm/storage/types.go
+++ b/swarm/storage/types.go
@@ -178,9 +178,7 @@ func (c ChunkData) Size() uint64 {
return binary.LittleEndian.Uint64(c[:8])
}
-type ChunkValidator interface {
- Validate(chunk Chunk) bool
-}
+type ChunkValidator = chunk.Validator
// Provides method for validation of content address in chunks
// Holds the corresponding hasher to create the address
@@ -211,20 +209,7 @@ func (v *ContentAddressValidator) Validate(ch Chunk) bool {
return bytes.Equal(hash, ch.Address())
}
-type ChunkStore interface {
- Put(ctx context.Context, ch Chunk) (err error)
- Get(rctx context.Context, ref Address) (ch Chunk, err error)
- Has(rctx context.Context, ref Address) bool
- Close()
-}
-
-// SyncChunkStore is a ChunkStore which supports syncing
-type SyncChunkStore interface {
- ChunkStore
- BinIndex(po uint8) uint64
- Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error
- FetchFunc(ctx context.Context, ref Address) func(context.Context) error
-}
+type ChunkStore = chunk.Store
// FakeChunkStore doesn't store anything, just implements the ChunkStore interface
// It can be used to inject into a hasherStore if you don't want to actually store data just do the
@@ -233,20 +218,33 @@ type FakeChunkStore struct {
}
// Put doesn't store anything it is just here to implement ChunkStore
-func (f *FakeChunkStore) Put(_ context.Context, ch Chunk) error {
- return nil
+func (f *FakeChunkStore) Put(_ context.Context, _ chunk.ModePut, ch Chunk) (bool, error) {
+ return false, nil
}
// Has doesn't do anything it is just here to implement ChunkStore
-func (f *FakeChunkStore) Has(_ context.Context, ref Address) bool {
- panic("FakeChunkStore doesn't support HasChunk")
+func (f *FakeChunkStore) Has(_ context.Context, ref Address) (bool, error) {
+ panic("FakeChunkStore doesn't support Has")
}
// Get doesn't store anything it is just here to implement ChunkStore
-func (f *FakeChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
+func (f *FakeChunkStore) Get(_ context.Context, _ chunk.ModeGet, ref Address) (Chunk, error) {
panic("FakeChunkStore doesn't support Get")
}
+func (f *FakeChunkStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
+ panic("FakeChunkStore doesn't support Set")
+}
+
+func (f *FakeChunkStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
+ panic("FakeChunkStore doesn't support LastPullSubscriptionBinID")
+}
+
+func (f *FakeChunkStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
+ panic("FakeChunkStore doesn't support SubscribePull")
+}
+
// Close doesn't store anything it is just here to implement ChunkStore
-func (f *FakeChunkStore) Close() {
+func (f *FakeChunkStore) Close() error {
+ return nil
}
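
With ChunkStore now an alias for chunk.Store, the updated FakeChunkStore has to satisfy the shared interface wherever it is injected. A compile-time assertion like the following (not in the patch, and only valid on the assumption that chunk.Store's method set matches the methods stubbed above) would catch signature drift early:

package storage

import "github.com/ethereum/go-ethereum/swarm/chunk"

// compile-time check that FakeChunkStore still implements the shared store interface
var _ chunk.Store = (*FakeChunkStore)(nil)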
diff --git a/swarm/swarm.go b/swarm/swarm.go
index 61813e23f..d004bcd2f 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -20,6 +20,7 @@ import (
"bytes"
"context"
"crypto/ecdsa"
+ "errors"
"fmt"
"io"
"math/big"
@@ -29,6 +30,11 @@ import (
"time"
"unicode"
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+
+ "github.com/ethereum/go-ethereum/swarm/storage/feed"
+ "github.com/ethereum/go-ethereum/swarm/storage/localstore"
+
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/contracts/chequebook"
@@ -48,7 +54,6 @@ import (
"github.com/ethereum/go-ethereum/swarm/pss"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/storage/mock"
"github.com/ethereum/go-ethereum/swarm/swap"
"github.com/ethereum/go-ethereum/swarm/tracing"
@@ -143,11 +148,31 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
resolver = api.NewMultiResolver(opts...)
self.dns = resolver
}
+ // check that we are not in the old database schema
+ // if so - fail and exit
+ isLegacy := localstore.IsLegacyDatabase(config.ChunkDbPath)
+
+ if isLegacy {
+ return nil, errors.New("Legacy database format detected! Please read the migration announcement at: https://github.com/ethersphere/go-ethereum/wiki/Swarm-v0.4-local-store-migration")
+ }
+
+ var feedsHandler *feed.Handler
+ fhParams := &feed.HandlerParams{}
- lstore, err := storage.NewLocalStore(config.LocalStoreParams, mockStore)
+ feedsHandler = feed.NewHandler(fhParams)
+
+ localStore, err := localstore.New(config.ChunkDbPath, config.BaseKey, &localstore.Options{
+ MockStore: mockStore,
+ Capacity: config.DbCapacity,
+ })
if err != nil {
return nil, err
}
+ lstore := chunk.NewValidatorStore(
+ localStore,
+ storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)),
+ feedsHandler,
+ )
self.netStore, err = storage.NewNetStore(lstore, nil)
if err != nil {
@@ -161,6 +186,8 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
delivery := stream.NewDelivery(to, self.netStore)
self.netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, config.DeliverySkipCheck).New
+ feedsHandler.SetStore(self.netStore)
+
if config.SwapEnabled {
balancesStore, err := state.NewDBStore(filepath.Join(config.Path, "balances.db"))
if err != nil {
@@ -177,38 +204,17 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
syncing = stream.SyncingDisabled
}
- retrieval := stream.RetrievalEnabled
- if config.LightNodeEnabled {
- retrieval = stream.RetrievalClientOnly
- }
-
registryOptions := &stream.RegistryOptions{
SkipCheck: config.DeliverySkipCheck,
Syncing: syncing,
- Retrieval: retrieval,
SyncUpdateDelay: config.SyncUpdateDelay,
MaxPeerServers: config.MaxStreamPeerServers,
}
self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, self.stateStore, registryOptions, self.swap)
+ tags := chunk.NewTags() //todo load from state store
// Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
- self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams)
-
- var feedsHandler *feed.Handler
- fhParams := &feed.HandlerParams{}
-
- feedsHandler = feed.NewHandler(fhParams)
- feedsHandler.SetStore(self.netStore)
-
- lstore.Validators = []storage.ChunkValidator{
- storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)),
- feedsHandler,
- }
-
- err = lstore.Migrate()
- if err != nil {
- return nil, err
- }
+ self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams, tags)
log.Debug("Setup local storage")
@@ -223,7 +229,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
pss.SetHandshakeController(self.ps, pss.NewHandshakeParams())
}
- self.api = api.NewAPI(self.fileStore, self.dns, feedsHandler, self.privateKey)
+ self.api = api.NewAPI(self.fileStore, self.dns, feedsHandler, self.privateKey, tags)
self.sfs = fuse.NewSwarmFS(self.api)
log.Debug("Initialized FUSE filesystem")
diff --git a/swarm/swarm_test.go b/swarm/swarm_test.go
index 2a5b28513..cf2afaec9 100644
--- a/swarm/swarm_test.go
+++ b/swarm/swarm_test.go
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/sctx"
)
// TestNewSwarm validates Swarm fields with respect to the provided configuration.
@@ -352,8 +353,11 @@ func testLocalStoreAndRetrieve(t *testing.T, swarm *Swarm, n int, randomData boo
rand.Read(slice)
}
dataPut := string(slice)
-
- ctx := context.TODO()
+ tag, err := swarm.api.Tags.New("test-local-store-and-retrieve", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ctx := sctx.SetTag(context.Background(), tag.Uid)
k, wait, err := swarm.api.Store(ctx, strings.NewReader(dataPut), int64(len(dataPut)), false)
if err != nil {
t.Fatal(err)
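
The test change above is the whole client-side tagging pattern: create a tag, put its uid on the context, then store. As a standalone sketch, assuming Tags is the *chunk.Tags created with chunk.NewTags(); the function name is hypothetical:

package main

import (
    "context"

    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/sctx"
)

// taggedContext creates a tag for an upload and attaches its uid to the
// context, so the storage layer can account chunks against that upload.
func taggedContext(tags *chunk.Tags, name string) (context.Context, *chunk.Tag, error) {
    tag, err := tags.New(name, 0)
    if err != nil {
        return nil, nil, err
    }
    return sctx.SetTag(context.Background(), tag.Uid), tag, nil
}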
diff --git a/swarm/testutil/tag.go b/swarm/testutil/tag.go
new file mode 100644
index 000000000..d9908f11b
--- /dev/null
+++ b/swarm/testutil/tag.go
@@ -0,0 +1,51 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package testutil
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/swarm/chunk"
+)
+
+// CheckTag checks that the given tag has the expected number of chunks in each state
+func CheckTag(t *testing.T, tag *chunk.Tag, split, stored, seen, total int64) {
+ t.Helper()
+ if tag == nil {
+ t.Fatal("no tag found")
+ }
+
+ tSplit := tag.Get(chunk.StateSplit)
+ if tSplit != split {
+ t.Fatalf("should have had split chunks, got %d want %d", tSplit, split)
+ }
+
+ tSeen := tag.Get(chunk.StateSeen)
+ if tSeen != seen {
+ t.Fatalf("should have had seen chunks, got %d want %d", tSeen, seen)
+ }
+
+ tStored := tag.Get(chunk.StateStored)
+ if tStored != stored {
+ t.Fatalf("mismatch stored chunks, got %d want %d", tStored, stored)
+ }
+
+ tTotal := tag.Total()
+ if tTotal != total {
+ t.Fatalf("mismatch total chunks, got %d want %d", tTotal, total)
+ }
+}
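
Finally, a sketch of how an upload test might use the new helper once chunks have been accounted against a tag; the counts 4/4/0/4 and the test body are placeholders, not taken from the patch:

package storage_test

import (
    "testing"

    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/testutil"
)

func TestUploadTagAccounting(t *testing.T) {
    t.Skip("illustrative sketch only")
    tags := chunk.NewTags()
    tag, err := tags.New("upload", 0)
    if err != nil {
        t.Fatal(err)
    }
    // ... perform an upload that increments the tag's counters ...
    testutil.CheckTag(t, tag, 4, 4, 0, 4) // split, stored, seen, total
}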