path: root/swarm/shed
author      Péter Szilágyi <peterke@gmail.com>    2019-05-10 19:09:01 +0800
committer   GitHub <noreply@github.com>    2019-05-10 19:09:01 +0800
commit      494f5d448a1685d5de4cb1524b863cd1fc9a13b0 (patch)
tree        4db9d1afe4910c888f3488cd93e8537501d88314 /swarm/shed
parent      c94d582aa781b26412ba7d570f6707d193303a02 (diff)
parent      9b1543c282f39d452f611eeee0307bdf828e8bc2 (diff)
Merge pull request #19550 from ethersphere/swarm-rather-stable
swarm v0.4-rc1
Diffstat (limited to 'swarm/shed')
-rw-r--r--    swarm/shed/db.go        71
-rw-r--r--    swarm/shed/index.go      8
-rw-r--r--    swarm/shed/schema.go     2
3 files changed, 35 insertions, 46 deletions
diff --git a/swarm/shed/db.go b/swarm/shed/db.go
index 8c11bf48b..6fc520866 100644
--- a/swarm/shed/db.go
+++ b/swarm/shed/db.go
@@ -45,16 +45,7 @@ const (
// It provides a schema functionality to store fields and indexes
// information about naming and types.
type DB struct {
- ldb *leveldb.DB
-
- compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
- compReadMeter metrics.Meter // Meter for measuring the data read during compaction
- compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
- writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
- writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction
- diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read
- diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
-
+ ldb *leveldb.DB
quit chan struct{} // Quit channel to stop the metrics collection before closing the database
}
@@ -86,13 +77,10 @@ func NewDB(path string, metricsPrefix string) (db *DB, err error) {
}
}
- // Configure meters for DB
- db.configure(metricsPrefix)
-
// Create a quit channel for the periodic metrics collector and run it
db.quit = make(chan struct{})
- go db.meter(10 * time.Second)
+ go db.meter(metricsPrefix, 10*time.Second)
return db, nil
}
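For orientation, a minimal usage sketch of the API this hunk touches, assuming the upstream go-ethereum import path; the database path and metrics prefix below are made up for illustration. After this change NewDB no longer wires meters into the DB struct, it only forwards the prefix to the meter goroutine it starts.

package main

import (
        "log"

        "github.com/ethereum/go-ethereum/swarm/shed"
)

func main() {
        // Hypothetical path and metrics prefix.
        db, err := shed.NewDB("/tmp/shed-example", "swarm/shed/example/")
        if err != nil {
                log.Fatal(err)
        }
        defer db.Close()
}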
@@ -169,19 +157,22 @@ func (db *DB) Close() (err error) {
return db.ldb.Close()
}
-// Configure configures the database metrics collectors
-func (db *DB) configure(prefix string) {
- // Initialize all the metrics collector at the requested prefix
- db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
- db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
- db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
- db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
- db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
- db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
- db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
-}
+func (db *DB) meter(prefix string, refresh time.Duration) {
+ // Meter for measuring the total time spent in database compaction
+ compTimeMeter := metrics.NewRegisteredMeter(prefix+"compact/time", nil)
+ // Meter for measuring the data read during compaction
+ compReadMeter := metrics.NewRegisteredMeter(prefix+"compact/input", nil)
+ // Meter for measuring the data written during compaction
+ compWriteMeter := metrics.NewRegisteredMeter(prefix+"compact/output", nil)
+ // Meter for measuring the write delay duration due to database compaction
+ writeDelayMeter := metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
+ // Meter for measuring the write delay number due to database compaction
+ writeDelayNMeter := metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
+ // Meter for measuring the effective amount of data read
+ diskReadMeter := metrics.NewRegisteredMeter(prefix+"disk/read", nil)
+ // Meter for measuring the effective amount of data written
+ diskWriteMeter := metrics.NewRegisteredMeter(prefix+"disk/write", nil)
-func (db *DB) meter(refresh time.Duration) {
// Create the counters to store current and previous compaction values
compactions := make([][]float64, 2)
for i := 0; i < 2; i++ {
@@ -234,14 +225,14 @@ func (db *DB) meter(refresh time.Duration) {
}
}
// Update all the requested meters
- if db.compTimeMeter != nil {
- db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
+ if compTimeMeter != nil {
+ compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
}
- if db.compReadMeter != nil {
- db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
+ if compReadMeter != nil {
+ compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
}
- if db.compWriteMeter != nil {
- db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
+ if compWriteMeter != nil {
+ compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
}
// Retrieve the write delay statistic
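The multipliers above convert LevelDB's cumulative compaction statistics (seconds and mebibytes) into the units the meters record (nanoseconds and bytes), and only the delta since the previous sample is marked. A standalone sketch of the same arithmetic with invented sample values, not taken from the package:

package main

import "fmt"

func main() {
        // Invented consecutive samples of cumulative compaction time (s) and
        // compaction reads (MiB), mirroring compactions[(i-1)%2] and compactions[i%2].
        prevTimeSec, curTimeSec := 1.25, 1.75
        prevReadMiB, curReadMiB := 640.0, 672.0

        deltaTimeNs := int64((curTimeSec-prevTimeSec) * 1000 * 1000 * 1000) // 500000000
        deltaReadBytes := int64((curReadMiB-prevReadMiB) * 1024 * 1024)     // 33554432

        fmt.Println(deltaTimeNs, deltaReadBytes) // the values Mark() would receive
}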
@@ -265,11 +256,11 @@ func (db *DB) meter(refresh time.Duration) {
log.Error("Failed to parse delay duration", "err", err)
continue
}
- if db.writeDelayNMeter != nil {
- db.writeDelayNMeter.Mark(delayN - delaystats[0])
+ if writeDelayNMeter != nil {
+ writeDelayNMeter.Mark(delayN - delaystats[0])
}
- if db.writeDelayMeter != nil {
- db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
+ if writeDelayMeter != nil {
+ writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
}
// If a warning that db is performing compaction has been displayed, any subsequent
// warnings will be withheld for one minute not to overwhelm the user.
@@ -300,11 +291,11 @@ func (db *DB) meter(refresh time.Duration) {
log.Error("Bad syntax of write entry", "entry", parts[1])
continue
}
- if db.diskReadMeter != nil {
- db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
+ if diskReadMeter != nil {
+ diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
}
- if db.diskWriteMeter != nil {
- db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
+ if diskWriteMeter != nil {
+ diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
}
iostats[0], iostats[1] = nRead, nWrite
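The full loop body is only partially visible in these hunks, but the quit channel created in NewDB implies the usual shape of a periodic collector: sample on a timer, return when the channel is closed. A generic sketch of that pattern, illustrative only and not the package's actual meter body:

package main

import (
        "fmt"
        "time"
)

// collect samples periodically until quit is closed; a generic pattern,
// not the body of (*DB).meter.
func collect(refresh time.Duration, quit chan struct{}, sample func()) {
        ticker := time.NewTicker(refresh)
        defer ticker.Stop()
        for {
                select {
                case <-ticker.C:
                        sample() // e.g. read LevelDB stats and mark the meters
                case <-quit:
                        return
                }
        }
}

func main() {
        quit := make(chan struct{})
        go collect(10*time.Millisecond, quit, func() { fmt.Println("sample") })
        time.Sleep(35 * time.Millisecond)
        close(quit) // presumably how Close() stops collection before closing LevelDB
}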
diff --git a/swarm/shed/index.go b/swarm/shed/index.go
index 6be018d20..38afbce4c 100644
--- a/swarm/shed/index.go
+++ b/swarm/shed/index.go
@@ -40,9 +40,7 @@ type Item struct {
Data []byte
AccessTimestamp int64
StoreTimestamp int64
- // UseMockStore is a pointer to identify
- // an unset state of the field in Join function.
- UseMockStore *bool
+ BinID uint64
}
// Merge is a helper method to construct a new
@@ -61,8 +59,8 @@ func (i Item) Merge(i2 Item) (new Item) {
if i.StoreTimestamp == 0 {
i.StoreTimestamp = i2.StoreTimestamp
}
- if i.UseMockStore == nil {
- i.UseMockStore = i2.UseMockStore
+ if i.BinID == 0 {
+ i.BinID = i2.BinID
}
return i
}
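A hedged usage sketch of Merge after this change, assuming the upstream go-ethereum import path and invented field values: zero-valued fields of the receiver are filled in from the argument, so a zero BinID is now treated as unset in the same way the timestamps are. One consequence of this convention is that a legitimate BinID of 0 cannot be distinguished from an unset one.

package main

import (
        "fmt"

        "github.com/ethereum/go-ethereum/swarm/shed"
)

func main() {
        // Invented values for illustration.
        stored := shed.Item{StoreTimestamp: 1557486541, BinID: 42}
        fresh := shed.Item{AccessTimestamp: 1557486600}

        // Zero fields of fresh are filled from stored: BinID and StoreTimestamp
        // come from stored, AccessTimestamp stays from fresh.
        merged := fresh.Merge(stored)
        fmt.Println(merged.BinID, merged.StoreTimestamp, merged.AccessTimestamp)
}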
diff --git a/swarm/shed/schema.go b/swarm/shed/schema.go
index cfb7c6d64..557d951fb 100644
--- a/swarm/shed/schema.go
+++ b/swarm/shed/schema.go
@@ -52,7 +52,7 @@ type indexSpec struct {
Name string `json:"name"`
}
-// schemaFieldKey retrives the complete LevelDB key for
+// schemaFieldKey retrieves the complete LevelDB key for
// a particular field from the schema definition.
func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
if name == "" {