author | Anton Evangelatov <anton.evangelatov@gmail.com> | 2018-12-24 00:31:32 +0800
committer | GitHub <noreply@github.com> | 2018-12-24 00:31:32 +0800
commit | 9e9fc87e70accf2b81be8772ab2ab0c914e95666 (patch)
tree | 410ccdefb083b08796a14ac03653a5319e9895d7 /swarm
parent | 335760bf0674ee553f3ca65afd6f29b6557d1b55 (diff)
swarm: remove unused/dead code (#18351)
Diffstat (limited to 'swarm')
-rw-r--r-- | swarm/api/api.go | 58
-rw-r--r-- | swarm/api/client/client.go | 5
-rw-r--r-- | swarm/api/storage.go | 20
-rw-r--r-- | swarm/api/testapi.go | 12
-rw-r--r-- | swarm/api/uri_test.go | 20
-rw-r--r-- | swarm/network/bitvector/bitvector.go | 4
-rw-r--r-- | swarm/network/protocol.go | 7
-rw-r--r-- | swarm/network/protocol_test.go | 25
-rw-r--r-- | swarm/network/stream/intervals/store_test.go | 3
-rw-r--r-- | swarm/network/stream/stream.go | 8
-rw-r--r-- | swarm/network/stream/syncer.go | 56
-rw-r--r-- | swarm/pot/address.go | 4
-rw-r--r-- | swarm/pot/pot.go | 6
-rw-r--r-- | swarm/state.go | 28
-rw-r--r-- | swarm/state/dbstore.go | 3
-rw-r--r-- | swarm/storage/chunker.go | 17
-rw-r--r-- | swarm/storage/database.go | 10
-rw-r--r-- | swarm/storage/error.go | 12
-rw-r--r-- | swarm/storage/ldbstore.go | 20
-rw-r--r-- | swarm/storage/ldbstore_test.go | 10
-rw-r--r-- | swarm/storage/mock/mock.go | 7
-rw-r--r-- | swarm/storage/pyramid.go | 5
-rw-r--r-- | swarm/storage/types.go | 49
-rw-r--r-- | swarm/swarm.go | 26
24 files changed, 19 insertions, 396 deletions
diff --git a/swarm/api/api.go b/swarm/api/api.go
index 33a8e3539..c6ca1b577 100644
--- a/swarm/api/api.go
+++ b/swarm/api/api.go
@@ -51,10 +51,6 @@ import (
 )
 
 var (
-    ErrNotFound = errors.New("not found")
-)
-
-var (
     apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
     apiResolveFail  = metrics.NewRegisteredCounter("api.resolve.fail", nil)
     apiPutCount     = metrics.NewRegisteredCounter("api.put.count", nil)
@@ -136,13 +132,6 @@ func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolv
     }
 }
 
-// MultiResolverOptionWithNameHash is unused at the time of this writing
-func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption {
-    return func(m *MultiResolver) {
-        m.nameHash = nameHash
-    }
-}
-
 // NewMultiResolver creates a new instance of MultiResolver.
 func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
     m = &MultiResolver{
@@ -173,40 +162,6 @@ func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
     return
 }
 
-// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address
-func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) {
-    rs, err := m.getResolveValidator(name)
-    if err != nil {
-        return false, err
-    }
-    var addr common.Address
-    for _, r := range rs {
-        addr, err = r.Owner(m.nameHash(name))
-        // we hide the error if it is not for the last resolver we check
-        if err == nil {
-            return addr == address, nil
-        }
-    }
-    return false, err
-}
-
-// HeaderByNumber uses the validator of the given domainname and retrieves the header for the given block number
-func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) {
-    rs, err := m.getResolveValidator(name)
-    if err != nil {
-        return nil, err
-    }
-    for _, r := range rs {
-        var header *types.Header
-        header, err = r.HeaderByNumber(ctx, blockNr)
-        // we hide the error if it is not for the last resolver we check
-        if err == nil {
-            return header, nil
-        }
-    }
-    return nil, err
-}
-
 // getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
 func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
     rs := m.resolvers[""]
@@ -224,11 +179,6 @@ func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, er
     return rs, nil
 }
 
-// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses
-func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) {
-    m.nameHash = nameHash
-}
-
 /*
 API implements webserver/file system related content storage and retrieval
 on top of the FileStore
@@ -265,9 +215,6 @@ func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt b
     return a.fileStore.Store(ctx, data, size, toEncrypt)
 }
 
-// ErrResolve is returned when an URI cannot be resolved from ENS.
-type ErrResolve error
-
 // Resolve a name into a content-addressed hash
 // where address could be an ENS name, or a content addressed hash
 func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) {
@@ -980,11 +927,6 @@ func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.A
     return a.feed.Update(ctx, request)
 }
 
-// FeedsHashSize returned the size of the digest produced by Swarm feeds' hashing function
-func (a *API) FeedsHashSize() int {
-    return a.feed.HashSize
-}
-
 // ErrCannotLoadFeedManifest is returned when looking up a feeds manifest fails
 var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest")
diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go
index f793ca8b8..5e293cca7 100644
--- a/swarm/api/client/client.go
+++ b/swarm/api/client/client.go
@@ -46,11 +46,6 @@ import (
 )
 
 var (
-    DefaultGateway = "http://localhost:8500"
-    DefaultClient  = NewClient(DefaultGateway)
-)
-
-var (
     ErrUnauthorized = errors.New("unauthorized")
 )
diff --git a/swarm/api/storage.go b/swarm/api/storage.go
index 8a48fe5bc..254375b77 100644
--- a/swarm/api/storage.go
+++ b/swarm/api/storage.go
@@ -83,23 +83,3 @@ func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) {
     }
     return &Response{mimeType, status, expsize, string(body[:size])}, err
 }
-
-// Modify(rootHash, basePath, contentHash, contentType) takes the manifest trie rooted in rootHash,
-// and merge on to it. creating an entry w conentType (mime)
-//
-// DEPRECATED: Use the HTTP API instead
-func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, contentType string) (newRootHash string, err error) {
-    uri, err := Parse("bzz:/" + rootHash)
-    if err != nil {
-        return "", err
-    }
-    addr, err := s.api.Resolve(ctx, uri.Addr)
-    if err != nil {
-        return "", err
-    }
-    addr, err = s.api.Modify(ctx, addr, path, contentHash, contentType)
-    if err != nil {
-        return "", err
-    }
-    return addr.Hex(), nil
-}
diff --git a/swarm/api/testapi.go b/swarm/api/testapi.go
index 4c7d0982b..6fec55f55 100644
--- a/swarm/api/testapi.go
+++ b/swarm/api/testapi.go
@@ -29,18 +29,6 @@ func NewControl(api *API, hive *network.Hive) *Control {
     return &Control{api, hive}
 }
 
-//func (self *Control) BlockNetworkRead(on bool) {
-//    self.hive.BlockNetworkRead(on)
-//}
-//
-//func (self *Control) SyncEnabled(on bool) {
-//    self.hive.SyncEnabled(on)
-//}
-//
-//func (self *Control) SwapEnabled(on bool) {
-//    self.hive.SwapEnabled(on)
-//}
-//
 func (c *Control) Hive() string {
     return c.hive.String()
 }
diff --git a/swarm/api/uri_test.go b/swarm/api/uri_test.go
index ea649e273..a03874c43 100644
--- a/swarm/api/uri_test.go
+++ b/swarm/api/uri_test.go
@@ -26,17 +26,15 @@ import (
 
 func TestParseURI(t *testing.T) {
     type test struct {
-        uri                       string
-        expectURI                 *URI
-        expectErr                 bool
-        expectRaw                 bool
-        expectImmutable           bool
-        expectList                bool
-        expectHash                bool
-        expectDeprecatedRaw       bool
-        expectDeprecatedImmutable bool
-        expectValidKey            bool
-        expectAddr                storage.Address
+        uri             string
+        expectURI       *URI
+        expectErr       bool
+        expectRaw       bool
+        expectImmutable bool
+        expectList      bool
+        expectHash      bool
+        expectValidKey  bool
+        expectAddr      storage.Address
     }
     tests := []test{
         {
diff --git a/swarm/network/bitvector/bitvector.go b/swarm/network/bitvector/bitvector.go
index edc7c50cb..958328502 100644
--- a/swarm/network/bitvector/bitvector.go
+++ b/swarm/network/bitvector/bitvector.go
@@ -60,7 +60,3 @@ func (bv *BitVector) Set(i int, v bool) {
 func (bv *BitVector) Bytes() []byte {
     return bv.b
 }
-
-func (bv *BitVector) Length() int {
-    return bv.len
-}
diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go
index 4b9b28cdc..a4b29239c 100644
--- a/swarm/network/protocol.go
+++ b/swarm/network/protocol.go
@@ -35,8 +35,6 @@ import (
 
 const (
     DefaultNetworkID = 3
-    // ProtocolMaxMsgSize maximum allowed message size
-    ProtocolMaxMsgSize = 10 * 1024 * 1024
     // timeout for waiting
     bzzHandshakeTimeout = 3000 * time.Millisecond
 )
@@ -250,11 +248,6 @@ func NewBzzPeer(p *protocols.Peer) *BzzPeer {
     return &BzzPeer{Peer: p, BzzAddr: NewAddr(p.Node())}
 }
 
-// LastActive returns the time the peer was last active
-func (p *BzzPeer) LastActive() time.Time {
-    return p.lastActive
-}
-
 // ID returns the peer's underlay node identifier.
 func (p *BzzPeer) ID() enode.ID {
     // This is here to resolve a method tie: both protocols.Peer and BzzAddr are embedded
diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go
index 53ceda744..58477a7b8 100644
--- a/swarm/network/protocol_test.go
+++ b/swarm/network/protocol_test.go
@@ -20,7 +20,6 @@ import (
     "flag"
     "fmt"
     "os"
-    "sync"
     "testing"
 
     "github.com/ethereum/go-ethereum/log"
@@ -44,31 +43,7 @@ func init() {
     log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
 }
 
-type testStore struct {
-    sync.Mutex
-
-    values map[string][]byte
-}
-
-func (t *testStore) Load(key string) ([]byte, error) {
-    t.Lock()
-    defer t.Unlock()
-    v, ok := t.values[key]
-    if !ok {
-        return nil, fmt.Errorf("key not found: %s", key)
-    }
-    return v, nil
-}
-
-func (t *testStore) Save(key string, v []byte) error {
-    t.Lock()
-    defer t.Unlock()
-    t.values[key] = v
-    return nil
-}
-
 func HandshakeMsgExchange(lhs, rhs *HandshakeMsg, id enode.ID) []p2ptest.Exchange {
-
     return []p2ptest.Exchange{
         {
             Expects: []p2ptest.Expect{
diff --git a/swarm/network/stream/intervals/store_test.go b/swarm/network/stream/intervals/store_test.go
index 0ab14c065..a36814b71 100644
--- a/swarm/network/stream/intervals/store_test.go
+++ b/swarm/network/stream/intervals/store_test.go
@@ -17,14 +17,11 @@
 package intervals
 
 import (
-    "errors"
     "testing"
 
     "github.com/ethereum/go-ethereum/swarm/state"
 )
 
-var ErrNotFound = errors.New("not found")
-
 // TestInmemoryStore tests basic functionality of InmemoryStore.
 func TestInmemoryStore(t *testing.T) {
     testStore(t, state.NewInmemoryStore())
diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go
index 32e107823..090bef8d1 100644
--- a/swarm/network/stream/stream.go
+++ b/swarm/network/stream/stream.go
@@ -388,14 +388,6 @@ func (r *Registry) Quit(peerId enode.ID, s Stream) error {
     return peer.Send(context.TODO(), msg)
 }
 
-func (r *Registry) NodeInfo() interface{} {
-    return nil
-}
-
-func (r *Registry) PeerInfo(id enode.ID) interface{} {
-    return nil
-}
-
 func (r *Registry) Close() error {
     return r.intervalsStore.Close()
 }
diff --git a/swarm/network/stream/syncer.go b/swarm/network/stream/syncer.go
index 4bfbac8b0..4fb8b9342 100644
--- a/swarm/network/stream/syncer.go
+++ b/swarm/network/stream/syncer.go
@@ -127,19 +127,9 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 
 // SwarmSyncerClient
 type SwarmSyncerClient struct {
-    sessionAt     uint64
-    nextC         chan struct{}
-    sessionRoot   storage.Address
-    sessionReader storage.LazySectionReader
-    retrieveC     chan *storage.Chunk
-    storeC        chan *storage.Chunk
-    store         storage.SyncChunkStore
-    // chunker               storage.Chunker
-    currentRoot   storage.Address
-    requestFunc   func(chunk *storage.Chunk)
-    end, start    uint64
-    peer          *Peer
-    stream        Stream
+    store  storage.SyncChunkStore
+    peer   *Peer
+    stream Stream
 }
 
 // NewSwarmSyncerClient is a contructor for provable data exchange syncer
@@ -209,46 +199,6 @@ func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte,
     return nil
 }
 
-func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
-    // for provable syncer currentRoot is non-zero length
-    // TODO: reenable this with putter/getter
-    // if s.chunker != nil {
-    //     if from > s.sessionAt { // for live syncing currentRoot is always updated
-    //         //expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
-    //         expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
-    //         if err != nil {
-    //             return nil, err
-    //         }
-    //         if !bytes.Equal(root, expRoot) {
-    //             return nil, fmt.Errorf("HandoverProof mismatch")
-    //         }
-    //         s.currentRoot = root
-    //     } else {
-    //         expHashes := make([]byte, len(hashes))
-    //         _, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
-    //         if err != nil && err != io.EOF {
-    //             return nil, err
-    //         }
-    //         if !bytes.Equal(expHashes, hashes) {
-    //             return nil, errors.New("invalid proof")
-    //         }
-    //     }
-    //     return nil, nil
-    // }
-    s.end += uint64(len(hashes)) / HashSize
-    takeover := &Takeover{
-        Stream: stream,
-        Start:  s.start,
-        End:    s.end,
-        Root:   root,
-    }
-    // serialise and sign
-    return &TakeoverProof{
-        Takeover: takeover,
-        Sig:      nil,
-    }, nil
-}
-
 func (s *SwarmSyncerClient) Close() {}
 
 // base for parsing and formating sync bin key
diff --git a/swarm/pot/address.go b/swarm/pot/address.go
index 728dac14e..5af3381a7 100644
--- a/swarm/pot/address.go
+++ b/swarm/pot/address.go
@@ -41,10 +41,6 @@ func NewAddressFromBytes(b []byte) Address {
     return Address(h)
 }
 
-func (a Address) IsZero() bool {
-    return a.Bin() == zerosBin
-}
-
 func (a Address) String() string {
     return fmt.Sprintf("%x", a[:])
 }
diff --git a/swarm/pot/pot.go b/swarm/pot/pot.go
index dfda84804..a71219779 100644
--- a/swarm/pot/pot.go
+++ b/swarm/pot/pot.go
@@ -477,7 +477,7 @@ func (t *Pot) each(f func(Val, int) bool) bool {
     return f(t.pin, t.po)
 }
 
-// EachFrom called with (f, start) is a synchronous iterator over the elements of a Pot
+// eachFrom called with (f, start) is a synchronous iterator over the elements of a Pot
 // within the inclusive range starting from proximity order start
 // the function argument is passed the value and the proximity order wrt the root pin
 // it does NOT include the pinned item of the root
@@ -485,10 +485,6 @@ func (t *Pot) each(f func(Val, int) bool) bool {
 // proximity > pinnedness
 // the iteration ends if the function return false or there are no more elements
 // end of a po range can be implemented since po is passed to the function
-func (t *Pot) EachFrom(f func(Val, int) bool, po int) bool {
-    return t.eachFrom(f, po)
-}
-
 func (t *Pot) eachFrom(f func(Val, int) bool, po int) bool {
     var next bool
     _, lim := t.getPos(po)
diff --git a/swarm/state.go b/swarm/state.go
deleted file mode 100644
index 1984ab031..000000000
--- a/swarm/state.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package swarm
-
-type Voidstore struct {
-}
-
-func (self Voidstore) Load(string) ([]byte, error) {
-    return nil, nil
-}
-
-func (self Voidstore) Save(string, []byte) error {
-    return nil
-}
diff --git a/swarm/state/dbstore.go b/swarm/state/dbstore.go
index fc5dd8f7c..147e34b23 100644
--- a/swarm/state/dbstore.go
+++ b/swarm/state/dbstore.go
@@ -28,9 +28,6 @@ import (
 // ErrNotFound is returned when no results are returned from the database
 var ErrNotFound = errors.New("ErrorNotFound")
 
-// ErrInvalidArgument is returned when the argument type does not match the expected type
-var ErrInvalidArgument = errors.New("ErrorInvalidArgument")
-
 // Store defines methods required to get, set, delete values for different keys
 // and close the underlying resources.
 type Store interface {
diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go
index cbe65372a..a8bfe2d1c 100644
--- a/swarm/storage/chunker.go
+++ b/swarm/storage/chunker.go
@@ -65,10 +65,6 @@ If all is well it is possible to implement this by simply composing readers so t
 The hashing itself does use extra copies and allocation though, since it does need it.
 */
 
-var (
-    errAppendOppNotSuported = errors.New("Append operation not supported")
-)
-
 type ChunkerParams struct {
     chunkSize int64
     hashSize  int64
@@ -99,7 +95,6 @@ type TreeChunker struct {
     ctx context.Context
 
     branches int64
-    hashFunc SwarmHasher
     dataSize int64
     data     io.Reader
     // calculated
@@ -365,10 +360,6 @@ func (tc *TreeChunker) runWorker(ctx context.Context) {
     }()
 }
 
-func (tc *TreeChunker) Append() (Address, func(), error) {
-    return nil, nil, errAppendOppNotSuported
-}
-
 // LazyChunkReader implements LazySectionReader
 type LazyChunkReader struct {
     ctx       context.Context
@@ -411,7 +402,6 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
     log.Debug("lazychunkreader.size", "addr", r.addr)
     if r.chunkData == nil {
         startTime := time.Now()
-
         chunkData, err := r.getter.Get(cctx, Reference(r.addr))
         if err != nil {
@@ -420,13 +410,8 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
         }
         metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime)
         r.chunkData = chunkData
-        s := r.chunkData.Size()
-        log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
-        if s < 0 {
-            return 0, errors.New("corrupt size")
-        }
-        return int64(s), nil
     }
+    s := r.chunkData.Size()
     log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
diff --git a/swarm/storage/database.go b/swarm/storage/database.go
index e25fce31f..12367b905 100644
--- a/swarm/storage/database.go
+++ b/swarm/storage/database.go
@@ -64,16 +64,6 @@ func (db *LDBDatabase) Delete(key []byte) error {
     return db.db.Delete(key, nil)
 }
 
-func (db *LDBDatabase) LastKnownTD() []byte {
-    data, _ := db.Get([]byte("LTD"))
-
-    if len(data) == 0 {
-        data = []byte{0x0}
-    }
-
-    return data
-}
-
 func (db *LDBDatabase) NewIterator() iterator.Iterator {
     metrics.GetOrRegisterCounter("ldbdatabase.newiterator", nil).Inc(1)
diff --git a/swarm/storage/error.go b/swarm/storage/error.go
index 44261c084..a9d0616fa 100644
--- a/swarm/storage/error.go
+++ b/swarm/storage/error.go
@@ -23,23 +23,15 @@ import (
 const (
     ErrInit = iota
     ErrNotFound
-    ErrIO
     ErrUnauthorized
     ErrInvalidValue
     ErrDataOverflow
     ErrNothingToReturn
-    ErrCorruptData
     ErrInvalidSignature
     ErrNotSynced
-    ErrPeriodDepth
-    ErrCnt
 )
 
 var (
-    ErrChunkNotFound = errors.New("chunk not found")
-    ErrFetching = errors.New("chunk still fetching")
-    ErrChunkInvalid = errors.New("invalid chunk")
-    ErrChunkForward = errors.New("cannot forward")
-    ErrChunkUnavailable = errors.New("chunk unavailable")
-    ErrChunkTimeout = errors.New("timeout")
+    ErrChunkNotFound = errors.New("chunk not found")
+    ErrChunkInvalid = errors.New("invalid chunk")
 )
diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go
index bd4f6b916..635d33429 100644
--- a/swarm/storage/ldbstore.go
+++ b/swarm/storage/ldbstore.go
@@ -248,10 +248,6 @@ func U64ToBytes(val uint64) []byte {
     return data
 }
 
-func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) {
-    index.Access = s.accessCnt
-}
-
 func getIndexKey(hash Address) []byte {
     hashSize := len(hash)
     key := make([]byte, hashSize+1)
@@ -777,18 +773,6 @@ func (s *LDBStore) BinIndex(po uint8) uint64 {
     return s.bucketCnt[po]
 }
 
-func (s *LDBStore) Size() uint64 {
-    s.lock.RLock()
-    defer s.lock.RUnlock()
-    return s.entryCnt
-}
-
-func (s *LDBStore) CurrentStorageIndex() uint64 {
-    s.lock.RLock()
-    defer s.lock.RUnlock()
-    return s.dataIdx
-}
-
 // Put adds a chunk to the database, adding indices and incrementing global counters.
 // If it already exists, it merely increments the access count of the existing entry.
 // Is thread safe
@@ -810,11 +794,11 @@ func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
     batch := s.batch
 
     log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
-    idata, err := s.db.Get(ikey)
+    _, err := s.db.Get(ikey)
     if err != nil {
         s.doPut(chunk, &index, po)
     }
-    idata = encodeIndex(&index)
+    idata := encodeIndex(&index)
     s.batch.Put(ikey, idata)
 
     // add the access-chunkindex index for garbage collection
diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go
index e8b9ae39b..1fe466f93 100644
--- a/swarm/storage/ldbstore_test.go
+++ b/swarm/storage/ldbstore_test.go
@@ -79,14 +79,6 @@ func testPoFunc(k Address) (ret uint8) {
     return uint8(Proximity(basekey, k[:]))
 }
 
-func (db *testDbStore) close() {
-    db.Close()
-    err := os.RemoveAll(db.dir)
-    if err != nil {
-        panic(err)
-    }
-}
-
 func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
     db, cleanup, err := newTestDbStore(mock, true)
     defer cleanup()
@@ -453,7 +445,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
     log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
 
     for i := 0; i < n; i++ {
-        ret, err := ldb.Get(nil, chunks[i].Address())
+        ret, err := ldb.Get(context.TODO(), chunks[i].Address())
 
         if i%2 == 0 {
             // expect even chunks to be missing
diff --git a/swarm/storage/mock/mock.go b/swarm/storage/mock/mock.go
index 1fb71b70a..626ba3fe1 100644
--- a/swarm/storage/mock/mock.go
+++ b/swarm/storage/mock/mock.go
@@ -103,13 +103,6 @@ type Exporter interface {
     Export(w io.Writer) (n int, err error)
 }
 
-// ImportExporter is an interface for importing and exporting
-// mock store data to and from a tar archive.
-type ImportExporter interface {
-    Importer
-    Exporter
-}
-
 // ExportedChunk is the structure that is saved in tar archive for
 // each chunk as JSON-encoded bytes.
 type ExportedChunk struct {
diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go
index f74eef06b..e5bd7a76a 100644
--- a/swarm/storage/pyramid.go
+++ b/swarm/storage/pyramid.go
@@ -71,11 +71,6 @@ const (
     splitTimeout = time.Minute * 5
 )
 
-const (
-    DataChunk = 0
-    TreeChunk = 1
-)
-
 type PyramidSplitterParams struct {
     SplitterParams
     getter Getter
diff --git a/swarm/storage/types.go b/swarm/storage/types.go
index 322d95c47..8bfb34815 100644
--- a/swarm/storage/types.go
+++ b/swarm/storage/types.go
@@ -23,7 +23,6 @@ import (
     "crypto/rand"
     "encoding/binary"
     "fmt"
-    "hash"
     "io"
 
     "github.com/ethereum/go-ethereum/common"
@@ -35,50 +34,10 @@ import (
 const MaxPO = 16
 const AddressLength = 32
 
-type Hasher func() hash.Hash
 type SwarmHasher func() SwarmHash
 
-// Peer is the recorded as Source on the chunk
-// should probably not be here? but network should wrap chunk object
-type Peer interface{}
-
 type Address []byte
 
-func (a Address) Size() uint {
-    return uint(len(a))
-}
-
-func (a Address) isEqual(y Address) bool {
-    return bytes.Equal(a, y)
-}
-
-func (a Address) bits(i, j uint) uint {
-    ii := i >> 3
-    jj := i & 7
-    if ii >= a.Size() {
-        return 0
-    }
-
-    if jj+j <= 8 {
-        return uint((a[ii] >> jj) & ((1 << j) - 1))
-    }
-
-    res := uint(a[ii] >> jj)
-    jj = 8 - jj
-    j -= jj
-    for j != 0 {
-        ii++
-        if j < 8 {
-            res += uint(a[ii]&((1<<j)-1)) << jj
-            return res
-        }
-        res += uint(a[ii]) << jj
-        jj += 8
-        j -= 8
-    }
-    return res
-}
-
 // Proximity(x, y) returns the proximity order of the MSB distance between x and y
 //
 // The distance metric MSB(x, y) of two equal length byte sequences x an y is the
@@ -112,10 +71,6 @@ func Proximity(one, other []byte) (ret int) {
     return MaxPO
 }
 
-func IsZeroAddr(addr Address) bool {
-    return len(addr) == 0 || bytes.Equal(addr, ZeroAddr)
-}
-
 var ZeroAddr = Address(common.Hash{}.Bytes())
 
 func MakeHashFunc(hash string) SwarmHasher {
@@ -304,10 +259,6 @@ func (c ChunkData) Size() uint64 {
     return binary.LittleEndian.Uint64(c[:8])
 }
 
-func (c ChunkData) Data() []byte {
-    return c[8:]
-}
-
 type ChunkValidator interface {
     Validate(chunk Chunk) bool
 }
diff --git a/swarm/swarm.go b/swarm/swarm.go
index 89f4e87ef..db52675fd 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -74,8 +74,6 @@ type Swarm struct {
     bzz         *network.Bzz       // the logistic manager
     backend     chequebook.Backend // simple blockchain Backend
     privateKey  *ecdsa.PrivateKey
-    corsString  string
-    swapEnabled bool
     netStore    *storage.NetStore
     sfs         *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
     ps          *pss.Pss
@@ -86,18 +84,6 @@ type Swarm struct {
     tracerClose io.Closer
 }
 
-type SwarmAPI struct {
-    Api     *api.API
-    Backend chequebook.Backend
-}
-
-func (self *Swarm) API() *SwarmAPI {
-    return &SwarmAPI{
-        Api:     self.api,
-        Backend: self.backend,
-    }
-}
-
 // creates a new swarm service instance
 // implements node.Service
 // If mockStore is not nil, it will be used as the storage for chunk data.
@@ -479,14 +465,6 @@ func (self *Swarm) Protocols() (protos []p2p.Protocol) {
     return
 }
 
-func (self *Swarm) RegisterPssProtocol(spec *protocols.Spec, targetprotocol *p2p.Protocol, options *pss.ProtocolParams) (*pss.Protocol, error) {
-    if !pss.IsActiveProtocol {
-        return nil, fmt.Errorf("Pss protocols not available (built with !nopssprotocol tag)")
-    }
-    topic := pss.ProtocolTopic(spec)
-    return pss.RegisterProtocol(self.ps, &topic, spec, targetprotocol, options)
-}
-
 // implements node.Service
 // APIs returns the RPC API descriptors the Swarm implementation offers
 func (self *Swarm) APIs() []rpc.API {
@@ -535,10 +513,6 @@ func (self *Swarm) APIs() []rpc.API {
     return apis
 }
 
-func (self *Swarm) Api() *api.API {
-    return self.api
-}
-
 // SetChequebook ensures that the local checquebook is set up on chain.
 func (self *Swarm) SetChequebook(ctx context.Context) error {
     err := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path)
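One of the few hunks above that rewrites code instead of deleting it is in `LDBStore.Put`: the value returned by the existence lookup was never read, so it is now discarded (`_, err := s.db.Get(ikey)`) and the index entry is always freshly encoded before being written to the batch. The sketch below only illustrates that pattern; it uses a plain in-memory map and simplified stand-ins for `dpaDBIndex` and `encodeIndex`, not the real goleveldb-backed store.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// dpaDBIndex and encodeIndex are simplified stand-ins for the types in
// swarm/storage/ldbstore.go; their shapes here are illustrative only.
type dpaDBIndex struct {
	Idx    uint64
	Access uint64
}

func encodeIndex(index *dpaDBIndex) []byte {
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint64(buf[:8], index.Idx)
	binary.LittleEndian.PutUint64(buf[8:], index.Access)
	return buf
}

// put mirrors the flow of the patched Put: the lookup only answers
// "is this chunk already indexed?", its value is thrown away, and the
// index entry is re-encoded unconditionally before being written.
func put(db map[string][]byte, ikey []byte, index *dpaDBIndex) {
	if _, ok := db[string(ikey)]; !ok {
		// first time this key is seen: the real store hands off to doPut
		// (visible as context in the hunk); here we just fake a data index.
		index.Idx = uint64(len(db) + 1)
	}
	db[string(ikey)] = encodeIndex(index) // always write a freshly encoded entry
}

func main() {
	db := map[string][]byte{}
	key := []byte{0x01, 0xab}
	put(db, key, &dpaDBIndex{Access: 1})
	fmt.Printf("stored %d bytes under key %x\n", len(db[string(key)]), key)
}
```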
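The context retained in swarm/storage/types.go describes `Proximity(x, y)` as the proximity order of the MSB distance between two byte sequences, i.e. how many leading bits they share, capped at `MaxPO`. The following is a minimal sketch of that idea for illustration only; it reuses the `MaxPO = 16` constant visible above but is not the go-ethereum implementation.

```go
package main

import "fmt"

const MaxPO = 16 // cap on the returned proximity order, mirroring the constant in swarm/storage/types.go

// proximity returns the number of leading bits shared by a and b, capped at
// MaxPO. It is a simplified sketch of the idea behind storage.Proximity,
// not a drop-in replacement.
func proximity(a, b []byte) int {
	for i := range a {
		if i >= len(b) {
			break
		}
		xor := a[i] ^ b[i]
		if xor == 0 {
			continue // whole byte identical: 8 more shared bits, keep scanning
		}
		// find the first differing bit in this byte, most significant first
		for bit := 0; bit < 8; bit++ {
			if xor&(0x80>>uint(bit)) != 0 {
				po := i*8 + bit
				if po > MaxPO {
					return MaxPO
				}
				return po
			}
		}
	}
	return MaxPO // no differing bit found within the cap
}

func main() {
	fmt.Println(proximity([]byte{0xff, 0x00}, []byte{0xff, 0x80})) // 8 shared leading bits -> 8
	fmt.Println(proximity([]byte{0x80}, []byte{0x00}))             // differ in the first bit -> 0
}
```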