path: root/swarm
author    Janos Guljas <janos@resenje.org>  2018-09-27 15:43:00 +0800
committer Janos Guljas <janos@resenje.org>  2018-09-27 15:43:00 +0800
commit    a5e6bf7eefbe6f56cf688b3542fe373c4670cb65 (patch)
tree      8b712ef8fb72b354346c7b1092261c469ebd7d8a /swarm
parent    0d5e1e7bc9ad4044a679ab5429d118b2a0e8afe7 (diff)
parent    e39a9b3480af0ac8044294f46e0e9e4c3948d23c (diff)
Merge branch 'master' into max-stream-peer-servers
Diffstat (limited to 'swarm')
-rw-r--r--  swarm/api/testdata/test0/img/logo.png   bin 18136 -> 4119 bytes
-rw-r--r--  swarm/network/fetcher.go                 24
-rw-r--r--  swarm/network/fetcher_test.go            54
-rw-r--r--  swarm/network/stream/delivery.go          6
-rw-r--r--  swarm/network/stream/messages.go         14
-rw-r--r--  swarm/network/stream/stream.go            2
-rw-r--r--  swarm/network/stream/streamer_test.go    77
-rw-r--r--  swarm/storage/mru/testutil.go             2
-rw-r--r--  swarm/storage/netstore.go                 7
-rw-r--r--  swarm/storage/netstore_test.go           11
10 files changed, 161 insertions, 36 deletions
diff --git a/swarm/api/testdata/test0/img/logo.png b/swarm/api/testdata/test0/img/logo.png
index e0fb15ab3..9557f9605 100644
--- a/swarm/api/testdata/test0/img/logo.png
+++ b/swarm/api/testdata/test0/img/logo.png
Binary files differ
diff --git a/swarm/network/fetcher.go b/swarm/network/fetcher.go
index 5b4b61c7e..6aed57e22 100644
--- a/swarm/network/fetcher.go
+++ b/swarm/network/fetcher.go
@@ -32,6 +32,8 @@ var searchTimeout = 1 * time.Second
// Also used in stream delivery.
var RequestTimeout = 10 * time.Second
+var maxHopCount uint8 = 20 // maximum number of forwarded requests (hops), to make sure requests are not forwarded forever in peer loops
+
type RequestFunc func(context.Context, *Request) (*enode.ID, chan struct{}, error)
// Fetcher is created when a chunk is not found locally. It starts a request handler loop once and
@@ -44,7 +46,7 @@ type Fetcher struct {
protoRequestFunc RequestFunc // request function fetcher calls to issue retrieve request for a chunk
addr storage.Address // the address of the chunk to be fetched
offerC chan *enode.ID // channel of sources (peer node id strings)
- requestC chan struct{}
+ requestC chan uint8 // channel for incoming requests (with the hopCount value in it)
skipCheck bool
}
@@ -53,6 +55,7 @@ type Request struct {
Source *enode.ID // nodeID of peer to request from (can be nil)
SkipCheck bool // whether to offer the chunk first or deliver directly
peersToSkip *sync.Map // peers not to request chunk from (only makes sense if source is nil)
+ HopCount uint8 // number of forwarded requests (hops)
}
// NewRequest returns a new instance of Request based on chunk address skip check and
@@ -113,7 +116,7 @@ func NewFetcher(addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher {
addr: addr,
protoRequestFunc: rf,
offerC: make(chan *enode.ID),
- requestC: make(chan struct{}),
+ requestC: make(chan uint8),
skipCheck: skipCheck,
}
}
@@ -136,7 +139,7 @@ func (f *Fetcher) Offer(ctx context.Context, source *enode.ID) {
}
// Request is called when an upstream peer requests the chunk as part of `RetrieveRequestMsg`, or from a local request through FileStore, and the node does not have the chunk locally.
-func (f *Fetcher) Request(ctx context.Context) {
+func (f *Fetcher) Request(ctx context.Context, hopCount uint8) {
// First we need to have this select to make sure that we return if context is done
select {
case <-ctx.Done():
@@ -144,10 +147,15 @@ func (f *Fetcher) Request(ctx context.Context) {
default:
}
+ if hopCount >= maxHopCount {
+ log.Debug("fetcher request hop count limit reached", "hops", hopCount)
+ return
+ }
+
// This select alone would not guarantee that we return if context is done, it could potentially
// push to requestC instead if requestC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
select {
- case f.requestC <- struct{}{}:
+ case f.requestC <- hopCount + 1:
case <-ctx.Done():
}
}
@@ -161,6 +169,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
waitC <-chan time.Time // timer channel
sources []*enode.ID // known sources, ie. peers that offered the chunk
requested bool // true if the chunk was actually requested
+ hopCount uint8
)
gone := make(chan *enode.ID) // channel to signal that a peer we requested from disconnected
@@ -183,7 +192,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
doRequest = requested
// incoming request
- case <-f.requestC:
+ case hopCount = <-f.requestC:
log.Trace("new request", "request addr", f.addr)
// 2) chunk is requested, set requested flag
// launch a request iff none has been launched yet
@@ -213,7 +222,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
// need to issue a new request
if doRequest {
var err error
- sources, err = f.doRequest(ctx, gone, peers, sources)
+ sources, err = f.doRequest(ctx, gone, peers, sources, hopCount)
if err != nil {
log.Info("unable to request", "request addr", f.addr, "err", err)
}
@@ -251,7 +260,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
// * the peer's address is added to the set of peers to skip
// * the peer's address is removed from prospective sources, and
// * a goroutine is started that reports on the gone channel if the peer disconnects (or terminates its streamer)
-func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID) ([]*enode.ID, error) {
+func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID, hopCount uint8) ([]*enode.ID, error) {
var i int
var sourceID *enode.ID
var quit chan struct{}
@@ -260,6 +269,7 @@ func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSki
Addr: f.addr,
SkipCheck: f.skipCheck,
peersToSkip: peersToSkip,
+ HopCount: hopCount,
}
foundSource := false
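The guard above is easiest to see end to end: Request now refuses to enqueue once the hop budget is spent, and otherwise forwards an incremented count through requestC to run. Below is a minimal, self-contained Go sketch of that pattern; forward, the buffered channel, and main are illustrative stand-ins, not names from the patch.

package main

import (
	"context"
	"fmt"
)

const maxHopCount uint8 = 20 // same cap the patch adds in fetcher.go

// forward models Fetcher.Request: it drops a request once it has been
// relayed maxHopCount times, otherwise re-enqueues it with hopCount+1.
func forward(ctx context.Context, requestC chan uint8, hopCount uint8) {
	if hopCount >= maxHopCount {
		fmt.Println("dropping request, hop limit reached:", hopCount)
		return
	}
	select {
	case requestC <- hopCount + 1: // the handler sees the incremented count
	case <-ctx.Done():
	}
}

func main() {
	requestC := make(chan uint8, 1)
	forward(context.Background(), requestC, 0)
	fmt.Println("hop count seen by handler:", <-requestC) // 1
	forward(context.Background(), requestC, maxHopCount)  // dropped
}

Because every relay increments the count, a request circulating in a peer loop dies after at most 20 forwards instead of bouncing forever.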
diff --git a/swarm/network/fetcher_test.go b/swarm/network/fetcher_test.go
index b2316b097..3a926f475 100644
--- a/swarm/network/fetcher_test.go
+++ b/swarm/network/fetcher_test.go
@@ -33,7 +33,7 @@ type mockRequester struct {
// requests []Request
requestC chan *Request // when a request is coming it is pushed to requestC
waitTimes []time.Duration // with waitTimes[i] you can define how much to wait on the ith request (optional)
- ctr int //counts the number of requests
+ count int //counts the number of requests
quitC chan struct{}
}
@@ -47,9 +47,9 @@ func newMockRequester(waitTimes ...time.Duration) *mockRequester {
func (m *mockRequester) doRequest(ctx context.Context, request *Request) (*enode.ID, chan struct{}, error) {
waitTime := time.Duration(0)
- if m.ctr < len(m.waitTimes) {
- waitTime = m.waitTimes[m.ctr]
- m.ctr++
+ if m.count < len(m.waitTimes) {
+ waitTime = m.waitTimes[m.count]
+ m.count++
}
time.Sleep(waitTime)
m.requestC <- request
@@ -83,7 +83,7 @@ func TestFetcherSingleRequest(t *testing.T) {
go fetcher.run(ctx, peersToSkip)
rctx := context.Background()
- fetcher.Request(rctx)
+ fetcher.Request(rctx, 0)
select {
case request := <-requester.requestC:
@@ -100,6 +100,11 @@ func TestFetcherSingleRequest(t *testing.T) {
t.Fatalf("request.peersToSkip does not contain peer returned by the request function")
}
+ // hopCount in the forwarded request should be incremented
+ if request.HopCount != 1 {
+ t.Fatalf("Expected request.HopCount 1 got %v", request.HopCount)
+ }
+
// fetch should trigger a request, if it doesn't happen in time, test should fail
case <-time.After(200 * time.Millisecond):
t.Fatalf("fetch timeout")
@@ -123,7 +128,7 @@ func TestFetcherCancelStopsFetcher(t *testing.T) {
rctx, rcancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer rcancel()
// we call Request with an active context
- fetcher.Request(rctx)
+ fetcher.Request(rctx, 0)
// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
select {
@@ -151,7 +156,7 @@ func TestFetcherCancelStopsRequest(t *testing.T) {
rcancel()
// we call Request with a cancelled context
- fetcher.Request(rctx)
+ fetcher.Request(rctx, 0)
// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
select {
@@ -162,7 +167,7 @@ func TestFetcherCancelStopsRequest(t *testing.T) {
// if there is another Request with active context, there should be a request, because the fetcher itself is not cancelled
rctx = context.Background()
- fetcher.Request(rctx)
+ fetcher.Request(rctx, 0)
select {
case <-requester.requestC:
@@ -200,7 +205,7 @@ func TestFetcherOfferUsesSource(t *testing.T) {
// call Request after the Offer
rctx = context.Background()
- fetcher.Request(rctx)
+ fetcher.Request(rctx, 0)
// there should be exactly 1 request coming from fetcher
var request *Request
@@ -241,7 +246,7 @@ func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
// call Request first
rctx := context.Background()
- fetcher.Request(rctx)
+ fetcher.Request(rctx, 0)
// there should be a request coming from fetcher
var request *Request
@@ -296,7 +301,7 @@ func TestFetcherRetryOnTimeout(t *testing.T) {
// call the fetch function with an active context
rctx := context.Background()
- fetcher.Request(rctx)
+ fetcher.Request(rctx, 0)
// after 100ms the first request should be initiated
time.Sleep(100 * time.Millisecond)
@@ -338,7 +343,7 @@ func TestFetcherFactory(t *testing.T) {
fetcher := fetcherFactory.New(context.Background(), addr, peersToSkip)
- fetcher.Request(context.Background())
+ fetcher.Request(context.Background(), 0)
// check if the created fetchFunction really starts a fetcher and initiates a request
select {
@@ -368,7 +373,7 @@ func TestFetcherRequestQuitRetriesRequest(t *testing.T) {
go fetcher.run(ctx, peersToSkip)
rctx := context.Background()
- fetcher.Request(rctx)
+ fetcher.Request(rctx, 0)
select {
case <-requester.requestC:
@@ -457,3 +462,26 @@ func TestRequestSkipPeerPermanent(t *testing.T) {
t.Errorf("peer not skipped")
}
}
+
+func TestFetcherMaxHopCount(t *testing.T) {
+ requester := newMockRequester()
+ addr := make([]byte, 32)
+ fetcher := NewFetcher(addr, requester.doRequest, true)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ peersToSkip := &sync.Map{}
+
+ go fetcher.run(ctx, peersToSkip)
+
+ rctx := context.Background()
+ fetcher.Request(rctx, maxHopCount)
+
+ // if hopCount is already at max no request should be initiated
+ select {
+ case <-requester.requestC:
+ t.Fatalf("cancelled fetcher initiated request")
+ case <-time.After(200 * time.Millisecond):
+ }
+}
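TestFetcherMaxHopCount asserts a negative: that no request is ever initiated. With channels the only way to do that is to race the request channel against a timer, as in this hypothetical helper distilled from the tests above (assertNoRequest is not part of the patch):

package network

import (
	"testing"
	"time"
)

// assertNoRequest fails if anything arrives on requestC before the window
// elapses; if the timer fires first, we conclude no request was initiated.
func assertNoRequest(t *testing.T, requestC <-chan struct{}, window time.Duration) {
	t.Helper()
	select {
	case <-requestC:
		t.Fatal("unexpected request initiated")
	case <-time.After(window):
		// timer fired first: the guard held for the whole window
	}
}

Note that timing-based negative assertions can only bound, not prove, absence; the 200ms window used in these tests trades a small flakiness risk for speed.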
diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go
index 431136ab1..c2adb1009 100644
--- a/swarm/network/stream/delivery.go
+++ b/swarm/network/stream/delivery.go
@@ -128,6 +128,7 @@ func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, err
type RetrieveRequestMsg struct {
Addr storage.Address
SkipCheck bool
+ HopCount uint8
}
func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *RetrieveRequestMsg) error {
@@ -148,7 +149,9 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
var cancel func()
// TODO: do something with this hardcoded timeout, maybe use TTL in the future
- ctx, cancel = context.WithTimeout(context.WithValue(ctx, "peer", sp.ID().String()), network.RequestTimeout)
+ ctx = context.WithValue(ctx, "peer", sp.ID().String())
+ ctx = context.WithValue(ctx, "hopcount", req.HopCount)
+ ctx, cancel = context.WithTimeout(ctx, network.RequestTimeout)
go func() {
select {
@@ -247,6 +250,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
err := sp.SendPriority(ctx, &RetrieveRequestMsg{
Addr: req.Addr,
SkipCheck: req.SkipCheck,
+ HopCount: req.HopCount,
}, Top)
if err != nil {
return nil, nil, err
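With this change the hop count makes a full round trip: it arrives on the wire in RetrieveRequestMsg, is stashed in the request context by handleRetrieveRequestMsg, and is re-emitted in the outgoing RetrieveRequestMsg by RequestFromPeers. A compressed sketch of the handler side, where the struct is a trimmed stand-in for the real message type:

package main

import (
	"context"
	"fmt"
)

// Trimmed-down stand-in for the message type in the patch (illustrative only).
type RetrieveRequestMsg struct {
	Addr      []byte
	SkipCheck bool
	HopCount  uint8
}

// handle mirrors handleRetrieveRequestMsg: the wire-level HopCount is moved
// into the request context so storage-layer code can read it without adding
// a parameter to every intermediate call. String context keys mirror the
// patch, though go vet generally prefers an unexported key type.
func handle(ctx context.Context, req *RetrieveRequestMsg) context.Context {
	return context.WithValue(ctx, "hopcount", req.HopCount)
}

func main() {
	ctx := handle(context.Background(), &RetrieveRequestMsg{HopCount: 4})
	// netstore.go later recovers it with a comma-ok assertion (zero if absent).
	hopCount, _ := ctx.Value("hopcount").(uint8)
	fmt.Println(hopCount) // 4
}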
diff --git a/swarm/network/stream/messages.go b/swarm/network/stream/messages.go
index 1e47b7cf9..74c785d58 100644
--- a/swarm/network/stream/messages.go
+++ b/swarm/network/stream/messages.go
@@ -26,7 +26,7 @@ import (
bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage"
- opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go"
)
var syncBatchTimeout = 30 * time.Second
@@ -197,10 +197,16 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
if err != nil {
return err
}
+
hashes := req.Hashes
- want, err := bv.New(len(hashes) / HashSize)
+ lenHashes := len(hashes)
+ if lenHashes%HashSize != 0 {
+ return fmt.Errorf("error invalid hashes length (len: %v)", lenHashes)
+ }
+
+ want, err := bv.New(lenHashes / HashSize)
if err != nil {
- return fmt.Errorf("error initialising bitvector of length %v: %v", len(hashes)/HashSize, err)
+ return fmt.Errorf("error initialising bitvector of length %v: %v", lenHashes/HashSize, err)
}
ctr := 0
@@ -208,7 +214,7 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)
ctx = context.WithValue(ctx, "source", p.ID().String())
- for i := 0; i < len(hashes); i += HashSize {
+ for i := 0; i < lenHashes; i += HashSize {
hash := hashes[i : i+HashSize]
if wait := c.NeedData(ctx, hash); wait != nil {
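The new length check closes an out-of-bounds read: without it, a payload whose length is not a multiple of HashSize would make the final hashes[i:i+HashSize] slice past the end of the buffer and panic, letting a remote peer crash the handler. A standalone sketch of the guard (splitHashes is illustrative, not from the patch):

package main

import "fmt"

const HashSize = 32 // SHA3-256 digest length, as in the swarm code

// splitHashes shows the guard added to handleOfferedHashesMsg: reject any
// payload that is not a whole number of hashes before slicing it, so
// hashes[i:i+HashSize] can never read past the end of the buffer.
func splitHashes(hashes []byte) ([][]byte, error) {
	if len(hashes)%HashSize != 0 {
		return nil, fmt.Errorf("invalid hashes length (len: %v)", len(hashes))
	}
	out := make([][]byte, 0, len(hashes)/HashSize)
	for i := 0; i < len(hashes); i += HashSize {
		out = append(out, hashes[i:i+HashSize])
	}
	return out, nil
}

func main() {
	_, err := splitHashes(make([]byte, 40)) // 40 % 32 != 0, like corruptHashes below
	fmt.Println(err)                        // invalid hashes length (len: 40)
}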
diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go
index 3b1b11d36..1eda06c6a 100644
--- a/swarm/network/stream/stream.go
+++ b/swarm/network/stream/stream.go
@@ -642,7 +642,7 @@ func (c *clientParams) clientCreated() {
// Spec is the spec of the streamer protocol
var Spec = &protocols.Spec{
Name: "stream",
- Version: 6,
+ Version: 7,
MaxMsgSize: 10 * 1024 * 1024,
Messages: []interface{}{
UnsubscribeMsg{},
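The version bump is required rather than cosmetic: adding HopCount to RetrieveRequestMsg changes the message's RLP encoding, so a Version-6 node could not decode retrieve requests from a Version-7 node. Advertising the new version in the Spec lets the protocol handshake reject mismatched peers up front instead of failing on the first message.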
diff --git a/swarm/network/stream/streamer_test.go b/swarm/network/stream/streamer_test.go
index 04366cd39..0bdebefa7 100644
--- a/swarm/network/stream/streamer_test.go
+++ b/swarm/network/stream/streamer_test.go
@@ -19,6 +19,7 @@ package stream
import (
"bytes"
"context"
+ "errors"
"strconv"
"testing"
"time"
@@ -56,11 +57,12 @@ func TestStreamerRequestSubscription(t *testing.T) {
}
var (
- hash0 = sha3.Sum256([]byte{0})
- hash1 = sha3.Sum256([]byte{1})
- hash2 = sha3.Sum256([]byte{2})
- hashesTmp = append(hash0[:], hash1[:]...)
- hashes = append(hashesTmp, hash2[:]...)
+ hash0 = sha3.Sum256([]byte{0})
+ hash1 = sha3.Sum256([]byte{1})
+ hash2 = sha3.Sum256([]byte{2})
+ hashesTmp = append(hash0[:], hash1[:]...)
+ hashes = append(hashesTmp, hash2[:]...)
+ corruptHashes = append(hashes[:40])
)
type testClient struct {
@@ -460,6 +462,71 @@ func TestStreamerUpstreamSubscribeLiveAndHistory(t *testing.T) {
}
}
+func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) {
+ tester, streamer, _, teardown, err := newStreamerTester(t, nil)
+ defer teardown()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stream := NewStream("foo", "", true)
+
+ var tc *testClient
+
+ streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
+ tc = newTestClient(t)
+ return tc, nil
+ })
+
+ node := tester.Nodes[0]
+
+ err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+
+ err = tester.TestExchanges(p2ptest.Exchange{
+ Label: "Subscribe message",
+ Expects: []p2ptest.Expect{
+ {
+ Code: 4,
+ Msg: &SubscribeMsg{
+ Stream: stream,
+ History: NewRange(5, 8),
+ Priority: Top,
+ },
+ Peer: node.ID(),
+ },
+ },
+ },
+ p2ptest.Exchange{
+ Label: "Corrupt offered hash message",
+ Triggers: []p2ptest.Trigger{
+ {
+ Code: 1,
+ Msg: &OfferedHashesMsg{
+ HandoverProof: &HandoverProof{
+ Handover: &Handover{},
+ },
+ Hashes: corruptHashes,
+ From: 5,
+ To: 8,
+ Stream: stream,
+ },
+ Peer: node.ID(),
+ },
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedError := errors.New("Message handler error: (msg code 1): error invalid hashes length (len: 40)")
+ if err := tester.TestDisconnected(&p2ptest.Disconnect{Peer: tester.Nodes[0].ID(), Error: expectedError}); err != nil {
+ t.Fatal(err)
+ }
+}
+
func TestStreamerDownstreamOfferedHashesMsgExchange(t *testing.T) {
tester, streamer, _, teardown, err := newStreamerTester(t, nil)
defer teardown()
diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go
index 936132d40..7a5a9e4d9 100644
--- a/swarm/storage/mru/testutil.go
+++ b/swarm/storage/mru/testutil.go
@@ -40,7 +40,7 @@ func (t *TestHandler) Close() {
type mockNetFetcher struct{}
-func (m *mockNetFetcher) Request(ctx context.Context) {
+func (m *mockNetFetcher) Request(ctx context.Context, hopCount uint8) {
}
func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) {
}
diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go
index a3a552232..16bc48a9a 100644
--- a/swarm/storage/netstore.go
+++ b/swarm/storage/netstore.go
@@ -34,7 +34,7 @@ type (
)
type NetFetcher interface {
- Request(ctx context.Context)
+ Request(ctx context.Context, hopCount uint8)
Offer(ctx context.Context, source *enode.ID)
}
@@ -263,6 +263,9 @@ func (f *fetcher) Fetch(rctx context.Context) (Chunk, error) {
// If there is a source in the context then it is an offer, otherwise a request
sourceIF := rctx.Value("source")
+
+ hopCount, _ := rctx.Value("hopcount").(uint8)
+
if sourceIF != nil {
var source enode.ID
if err := source.UnmarshalText([]byte(sourceIF.(string))); err != nil {
@@ -270,7 +273,7 @@ func (f *fetcher) Fetch(rctx context.Context) (Chunk, error) {
}
f.netFetcher.Offer(rctx, &source)
} else {
- f.netFetcher.Request(rctx)
+ f.netFetcher.Request(rctx, hopCount)
}
// wait until either the chunk is delivered or the context is done
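On the consumer side, note what the comma-ok assertion does when nothing ever set "hopcount", which is the case for purely local requests arriving through FileStore: the assertion fails quietly and hopCount gets the uint8 zero value, so local requests start with a fresh hop budget. A tiny sketch of that default:

package main

import (
	"context"
	"fmt"
)

func main() {
	// No "hopcount" was ever stored: the comma-ok type assertion yields
	// the zero value instead of panicking, so local requests start at 0.
	ctx := context.Background()
	hopCount, ok := ctx.Value("hopcount").(uint8)
	fmt.Println(hopCount, ok) // 0 false
}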
diff --git a/swarm/storage/netstore_test.go b/swarm/storage/netstore_test.go
index b734c117b..8a09fa5ae 100644
--- a/swarm/storage/netstore_test.go
+++ b/swarm/storage/netstore_test.go
@@ -40,6 +40,7 @@ type mockNetFetcher struct {
offerCalled bool
quit <-chan struct{}
ctx context.Context
+ hopCounts []uint8
}
func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) {
@@ -47,7 +48,7 @@ func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) {
m.sources = append(m.sources, source)
}
-func (m *mockNetFetcher) Request(ctx context.Context) {
+func (m *mockNetFetcher) Request(ctx context.Context, hopCount uint8) {
m.requestCalled = true
var peers []Address
m.peers.Range(func(key interface{}, _ interface{}) bool {
@@ -55,6 +56,7 @@ func (m *mockNetFetcher) Request(ctx context.Context) {
return true
})
m.peersPerRequest = append(m.peersPerRequest, peers)
+ m.hopCounts = append(m.hopCounts, hopCount)
}
type mockNetFetchFuncFactory struct {
@@ -412,7 +414,8 @@ func TestNetStoreGetCallsRequest(t *testing.T) {
chunk := GenerateRandomChunk(ch.DefaultSize)
- ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+ ctx := context.WithValue(context.Background(), "hopcount", uint8(5))
+ ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
defer cancel()
// We call get for a not available chunk, it will timeout because the chunk is not delivered
@@ -426,6 +429,10 @@ func TestNetStoreGetCallsRequest(t *testing.T) {
if !fetcher.requestCalled {
t.Fatal("Expected NetFetcher.Request to be called")
}
+
+ if fetcher.hopCounts[0] != 5 {
+ t.Fatalf("Expected NetFetcher.Request be called with hopCount 5, got %v", fetcher.hopCounts[0])
+ }
}
// TestNetStoreGetCallsOffer tests if Get created a request on the NetFetcher for an unavailable chunk