| author | Péter Szilágyi <peterke@gmail.com> | 2019-02-20 16:48:12 +0800 |
| --- | --- | --- |
| committer | GitHub <noreply@github.com> | 2019-02-20 16:48:12 +0800 |
| commit | c942700427557e3ff6de3aaf6b916e2f056c1ec2 (patch) | |
| tree | cadf68e7206d6de42b1eefc6967214cf86e35ff2 /swarm/network/stream/snapshot_sync_test.go | |
| parent | 7fa3509e2eaf1a4ebc12344590e5699406690f15 (diff) | |
| parent | cde35439e058b4f9579830fec9fb65ae0b998346 (diff) | |
| download | go-tangerine-1.8.23.tar go-tangerine-1.8.23.tar.gz go-tangerine-1.8.23.tar.bz2 go-tangerine-1.8.23.tar.lz go-tangerine-1.8.23.tar.xz go-tangerine-1.8.23.tar.zst go-tangerine-1.8.23.zip | |
Merge pull request #19029 from holiman/update1.8 (tag: v1.8.23)
Update1.8
Diffstat (limited to 'swarm/network/stream/snapshot_sync_test.go')
-rw-r--r-- | swarm/network/stream/snapshot_sync_test.go | 107 |
1 file changed, 53 insertions, 54 deletions
```diff
diff --git a/swarm/network/stream/snapshot_sync_test.go b/swarm/network/stream/snapshot_sync_test.go
index 6af19c12a..b45d0aed5 100644
--- a/swarm/network/stream/snapshot_sync_test.go
+++ b/swarm/network/stream/snapshot_sync_test.go
@@ -17,11 +17,12 @@ package stream
 import (
 	"context"
+	"errors"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"runtime"
 	"sync"
-	"sync/atomic"
 	"testing"
 	"time"
 
@@ -92,6 +93,15 @@ func TestSyncingViaGlobalSync(t *testing.T) {
 	if *longrunning {
 		chnkCnt = []int{1, 8, 32, 256, 1024}
 		nodeCnt = []int{16, 32, 64, 128, 256}
+	} else if raceTest {
+		// TestSyncingViaGlobalSync allocates a lot of memory
+		// with race detector. By reducing the number of chunks
+		// and nodes, memory consumption is lower and data races
+		// are still checked, while correctness of syncing is
+		// tested with more chunks and nodes in regular (!race)
+		// tests.
+		chnkCnt = []int{4}
+		nodeCnt = []int{16}
 	} else {
 		//default test
 		chnkCnt = []int{4, 32}
@@ -107,42 +117,43 @@ func TestSyncingViaGlobalSync(t *testing.T) {
 }
 
 var simServiceMap = map[string]simulation.ServiceFunc{
-	"streamer": streamerFunc,
-}
+	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+		addr, netStore, delivery, clean, err := newNetStoreAndDeliveryWithRequestFunc(ctx, bucket, dummyRequestFromPeers)
+		if err != nil {
+			return nil, nil, err
+		}
 
-func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
-	n := ctx.Config.Node()
-	addr := network.NewAddr(n)
-	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
-	if err != nil {
-		return nil, nil, err
-	}
-	bucket.Store(bucketKeyStore, store)
-	localStore := store.(*storage.LocalStore)
-	netStore, err := storage.NewNetStore(localStore, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
-	delivery := NewDelivery(kad, netStore)
-	netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
-
-	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
-		Retrieval:       RetrievalDisabled,
-		Syncing:         SyncingAutoSubscribe,
-		SyncUpdateDelay: 3 * time.Second,
-	}, nil)
-
-	bucket.Store(bucketKeyRegistry, r)
-
-	cleanup = func() {
-		os.RemoveAll(datadir)
-		netStore.Close()
-		r.Close()
-	}
+		var dir string
+		var store *state.DBStore
+		if raceTest {
+			// Use on-disk DBStore to reduce memory consumption in race tests.
+			dir, err = ioutil.TempDir("", "swarm-stream-")
+			if err != nil {
+				return nil, nil, err
+			}
+			store, err = state.NewDBStore(dir)
+			if err != nil {
+				return nil, nil, err
+			}
+		} else {
+			store = state.NewInmemoryStore()
+		}
+
+		r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
+			Retrieval:       RetrievalDisabled,
+			Syncing:         SyncingAutoSubscribe,
+			SyncUpdateDelay: 3 * time.Second,
+		}, nil)
 
-	return r, cleanup, nil
+		bucket.Store(bucketKeyRegistry, r)
+		cleanup = func() {
+			r.Close()
+			clean()
+		}
+
+		return r, cleanup, nil
+	},
 }
 
 func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
@@ -171,36 +182,24 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
 		t.Fatal(err)
 	}
 
-	disconnections := sim.PeerEvents(
-		context.Background(),
-		sim.NodeIDs(),
-		simulation.NewPeerEventsFilter().Drop(),
-	)
-
-	var disconnected atomic.Value
-	go func() {
-		for d := range disconnections {
-			if d.Error != nil {
-				log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
-				disconnected.Store(true)
-			}
-		}
-	}()
-
 	result := runSim(conf, ctx, sim, chunkCount)
 
 	if result.Error != nil {
 		t.Fatal(result.Error)
 	}
-	if yes, ok := disconnected.Load().(bool); ok && yes {
-		t.Fatal("disconnect events received")
-	}
 	log.Info("Simulation ended")
 }
 
 func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {
-	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
+		disconnected := watchDisconnections(ctx, sim)
+		defer func() {
+			if err != nil && disconnected.bool() {
+				err = errors.New("disconnect events received")
+			}
+		}()
+
 		nodeIDs := sim.UpNodeIDs()
 		for _, n := range nodeIDs {
 			//get the kademlia overlay address from this ID
```
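A note on the `raceTest` flag that gates the reduced chunk and node counts: it is not defined anywhere in this file or in the diff above. A common way to wire such a switch is a small test file guarded by the `race` build tag, so the flag flips on only when the tests are compiled with `go test -race`. The following is a hypothetical sketch of that wiring, not code from this commit:

```go
// +build race

// Hypothetical race_test.go: compiled only when the race detector is
// enabled (go test -race). A plain `var raceTest bool` declaration would
// live in a file that is always built, defaulting to false.
package stream

func init() {
	raceTest = true
}
```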
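The rewritten `streamer` service function leans on `newNetStoreAndDeliveryWithRequestFunc`, which the diff does not show. Judging from the removed body of `streamerFunc`, it bundles the address, local store, NetStore, Kademlia, and Delivery setup behind one call. The sketch below reconstructs that shape from the deleted lines; the actual helper lives elsewhere in the package, and its exact signature and cleanup ordering are assumptions:

```go
// In package stream, reusing the test helpers and imports this file
// already has (network, storage, adapters, os, sync).
func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket *sync.Map, rf network.RequestFunc) (*network.BzzAddr, *storage.NetStore, *Delivery, func(), error) {
	n := ctx.Config.Node()
	addr := network.NewAddr(n)

	// Local chunk store in a temporary directory, as streamerFunc did.
	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	bucket.Store(bucketKeyStore, store)

	netStore, err := storage.NewNetStore(store.(*storage.LocalStore), nil)
	if err != nil {
		os.RemoveAll(datadir)
		return nil, nil, nil, nil, err
	}

	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
	delivery := NewDelivery(kad, netStore)
	netStore.NewNetFetcherFunc = network.NewFetcherFactory(rf, true).New

	// The returned cleanup tears down only what was created here; the
	// caller composes it with registry shutdown, as clean() is composed
	// with r.Close() in the diff.
	cleanup := func() {
		netStore.Close()
		os.RemoveAll(datadir)
	}
	return addr, netStore, delivery, cleanup, nil
}
```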
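The `raceTest` branch swaps `state.NewInmemoryStore()` for an on-disk `state.NewDBStore(dir)` because the race detector's bookkeeping multiplies the cost of keeping sync intervals in memory. The diff's own `var store *state.DBStore` shows that both constructors return the same `*state.DBStore` type, one backed by memory and one by a directory on disk, which is what makes the swap a one-liner at the `NewRegistry` call site. A small illustrative factory mirroring that branch; the function name and the returned dir contract are mine, not the patch's:

```go
// newIntervalsStore mirrors the raceTest branch above as a standalone
// helper. The caller is responsible for removing dir when done.
func newIntervalsStore(race bool) (store *state.DBStore, dir string, err error) {
	if !race {
		// Everything in memory: fast, and nothing to remove afterwards.
		return state.NewInmemoryStore(), "", nil
	}
	// Under -race, an on-disk store keeps memory consumption down.
	dir, err = ioutil.TempDir("", "swarm-stream-")
	if err != nil {
		return nil, "", err
	}
	store, err = state.NewDBStore(dir)
	if err != nil {
		os.RemoveAll(dir)
		return nil, "", err
	}
	return store, dir, nil
}
```

One detail visible in the hunk: the new cleanup closes the registry and calls `clean()`, but no `os.RemoveAll(dir)` appears, so removal of the temporary directory is not handled in this file.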
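Finally, the disconnect tracking moves out of `testSyncingViaGlobalSync` and into the simulation closure via `watchDisconnections`, paired with a named `(err error)` return: a deferred function can overwrite a named result, which a plain `error` return would not allow. The helper is defined elsewhere in the package; here is a sketch of how it could be built from the same `simulation` API the removed goroutine used, with the `boolean` wrapper and log wording as assumptions:

```go
// watchDisconnections subscribes to peer-drop events for every node in the
// simulation and raises a concurrency-safe flag when one arrives. The
// goroutine stops when ctx is cancelled, i.e. when the sim run ends.
func watchDisconnections(ctx context.Context, sim *simulation.Simulation) *boolean {
	disconnected := new(boolean)
	events := sim.PeerEvents(
		ctx,
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Drop(),
	)
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case d := <-events:
				if d.Error != nil {
					// Subscription error, not an actual peer drop.
					log.Error("peer drop event error", "node", d.NodeID, "err", d.Error)
					continue
				}
				log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
				disconnected.set(true)
			}
		}
	}()
	return disconnected
}

// boolean guards a bool with a mutex so the watcher goroutine and the
// deferred check in runSim can access it without a data race.
type boolean struct {
	mu sync.RWMutex
	v  bool
}

func (b *boolean) set(v bool) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.v = v
}

func (b *boolean) bool() bool {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.v
}
```

With this in place, any error returned from the closure is rewritten to `errors.New("disconnect events received")` when a drop was observed, reproducing the old `t.Fatal` behaviour inside the simulation's own error path instead of in a detached goroutine.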