author | Janoš Guljaš <janos@users.noreply.github.com> | 2019-01-09 14:05:55 +0800
committer | Viktor Trón <viktor.tron@gmail.com> | 2019-01-09 14:05:55 +0800
commit | d70c4faf20d5533e30eec5cbb9b5180eb837b78c (patch)
tree | b059c20fa8093b6f8479a7706077cc24cdcaba0e /swarm/storage
parent | 81f04fa60608a67bac693879acbe086562d3970d (diff)
swarm: Fix T.Fatal inside a goroutine in tests (#18409)
* swarm/storage: fix T.Fatal inside a goroutine
* swarm/network/simulation: fix T.Fatal inside a goroutine
* swarm/network/stream: fix T.Fatal inside a goroutine
* swarm/network/simulation: consistent failures in TestPeerEventsTimeout
* swarm/network/simulation: rename sendRunSignal to triggerSimulationRun
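For context: t.Fatal and t.Fatalf call FailNow, which the testing package requires to be invoked from the goroutine running the test function. Called from a spawned goroutine, it only terminates that goroutine, so the failure can go unreported or the test can hang. Below is a minimal sketch of the error-channel pattern this change applies; the doWork helper and the test name are invented for illustration and are not taken from the swarm tests.

```go
package example

import (
	"testing"
	"time"
)

// doWork stands in for whatever asynchronous operation a test exercises.
// It is a made-up helper for this sketch, not part of the swarm code base.
func doWork() error {
	time.Sleep(10 * time.Millisecond)
	return nil // return an error here to see the failure surface in the test goroutine
}

// TestAsyncWork sketches the pattern the commit applies: the spawned
// goroutine only reports its result over a channel, and the test goroutine
// is the one that calls t.Fatal.
func TestAsyncWork(t *testing.T) {
	errC := make(chan error, 1)

	go func() {
		// Calling t.Fatal here would be wrong: FailNow must run in the
		// goroutine executing the test function, so a Fatal in this
		// goroutine only exits the goroutine and can mask the failure.
		errC <- doWork()
	}()

	select {
	case err := <-errC:
		if err != nil {
			t.Fatal(err) // safe: we are back in the test goroutine
		}
	case <-time.After(time.Second):
		t.Fatal("timed out waiting for the goroutine to report")
	}
}
```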
Diffstat (limited to 'swarm/storage')
-rw-r--r-- | swarm/storage/netstore_test.go | 122
1 file changed, 83 insertions, 39 deletions
diff --git a/swarm/storage/netstore_test.go b/swarm/storage/netstore_test.go
index 8a09fa5ae..2ed3e0752 100644
--- a/swarm/storage/netstore_test.go
+++ b/swarm/storage/netstore_test.go
@@ -20,6 +20,8 @@ import (
 	"bytes"
 	"context"
 	"crypto/rand"
+	"errors"
+	"fmt"
 	"io/ioutil"
 	"sync"
 	"testing"
@@ -114,19 +116,24 @@ func TestNetStoreGetAndPut(t *testing.T) {
 	defer cancel()
 
 	c := make(chan struct{}) // this channel ensures that the gouroutine with the Put does not run earlier than the Get
+	putErrC := make(chan error)
 	go func() {
 		<-c // wait for the Get to be called
 		time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
 
 		// check if netStore created a fetcher in the Get call for the unavailable chunk
 		if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
-			t.Fatal("Expected netStore to use a fetcher for the Get call")
+			putErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+			return
 		}
 
 		err := netStore.Put(ctx, chunk)
 		if err != nil {
-			t.Fatalf("Expected no err got %v", err)
+			putErrC <- fmt.Errorf("Expected no err got %v", err)
+			return
 		}
+
+		putErrC <- nil
 	}()
 
 	close(c)
@@ -134,6 +141,10 @@ func TestNetStoreGetAndPut(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Expected no err got %v", err)
 	}
+
+	if err := <-putErrC; err != nil {
+		t.Fatal(err)
+	}
 	// the retrieved chunk should be the same as what we Put
 	if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
 		t.Fatalf("Different chunk received than what was put")
@@ -200,14 +211,18 @@ func TestNetStoreGetTimeout(t *testing.T) {
 	defer cancel()
 
 	c := make(chan struct{}) // this channel ensures that the gouroutine does not run earlier than the Get
+	fetcherErrC := make(chan error)
 	go func() {
 		<-c // wait for the Get to be called
 		time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
 
 		// check if netStore created a fetcher in the Get call for the unavailable chunk
 		if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
-			t.Fatal("Expected netStore to use a fetcher for the Get call")
+			fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+			return
 		}
+
+		fetcherErrC <- nil
 	}()
 
 	close(c)
@@ -220,6 +235,10 @@
 		t.Fatalf("Expected context.DeadLineExceeded err got %v", err)
 	}
 
+	if err := <-fetcherErrC; err != nil {
+		t.Fatal(err)
+	}
+
 	// A fetcher was created, check if it has been removed after timeout
 	if netStore.fetchers.Len() != 0 {
 		t.Fatal("Expected netStore to remove the fetcher after timeout")
@@ -243,20 +262,29 @@ func TestNetStoreGetCancel(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 
 	c := make(chan struct{}) // this channel ensures that the gouroutine with the cancel does not run earlier than the Get
+	fetcherErrC := make(chan error, 1)
 	go func() {
 		<-c // wait for the Get to be called
 		time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
 		// check if netStore created a fetcher in the Get call for the unavailable chunk
 		if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
-			t.Fatal("Expected netStore to use a fetcher for the Get call")
+			fetcherErrC <- errors.New("Expected netStore to use a fetcher for the Get call")
+			return
 		}
+
+		fetcherErrC <- nil
 		cancel()
 	}()
 
 	close(c)
+
 	// We call Get with an unavailable chunk, so it will create a fetcher and wait for delivery
 	_, err := netStore.Get(ctx, chunk.Address())
 
+	if err := <-fetcherErrC; err != nil {
+		t.Fatal(err)
+	}
+
 	// After the context is cancelled above Get should return with an error
 	if err != context.Canceled {
 		t.Fatalf("Expected context.Canceled err got %v", err)
@@ -286,46 +314,55 @@ func TestNetStoreMultipleGetAndPut(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
 	defer cancel()
 
+	putErrC := make(chan error)
 	go func() {
 		// sleep to make sure Put is called after all the Get
 		time.Sleep(500 * time.Millisecond)
 		// check if netStore created exactly one fetcher for all Get calls
 		if netStore.fetchers.Len() != 1 {
-			t.Fatal("Expected netStore to use one fetcher for all Get calls")
+			putErrC <- errors.New("Expected netStore to use one fetcher for all Get calls")
+			return
 		}
 		err := netStore.Put(ctx, chunk)
 		if err != nil {
-			t.Fatalf("Expected no err got %v", err)
+			putErrC <- fmt.Errorf("Expected no err got %v", err)
+			return
 		}
+		putErrC <- nil
 	}()
 
+	count := 4
 	// call Get 4 times for the same unavailable chunk. The calls will be blocked until the Put above.
-	getWG := sync.WaitGroup{}
-	for i := 0; i < 4; i++ {
-		getWG.Add(1)
+	errC := make(chan error)
+	for i := 0; i < count; i++ {
 		go func() {
-			defer getWG.Done()
 			recChunk, err := netStore.Get(ctx, chunk.Address())
 			if err != nil {
-				t.Fatalf("Expected no err got %v", err)
+				errC <- fmt.Errorf("Expected no err got %v", err)
 			}
 			if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
-				t.Fatalf("Different chunk received than what was put")
+				errC <- errors.New("Different chunk received than what was put")
 			}
+			errC <- nil
 		}()
 	}
 
-	finishedC := make(chan struct{})
-	go func() {
-		getWG.Wait()
-		close(finishedC)
-	}()
+	if err := <-putErrC; err != nil {
+		t.Fatal(err)
+	}
+
+	timeout := time.After(1 * time.Second)
 
 	// The Get calls should return after Put, so no timeout expected
-	select {
-	case <-finishedC:
-	case <-time.After(1 * time.Second):
-		t.Fatalf("Timeout waiting for Get calls to return")
+	for i := 0; i < count; i++ {
+		select {
+		case err := <-errC:
+			if err != nil {
+				t.Fatal(err)
+			}
+		case <-timeout:
+			t.Fatalf("Timeout waiting for Get calls to return")
+		}
 	}
 
 	// A fetcher was created, check if it has been removed after cancel
@@ -448,7 +485,7 @@ func TestNetStoreGetCallsOffer(t *testing.T) {
 	defer cancel()
 
 	// We call get for a not available chunk, it will timeout because the chunk is not delivered
-	chunk, err := netStore.Get(ctx, chunk.Address())
+	_, err := netStore.Get(ctx, chunk.Address())
 
 	if err != context.DeadlineExceeded {
 		t.Fatalf("Expect error %v got %v", context.DeadlineExceeded, err)
@@ -542,16 +579,12 @@ func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
 		t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
 	}
 
-	// Call wait three times parallelly
-	wg := sync.WaitGroup{}
-	for i := 0; i < 3; i++ {
-		wg.Add(1)
+	// Call wait three times in parallel
+	count := 3
+	errC := make(chan error)
+	for i := 0; i < count; i++ {
 		go func() {
-			err := wait(ctx)
-			if err != nil {
-				t.Fatalf("Expected no err got %v", err)
-			}
-			wg.Done()
+			errC <- wait(ctx)
 		}()
 	}
 
@@ -570,7 +603,12 @@
 	}
 
 	// wait until all wait calls return (because the chunk is delivered)
-	wg.Wait()
+	for i := 0; i < count; i++ {
+		err := <-errC
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
 
 	// There should be no more fetchers for the delivered chunk
 	if netStore.fetchers.Len() != 0 {
@@ -606,23 +644,29 @@ func TestNetStoreFetcherLifeCycleWithTimeout(t *testing.T) {
 		t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
 	}
 
-	// Call wait three times parallelly
-	wg := sync.WaitGroup{}
-	for i := 0; i < 3; i++ {
-		wg.Add(1)
+	// Call wait three times in parallel
+	count := 3
+	errC := make(chan error)
+	for i := 0; i < count; i++ {
 		go func() {
-			defer wg.Done()
 			rctx, rcancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
 			defer rcancel()
 			err := wait(rctx)
 			if err != context.DeadlineExceeded {
-				t.Fatalf("Expected err %v got %v", context.DeadlineExceeded, err)
+				errC <- fmt.Errorf("Expected err %v got %v", context.DeadlineExceeded, err)
+				return
 			}
+			errC <- nil
 		}()
 	}
 
 	// wait until all wait calls timeout
-	wg.Wait()
+	for i := 0; i < count; i++ {
+		err := <-errC
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
 
 	// There should be no more fetchers after timeout
 	if netStore.fetchers.Len() != 0 {