author     Ferenc Szabo <frncmx@gmail.com>    2019-02-18 14:38:14 +0800
committer  Rafael Matias <rafael@skyle.net>   2019-02-19 20:11:52 +0800
commit     88577076069664b026736d8afb9b708fb26da54a (patch)
tree       37ac206ee7ac406e42513c8c9f8749b266de503f /swarm
parent     d6c1fcbe045c94cf629a9df956bd7aef7dcf8d72 (diff)
p2p, swarm: fix node up races by granular locking (#18976)
* swarm/network: DRY out repeated giga comment

  I do not necessarily agree with the way we wait for event propagation,
  but I truly disagree with having duplicated giga comments.

* p2p/simulations: encapsulate Node.Up field so we avoid data races

  The Node.Up field was accessed concurrently without proper locking.
  There was a lock on Network, and sometimes it was used to access the
  field; other times the locking was missed and we had a data race.
  For example: https://github.com/ethereum/go-ethereum/pull/18464

  That case was solved, but there were still intermittent, hard-to-reproduce
  races. So let's solve the issue permanently.

  resolves: ethersphere/go-ethereum#1146

* p2p/simulations: fix unmarshal of simulations.Node

  Making the Node.Up field private in 13292ee897e345045fbfab3bda23a77589a271c1
  broke TestHTTPNetwork and TestHTTPSnapshot, because the default
  UnmarshalJSON does not handle unexported fields.

  Important: the fix is partial and not proper to my taste, but I cut scope
  as I think a proper fix may require a change to the current serialization
  format. New ticket: https://github.com/ethersphere/go-ethereum/issues/1177

* p2p/simulations: add a sanity test case for Node.Config UnmarshalJSON

* p2p/simulations: revert to the defer Unlock() pattern for Network

  It's a good pattern to call `defer Unlock()` right after `Lock()`, so that
  (new) error paths cannot miss the unlock. Let's get back to that pattern.
  It was abandoned in 85a79b3ad3c5863f8612d25c246bcfad339f36b7 while fixing a
  data race. That data race no longer exists, since the Node.Up field is now
  hidden behind its own lock.

* p2p/simulations: consistent naming for test providers of Node.UnmarshalJSON

* p2p/simulations: remove JSON annotations from private fields of Node,
  as unexported fields are not serialized.

* p2p/simulations: fix deadlock in Network.GetRandomDownNode()

  Problem: GetRandomDownNode() locks -> getDownNodeIDs() -> GetNodes()
  tries to lock -> deadlock.

  On the Network type, unexported functions must assume that `net.lock` is
  already acquired and must not call exported functions, which might try to
  lock again.

* p2p/simulations: ensure method conformity for Network

  The Connect* methods were moved to p2p/simulations.Network from
  swarm/network/simulation, but these new methods did not follow the pattern
  of Network methods, i.e., every exported method locks the whole Network for
  either read or write.

* p2p/simulations: fix deadlock during network shutdown

  `TestDiscoveryPersistenceSimulationSimAdapter` often deadlocked. Execution
  was stuck on two locks, `Kademlia.lock` and `p2p/simulations.Network.lock`:
  `Kademlia` was stuck in `Kademlia.EachAddr()` and `Network` in
  `Network.Stop()`. The test got stuck roughly once in every 20 executions.

  Solution: in `Network.Stop()`, `net.lock` must be released before calling
  `node.Stop()`, as stopping a node (somehow; I did not find the exact code
  path) causes `Network.InitConn()` to be called from `Kademlia.SuggestPeer()`,
  which blocks on `net.lock`.

  Related ticket: https://github.com/ethersphere/go-ethereum/issues/1223

* swarm/state: simplify if statement in DBStore.Put()

* p2p/simulations: remove faulty godoc from private function

  The comment started with the wrong method name. The method is simple,
  self-explanatory, and private, so let's just remove the comment.

(cherry picked from commit 50b872bf05b8644f14b9bea340092ced6968dd59)
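
The heart of the race fix is moving the Node up/down flag behind its own lock and exposing it only through an Up() accessor, which is why every `node.Up` read in the swarm diff below becomes a `node.Up()` call. Below is a minimal, self-contained sketch of that pattern, including the explicit JSON round-trip needed once the field is unexported. Names are illustrative; this is not the actual p2p/simulations code, which lies outside this swarm-limited diff.

// Minimal sketch of the encapsulation described in the commit message.
// Illustrative only, not the real p2p/simulations.Node.
package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

// Node keeps its up/down state behind a dedicated lock, so callers can no
// longer read or write it without synchronization.
type Node struct {
	up   bool
	upMu sync.RWMutex
}

// Up reports whether the node is currently running.
func (n *Node) Up() bool {
	n.upMu.RLock()
	defer n.upMu.RUnlock()
	return n.up
}

// SetUp records whether the node is running.
func (n *Node) SetUp(up bool) {
	n.upMu.Lock()
	defer n.upMu.Unlock()
	n.up = up
}

// nodeJSON mirrors the serialized form: encoding/json ignores unexported
// fields, so the state has to be copied in and out explicitly.
type nodeJSON struct {
	Up bool `json:"up"`
}

func (n *Node) MarshalJSON() ([]byte, error) {
	return json.Marshal(nodeJSON{Up: n.Up()})
}

func (n *Node) UnmarshalJSON(data []byte) error {
	var v nodeJSON
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	n.SetUp(v.Up)
	return nil
}

func main() {
	var n Node
	n.SetUp(true)
	b, _ := json.Marshal(&n)
	fmt.Println(string(b)) // {"up":true}

	var restored Node
	_ = json.Unmarshal(b, &restored)
	fmt.Println(restored.Up()) // true
}
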
Diffstat (limited to 'swarm')
-rw-r--r--  swarm/network/simulation/node.go              4
-rw-r--r--  swarm/network/simulation/node_test.go        63
-rw-r--r--  swarm/network/simulation/service.go           2
-rw-r--r--  swarm/network/simulation/simulation_test.go   4
-rw-r--r--  swarm/network/simulations/overlay_test.go     2
-rw-r--r--  swarm/state/dbstore.go                        9
6 files changed, 33 insertions, 51 deletions
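
Before the diff itself: the deadlock fixes described in the commit message all rely on one locking convention for p2p/simulations.Network. Exported methods take net.lock themselves, unexported helpers assume the lock is already held, and the lock is released before calling node.Stop(), which can re-enter the Network. A minimal sketch of that convention with hypothetical names follows; it is not the actual Network implementation.

// Minimal sketch (hypothetical names) of the Network locking convention:
// exported methods lock, unexported helpers assume the lock is held, and
// the lock is dropped before calling back into node code.
package main

import (
	"fmt"
	"sync"
)

type node struct{ id string }

// Stop may call back into exported network methods (as Network.Stop() ->
// node.Stop() -> ... -> Network.InitConn() did), so it must not be called
// while the network lock is held.
func (n *node) Stop() error { return nil }

type network struct {
	lock  sync.RWMutex
	nodes []*node
}

// GetNodes is exported: it acquires the lock itself.
func (net *network) GetNodes() []*node {
	net.lock.RLock()
	defer net.lock.RUnlock()
	return net.getNodes()
}

// getNodes is unexported: it assumes net.lock is already held and never
// calls an exported method, which would try to lock again and deadlock.
func (net *network) getNodes() []*node {
	return append([]*node{}, net.nodes...)
}

// Stop copies what it needs under the lock, then releases the lock before
// stopping the nodes, mirroring the shutdown-deadlock fix.
func (net *network) Stop() {
	net.lock.Lock()
	nodes := net.getNodes()
	net.lock.Unlock()

	for _, n := range nodes {
		if err := n.Stop(); err != nil {
			fmt.Println("failed to stop node", n.id, err)
		}
	}
}

func main() {
	net := &network{nodes: []*node{{id: "a"}, {id: "b"}}}
	net.Stop()
	fmt.Println(len(net.GetNodes()), "nodes")
}
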
diff --git a/swarm/network/simulation/node.go b/swarm/network/simulation/node.go
index 08eb83524..24afe51a4 100644
--- a/swarm/network/simulation/node.go
+++ b/swarm/network/simulation/node.go
@@ -44,7 +44,7 @@ func (s *Simulation) NodeIDs() (ids []enode.ID) {
func (s *Simulation) UpNodeIDs() (ids []enode.ID) {
nodes := s.Net.GetNodes()
for _, node := range nodes {
- if node.Up {
+ if node.Up() {
ids = append(ids, node.ID())
}
}
@@ -55,7 +55,7 @@ func (s *Simulation) UpNodeIDs() (ids []enode.ID) {
func (s *Simulation) DownNodeIDs() (ids []enode.ID) {
nodes := s.Net.GetNodes()
for _, node := range nodes {
- if !node.Up {
+ if !node.Up() {
ids = append(ids, node.ID())
}
}
diff --git a/swarm/network/simulation/node_test.go b/swarm/network/simulation/node_test.go
index dc9189c91..bae5afb26 100644
--- a/swarm/network/simulation/node_test.go
+++ b/swarm/network/simulation/node_test.go
@@ -54,7 +54,7 @@ func TestUpDownNodeIDs(t *testing.T) {
gotIDs = sim.UpNodeIDs()
for _, id := range gotIDs {
- if !sim.Net.GetNode(id).Up {
+ if !sim.Net.GetNode(id).Up() {
t.Errorf("node %s should not be down", id)
}
}
@@ -66,7 +66,7 @@ func TestUpDownNodeIDs(t *testing.T) {
gotIDs = sim.DownNodeIDs()
for _, id := range gotIDs {
- if sim.Net.GetNode(id).Up {
+ if sim.Net.GetNode(id).Up() {
t.Errorf("node %s should not be up", id)
}
}
@@ -112,7 +112,7 @@ func TestAddNode(t *testing.T) {
t.Fatal("node not found")
}
- if !n.Up {
+ if !n.Up() {
t.Error("node not started")
}
}
@@ -327,7 +327,7 @@ func TestStartStopNode(t *testing.T) {
if n == nil {
t.Fatal("node not found")
}
- if !n.Up {
+ if !n.Up() {
t.Error("node not started")
}
@@ -335,26 +335,17 @@ func TestStartStopNode(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if n.Up {
+ if n.Up() {
t.Error("node not stopped")
}
- // Sleep here to ensure that Network.watchPeerEvents defer function
- // has set the `node.Up = false` before we start the node again.
- // p2p/simulations/network.go:215
- //
- // The same node is stopped and started again, and upon start
- // watchPeerEvents is started in a goroutine. If the node is stopped
- // and then very quickly started, that goroutine may be scheduled later
- // then start and force `node.Up = false` in its defer function.
- // This will make this test unreliable.
- time.Sleep(time.Second)
+ waitForPeerEventPropagation()
err = sim.StartNode(id)
if err != nil {
t.Fatal(err)
}
- if !n.Up {
+ if !n.Up() {
t.Error("node not started")
}
}
@@ -377,7 +368,7 @@ func TestStartStopRandomNode(t *testing.T) {
if n == nil {
t.Fatal("node not found")
}
- if n.Up {
+ if n.Up() {
t.Error("node not stopped")
}
@@ -386,16 +377,7 @@ func TestStartStopRandomNode(t *testing.T) {
t.Fatal(err)
}
- // Sleep here to ensure that Network.watchPeerEvents defer function
- // has set the `node.Up = false` before we start the node again.
- // p2p/simulations/network.go:215
- //
- // The same node is stopped and started again, and upon start
- // watchPeerEvents is started in a goroutine. If the node is stopped
- // and then very quickly started, that goroutine may be scheduled later
- // then start and force `node.Up = false` in its defer function.
- // This will make this test unreliable.
- time.Sleep(time.Second)
+ waitForPeerEventPropagation()
idStarted, err := sim.StartRandomNode()
if err != nil {
@@ -426,21 +408,12 @@ func TestStartStopRandomNodes(t *testing.T) {
if n == nil {
t.Fatal("node not found")
}
- if n.Up {
+ if n.Up() {
t.Error("node not stopped")
}
}
- // Sleep here to ensure that Network.watchPeerEvents defer function
- // has set the `node.Up = false` before we start the node again.
- // p2p/simulations/network.go:215
- //
- // The same node is stopped and started again, and upon start
- // watchPeerEvents is started in a goroutine. If the node is stopped
- // and then very quickly started, that goroutine may be scheduled later
- // then start and force `node.Up = false` in its defer function.
- // This will make this test unreliable.
- time.Sleep(time.Second)
+ waitForPeerEventPropagation()
ids, err = sim.StartRandomNodes(2)
if err != nil {
@@ -452,8 +425,20 @@ func TestStartStopRandomNodes(t *testing.T) {
if n == nil {
t.Fatal("node not found")
}
- if !n.Up {
+ if !n.Up() {
t.Error("node not started")
}
}
}
+
+func waitForPeerEventPropagation() {
+ // Sleep here to ensure that Network.watchPeerEvents defer function
+ // has set the `node.Up() = false` before we start the node again.
+ //
+ // The same node is stopped and started again, and upon start
+ // watchPeerEvents is started in a goroutine. If the node is stopped
+ // and then very quickly started, that goroutine may be scheduled later
+ // then start and force `node.Up() = false` in its defer function.
+ // This will make this test unreliable.
+ time.Sleep(1 * time.Second)
+}
diff --git a/swarm/network/simulation/service.go b/swarm/network/simulation/service.go
index 7dd4dc6d8..0ac8149a9 100644
--- a/swarm/network/simulation/service.go
+++ b/swarm/network/simulation/service.go
@@ -52,7 +52,7 @@ func (s *Simulation) Services(name string) (services map[enode.ID]node.Service)
nodes := s.Net.GetNodes()
services = make(map[enode.ID]node.Service)
for _, node := range nodes {
- if !node.Up {
+ if !node.Up() {
continue
}
simNode, ok := node.Node.(*adapters.SimNode)
diff --git a/swarm/network/simulation/simulation_test.go b/swarm/network/simulation/simulation_test.go
index f837f9382..1d0338f59 100644
--- a/swarm/network/simulation/simulation_test.go
+++ b/swarm/network/simulation/simulation_test.go
@@ -124,7 +124,7 @@ func TestClose(t *testing.T) {
var upNodeCount int
for _, n := range sim.Net.GetNodes() {
- if n.Up {
+ if n.Up() {
upNodeCount++
}
}
@@ -140,7 +140,7 @@ func TestClose(t *testing.T) {
upNodeCount = 0
for _, n := range sim.Net.GetNodes() {
- if n.Up {
+ if n.Up() {
upNodeCount++
}
}
diff --git a/swarm/network/simulations/overlay_test.go b/swarm/network/simulations/overlay_test.go
index 05d403173..41ed5ed26 100644
--- a/swarm/network/simulations/overlay_test.go
+++ b/swarm/network/simulations/overlay_test.go
@@ -179,7 +179,7 @@ func watchSimEvents(net *simulations.Network, ctx context.Context, trigger chan
case ev := <-events:
//only catch node up events
if ev.Type == simulations.EventTypeNode {
- if ev.Node.Up {
+ if ev.Node.Up() {
log.Debug("got node up event", "event", ev, "node", ev.Node.Config.ID)
select {
case trigger <- ev.Node.Config.ID:
diff --git a/swarm/state/dbstore.go b/swarm/state/dbstore.go
index 147e34b23..1b541e785 100644
--- a/swarm/state/dbstore.go
+++ b/swarm/state/dbstore.go
@@ -88,18 +88,15 @@ func (s *DBStore) Get(key string, i interface{}) (err error) {
// Put stores an object that implements Binary for a specific key.
func (s *DBStore) Put(key string, i interface{}) (err error) {
var bytes []byte
-
- marshaler, ok := i.(encoding.BinaryMarshaler)
- if !ok {
- if bytes, err = json.Marshal(i); err != nil {
+ if marshaler, ok := i.(encoding.BinaryMarshaler); ok {
+ if bytes, err = marshaler.MarshalBinary(); err != nil {
return err
}
} else {
- if bytes, err = marshaler.MarshalBinary(); err != nil {
+ if bytes, err = json.Marshal(i); err != nil {
return err
}
}
-
return s.db.Put([]byte(key), bytes, nil)
}
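
For reference, the simplified DBStore.Put() now reads as "prefer encoding.BinaryMarshaler, fall back to JSON". Below is a small, self-contained illustration of that selection; the serialize helper and the sample types are hypothetical, and the real method additionally writes the resulting bytes to the underlying LevelDB.

// Self-contained sketch of the marshal selection performed by the
// simplified Put(): prefer encoding.BinaryMarshaler, fall back to JSON.
package main

import (
	"encoding"
	"encoding/json"
	"fmt"
)

// serialize is a hypothetical helper mirroring Put()'s type switch.
func serialize(i interface{}) ([]byte, error) {
	if marshaler, ok := i.(encoding.BinaryMarshaler); ok {
		return marshaler.MarshalBinary()
	}
	return json.Marshal(i)
}

// addr implements encoding.BinaryMarshaler, so it is stored in its binary
// form rather than as JSON.
type addr [4]byte

func (a addr) MarshalBinary() ([]byte, error) { return a[:], nil }

// config has no MarshalBinary, so it falls back to json.Marshal.
type config struct {
	Name string `json:"name"`
}

func main() {
	b, _ := serialize(addr{1, 2, 3, 4})
	fmt.Printf("%x\n", b) // 01020304

	c, _ := serialize(config{Name: "swarm"})
	fmt.Println(string(c)) // {"name":"swarm"}
}
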