-rw-r--r--  core/block_processor.go             2
-rw-r--r--  crypto/key_store_passphrase.go     13
-rw-r--r--  eth/downloader/downloader.go       30
-rw-r--r--  eth/downloader/downloader_test.go   8
-rw-r--r--  eth/handler.go                      4
-rw-r--r--  miner/worker.go                    10
-rw-r--r--  p2p/discover/udp.go                46
-rw-r--r--  p2p/discover/udp_test.go           25
8 files changed, 85 insertions(+), 53 deletions(-)
diff --git a/core/block_processor.go b/core/block_processor.go
index af47069ad..059c442cc 100644
--- a/core/block_processor.go
+++ b/core/block_processor.go
@@ -197,7 +197,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
// There can be at most two uncles
if len(block.Uncles()) > 2 {
- return nil, ValidationError("Block can only contain one uncle (contained %v)", len(block.Uncles()))
+ return nil, ValidationError("Block can only contain a maximum of 2 uncles (contained %v)", len(block.Uncles()))
}
receipts, err := sm.TransitionState(state, parent, block, false)
diff --git a/crypto/key_store_passphrase.go b/crypto/key_store_passphrase.go
index d9a5a81f9..782f92bf1 100644
--- a/crypto/key_store_passphrase.go
+++ b/crypto/key_store_passphrase.go
@@ -28,21 +28,22 @@ the private key is encrypted and on disk uses another JSON encoding.
Cryptography:
-1. Encryption key is first 16 bytes of SHA3-256 of first 16 bytes of
- scrypt derived key from user passphrase. Scrypt parameters
+1. Encryption key is first 16 bytes of scrypt derived key
+ from user passphrase. Scrypt parameters
(work factors) [1][2] are defined as constants below.
2. Scrypt salt is 32 random bytes from CSPRNG.
- It's stored in plain next to ciphertext in key file.
-3. MAC is SHA3-256 of concatenation of ciphertext and last 16 bytes of scrypt derived key.
+ It's stored in plain in the key file.
+3. MAC is SHA3-256 of concatenation of ciphertext and
+ last 16 bytes of scrypt derived key.
4. Plaintext is the EC private key bytes.
5. Encryption algo is AES 128 CBC [3][4]
6. CBC IV is 16 random bytes from CSPRNG.
- It's stored in plain next to ciphertext in key file.
+ It's stored in plain in the key file.
7. Plaintext padding is PKCS #7 [5][6]
Encoding:
-1. On disk, the ciphertext, MAC, salt and IV are encoded in a nested JSON object.
+1. On disk, the ciphertext, MAC, salt and IV are encoded in a JSON object.
cat a key file to see the structure.
2. byte arrays are base64 JSON strings.
3. The EC private key bytes are in uncompressed form [7].
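The doc comment now pins down the whole scheme, so it can be sketched end to end. Below is a minimal Go sketch of steps 1-7 as described. The scrypt work factors (1<<18, 8, 1) are placeholder assumptions rather than the constants defined in this file, and the MAC input order follows the comment's wording (ciphertext first), which the actual implementation may reverse.

package sketch

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"io"

	"golang.org/x/crypto/scrypt"
	"golang.org/x/crypto/sha3"
)

// encryptKey sketches the scheme from the doc comment above.
func encryptKey(plainKey, passphrase []byte) (ciphertext, mac, salt, iv []byte, err error) {
	// 2. Scrypt salt is 32 random bytes from CSPRNG.
	salt = make([]byte, 32)
	if _, err = io.ReadFull(rand.Reader, salt); err != nil {
		return
	}
	// 1. Derive 32 bytes; the first 16 become the AES-128 key.
	// Work factors here are placeholders, not this file's constants.
	dk, err := scrypt.Key(passphrase, salt, 1<<18, 8, 1, 32)
	if err != nil {
		return
	}
	// 7. PKCS #7: pad the plaintext to a multiple of the block size.
	pad := aes.BlockSize - len(plainKey)%aes.BlockSize
	padded := append(append([]byte{}, plainKey...), bytes.Repeat([]byte{byte(pad)}, pad)...)
	// 6. CBC IV is 16 random bytes from CSPRNG.
	iv = make([]byte, aes.BlockSize)
	if _, err = io.ReadFull(rand.Reader, iv); err != nil {
		return
	}
	// 5. AES-128-CBC over the padded plaintext.
	block, err := aes.NewCipher(dk[:16])
	if err != nil {
		return
	}
	ciphertext = make([]byte, len(padded))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, padded)
	// 3. MAC = SHA3-256(ciphertext || last 16 bytes of derived key).
	sum := sha3.Sum256(append(append([]byte{}, ciphertext...), dk[16:32]...))
	mac = sum[:]
	return
}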
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 577152a21..55455262a 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -55,10 +55,9 @@ type hashPack struct {
}
type Downloader struct {
- mu sync.RWMutex
- queue *queue
- peers *peerSet
- activePeer string
+ mu sync.RWMutex
+ queue *queue
+ peers *peerSet
// Callbacks
hasBlock hashCheckFn
@@ -162,7 +161,6 @@ func (d *Downloader) Has(hash common.Hash) bool {
// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peer, hash common.Hash) (err error) {
- d.activePeer = p.id
defer func() {
// reset on error
if err != nil {
@@ -291,7 +289,7 @@ out:
// already fetched hash list. This can't guarantee 100% correctness, but it
// does a fair job: the result is always either correct or a false negative.
for _, peer := range d.peers.AllPeers() {
- if d.queue.Has(peer.head) && !attemptedPeers[p.id] {
+ if d.queue.Has(peer.head) && !attemptedPeers[peer.id] {
p = peer
break
}
@@ -416,32 +414,26 @@ out:
return nil
}
-// Deliver a chunk to the downloader. This is usually done through the BlocksMsg by
-// the protocol handler.
-func (d *Downloader) DeliverChunk(id string, blocks []*types.Block) error {
+// DeliverBlocks injects a new batch of blocks received from a remote node.
+// This is usually invoked through the BlocksMsg by the protocol handler.
+func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error {
// Make sure the downloader is active
if atomic.LoadInt32(&d.synchronising) == 0 {
return errNoSyncActive
}
-
d.blockCh <- blockPack{id, blocks}
return nil
}
-func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
+// DeliverHashes injects a new batch of hashes received from a remote node into
+// the download schedule. This is usually invoked through the BlockHashesMsg by
+// the protocol handler.
+func (d *Downloader) DeliverHashes(id string, hashes []common.Hash) error {
// Make sure the downloader is active
if atomic.LoadInt32(&d.synchronising) == 0 {
return errNoSyncActive
}
-
- // make sure that the hashes that are being added are actually from the peer
- // that's the current active peer. hashes that have been received from other
- // peers are dropped and ignored.
- if d.activePeer != id {
- return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer)
- }
-
if glog.V(logger.Debug) && len(hashes) != 0 {
from, to := hashes[0], hashes[len(hashes)-1]
glog.V(logger.Debug).Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id)
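Both deliver methods now share one shape: an atomic flag rejects data when no sync is running, and accepted data is handed over a channel to the sync loop, so a single goroutine owns the download state. A minimal sketch of that pattern follows; every name in it (deliverer, itemCh) is a hypothetical stand-in, not one of the downloader's real types. As in the original, the guard is best-effort: the flag can flip after the load, so the receiving loop must still tolerate late packs.

package sketch

import (
	"errors"
	"sync/atomic"
)

var errNoSyncActive = errors.New("no sync active")

// deliverer mimics the guard used by DeliverHashes/DeliverBlocks.
type deliverer struct {
	synchronising int32         // 1 while a sync is in progress
	itemCh        chan []string // stand-in for blockCh/hashCh
}

// Deliver refuses data unless a sync is active, then forwards it to
// the goroutine that owns the download state.
func (d *deliverer) Deliver(items []string) error {
	if atomic.LoadInt32(&d.synchronising) == 0 {
		return errNoSyncActive
	}
	d.itemCh <- items
	return nil
}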
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 385ad2909..78eff011a 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -76,7 +76,7 @@ func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
}
func (dl *downloadTester) getHashes(hash common.Hash) error {
- dl.downloader.AddHashes(dl.activePeerId, dl.hashes)
+ dl.downloader.DeliverHashes(dl.activePeerId, dl.hashes)
return nil
}
@@ -87,7 +87,7 @@ func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
blocks[i] = dl.blocks[hash]
}
- go dl.downloader.DeliverChunk(id, blocks)
+ go dl.downloader.DeliverBlocks(id, blocks)
return nil
}
@@ -188,12 +188,12 @@ func TestInactiveDownloader(t *testing.T) {
blocks := createBlocksFromHashSet(createHashSet(hashes))
tester := newTester(t, hashes, nil)
- err := tester.downloader.AddHashes("bad peer 001", hashes)
+ err := tester.downloader.DeliverHashes("bad peer 001", hashes)
if err != errNoSyncActive {
t.Error("expected no sync error, got", err)
}
- err = tester.downloader.DeliverChunk("bad peer 001", blocks)
+ err = tester.downloader.DeliverBlocks("bad peer 001", blocks)
if err != errNoSyncActive {
t.Error("expected no sync error, got", err)
}
diff --git a/eth/handler.go b/eth/handler.go
index 88394543e..b2d741295 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -224,7 +224,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
if err := msgStream.Decode(&hashes); err != nil {
break
}
- err := self.downloader.AddHashes(p.id, hashes)
+ err := self.downloader.DeliverHashes(p.id, hashes)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
@@ -264,7 +264,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
glog.V(logger.Detail).Infoln("Decode error", err)
blocks = nil
}
- self.downloader.DeliverChunk(p.id, blocks)
+ self.downloader.DeliverBlocks(p.id, blocks)
case NewBlockMsg:
var request newBlockMsgData
diff --git a/miner/worker.go b/miner/worker.go
index f737be507..c70ded434 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -45,7 +45,8 @@ type environment struct {
state *state.StateDB // apply state changes here
coinbase *state.StateObject // the miner's account
block *types.Block // the new block
- family *set.Set // family set (used for checking uncles)
+ ancestors *set.Set // ancestor set (used for checking uncle parent validity)
+ family *set.Set // family set (used for checking uncle invalidity)
uncles *set.Set // uncle set
remove *set.Set // tx which will be removed
tcount int // tx count in cycle
@@ -62,6 +63,7 @@ func env(block *types.Block, eth core.Backend) *environment {
totalUsedGas: new(big.Int),
state: state,
block: block,
+ ancestors: set.New(),
family: set.New(),
uncles: set.New(),
coinbase: state.GetOrNewStateObject(block.Coinbase()),
@@ -265,7 +267,11 @@ func (self *worker) makeCurrent() {
current := env(block, self.eth)
for _, ancestor := range self.chain.GetAncestors(block, 7) {
+ for _, uncle := range ancestor.Uncles() {
+ current.family.Add(uncle.Hash())
+ }
current.family.Add(ancestor.Hash())
+ current.ancestors.Add(ancestor.Hash())
}
accounts, _ := self.eth.AccountManager().Accounts()
// Keep track of transactions which return errors so they can be removed
@@ -363,7 +369,7 @@ func (self *worker) commitUncle(uncle *types.Header) error {
}
self.current.uncles.Add(uncle.Hash())
- if !self.current.family.Has(uncle.ParentHash) {
+ if !self.current.ancestors.Has(uncle.ParentHash) {
return core.UncleError(fmt.Sprintf("Uncle's parent unknown (%x)", uncle.ParentHash[0:4]))
}
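The split makes the two membership tests read correctly: ancestors answers "is the uncle's parent among the last few blocks of our chain", while family (ancestors plus all of their uncles) answers "has this block already been used". A hedged sketch of the resulting checks, with plain maps standing in for set.Set and illustrative names and error texts throughout:

package sketch

import "fmt"

type hash [32]byte

// validateUncle mirrors the checks commitUncle performs after this
// change, using map-based sets.
func validateUncle(uncle, parent hash, ancestors, family, uncles map[hash]bool) error {
	if uncles[uncle] {
		return fmt.Errorf("uncle %x already included", uncle[:4])
	}
	// family = ancestors + their uncles: anything in it is either on
	// the chain already or was used as an uncle before.
	if family[uncle] {
		return fmt.Errorf("uncle %x is an ancestor or a known uncle", uncle[:4])
	}
	// The uncle's parent must itself be a recent ancestor.
	if !ancestors[parent] {
		return fmt.Errorf("uncle's parent unknown (%x)", parent[:4])
	}
	uncles[uncle] = true
	return nil
}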
diff --git a/p2p/discover/udp.go b/p2p/discover/udp.go
index 1213c12c8..539ccd460 100644
--- a/p2p/discover/udp.go
+++ b/p2p/discover/udp.go
@@ -363,7 +363,31 @@ const (
headSize = macSize + sigSize // space of packet frame data
)
-var headSpace = make([]byte, headSize)
+var (
+ headSpace = make([]byte, headSize)
+
+ // Neighbors responses are sent across multiple packets to
+ // stay below the 1280 byte limit. We compute the maximum number
+ // of entries by stuffing a packet until it grows too large.
+ maxNeighbors int
+)
+
+func init() {
+ p := neighbors{Expiration: ^uint64(0)}
+ maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
+ for n := 0; ; n++ {
+ p.Nodes = append(p.Nodes, maxSizeNode)
+ size, _, err := rlp.EncodeToReader(p)
+ if err != nil {
+ // If this ever happens, it will be caught by the unit tests.
+ panic("cannot encode: " + err.Error())
+ }
+ if headSize+size+1 >= 1280 {
+ maxNeighbors = n
+ break
+ }
+ }
+}
func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req interface{}) error {
packet, err := encodePacket(t.priv, ptype, req)
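For scale, assuming macSize and sigSize keep the values defined earlier in this file (a 32-byte SHA3 hash and a 65-byte recoverable ECDSA signature; those constants sit outside this hunk, so this is an assumption):

    headSize       = macSize + sigSize = 32 + 65 = 97 bytes
    payload budget = 1280 - headSize - 1 = 1182 bytes

where the extra byte is the packet type, matching the headSize+size+1 check in the init function above.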
@@ -402,7 +426,10 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte,
// readLoop runs in its own goroutine. it handles incoming UDP packets.
func (t *udp) readLoop() {
defer t.conn.Close()
- buf := make([]byte, 4096) // TODO: good buffer size
+ // Discovery packets are defined to be no larger than 1280 bytes.
+ // Packets larger than this size will be cut at the end and treated
+ // as invalid because their hash won't match.
+ buf := make([]byte, 1280)
for {
nbytes, from, err := t.conn.ReadFromUDP(buf)
if err != nil {
@@ -504,15 +531,16 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
closest := t.closest(target, bucketSize).entries
t.mutex.Unlock()
- // TODO: this conversion could use a cached version of the slice
- closestrpc := make([]rpcNode, len(closest))
+ p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
+ // Send neighbors in chunks with at most maxNeighbors per packet
+ // to stay below the 1280 byte limit.
for i, n := range closest {
- closestrpc[i] = nodeToRPC(n)
+ p.Nodes = append(p.Nodes, nodeToRPC(n))
+ if len(p.Nodes) == maxNeighbors || i == len(closest)-1 {
+ t.send(from, neighborsPacket, p)
+ p.Nodes = p.Nodes[:0]
+ }
}
- t.send(from, neighborsPacket, neighbors{
- Nodes: closestrpc,
- Expiration: uint64(time.Now().Add(expiration).Unix()),
- })
return nil
}
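The reply loop above is the general "flush when full or finished" batching shape, reusing one backing array across packets. Pulled out as a generic sketch, with sendBatch as a hypothetical stand-in for t.send; reusing the array is safe here because, as in the handler, each batch is consumed before the next iteration appends to it:

package sketch

// sendInChunks flushes a batch whenever it reaches max entries or the
// input is exhausted, reusing the same backing array between sends.
func sendInChunks(items []string, max int, sendBatch func([]string)) {
	batch := make([]string, 0, max)
	for i, it := range items {
		batch = append(batch, it)
		if len(batch) == max || i == len(items)-1 {
			sendBatch(batch)
			batch = batch[:0]
		}
	}
}

Like the findnode handler, this sends nothing at all for empty input, so a peer with an empty table simply gets no neighbors packet.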
diff --git a/p2p/discover/udp_test.go b/p2p/discover/udp_test.go
index f175835a8..11fa31d7c 100644
--- a/p2p/discover/udp_test.go
+++ b/p2p/discover/udp_test.go
@@ -163,17 +163,22 @@ func TestUDP_findnode(t *testing.T) {
))
// check that closest neighbors are returned.
test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
- test.waitPacketOut(func(p *neighbors) {
- expected := test.table.closest(targetHash, bucketSize)
- if len(p.Nodes) != bucketSize {
- t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
- }
- for i := range p.Nodes {
- if p.Nodes[i].ID != expected.entries[i].ID {
- t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, p.Nodes[i], expected.entries[i])
+ expected := test.table.closest(targetHash, bucketSize)
+
+ waitNeighbors := func(want []*Node) {
+ test.waitPacketOut(func(p *neighbors) {
+ if len(p.Nodes) != len(want) {
+ t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
}
- }
- })
+ for i := range p.Nodes {
+ if p.Nodes[i].ID != want[i].ID {
+ t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, p.Nodes[i], want[i])
+ }
+ }
+ })
+ }
+ waitNeighbors(expected.entries[:maxNeighbors])
+ waitNeighbors(expected.entries[maxNeighbors:])
}
func TestUDP_findnodeMultiReply(t *testing.T) {