From 35d479b6d3d793ef8b32b28a9f2cfc9f8b4a0670 Mon Sep 17 00:00:00 2001
From: Henning Diedrich <hd@eonblast.com>
Date: Fri, 17 Jun 2016 09:53:54 +0200
Subject: [release/1.4.11] eth: fix #2710 filter races

and locking bugs found in its wake.

(cherry picked from commit 51f8ce26cf6dbc20ddc548af305739db981fdd41)
---
 eth/filters/api.go           | 63 ++++++++++++++++++++++++++++----------------
 eth/filters/filter_system.go | 19 ++++++++-----
 2 files changed, 54 insertions(+), 28 deletions(-)

diff --git a/eth/filters/api.go b/eth/filters/api.go
index 7278e20b9..5e8b137b6 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -68,8 +68,6 @@ type PublicFilterAPI struct {
 
 	transactionMu    sync.RWMutex
 	transactionQueue map[int]*hashQueue
-
-	transactMu sync.Mutex
 }
 
 // NewPublicFilterAPI returns a new PublicFilterAPI instance.
@@ -100,6 +98,7 @@ done:
 	for {
 		select {
 		case <-timer.C:
+			s.filterManager.Lock() // lock order like filterLoop()
 			s.logMu.Lock()
 			for id, filter := range s.logQueue {
 				if time.Since(filter.timeout) > filterTickerTime {
@@ -126,6 +125,7 @@ done:
 				}
 			}
 			s.transactionMu.Unlock()
+			s.filterManager.Unlock()
 		case <-s.quit:
 			break done
 		}
@@ -135,19 +135,24 @@ done:
 
 // NewBlockFilter create a new filter that returns blocks that are included into the canonical chain.
 func (s *PublicFilterAPI) NewBlockFilter() (string, error) {
+	// protect filterManager.Add() and setting of filter fields
+	s.filterManager.Lock()
+	defer s.filterManager.Unlock()
+
 	externalId, err := newFilterId()
 	if err != nil {
 		return "", err
 	}
 
-	s.blockMu.Lock()
 	filter := New(s.chainDb)
 	id, err := s.filterManager.Add(filter, ChainFilter)
 	if err != nil {
 		return "", err
 	}
 
+	s.blockMu.Lock()
 	s.blockQueue[id] = &hashQueue{timeout: time.Now()}
+	s.blockMu.Unlock()
 
 	filter.BlockCallback = func(block *types.Block, logs vm.Logs) {
 		s.blockMu.Lock()
@@ -158,8 +163,6 @@ func (s *PublicFilterAPI) NewBlockFilter() (string, error) {
 		}
 	}
 
-	defer s.blockMu.Unlock()
-
 	s.filterMapMu.Lock()
 	s.filterMapping[externalId] = id
 	s.filterMapMu.Unlock()
@@ -169,21 +172,24 @@ func (s *PublicFilterAPI) NewBlockFilter() (string, error) {
 
 // NewPendingTransactionFilter creates a filter that returns new pending transactions.
 func (s *PublicFilterAPI) NewPendingTransactionFilter() (string, error) {
+	// protect filterManager.Add() and setting of filter fields
+	s.filterManager.Lock()
+	defer s.filterManager.Unlock()
+
 	externalId, err := newFilterId()
 	if err != nil {
 		return "", err
 	}
 
-	s.transactionMu.Lock()
-	defer s.transactionMu.Unlock()
-
 	filter := New(s.chainDb)
 	id, err := s.filterManager.Add(filter, PendingTxFilter)
 	if err != nil {
 		return "", err
 	}
 
+	s.transactionMu.Lock()
 	s.transactionQueue[id] = &hashQueue{timeout: time.Now()}
+	s.transactionMu.Unlock()
 
 	filter.TransactionCallback = func(tx *types.Transaction) {
 		s.transactionMu.Lock()
@@ -203,8 +209,9 @@ func (s *PublicFilterAPI) NewPendingTransactionFilter() (string, error) {
 
 // newLogFilter creates a new log filter.
 func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []common.Address, topics [][]common.Hash, callback func(log *vm.Log, removed bool)) (int, error) {
-	s.logMu.Lock()
-	defer s.logMu.Unlock()
+	// protect filterManager.Add() and setting of filter fields
+	s.filterManager.Lock()
+	defer s.filterManager.Unlock()
 
 	filter := New(s.chainDb)
 	id, err := s.filterManager.Add(filter, LogFilter)
@@ -212,7 +219,9 @@ func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []commo
 		return 0, err
 	}
 
+	s.logMu.Lock()
 	s.logQueue[id] = &logQueue{timeout: time.Now()}
+	s.logMu.Unlock()
 
 	filter.SetBeginBlock(earliest)
 	filter.SetEndBlock(latest)
@@ -443,35 +452,43 @@ func (s *PublicFilterAPI) GetLogs(args NewFilterArgs) []vmlog {
 
 // UninstallFilter removes the filter with the given filter id.
 func (s *PublicFilterAPI) UninstallFilter(filterId string) bool {
-	s.filterMapMu.Lock()
-	defer s.filterMapMu.Unlock()
+	s.filterManager.Lock()
+	defer s.filterManager.Unlock()
 
+	s.filterMapMu.Lock()
 	id, ok := s.filterMapping[filterId]
 	if !ok {
+		s.filterMapMu.Unlock()
 		return false
 	}
-
-	defer s.filterManager.Remove(id)
 	delete(s.filterMapping, filterId)
+	s.filterMapMu.Unlock()
 
+	s.filterManager.Remove(id)
+
+	s.logMu.Lock()
 	if _, ok := s.logQueue[id]; ok {
-		s.logMu.Lock()
-		defer s.logMu.Unlock()
 		delete(s.logQueue, id)
+		s.logMu.Unlock()
 		return true
 	}
+	s.logMu.Unlock()
+
+	s.blockMu.Lock()
 	if _, ok := s.blockQueue[id]; ok {
-		s.blockMu.Lock()
-		defer s.blockMu.Unlock()
 		delete(s.blockQueue, id)
+		s.blockMu.Unlock()
 		return true
 	}
+	s.blockMu.Unlock()
+
+	s.transactionMu.Lock()
 	if _, ok := s.transactionQueue[id]; ok {
-		s.transactionMu.Lock()
-		defer s.transactionMu.Unlock()
 		delete(s.transactionQueue, id)
+		s.transactionMu.Unlock()
 		return true
 	}
+	s.transactionMu.Unlock()
 
 	return false
 }
@@ -525,7 +542,9 @@ func (s *PublicFilterAPI) logFilterChanged(id int) []vmlog {
 
 // GetFilterLogs returns the logs for the filter with the given id.
 func (s *PublicFilterAPI) GetFilterLogs(filterId string) []vmlog {
+	s.filterMapMu.RLock()
 	id, ok := s.filterMapping[filterId]
+	s.filterMapMu.RUnlock()
 	if !ok {
 		return toRPCLogs(nil, false)
 	}
@@ -540,9 +559,9 @@ func (s *PublicFilterAPI) GetFilterLogs(filterId string) []vmlog {
 // GetFilterChanges returns the logs for the filter with the given id since last time is was called.
 // This can be used for polling.
 func (s *PublicFilterAPI) GetFilterChanges(filterId string) interface{} {
-	s.filterMapMu.Lock()
+	s.filterMapMu.RLock()
 	id, ok := s.filterMapping[filterId]
-	s.filterMapMu.Unlock()
+	s.filterMapMu.RUnlock()
 
 	if !ok { // filter not found
 		return []interface{}{}
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index 4343dfa21..256464213 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -82,11 +82,20 @@ func (fs *FilterSystem) Stop() {
 	fs.sub.Unsubscribe()
 }
 
-// Add adds a filter to the filter manager
-func (fs *FilterSystem) Add(filter *Filter, filterType FilterType) (int, error) {
+// Lock acquires the filter system maps lock, forcing the lock acquisition
+// sequence with filterMu acquired first to avoid deadlocks by callbacks.
+func (fs *FilterSystem) Lock() {
 	fs.filterMu.Lock()
-	defer fs.filterMu.Unlock()
+}
+
+// Unlock releases the filter system maps lock.
+func (fs *FilterSystem) Unlock() {
+	fs.filterMu.Unlock()
+}
 
+// Add adds a filter to the filter manager
+// Expects filterMu to be locked.
+func (fs *FilterSystem) Add(filter *Filter, filterType FilterType) (int, error) {
 	id := fs.filterId
 	filter.created = time.Now()
 
@@ -110,10 +119,8 @@ func (fs *FilterSystem) Add(filter *Filter, filterType FilterType) (int, error)
 }
 
 // Remove removes a filter by filter id
+// Expects filterMu to be locked.
 func (fs *FilterSystem) Remove(id int) {
-	fs.filterMu.Lock()
-	defer fs.filterMu.Unlock()
-
 	delete(fs.chainFilters, id)
 	delete(fs.pendingTxFilters, id)
 	delete(fs.logFilters, id)
-- 
cgit v1.2.3


From 08a7cd74da3f353b80ced16d1e2cf2d758606699 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Tue, 19 Jul 2016 12:00:09 +0300
Subject: [release/1.4.11] eth: cancel DAO challenge on peer drop (annoying
 log)

(cherry picked from commit 91f18ffd47d766b1493016da6802befbf9739709)
---
 eth/handler.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/eth/handler.go b/eth/handler.go
index a498cd247..6a648d2e0 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -295,6 +295,13 @@ func (pm *ProtocolManager) handle(p *peer) error {
 			glog.V(logger.Warn).Infof("%v: timed out DAO fork-check, dropping", p)
 			pm.removePeer(p.id)
 		})
+		// Make sure it's cleaned up if the peer dies off
+		defer func() {
+			if p.forkDrop != nil {
+				p.forkDrop.Stop()
+				p.forkDrop = nil
+			}
+		}()
 	}
 	// main loop. handle incoming messages.
 	for {
-- 
cgit v1.2.3


From 9eb2873a9cfef0013efeb002b95999e2a007067a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Fri, 22 Jul 2016 17:55:46 +0300
Subject: [release/1.4.11] eth/downloader: fix the stall checks/drops during
 sync

(cherry picked from commit c7c82f1b44e07ad0906dde563cce46ea87b6fc83)
---
 eth/downloader/downloader.go | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 01c0818a0..a10253b8e 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -1555,7 +1555,14 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 			// Check for fetch request timeouts and demote the responsible peers
 			for pid, fails := range expire() {
 				if peer := d.peers.Peer(pid); peer != nil {
-					if fails > 1 {
+					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
+					// ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times
+					// out, sync-wise we need to get rid of the peer.
+					//
+					// The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
+					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
+					// how response times react, so it always requests one more than the minimum (i.e. min 2).
+					if fails > 2 {
 						glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
 						setIdle(peer, 0)
 					} else {
-- 
cgit v1.2.3


From 626604e86d9a47900e6da5fc003f7009bb239940 Mon Sep 17 00:00:00 2001
From: Felix Lange <fjl@twurst.com>
Date: Thu, 4 Aug 2016 21:19:11 +0200
Subject: [release/1.4.11] Godeps: update github.com/rjeczalik/notify to
 f627deca7a51

Fixes #2829

(cherry picked from commit 4be37222efb2064140bd3d285ef553ec077e0487)
---
 Godeps/Godeps.json                                                    | 2 +-
 Godeps/_workspace/src/github.com/rjeczalik/notify/.travis.yml         | 3 +--
 Godeps/_workspace/src/github.com/rjeczalik/notify/appveyor.yml        | 1 -
 Godeps/_workspace/src/github.com/rjeczalik/notify/watcher_fsevents.go | 2 +-
 4 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 36104b456..dc7847f33 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -117,7 +117,7 @@
 		},
 		{
 			"ImportPath": "github.com/rjeczalik/notify",
-			"Rev": "5dd6205716539662f8f14ab513552b41eab69d5d"
+			"Rev": "f627deca7a510d96f0ef9388f2d0e8b16d21f87f"
 		},
 		{
 			"ImportPath": "github.com/robertkrimen/otto",
diff --git a/Godeps/_workspace/src/github.com/rjeczalik/notify/.travis.yml b/Godeps/_workspace/src/github.com/rjeczalik/notify/.travis.yml
index 4f1f5f25e..c92863d50 100644
--- a/Godeps/_workspace/src/github.com/rjeczalik/notify/.travis.yml
+++ b/Godeps/_workspace/src/github.com/rjeczalik/notify/.travis.yml
@@ -21,10 +21,9 @@ env:
    - PATH=$HOME/bin:$PATH
 
 install:
- - go get golang.org/x/tools/cmd/vet
  - go get -t -v ./...
 
 script:
- - go tool vet -all .
+ - "(go version | grep -q 1.4) || go tool vet -all ."
  - go install $GOFLAGS ./...
  - go test -v -race $GOFLAGS ./...
diff --git a/Godeps/_workspace/src/github.com/rjeczalik/notify/appveyor.yml b/Godeps/_workspace/src/github.com/rjeczalik/notify/appveyor.yml
index 16d09ac3b..8e762d05c 100644
--- a/Godeps/_workspace/src/github.com/rjeczalik/notify/appveyor.yml
+++ b/Godeps/_workspace/src/github.com/rjeczalik/notify/appveyor.yml
@@ -11,7 +11,6 @@ environment:
 
 install:
  - go version
- - go get golang.org/x/tools/cmd/vet
  - go get -v -t ./...
 
 build_script:
diff --git a/Godeps/_workspace/src/github.com/rjeczalik/notify/watcher_fsevents.go b/Godeps/_workspace/src/github.com/rjeczalik/notify/watcher_fsevents.go
index 54334912e..9062c17c7 100644
--- a/Godeps/_workspace/src/github.com/rjeczalik/notify/watcher_fsevents.go
+++ b/Godeps/_workspace/src/github.com/rjeczalik/notify/watcher_fsevents.go
@@ -133,7 +133,7 @@ func (w *watch) Dispatch(ev []FSEvent) {
 			ev[i].Flags, ev[i].Path, i, ev[i].ID, len(ev))
 		if ev[i].Flags&failure != 0 {
 			// TODO(rjeczalik): missing error handling
-			panic("unhandled error: " + Event(ev[i].Flags).String())
+			continue
 		}
 		if !strings.HasPrefix(ev[i].Path, w.path) {
 			continue
-- 
cgit v1.2.3


From d1696dbf0746929c0ab719ef0807dc7b700bb85a Mon Sep 17 00:00:00 2001
From: Felix Lange <fjl@twurst.com>
Date: Fri, 5 Aug 2016 23:12:52 +0200
Subject: [release/1.4.11] core/vm: hide ecrecover error message

Fixes #2825

(cherry picked from commit e4736fe46938008b7fa88879f728fa81c6ce09e8)
---
 core/vm/contracts.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 5cc9f903b..b45f14724 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -95,7 +95,7 @@ func ecrecoverFunc(in []byte) []byte {
 
 	// tighter sig s values in homestead only apply to tx sigs
 	if !crypto.ValidateSignatureValues(v, r, s, false) {
-		glog.V(logger.Debug).Infof("EC RECOVER FAIL: v, r or s value invalid")
+		glog.V(logger.Detail).Infof("ECRECOVER error: v, r or s value invalid")
 		return nil
 	}
 
@@ -106,7 +106,7 @@ func ecrecoverFunc(in []byte) []byte {
 	pubKey, err := crypto.Ecrecover(in[:32], rsv)
 	// make sure the public key is a valid one
 	if err != nil {
-		glog.V(logger.Error).Infof("EC RECOVER FAIL: ", err)
+		glog.V(logger.Detail).Infoln("ECRECOVER error: ", err)
 		return nil
 	}
 
-- 
cgit v1.2.3


From 0398075cedbc8a2a659ca4cc22e732e3c631fecf Mon Sep 17 00:00:00 2001
From: Felix Lange <fjl@twurst.com>
Date: Wed, 25 May 2016 14:07:57 +0200
Subject: [release/1.4.11] build: add ci.go, use it everywhere

The new build script, ci.go, replaces some of the older shell scripts.
ci.go can compile go-ethereum, run the tests, create release archives
and debian source packages.

(cherry picked from commit 6c33ba14a4db99409657e6a68a7c629e09ceee3f)
---
 .gitignore                    |   8 +-
 .gitmodules                   |   3 -
 .travis.yml                   |  54 +++--
 Makefile                      |  46 ++---
 appveyor.yml                  |  31 +++
 build/ci-notes.md             |  26 +++
 build/ci.go                   | 465 ++++++++++++++++++++++++++++++++++++++++++
 build/deb.changelog           |   5 +
 build/deb.control             |  25 +++
 build/deb.copyright           |  14 ++
 build/deb.docs                |   1 +
 build/deb.install             |   1 +
 build/deb.rules               |  13 ++
 build/env.sh                  |   5 +-
 build/test-global-coverage.sh |  15 --
 build/win-ci-compile.bat      |  26 ---
 build/win-ci-test.bat         |  15 --
 internal/build/archive.go     | 177 ++++++++++++++++
 internal/build/util.go        | 122 +++++++++++
 19 files changed, 938 insertions(+), 114 deletions(-)
 delete mode 100644 .gitmodules
 create mode 100644 appveyor.yml
 create mode 100644 build/ci-notes.md
 create mode 100644 build/ci.go
 create mode 100644 build/deb.changelog
 create mode 100644 build/deb.control
 create mode 100644 build/deb.copyright
 create mode 100644 build/deb.docs
 create mode 100644 build/deb.install
 create mode 100644 build/deb.rules
 delete mode 100755 build/test-global-coverage.sh
 delete mode 100644 build/win-ci-compile.bat
 delete mode 100644 build/win-ci-test.bat
 create mode 100644 internal/build/archive.go
 create mode 100644 internal/build/util.go

diff --git a/.gitignore b/.gitignore
index e8e10db2f..21dbd28c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,17 +23,11 @@ Godeps/_workspace/bin
 .project
 .settings
 
-deploy/osx/Mist.app
-deploy/osx/Mist\ Installer.dmg
-cmd/mist/assets/ext/ethereum.js/
-
 # used by the Makefile
 /build/_workspace/
 /build/bin/
+/geth*.zip
 
 # travis
 profile.tmp
 profile.cov
-
-# vagrant
-.vagrant
diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 219564eb7..000000000
--- a/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "cmd/mist/assets/ext/ethereum.js"]
-	path = cmd/mist/assets/ext/ethereum.js
-	url = https://github.com/ethereum/web3.js
diff --git a/.travis.yml b/.travis.yml
index 24486d4a0..d0fd4b775 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,31 +1,45 @@
 language: go
-go:
-  - 1.4.2
-  - 1.5.4
-  - 1.6.2
+go_import_path: github.com/ethereum/go-ethereum
+sudo: false
+matrix:
+  include:
+    - os: linux
+      dist: trusty
+      go: 1.4.2
+    - os: linux
+      dist: trusty
+      go: 1.5.4
+    - os: linux
+      dist: trusty
+      go: 1.6.2
+    - os: osx
+      go: 1.6.2
+
+    # This builder does the PPA upload (and nothing else).
+    - os: linux
+      dist: trusty
+      go: 1.6.2
+      env: PPA
+      addons:
+        apt:
+          packages:
+            - devscripts
+            - debhelper
+            - dput
+      script:
+        - go run build/ci.go travis-debsrc
+
 install:
-  # - go get code.google.com/p/go.tools/cmd/goimports
-  # - go get github.com/golang/lint/golint
-  # - go get golang.org/x/tools/cmd/vet
   - go get golang.org/x/tools/cmd/cover
-before_script:
-  # - gofmt -l -w .
-  # - goimports -l -w .
-  # - golint .
-  # - go vet ./...
-  # - go test -race ./...
 script:
-  - make travis-test-with-coverage
+  - go run build/ci.go install
+  - go run build/ci.go test -coverage -vet
 after_success:
-  - bash <(curl -s https://codecov.io/bash)
-env:
-  global:
-    - secure: "U2U1AmkU4NJBgKR/uUAebQY87cNL0+1JHjnLOmmXwxYYyj5ralWb1aSuSH3qSXiT93qLBmtaUkuv9fberHVqrbAeVlztVdUsKAq7JMQH+M99iFkC9UiRMqHmtjWJ0ok4COD1sRYixxi21wb/JrMe3M1iL4QJVS61iltjHhVdM64="
-sudo: false
+  # - go run build/ci.go archive -type tar
+
 notifications:
   webhooks:
     urls:
       - https://webhooks.gitter.im/e/e09ccdce1048c5e03445
     on_success: change
     on_failure: always
-    on_start: false
diff --git a/Makefile b/Makefile
index c2fb9bb35..148cb5758 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 # with Go source code. If you know what GOPATH is then you probably
 # don't need to bother with make.
 
-.PHONY: geth geth-cross evm all test travis-test-with-coverage xgo clean
+.PHONY: geth geth-cross evm all test xgo clean
 .PHONY: geth-linux geth-linux-386 geth-linux-amd64
 .PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
 .PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
@@ -13,10 +13,29 @@ GOBIN = build/bin
 GO ?= latest
 
 geth:
-	build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/geth ./cmd/geth
+	build/env.sh go run build/ci.go install ./cmd/geth
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."
 
+evm:
+	build/env.sh go run build/ci.go install ./cmd/evm
+	@echo "Done building."
+	@echo "Run \"$(GOBIN)/evm\" to start the evm."
+
+all:
+	build/env.sh go run build/ci.go install
+
+test: all
+	build/env.sh go run build/ci.go test
+
+clean:
+	rm -fr build/_workspace/pkg/ Godeps/_workspace/pkg $(GOBIN)/*
+
+# Cross Compilation Targets (xgo)
+
+xgo:
+	build/env.sh go get github.com/karalabe/xgo
+
 geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
 	@echo "Full cross compilation done:"
 	@ls -ld $(GOBIN)/geth-*
@@ -96,26 +115,3 @@ geth-ios: xgo
 	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=ios-7.0/framework -v $(shell build/flags.sh) ./cmd/geth
 	@echo "iOS framework cross compilation done:"
 	@ls -ld $(GOBIN)/geth-ios-*
-
-evm:
-	build/env.sh $(GOROOT)/bin/go install -v $(shell build/flags.sh) ./cmd/evm
-	@echo "Done building."
-	@echo "Run \"$(GOBIN)/evm to start the evm."
-
-all:
-	for cmd in `ls ./cmd/`; do \
-		 build/env.sh go build -i -v $(shell build/flags.sh) -o $(GOBIN)/$$cmd ./cmd/$$cmd; \
-	done
-
-test: all
-	build/env.sh go test ./...
-
-travis-test-with-coverage: all
-	build/env.sh go vet ./...
-	build/env.sh build/test-global-coverage.sh
-
-xgo:
-	build/env.sh go get github.com/karalabe/xgo
-
-clean:
-	rm -fr build/_workspace/pkg/ Godeps/_workspace/pkg $(GOBIN)/*
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 000000000..89d3dfe3d
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,31 @@
+os: Visual Studio 2015
+
+# Clone directly into GOPATH.
+clone_folder: c:\gopath\src\github.com\ethereum\go-ethereum
+clone_depth: 5
+version: "{branch}.{build}"
+environment:
+  global:
+    GOPATH: c:\gopath
+
+# cache choco package files so we don't hit sourceforge all
+# the time.
+cache:
+  - c:\cache
+
+install:
+  - cmd: choco install --cache c:\cache golang mingw | find /v "Extracting  "
+  - refreshenv
+  - cd c:\gopath\src\github.com\ethereum\go-ethereum
+
+build_script:
+  - go run build\ci.go install
+
+test_script:
+  - go run build\ci.go test -vet -coverage
+
+after_build:
+  - go run build\ci.go archive -type zip
+
+artifacts:
+  - path: geth-*.zip
diff --git a/build/ci-notes.md b/build/ci-notes.md
new file mode 100644
index 000000000..989cba6dd
--- /dev/null
+++ b/build/ci-notes.md
@@ -0,0 +1,26 @@
+Debian Packaging
+----------------
+
+Tagged releases and develop branch commits are available as installable Debian packages
+for Ubuntu. Packages are built for all Ubuntu versions which are supported by
+Canonical:
+
+- Trusty Tahr (14.04 LTS)
+- Wily Werewolf (15.10)
+- Xenial Xerus (16.04 LTS)
+
+Packages of develop branch commits have suffix -unstable and cannot be installed alongside
+the stable version. Switching between release streams requires user intervention.
+
+The packages are built and served by launchpad.net. We generate a Debian source package
+for each distribution and upload it. Their builder picks up the source package, builds it
+and installs the new version into the PPA repository. Launchpad requires a valid signature
+by a team member for source package uploads. The signing key is stored in an environment
+variable which Travis CI makes available to certain builds.
+
+We want to build go-ethereum with the most recent version of Go, irrespective of the Go
+version that is available in the main Ubuntu repository. In order to make this possible,
+our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
+golang-1.6, which is co-installable alongside the regular golang package. PPA dependencies
+can be edited at https://launchpad.net/%7Elp-fjl/+archive/ubuntu/geth-ci-testing/+edit-dependencies
+
diff --git a/build/ci.go b/build/ci.go
new file mode 100644
index 000000000..33d97c182
--- /dev/null
+++ b/build/ci.go
@@ -0,0 +1,465 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build none
+
+/*
+The ci command is called from Continuous Integration scripts.
+
+Usage: go run ci.go <command> <command flags/arguments>
+
+Available commands are:
+
+   install    [ packages... ]                          -- builds packages and executables
+   test       [ -coverage ] [ -vet ] [ packages... ]   -- runs the tests
+   archive    [ -type zip|tar ]                        -- archives build artefacts
+   importkeys                                          -- imports signing keys from env
+   debsrc     [ -sign key-id ] [ -upload dest ]        -- creates a debian source package
+
+For all commands, -n prevents execution of external programs (dry run mode).
+
+*/
+package main
+
+import (
+	"bytes"
+	"encoding/base64"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+
+	"../internal/build"
+)
+
+var (
+	// Files that end up in the geth*.zip archive.
+	gethArchiveFiles = []string{
+		"COPYING",
+		executablePath("geth"),
+	}
+
+	// Files that end up in the geth-alltools*.zip archive.
+	allToolsArchiveFiles = []string{
+		"COPYING",
+		executablePath("abigen"),
+		executablePath("evm"),
+		executablePath("geth"),
+		executablePath("rlpdump"),
+	}
+
+	// A debian package is created for all executables listed here.
+	debExecutables = []debExecutable{
+		{
+			Name:        "geth",
+			Description: "Ethereum CLI client.",
+		},
+		{
+			Name:        "rlpdump",
+			Description: "Developer utility tool that prints RLP structures.",
+		},
+		{
+			Name:        "evm",
+			Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
+		},
+		{
+			Name:        "abigen",
+			Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
+		},
+	}
+
+	// Distros for which packages are created.
+	// Note: vivid is unsupported because there is no golang-1.6 package for it.
+	debDistros = []string{"trusty", "wily", "xenial", "yakkety"}
+)
+
+var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
+
+func executablePath(name string) string {
+	if runtime.GOOS == "windows" {
+		name += ".exe"
+	}
+	return filepath.Join(GOBIN, name)
+}
+
+func main() {
+	log.SetFlags(log.Lshortfile)
+
+	if _, err := os.Stat(filepath.Join("build", "ci.go")); os.IsNotExist(err) {
+		log.Fatal("this script must be run from the root of the repository")
+	}
+	if len(os.Args) < 2 {
+		log.Fatal("need subcommand as first argument")
+	}
+	switch os.Args[1] {
+	case "install":
+		doInstall(os.Args[2:])
+	case "test":
+		doTest(os.Args[2:])
+	case "archive":
+		doArchive(os.Args[2:])
+	case "debsrc":
+		doDebianSource(os.Args[2:])
+	case "travis-debsrc":
+		doTravisDebianSource(os.Args[2:])
+	default:
+		log.Fatal("unknown command ", os.Args[1])
+	}
+}
+
+// Compiling
+
+func doInstall(cmdline []string) {
+	commitHash := flag.String("gitcommit", "", "Git commit hash embedded into binary.")
+	flag.CommandLine.Parse(cmdline)
+
+	// Check Go version. People regularly open issues about compilation
+	// failure with outdated Go. This should save them the trouble.
+	if runtime.Version() < "go1.4" && !strings.HasPrefix(runtime.Version(), "devel") {
+		log.Println("You have Go version", runtime.Version())
+		log.Println("go-ethereum requires at least Go version 1.4 and cannot")
+		log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
+		os.Exit(1)
+	}
+
+	// Compile packages given as arguments, or everything if there are no arguments.
+	packages := []string{"./..."}
+	if flag.NArg() > 0 {
+		packages = flag.Args()
+	}
+
+	goinstall := goTool("install", makeBuildFlags(*commitHash)...)
+	goinstall.Args = append(goinstall.Args, "-v")
+	goinstall.Args = append(goinstall.Args, packages...)
+	build.MustRun(goinstall)
+}
+
+func makeBuildFlags(commitHash string) (flags []string) {
+	// Since Go 1.5, the separator char for link time assignments
+	// is '=' and using ' ' prints a warning. However, Go < 1.5 does
+	// not support using '='.
+	sep := " "
+	if runtime.Version() > "go1.5" || strings.Contains(runtime.Version(), "devel") {
+		sep = "="
+	}
+
+	if os.Getenv("GO_OPENCL") != "" {
+		flags = append(flags, "-tags", "opencl")
+	}
+
+	// Set gitCommit constant via link-time assignment. If this is a git checkout, we can
+	// just get the current commit hash through git. Otherwise we fall back to the hash
+	// that was passed as -gitcommit.
+	//
+	// -gitcommit is required for Debian package builds. The source package doesn't
+	// contain .git but we still want to embed the commit hash into the packaged binary.
+	// The hash is rendered into the debian/rules build script when the source package is
+	// created.
+	if _, err := os.Stat(filepath.Join(".git", "HEAD")); !os.IsNotExist(err) {
+		if c := build.GitCommit(); c != "" {
+			commitHash = c
+		}
+	}
+	if commitHash != "" {
+		flags = append(flags, "-ldflags", "-X main.gitCommit"+sep+commitHash)
+	}
+	return flags
+}
+
+func goTool(subcmd string, args ...string) *exec.Cmd {
+	gocmd := filepath.Join(runtime.GOROOT(), "bin", "go")
+	cmd := exec.Command(gocmd, subcmd)
+	cmd.Args = append(cmd.Args, args...)
+	cmd.Env = []string{
+		"GOPATH=" + build.GOPATH(),
+		"GOBIN=" + GOBIN,
+	}
+	for _, e := range os.Environ() {
+		if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
+			continue
+		}
+		cmd.Env = append(cmd.Env, e)
+	}
+	return cmd
+}
+
+// Running The Tests
+//
+// "tests" also includes static analysis tools such as vet.
+
+func doTest(cmdline []string) {
+	var (
+		vet      = flag.Bool("vet", false, "Whether to run go vet")
+		coverage = flag.Bool("coverage", false, "Whether to record code coverage")
+	)
+	flag.CommandLine.Parse(cmdline)
+	packages := []string{"./..."}
+	if len(flag.CommandLine.Args()) > 0 {
+		packages = flag.CommandLine.Args()
+	}
+
+	// Run analysis tools before the tests.
+	if *vet {
+		build.MustRun(goTool("vet", packages...))
+	}
+
+	// Run the actual tests.
+	gotest := goTool("test")
+	if *coverage {
+		gotest.Args = append(gotest.Args, "-covermode=atomic", "-cover")
+	}
+	gotest.Args = append(gotest.Args, packages...)
+	build.MustRun(gotest)
+}
+
+// Release Packaging
+
+func doArchive(cmdline []string) {
+	var (
+		atype = flag.String("type", "zip", "Type of archive to write (zip|tar)")
+		ext   string
+	)
+	flag.CommandLine.Parse(cmdline)
+	switch *atype {
+	case "zip":
+		ext = ".zip"
+	case "tar":
+		ext = ".tar.gz"
+	default:
+		log.Fatal("unknown archive type: ", *atype)
+	}
+	base := makeArchiveBasename()
+	if err := build.WriteArchive("geth-"+base, ext, gethArchiveFiles); err != nil {
+		log.Fatal(err)
+	}
+	if err := build.WriteArchive("geth-alltools-"+base, ext, allToolsArchiveFiles); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func makeArchiveBasename() string {
+	// date := time.Now().UTC().Format("200601021504")
+	platform := runtime.GOOS + "-" + runtime.GOARCH
+	archive := platform + "-" + build.VERSION()
+	if commit := build.GitCommit(); commit != "" {
+		archive += "-" + commit[:8]
+	}
+	return archive
+}
+
+// Debian Packaging
+
+// CLI entry point for Travis CI.
+func doTravisDebianSource(cmdline []string) {
+	flag.CommandLine.Parse(cmdline)
+
+	// Package only whitelisted branches.
+	switch {
+	case os.Getenv("TRAVIS_REPO_SLUG") != "ethereum/go-ethereum":
+		log.Printf("skipping because this is a fork build")
+		return
+	case os.Getenv("TRAVIS_PULL_REQUEST") != "false":
+		log.Printf("skipping because this is a PR build")
+		return
+	case os.Getenv("TRAVIS_BRANCH") != "develop" && !strings.HasPrefix(os.Getenv("TRAVIS_TAG"), "v1."):
+		log.Printf("skipping because branch %q tag %q is not on the whitelist",
+			os.Getenv("TRAVIS_BRANCH"),
+			os.Getenv("TRAVIS_TAG"))
+		return
+	}
+
+	// Import the signing key.
+	if b64key := os.Getenv("PPA_SIGNING_KEY"); b64key != "" {
+		key, err := base64.StdEncoding.DecodeString(b64key)
+		if err != nil {
+			log.Fatal("invalid base64 PPA_SIGNING_KEY")
+		}
+		gpg := exec.Command("gpg", "--import")
+		gpg.Stdin = bytes.NewReader(key)
+		build.MustRun(gpg)
+	}
+
+	// Assign unstable status to non-tag builds.
+	unstable := "true"
+	if os.Getenv("TRAVIS_BRANCH") != "develop" && os.Getenv("TRAVIS_TAG") != "" {
+		unstable = "false"
+	}
+
+	doDebianSource([]string{
+		"-signer", "Felix Lange (Geth CI Testing Key) <fjl@twurst.com>",
+		"-buildnum", os.Getenv("TRAVIS_BUILD_NUMBER"),
+		"-upload", "ppa:lp-fjl/geth-ci-testing",
+		"-unstable", unstable,
+	})
+}
+
+// CLI entry point for doing packaging locally.
+func doDebianSource(cmdline []string) {
+	var (
+		signer   = flag.String("signer", "", `Signing key name, also used as package author`)
+		upload   = flag.String("upload", "", `Where to upload the source package (usually "ppa:ethereum/ethereum")`)
+		buildnum = flag.String("buildnum", "", `Build number (included in version)`)
+		unstable = flag.Bool("unstable", false, `Use package name suffix "-unstable"`)
+		now      = time.Now()
+	)
+	flag.CommandLine.Parse(cmdline)
+
+	// Create the debian worktree in /tmp.
+	tmpdir, err := ioutil.TempDir("", "eth-deb-build-")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, distro := range debDistros {
+		meta := newDebMetadata(distro, *signer, *buildnum, *unstable, now)
+		pkgdir := stageDebianSource(tmpdir, meta)
+		debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
+		debuild.Dir = pkgdir
+		build.MustRun(debuild)
+
+		changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
+		changes = filepath.Join(tmpdir, changes)
+		if *signer != "" {
+			build.MustRunCommand("debsign", changes)
+		}
+		if *upload != "" {
+			build.MustRunCommand("dput", *upload, changes)
+		}
+	}
+}
+
+type debExecutable struct {
+	Name, Description string
+}
+
+type debMetadata struct {
+	// go-ethereum version being built. Note that this
+	// is not the debian package version. The package version
+	// is constructed by VersionString.
+	Version string
+
+	Author               string // "name <email>", also selects signing key
+	Buildnum             string // build number
+	Distro, Commit, Time string
+	Executables          []debExecutable
+	Unstable             bool
+}
+
+func newDebMetadata(distro, author, buildnum string, unstable bool, t time.Time) debMetadata {
+	if author == "" {
+		// No signing key, use default author.
+		author = "Ethereum Builds <fjl@ethereum.org>"
+	}
+	return debMetadata{
+		Unstable:    unstable,
+		Author:      author,
+		Distro:      distro,
+		Commit:      build.GitCommit(),
+		Version:     build.VERSION(),
+		Buildnum:    buildnum,
+		Time:        t.Format(time.RFC1123Z),
+		Executables: debExecutables,
+	}
+}
+
+// Name returns the name of the metapackage that depends
+// on all executable packages.
+func (meta debMetadata) Name() string {
+	if meta.Unstable {
+		return "ethereum-unstable"
+	}
+	return "ethereum"
+}
+
+// VersionString returns the debian version of the packages.
+func (meta debMetadata) VersionString() string {
+	vsn := meta.Version
+	if meta.Buildnum != "" {
+		vsn += "+build" + meta.Buildnum
+	}
+	if meta.Distro != "" {
+		vsn += "+" + meta.Distro
+	}
+	return vsn
+}
+
+// ExeList returns the list of all executable packages.
+func (meta debMetadata) ExeList() string {
+	names := make([]string, len(meta.Executables))
+	for i, e := range meta.Executables {
+		names[i] = meta.ExeName(e)
+	}
+	return strings.Join(names, ", ")
+}
+
+// ExeName returns the package name of an executable package.
+func (meta debMetadata) ExeName(exe debExecutable) string {
+	if meta.Unstable {
+		return exe.Name + "-unstable"
+	}
+	return exe.Name
+}
+
+// ExeConflicts returns the content of the Conflicts field
+// for executable packages.
+func (meta debMetadata) ExeConflicts(exe debExecutable) string {
+	if meta.Unstable {
+		// Set up the conflicts list so that the *-unstable packages
+		// cannot be installed alongside the regular version.
+		//
+		// https://www.debian.org/doc/debian-policy/ch-relationships.html
+		// is very explicit about Conflicts: and says that Breaks: should
+		// be preferred and the conflicting files should be handled via
+		// alternates. We might do this eventually but using a conflict is
+		// easier now.
+		return "ethereum, " + exe.Name
+	}
+	return ""
+}
+
+func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
+	pkg := meta.Name() + "-" + meta.VersionString()
+	pkgdir = filepath.Join(tmpdir, pkg)
+	if err := os.Mkdir(pkgdir, 0755); err != nil {
+		log.Fatal(err)
+	}
+
+	// Copy the source code.
+	build.MustRunCommand("git", "checkout-index", "-a", "--prefix", pkgdir+string(filepath.Separator))
+
+	// Put the debian build files in place.
+	debian := filepath.Join(pkgdir, "debian")
+	build.Render("build/deb.rules", filepath.Join(debian, "rules"), 0755, meta)
+	build.Render("build/deb.changelog", filepath.Join(debian, "changelog"), 0644, meta)
+	build.Render("build/deb.control", filepath.Join(debian, "control"), 0644, meta)
+	build.Render("build/deb.copyright", filepath.Join(debian, "copyright"), 0644, meta)
+	build.RenderString("8\n", filepath.Join(debian, "compat"), 0644, meta)
+	build.RenderString("3.0 (native)\n", filepath.Join(debian, "source/format"), 0644, meta)
+	for _, exe := range meta.Executables {
+		install := filepath.Join(debian, exe.Name+".install")
+		docs := filepath.Join(debian, exe.Name+".docs")
+		build.Render("build/deb.install", install, 0644, exe)
+		build.Render("build/deb.docs", docs, 0644, exe)
+	}
+
+	return pkgdir
+}
diff --git a/build/deb.changelog b/build/deb.changelog
new file mode 100644
index 000000000..a221f5470
--- /dev/null
+++ b/build/deb.changelog
@@ -0,0 +1,5 @@
+{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low
+
+  * git build of {{.Commit}}
+
+ -- {{.Author}}  {{.Time}}
diff --git a/build/deb.control b/build/deb.control
new file mode 100644
index 000000000..4a65c7fac
--- /dev/null
+++ b/build/deb.control
@@ -0,0 +1,25 @@
+Source: {{.Name}}
+Section: science
+Priority: extra
+Maintainer: {{.Author}}
+Build-Depends: debhelper (>= 8.0.0), golang-1.6
+Standards-Version: 3.9.5
+Homepage: https://ethereum.org
+Vcs-Git: git://github.com/ethereum/go-ethereum.git
+Vcs-Browser: https://github.com/ethereum/go-ethereum
+
+Package: {{.Name}}
+Architecture: any
+Depends: ${misc:Depends}, {{.ExeList}}
+Description: Meta-package to install geth and other tools
+ Meta-package to install geth and other tools
+ 
+{{range .Executables}}
+Package: {{$.ExeName .}}
+Conflicts: {{$.ExeConflicts .}}
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Built-Using: ${misc:Built-Using}
+Description: {{.Description}}
+ {{.Description}}
+{{end}}
diff --git a/build/deb.copyright b/build/deb.copyright
new file mode 100644
index 000000000..513be45b1
--- /dev/null
+++ b/build/deb.copyright
@@ -0,0 +1,14 @@
+Copyright 2016 The go-ethereum Authors
+
+go-ethereum is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+go-ethereum is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
diff --git a/build/deb.docs b/build/deb.docs
new file mode 100644
index 000000000..62deb0497
--- /dev/null
+++ b/build/deb.docs
@@ -0,0 +1 @@
+AUTHORS
diff --git a/build/deb.install b/build/deb.install
new file mode 100644
index 000000000..7dc76e1f5
--- /dev/null
+++ b/build/deb.install
@@ -0,0 +1 @@
+build/bin/{{.Name}} usr/bin
diff --git a/build/deb.rules b/build/deb.rules
new file mode 100644
index 000000000..3dfadb08d
--- /dev/null
+++ b/build/deb.rules
@@ -0,0 +1,13 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+override_dh_auto_build:
+	build/env.sh /usr/lib/go-1.6/bin/go run build/ci.go install -gitcommit {{.Commit}}
+
+override_dh_auto_test:
+
+%:
+	dh $@
diff --git a/build/env.sh b/build/env.sh
index 04401a3e1..c418dae44 100755
--- a/build/env.sh
+++ b/build/env.sh
@@ -20,9 +20,8 @@ fi
 
 # Set up the environment to use the workspace.
 # Also add Godeps workspace so we build using canned dependencies.
-GOPATH="$ethdir/go-ethereum/Godeps/_workspace:$workspace"
-GOBIN="$PWD/build/bin"
-export GOPATH GOBIN
+GOPATH="$workspace"
+export GOPATH
 
 # Run the command inside the workspace.
 cd "$ethdir/go-ethereum"
diff --git a/build/test-global-coverage.sh b/build/test-global-coverage.sh
deleted file mode 100755
index a51b6a9e5..000000000
--- a/build/test-global-coverage.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(find ./* -maxdepth 10 -type d -not -path "./build" -not -path "./Godeps/*" ); do
-    if ls $d/*.go &> /dev/null; then
-        go test  -coverprofile=profile.out -covermode=atomic $d
-        if [ -f profile.out ]; then
-            cat profile.out >> coverage.txt
-            echo '<<<<<< EOF' >> coverage.txt
-            rm profile.out
-        fi
-    fi
-done
diff --git a/build/win-ci-compile.bat b/build/win-ci-compile.bat
deleted file mode 100644
index 5750990bf..000000000
--- a/build/win-ci-compile.bat
+++ /dev/null
@@ -1,26 +0,0 @@
-@echo off
-if not exist .\build\win-ci-compile.bat (
-   echo This script must be run from the root of the repository.
-   exit /b
-)
-if not defined GOPATH (
-   echo GOPATH is not set.
-   exit /b
-)
-
-set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
-set GOBIN=%cd%\build\bin
-
-rem set gitCommit when running from a Git checkout.
-set goLinkFlags=""
-if exist ".git\HEAD" (
-   where /q git
-   if not errorlevel 1 (
-      for /f %%h in ('git rev-parse HEAD') do (
-          set goLinkFlags="-X main.gitCommit=%%h"
-      )
-   )
-)
-
-@echo on
-go install -v -ldflags %goLinkFlags% ./...
diff --git a/build/win-ci-test.bat b/build/win-ci-test.bat
deleted file mode 100644
index 5945426db..000000000
--- a/build/win-ci-test.bat
+++ /dev/null
@@ -1,15 +0,0 @@
-@echo off
-if not exist .\build\win-ci-test.bat (
-   echo This script must be run from the root of the repository.
-   exit /b
-)
-if not defined GOPATH (
-   echo GOPATH is not set.
-   exit /b
-)
-
-set GOPATH=%GOPATH%;%cd%\Godeps\_workspace
-set GOBIN=%cd%\build\bin
-
-@echo on
-go test ./...
diff --git a/internal/build/archive.go b/internal/build/archive.go
new file mode 100644
index 000000000..2a7090c0d
--- /dev/null
+++ b/internal/build/archive.go
@@ -0,0 +1,177 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package build
+
+import (
+	"archive/tar"
+	"archive/zip"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+type Archive interface {
+	// Directory adds a new directory entry to the archive and sets the
+	// directory for subsequent calls to Header.
+	Directory(name string) error
+
+	// Header adds a new file to the archive. The file is added to the directory
+	// set by Directory. The content of the file must be written to the returned
+	// writer.
+	Header(os.FileInfo) (io.Writer, error)
+
+	// Close flushes the archive and closes the underlying file.
+	Close() error
+}
+
+func NewArchive(file *os.File) Archive {
+	switch {
+	case strings.HasSuffix(file.Name(), ".zip"):
+		return NewZipArchive(file)
+	case strings.HasSuffix(file.Name(), ".tar.gz"):
+		return NewTarballArchive(file)
+	default:
+		return nil
+	}
+}
+
+// AddFile appends an existing file to an archive.
+func AddFile(a Archive, file string) error {
+	fd, err := os.Open(file)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+	fi, err := fd.Stat()
+	if err != nil {
+		return err
+	}
+	w, err := a.Header(fi)
+	if err != nil {
+		return err
+	}
+	if _, err := io.Copy(w, fd); err != nil {
+		return err
+	}
+	return nil
+}
+
+// WriteArchive creates an archive containing the given files.
+func WriteArchive(basename, ext string, files []string) error {
+	archfd, err := os.Create(basename + ext)
+	if err != nil {
+		return err
+	}
+	defer archfd.Close()
+	archive := NewArchive(archfd)
+	if archive == nil {
+		return fmt.Errorf("unknown archive extension: %s", ext)
+	}
+	fmt.Println(basename + ext)
+	if err := archive.Directory(basename); err != nil {
+		return err
+	}
+	for _, file := range files {
+		fmt.Println("   +", filepath.Base(file))
+		if err := AddFile(archive, file); err != nil {
+			return err
+		}
+	}
+	return archive.Close()
+}
+
+type ZipArchive struct {
+	dir  string
+	zipw *zip.Writer
+	file io.Closer
+}
+
+func NewZipArchive(w io.WriteCloser) Archive {
+	return &ZipArchive{"", zip.NewWriter(w), w}
+}
+
+func (a *ZipArchive) Directory(name string) error {
+	a.dir = name + "/"
+	return nil
+}
+
+func (a *ZipArchive) Header(fi os.FileInfo) (io.Writer, error) {
+	head, err := zip.FileInfoHeader(fi)
+	if err != nil {
+		return nil, fmt.Errorf("can't make zip header: %v", err)
+	}
+	head.Name = a.dir + head.Name
+	w, err := a.zipw.CreateHeader(head)
+	if err != nil {
+		return nil, fmt.Errorf("can't add zip header: %v", err)
+	}
+	return w, nil
+}
+
+func (a *ZipArchive) Close() error {
+	if err := a.zipw.Close(); err != nil {
+		return err
+	}
+	return a.file.Close()
+}
+
+type TarballArchive struct {
+	dir  string
+	tarw *tar.Writer
+	gzw  *gzip.Writer
+	file io.Closer
+}
+
+func NewTarballArchive(w io.WriteCloser) Archive {
+	gzw := gzip.NewWriter(w)
+	tarw := tar.NewWriter(gzw)
+	return &TarballArchive{"", tarw, gzw, w}
+}
+
+func (a *TarballArchive) Directory(name string) error {
+	a.dir = name + "/"
+	return a.tarw.WriteHeader(&tar.Header{
+		Name:     a.dir,
+		Mode:     0755,
+		Typeflag: tar.TypeDir,
+	})
+}
+
+func (a *TarballArchive) Header(fi os.FileInfo) (io.Writer, error) {
+	head, err := tar.FileInfoHeader(fi, "")
+	if err != nil {
+		return nil, fmt.Errorf("can't make tar header: %v", err)
+	}
+	head.Name = a.dir + head.Name
+	if err := a.tarw.WriteHeader(head); err != nil {
+		return nil, fmt.Errorf("can't add tar header: %v", err)
+	}
+	return a.tarw, nil
+}
+
+func (a *TarballArchive) Close() error {
+	if err := a.tarw.Close(); err != nil {
+		return err
+	}
+	if err := a.gzw.Close(); err != nil {
+		return err
+	}
+	return a.file.Close()
+}
diff --git a/internal/build/util.go b/internal/build/util.go
new file mode 100644
index 000000000..eead824b2
--- /dev/null
+++ b/internal/build/util.go
@@ -0,0 +1,122 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package build
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"text/template"
+)
+
+var (
+	DryRunFlag = flag.Bool("n", false, "dry run, don't execute commands")
+)
+
+// MustRun executes the given command and exits the host process for
+// any error.
+func MustRun(cmd *exec.Cmd) {
+	fmt.Println(">>>", strings.Join(cmd.Args, " "))
+	if !*DryRunFlag {
+		cmd.Stderr = os.Stderr
+		cmd.Stdout = os.Stdout
+		if err := cmd.Run(); err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
+func MustRunCommand(cmd string, args ...string) {
+	MustRun(exec.Command(cmd, args...))
+}
+
+// GOPATH returns the value that the GOPATH environment
+// variable should be set to.
+func GOPATH() string {
+	path := filepath.SplitList(os.Getenv("GOPATH"))
+	if len(path) == 0 {
+		log.Fatal("GOPATH is not set")
+	}
+	// Ensure Godeps workspace is present in the path.
+	godeps, _ := filepath.Abs(filepath.Join("Godeps", "_workspace"))
+	for _, dir := range path {
+		if dir == godeps {
+			return strings.Join(path, string(filepath.ListSeparator))
+		}
+	}
+	newpath := append(path[:1], godeps)
+	newpath = append(newpath, path[1:]...)
+	return strings.Join(newpath, string(filepath.ListSeparator))
+}
+
+func VERSION() string {
+	version, err := ioutil.ReadFile("VERSION")
+	if err != nil {
+		log.Fatal(err)
+	}
+	return string(bytes.TrimSpace(version))
+}
+
+func GitCommit() string {
+	return RunGit("rev-parse", "HEAD")
+}
+
+func RunGit(args ...string) string {
+	cmd := exec.Command("git", args...)
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout, cmd.Stderr = &stdout, &stderr
+	if err := cmd.Run(); err == exec.ErrNotFound {
+		log.Println("no git in PATH")
+		return ""
+	} else if err != nil {
+		log.Fatal(strings.Join(cmd.Args, " "), ": ", err, "\n", stderr.String())
+	}
+	return strings.TrimSpace(stdout.String())
+}
+
+// Render renders the given template file.
+func Render(templateFile, outputFile string, outputPerm os.FileMode, x interface{}) {
+	tpl := template.Must(template.ParseFiles(templateFile))
+	render(tpl, outputFile, outputPerm, x)
+}
+
+func RenderString(templateContent, outputFile string, outputPerm os.FileMode, x interface{}) {
+	tpl := template.Must(template.New("").Parse(templateContent))
+	render(tpl, outputFile, outputPerm, x)
+}
+
+func render(tpl *template.Template, outputFile string, outputPerm os.FileMode, x interface{}) {
+	if err := os.MkdirAll(filepath.Dir(outputFile), 0755); err != nil {
+		log.Fatal(err)
+	}
+	out, err := os.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY|os.O_EXCL, outputPerm)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := tpl.Execute(out, x); err != nil {
+		log.Fatal(err)
+	}
+	if err := out.Close(); err != nil {
+		log.Fatal(err)
+	}
+}
-- 
cgit v1.2.3


From 2c6214e846f244e847825418b7a3de2e9b03ee69 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Mon, 8 Aug 2016 13:41:55 +0300
Subject: [release/1.4.11] Makefile, build: move cross compilation into ci.go

(cherry picked from commit 8c23f20c68b59c5534ab249d23b452114ba75f74)
---
 Makefile       | 53 +++++++++++++++++++++++++----------------------------
 build/ci.go    | 32 ++++++++++++++++++++++++++++++++
 build/flags.sh | 22 ----------------------
 3 files changed, 57 insertions(+), 50 deletions(-)
 delete mode 100755 build/flags.sh

diff --git a/Makefile b/Makefile
index 148cb5758..4bcdab299 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 # with Go source code. If you know what GOPATH is then you probably
 # don't need to bother with make.
 
-.PHONY: geth geth-cross evm all test xgo clean
+.PHONY: geth geth-cross evm all test clean
 .PHONY: geth-linux geth-linux-386 geth-linux-amd64
 .PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
 .PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
@@ -33,9 +33,6 @@ clean:
 
 # Cross Compilation Targets (xgo)
 
-xgo:
-	build/env.sh go get github.com/karalabe/xgo
-
 geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
 	@echo "Full cross compilation done:"
 	@ls -ld $(GOBIN)/geth-*
@@ -44,13 +41,13 @@ geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm
 	@echo "Linux cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-*
 
-geth-linux-386: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/386 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-386:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/386 -v ./cmd/geth
 	@echo "Linux 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep 386
 
-geth-linux-amd64: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/amd64 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-amd64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/amd64 -v ./cmd/geth
 	@echo "Linux amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep amd64
 
@@ -58,23 +55,23 @@ geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-ar
 	@echo "Linux ARM cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm
 
-geth-linux-arm-5: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-5 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-arm-5:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-5 -v ./cmd/geth
 	@echo "Linux ARMv5 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
 
-geth-linux-arm-6: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-6 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-arm-6:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-6 -v ./cmd/geth
 	@echo "Linux ARMv6 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
 
-geth-linux-arm-7: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-7 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-arm-7:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm-7 -v ./cmd/geth
 	@echo "Linux ARMv7 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
 
-geth-linux-arm64: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm64 -v $(shell build/flags.sh) ./cmd/geth
+geth-linux-arm64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/arm64 -v ./cmd/geth
 	@echo "Linux ARM64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm64
 
@@ -82,13 +79,13 @@ geth-darwin: geth-darwin-386 geth-darwin-amd64
 	@echo "Darwin cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-*
 
-geth-darwin-386: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=darwin/386 -v $(shell build/flags.sh) ./cmd/geth
+geth-darwin-386:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=darwin/386 -v ./cmd/geth
 	@echo "Darwin 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep 386
 
-geth-darwin-amd64: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=darwin/amd64 -v $(shell build/flags.sh) ./cmd/geth
+geth-darwin-amd64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=darwin/amd64 -v ./cmd/geth
 	@echo "Darwin amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
 
@@ -96,22 +93,22 @@ geth-windows: geth-windows-386 geth-windows-amd64
 	@echo "Windows cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-*
 
-geth-windows-386: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=windows/386 -v $(shell build/flags.sh) ./cmd/geth
+geth-windows-386:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=windows/386 -v ./cmd/geth
 	@echo "Windows 386 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep 386
 
-geth-windows-amd64: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=windows/amd64 -v $(shell build/flags.sh) ./cmd/geth
+geth-windows-amd64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=windows/amd64 -v ./cmd/geth
 	@echo "Windows amd64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-windows-* | grep amd64
 
-geth-android: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=android-21/aar -v $(shell build/flags.sh) ./cmd/geth
+geth-android:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=android-21/aar -v ./cmd/geth
 	@echo "Android cross compilation done:"
 	@ls -ld $(GOBIN)/geth-android-*
 
-geth-ios: xgo
-	build/env.sh $(GOBIN)/xgo --go=$(GO) --dest=$(GOBIN) --targets=ios-7.0/framework -v $(shell build/flags.sh) ./cmd/geth
+geth-ios:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=ios-7.0/framework -v ./cmd/geth
 	@echo "iOS framework cross compilation done:"
 	@ls -ld $(GOBIN)/geth-ios-*
diff --git a/build/ci.go b/build/ci.go
index 33d97c182..3011a6976 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -28,6 +28,7 @@ Available commands are:
    archive    [ -type zip|tar ]                        -- archives build artefacts
    importkeys                                          -- imports signing keys from env
    debsrc     [ -sign key-id ] [ -upload dest ]        -- creates a debian source package
+   xgo        [ options ]                              -- cross builds according to options
 
 For all commands, -n prevents execution of external programs (dry run mode).
 
@@ -121,6 +122,8 @@ func main() {
 		doDebianSource(os.Args[2:])
 	case "travis-debsrc":
 		doTravisDebianSource(os.Args[2:])
+	case "xgo":
+		doXgo(os.Args[2:])
 	default:
 		log.Fatal("unknown command ", os.Args[1])
 	}
@@ -463,3 +466,32 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
 
 	return pkgdir
 }
+
+// Cross compilation
+
+func doXgo(cmdline []string) {
+	// Make sure xgo is available for cross compilation
+	gogetxgo := goTool("get", "github.com/karalabe/xgo")
+	build.MustRun(gogetxgo)
+
+	// Execute the actual cross compilation
+	pkg := cmdline[len(cmdline)-1]
+	args := append(cmdline[:len(cmdline)-1], makeBuildFlags("")...)
+
+	build.MustRun(xgoTool(append(args, pkg)...))
+}
+
+func xgoTool(args ...string) *exec.Cmd {
+	cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
+	cmd.Env = []string{
+		"GOPATH=" + build.GOPATH(),
+		"GOBIN=" + GOBIN,
+	}
+	for _, e := range os.Environ() {
+		if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
+			continue
+		}
+		cmd.Env = append(cmd.Env, e)
+	}
+	return cmd
+}
diff --git a/build/flags.sh b/build/flags.sh
deleted file mode 100755
index e021dbad4..000000000
--- a/build/flags.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-set -e
-
-if [ ! -f "build/env.sh" ]; then
-    echo "$0 must be run from the root of the repository."
-    exit 2
-fi
-
-# Since Go 1.5, the separator char for link time assignments
-# is '=' and using ' ' prints a warning. However, Go < 1.5 does
-# not support using '='.
-sep=$(go version | awk '{ if ($3 >= "go1.5" || index($3, "devel")) print "="; else print " "; }' -)
-
-# set gitCommit when running from a Git checkout.
-if [ -f ".git/HEAD" ]; then
-    echo "-ldflags '-X main.gitCommit$sep$(git rev-parse HEAD)'"
-fi
-
-if [ ! -z "$GO_OPENCL" ]; then
-   echo "-tags opencl"
-fi
-- 
cgit v1.2.3


From 65da8f601fb1445b12abca321a3036805c7600c2 Mon Sep 17 00:00:00 2001
From: Felix Lange <fjl@twurst.com>
Date: Thu, 21 Jul 2016 11:36:38 +0200
Subject: [release/1.4.11] eth, eth/downloader, eth/fetcher: delete eth/61 code

The eth/61 protocol was disabled in #2776, this commit removes its
message handlers and hash-chain sync logic.

(cherry picked from commit 016007bd25f2b5e597c2ac2f7256c4e73574f70e)

Conflicts:
	eth/handler.go
	eth/handler_test.go
---
 eth/downloader/downloader.go      | 667 ++++----------------------------------
 eth/downloader/downloader_test.go | 122 +------
 eth/downloader/metrics.go         |  10 -
 eth/downloader/peer.go            |  50 ---
 eth/downloader/queue.go           | 138 +-------
 eth/downloader/types.go           |  20 --
 eth/fetcher/fetcher.go            |  98 +-----
 eth/fetcher/fetcher_test.go       | 135 +-------
 eth/fetcher/metrics.go            |   3 -
 eth/handler.go                    | 131 +-------
 eth/handler_test.go               | 154 ---------
 eth/metrics.go                    |  26 +-
 eth/peer.go                       |  40 ---
 eth/protocol.go                   |  46 +--
 eth/protocol_test.go              |   3 -
 15 files changed, 119 insertions(+), 1524 deletions(-)

diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index a10253b8e..aee21122a 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -48,23 +48,17 @@ var (
 	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
 	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request
 
-	MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation
-
-	hashTTL        = 3 * time.Second     // [eth/61] Time it takes for a hash request to time out
-	blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
-	blockTTL       = 3 * blockTargetRTT  // [eth/61] Maximum time allowance before a block request is considered expired
-
-	rttMinEstimate   = 2 * time.Second  // Minimum round-trip time to target for download requests
-	rttMaxEstimate   = 20 * time.Second // Maximum rount-trip time to target for download requests
-	rttMinConfidence = 0.1              // Worse confidence factor in our estimated RTT value
-	ttlScaling       = 3                // Constant scaling factor for RTT -> TTL conversion
-	ttlLimit         = time.Minute      // Maximum TTL allowance to prevent reaching crazy timeouts
+	MaxForkAncestry  = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation
+	rttMinEstimate   = 2 * time.Second                   // Minimum round-trip time to target for download requests
+	rttMaxEstimate   = 20 * time.Second                  // Maximum rount-trip time to target for download requests
+	rttMinConfidence = 0.1                               // Worse confidence factor in our estimated RTT value
+	ttlScaling       = 3                                 // Constant scaling factor for RTT -> TTL conversion
+	ttlLimit         = time.Minute                       // Maximum TTL allowance to prevent reaching crazy timeouts
 
 	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
 	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
 	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value
 
-	maxQueuedHashes   = 32 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
 	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
 	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
 	maxResultsProcess = 2048      // Number of content download results to import at once into the chain
@@ -84,16 +78,13 @@ var (
 	errStallingPeer            = errors.New("peer is stalling")
 	errNoPeers                 = errors.New("no peers to keep download active")
 	errTimeout                 = errors.New("timeout")
-	errEmptyHashSet            = errors.New("empty hash set by peer")
 	errEmptyHeaderSet          = errors.New("empty header set by peer")
 	errPeersUnavailable        = errors.New("no peers available or all tried for download")
-	errAlreadyInPool           = errors.New("hash already in pool")
 	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
 	errInvalidChain            = errors.New("retrieved hash chain is invalid")
 	errInvalidBlock            = errors.New("retrieved block is invalid")
 	errInvalidBody             = errors.New("retrieved block body is invalid")
 	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
-	errCancelHashFetch         = errors.New("hash download canceled (requested)")
 	errCancelBlockFetch        = errors.New("block download canceled (requested)")
 	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
 	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
@@ -102,6 +93,7 @@ var (
 	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
 	errCancelContentProcessing = errors.New("content processing canceled (requested)")
 	errNoSyncActive            = errors.New("no sync active")
+	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
 )
 
 type Downloader struct {
@@ -146,13 +138,10 @@ type Downloader struct {
 
 	// Channels
 	newPeerCh     chan *peer
-	hashCh        chan dataPack        // [eth/61] Channel receiving inbound hashes
-	blockCh       chan dataPack        // [eth/61] Channel receiving inbound blocks
 	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
 	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
 	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
 	stateCh       chan dataPack        // [eth/63] Channel receiving inbound node state data
-	blockWakeCh   chan bool            // [eth/61] Channel to signal the block fetcher of new tasks
 	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
 	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
 	stateWakeCh   chan bool            // [eth/63] Channel to signal the state fetcher of new tasks
@@ -199,13 +188,10 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, ha
 		rollback:         rollback,
 		dropPeer:         dropPeer,
 		newPeerCh:        make(chan *peer, 1),
-		hashCh:           make(chan dataPack, 1),
-		blockCh:          make(chan dataPack, 1),
 		headerCh:         make(chan dataPack, 1),
 		bodyCh:           make(chan dataPack, 1),
 		receiptCh:        make(chan dataPack, 1),
 		stateCh:          make(chan dataPack, 1),
-		blockWakeCh:      make(chan bool, 1),
 		bodyWakeCh:       make(chan bool, 1),
 		receiptWakeCh:    make(chan bool, 1),
 		stateWakeCh:      make(chan bool, 1),
@@ -251,12 +237,11 @@ func (d *Downloader) Synchronising() bool {
 // RegisterPeer injects a new download peer into the set of block source to be
 // used for fetching hashes and blocks from.
 func (d *Downloader) RegisterPeer(id string, version int, head common.Hash,
-	getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
 	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
 
 	glog.V(logger.Detail).Infoln("Registering peer", id)
-	if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
+	if err := d.peers.Register(newPeer(id, version, head, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
 		glog.V(logger.Error).Infoln("Register failed:", err)
 		return err
 	}
@@ -291,7 +276,9 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
 	case errBusy:
 		glog.V(logger.Detail).Infof("Synchronisation already in progress")
 
-	case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidAncestor, errInvalidChain:
+	case errTimeout, errBadPeer, errStallingPeer,
+		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
+		errInvalidAncestor, errInvalidChain:
 		glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
 		d.dropPeer(id)
 
@@ -323,13 +310,13 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 	d.queue.Reset()
 	d.peers.Reset()
 
-	for _, ch := range []chan bool{d.blockWakeCh, d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
+	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
 		select {
 		case <-ch:
 		default:
 		}
 	}
-	for _, ch := range []chan dataPack{d.hashCh, d.blockCh, d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
+	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
 		for empty := false; !empty; {
 			select {
 			case <-ch:
@@ -377,105 +364,73 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 			d.mux.Post(DoneEvent{})
 		}
 	}()
+	if p.version < 62 {
+		return errTooOld
+	}
 
 	glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
 	defer func(start time.Time) {
 		glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
 	}(time.Now())
 
-	switch {
-	case p.version == 61:
-		// Look up the sync boundaries: the common ancestor and the target block
-		latest, err := d.fetchHeight61(p)
-		if err != nil {
-			return err
-		}
-		origin, err := d.findAncestor61(p, latest)
-		if err != nil {
-			return err
-		}
-		d.syncStatsLock.Lock()
-		if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
-			d.syncStatsChainOrigin = origin
-		}
-		d.syncStatsChainHeight = latest
-		d.syncStatsLock.Unlock()
+	// Look up the sync boundaries: the common ancestor and the target block
+	latest, err := d.fetchHeight(p)
+	if err != nil {
+		return err
+	}
+	height := latest.Number.Uint64()
 
-		// Initiate the sync using a concurrent hash and block retrieval algorithm
-		d.queue.Prepare(origin+1, d.mode, 0, nil)
-		if d.syncInitHook != nil {
-			d.syncInitHook(origin, latest)
-		}
-		return d.spawnSync(origin+1,
-			func() error { return d.fetchHashes61(p, td, origin+1) },
-			func() error { return d.fetchBlocks61(origin + 1) },
-		)
-
-	case p.version >= 62:
-		// Look up the sync boundaries: the common ancestor and the target block
-		latest, err := d.fetchHeight(p)
-		if err != nil {
-			return err
-		}
-		height := latest.Number.Uint64()
+	origin, err := d.findAncestor(p, height)
+	if err != nil {
+		return err
+	}
+	d.syncStatsLock.Lock()
+	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
+		d.syncStatsChainOrigin = origin
+	}
+	d.syncStatsChainHeight = height
+	d.syncStatsLock.Unlock()
 
-		origin, err := d.findAncestor(p, height)
-		if err != nil {
-			return err
-		}
-		d.syncStatsLock.Lock()
-		if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
-			d.syncStatsChainOrigin = origin
-		}
-		d.syncStatsChainHeight = height
-		d.syncStatsLock.Unlock()
-
-		// Initiate the sync using a concurrent header and content retrieval algorithm
-		pivot := uint64(0)
-		switch d.mode {
-		case LightSync:
-			pivot = height
-		case FastSync:
-			// Calculate the new fast/slow sync pivot point
-			if d.fsPivotLock == nil {
-				pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
-				if err != nil {
-					panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
-				}
-				if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
-					pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
-				}
-			} else {
-				// Pivot point locked in, use this and do not pick a new one!
-				pivot = d.fsPivotLock.Number.Uint64()
+	// Initiate the sync using a concurrent header and content retrieval algorithm
+	pivot := uint64(0)
+	switch d.mode {
+	case LightSync:
+		pivot = height
+	case FastSync:
+		// Calculate the new fast/slow sync pivot point
+		if d.fsPivotLock == nil {
+			pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
+			if err != nil {
+				panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
 			}
-			// If the point is below the origin, move origin back to ensure state download
-			if pivot < origin {
-				if pivot > 0 {
-					origin = pivot - 1
-				} else {
-					origin = 0
-				}
+			if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
+				pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
 			}
-			glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
+		} else {
+			// Pivot point locked in, use this and do not pick a new one!
+			pivot = d.fsPivotLock.Number.Uint64()
 		}
-		d.queue.Prepare(origin+1, d.mode, pivot, latest)
-		if d.syncInitHook != nil {
-			d.syncInitHook(origin, height)
+		// If the point is below the origin, move origin back to ensure state download
+		if pivot < origin {
+			if pivot > 0 {
+				origin = pivot - 1
+			} else {
+				origin = 0
+			}
 		}
-		return d.spawnSync(origin+1,
-			func() error { return d.fetchHeaders(p, origin+1) },    // Headers are always retrieved
-			func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved
-			func() error { return d.fetchBodies(origin + 1) },      // Bodies are retrieved during normal and fast sync
-			func() error { return d.fetchReceipts(origin + 1) },    // Receipts are retrieved during fast sync
-			func() error { return d.fetchNodeData() },              // Node state data is retrieved during fast sync
-		)
-
-	default:
-		// Something very wrong, stop right here
-		glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
-		return errBadPeer
+		glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
+	}
+	d.queue.Prepare(origin+1, d.mode, pivot, latest)
+	if d.syncInitHook != nil {
+		d.syncInitHook(origin, height)
 	}
+	return d.spawnSync(origin+1,
+		func() error { return d.fetchHeaders(p, origin+1) },    // Headers are always retrieved
+		func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved
+		func() error { return d.fetchBodies(origin + 1) },      // Bodies are retrieved during normal and fast sync
+		func() error { return d.fetchReceipts(origin + 1) },    // Receipts are retrieved during fast sync
+		func() error { return d.fetchNodeData() },              // Node state data is retrieved during fast sync
+	)
 }
 
 // spawnSync runs d.process and all given fetcher functions to completion in
@@ -540,452 +495,6 @@ func (d *Downloader) Terminate() {
 	d.cancel()
 }
 
-// fetchHeight61 retrieves the head block of the remote peer to aid in estimating
-// the total time a pending synchronisation would take.
-func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
-	glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
-
-	// Request the advertised remote head block and wait for the response
-	go p.getBlocks([]common.Hash{p.head})
-
-	timeout := time.After(hashTTL)
-	for {
-		select {
-		case <-d.cancelCh:
-			return 0, errCancelBlockFetch
-
-		case packet := <-d.blockCh:
-			// Discard anything not from the origin peer
-			if packet.PeerId() != p.id {
-				glog.V(logger.Debug).Infof("Received blocks from incorrect peer(%s)", packet.PeerId())
-				break
-			}
-			// Make sure the peer actually gave something valid
-			blocks := packet.(*blockPack).blocks
-			if len(blocks) != 1 {
-				glog.V(logger.Debug).Infof("%v: invalid number of head blocks: %d != 1", p, len(blocks))
-				return 0, errBadPeer
-			}
-			return blocks[0].NumberU64(), nil
-
-		case <-timeout:
-			glog.V(logger.Debug).Infof("%v: head block timeout", p)
-			return 0, errTimeout
-
-		case <-d.hashCh:
-			// Out of bounds hashes received, ignore them
-
-		case <-d.headerCh:
-		case <-d.bodyCh:
-		case <-d.stateCh:
-		case <-d.receiptCh:
-			// Ignore eth/{62,63} packets because this is eth/61.
-			// These can arrive as a late delivery from a previous sync.
-		}
-	}
-}
-
-// findAncestor61 tries to locate the common ancestor block of the local chain and
-// a remote peers blockchain. In the general case when our node was in sync and
-// on the correct chain, checking the top N blocks should already get us a match.
-// In the rare scenario when we ended up on a long reorganisation (i.e. none of
-// the head blocks match), we do a binary search to find the common ancestor.
-func (d *Downloader) findAncestor61(p *peer, height uint64) (uint64, error) {
-	glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
-
-	// Figure out the valid ancestor range to prevent rewrite attacks
-	floor, ceil := int64(-1), d.headBlock().NumberU64()
-	if ceil >= MaxForkAncestry {
-		floor = int64(ceil - MaxForkAncestry)
-	}
-	// Request the topmost blocks to short circuit binary ancestor lookup
-	head := ceil
-	if head > height {
-		head = height
-	}
-	from := int64(head) - int64(MaxHashFetch) + 1
-	if from < 0 {
-		from = 0
-	}
-	go p.getAbsHashes(uint64(from), MaxHashFetch)
-
-	// Wait for the remote response to the head fetch
-	number, hash := uint64(0), common.Hash{}
-	timeout := time.After(hashTTL)
-
-	for finished := false; !finished; {
-		select {
-		case <-d.cancelCh:
-			return 0, errCancelHashFetch
-
-		case packet := <-d.hashCh:
-			// Discard anything not from the origin peer
-			if packet.PeerId() != p.id {
-				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
-				break
-			}
-			// Make sure the peer actually gave something valid
-			hashes := packet.(*hashPack).hashes
-			if len(hashes) == 0 {
-				glog.V(logger.Debug).Infof("%v: empty head hash set", p)
-				return 0, errEmptyHashSet
-			}
-			// Check if a common ancestor was found
-			finished = true
-			for i := len(hashes) - 1; i >= 0; i-- {
-				// Skip any headers that underflow/overflow our requested set
-				header := d.getHeader(hashes[i])
-				if header == nil || header.Number.Int64() < from || header.Number.Uint64() > head {
-					continue
-				}
-				// Otherwise check if we already know the header or not
-				if d.hasBlockAndState(hashes[i]) {
-					number, hash = header.Number.Uint64(), header.Hash()
-					break
-				}
-			}
-
-		case <-timeout:
-			glog.V(logger.Debug).Infof("%v: head hash timeout", p)
-			return 0, errTimeout
-
-		case <-d.blockCh:
-			// Out of bounds blocks received, ignore them
-
-		case <-d.headerCh:
-		case <-d.bodyCh:
-		case <-d.stateCh:
-		case <-d.receiptCh:
-			// Ignore eth/{62,63} packets because this is eth/61.
-			// These can arrive as a late delivery from a previous sync.
-		}
-	}
-	// If the head fetch already found an ancestor, return
-	if !common.EmptyHash(hash) {
-		if int64(number) <= floor {
-			glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
-			return 0, errInvalidAncestor
-		}
-		glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
-		return number, nil
-	}
-	// Ancestor not found, we need to binary search over our chain
-	start, end := uint64(0), head
-	if floor > 0 {
-		start = uint64(floor)
-	}
-	for start+1 < end {
-		// Split our chain interval in two, and request the hash to cross check
-		check := (start + end) / 2
-
-		timeout := time.After(hashTTL)
-		go p.getAbsHashes(uint64(check), 1)
-
-		// Wait until a reply arrives to this request
-		for arrived := false; !arrived; {
-			select {
-			case <-d.cancelCh:
-				return 0, errCancelHashFetch
-
-			case packet := <-d.hashCh:
-				// Discard anything not from the origin peer
-				if packet.PeerId() != p.id {
-					glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
-					break
-				}
-				// Make sure the peer actually gave something valid
-				hashes := packet.(*hashPack).hashes
-				if len(hashes) != 1 {
-					glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes))
-					return 0, errBadPeer
-				}
-				arrived = true
-
-				// Modify the search interval based on the response
-				if !d.hasBlockAndState(hashes[0]) {
-					end = check
-					break
-				}
-				block := d.getBlock(hashes[0]) // this doesn't check state, hence the above explicit check
-				if block.NumberU64() != check {
-					glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
-					return 0, errBadPeer
-				}
-				start = check
-
-			case <-timeout:
-				glog.V(logger.Debug).Infof("%v: search hash timeout", p)
-				return 0, errTimeout
-
-			case <-d.blockCh:
-				// Out of bounds blocks received, ignore them
-
-			case <-d.headerCh:
-			case <-d.bodyCh:
-			case <-d.stateCh:
-			case <-d.receiptCh:
-				// Ignore eth/{62,63} packets because this is eth/61.
-				// These can arrive as a late delivery from a previous sync.
-			}
-		}
-	}
-	// Ensure valid ancestry and return
-	if int64(start) <= floor {
-		glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
-		return 0, errInvalidAncestor
-	}
-	glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
-	return start, nil
-}
-
-// fetchHashes61 keeps retrieving hashes from the requested number, until no more
-// are returned, potentially throttling on the way.
-func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
-	glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)
-
-	// Create a timeout timer, and the associated hash fetcher
-	request := time.Now()       // time of the last fetch request
-	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
-	<-timeout.C                 // timeout channel should be initially empty
-	defer timeout.Stop()
-
-	getHashes := func(from uint64) {
-		glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)
-
-		request = time.Now()
-		timeout.Reset(hashTTL)
-		go p.getAbsHashes(from, MaxHashFetch)
-	}
-	// Start pulling hashes, until all are exhausted
-	getHashes(from)
-	gotHashes := false
-
-	for {
-		select {
-		case <-d.cancelCh:
-			return errCancelHashFetch
-
-		case packet := <-d.hashCh:
-			// Make sure the active peer is giving us the hashes
-			if packet.PeerId() != p.id {
-				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
-				break
-			}
-			hashReqTimer.UpdateSince(request)
-			timeout.Stop()
-
-			// If no more hashes are inbound, notify the block fetcher and return
-			if packet.Items() == 0 {
-				glog.V(logger.Debug).Infof("%v: no available hashes", p)
-
-				select {
-				case d.blockWakeCh <- false:
-				case <-d.cancelCh:
-				}
-				// If no hashes were retrieved at all, the peer violated it's TD promise that it had a
-				// better chain compared to ours. The only exception is if it's promised blocks were
-				// already imported by other means (e.g. fetcher):
-				//
-				// R <remote peer>, L <local node>: Both at block 10
-				// R: Mine block 11, and propagate it to L
-				// L: Queue block 11 for import
-				// L: Notice that R's head and TD increased compared to ours, start sync
-				// L: Import of block 11 finishes
-				// L: Sync begins, and finds common ancestor at 11
-				// L: Request new hashes up from 11 (R's TD was higher, it must have something)
-				// R: Nothing to give
-				if !gotHashes && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
-					return errStallingPeer
-				}
-				return nil
-			}
-			gotHashes = true
-			hashes := packet.(*hashPack).hashes
-
-			// Otherwise insert all the new hashes, aborting in case of junk
-			glog.V(logger.Detail).Infof("%v: scheduling %d hashes from #%d", p, len(hashes), from)
-
-			inserts := d.queue.Schedule61(hashes, true)
-			if len(inserts) != len(hashes) {
-				glog.V(logger.Debug).Infof("%v: stale hashes", p)
-				return errBadPeer
-			}
-			// Notify the block fetcher of new hashes, but stop if queue is full
-			if d.queue.PendingBlocks() < maxQueuedHashes {
-				// We still have hashes to fetch, send continuation wake signal (potential)
-				select {
-				case d.blockWakeCh <- true:
-				default:
-				}
-			} else {
-				// Hash limit reached, send a termination wake signal (enforced)
-				select {
-				case d.blockWakeCh <- false:
-				case <-d.cancelCh:
-				}
-				return nil
-			}
-			// Queue not yet full, fetch the next batch
-			from += uint64(len(hashes))
-			getHashes(from)
-
-		case <-timeout.C:
-			glog.V(logger.Debug).Infof("%v: hash request timed out", p)
-			hashTimeoutMeter.Mark(1)
-			return errTimeout
-
-		case <-d.headerCh:
-		case <-d.bodyCh:
-		case <-d.stateCh:
-		case <-d.receiptCh:
-			// Ignore eth/{62,63} packets because this is eth/61.
-			// These can arrive as a late delivery from a previous sync.
-		}
-	}
-}
-
-// fetchBlocks61 iteratively downloads the scheduled hashes, taking any available
-// peers, reserving a chunk of blocks for each, waiting for delivery and also
-// periodically checking for timeouts.
-func (d *Downloader) fetchBlocks61(from uint64) error {
-	glog.V(logger.Debug).Infof("Downloading blocks from #%d", from)
-	defer glog.V(logger.Debug).Infof("Block download terminated")
-
-	// Create a timeout timer for scheduling expiration tasks
-	ticker := time.NewTicker(100 * time.Millisecond)
-	defer ticker.Stop()
-
-	update := make(chan struct{}, 1)
-
-	// Fetch blocks until the hash fetcher's done
-	finished := false
-	for {
-		select {
-		case <-d.cancelCh:
-			return errCancelBlockFetch
-
-		case packet := <-d.blockCh:
-			// If the peer was previously banned and failed to deliver it's pack
-			// in a reasonable time frame, ignore it's message.
-			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
-				blocks := packet.(*blockPack).blocks
-
-				// Deliver the received chunk of blocks and check chain validity
-				accepted, err := d.queue.DeliverBlocks(peer.id, blocks)
-				if err == errInvalidChain {
-					return err
-				}
-				// Unless a peer delivered something completely else than requested (usually
-				// caused by a timed out request which came through in the end), set it to
-				// idle. If the delivery's stale, the peer should have already been idled.
-				if err != errStaleDelivery {
-					peer.SetBlocksIdle(accepted)
-				}
-				// Issue a log to the user to see what's going on
-				switch {
-				case err == nil && len(blocks) == 0:
-					glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
-				case err == nil:
-					glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blocks))
-				default:
-					glog.V(logger.Detail).Infof("%s: delivery failed: %v", peer, err)
-				}
-			}
-			// Blocks arrived, try to update the progress
-			select {
-			case update <- struct{}{}:
-			default:
-			}
-
-		case cont := <-d.blockWakeCh:
-			// The hash fetcher sent a continuation flag, check if it's done
-			if !cont {
-				finished = true
-			}
-			// Hashes arrive, try to update the progress
-			select {
-			case update <- struct{}{}:
-			default:
-			}
-
-		case <-ticker.C:
-			// Sanity check update the progress
-			select {
-			case update <- struct{}{}:
-			default:
-			}
-
-		case <-update:
-			// Short circuit if we lost all our peers
-			if d.peers.Len() == 0 {
-				return errNoPeers
-			}
-			// Check for block request timeouts and demote the responsible peers
-			for pid, fails := range d.queue.ExpireBlocks(blockTTL) {
-				if peer := d.peers.Peer(pid); peer != nil {
-					if fails > 1 {
-						glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
-						peer.SetBlocksIdle(0)
-					} else {
-						glog.V(logger.Debug).Infof("%s: stalling block delivery, dropping", peer)
-						d.dropPeer(pid)
-					}
-				}
-			}
-			// If there's nothing more to fetch, wait or terminate
-			if d.queue.PendingBlocks() == 0 {
-				if !d.queue.InFlightBlocks() && finished {
-					glog.V(logger.Debug).Infof("Block fetching completed")
-					return nil
-				}
-				break
-			}
-			// Send a download request to all idle peers, until throttled
-			throttled := false
-			idles, total := d.peers.BlockIdlePeers()
-
-			for _, peer := range idles {
-				// Short circuit if throttling activated
-				if d.queue.ShouldThrottleBlocks() {
-					throttled = true
-					break
-				}
-				// Reserve a chunk of hashes for a peer. A nil can mean either that
-				// no more hashes are available, or that the peer is known not to
-				// have them.
-				request := d.queue.ReserveBlocks(peer, peer.BlockCapacity(blockTargetRTT))
-				if request == nil {
-					continue
-				}
-				if glog.V(logger.Detail) {
-					glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
-				}
-				// Fetch the chunk and make sure any errors return the hashes to the queue
-				if err := peer.Fetch61(request); err != nil {
-					// Although we could try and make an attempt to fix this, this error really
-					// means that we've double allocated a fetch task to a peer. If that is the
-					// case, the internal state of the downloader and the queue is very wrong so
-					// better hard crash and note the error instead of silently accumulating into
-					// a much bigger issue.
-					panic(fmt.Sprintf("%v: fetch assignment failed", peer))
-				}
-			}
-			// Make sure that we have peers available for fetching. If all peers have been tried
-			// and all failed throw an error
-			if !throttled && !d.queue.InFlightBlocks() && len(idles) == total {
-				return errPeersUnavailable
-			}
-
-		case <-d.headerCh:
-		case <-d.bodyCh:
-		case <-d.stateCh:
-		case <-d.receiptCh:
-			// Ignore eth/{62,63} packets because this is eth/61.
-			// These can arrive as a late delivery from a previous sync.
-		}
-	}
-}
-
 // fetchHeight retrieves the head header of the remote peer to aid in estimating
 // the total time a pending synchronisation would take.
 func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
@@ -1022,11 +531,6 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 		case <-d.stateCh:
 		case <-d.receiptCh:
 			// Out of bounds delivery, ignore
-
-		case <-d.hashCh:
-		case <-d.blockCh:
-			// Ignore eth/61 packets because this is eth/62+.
-			// These can arrive as a late delivery from a previous sync.
 		}
 	}
 }
@@ -1067,7 +571,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	for finished := false; !finished; {
 		select {
 		case <-d.cancelCh:
-			return 0, errCancelHashFetch
+			return 0, errCancelHeaderFetch
 
 		case packet := <-d.headerCh:
 			// Discard anything not from the origin peer
@@ -1114,11 +618,6 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 		case <-d.stateCh:
 		case <-d.receiptCh:
 			// Out of bounds delivery, ignore
-
-		case <-d.hashCh:
-		case <-d.blockCh:
-			// Ignore eth/61 packets because this is eth/62+.
-			// These can arrive as a late delivery from a previous sync.
 		}
 	}
 	// If the head fetch already found an ancestor, return
@@ -1146,7 +645,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 		for arrived := false; !arrived; {
 			select {
 			case <-d.cancelCh:
-				return 0, errCancelHashFetch
+				return 0, errCancelHeaderFetch
 
 			case packer := <-d.headerCh:
 				// Discard anything not from the origin peer
@@ -1182,11 +681,6 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			case <-d.stateCh:
 			case <-d.receiptCh:
 				// Out of bounds delivery, ignore
-
-			case <-d.hashCh:
-			case <-d.blockCh:
-				// Ignore eth/61 packets because this is eth/62+.
-				// These can arrive as a late delivery from a previous sync.
 			}
 		}
 	}
@@ -1305,11 +799,6 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 			case <-d.cancelCh:
 			}
 			return errBadPeer
-
-		case <-d.hashCh:
-		case <-d.blockCh:
-			// Ignore eth/61 packets because this is eth/62+.
-			// These can arrive as a late delivery from a previous sync.
 		}
 	}
 }
@@ -1630,11 +1119,6 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
 				return errPeersUnavailable
 			}
-
-		case <-d.hashCh:
-		case <-d.blockCh:
-			// Ignore eth/61 packets because this is eth/62+.
-			// These can arrive as a late delivery from a previous sync.
 		}
 	}
 }
@@ -1874,19 +1358,6 @@ func (d *Downloader) processContent() error {
 	}
 }
 
-// DeliverHashes injects a new batch of hashes received from a remote node into
-// the download schedule. This is usually invoked through the BlockHashesMsg by
-// the protocol handler.
-func (d *Downloader) DeliverHashes(id string, hashes []common.Hash) (err error) {
-	return d.deliver(id, d.hashCh, &hashPack{id, hashes}, hashInMeter, hashDropMeter)
-}
-
-// DeliverBlocks injects a new batch of blocks received from a remote node.
-// This is usually invoked through the BlocksMsg by the protocol handler.
-func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) (err error) {
-	return d.deliver(id, d.blockCh, &blockPack{id, blocks}, blockInMeter, blockDropMeter)
-}
-
 // DeliverHeaders injects a new batch of block headers received from a remote
 // node into the download schedule.
 func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index fac6ef81c..4ca28091c 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -399,14 +399,12 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
 
 	var err error
 	switch version {
-	case 61:
-		err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil, nil, nil)
 	case 62:
-		err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
+		err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
 	case 63:
-		err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
+		err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
 	case 64:
-		err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
+		err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
 	}
 	if err == nil {
 		// Assign the owned hashes, headers and blocks to the peer (deep copy)
@@ -465,86 +463,6 @@ func (dl *downloadTester) dropPeer(id string) {
 	dl.downloader.UnregisterPeer(id)
 }
 
-// peerGetRelHashesFn constructs a GetHashes function associated with a specific
-// peer in the download tester. The returned function can be used to retrieve
-// batches of hashes from the particularly requested peer.
-func (dl *downloadTester) peerGetRelHashesFn(id string, delay time.Duration) func(head common.Hash) error {
-	return func(head common.Hash) error {
-		time.Sleep(delay)
-
-		dl.lock.RLock()
-		defer dl.lock.RUnlock()
-
-		// Gather the next batch of hashes
-		hashes := dl.peerHashes[id]
-		result := make([]common.Hash, 0, MaxHashFetch)
-		for i, hash := range hashes {
-			if hash == head {
-				i++
-				for len(result) < cap(result) && i < len(hashes) {
-					result = append(result, hashes[i])
-					i++
-				}
-				break
-			}
-		}
-		// Delay delivery a bit to allow attacks to unfold
-		go func() {
-			time.Sleep(time.Millisecond)
-			dl.downloader.DeliverHashes(id, result)
-		}()
-		return nil
-	}
-}
-
-// peerGetAbsHashesFn constructs a GetHashesFromNumber function associated with
-// a particular peer in the download tester. The returned function can be used to
-// retrieve batches of hashes from the particularly requested peer.
-func (dl *downloadTester) peerGetAbsHashesFn(id string, delay time.Duration) func(uint64, int) error {
-	return func(head uint64, count int) error {
-		time.Sleep(delay)
-
-		dl.lock.RLock()
-		defer dl.lock.RUnlock()
-
-		// Gather the next batch of hashes
-		hashes := dl.peerHashes[id]
-		result := make([]common.Hash, 0, count)
-		for i := 0; i < count && len(hashes)-int(head)-1-i >= 0; i++ {
-			result = append(result, hashes[len(hashes)-int(head)-1-i])
-		}
-		// Delay delivery a bit to allow attacks to unfold
-		go func() {
-			time.Sleep(time.Millisecond)
-			dl.downloader.DeliverHashes(id, result)
-		}()
-		return nil
-	}
-}
-
-// peerGetBlocksFn constructs a getBlocks function associated with a particular
-// peer in the download tester. The returned function can be used to retrieve
-// batches of blocks from the particularly requested peer.
-func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error {
-	return func(hashes []common.Hash) error {
-		time.Sleep(delay)
-
-		dl.lock.RLock()
-		defer dl.lock.RUnlock()
-
-		blocks := dl.peerBlocks[id]
-		result := make([]*types.Block, 0, len(hashes))
-		for _, hash := range hashes {
-			if block, ok := blocks[hash]; ok {
-				result = append(result, block)
-			}
-		}
-		go dl.downloader.DeliverBlocks(id, result)
-
-		return nil
-	}
-}
-
 // peerGetRelHeadersFn constructs a GetBlockHeaders function based on a hashed
 // origin; associated with a particular peer in the download tester. The returned
 // function can be used to retrieve batches of headers from the particular peer.
@@ -730,7 +648,6 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
 // Tests that simple synchronization against a canonical chain works correctly.
 // In this test common ancestor lookup should be short circuited and not require
 // binary searching.
-func TestCanonicalSynchronisation61(t *testing.T)      { testCanonicalSynchronisation(t, 61, FullSync) }
 func TestCanonicalSynchronisation62(t *testing.T)      { testCanonicalSynchronisation(t, 62, FullSync) }
 func TestCanonicalSynchronisation63Full(t *testing.T)  { testCanonicalSynchronisation(t, 63, FullSync) }
 func TestCanonicalSynchronisation63Fast(t *testing.T)  { testCanonicalSynchronisation(t, 63, FastSync) }
@@ -759,7 +676,6 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
 
 // Tests that if a large batch of blocks are being downloaded, it is throttled
 // until the cached blocks are retrieved.
-func TestThrottling61(t *testing.T)     { testThrottling(t, 61, FullSync) }
 func TestThrottling62(t *testing.T)     { testThrottling(t, 62, FullSync) }
 func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
 func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
@@ -845,7 +761,6 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
 // Tests that simple synchronization against a forked chain works correctly. In
 // this test common ancestor lookup should *not* be short circuited, and a full
 // binary search should be executed.
-func TestForkedSync61(t *testing.T)      { testForkedSync(t, 61, FullSync) }
 func TestForkedSync62(t *testing.T)      { testForkedSync(t, 62, FullSync) }
 func TestForkedSync63Full(t *testing.T)  { testForkedSync(t, 63, FullSync) }
 func TestForkedSync63Fast(t *testing.T)  { testForkedSync(t, 63, FastSync) }
@@ -881,7 +796,6 @@ func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
 
 // Tests that synchronising against a much shorter but much heavyer fork works
 // corrently and is not dropped.
-func TestHeavyForkedSync61(t *testing.T)      { testHeavyForkedSync(t, 61, FullSync) }
 func TestHeavyForkedSync62(t *testing.T)      { testHeavyForkedSync(t, 62, FullSync) }
 func TestHeavyForkedSync63Full(t *testing.T)  { testHeavyForkedSync(t, 63, FullSync) }
 func TestHeavyForkedSync63Fast(t *testing.T)  { testHeavyForkedSync(t, 63, FastSync) }
@@ -915,24 +829,9 @@ func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
 	assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
 }
 
-// Tests that an inactive downloader will not accept incoming hashes and blocks.
-func TestInactiveDownloader61(t *testing.T) {
-	t.Parallel()
-	tester := newTester()
-
-	// Check that neither hashes nor blocks are accepted
-	if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive {
-		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
-	}
-	if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive {
-		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
-	}
-}
-
 // Tests that chain forks are contained within a certain interval of the current
 // chain head, ensuring that malicious peers cannot waste resources by feeding
 // long dead chains.
-func TestBoundedForkedSync61(t *testing.T)      { testBoundedForkedSync(t, 61, FullSync) }
 func TestBoundedForkedSync62(t *testing.T)      { testBoundedForkedSync(t, 62, FullSync) }
 func TestBoundedForkedSync63Full(t *testing.T)  { testBoundedForkedSync(t, 63, FullSync) }
 func TestBoundedForkedSync63Fast(t *testing.T)  { testBoundedForkedSync(t, 63, FastSync) }
@@ -968,7 +867,6 @@ func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
 // Tests that chain forks are contained within a certain interval of the current
 // chain head for short but heavy forks too. These are a bit special because they
 // take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync61(t *testing.T)      { testBoundedHeavyForkedSync(t, 61, FullSync) }
 func TestBoundedHeavyForkedSync62(t *testing.T)      { testBoundedHeavyForkedSync(t, 62, FullSync) }
 func TestBoundedHeavyForkedSync63Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FullSync) }
 func TestBoundedHeavyForkedSync63Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 63, FastSync) }
@@ -1039,7 +937,6 @@ func TestInactiveDownloader63(t *testing.T) {
 }
 
 // Tests that a canceled download wipes all previously accumulated state.
-func TestCancel61(t *testing.T)      { testCancel(t, 61, FullSync) }
 func TestCancel62(t *testing.T)      { testCancel(t, 62, FullSync) }
 func TestCancel63Full(t *testing.T)  { testCancel(t, 63, FullSync) }
 func TestCancel63Fast(t *testing.T)  { testCancel(t, 63, FastSync) }
@@ -1081,7 +978,6 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
 }
 
 // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation61(t *testing.T)      { testMultiSynchronisation(t, 61, FullSync) }
 func TestMultiSynchronisation62(t *testing.T)      { testMultiSynchronisation(t, 62, FullSync) }
 func TestMultiSynchronisation63Full(t *testing.T)  { testMultiSynchronisation(t, 63, FullSync) }
 func TestMultiSynchronisation63Fast(t *testing.T)  { testMultiSynchronisation(t, 63, FastSync) }
@@ -1112,7 +1008,6 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
 
 // Tests that synchronisations behave well in multi-version protocol environments
 // and not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation61(t *testing.T)      { testMultiProtoSync(t, 61, FullSync) }
 func TestMultiProtoSynchronisation62(t *testing.T)      { testMultiProtoSync(t, 62, FullSync) }
 func TestMultiProtoSynchronisation63Full(t *testing.T)  { testMultiProtoSync(t, 63, FullSync) }
 func TestMultiProtoSynchronisation63Fast(t *testing.T)  { testMultiProtoSync(t, 63, FastSync) }
@@ -1131,7 +1026,6 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
 	tester := newTester()
 	defer tester.terminate()
 
-	tester.newPeer("peer 61", 61, hashes, nil, blocks, nil)
 	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
 	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
 	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
@@ -1143,7 +1037,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
 	assertOwnChain(t, tester, targetBlocks+1)
 
 	// Check that no peers have been dropped off
-	for _, version := range []int{61, 62, 63, 64} {
+	for _, version := range []int{62, 63, 64} {
 		peer := fmt.Sprintf("peer %d", version)
 		if _, ok := tester.peerHashes[peer]; !ok {
 			t.Errorf("%s dropped", peer)
@@ -1368,7 +1262,6 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
 
 // Tests that a peer advertising an high TD doesn't get to stall the downloader
 // afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack61(t *testing.T)      { testHighTDStarvationAttack(t, 61, FullSync) }
 func TestHighTDStarvationAttack62(t *testing.T)      { testHighTDStarvationAttack(t, 62, FullSync) }
 func TestHighTDStarvationAttack63Full(t *testing.T)  { testHighTDStarvationAttack(t, 63, FullSync) }
 func TestHighTDStarvationAttack63Fast(t *testing.T)  { testHighTDStarvationAttack(t, 63, FastSync) }
@@ -1391,7 +1284,6 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
 }
 
 // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping61(t *testing.T) { testBlockHeaderAttackerDropping(t, 61) }
 func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
 func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
 func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
@@ -1409,7 +1301,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
 		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
 		{errNoPeers, false},                 // No peers to download from, soft race, no issue
 		{errTimeout, true},                  // No hashes received in due time, drop the peer
-		{errEmptyHashSet, true},             // No hashes were returned as a response, drop as it's a dead end
 		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
 		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
 		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
@@ -1417,7 +1308,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
 		{errInvalidBlock, false},            // A bad peer was detected, but not the sync origin
 		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
 		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
-		{errCancelHashFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
 		{errCancelBlockFetch, false},        // Synchronisation was canceled, origin may be innocent, don't drop
 		{errCancelHeaderFetch, false},       // Synchronisation was canceled, origin may be innocent, don't drop
 		{errCancelBodyFetch, false},         // Synchronisation was canceled, origin may be innocent, don't drop
@@ -1450,7 +1340,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
 
 // Tests that synchronisation progress (origin block number, current block number
 // and highest block number) is tracked and updated correctly.
-func TestSyncProgress61(t *testing.T)      { testSyncProgress(t, 61, FullSync) }
 func TestSyncProgress62(t *testing.T)      { testSyncProgress(t, 62, FullSync) }
 func TestSyncProgress63Full(t *testing.T)  { testSyncProgress(t, 63, FullSync) }
 func TestSyncProgress63Fast(t *testing.T)  { testSyncProgress(t, 63, FastSync) }
@@ -1524,7 +1413,6 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 // Tests that synchronisation progress (origin block number and highest block
 // number) is tracked and updated correctly in case of a fork (or manual head
 // revertal).
-func TestForkedSyncProgress61(t *testing.T)      { testForkedSyncProgress(t, 61, FullSync) }
 func TestForkedSyncProgress62(t *testing.T)      { testForkedSyncProgress(t, 62, FullSync) }
 func TestForkedSyncProgress63Full(t *testing.T)  { testForkedSyncProgress(t, 63, FullSync) }
 func TestForkedSyncProgress63Fast(t *testing.T)  { testForkedSyncProgress(t, 63, FastSync) }
@@ -1601,7 +1489,6 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 // Tests that if synchronisation is aborted due to some failure, then the progress
 // origin is not updated in the next sync cycle, as it should be considered the
 // continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress61(t *testing.T)      { testFailedSyncProgress(t, 61, FullSync) }
 func TestFailedSyncProgress62(t *testing.T)      { testFailedSyncProgress(t, 62, FullSync) }
 func TestFailedSyncProgress63Full(t *testing.T)  { testFailedSyncProgress(t, 63, FullSync) }
 func TestFailedSyncProgress63Fast(t *testing.T)  { testFailedSyncProgress(t, 63, FastSync) }
@@ -1679,7 +1566,6 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 
 // Tests that if an attacker fakes a chain height, after the attack is detected,
 // the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress61(t *testing.T)      { testFakedSyncProgress(t, 61, FullSync) }
 func TestFakedSyncProgress62(t *testing.T)      { testFakedSyncProgress(t, 62, FullSync) }
 func TestFakedSyncProgress63Full(t *testing.T)  { testFakedSyncProgress(t, 63, FullSync) }
 func TestFakedSyncProgress63Fast(t *testing.T)  { testFakedSyncProgress(t, 63, FastSync) }
diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go
index d6fcfa25c..0d76c7dfd 100644
--- a/eth/downloader/metrics.go
+++ b/eth/downloader/metrics.go
@@ -23,16 +23,6 @@ import (
 )
 
 var (
-	hashInMeter      = metrics.NewMeter("eth/downloader/hashes/in")
-	hashReqTimer     = metrics.NewTimer("eth/downloader/hashes/req")
-	hashDropMeter    = metrics.NewMeter("eth/downloader/hashes/drop")
-	hashTimeoutMeter = metrics.NewMeter("eth/downloader/hashes/timeout")
-
-	blockInMeter      = metrics.NewMeter("eth/downloader/blocks/in")
-	blockReqTimer     = metrics.NewTimer("eth/downloader/blocks/req")
-	blockDropMeter    = metrics.NewMeter("eth/downloader/blocks/drop")
-	blockTimeoutMeter = metrics.NewMeter("eth/downloader/blocks/timeout")
-
 	headerInMeter      = metrics.NewMeter("eth/downloader/headers/in")
 	headerReqTimer     = metrics.NewTimer("eth/downloader/headers/req")
 	headerDropMeter    = metrics.NewMeter("eth/downloader/headers/drop")
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 94d44fca4..c2b7a52d0 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -37,11 +37,6 @@ const (
 	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
 )
 
-// Hash and block fetchers belonging to eth/61 and below
-type relativeHashFetcherFn func(common.Hash) error
-type absoluteHashFetcherFn func(uint64, int) error
-type blockFetcherFn func([]common.Hash) error
-
 // Block header and body fetchers belonging to eth/62 and above
 type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
 type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
@@ -79,10 +74,6 @@ type peer struct {
 
 	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)
 
-	getRelHashes relativeHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an origin hash
-	getAbsHashes absoluteHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an absolute position
-	getBlocks    blockFetcherFn        // [eth/61] Method to retrieve a batch of blocks
-
 	getRelHeaders  relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
 	getAbsHeaders  absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
 	getBlockBodies blockBodyFetcherFn      // [eth/62] Method to retrieve a batch of block bodies
@@ -97,7 +88,6 @@ type peer struct {
 // newPeer create a new downloader peer, with specific hash and block retrieval
 // mechanisms.
 func newPeer(id string, version int, head common.Hash,
-	getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
 	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer {
 	return &peer{
@@ -105,10 +95,6 @@ func newPeer(id string, version int, head common.Hash,
 		head:    head,
 		lacking: make(map[common.Hash]struct{}),
 
-		getRelHashes: getRelHashes,
-		getAbsHashes: getAbsHashes,
-		getBlocks:    getBlocks,
-
 		getRelHeaders:  getRelHeaders,
 		getAbsHeaders:  getAbsHeaders,
 		getBlockBodies: getBlockBodies,
@@ -138,28 +124,6 @@ func (p *peer) Reset() {
 	p.lacking = make(map[common.Hash]struct{})
 }
 
-// Fetch61 sends a block retrieval request to the remote peer.
-func (p *peer) Fetch61(request *fetchRequest) error {
-	// Sanity check the protocol version
-	if p.version != 61 {
-		panic(fmt.Sprintf("block fetch [eth/61] requested on eth/%d", p.version))
-	}
-	// Short circuit if the peer is already fetching
-	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
-		return errAlreadyFetching
-	}
-	p.blockStarted = time.Now()
-
-	// Convert the hash set to a retrievable slice
-	hashes := make([]common.Hash, 0, len(request.Hashes))
-	for hash, _ := range request.Hashes {
-		hashes = append(hashes, hash)
-	}
-	go p.getBlocks(hashes)
-
-	return nil
-}
-
 // FetchHeaders sends a header retrieval request to the remote peer.
 func (p *peer) FetchHeaders(from uint64, count int) error {
 	// Sanity check the protocol version
@@ -481,20 +445,6 @@ func (ps *peerSet) AllPeers() []*peer {
 	return list
 }
 
-// BlockIdlePeers retrieves a flat list of all the currently idle peers within the
-// active peer set, ordered by their reputation.
-func (ps *peerSet) BlockIdlePeers() ([]*peer, int) {
-	idle := func(p *peer) bool {
-		return atomic.LoadInt32(&p.blockIdle) == 0
-	}
-	throughput := func(p *peer) float64 {
-		p.lock.RLock()
-		defer p.lock.RUnlock()
-		return p.blockThroughput
-	}
-	return ps.idlePeers(61, 61, idle, throughput)
-}
-
 // HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
 // within the active peer set, ordered by their reputation.
 func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) {
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 01897af6d..fd239f7e4 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -45,7 +45,6 @@ var (
 
 var (
 	errNoFetchesPending = errors.New("no fetches pending")
-	errStateSyncPending = errors.New("state trie sync already scheduled")
 	errStaleDelivery    = errors.New("stale delivery")
 )
 
@@ -74,10 +73,6 @@ type queue struct {
 	mode          SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
 	fastSyncPivot uint64   // Block number where the fast sync pivots into archive synchronisation mode
 
-	hashPool    map[common.Hash]int // [eth/61] Pending hashes, mapping to their insertion index (priority)
-	hashQueue   *prque.Prque        // [eth/61] Priority queue of the block hashes to fetch
-	hashCounter int                 // [eth/61] Counter indexing the added hashes to ensure retrieval order
-
 	headerHead common.Hash // [eth/62] Hash of the last queued header to verify order
 
 	// Headers are "special", they download in batches, supported by a skeleton chain
@@ -85,7 +80,6 @@ type queue struct {
 	headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
 	headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
 	headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
-	headerDonePool  map[uint64]struct{}            // [eth/62] Set of the completed header fetches
 	headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
 	headerProced    int                            // [eth/62] Number of headers already processed from the results
 	headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
@@ -124,8 +118,6 @@ type queue struct {
 func newQueue(stateDb ethdb.Database) *queue {
 	lock := new(sync.Mutex)
 	return &queue{
-		hashPool:         make(map[common.Hash]int),
-		hashQueue:        prque.New(),
 		headerPendPool:   make(map[string]*fetchRequest),
 		headerContCh:     make(chan bool),
 		blockTaskPool:    make(map[common.Hash]*types.Header),
@@ -158,10 +150,6 @@ func (q *queue) Reset() {
 	q.mode = FullSync
 	q.fastSyncPivot = 0
 
-	q.hashPool = make(map[common.Hash]int)
-	q.hashQueue.Reset()
-	q.hashCounter = 0
-
 	q.headerHead = common.Hash{}
 
 	q.headerPendPool = make(map[string]*fetchRequest)
@@ -208,7 +196,7 @@ func (q *queue) PendingBlocks() int {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	return q.hashQueue.Size() + q.blockTaskQueue.Size()
+	return q.blockTaskQueue.Size()
 }
 
 // PendingReceipts retrieves the number of block receipts pending for retrieval.
@@ -272,7 +260,7 @@ func (q *queue) Idle() bool {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	queued := q.hashQueue.Size() + q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
+	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
 	pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool)
 	cached := len(q.blockDonePool) + len(q.receiptDonePool)
 
@@ -323,34 +311,6 @@ func (q *queue) ShouldThrottleReceipts() bool {
 	return pending >= len(q.resultCache)-len(q.receiptDonePool)
 }
 
-// Schedule61 adds a set of hashes for the download queue for scheduling, returning
-// the new hashes encountered.
-func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-
-	// Insert all the hashes prioritised in the arrival order
-	inserts := make([]common.Hash, 0, len(hashes))
-	for _, hash := range hashes {
-		// Skip anything we already have
-		if old, ok := q.hashPool[hash]; ok {
-			glog.V(logger.Warn).Infof("Hash %x already scheduled at index %v", hash, old)
-			continue
-		}
-		// Update the counters and insert the hash
-		q.hashCounter = q.hashCounter + 1
-		inserts = append(inserts, hash)
-
-		q.hashPool[hash] = q.hashCounter
-		if fifo {
-			q.hashQueue.Push(hash, -float32(q.hashCounter)) // Lowest gets schedules first
-		} else {
-			q.hashQueue.Push(hash, float32(q.hashCounter)) // Highest gets schedules first
-		}
-	}
-	return inserts
-}
-
 // ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
 // up an already retrieved header skeleton.
 func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
@@ -550,15 +510,6 @@ func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
 	return request
 }
 
-// ReserveBlocks reserves a set of block hashes for the given peer, skipping any
-// previously failed download.
-func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-
-	return q.reserveHashes(p, count, q.hashQueue, nil, q.blockPendPool, len(q.resultCache)-len(q.blockDonePool))
-}
-
 // ReserveNodeData reserves a set of node data hashes for the given peer, skipping
 // any previously failed download.
 func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest {
@@ -753,11 +704,6 @@ func (q *queue) CancelHeaders(request *fetchRequest) {
 	q.cancel(request, q.headerTaskQueue, q.headerPendPool)
 }
 
-// CancelBlocks aborts a fetch request, returning all pending hashes to the queue.
-func (q *queue) CancelBlocks(request *fetchRequest) {
-	q.cancel(request, q.hashQueue, q.blockPendPool)
-}
-
 // CancelBodies aborts a body fetch request, returning all pending headers to the
 // task queue.
 func (q *queue) CancelBodies(request *fetchRequest) {
@@ -801,9 +747,6 @@ func (q *queue) Revoke(peerId string) {
 	defer q.lock.Unlock()
 
 	if request, ok := q.blockPendPool[peerId]; ok {
-		for hash, index := range request.Hashes {
-			q.hashQueue.Push(hash, float32(index))
-		}
 		for _, header := range request.Headers {
 			q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
 		}
@@ -832,15 +775,6 @@ func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
 	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
 }
 
-// ExpireBlocks checks for in flight requests that exceeded a timeout allowance,
-// canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireBlocks(timeout time.Duration) map[string]int {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-
-	return q.expire(timeout, q.blockPendPool, q.hashQueue, blockTimeoutMeter)
-}
-
 // ExpireBodies checks for in flight block body requests that exceeded a timeout
 // allowance, canceling them and returning the responsible peers for penalisation.
 func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
@@ -907,74 +841,6 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
 	return expiries
 }
 
-// DeliverBlocks injects a block retrieval response into the download queue. The
-// method returns the number of blocks accepted from the delivery and also wakes
-// any threads waiting for data delivery.
-func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-
-	// Short circuit if the blocks were never requested
-	request := q.blockPendPool[id]
-	if request == nil {
-		return 0, errNoFetchesPending
-	}
-	blockReqTimer.UpdateSince(request.Time)
-	delete(q.blockPendPool, id)
-
-	// If no blocks were retrieved, mark them as unavailable for the origin peer
-	if len(blocks) == 0 {
-		for hash, _ := range request.Hashes {
-			request.Peer.MarkLacking(hash)
-		}
-	}
-	// Iterate over the downloaded blocks and add each of them
-	accepted, errs := 0, make([]error, 0)
-	for _, block := range blocks {
-		// Skip any blocks that were not requested
-		hash := block.Hash()
-		if _, ok := request.Hashes[hash]; !ok {
-			errs = append(errs, fmt.Errorf("non-requested block %x", hash))
-			continue
-		}
-		// Reconstruct the next result if contents match up
-		index := int(block.Number().Int64() - int64(q.resultOffset))
-		if index >= len(q.resultCache) || index < 0 {
-			errs = []error{errInvalidChain}
-			break
-		}
-		q.resultCache[index] = &fetchResult{
-			Header:       block.Header(),
-			Transactions: block.Transactions(),
-			Uncles:       block.Uncles(),
-		}
-		q.blockDonePool[block.Hash()] = struct{}{}
-
-		delete(request.Hashes, hash)
-		delete(q.hashPool, hash)
-		accepted++
-	}
-	// Return all failed or missing fetches to the queue
-	for hash, index := range request.Hashes {
-		q.hashQueue.Push(hash, float32(index))
-	}
-	// Wake up WaitResults
-	if accepted > 0 {
-		q.active.Signal()
-	}
-	// If none of the blocks were good, it's a stale delivery
-	switch {
-	case len(errs) == 0:
-		return accepted, nil
-	case len(errs) == 1 && (errs[0] == errInvalidChain || errs[0] == errInvalidBlock):
-		return accepted, errs[0]
-	case len(errs) == len(blocks):
-		return accepted, errStaleDelivery
-	default:
-		return accepted, fmt.Errorf("multiple failures: %v", errs)
-	}
-}
-
 // DeliverHeaders injects a header retrieval response into the header results
 // cache. This method either accepts all headers it received, or none of them
 // if they do not map correctly to the skeleton.
diff --git a/eth/downloader/types.go b/eth/downloader/types.go
index b67fff1f8..e10510486 100644
--- a/eth/downloader/types.go
+++ b/eth/downloader/types.go
@@ -73,26 +73,6 @@ type dataPack interface {
 	Stats() string
 }
 
-// hashPack is a batch of block hashes returned by a peer (eth/61).
-type hashPack struct {
-	peerId string
-	hashes []common.Hash
-}
-
-func (p *hashPack) PeerId() string { return p.peerId }
-func (p *hashPack) Items() int     { return len(p.hashes) }
-func (p *hashPack) Stats() string  { return fmt.Sprintf("%d", len(p.hashes)) }
-
-// blockPack is a batch of blocks returned by a peer (eth/61).
-type blockPack struct {
-	peerId string
-	blocks []*types.Block
-}
-
-func (p *blockPack) PeerId() string { return p.peerId }
-func (p *blockPack) Items() int     { return len(p.blocks) }
-func (p *blockPack) Stats() string  { return fmt.Sprintf("%d", len(p.blocks)) }
-
 // headerPack is a batch of block headers returned by a peer.
 type headerPack struct {
 	peerId  string
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index 9300717c3..bd235bb9e 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -48,9 +48,6 @@ var (
 // blockRetrievalFn is a callback type for retrieving a block from the local chain.
 type blockRetrievalFn func(common.Hash) *types.Block
 
-// blockRequesterFn is a callback type for sending a block retrieval request.
-type blockRequesterFn func([]common.Hash) error
-
 // headerRequesterFn is a callback type for sending a header retrieval request.
 type headerRequesterFn func(common.Hash) error
 
@@ -82,7 +79,6 @@ type announce struct {
 
 	origin string // Identifier of the peer originating the notification
 
-	fetch61     blockRequesterFn  // [eth/61] Fetcher function to retrieve an announced block
 	fetchHeader headerRequesterFn // [eth/62] Fetcher function to retrieve the header of an announced block
 	fetchBodies bodyRequesterFn   // [eth/62] Fetcher function to retrieve the body of an announced block
 }
@@ -191,14 +187,12 @@ func (f *Fetcher) Stop() {
 // Notify announces the fetcher of the potential availability of a new block in
 // the network.
 func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
-	blockFetcher blockRequesterFn, // eth/61 specific whole block fetcher
 	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
 	block := &announce{
 		hash:        hash,
 		number:      number,
 		time:        time,
 		origin:      peer,
-		fetch61:     blockFetcher,
 		fetchHeader: headerFetcher,
 		fetchBodies: bodyFetcher,
 	}
@@ -224,34 +218,6 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
 	}
 }
 
-// FilterBlocks extracts all the blocks that were explicitly requested by the fetcher,
-// returning those that should be handled differently.
-func (f *Fetcher) FilterBlocks(blocks types.Blocks) types.Blocks {
-	glog.V(logger.Detail).Infof("[eth/61] filtering %d blocks", len(blocks))
-
-	// Send the filter channel to the fetcher
-	filter := make(chan []*types.Block)
-
-	select {
-	case f.blockFilter <- filter:
-	case <-f.quit:
-		return nil
-	}
-	// Request the filtering of the block list
-	select {
-	case filter <- blocks:
-	case <-f.quit:
-		return nil
-	}
-	// Retrieve the blocks remaining after filtering
-	select {
-	case blocks := <-filter:
-		return blocks
-	case <-f.quit:
-		return nil
-	}
-}
-
 // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
 // returning those that should be handled differently.
 func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
@@ -413,7 +379,7 @@ func (f *Fetcher) loop() {
 					}
 				}
 			}
-			// Send out all block (eth/61) or header (eth/62) requests
+			// Send out all block header requests
 			for peer, hashes := range request {
 				if glog.V(logger.Detail) && len(hashes) > 0 {
 					list := "["
@@ -421,29 +387,17 @@ func (f *Fetcher) loop() {
 						list += fmt.Sprintf("%x…, ", hash[:4])
 					}
 					list = list[:len(list)-2] + "]"
-
-					if f.fetching[hashes[0]].fetch61 != nil {
-						glog.V(logger.Detail).Infof("[eth/61] Peer %s: fetching blocks %s", peer, list)
-					} else {
-						glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
-					}
+					glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
 				}
 				// Create a closure of the fetch and schedule in on a new thread
-				fetchBlocks, fetchHeader, hashes := f.fetching[hashes[0]].fetch61, f.fetching[hashes[0]].fetchHeader, hashes
+				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
 				go func() {
 					if f.fetchingHook != nil {
 						f.fetchingHook(hashes)
 					}
-					if fetchBlocks != nil {
-						// Use old eth/61 protocol to retrieve whole blocks
-						blockFetchMeter.Mark(int64(len(hashes)))
-						fetchBlocks(hashes)
-					} else {
-						// Use new eth/62 protocol to retrieve headers first
-						for _, hash := range hashes {
-							headerFetchMeter.Mark(1)
-							fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
-						}
+					for _, hash := range hashes {
+						headerFetchMeter.Mark(1)
+						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
 					}
 				}()
 			}
@@ -486,46 +440,6 @@ func (f *Fetcher) loop() {
 			// Schedule the next fetch if blocks are still pending
 			f.rescheduleComplete(completeTimer)
 
-		case filter := <-f.blockFilter:
-			// Blocks arrived, extract any explicit fetches, return all else
-			var blocks types.Blocks
-			select {
-			case blocks = <-filter:
-			case <-f.quit:
-				return
-			}
-			blockFilterInMeter.Mark(int64(len(blocks)))
-
-			explicit, download := []*types.Block{}, []*types.Block{}
-			for _, block := range blocks {
-				hash := block.Hash()
-
-				// Filter explicitly requested blocks from hash announcements
-				if f.fetching[hash] != nil && f.queued[hash] == nil {
-					// Discard if already imported by other means
-					if f.getBlock(hash) == nil {
-						explicit = append(explicit, block)
-					} else {
-						f.forgetHash(hash)
-					}
-				} else {
-					download = append(download, block)
-				}
-			}
-
-			blockFilterOutMeter.Mark(int64(len(download)))
-			select {
-			case filter <- download:
-			case <-f.quit:
-				return
-			}
-			// Schedule the retrieved blocks for ordered import
-			for _, block := range explicit {
-				if announce := f.fetching[block.Hash()]; announce != nil {
-					f.enqueue(announce.origin, block)
-				}
-			}
-
 		case filter := <-f.headerFilter:
 			// Headers arrived from a remote peer. Extract those that were explicitly
 			// requested by the fetcher, and return everything else so it's delivered
diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/fetcher_test.go
index 6a32be14c..ad955a577 100644
--- a/eth/fetcher/fetcher_test.go
+++ b/eth/fetcher/fetcher_test.go
@@ -151,28 +151,6 @@ func (f *fetcherTester) dropPeer(peer string) {
 	f.drops[peer] = true
 }
 
-// makeBlockFetcher retrieves a block fetcher associated with a simulated peer.
-func (f *fetcherTester) makeBlockFetcher(blocks map[common.Hash]*types.Block) blockRequesterFn {
-	closure := make(map[common.Hash]*types.Block)
-	for hash, block := range blocks {
-		closure[hash] = block
-	}
-	// Create a function that returns blocks from the closure
-	return func(hashes []common.Hash) error {
-		// Gather the blocks to return
-		blocks := make([]*types.Block, 0, len(hashes))
-		for _, hash := range hashes {
-			if block, ok := closure[hash]; ok {
-				blocks = append(blocks, block)
-			}
-		}
-		// Return on a new thread
-		go f.fetcher.FilterBlocks(blocks)
-
-		return nil
-	}
-}
-
 // makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer.
 func (f *fetcherTester) makeHeaderFetcher(blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {
 	closure := make(map[common.Hash]*types.Block)
@@ -293,7 +271,6 @@ func verifyImportDone(t *testing.T, imported chan *types.Block) {
 
 // Tests that a fetcher accepts block announcements and initiates retrievals for
 // them, successfully importing into the local chain.
-func TestSequentialAnnouncements61(t *testing.T) { testSequentialAnnouncements(t, 61) }
 func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) }
 func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) }
 func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) }
@@ -304,7 +281,6 @@ func testSequentialAnnouncements(t *testing.T, protocol int) {
 	hashes, blocks := makeChain(targetBlocks, 0, genesis)
 
 	tester := newTester()
-	blockFetcher := tester.makeBlockFetcher(blocks)
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
 
@@ -313,11 +289,7 @@ func testSequentialAnnouncements(t *testing.T, protocol int) {
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
 
 	for i := len(hashes) - 2; i >= 0; i-- {
-		if protocol < 62 {
-			tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
-		} else {
-			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
-		}
+		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 		verifyImportEvent(t, imported, true)
 	}
 	verifyImportDone(t, imported)
@@ -325,7 +297,6 @@ func testSequentialAnnouncements(t *testing.T, protocol int) {
 
 // Tests that if blocks are announced by multiple peers (or even the same buggy
 // peer), they will only get downloaded at most once.
-func TestConcurrentAnnouncements61(t *testing.T) { testConcurrentAnnouncements(t, 61) }
 func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) }
 func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) }
 func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) }
@@ -337,15 +308,10 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
 
 	// Assemble a tester with a built in counter for the requests
 	tester := newTester()
-	blockFetcher := tester.makeBlockFetcher(blocks)
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
 
 	counter := uint32(0)
-	blockWrapper := func(hashes []common.Hash) error {
-		atomic.AddUint32(&counter, uint32(len(hashes)))
-		return blockFetcher(hashes)
-	}
 	headerWrapper := func(hash common.Hash) error {
 		atomic.AddUint32(&counter, 1)
 		return headerFetcher(hash)
@@ -355,15 +321,9 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
 
 	for i := len(hashes) - 2; i >= 0; i-- {
-		if protocol < 62 {
-			tester.fetcher.Notify("first", hashes[i], 0, time.Now().Add(-arriveTimeout), blockWrapper, nil, nil)
-			tester.fetcher.Notify("second", hashes[i], 0, time.Now().Add(-arriveTimeout+time.Millisecond), blockWrapper, nil, nil)
-			tester.fetcher.Notify("second", hashes[i], 0, time.Now().Add(-arriveTimeout-time.Millisecond), blockWrapper, nil, nil)
-		} else {
-			tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerWrapper, bodyFetcher)
-			tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), nil, headerWrapper, bodyFetcher)
-			tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), nil, headerWrapper, bodyFetcher)
-		}
+		tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
+		tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), headerWrapper, bodyFetcher)
+		tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), headerWrapper, bodyFetcher)
 		verifyImportEvent(t, imported, true)
 	}
 	verifyImportDone(t, imported)
@@ -376,7 +336,6 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
 
 // Tests that announcements arriving while a previous is being fetched still
 // results in a valid import.
-func TestOverlappingAnnouncements61(t *testing.T) { testOverlappingAnnouncements(t, 61) }
 func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) }
 func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) }
 func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) }
@@ -387,7 +346,6 @@ func testOverlappingAnnouncements(t *testing.T, protocol int) {
 	hashes, blocks := makeChain(targetBlocks, 0, genesis)
 
 	tester := newTester()
-	blockFetcher := tester.makeBlockFetcher(blocks)
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
 
@@ -400,11 +358,7 @@ func testOverlappingAnnouncements(t *testing.T, protocol int) {
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
 
 	for i := len(hashes) - 2; i >= 0; i-- {
-		if protocol < 62 {
-			tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
-		} else {
-			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
-		}
+		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 		select {
 		case <-imported:
 		case <-time.After(time.Second):
@@ -416,7 +370,6 @@ func testOverlappingAnnouncements(t *testing.T, protocol int) {
 }
 
 // Tests that announces already being retrieved will not be duplicated.
-func TestPendingDeduplication61(t *testing.T) { testPendingDeduplication(t, 61) }
 func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) }
 func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) }
 func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) }
@@ -427,22 +380,11 @@ func testPendingDeduplication(t *testing.T, protocol int) {
 
 	// Assemble a tester with a built in counter and delayed fetcher
 	tester := newTester()
-	blockFetcher := tester.makeBlockFetcher(blocks)
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
 
 	delay := 50 * time.Millisecond
 	counter := uint32(0)
-	blockWrapper := func(hashes []common.Hash) error {
-		atomic.AddUint32(&counter, uint32(len(hashes)))
-
-		// Simulate a long running fetch
-		go func() {
-			time.Sleep(delay)
-			blockFetcher(hashes)
-		}()
-		return nil
-	}
 	headerWrapper := func(hash common.Hash) error {
 		atomic.AddUint32(&counter, 1)
 
@@ -455,11 +397,7 @@ func testPendingDeduplication(t *testing.T, protocol int) {
 	}
 	// Announce the same block many times until it's fetched (wait for any pending ops)
 	for tester.getBlock(hashes[0]) == nil {
-		if protocol < 62 {
-			tester.fetcher.Notify("repeater", hashes[0], 0, time.Now().Add(-arriveTimeout), blockWrapper, nil, nil)
-		} else {
-			tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerWrapper, bodyFetcher)
-		}
+		tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
 		time.Sleep(time.Millisecond)
 	}
 	time.Sleep(delay)
@@ -475,7 +413,6 @@ func testPendingDeduplication(t *testing.T, protocol int) {
 
 // Tests that announcements retrieved in a random order are cached and eventually
 // imported when all the gaps are filled in.
-func TestRandomArrivalImport61(t *testing.T) { testRandomArrivalImport(t, 61) }
 func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) }
 func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) }
 func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) }
@@ -487,7 +424,6 @@ func testRandomArrivalImport(t *testing.T, protocol int) {
 	skip := targetBlocks / 2
 
 	tester := newTester()
-	blockFetcher := tester.makeBlockFetcher(blocks)
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
 
@@ -497,26 +433,17 @@ func testRandomArrivalImport(t *testing.T, protocol int) {
 
 	for i := len(hashes) - 1; i >= 0; i-- {
 		if i != skip {
-			if protocol < 62 {
-				tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
-			} else {
-				tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
-			}
+			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 			time.Sleep(time.Millisecond)
 		}
 	}
 	// Finally announce the skipped entry and check full import
-	if protocol < 62 {
-		tester.fetcher.Notify("valid", hashes[skip], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
-	} else {
-		tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
-	}
+	tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 	verifyImportCount(t, imported, len(hashes)-1)
 }
 
 // Tests that direct block enqueues (due to block propagation vs. hash announce)
 // are correctly schedule, filling and import queue gaps.
-func TestQueueGapFill61(t *testing.T) { testQueueGapFill(t, 61) }
 func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) }
 func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) }
 func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) }
@@ -528,7 +455,6 @@ func testQueueGapFill(t *testing.T, protocol int) {
 	skip := targetBlocks / 2
 
 	tester := newTester()
-	blockFetcher := tester.makeBlockFetcher(blocks)
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
 
@@ -538,11 +464,7 @@ func testQueueGapFill(t *testing.T, protocol int) {
 
 	for i := len(hashes) - 1; i >= 0; i-- {
 		if i != skip {
-			if protocol < 62 {
-				tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
-			} else {
-				tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
-			}
+			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 			time.Sleep(time.Millisecond)
 		}
 	}
@@ -553,7 +475,6 @@ func testQueueGapFill(t *testing.T, protocol int) {
 
 // Tests that blocks arriving from various sources (multiple propagations, hash
 // announces, etc) do not get scheduled for import multiple times.
-func TestImportDeduplication61(t *testing.T) { testImportDeduplication(t, 61) }
 func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) }
 func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) }
 func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) }
@@ -564,7 +485,6 @@ func testImportDeduplication(t *testing.T, protocol int) {
 
 	// Create the tester and wrap the importer with a counter
 	tester := newTester()
-	blockFetcher := tester.makeBlockFetcher(blocks)
 	headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	bodyFetcher := tester.makeBodyFetcher(blocks, 0)
 
@@ -580,11 +500,7 @@ func testImportDeduplication(t *testing.T, protocol int) {
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
 
 	// Announce the duplicating block, wait for retrieval, and also propagate directly
-	if protocol < 62 {
-		tester.fetcher.Notify("valid", hashes[0], 0, time.Now().Add(-arriveTimeout), blockFetcher, nil, nil)
-	} else {
-		tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
-	}
+	tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 	<-fetching
 
 	tester.fetcher.Enqueue("valid", blocks[hashes[0]])
@@ -660,14 +576,14 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
 	tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} }
 
 	// Ensure that a block with a lower number than the threshold is discarded
-	tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
+	tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 	select {
 	case <-time.After(50 * time.Millisecond):
 	case <-fetching:
 		t.Fatalf("fetcher requested stale header")
 	}
 	// Ensure that a block with a higher number than the threshold is discarded
-	tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
+	tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 	select {
 	case <-time.After(50 * time.Millisecond):
 	case <-fetching:
@@ -693,7 +609,7 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
 
 	// Announce a block with a bad number, check for immediate drop
-	tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
+	tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 	verifyImportEvent(t, imported, false)
 
 	tester.lock.RLock()
@@ -704,7 +620,7 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
 		t.Fatalf("peer with invalid numbered announcement not dropped")
 	}
 	// Make sure a good announcement passes without a drop
-	tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
+	tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 	verifyImportEvent(t, imported, true)
 
 	tester.lock.RLock()
@@ -743,7 +659,7 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
 
 	// Iteratively announce blocks until all are imported
 	for i := len(hashes) - 2; i >= 0; i-- {
-		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher)
+		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
 
 		// All announces should fetch the header
 		verifyFetchingEvent(t, fetching, true)
@@ -760,7 +676,6 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
 // Tests that a peer is unable to use unbounded memory with sending infinite
 // block announcements to a node, but that even in the face of such an attack,
 // the fetcher remains operational.
-func TestHashMemoryExhaustionAttack61(t *testing.T) { testHashMemoryExhaustionAttack(t, 61) }
 func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) }
 func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) }
 func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) }
@@ -781,29 +696,19 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
 	// Create a valid chain and an infinite junk chain
 	targetBlocks := hashLimit + 2*maxQueueDist
 	hashes, blocks := makeChain(targetBlocks, 0, genesis)
-	validBlockFetcher := tester.makeBlockFetcher(blocks)
 	validHeaderFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack)
 	validBodyFetcher := tester.makeBodyFetcher(blocks, 0)
 
 	attack, _ := makeChain(targetBlocks, 0, unknownBlock)
-	attackerBlockFetcher := tester.makeBlockFetcher(nil)
 	attackerHeaderFetcher := tester.makeHeaderFetcher(nil, -gatherSlack)
 	attackerBodyFetcher := tester.makeBodyFetcher(nil, 0)
 
 	// Feed the tester a huge hashset from the attacker, and a limited from the valid peer
 	for i := 0; i < len(attack); i++ {
 		if i < maxQueueDist {
-			if protocol < 62 {
-				tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], 0, time.Now(), validBlockFetcher, nil, nil)
-			} else {
-				tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), nil, validHeaderFetcher, validBodyFetcher)
-			}
-		}
-		if protocol < 62 {
-			tester.fetcher.Notify("attacker", attack[i], 0, time.Now(), attackerBlockFetcher, nil, nil)
-		} else {
-			tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), nil, attackerHeaderFetcher, attackerBodyFetcher)
+			tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), validHeaderFetcher, validBodyFetcher)
 		}
+		tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher)
 	}
 	if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist {
 		t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist)
@@ -813,11 +718,7 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
 
 	// Feed the remaining valid hashes to ensure DOS protection state remains clean
 	for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- {
-		if protocol < 62 {
-			tester.fetcher.Notify("valid", hashes[i], 0, time.Now().Add(-arriveTimeout), validBlockFetcher, nil, nil)
-		} else {
-			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), nil, validHeaderFetcher, validBodyFetcher)
-		}
+		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), validHeaderFetcher, validBodyFetcher)
 		verifyImportEvent(t, imported, true)
 	}
 	verifyImportDone(t, imported)
diff --git a/eth/fetcher/metrics.go b/eth/fetcher/metrics.go
index b82d3ca01..1ed8075bf 100644
--- a/eth/fetcher/metrics.go
+++ b/eth/fetcher/metrics.go
@@ -33,12 +33,9 @@ var (
 	propBroadcastDropMeter = metrics.NewMeter("eth/fetcher/prop/broadcasts/drop")
 	propBroadcastDOSMeter  = metrics.NewMeter("eth/fetcher/prop/broadcasts/dos")
 
-	blockFetchMeter  = metrics.NewMeter("eth/fetcher/fetch/blocks")
 	headerFetchMeter = metrics.NewMeter("eth/fetcher/fetch/headers")
 	bodyFetchMeter   = metrics.NewMeter("eth/fetcher/fetch/bodies")
 
-	blockFilterInMeter   = metrics.NewMeter("eth/fetcher/filter/blocks/in")
-	blockFilterOutMeter  = metrics.NewMeter("eth/fetcher/filter/blocks/out")
 	headerFilterInMeter  = metrics.NewMeter("eth/fetcher/filter/headers/in")
 	headerFilterOutMeter = metrics.NewMeter("eth/fetcher/filter/headers/out")
 	bodyFilterInMeter    = metrics.NewMeter("eth/fetcher/filter/bodies/in")
diff --git a/eth/handler.go b/eth/handler.go
index 6a648d2e0..d546cf2e1 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -57,9 +57,6 @@ func errResp(code errCode, format string, v ...interface{}) error {
 	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
 }
 
-type hashFetcherFn func(common.Hash) error
-type blockFetcherFn func([]common.Hash) error
-
 type ProtocolManager struct {
 	networkId int
 
@@ -275,9 +272,11 @@ func (pm *ProtocolManager) handle(p *peer) error {
 	defer pm.removePeer(p.id)
 
 	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
-	if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(),
-		p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks, p.RequestHeadersByHash,
-		p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts, p.RequestNodeData); err != nil {
+	err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(),
+		p.RequestHeadersByHash, p.RequestHeadersByNumber,
+		p.RequestBodies, p.RequestReceipts, p.RequestNodeData,
+	)
+	if err != nil {
 		return err
 	}
 	// Propagate existing transactions. new transactions appearing
@@ -331,108 +330,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		// Status messages should never arrive after the handshake
 		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
 
-	case p.version < eth62 && msg.Code == GetBlockHashesMsg:
-		// Retrieve the number of hashes to return and from which origin hash
-		var request getBlockHashesData
-		if err := msg.Decode(&request); err != nil {
-			return errResp(ErrDecode, "%v: %v", msg, err)
-		}
-		if request.Amount > uint64(downloader.MaxHashFetch) {
-			request.Amount = uint64(downloader.MaxHashFetch)
-		}
-		// Retrieve the hashes from the block chain and return them
-		hashes := pm.blockchain.GetBlockHashesFromHash(request.Hash, request.Amount)
-		if len(hashes) == 0 {
-			glog.V(logger.Debug).Infof("invalid block hash %x", request.Hash.Bytes()[:4])
-		}
-		return p.SendBlockHashes(hashes)
-
-	case p.version < eth62 && msg.Code == GetBlockHashesFromNumberMsg:
-		// Retrieve and decode the number of hashes to return and from which origin number
-		var request getBlockHashesFromNumberData
-		if err := msg.Decode(&request); err != nil {
-			return errResp(ErrDecode, "%v: %v", msg, err)
-		}
-		if request.Amount > uint64(downloader.MaxHashFetch) {
-			request.Amount = uint64(downloader.MaxHashFetch)
-		}
-		// Calculate the last block that should be retrieved, and short circuit if unavailable
-		last := pm.blockchain.GetBlockByNumber(request.Number + request.Amount - 1)
-		if last == nil {
-			last = pm.blockchain.CurrentBlock()
-			request.Amount = last.NumberU64() - request.Number + 1
-		}
-		if last.NumberU64() < request.Number {
-			return p.SendBlockHashes(nil)
-		}
-		// Retrieve the hashes from the last block backwards, reverse and return
-		hashes := []common.Hash{last.Hash()}
-		hashes = append(hashes, pm.blockchain.GetBlockHashesFromHash(last.Hash(), request.Amount-1)...)
-
-		for i := 0; i < len(hashes)/2; i++ {
-			hashes[i], hashes[len(hashes)-1-i] = hashes[len(hashes)-1-i], hashes[i]
-		}
-		return p.SendBlockHashes(hashes)
-
-	case p.version < eth62 && msg.Code == BlockHashesMsg:
-		// A batch of hashes arrived to one of our previous requests
-		var hashes []common.Hash
-		if err := msg.Decode(&hashes); err != nil {
-			break
-		}
-		// Deliver them all to the downloader for queuing
-		err := pm.downloader.DeliverHashes(p.id, hashes)
-		if err != nil {
-			glog.V(logger.Debug).Infoln(err)
-		}
-
-	case p.version < eth62 && msg.Code == GetBlocksMsg:
-		// Decode the retrieval message
-		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
-		if _, err := msgStream.List(); err != nil {
-			return err
-		}
-		// Gather blocks until the fetch or network limits is reached
-		var (
-			hash   common.Hash
-			bytes  common.StorageSize
-			blocks []*types.Block
-		)
-		for len(blocks) < downloader.MaxBlockFetch && bytes < softResponseLimit {
-			//Retrieve the hash of the next block
-			err := msgStream.Decode(&hash)
-			if err == rlp.EOL {
-				break
-			} else if err != nil {
-				return errResp(ErrDecode, "msg %v: %v", msg, err)
-			}
-			// Retrieve the requested block, stopping if enough was found
-			if block := pm.blockchain.GetBlock(hash); block != nil {
-				blocks = append(blocks, block)
-				bytes += block.Size()
-			}
-		}
-		return p.SendBlocks(blocks)
-
-	case p.version < eth62 && msg.Code == BlocksMsg:
-		// Decode the arrived block message
-		var blocks []*types.Block
-		if err := msg.Decode(&blocks); err != nil {
-			glog.V(logger.Detail).Infoln("Decode error", err)
-			blocks = nil
-		}
-		// Update the receive timestamp of each block
-		for _, block := range blocks {
-			block.ReceivedAt = msg.ReceivedAt
-			block.ReceivedFrom = p
-		}
-		// Filter out any explicitly requested blocks, deliver the rest to the downloader
-		if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 {
-			pm.downloader.DeliverBlocks(p.id, blocks)
-		}
-
 	// Block header query, collect the requested headers and reply
-	case p.version >= eth62 && msg.Code == GetBlockHeadersMsg:
+	case msg.Code == GetBlockHeadersMsg:
 		// Decode the complex header query
 		var query getBlockHeadersData
 		if err := msg.Decode(&query); err != nil {
@@ -498,7 +397,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		return p.SendBlockHeaders(headers)
 
-	case p.version >= eth62 && msg.Code == BlockHeadersMsg:
+	case msg.Code == BlockHeadersMsg:
 		// A batch of headers arrived to one of our previous requests
 		var headers []*types.Header
 		if err := msg.Decode(&headers); err != nil {
@@ -550,7 +449,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 		}
 
-	case p.version >= eth62 && msg.Code == GetBlockBodiesMsg:
+	case msg.Code == GetBlockBodiesMsg:
 		// Decode the retrieval message
 		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
 		if _, err := msgStream.List(); err != nil {
@@ -577,7 +476,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		}
 		return p.SendBlockBodiesRLP(bodies)
 
-	case p.version >= eth62 && msg.Code == BlockBodiesMsg:
+	case msg.Code == BlockBodiesMsg:
 		// A batch of block bodies arrived to one of our previous requests
 		var request blockBodiesData
 		if err := msg.Decode(&request); err != nil {
@@ -728,11 +627,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			}
 		}
 		for _, block := range unknown {
-			if p.version < eth62 {
-				pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestBlocks, nil, nil)
-			} else {
-				pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), nil, p.RequestOneHeader, p.RequestBodies)
-			}
+			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
 		}
 
 	case msg.Code == NewBlockMsg:
@@ -813,11 +708,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
 	// Otherwise if the block is indeed in out own chain, announce it
 	if pm.blockchain.HasBlock(hash) {
 		for _, peer := range peers {
-			if peer.version < eth62 {
-				peer.SendNewBlockHashes61([]common.Hash{hash})
-			} else {
-				peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
-			}
+			peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
 		}
 		glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
 	}
diff --git a/eth/handler_test.go b/eth/handler_test.go
index c208f15c7..c745408a8 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -63,160 +63,6 @@ func TestProtocolCompatibility(t *testing.T) {
 	}
 }
 
-// Tests that hashes can be retrieved from a remote chain by hashes in reverse
-// order.
-func TestGetBlockHashes61(t *testing.T) { testGetBlockHashes(t, 61) }
-
-func testGetBlockHashes(t *testing.T, protocol int) {
-	pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil)
-	peer, _ := newTestPeer("peer", protocol, pm, true)
-	defer peer.close()
-
-	// Create a batch of tests for various scenarios
-	limit := downloader.MaxHashFetch
-	tests := []struct {
-		origin common.Hash
-		number int
-		result int
-	}{
-		{common.Hash{}, 1, 0},                                   // Make sure non existent hashes don't return results
-		{pm.blockchain.Genesis().Hash(), 1, 0},                  // There are no hashes to retrieve up from the genesis
-		{pm.blockchain.GetBlockByNumber(5).Hash(), 5, 5},        // All the hashes including the genesis requested
-		{pm.blockchain.GetBlockByNumber(5).Hash(), 10, 5},       // More hashes than available till the genesis requested
-		{pm.blockchain.GetBlockByNumber(100).Hash(), 10, 10},    // All hashes available from the middle of the chain
-		{pm.blockchain.CurrentBlock().Hash(), 10, 10},           // All hashes available from the head of the chain
-		{pm.blockchain.CurrentBlock().Hash(), limit, limit},     // Request the maximum allowed hash count
-		{pm.blockchain.CurrentBlock().Hash(), limit + 1, limit}, // Request more than the maximum allowed hash count
-	}
-	// Run each of the tests and verify the results against the chain
-	for i, tt := range tests {
-		// Assemble the hash response we would like to receive
-		resp := make([]common.Hash, tt.result)
-		if len(resp) > 0 {
-			from := pm.blockchain.GetBlock(tt.origin).NumberU64() - 1
-			for j := 0; j < len(resp); j++ {
-				resp[j] = pm.blockchain.GetBlockByNumber(uint64(int(from) - j)).Hash()
-			}
-		}
-		// Send the hash request and verify the response
-		p2p.Send(peer.app, 0x03, getBlockHashesData{tt.origin, uint64(tt.number)})
-		if err := p2p.ExpectMsg(peer.app, 0x04, resp); err != nil {
-			t.Errorf("test %d: block hashes mismatch: %v", i, err)
-		}
-	}
-}
-
-// Tests that hashes can be retrieved from a remote chain by numbers in forward
-// order.
-func TestGetBlockHashesFromNumber61(t *testing.T) { testGetBlockHashesFromNumber(t, 61) }
-
-func testGetBlockHashesFromNumber(t *testing.T, protocol int) {
-	pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil)
-	peer, _ := newTestPeer("peer", protocol, pm, true)
-	defer peer.close()
-
-	// Create a batch of tests for various scenarios
-	limit := downloader.MaxHashFetch
-	tests := []struct {
-		origin uint64
-		number int
-		result int
-	}{
-		{pm.blockchain.CurrentBlock().NumberU64() + 1, 1, 0},     // Out of bounds requests should return empty
-		{pm.blockchain.CurrentBlock().NumberU64(), 1, 1},         // Make sure the head hash can be retrieved
-		{pm.blockchain.CurrentBlock().NumberU64() - 4, 5, 5},     // All hashes, including the head hash requested
-		{pm.blockchain.CurrentBlock().NumberU64() - 4, 10, 5},    // More hashes requested than available till the head
-		{pm.blockchain.CurrentBlock().NumberU64() - 100, 10, 10}, // All hashes available from the middle of the chain
-		{0, 10, 10},           // All hashes available from the root of the chain
-		{0, limit, limit},     // Request the maximum allowed hash count
-		{0, limit + 1, limit}, // Request more than the maximum allowed hash count
-		{0, 1, 1},             // Make sure the genesis hash can be retrieved
-	}
-	// Run each of the tests and verify the results against the chain
-	for i, tt := range tests {
-		// Assemble the hash response we would like to receive
-		resp := make([]common.Hash, tt.result)
-		for j := 0; j < len(resp); j++ {
-			resp[j] = pm.blockchain.GetBlockByNumber(tt.origin + uint64(j)).Hash()
-		}
-		// Send the hash request and verify the response
-		p2p.Send(peer.app, 0x08, getBlockHashesFromNumberData{tt.origin, uint64(tt.number)})
-		if err := p2p.ExpectMsg(peer.app, 0x04, resp); err != nil {
-			t.Errorf("test %d: block hashes mismatch: %v", i, err)
-		}
-	}
-}
-
-// Tests that blocks can be retrieved from a remote chain based on their hashes.
-func TestGetBlocks61(t *testing.T) { testGetBlocks(t, 61) }
-
-func testGetBlocks(t *testing.T, protocol int) {
-	pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil)
-	peer, _ := newTestPeer("peer", protocol, pm, true)
-	defer peer.close()
-
-	// Create a batch of tests for various scenarios
-	limit := downloader.MaxBlockFetch
-	tests := []struct {
-		random    int           // Number of blocks to fetch randomly from the chain
-		explicit  []common.Hash // Explicitly requested blocks
-		available []bool        // Availability of explicitly requested blocks
-		expected  int           // Total number of existing blocks to expect
-	}{
-		{1, nil, nil, 1},                                                         // A single random block should be retrievable
-		{10, nil, nil, 10},                                                       // Multiple random blocks should be retrievable
-		{limit, nil, nil, limit},                                                 // The maximum possible blocks should be retrievable
-		{limit + 1, nil, nil, limit},                                             // No more than the possible block count should be returned
-		{0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
-		{0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable
-		{0, []common.Hash{common.Hash{}}, []bool{false}, 0},                      // A non existent block should not be returned
-
-		// Existing and non-existing blocks interleaved should not cause problems
-		{0, []common.Hash{
-			common.Hash{},
-			pm.blockchain.GetBlockByNumber(1).Hash(),
-			common.Hash{},
-			pm.blockchain.GetBlockByNumber(10).Hash(),
-			common.Hash{},
-			pm.blockchain.GetBlockByNumber(100).Hash(),
-			common.Hash{},
-		}, []bool{false, true, false, true, false, true, false}, 3},
-	}
-	// Run each of the tests and verify the results against the chain
-	for i, tt := range tests {
-		// Collect the hashes to request, and the response to expect
-		hashes, seen := []common.Hash{}, make(map[int64]bool)
-		blocks := []*types.Block{}
-
-		for j := 0; j < tt.random; j++ {
-			for {
-				num := rand.Int63n(int64(pm.blockchain.CurrentBlock().NumberU64()))
-				if !seen[num] {
-					seen[num] = true
-
-					block := pm.blockchain.GetBlockByNumber(uint64(num))
-					hashes = append(hashes, block.Hash())
-					if len(blocks) < tt.expected {
-						blocks = append(blocks, block)
-					}
-					break
-				}
-			}
-		}
-		for j, hash := range tt.explicit {
-			hashes = append(hashes, hash)
-			if tt.available[j] && len(blocks) < tt.expected {
-				blocks = append(blocks, pm.blockchain.GetBlock(hash))
-			}
-		}
-		// Send the hash request and verify the response
-		p2p.Send(peer.app, 0x05, hashes)
-		if err := p2p.ExpectMsg(peer.app, 0x06, blocks); err != nil {
-			t.Errorf("test %d: blocks mismatch: %v", i, err)
-		}
-	}
-}
-
 // Tests that block headers can be retrieved from a remote chain based on user queries.
 func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) }
 func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
diff --git a/eth/metrics.go b/eth/metrics.go
index e1a89d3a9..5fa2597d4 100644
--- a/eth/metrics.go
+++ b/eth/metrics.go
@@ -34,14 +34,6 @@ var (
 	propBlockInTrafficMeter   = metrics.NewMeter("eth/prop/blocks/in/traffic")
 	propBlockOutPacketsMeter  = metrics.NewMeter("eth/prop/blocks/out/packets")
 	propBlockOutTrafficMeter  = metrics.NewMeter("eth/prop/blocks/out/traffic")
-	reqHashInPacketsMeter     = metrics.NewMeter("eth/req/hashes/in/packets")
-	reqHashInTrafficMeter     = metrics.NewMeter("eth/req/hashes/in/traffic")
-	reqHashOutPacketsMeter    = metrics.NewMeter("eth/req/hashes/out/packets")
-	reqHashOutTrafficMeter    = metrics.NewMeter("eth/req/hashes/out/traffic")
-	reqBlockInPacketsMeter    = metrics.NewMeter("eth/req/blocks/in/packets")
-	reqBlockInTrafficMeter    = metrics.NewMeter("eth/req/blocks/in/traffic")
-	reqBlockOutPacketsMeter   = metrics.NewMeter("eth/req/blocks/out/packets")
-	reqBlockOutTrafficMeter   = metrics.NewMeter("eth/req/blocks/out/traffic")
 	reqHeaderInPacketsMeter   = metrics.NewMeter("eth/req/headers/in/packets")
 	reqHeaderInTrafficMeter   = metrics.NewMeter("eth/req/headers/in/traffic")
 	reqHeaderOutPacketsMeter  = metrics.NewMeter("eth/req/headers/out/packets")
@@ -95,14 +87,9 @@ func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
 	// Account for the data traffic
 	packets, traffic := miscInPacketsMeter, miscInTrafficMeter
 	switch {
-	case rw.version < eth62 && msg.Code == BlockHashesMsg:
-		packets, traffic = reqHashInPacketsMeter, reqHashInTrafficMeter
-	case rw.version < eth62 && msg.Code == BlocksMsg:
-		packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter
-
-	case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
+	case msg.Code == BlockHeadersMsg:
 		packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter
-	case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
+	case msg.Code == BlockBodiesMsg:
 		packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter
 
 	case rw.version >= eth63 && msg.Code == NodeDataMsg:
@@ -127,14 +114,9 @@ func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {
 	// Account for the data traffic
 	packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter
 	switch {
-	case rw.version < eth62 && msg.Code == BlockHashesMsg:
-		packets, traffic = reqHashOutPacketsMeter, reqHashOutTrafficMeter
-	case rw.version < eth62 && msg.Code == BlocksMsg:
-		packets, traffic = reqBlockOutPacketsMeter, reqBlockOutTrafficMeter
-
-	case rw.version >= eth62 && msg.Code == BlockHeadersMsg:
+	case msg.Code == BlockHeadersMsg:
 		packets, traffic = reqHeaderOutPacketsMeter, reqHeaderOutTrafficMeter
-	case rw.version >= eth62 && msg.Code == BlockBodiesMsg:
+	case msg.Code == BlockBodiesMsg:
 		packets, traffic = reqBodyOutPacketsMeter, reqBodyOutTrafficMeter
 
 	case rw.version >= eth63 && msg.Code == NodeDataMsg:
diff --git a/eth/peer.go b/eth/peer.go
index b97825c69..c8c207ecb 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -25,7 +25,6 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
 	"github.com/ethereum/go-ethereum/p2p"
@@ -154,25 +153,6 @@ func (p *peer) SendTransactions(txs types.Transactions) error {
 	return p2p.Send(p.rw, TxMsg, txs)
 }
 
-// SendBlockHashes sends a batch of known hashes to the remote peer.
-func (p *peer) SendBlockHashes(hashes []common.Hash) error {
-	return p2p.Send(p.rw, BlockHashesMsg, hashes)
-}
-
-// SendBlocks sends a batch of blocks to the remote peer.
-func (p *peer) SendBlocks(blocks []*types.Block) error {
-	return p2p.Send(p.rw, BlocksMsg, blocks)
-}
-
-// SendNewBlockHashes61 announces the availability of a number of blocks through
-// a hash notification.
-func (p *peer) SendNewBlockHashes61(hashes []common.Hash) error {
-	for _, hash := range hashes {
-		p.knownBlocks.Add(hash)
-	}
-	return p2p.Send(p.rw, NewBlockHashesMsg, hashes)
-}
-
 // SendNewBlockHashes announces the availability of a number of blocks through
 // a hash notification.
 func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
@@ -221,26 +201,6 @@ func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
 	return p2p.Send(p.rw, ReceiptsMsg, receipts)
 }
 
-// RequestHashes fetches a batch of hashes from a peer, starting at from, going
-// towards the genesis block.
-func (p *peer) RequestHashes(from common.Hash) error {
-	glog.V(logger.Debug).Infof("%v fetching hashes (%d) from %x...", p, downloader.MaxHashFetch, from[:4])
-	return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesData{from, uint64(downloader.MaxHashFetch)})
-}
-
-// RequestHashesFromNumber fetches a batch of hashes from a peer, starting at
-// the requested block number, going upwards towards the genesis block.
-func (p *peer) RequestHashesFromNumber(from uint64, count int) error {
-	glog.V(logger.Debug).Infof("%v fetching hashes (%d) from #%d...", p, count, from)
-	return p2p.Send(p.rw, GetBlockHashesFromNumberMsg, getBlockHashesFromNumberData{from, uint64(count)})
-}
-
-// RequestBlocks fetches a batch of blocks corresponding to the specified hashes.
-func (p *peer) RequestBlocks(hashes []common.Hash) error {
-	glog.V(logger.Debug).Infof("%v fetching %v blocks", p, len(hashes))
-	return p2p.Send(p.rw, GetBlocksMsg, hashes)
-}
-
 // RequestHeaders is a wrapper around the header query functions to fetch a
 // single header. It is used solely by the fetcher.
 func (p *peer) RequestOneHeader(hash common.Hash) error {
diff --git a/eth/protocol.go b/eth/protocol.go
index 7de0cb020..69b3be578 100644
--- a/eth/protocol.go
+++ b/eth/protocol.go
@@ -28,7 +28,6 @@ import (
 
 // Constants to match up protocol versions and messages
 const (
-	eth61 = 61
 	eth62 = 62
 	eth63 = 63
 )
@@ -49,26 +48,15 @@ const (
 
 // eth protocol message codes
 const (
-	// Protocol messages belonging to eth/61
-	StatusMsg                   = 0x00
-	NewBlockHashesMsg           = 0x01
-	TxMsg                       = 0x02
-	GetBlockHashesMsg           = 0x03
-	BlockHashesMsg              = 0x04
-	GetBlocksMsg                = 0x05
-	BlocksMsg                   = 0x06
-	NewBlockMsg                 = 0x07
-	GetBlockHashesFromNumberMsg = 0x08
-
-	// Protocol messages belonging to eth/62 (new protocol from scratch)
-	// StatusMsg          = 0x00 (uncomment after eth/61 deprecation)
-	// NewBlockHashesMsg  = 0x01 (uncomment after eth/61 deprecation)
-	// TxMsg              = 0x02 (uncomment after eth/61 deprecation)
+	// Protocol messages belonging to eth/62
+	StatusMsg          = 0x00
+	NewBlockHashesMsg  = 0x01
+	TxMsg              = 0x02
 	GetBlockHeadersMsg = 0x03
 	BlockHeadersMsg    = 0x04
 	GetBlockBodiesMsg  = 0x05
 	BlockBodiesMsg     = 0x06
-	// 	NewBlockMsg       = 0x07 (uncomment after eth/61 deprecation)
+	NewBlockMsg        = 0x07
 
 	// Protocol messages belonging to eth/63
 	GetNodeDataMsg = 0x0d
@@ -117,12 +105,6 @@ type txPool interface {
 	GetTransactions() types.Transactions
 }
 
-type chainManager interface {
-	GetBlockHashesFromHash(hash common.Hash, amount uint64) (hashes []common.Hash)
-	GetBlock(hash common.Hash) (block *types.Block)
-	Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash)
-}
-
 // statusData is the network packet for the status message.
 type statusData struct {
 	ProtocolVersion uint32
@@ -138,19 +120,6 @@ type newBlockHashesData []struct {
 	Number uint64      // Number of one particular block being announced
 }
 
-// getBlockHashesData is the network packet for the hash based hash retrieval.
-type getBlockHashesData struct {
-	Hash   common.Hash
-	Amount uint64
-}
-
-// getBlockHashesFromNumberData is the network packet for the number based hash
-// retrieval.
-type getBlockHashesFromNumberData struct {
-	Number uint64
-	Amount uint64
-}
-
 // getBlockHeadersData represents a block header query.
 type getBlockHeadersData struct {
 	Origin  hashOrNumber // Block from which to retrieve headers
@@ -209,8 +178,3 @@ type blockBody struct {
 
 // blockBodiesData is the network packet for block content distribution.
 type blockBodiesData []*blockBody
-
-// nodeDataData is the network response packet for a node data retrieval.
-type nodeDataData []struct {
-	Value []byte
-}
diff --git a/eth/protocol_test.go b/eth/protocol_test.go
index f860d0a35..4633344da 100644
--- a/eth/protocol_test.go
+++ b/eth/protocol_test.go
@@ -37,7 +37,6 @@ func init() {
 var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 
 // Tests that handshake failures are detected and reported correctly.
-func TestStatusMsgErrors61(t *testing.T) { testStatusMsgErrors(t, 61) }
 func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) }
 func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) }
 
@@ -90,7 +89,6 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
 }
 
 // This test checks that received transactions are added to the local pool.
-func TestRecvTransactions61(t *testing.T) { testRecvTransactions(t, 61) }
 func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) }
 func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
 
@@ -119,7 +117,6 @@ func testRecvTransactions(t *testing.T, protocol int) {
 }
 
 // This test checks that pending transactions are sent.
-func TestSendTransactions61(t *testing.T) { testSendTransactions(t, 61) }
 func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) }
 func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
 
-- 
cgit v1.2.3


From 48709d53407ee5b99f8e1717fbaa8edb072ef944 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Mon, 25 Jul 2016 15:14:14 +0300
Subject: [release/1.4.11] eth, eth/downloader: better remote head tracking

(cherry picked from commit 1dd272080dfb49a07a87c46e18d8aeaa0fd41a08)

Conflicts:
	eth/handler.go
	eth/sync.go
---
 eth/downloader/downloader.go      |  7 ++++---
 eth/downloader/downloader_test.go | 17 ++++++++++++++---
 eth/downloader/peer.go            | 13 +++++++++----
 eth/handler.go                    | 31 +++++++++++++++++--------------
 eth/peer.go                       | 34 +++++++++++-----------------------
 eth/sync.go                       | 11 +++++++----
 6 files changed, 62 insertions(+), 51 deletions(-)

diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index aee21122a..1f9ef598c 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -236,12 +236,12 @@ func (d *Downloader) Synchronising() bool {
 
 // RegisterPeer injects a new download peer into the set of block source to be
 // used for fetching hashes and blocks from.
-func (d *Downloader) RegisterPeer(id string, version int, head common.Hash,
+func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHeadRetrievalFn,
 	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
 
 	glog.V(logger.Detail).Infoln("Registering peer", id)
-	if err := d.peers.Register(newPeer(id, version, head, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
+	if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
 		glog.V(logger.Error).Infoln("Register failed:", err)
 		return err
 	}
@@ -501,7 +501,8 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 	glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
 
 	// Request the advertised remote head block and wait for the response
-	go p.getRelHeaders(p.head, 1, 0, false)
+	head, _ := p.currentHead()
+	go p.getRelHeaders(head, 1, 0, false)
 
 	timeout := time.After(d.requestTTL())
 	for {
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 4ca28091c..a2efc7469 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -400,11 +400,11 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
 	var err error
 	switch version {
 	case 62:
-		err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
+		err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
 	case 63:
-		err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
+		err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
 	case 64:
-		err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
+		err = dl.downloader.RegisterPeer(id, version, dl.peerCurrentHeadFn(id), dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
 	}
 	if err == nil {
 		// Assign the owned hashes, headers and blocks to the peer (deep copy)
@@ -463,6 +463,17 @@ func (dl *downloadTester) dropPeer(id string) {
 	dl.downloader.UnregisterPeer(id)
 }
 
+// peerCurrentHeadFn constructs a function to retrieve a peer's current head hash
+// and total difficulty.
+func (dl *downloadTester) peerCurrentHeadFn(id string) func() (common.Hash, *big.Int) {
+	return func() (common.Hash, *big.Int) {
+		dl.lock.RLock()
+		defer dl.lock.RUnlock()
+
+		return dl.peerHashes[id][0], nil
+	}
+}
+
 // peerGetRelHeadersFn constructs a GetBlockHeaders function based on a hashed
 // origin; associated with a particular peer in the download tester. The returned
 // function can be used to retrieve batches of headers from the particular peer.
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index c2b7a52d0..b0bfc66c8 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"math/big"
 	"sort"
 	"strings"
 	"sync"
@@ -37,6 +38,9 @@ const (
 	measurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.
 )
 
+// Head hash and total difficulty retriever for a peer.
+type currentHeadRetrievalFn func() (common.Hash, *big.Int)
+
 // Block header and body fetchers belonging to eth/62 and above
 type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
 type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
@@ -52,8 +56,7 @@ var (
 
 // peer represents an active peer from which hashes and blocks are retrieved.
 type peer struct {
-	id   string      // Unique identifier of the peer
-	head common.Hash // Hash of the peers latest known block
+	id string // Unique identifier of the peer
 
 	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
 	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
@@ -74,6 +77,8 @@ type peer struct {
 
 	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)
 
+	currentHead currentHeadRetrievalFn // Method to fetch the currently known head of the peer
+
 	getRelHeaders  relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
 	getAbsHeaders  absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
 	getBlockBodies blockBodyFetcherFn      // [eth/62] Method to retrieve a batch of block bodies
@@ -87,14 +92,14 @@ type peer struct {
 
 // newPeer create a new downloader peer, with specific hash and block retrieval
 // mechanisms.
-func newPeer(id string, version int, head common.Hash,
+func newPeer(id string, version int, currentHead currentHeadRetrievalFn,
 	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer {
 	return &peer{
 		id:      id,
-		head:    head,
 		lacking: make(map[common.Hash]struct{}),
 
+		currentHead:    currentHead,
 		getRelHeaders:  getRelHeaders,
 		getAbsHeaders:  getAbsHeaders,
 		getBlockBodies: getBlockBodies,
diff --git a/eth/handler.go b/eth/handler.go
index d546cf2e1..e16f2c4d5 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -272,11 +272,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
 	defer pm.removePeer(p.id)
 
 	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
-	err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(),
-		p.RequestHeadersByHash, p.RequestHeadersByNumber,
-		p.RequestBodies, p.RequestReceipts, p.RequestNodeData,
-	)
-	if err != nil {
+	if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head, p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts, p.RequestNodeData); err != nil {
 		return err
 	}
 	// Propagate existing transactions. new transactions appearing
@@ -411,7 +407,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			// If we already have a DAO header, we can check the peer's TD against it. If
 			// the peer's ahead of this, it too must have a reply to the DAO check
 			if daoHeader := pm.blockchain.GetHeaderByNumber(pm.chainconfig.DAOForkBlock.Uint64()); daoHeader != nil {
-				if p.Td().Cmp(pm.blockchain.GetTd(daoHeader.Hash())) >= 0 {
+				if _, td := p.Head(); td.Cmp(pm.blockchain.GetTd(daoHeader.Hash())) >= 0 {
 					verifyDAO = false
 				}
 			}
@@ -617,7 +613,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		// Mark the hashes as present at the remote node
 		for _, block := range announces {
 			p.MarkBlock(block.Hash)
-			p.SetHead(block.Hash)
 		}
 		// Schedule all the unknown hashes for retrieval
 		unknown := make([]announce, 0, len(announces))
@@ -644,15 +639,23 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 
 		// Mark the peer as owning the block and schedule it for import
 		p.MarkBlock(request.Block.Hash())
-		p.SetHead(request.Block.Hash())
-
 		pm.fetcher.Enqueue(p.id, request.Block)
 
-		// Update the peers total difficulty if needed, schedule a download if gapped
-		if request.TD.Cmp(p.Td()) > 0 {
-			p.SetTd(request.TD)
-			td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
-			if request.TD.Cmp(new(big.Int).Add(td, request.Block.Difficulty())) > 0 {
+		// Assuming the block is importable by the peer, but possibly not yet done so,
+		// calculate the head hash and TD that the peer truly must have.
+		var (
+			trueHead = request.Block.ParentHash()
+			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
+		)
+		// Update the peers total difficulty if better than the previous
+		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
+			p.SetHead(trueHead, trueTD)
+
+			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
+			// a single block (as the true TD is below the propagated block), however this
+			// scenario should easily be covered by the fetcher.
+			currentBlock := pm.blockchain.CurrentBlock()
+			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash())) > 0 {
 				go pm.synchronise(p)
 			}
 		}
diff --git a/eth/peer.go b/eth/peer.go
index c8c207ecb..aa85631ea 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -84,43 +84,31 @@ func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
 
 // Info gathers and returns a collection of metadata known about a peer.
 func (p *peer) Info() *PeerInfo {
+	hash, td := p.Head()
+
 	return &PeerInfo{
 		Version:    p.version,
-		Difficulty: p.Td(),
-		Head:       fmt.Sprintf("%x", p.Head()),
+		Difficulty: td,
+		Head:       hash.Hex(),
 	}
 }
 
-// Head retrieves a copy of the current head (most recent) hash of the peer.
-func (p *peer) Head() (hash common.Hash) {
+// Head retrieves a copy of the current head hash and total difficulty of the
+// peer.
+func (p *peer) Head() (hash common.Hash, td *big.Int) {
 	p.lock.RLock()
 	defer p.lock.RUnlock()
 
 	copy(hash[:], p.head[:])
-	return hash
+	return hash, new(big.Int).Set(p.td)
 }
 
-// SetHead updates the head (most recent) hash of the peer.
-func (p *peer) SetHead(hash common.Hash) {
+// SetHead updates the head hash and total difficulty of the peer.
+func (p *peer) SetHead(hash common.Hash, td *big.Int) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
 	copy(p.head[:], hash[:])
-}
-
-// Td retrieves the current total difficulty of a peer.
-func (p *peer) Td() *big.Int {
-	p.lock.RLock()
-	defer p.lock.RUnlock()
-
-	return new(big.Int).Set(p.td)
-}
-
-// SetTd updates the current total difficulty of a peer.
-func (p *peer) SetTd(td *big.Int) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
 	p.td.Set(td)
 }
 
@@ -411,7 +399,7 @@ func (ps *peerSet) BestPeer() *peer {
 		bestTd   *big.Int
 	)
 	for _, p := range ps.peers {
-		if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 {
+		if _, td := p.Head(); bestPeer == nil || td.Cmp(bestTd) > 0 {
 			bestPeer, bestTd = p, td
 		}
 	}
diff --git a/eth/sync.go b/eth/sync.go
index 52f7e90e7..e0418d4d8 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -161,9 +161,12 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 	if peer == nil {
 		return
 	}
-	// Make sure the peer's TD is higher than our own. If not drop.
-	td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
-	if peer.Td().Cmp(td) <= 0 {
+	// Make sure the peer's TD is higher than our own
+	currentBlock := pm.blockchain.CurrentBlock()
+	td := pm.blockchain.GetTd(currentBlock.Hash())
+
+	pHead, pTd := peer.Head()
+	if pTd.Cmp(td) <= 0 {
 		return
 	}
 	// Otherwise try to sync with the downloader
@@ -171,7 +174,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 	if atomic.LoadUint32(&pm.fastSync) == 1 {
 		mode = downloader.FastSync
 	}
-	if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil {
+	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
 		return
 	}
 	atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done
-- 
cgit v1.2.3


From 6c672a55c0622dfb133b67280fc593a0212eae76 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Tue, 26 Jul 2016 12:26:41 +0300
Subject: [release/1.4.11] eth, eth/downloader: don't forward the DAO challenge
 header

(cherry picked from commit 071af57bcf516d92a0b56c5bb119d9576d32b5cb)
---
 eth/downloader/downloader.go | 2 +-
 eth/handler.go               | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 1f9ef598c..2aea30b39 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -542,7 +542,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 // In the rare scenario when we ended up on a long reorganisation (i.e. none of
 // the head links match), we do a binary search to find the common ancestor.
 func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
-	glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
+	glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height)
 
 	// Figure out the valid ancestor range to prevent rewrite attacks
 	floor, ceil := int64(-1), d.headHeader().Number.Uint64()
diff --git a/eth/handler.go b/eth/handler.go
index e16f2c4d5..3fa47c269 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -434,6 +434,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 					return err
 				}
 				glog.V(logger.Debug).Infof("%v: verified to be on the same side of the DAO fork", p)
+				return nil
 			}
 			// Irrelevant of the fork checks, send the header to the fetcher just in case
 			headers = pm.fetcher.FilterHeaders(headers, time.Now())
-- 
cgit v1.2.3


From 86493f91032bd56831d6d6e6e63b23e17f24dcd2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Tue, 26 Jul 2016 13:07:12 +0300
Subject: [release/1.4.11] eth/downloader: abort sync if master drops (timeout
 prev)

(cherry picked from commit 8f0a4a25f82f48005e6252a90c008bdc76219cc3)
---
 eth/downloader/downloader.go | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 2aea30b39..b6b9d54f0 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -147,8 +147,10 @@ type Downloader struct {
 	stateWakeCh   chan bool            // [eth/63] Channel to signal the state fetcher of new tasks
 	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks
 
+	// Cancellation and termination
+	cancelPeer string        // Identifier of the peer currently being used as the master (cancel on drop)
 	cancelCh   chan struct{} // Channel to cancel mid-flight syncs
-	cancelLock sync.RWMutex  // Lock to protect the cancel channel in delivers
+	cancelLock sync.RWMutex  // Lock to protect the cancel channel and peer in delivers
 
 	quitCh   chan struct{} // Quit channel to signal termination
 	quitLock sync.RWMutex  // Lock to prevent double closes
@@ -254,12 +256,22 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 // the specified peer. An effort is also made to return any pending fetches into
 // the queue.
 func (d *Downloader) UnregisterPeer(id string) error {
+	// Unregister the peer from the active peer set and revoke any fetch tasks
 	glog.V(logger.Detail).Infoln("Unregistering peer", id)
 	if err := d.peers.Unregister(id); err != nil {
 		glog.V(logger.Error).Infoln("Unregister failed:", err)
 		return err
 	}
 	d.queue.Revoke(id)
+
+	// If this peer was the master peer, abort sync immediately
+	d.cancelLock.RLock()
+	master := id == d.cancelPeer
+	d.cancelLock.RUnlock()
+
+	if master {
+		d.cancel()
+	}
 	return nil
 }
 
@@ -332,9 +344,10 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 			empty = true
 		}
 	}
-	// Create cancel channel for aborting mid-flight
+	// Create cancel channel for aborting mid-flight and mark the master peer
 	d.cancelLock.Lock()
 	d.cancelCh = make(chan struct{})
+	d.cancelPeer = id
 	d.cancelLock.Unlock()
 
 	defer d.cancel() // No matter what, we can't leave the cancel channel open
-- 
cgit v1.2.3


From c4e4baf6682a32e129b19dd16258e6b693c2ba43 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Tue, 26 Jul 2016 11:15:38 +0300
Subject: [release/1.4.11] eth/downloader: fewer headers and futures too in
 ancestor lookup

(cherry picked from commit d68865f3b1b93e2463f7e3381e39fbbd137df825)
---
 eth/downloader/downloader.go | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index b6b9d54f0..c8f710450 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -572,11 +572,17 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	if head > height {
 		head = height
 	}
-	from := int64(head) - int64(MaxHeaderFetch) + 1
+	from := int64(head) - int64(MaxHeaderFetch)
 	if from < 0 {
 		from = 0
 	}
-	go p.getAbsHeaders(uint64(from), MaxHeaderFetch, 0, false)
+	// Span out with 15 block gaps into the future to catch bad head reports
+	limit := 2 * MaxHeaderFetch / 16
+	count := 1 + int((int64(ceil)-from)/16)
+	if count > limit {
+		count = limit
+	}
+	go p.getAbsHeaders(uint64(from), count, 15, false)
 
 	// Wait for the remote response to the head fetch
 	number, hash := uint64(0), common.Hash{}
@@ -601,12 +607,8 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			}
 			// Make sure the peer's reply conforms to the request
 			for i := 0; i < len(headers); i++ {
-				if number := headers[i].Number.Int64(); number != from+int64(i) {
-					glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i), number)
-					return 0, errInvalidChain
-				}
-				if i > 0 && headers[i-1].Hash() != headers[i].ParentHash {
-					glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ancestry: expected [%x], got [%x]", p, i, headers[i-1].Hash().Bytes()[:4], headers[i].ParentHash[:4])
+				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
+					glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number)
 					return 0, errInvalidChain
 				}
 			}
@@ -614,12 +616,18 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			finished = true
 			for i := len(headers) - 1; i >= 0; i-- {
 				// Skip any headers that underflow/overflow our requested set
-				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > head {
+				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
 					continue
 				}
 				// Otherwise check if we already know the header or not
 				if (d.mode == FullSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) {
 					number, hash = headers[i].Number.Uint64(), headers[i].Hash()
+
+					// If every header is known, even future ones, the peer straight out lied about its head
+					if number > height && i == limit-1 {
+						glog.V(logger.Warn).Infof("%v: lied about chain head: reported %d, found above %d", p, height, number)
+						return 0, errStallingPeer
+					}
 					break
 				}
 			}
-- 
cgit v1.2.3


From bdbfe572f1fb51710256b78065a0a330a3970a89 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Tue, 16 Aug 2016 10:52:24 +0300
Subject: [release/1.4.11] Makefile: support building for the MIPS64 platforms
 (#2682)

(cherry picked from commit 4c2cc32f2e279baa3059603b8c8a4329f31606f6)
---
 Makefile | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 4bcdab299..6cb64e8a4 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@
 # don't need to bother with make.
 
 .PHONY: geth geth-cross evm all test clean
-.PHONY: geth-linux geth-linux-386 geth-linux-amd64
+.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
 .PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
 .PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
 .PHONY: geth-windows geth-windows-386 geth-windows-amd64
@@ -37,7 +37,7 @@ geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
 	@echo "Full cross compilation done:"
 	@ls -ld $(GOBIN)/geth-*
 
-geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm
+geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le
 	@echo "Linux cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-*
 
@@ -75,6 +75,16 @@ geth-linux-arm64:
 	@echo "Linux ARM64 cross compilation done:"
 	@ls -ld $(GOBIN)/geth-linux-* | grep arm64
 
+geth-linux-mips64:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/mips64 -v ./cmd/geth
+	@echo "Linux MIPS64 cross compilation done:"
+	@ls -ld $(GOBIN)/geth-linux-* | grep mips64
+
+geth-linux-mips64le:
+	build/env.sh go run build/ci.go xgo --go=$(GO) --dest=$(GOBIN) --targets=linux/mips64le -v ./cmd/geth
+	@echo "Linux MIPS64le cross compilation done:"
+	@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
+
 geth-darwin: geth-darwin-386 geth-darwin-amd64
 	@echo "Darwin cross compilation done:"
 	@ls -ld $(GOBIN)/geth-darwin-*
-- 
cgit v1.2.3


From 0ab7e90cbbdf4a561748f20fe4825079013e15fa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com>
Date: Mon, 13 Jun 2016 12:43:09 +0300
Subject: [release/1.4.11] Godeps: pull in ethash with the big endian build fix

(cherry picked from commit f0134f363bf62ba18668c2524050f14b2a43b6bd)
---
 Godeps/Godeps.json                                 |  4 +--
 .../src/github.com/ethereum/ethash/setup.py        |  0
 .../ethereum/ethash/src/libethash/endian.h         | 29 +++++++++++-----------
 .../ethereum/ethash/src/libethash/internal.c       |  2 +-
 4 files changed, 17 insertions(+), 18 deletions(-)
 mode change 100644 => 100755 Godeps/_workspace/src/github.com/ethereum/ethash/setup.py

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index dc7847f33..e4d406655 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -20,8 +20,8 @@
 		},
 		{
 			"ImportPath": "github.com/ethereum/ethash",
-			"Comment": "v23.1-245-g25b32de",
-			"Rev": "25b32de0c0271065c28c3719c2bfe86959d72f0c"
+			"Comment": "v23.1-247-g2e80de5",
+			"Rev": "2e80de5022370cfe632195b1720db52d07ff8a77"
 		},
 		{
 			"ImportPath": "github.com/fatih/color",
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/setup.py b/Godeps/_workspace/src/github.com/ethereum/ethash/setup.py
old mode 100644
new mode 100755
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h b/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h
index 849325a59..5b8abf03d 100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h
@@ -19,7 +19,7 @@
   # define BYTE_ORDER    LITTLE_ENDIAN
 #elif defined( __QNXNTO__ ) && defined( __BIGENDIAN__ )
   # define BIG_ENDIAN 1234
-  # define BYTE_ORDER    BIG_ENDIAN
+  # define BYTE_ORDER BIG_ENDIAN
 #else
 # include <endian.h>
 #endif
@@ -59,21 +59,20 @@
 
 #define fix_endian32(dst_, src_) dst_ = ethash_swap_u32(src_)
 #define fix_endian32_same(val_) val_ = ethash_swap_u32(val_)
-#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_
+#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_)
 #define fix_endian64_same(val_) val_ = ethash_swap_u64(val_)
-#define fix_endian_arr32(arr_, size_)			\
-	do {										\
-	for (unsigned i_ = 0; i_ < (size_), ++i_) { \
-		arr_[i_] = ethash_swap_u32(arr_[i_]);	\
-	}											\
-	while (0)
-#define fix_endian_arr64(arr_, size_)			\
-	do {										\
-	for (unsigned i_ = 0; i_ < (size_), ++i_) { \
-		arr_[i_] = ethash_swap_u64(arr_[i_]);	\
-	}											\
-	while (0)									\
-
+#define fix_endian_arr32(arr_, size_) \
+  do { \
+    for (unsigned i_ = 0; i_ < (size_); ++i_) { \
+      arr_[i_] = ethash_swap_u32(arr_[i_]); \
+    } \
+  } while (0)
+#define fix_endian_arr64(arr_, size_) \
+  do { \
+    for (unsigned i_ = 0; i_ < (size_); ++i_) { \
+      arr_[i_] = ethash_swap_u64(arr_[i_]); \
+    } \
+  } while (0)
 #else
 # error "endian not supported"
 #endif // BYTE_ORDER
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.c b/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.c
index 338aa5ecd..0a830fc82 100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.c
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/internal.c
@@ -257,7 +257,7 @@ static bool ethash_hash(
 void ethash_quick_hash(
 	ethash_h256_t* return_hash,
 	ethash_h256_t const* header_hash,
-	uint64_t const nonce,
+	uint64_t nonce,
 	ethash_h256_t const* mix_hash
 )
 {
-- 
cgit v1.2.3


From c4ed34f008ae508549c03ec286467f48b188272f Mon Sep 17 00:00:00 2001
From: Bas van Kervel <bas@ethdev.com>
Date: Tue, 26 Jul 2016 16:37:04 +0200
Subject: [release/1.4.11] core: ensure the canonical block is written before
 the canonical hash is set

(cherry picked from commit bb8059f6aa86d1052d7c2dd75a6985982cb278f4)

Conflicts:
	core/blockchain.go
	core/database_util.go
	core/headerchain.go
	eth/filters/filter.go
---
 core/blockchain.go      | 19 +++++++++----------
 core/blockchain_test.go | 38 ++++++++++++++++++++++++++++++++++++++
 core/database_util.go   |  6 +++++-
 core/headerchain.go     | 17 ++++++++++-------
 eth/filters/filter.go   | 12 ++++++++----
 5 files changed, 70 insertions(+), 22 deletions(-)

diff --git a/core/blockchain.go b/core/blockchain.go
index 3fbb117d3..8f00f2cc4 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -770,6 +770,14 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
 	localTd := self.GetTd(self.currentBlock.Hash())
 	externTd := new(big.Int).Add(block.Difficulty(), ptd)
 
+	// Irrelevant of the canonical status, write the block itself to the database
+	if err := self.hc.WriteTd(block.Hash(), externTd); err != nil {
+		glog.Fatalf("failed to write block total difficulty: %v", err)
+	}
+	if err := WriteBlock(self.chainDb, block); err != nil {
+		glog.Fatalf("failed to write block contents: %v", err)
+	}
+
 	// If the total difficulty is higher than our known, add it to the canonical chain
 	// Second clause in the if statement reduces the vulnerability to selfish mining.
 	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -780,20 +788,11 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
 				return NonStatTy, err
 			}
 		}
-		// Insert the block as the new head of the chain
-		self.insert(block)
+		self.insert(block) // Insert the block as the new head of the chain
 		status = CanonStatTy
 	} else {
 		status = SideStatTy
 	}
-	// Irrelevant of the canonical status, write the block itself to the database
-	if err := self.hc.WriteTd(block.Hash(), externTd); err != nil {
-		glog.Fatalf("failed to write block total difficulty: %v", err)
-	}
-	if err := WriteBlock(self.chainDb, block); err != nil {
-		glog.Fatalf("failed to write block contents: %v", err)
-	}
-
 	self.futureBlocks.Remove(block.Hash())
 
 	return
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 2f6ddf9c9..18fe30f18 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -1090,3 +1090,41 @@ done:
 	}
 
 }
+
+// Tests if the canonical block can be fetched from the database during chain insertion.
+func TestCanonicalBlockRetrieval(t *testing.T) {
+	var (
+		db, _   = ethdb.NewMemDatabase()
+		genesis = WriteGenesisBlockForTesting(db)
+	)
+
+	evmux := &event.TypeMux{}
+	blockchain, _ := NewBlockChain(db, testChainConfig(), FakePow{}, evmux)
+
+	chain, _ := GenerateChain(nil, genesis, db, 10, func(i int, gen *BlockGen) {})
+
+	for i, _ := range chain {
+		go func(block *types.Block) {
+			// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
+			for {
+				ch := GetCanonicalHash(db, block.NumberU64())
+				if ch == (common.Hash{}) {
+					continue // busy wait for canonical hash to be written
+				}
+				if ch != block.Hash() {
+					t.Fatalf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
+				}
+				fb := GetBlock(db, ch)
+				if fb == nil {
+					t.Fatalf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
+				}
+				if fb.Hash() != block.Hash() {
+					t.Fatalf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex())
+				}
+				return
+			}
+		}(chain[i])
+
+		blockchain.InsertChain(types.Blocks{chain[i]})
+	}
+}
diff --git a/core/database_util.go b/core/database_util.go
index 3ba80062c..3a110f7d0 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -157,7 +157,11 @@ func GetTd(db ethdb.Database, hash common.Hash) *big.Int {
 }
 
 // GetBlock retrieves an entire block corresponding to the hash, assembling it
-// back from the stored header and body.
+// back from the stored header and body. If either the header or body could not
+// be retrieved nil is returned.
+//
+// Note, due to concurrent download of header and block body the header and thus
+// canonical hash can be stored in the database but the body data not (yet).
 func GetBlock(db ethdb.Database, hash common.Hash) *types.Block {
 	// Retrieve the block header and body contents
 	header := GetHeader(db, hash)
diff --git a/core/headerchain.go b/core/headerchain.go
index 5e0fbfb08..3503a1c33 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -129,6 +129,14 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 	localTd := hc.GetTd(hc.currentHeaderHash)
 	externTd := new(big.Int).Add(header.Difficulty, ptd)
 
+	// Irrelevant of the canonical status, write the td and header to the database
+	if err := hc.WriteTd(hash, externTd); err != nil {
+		glog.Fatalf("failed to write header total difficulty: %v", err)
+	}
+	if err := WriteHeader(hc.chainDb, header); err != nil {
+		glog.Fatalf("failed to write header contents: %v", err)
+	}
+
 	// If the total difficulty is higher than our known, add it to the canonical chain
 	// Second clause in the if statement reduces the vulnerability to selfish mining.
 	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -150,6 +158,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 			headHeader = hc.GetHeader(headHash)
 			headNumber = headHeader.Number.Uint64()
 		}
+
 		// Extend the canonical chain with the new header
 		if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
 			glog.Fatalf("failed to insert header number: %v", err)
@@ -157,19 +166,13 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 		if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
 			glog.Fatalf("failed to insert head header hash: %v", err)
 		}
+
 		hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
 
 		status = CanonStatTy
 	} else {
 		status = SideStatTy
 	}
-	// Irrelevant of the canonical status, write the header itself to the database
-	if err := hc.WriteTd(hash, externTd); err != nil {
-		glog.Fatalf("failed to write header total difficulty: %v", err)
-	}
-	if err := WriteHeader(hc.chainDb, header); err != nil {
-		glog.Fatalf("failed to write header contents: %v", err)
-	}
 	hc.headerCache.Add(hash, header)
 
 	return
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 469dfba4d..616184400 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -72,7 +72,11 @@ func (self *Filter) SetTopics(topics [][]common.Hash) {
 
 // Run filters logs with the current parameters set
 func (self *Filter) Find() vm.Logs {
-	latestBlock := core.GetBlock(self.db, core.GetHeadBlockHash(self.db))
+	latestHash := core.GetHeadBlockHash(self.db)
+	latestBlock := core.GetBlock(self.db, latestHash)
+	if latestBlock == nil {
+		return vm.Logs{}
+	}
 	var beginBlockNo uint64 = uint64(self.begin)
 	if self.begin == -1 {
 		beginBlockNo = latestBlock.NumberU64()
@@ -122,13 +126,13 @@ func (self *Filter) mipFind(start, end uint64, depth int) (logs vm.Logs) {
 }
 
 func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) {
-	var block *types.Block
-
 	for i := start; i <= end; i++ {
+		var block *types.Block
 		hash := core.GetCanonicalHash(self.db, i)
 		if hash != (common.Hash{}) {
 			block = core.GetBlock(self.db, hash)
-		} else { // block not found
+		}
+		if block == nil { // block not found/written
 			return logs
 		}
 
-- 
cgit v1.2.3


From 5ca5ccf90cc5af0789268f10bc3189f4539ea15c Mon Sep 17 00:00:00 2001
From: Jeffrey Wilcke <geffobscura@gmail.com>
Date: Thu, 18 Aug 2016 14:56:57 +0200
Subject: [release/1.4.11] VERSION, cmd/geth: bumped version 1.4.11

---
 VERSION          | 2 +-
 cmd/geth/main.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/VERSION b/VERSION
index ac9f79cab..079d7f692 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.4.10
+1.4.11
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 6660d9fb7..176202302 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -50,7 +50,7 @@ const (
 	clientIdentifier = "Geth"   // Client identifier to advertise over the network
 	versionMajor     = 1        // Major version component of the current release
 	versionMinor     = 4        // Minor version component of the current release
-	versionPatch     = 10       // Patch version component of the current release
+	versionPatch     = 11       // Patch version component of the current release
 	versionMeta      = "stable" // Version metadata to append to the version string
 
 	versionOracle = "0xfa7b9770ca4cb04296cac84f37736d4041251cdf" // Ethereum address of the Geth release oracle
-- 
cgit v1.2.3