-rw-r--r--  Godeps/Godeps.json                                                   9
-rw-r--r--  Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go         23
-rw-r--r--  Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go  31
-rw-r--r--  Godeps/_workspace/src/github.com/rs/xhandler/chain.go               2
-rw-r--r--  Godeps/_workspace/src/github.com/rs/xhandler/middleware.go          2
-rw-r--r--  Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go            2
-rw-r--r--  accounts/abi/bind/bind.go                                           39
-rw-r--r--  accounts/abi/bind/bind_test.go                                      44
-rw-r--r--  cmd/utils/flags.go                                                  18
-rw-r--r--  eth/gpu_mining.go                                                    3
-rw-r--r--  rpc/javascript.go                                                   10
11 files changed, 123 insertions, 60 deletions
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 119ed6205..33c8c7d54 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,7 +1,6 @@
{
"ImportPath": "github.com/ethereum/go-ethereum",
"GoVersion": "go1.5.2",
- "GodepVersion": "v60",
"Packages": [
"./..."
],
@@ -21,8 +20,8 @@
},
{
"ImportPath": "github.com/ethereum/ethash",
- "Comment": "v23.1-242-gbc9ba4d",
- "Rev": "bc9ba4d6a83a0fe308fefd8c6001b8ed1607137f"
+ "Comment": "v23.1-245-g25b32de",
+ "Rev": "25b32de0c0271065c28c3719c2bfe86959d72f0c"
},
{
"ImportPath": "github.com/fatih/color",
@@ -145,8 +144,8 @@
"Rev": "53221230c215611a90762720c9042ac782ef74ee"
},
{
- "ImportPath": "github.com/rs/cors",
- "Rev": "5950cf11d77f8a61b432a25dd4d444b4ced01379"
+ "ImportPath": "github.com/rs/cors",
+ "Rev": "5950cf11d77f8a61b432a25dd4d444b4ced01379"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go b/Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go
index 60121bb3b..2a31aaf2d 100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go
@@ -105,6 +105,15 @@ func freeCache(cache *cache) {
cache.ptr = nil
}
+func (cache *cache) compute(dagSize uint64, hash common.Hash, nonce uint64) (ok bool, mixDigest, result common.Hash) {
+ ret := C.ethash_light_compute_internal(cache.ptr, C.uint64_t(dagSize), hashToH256(hash), C.uint64_t(nonce))
+ // Make sure cache is live until after the C call.
+ // This is important because a GC might happen and execute
+ // the finalizer before the call completes.
+ _ = cache
+ return bool(ret.success), h256ToHash(ret.mix_hash), h256ToHash(ret.result)
+}
+
// Light implements the Verify half of the proof of work. It uses a few small
// in-memory caches to verify the nonces found by Full.
type Light struct {
@@ -140,29 +149,23 @@ func (l *Light) Verify(block pow.Block) bool {
cache := l.getCache(blockNum)
dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
-
if l.test {
dagSize = dagSizeForTesting
}
// Recompute the hash using the cache.
- hash := hashToH256(block.HashNoNonce())
- ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
- if !ret.success {
+ ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
+ if !ok {
return false
}
// avoid mixdigest malleability as it's not included in a block's "hashNononce"
- if block.MixDigest() != h256ToHash(ret.mix_hash) {
+ if block.MixDigest() != mixDigest {
return false
}
- // Make sure cache is live until after the C call.
- // This is important because a GC might happen and execute
- // the finalizer before the call completes.
- _ = cache
// The actual check.
target := new(big.Int).Div(maxUint256, difficulty)
- return h256ToHash(ret.result).Big().Cmp(target) <= 0
+ return result.Big().Cmp(target) <= 0
}
func h256ToHash(in C.ethash_h256_t) common.Hash {
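The ethash.go change wraps the CGo verification call in a cache.compute method so the cache value itself anchors the C-allocated memory for the duration of the call. The standalone sketch below is my own illustration, not code from the patch: it shows the pitfall the moved comment describes, namely that a finalizer may free memory owned by a Go value as soon as the value becomes unreachable, even while a call is still using a pointer derived from it. In newer Go releases (1.7+), runtime.KeepAlive expresses what the patch's `_ = cache` statement intends.

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

// cache mimics the ethash light cache: a Go value owning C-allocated
// memory that a finalizer releases once the value becomes unreachable.
type cache struct {
	ptr unsafe.Pointer
}

func newCache() *cache {
	c := &cache{ptr: unsafe.Pointer(new([64]byte))}
	runtime.SetFinalizer(c, func(c *cache) {
		fmt.Println("finalizer ran: C memory would be freed here")
		c.ptr = nil
	})
	return c
}

// compute stands in for the CGo call: the raw pointer is extracted first,
// so nothing below references c unless we explicitly keep it alive.
func (c *cache) compute() {
	p := c.ptr
	// ... long-running C call using p ...
	_ = p
	runtime.KeepAlive(c) // keeps the finalizer from running before this point
}

func main() {
	c := newCache()
	c.compute()
	runtime.GC() // after compute returns, collection (and the finalizer) is safe
}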
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go b/Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go
index 332b7f524..451049eae 100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go
@@ -138,7 +138,7 @@ func PrintDevices() {
platforms, err := cl.GetPlatforms()
if err != nil {
- fmt.Println("Plaform error (check your OpenCL installation): %v", err)
+ fmt.Println("Plaform error (check your OpenCL installation):", err)
return
}
@@ -267,13 +267,13 @@ func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
context, err := cl.CreateContext([]*cl.Device{device})
if err != nil {
- return fmt.Errorf("failed creating context:", err)
+ return fmt.Errorf("failed creating context: %v", err)
}
// TODO: test running with CL_QUEUE_PROFILING_ENABLE for profiling?
queue, err := context.CreateCommandQueue(device, 0)
if err != nil {
- return fmt.Errorf("command queue err:", err)
+ return fmt.Errorf("command queue err: %v", err)
}
// See [4] section 3.2 and [3] "clBuildProgram".
@@ -287,7 +287,7 @@ func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
program, err := context.CreateProgramWithSource([]string{kernelCode})
if err != nil {
- return fmt.Errorf("program err:", err)
+ return fmt.Errorf("program err: %v", err)
}
/* if using AMD OpenCL impl, you can set this to debug on x86 CPU device.
@@ -303,7 +303,7 @@ func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
buildOpts := ""
err = program.BuildProgram([]*cl.Device{device}, buildOpts)
if err != nil {
- return fmt.Errorf("program build err:", err)
+ return fmt.Errorf("program build err: %v", err)
}
var searchKernelName, hashKernelName string
@@ -313,7 +313,7 @@ func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
searchKernel, err := program.CreateKernel(searchKernelName)
hashKernel, err := program.CreateKernel(hashKernelName)
if err != nil {
- return fmt.Errorf("kernel err:", err)
+ return fmt.Errorf("kernel err: %v", err)
}
// TODO: when this DAG size appears, patch the Go bindings
@@ -328,28 +328,28 @@ func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
dagBuf := *(new(*cl.MemObject))
dagBuf, err = context.CreateEmptyBuffer(cl.MemReadOnly, int(c.dagSize))
if err != nil {
- return fmt.Errorf("allocating dag buf failed: ", err)
+ return fmt.Errorf("allocating dag buf failed: %v", err)
}
// write DAG to device mem
dagPtr := unsafe.Pointer(c.ethash.Full.current.ptr.data)
_, err = queue.EnqueueWriteBuffer(dagBuf, true, 0, int(c.dagSize), dagPtr, nil)
if err != nil {
- return fmt.Errorf("writing to dag buf failed: ", err)
+ return fmt.Errorf("writing to dag buf failed: %v", err)
}
searchBuffers := make([]*cl.MemObject, searchBufSize)
for i := 0; i < searchBufSize; i++ {
searchBuff, err := context.CreateEmptyBuffer(cl.MemWriteOnly, (1+maxSearchResults)*SIZEOF_UINT32)
if err != nil {
- return fmt.Errorf("search buffer err:", err)
+ return fmt.Errorf("search buffer err: %v", err)
}
searchBuffers[i] = searchBuff
}
headerBuf, err := context.CreateEmptyBuffer(cl.MemReadOnly, 32)
if err != nil {
- return fmt.Errorf("header buffer err:", err)
+ return fmt.Errorf("header buffer err: %v", err)
}
// Unique, random nonces are crucial for mining efficieny.
@@ -556,13 +556,13 @@ func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (
upperNonce := uint64(binary.LittleEndian.Uint32(results[lo:hi]))
checkNonce = p.startNonce + upperNonce
if checkNonce != 0 {
- cn := C.uint64_t(checkNonce)
- ds := C.uint64_t(c.dagSize)
// We verify that the nonce is indeed a solution by
// executing the Ethash verification function (on the CPU).
- ret := C.ethash_light_compute_internal(c.ethash.Light.current.ptr, ds, hashToH256(headerHash), cn)
+ cache := c.ethash.Light.getCache(block.NumberU64())
+ ok, mixDigest, result := cache.compute(c.dagSize, headerHash, checkNonce)
+
// TODO: return result first
- if ret.success && h256ToHash(ret.result).Big().Cmp(target256) <= 0 {
+ if ok && result.Big().Cmp(target256) <= 0 {
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
if err != nil {
fmt.Println("Error in Search clEnqueueUnmapMemObject: ", err)
@@ -573,9 +573,8 @@ func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (
fmt.Println("Error in Search WaitForEvents: ", err)
}
}
- return checkNonce, C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
+ return checkNonce, mixDigest.Bytes()
}
-
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[p.bufIndex], false, 0, 4, unsafe.Pointer(&zero), nil)
if err != nil {
fmt.Println("Error in Search cl: EnqueueWriteBuffer", err)
diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/chain.go b/Godeps/_workspace/src/github.com/rs/xhandler/chain.go
index ffac67e8a..042274d17 100644
--- a/Godeps/_workspace/src/github.com/rs/xhandler/chain.go
+++ b/Godeps/_workspace/src/github.com/rs/xhandler/chain.go
@@ -3,7 +3,7 @@ package xhandler
import (
"net/http"
- "github.com/ethereum/go-ethereum/Godeps/_workspace/src/golang.org/x/net/context"
+ "golang.org/x/net/context"
)
// Chain is an helper to chain middleware handlers together for an easier
diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/middleware.go b/Godeps/_workspace/src/github.com/rs/xhandler/middleware.go
index 64b180323..5de136419 100644
--- a/Godeps/_workspace/src/github.com/rs/xhandler/middleware.go
+++ b/Godeps/_workspace/src/github.com/rs/xhandler/middleware.go
@@ -4,7 +4,7 @@ import (
"net/http"
"time"
- "github.com/ethereum/go-ethereum/Godeps/_workspace/src/golang.org/x/net/context"
+ "golang.org/x/net/context"
)
// CloseHandler returns a Handler cancelling the context when the client
diff --git a/Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go b/Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go
index b71789804..718c25322 100644
--- a/Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go
+++ b/Godeps/_workspace/src/github.com/rs/xhandler/xhandler.go
@@ -14,7 +14,7 @@ package xhandler
import (
"net/http"
- "github.com/ethereum/go-ethereum/Godeps/_workspace/src/golang.org/x/net/context"
+ "golang.org/x/net/context"
)
// HandlerC is a net/context aware http.Handler
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index a9f21b21a..24fe9f770 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -23,6 +23,7 @@ package bind
import (
"bytes"
"fmt"
+ "regexp"
"strings"
"text/template"
"unicode"
@@ -122,31 +123,37 @@ func bindType(kind abi.Type) string {
stringKind := kind.String()
switch {
- case stringKind == "address":
- return "common.Address"
-
- case stringKind == "address[]":
- return "[]common.Address"
+ case strings.HasPrefix(stringKind, "address"):
+ parts := regexp.MustCompile("address(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ if len(parts) != 2 {
+ return stringKind
+ }
+ return fmt.Sprintf("%scommon.Address", parts[1])
case strings.HasPrefix(stringKind, "bytes"):
- if stringKind == "bytes" {
- return "[]byte"
+ parts := regexp.MustCompile("bytes([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ if len(parts) != 3 {
+ return stringKind
}
- return fmt.Sprintf("[%s]byte", stringKind[5:])
+ return fmt.Sprintf("%s[%s]byte", parts[2], parts[1])
- case strings.HasPrefix(stringKind, "int"):
- switch stringKind[:3] {
- case "8", "16", "32", "64":
+ case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
+ parts := regexp.MustCompile("(u)?int([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ if len(parts) != 4 {
return stringKind
}
- return "*big.Int"
-
- case strings.HasPrefix(stringKind, "uint"):
- switch stringKind[:4] {
+ switch parts[2] {
case "8", "16", "32", "64":
+ return fmt.Sprintf("%s%sint%s", parts[3], parts[1], parts[2])
+ }
+ return fmt.Sprintf("%s*big.Int", parts[3])
+
+ case strings.HasPrefix(stringKind, "bool") || strings.HasPrefix(stringKind, "string"):
+ parts := regexp.MustCompile("([a-z]+)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ if len(parts) != 3 {
return stringKind
}
- return "*big.Int"
+ return fmt.Sprintf("%s%s", parts[2], parts[1])
default:
return stringKind
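The new bindType maps Solidity types to Go types with regular expressions so that fixed-size arrays and slices carry their [N] or [] suffix through as a Go prefix. The sketch below is standalone and simplified (the function name bindIntKind is mine, not the package's); it reproduces only the integer branch: machine-word widths keep a native Go integer type, everything else falls back to *big.Int, and the array/slice suffix captured by the third group becomes the Go prefix. Running it prints, for example, "uint24[23] -> [23]*big.Int", which is the shape the Slicer test's echoFancyInts round-trips.

package main

import (
	"fmt"
	"regexp"
)

var intKind = regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`)

func bindIntKind(stringKind string) string {
	parts := intKind.FindStringSubmatch(stringKind)
	if len(parts) != 4 {
		return stringKind
	}
	switch parts[2] {
	case "8", "16", "32", "64":
		// Native Go integer width, keeping any array/slice prefix.
		return fmt.Sprintf("%s%sint%s", parts[3], parts[1], parts[2])
	}
	// Everything else needs arbitrary precision.
	return fmt.Sprintf("%s*big.Int", parts[3])
}

func main() {
	for _, k := range []string{"int256", "uint24[23]", "int64[]", "uint8"} {
		fmt.Printf("%-12s -> %s\n", k, bindIntKind(k))
	}
}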
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index 12c849669..3f02af017 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -228,6 +228,50 @@ var bindTests = []struct {
}
`,
},
+ // Tests that arrays/slices can be properly returned and deserialized.
+ // Only addresses are tested, remainder just compiled to keep the test small.
+ {
+ `Slicer`,
+ `
+ contract Slicer {
+ function echoAddresses(address[] input) constant returns (address[] output) {
+ return input;
+ }
+ function echoInts(int[] input) constant returns (int[] output) {
+ return input;
+ }
+ function echoFancyInts(uint24[23] input) constant returns (uint24[23] output) {
+ return input;
+ }
+ function echoBools(bool[] input) constant returns (bool[] output) {
+ return input;
+ }
+ }
+ `,
+ `606060405261015c806100126000396000f3606060405260e060020a6000350463be1127a3811461003c578063d88becc014610092578063e15a3db71461003c578063f637e5891461003c575b005b604080516020600480358082013583810285810185019096528085526100ee959294602494909392850192829185019084908082843750949650505050505050604080516020810190915260009052805b919050565b604080516102e0818101909252610138916004916102e491839060179083908390808284375090955050505050506102e0604051908101604052806017905b60008152602001906001900390816100d15790505081905061008d565b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600f02600301f1509050019250505060405180910390f35b60405180826102e0808381846000600461015cf15090500191505060405180910390f3`,
+ `[{"constant":true,"inputs":[{"name":"input","type":"address[]"}],"name":"echoAddresses","outputs":[{"name":"output","type":"address[]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"uint24[23]"}],"name":"echoFancyInts","outputs":[{"name":"output","type":"uint24[23]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"int256[]"}],"name":"echoInts","outputs":[{"name":"output","type":"int256[]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"bool[]"}],"name":"echoBools","outputs":[{"name":"output","type":"bool[]"}],"type":"function"}]`,
+ `
+ // Generate a new random account and a funded simulator
+ key := crypto.NewKey(rand.Reader)
+ sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: key.Address, Balance: big.NewInt(10000000000)})
+
+ // Convert the tester key to an authorized transactor for ease of use
+ auth := bind.NewKeyedTransactor(key)
+
+ // Deploy a slice tester contract and execute a n array call on it
+ _, _, slicer, err := DeploySlicer(auth, sim)
+ if err != nil {
+ t.Fatalf("Failed to deploy slicer contract: %v", err)
+ }
+ sim.Commit()
+
+ if out, err := slicer.EchoAddresses(nil, []common.Address{key.Address, common.Address{}}); err != nil {
+ t.Fatalf("Failed to call slice echoer: %v", err)
+ } else if !reflect.DeepEqual(out, []common.Address{key.Address, common.Address{}}) {
+ t.Fatalf("Slice return mismatch: have %v, want %v", out, []common.Address{key.Address, common.Address{}})
+ }
+ `,
+ },
// Tests that anonymous default methods can be correctly invoked
{
`Defaulter`,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 4822a47f7..d478bb095 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -780,16 +780,20 @@ func SetupNetwork(ctx *cli.Context) {
params.TargetGasLimit = common.String2Big(ctx.GlobalString(TargetGasLimitFlag.Name))
}
-// MustMakeChainConfig reads the chain configuration from the given database.
+// MustMakeChainConfig reads the chain configuration from the database in ctx.Datadir.
func MustMakeChainConfig(ctx *cli.Context) *core.ChainConfig {
- var (
- db = MakeChainDatabase(ctx)
- genesis = core.GetBlock(db, core.GetCanonicalHash(db, 0))
- )
+ db := MakeChainDatabase(ctx)
defer db.Close()
+ return MustMakeChainConfigFromDb(ctx, db)
+}
+
+// MustMakeChainConfigFromDb reads the chain configuration from the given database.
+func MustMakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *core.ChainConfig {
+ genesis := core.GetBlock(db, core.GetCanonicalHash(db, 0))
+
if genesis != nil {
- // Exsting genesis block, use stored config if available.
+ // Existing genesis block, use stored config if available.
storedConfig, err := core.GetChainConfig(db, genesis.Hash())
if err == nil {
return storedConfig
@@ -833,7 +837,7 @@ func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database
}
}
- chainConfig := MustMakeChainConfig(ctx)
+ chainConfig := MustMakeChainConfigFromDb(ctx, chainDb)
var eventMux event.TypeMux
chain, err = core.NewBlockChain(chainDb, chainConfig, ethash.New(), &eventMux)
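The flags.go refactor splits config loading into a wrapper that manages the database handle and a FromDb variant that reuses one the caller already holds; MakeChain, which has just opened chainDb itself, now calls the latter, presumably to avoid opening the chain database a second time. Below is a rough illustration of the pattern with placeholder types (openDB, DB, and ChainConfig are invented for the sketch, not go-ethereum APIs).

package main

type DB struct{ path string }

func (db *DB) Close() {}

func openDB(path string) *DB { return &DB{path: path} }

type ChainConfig struct{}

// loadChainConfig owns the database handle for the duration of the call.
func loadChainConfig(path string) *ChainConfig {
	db := openDB(path)
	defer db.Close()
	return loadChainConfigFromDb(db)
}

// loadChainConfigFromDb works on a database the caller already opened.
func loadChainConfigFromDb(db *DB) *ChainConfig {
	// read genesis hash, stored config, etc.
	return &ChainConfig{}
}

func main() {
	// One-shot use: the wrapper owns the handle.
	_ = loadChainConfig("/tmp/chaindata")

	// MakeChain-style use: the caller already holds an open handle.
	db := openDB("/tmp/chaindata")
	defer db.Close()
	_ = loadChainConfigFromDb(db)
}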
diff --git a/eth/gpu_mining.go b/eth/gpu_mining.go
index c351c2bdd..cd734bd3c 100644
--- a/eth/gpu_mining.go
+++ b/eth/gpu_mining.go
@@ -56,8 +56,7 @@ func (s *Ethereum) StartMining(threads int, gpus string) error {
}
// TODO: re-creating miner is a bit ugly
- cl := ethash.NewCL(ids)
- s.miner = miner.New(s, s.EventMux(), cl)
+ s.miner = miner.New(s, s.chainConfig, s.EventMux(), ethash.NewCL(ids))
go s.miner.Start(eb, len(ids))
return nil
}
diff --git a/rpc/javascript.go b/rpc/javascript.go
index c4fa80c0b..211a6644e 100644
--- a/rpc/javascript.go
+++ b/rpc/javascript.go
@@ -250,7 +250,15 @@ web3._extend({
[
new web3._extend.Property({
name: 'pendingTransactions',
- getter: 'eth_pendingTransactions'
+ getter: 'eth_pendingTransactions',
+ outputFormatter: function(txs) {
+ var formatted = [];
+ for (var i = 0; i < txs.length; i++) {
+ formatted.push(web3._extend.formatters.outputTransactionFormatter(txs[i]));
+ formatted[i].blockHash = null;
+ }
+ return formatted;
+ }
})
]
});