From 7e58949c3f4c67960fb0422f49f3e513a388cc5d Mon Sep 17 00:00:00 2001 From: obscuren Date: Tue, 9 Jun 2015 21:14:11 +0200 Subject: cmd/geth: develop version bump 0.9.29 --- cmd/geth/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 86868e20b..4d7d57220 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -44,7 +44,7 @@ import ( const ( ClientIdentifier = "Geth" - Version = "0.9.28" + Version = "0.9.29" ) var ( -- cgit v1.2.3 From 1cbbfbe7fa32d64d4b4dc8100394f47c8a78f142 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 9 Jun 2015 22:26:26 +0300 Subject: p2p: fix a close race in the dial test --- p2p/server_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/p2p/server_test.go b/p2p/server_test.go index 01448cc7b..e8d21a188 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -117,7 +117,6 @@ func TestServerDial(t *testing.T) { t.Error("accept error:", err) return } - conn.Close() accepted <- conn }() @@ -134,6 +133,8 @@ func TestServerDial(t *testing.T) { select { case conn := <-accepted: + defer conn.Close() + select { case peer := <-connected: if peer.ID() != remid { -- cgit v1.2.3 From 6a5c9aff3b05d971c568974b225f6d949c4b422a Mon Sep 17 00:00:00 2001 From: obscuren Date: Tue, 9 Jun 2015 22:49:33 +0200 Subject: tests: check gas limit error --- tests/helper/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/helper/vm.go b/tests/helper/vm.go index 5f1a3e345..2db2b82c4 100644 --- a/tests/helper/vm.go +++ b/tests/helper/vm.go @@ -183,7 +183,7 @@ func RunState(statedb *state.StateDB, env, tx map[string]string) ([]byte, state. vmenv := NewEnvFromMap(statedb, env, tx) vmenv.origin = common.BytesToAddress(keyPair.Address()) ret, _, err := core.ApplyMessage(vmenv, message, coinbase) - if core.IsNonceErr(err) || core.IsInvalidTxErr(err) { + if core.IsNonceErr(err) || core.IsInvalidTxErr(err) || state.IsGasLimitErr(err) { statedb.Set(snapshot) } statedb.Update() -- cgit v1.2.3 From b3d5ce7d48426bbe9269f3ea89029187cf939398 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 10 Jun 2015 01:20:35 +0300 Subject: cmd/geth, eth/downloader: collect and report import progress too --- cmd/geth/admin.go | 8 ++++---- eth/downloader/downloader.go | 39 +++++++++++++++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/cmd/geth/admin.go b/cmd/geth/admin.go index 13d10de32..ea8a70923 100644 --- a/cmd/geth/admin.go +++ b/cmd/geth/admin.go @@ -51,7 +51,7 @@ func (js *jsre) adminBindings() { admin.Set("import", js.importChain) admin.Set("export", js.exportChain) admin.Set("verbosity", js.verbosity) - admin.Set("progress", js.downloadProgress) + admin.Set("progress", js.syncProgress) admin.Set("setSolc", js.setSolc) admin.Set("contractInfo", struct{}{}) @@ -324,9 +324,9 @@ func (js *jsre) setHead(call otto.FunctionCall) otto.Value { return otto.UndefinedValue() } -func (js *jsre) downloadProgress(call otto.FunctionCall) otto.Value { - pending, cached := js.ethereum.Downloader().Stats() - v, _ := call.Otto.ToValue(map[string]interface{}{"pending": pending, "cached": cached}) +func (js *jsre) syncProgress(call otto.FunctionCall) otto.Value { + pending, cached, importing := js.ethereum.Downloader().Stats() + v, _ := call.Otto.ToValue(map[string]interface{}{"pending": pending, "cached": cached, "importing": importing}) return v } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go 
index 29b627771..efb94e5e3 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -78,6 +78,10 @@ type Downloader struct { checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain banned *set.Set // Set of hashes we've received and banned + // Statistics + importQueue []common.Hash // Hashes of the previously taken blocks to check import progress + importLock sync.Mutex + // Callbacks hasBlock hashCheckFn getBlock getBlockFn @@ -121,8 +125,21 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa return downloader } -func (d *Downloader) Stats() (current int, max int) { - return d.queue.Size() +// Stats retrieves the current status of the downloader. +func (d *Downloader) Stats() (pending int, cached int, importing int) { + // Fetch the download status + pending, cached = d.queue.Size() + + // Generate the import status + d.importLock.Lock() + defer d.importLock.Unlock() + + for len(d.importQueue) > 0 && d.hasBlock(d.importQueue[0]) { + d.importQueue = d.importQueue[1:] + } + importing = len(d.importQueue) + + return } // Synchronising returns the state of the downloader @@ -202,7 +219,17 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error { // TakeBlocks takes blocks from the queue and yields them to the caller. func (d *Downloader) TakeBlocks() []*Block { - return d.queue.TakeBlocks() + blocks := d.queue.TakeBlocks() + if len(blocks) > 0 { + hashes := make([]common.Hash, len(blocks)) + for i, block := range blocks { + hashes[i] = block.RawBlock.Hash() + } + d.importLock.Lock() + d.importQueue = hashes + d.importLock.Unlock() + } + return blocks } // Has checks if the downloader knows about a particular hash, meaning that its @@ -255,9 +282,13 @@ func (d *Downloader) Cancel() bool { } d.cancelLock.Unlock() - // reset the queue + // Reset the queue and import statistics d.queue.Reset() + d.importLock.Lock() + d.importQueue = nil + d.importLock.Unlock() + return true } -- cgit v1.2.3 From 468501cb860508af55e1fcd586e1498df0a2d984 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 10:44:46 +0200 Subject: core/vm: changed program counter to uint64 --- core/vm/context.go | 8 ++++---- core/vm/vm.go | 18 +++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/core/vm/context.go b/core/vm/context.go index de03f84f0..e33324b53 100644 --- a/core/vm/context.go +++ b/core/vm/context.go @@ -49,13 +49,13 @@ func NewContext(caller ContextRef, object ContextRef, value, gas, price *big.Int return c } -func (c *Context) GetOp(n *big.Int) OpCode { +func (c *Context) GetOp(n uint64) OpCode { return OpCode(c.GetByte(n)) } -func (c *Context) GetByte(n *big.Int) byte { - if n.Cmp(big.NewInt(int64(len(c.Code)))) < 0 { - return c.Code[n.Int64()] +func (c *Context) GetByte(n uint64) byte { + if n < uint64(len(c.Code)) { + return c.Code[n] } return 0 diff --git a/core/vm/vm.go b/core/vm/vm.go index 2bd950385..ed4157178 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -81,17 +81,17 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { codehash = crypto.Sha3Hash(code) mem = NewMemory() stack = newStack() - pc = new(big.Int) + pc = uint64(0) statedb = self.env.State() - jump = func(from *big.Int, to *big.Int) error { + jump = func(from uint64, to *big.Int) error { if !context.jumpdests.has(codehash, code, to) { - nop := context.GetOp(to) + nop := context.GetOp(to.Uint64()) return fmt.Errorf("invalid jump destination (%v) %v", nop, to) } self.Printf(" ~> 
%v", to) - pc = to + pc = to.Uint64() self.Endl() @@ -519,11 +519,11 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { stack.push(self.env.GasLimit()) case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32: - a := big.NewInt(int64(op - PUSH1 + 1)) - byts := getData(code, new(big.Int).Add(pc, big.NewInt(1)), a) + size := uint64(op - PUSH1 + 1) + byts := getData(code, new(big.Int).SetUint64(pc+1), new(big.Int).SetUint64(size)) // push value to stack stack.push(common.Bytes2Big(byts)) - pc.Add(pc, a) + pc += size self.Printf(" => 0x%x", byts) case POP: @@ -603,7 +603,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { case JUMPDEST: case PC: - stack.push(pc) + stack.push(new(big.Int).SetUint64(pc)) case MSIZE: stack.push(big.NewInt(int64(mem.Len()))) case GAS: @@ -708,7 +708,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { return nil, fmt.Errorf("Invalid opcode %x", op) } - pc.Add(pc, One) + pc++ self.Endl() } -- cgit v1.2.3 From ff5b3ef0877978699235d20b3caa9890b35ec6f8 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 10:59:44 +0200 Subject: core/vm: added structured logging --- core/execution.go | 4 ---- core/vm/vm.go | 26 ++++++++++++++++++++++---- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/core/execution.go b/core/execution.go index 522c90449..9fb0210de 100644 --- a/core/execution.go +++ b/core/execution.go @@ -2,7 +2,6 @@ package core import ( "math/big" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" @@ -49,8 +48,6 @@ func (self *Execution) Create(caller vm.ContextRef) (ret []byte, err error, acco } func (self *Execution) exec(contextAddr *common.Address, code []byte, caller vm.ContextRef) (ret []byte, err error) { - start := time.Now() - env := self.env evm := self.evm if env.Depth() > int(params.CallCreateDepth.Int64()) { @@ -96,7 +93,6 @@ func (self *Execution) exec(contextAddr *common.Address, code []byte, caller vm. 
context.SetCallCode(contextAddr, code) ret, err = evm.Run(context, self.input) - evm.Printf("message call took %v", time.Since(start)).Endl() if err != nil { env.State().Set(snapshot) } diff --git a/core/vm/vm.go b/core/vm/vm.go index ed4157178..e6d4c8df2 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -11,10 +11,18 @@ import ( "github.com/ethereum/go-ethereum/params" ) +type log struct { + op OpCode + gas *big.Int + memory []byte + stack []*big.Int +} + type Vm struct { env Environment - logTy byte + // structured logging + Logs []log logStr string err error @@ -32,9 +40,7 @@ type Vm struct { } func New(env Environment) *Vm { - lt := LogTyPretty - - return &Vm{debug: Debug, env: env, logTy: lt, Recoverable: true} + return &Vm{env: env, Recoverable: true} } func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { @@ -106,6 +112,8 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { // Get the memory location of pc op = context.GetOp(pc) + self.Log(op, context.Gas, mem, stack) + self.Printf("(pc) %-3d -o- %-14s (m) %-4d (s) %-4d ", pc, op.String(), mem.Len(), stack.len()) newMemSize, gas, err := self.calculateGasAndSize(context, caller, op, statedb, mem, stack) if err != nil { @@ -855,6 +863,16 @@ func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCo return newMemSize, gas, nil } +func (vm *Vm) Log(op OpCode, gas *big.Int, memory *Memory, stack *stack) { + if vm.debug { + mem := make([]byte, len(memory.store)) + copy(mem, memory.store) + stck := make([]*big.Int, len(stack.data)) + copy(stck, stack.data) + vm.Logs = append(vm.Logs, log{op, new(big.Int).Set(gas), mem, stck}) + } +} + func (self *Vm) RunPrecompiled(p *PrecompiledAccount, callData []byte, context *Context) (ret []byte, err error) { gas := p.Gas(len(callData)) if context.UseGas(gas) { -- cgit v1.2.3 From 38c61f6f2567e7943c9a16e2be0a2bfedb3a1fb3 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 12:23:49 +0200 Subject: core, core/vm: added structure logging This also reduces the time required spend in the VM --- core/state_transition.go | 4 ++ core/vm/environment.go | 10 +++ core/vm/gas.go | 2 +- core/vm/stack.go | 26 ++++---- core/vm/virtual_machine.go | 2 - core/vm/vm.go | 147 +++++---------------------------------------- core/vm_env.go | 11 ++++ core/vm_logger.go | 40 ++++++++++++ tests/helper/vm.go | 11 +++- 9 files changed, 104 insertions(+), 149 deletions(-) create mode 100644 core/vm_logger.go diff --git a/core/state_transition.go b/core/state_transition.go index 7672fa3ff..3dbc789f8 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -223,6 +223,10 @@ func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, er return nil, nil, InvalidTxError(err) } + if vm.Debug { + VmStdErrFormat(vmenv.StructLogs()) + } + self.refundGas() self.state.AddBalance(self.coinbase, new(big.Int).Mul(self.gasUsed(), self.gasPrice)) diff --git a/core/vm/environment.go b/core/vm/environment.go index 282d19578..31d5d5ea6 100644 --- a/core/vm/environment.go +++ b/core/vm/environment.go @@ -20,6 +20,8 @@ type Environment interface { GasLimit() *big.Int Transfer(from, to Account, amount *big.Int) error AddLog(*state.Log) + AddStructLog(StructLog) + StructLogs() []StructLog VmType() Type @@ -31,6 +33,14 @@ type Environment interface { Create(me ContextRef, data []byte, gas, price, value *big.Int) ([]byte, error, ContextRef) } +type StructLog struct { + Pc uint64 + Op OpCode + Gas *big.Int + Memory []byte + Stack 
[]*big.Int +} + type Account interface { SubBalance(amount *big.Int) AddBalance(amount *big.Int) diff --git a/core/vm/gas.go b/core/vm/gas.go index 32f5fec04..1c29ccb65 100644 --- a/core/vm/gas.go +++ b/core/vm/gas.go @@ -21,7 +21,7 @@ var ( GasContractByte = big.NewInt(200) ) -func baseCheck(op OpCode, stack *stack, gas *big.Int) error { +func baseCheck(op OpCode, stack *Stack, gas *big.Int) error { // PUSH and DUP are a bit special. They all cost the same but we do want to have checking on stack push limit // PUSH is also allowed to calculate the same price for all PUSHes // DUP requirements are handled elsewhere (except for the stack limit check) diff --git a/core/vm/stack.go b/core/vm/stack.go index bb232d0b9..1d0a018c6 100644 --- a/core/vm/stack.go +++ b/core/vm/stack.go @@ -5,16 +5,20 @@ import ( "math/big" ) -func newStack() *stack { - return &stack{} +func newStack() *Stack { + return &Stack{} } -type stack struct { +type Stack struct { data []*big.Int ptr int } -func (st *stack) push(d *big.Int) { +func (st *Stack) Data() []*big.Int { + return st.data +} + +func (st *Stack) push(d *big.Int) { // NOTE push limit (1024) is checked in baseCheck stackItem := new(big.Int).Set(d) if len(st.data) > st.ptr { @@ -25,36 +29,36 @@ func (st *stack) push(d *big.Int) { st.ptr++ } -func (st *stack) pop() (ret *big.Int) { +func (st *Stack) pop() (ret *big.Int) { st.ptr-- ret = st.data[st.ptr] return } -func (st *stack) len() int { +func (st *Stack) len() int { return st.ptr } -func (st *stack) swap(n int) { +func (st *Stack) swap(n int) { st.data[st.len()-n], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-n] } -func (st *stack) dup(n int) { +func (st *Stack) dup(n int) { st.push(st.data[st.len()-n]) } -func (st *stack) peek() *big.Int { +func (st *Stack) peek() *big.Int { return st.data[st.len()-1] } -func (st *stack) require(n int) error { +func (st *Stack) require(n int) error { if st.len() < n { return fmt.Errorf("stack underflow (%d <=> %d)", len(st.data), n) } return nil } -func (st *stack) Print() { +func (st *Stack) Print() { fmt.Println("### stack ###") if len(st.data) > 0 { for i, val := range st.data { diff --git a/core/vm/virtual_machine.go b/core/vm/virtual_machine.go index 6db284f42..1fd1dcd88 100644 --- a/core/vm/virtual_machine.go +++ b/core/vm/virtual_machine.go @@ -3,6 +3,4 @@ package vm type VirtualMachine interface { Env() Environment Run(context *Context, data []byte) ([]byte, error) - Printf(string, ...interface{}) VirtualMachine - Endl() VirtualMachine } diff --git a/core/vm/vm.go b/core/vm/vm.go index e6d4c8df2..7c4a7ce6d 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -7,24 +7,12 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/params" ) -type log struct { - op OpCode - gas *big.Int - memory []byte - stack []*big.Int -} - type Vm struct { env Environment - // structured logging - Logs []log - logStr string - err error // For logging debug bool @@ -40,7 +28,7 @@ type Vm struct { } func New(env Environment) *Vm { - return &Vm{env: env, Recoverable: true} + return &Vm{env: env, debug: Debug, Recoverable: true} } func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { @@ -54,8 +42,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { price = context.Price ) - self.Printf("(%d) (%x) %x (code=%d) gas: %v (d) %x", self.env.Depth(), 
caller.Address().Bytes()[:4], context.Address(), len(code), context.Gas, callData).Endl() - // User defer pattern to check for an error and, based on the error being nil or not, use all gas and return. defer func() { if self.After != nil { @@ -63,7 +49,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { } if err != nil { - self.Printf(" %v", err).Endl() + // In case of a VM exception (known exceptions) all gas consumed (panics NOT included). context.UseGas(context.Gas) @@ -96,11 +82,8 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { return fmt.Errorf("invalid jump destination (%v) %v", nop, to) } - self.Printf(" ~> %v", to) pc = to.Uint64() - self.Endl() - return nil } ) @@ -112,18 +95,14 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { // Get the memory location of pc op = context.GetOp(pc) - self.Log(op, context.Gas, mem, stack) + self.log(pc, op, context.Gas, mem, stack) - self.Printf("(pc) %-3d -o- %-14s (m) %-4d (s) %-4d ", pc, op.String(), mem.Len(), stack.len()) newMemSize, gas, err := self.calculateGasAndSize(context, caller, op, statedb, mem, stack) if err != nil { return nil, err } - self.Printf("(g) %-3v (%v)", gas, context.Gas) - if !context.UseGas(gas) { - self.Endl() tmp := new(big.Int).Set(context.Gas) @@ -137,40 +116,33 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { switch op { case ADD: x, y := stack.pop(), stack.pop() - self.Printf(" %v + %v", y, x) base.Add(x, y) U256(base) - self.Printf(" = %v", base) // pop result back on the stack stack.push(base) case SUB: x, y := stack.pop(), stack.pop() - self.Printf(" %v - %v", x, y) base.Sub(x, y) U256(base) - self.Printf(" = %v", base) // pop result back on the stack stack.push(base) case MUL: x, y := stack.pop(), stack.pop() - self.Printf(" %v * %v", y, x) base.Mul(x, y) U256(base) - self.Printf(" = %v", base) // pop result back on the stack stack.push(base) case DIV: x, y := stack.pop(), stack.pop() - self.Printf(" %v / %v", x, y) if y.Cmp(common.Big0) != 0 { base.Div(x, y) @@ -178,14 +150,11 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { U256(base) - self.Printf(" = %v", base) // pop result back on the stack stack.push(base) case SDIV: x, y := S256(stack.pop()), S256(stack.pop()) - self.Printf(" %v / %v", x, y) - if y.Cmp(common.Big0) == 0 { base.Set(common.Big0) } else { @@ -201,13 +170,10 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { U256(base) } - self.Printf(" = %v", base) stack.push(base) case MOD: x, y := stack.pop(), stack.pop() - self.Printf(" %v %% %v", x, y) - if y.Cmp(common.Big0) == 0 { base.Set(common.Big0) } else { @@ -216,13 +182,10 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { U256(base) - self.Printf(" = %v", base) stack.push(base) case SMOD: x, y := S256(stack.pop()), S256(stack.pop()) - self.Printf(" %v %% %v", x, y) - if y.Cmp(common.Big0) == 0 { base.Set(common.Big0) } else { @@ -238,20 +201,15 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { U256(base) } - self.Printf(" = %v", base) stack.push(base) case EXP: x, y := stack.pop(), stack.pop() - self.Printf(" %v ** %v", x, y) - base.Exp(x, y, Pow256) U256(base) - self.Printf(" = %v", base) - stack.push(base) case SIGNEXTEND: back := stack.pop() @@ -268,15 +226,13 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { num = U256(num) - 
self.Printf(" = %v", num) - stack.push(num) } case NOT: stack.push(U256(new(big.Int).Not(stack.pop()))) case LT: x, y := stack.pop(), stack.pop() - self.Printf(" %v < %v", x, y) + // x < y if x.Cmp(y) < 0 { stack.push(common.BigTrue) @@ -285,7 +241,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { } case GT: x, y := stack.pop(), stack.pop() - self.Printf(" %v > %v", x, y) // x > y if x.Cmp(y) > 0 { @@ -296,7 +251,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { case SLT: x, y := S256(stack.pop()), S256(stack.pop()) - self.Printf(" %v < %v", x, y) + // x < y if x.Cmp(S256(y)) < 0 { stack.push(common.BigTrue) @@ -305,7 +260,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { } case SGT: x, y := S256(stack.pop()), S256(stack.pop()) - self.Printf(" %v > %v", x, y) // x > y if x.Cmp(y) > 0 { @@ -316,7 +270,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { case EQ: x, y := stack.pop(), stack.pop() - self.Printf(" %v == %v", y, x) // x == y if x.Cmp(y) == 0 { @@ -334,17 +287,14 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { case AND: x, y := stack.pop(), stack.pop() - self.Printf(" %v & %v", y, x) stack.push(base.And(x, y)) case OR: x, y := stack.pop(), stack.pop() - self.Printf(" %v | %v", x, y) stack.push(base.Or(x, y)) case XOR: x, y := stack.pop(), stack.pop() - self.Printf(" %v ^ %v", x, y) stack.push(base.Xor(x, y)) case BYTE: @@ -358,8 +308,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { base.Set(common.BigFalse) } - self.Printf(" => 0x%x", base.Bytes()) - stack.push(base) case ADDMOD: x := stack.pop() @@ -373,8 +321,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { base = U256(base) } - self.Printf(" %v + %v %% %v = %v", x, y, z, base) - stack.push(base) case MULMOD: x := stack.pop() @@ -388,8 +334,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { U256(base) } - self.Printf(" %v + %v %% %v = %v", x, y, z, base) - stack.push(base) case SHA3: @@ -398,44 +342,35 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { stack.push(common.BigD(data)) - self.Printf(" => (%v) %x", size, data) case ADDRESS: stack.push(common.Bytes2Big(context.Address().Bytes())) - self.Printf(" => %x", context.Address()) case BALANCE: addr := common.BigToAddress(stack.pop()) balance := statedb.GetBalance(addr) stack.push(balance) - self.Printf(" => %v (%x)", balance, addr) case ORIGIN: origin := self.env.Origin() stack.push(origin.Big()) - self.Printf(" => %x", origin) case CALLER: caller := context.caller.Address() stack.push(common.Bytes2Big(caller.Bytes())) - self.Printf(" => %x", caller) case CALLVALUE: stack.push(value) - self.Printf(" => %v", value) case CALLDATALOAD: data := getData(callData, stack.pop(), common.Big32) - self.Printf(" => 0x%x", data) - stack.push(common.Bytes2Big(data)) case CALLDATASIZE: l := int64(len(callData)) stack.push(big.NewInt(l)) - self.Printf(" => %d", l) case CALLDATACOPY: var ( mOff = stack.pop() @@ -446,7 +381,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { mem.Set(mOff.Uint64(), l.Uint64(), data) - self.Printf(" => [%v, %v, %v]", mOff, cOff, l) case CODESIZE, EXTCODESIZE: var code []byte if op == EXTCODESIZE { @@ -460,7 +394,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { l := 
big.NewInt(int64(len(code))) stack.push(l) - self.Printf(" => %d", l) case CODECOPY, EXTCODECOPY: var code []byte if op == EXTCODECOPY { @@ -480,12 +413,9 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { mem.Set(mOff.Uint64(), l.Uint64(), codeCopy) - self.Printf(" => [%v, %v, %v] %x", mOff, cOff, l, codeCopy) case GASPRICE: stack.push(context.Price) - self.Printf(" => %x", context.Price) - case BLOCKHASH: num := stack.pop() @@ -496,33 +426,27 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { stack.push(common.Big0) } - self.Printf(" => 0x%x", stack.peek().Bytes()) case COINBASE: coinbase := self.env.Coinbase() stack.push(coinbase.Big()) - self.Printf(" => 0x%x", coinbase) case TIMESTAMP: time := self.env.Time() stack.push(big.NewInt(time)) - self.Printf(" => 0x%x", time) case NUMBER: number := self.env.BlockNumber() stack.push(U256(number)) - self.Printf(" => 0x%x", number.Bytes()) case DIFFICULTY: difficulty := self.env.Difficulty() stack.push(difficulty) - self.Printf(" => 0x%x", difficulty.Bytes()) case GASLIMIT: - self.Printf(" => %v", self.env.GasLimit()) stack.push(self.env.GasLimit()) @@ -533,19 +457,16 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { stack.push(common.Bytes2Big(byts)) pc += size - self.Printf(" => 0x%x", byts) case POP: stack.pop() case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16: n := int(op - DUP1 + 1) stack.dup(n) - self.Printf(" => [%d] 0x%x", n, stack.peek().Bytes()) case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16: n := int(op - SWAP1 + 2) stack.swap(n) - self.Printf(" => [%d]", n) case LOG0, LOG1, LOG2, LOG3, LOG4: n := int(op - LOG0) topics := make([]common.Hash, n) @@ -558,38 +479,32 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { log := state.NewLog(context.Address(), topics, data, self.env.BlockNumber().Uint64()) self.env.AddLog(log) - self.Printf(" => %v", log) case MLOAD: offset := stack.pop() val := common.BigD(mem.Get(offset.Int64(), 32)) stack.push(val) - self.Printf(" => 0x%x", val.Bytes()) case MSTORE: // pop value of the stack mStart, val := stack.pop(), stack.pop() mem.Set(mStart.Uint64(), 32, common.BigToBytes(val, 256)) - self.Printf(" => 0x%x", val) case MSTORE8: off, val := stack.pop().Int64(), stack.pop().Int64() mem.store[off] = byte(val & 0xff) - self.Printf(" => [%v] 0x%x", off, mem.store[off]) case SLOAD: loc := common.BigToHash(stack.pop()) val := common.Bytes2Big(statedb.GetState(context.Address(), loc)) stack.push(val) - self.Printf(" {0x%x : 0x%x}", loc, val.Bytes()) case SSTORE: loc := common.BigToHash(stack.pop()) val := stack.pop() statedb.SetState(context.Address(), loc, val) - self.Printf(" {0x%x : 0x%x}", loc, val.Bytes()) case JUMP: if err := jump(pc, stack.pop()); err != nil { return nil, err @@ -607,8 +522,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { continue } - self.Printf(" ~> false") - case JUMPDEST: case PC: stack.push(new(big.Int).SetUint64(pc)) @@ -617,7 +530,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { case GAS: stack.push(context.Gas) - self.Printf(" => %x", context.Gas) case CREATE: var ( @@ -627,14 +539,12 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { gas = new(big.Int).Set(context.Gas) addr common.Address ) - self.Endl() 
context.UseGas(context.Gas) ret, suberr, ref := self.env.Create(context, input, gas, price, value) if suberr != nil { stack.push(common.BigFalse) - self.Printf(" (*) 0x0 %v", suberr) } else { // gas < len(ret) * CreateDataGas == NO_CODE dataGas := big.NewInt(int64(len(ret))) @@ -659,7 +569,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { retOffset, retSize := stack.pop(), stack.pop() address := common.BigToAddress(addr) - self.Printf(" => %x", address).Endl() // Get the arguments from the memory args := mem.Get(inOffset.Int64(), inSize.Int64()) @@ -681,48 +590,40 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { if err != nil { stack.push(common.BigFalse) - self.Printf("%v").Endl() } else { stack.push(common.BigTrue) mem.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - self.Printf("resume %x (%v)", context.Address(), context.Gas) + case RETURN: offset, size := stack.pop(), stack.pop() ret := mem.GetPtr(offset.Int64(), size.Int64()) - self.Printf(" => [%v, %v] (%d) 0x%x", offset, size, len(ret), ret).Endl() - return context.Return(ret), nil case SUICIDE: receiver := statedb.GetOrNewStateObject(common.BigToAddress(stack.pop())) balance := statedb.GetBalance(context.Address()) - self.Printf(" => (%x) %v", receiver.Address().Bytes()[:4], balance) - receiver.AddBalance(balance) statedb.Delete(context.Address()) fallthrough case STOP: // Stop the context - self.Endl() return context.Return(nil), nil default: - self.Printf("(pc) %-3v Invalid opcode %x\n", pc, op).Endl() return nil, fmt.Errorf("Invalid opcode %x", op) } pc++ - self.Endl() } } -func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCode, statedb *state.StateDB, mem *Memory, stack *stack) (*big.Int, *big.Int, error) { +func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCode, statedb *state.StateDB, mem *Memory, stack *Stack) (*big.Int, *big.Int, error) { var ( gas = new(big.Int) newMemSize *big.Int = new(big.Int) @@ -863,26 +764,13 @@ func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCo return newMemSize, gas, nil } -func (vm *Vm) Log(op OpCode, gas *big.Int, memory *Memory, stack *stack) { - if vm.debug { - mem := make([]byte, len(memory.store)) - copy(mem, memory.store) - stck := make([]*big.Int, len(stack.data)) - copy(stck, stack.data) - vm.Logs = append(vm.Logs, log{op, new(big.Int).Set(gas), mem, stck}) - } -} - func (self *Vm) RunPrecompiled(p *PrecompiledAccount, callData []byte, context *Context) (ret []byte, err error) { gas := p.Gas(len(callData)) if context.UseGas(gas) { ret = p.Call(callData) - self.Printf("NATIVE_FUNC => %x", ret) - self.Endl() return context.Return(ret), nil } else { - self.Printf("NATIVE_FUNC => failed").Endl() tmp := new(big.Int).Set(context.Gas) @@ -890,21 +778,14 @@ func (self *Vm) RunPrecompiled(p *PrecompiledAccount, callData []byte, context * } } -func (self *Vm) Printf(format string, v ...interface{}) VirtualMachine { - if self.debug { - self.logStr += fmt.Sprintf(format, v...) 
- } - - return self -} - -func (self *Vm) Endl() VirtualMachine { - if self.debug { - glog.V(0).Infoln(self.logStr) - self.logStr = "" +func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *Stack) { + if Debug { + mem := make([]byte, len(memory.Data())) + copy(mem, memory.Data()) + stck := make([]*big.Int, len(stack.Data())) + copy(stck, stack.Data()) + self.env.AddStructLog(StructLog{pc, op, new(big.Int).Set(gas), mem, stck}) } - - return self } func (self *Vm) Env() Environment { diff --git a/core/vm_env.go b/core/vm_env.go index c439d2946..da862d5c8 100644 --- a/core/vm_env.go +++ b/core/vm_env.go @@ -16,6 +16,8 @@ type VMEnv struct { depth int chain *ChainManager typ vm.Type + // structured logging + logs []vm.StructLog } func NewEnv(state *state.StateDB, chain *ChainManager, msg Message, block *types.Block) *VMEnv { @@ -47,6 +49,7 @@ func (self *VMEnv) GetHash(n uint64) common.Hash { return common.Hash{} } + func (self *VMEnv) AddLog(log *state.Log) { self.state.AddLog(log) } @@ -68,3 +71,11 @@ func (self *VMEnv) Create(me vm.ContextRef, data []byte, gas, price, value *big. exe := NewExecution(self, nil, data, gas, price, value) return exe.Create(me) } + +func (self *VMEnv) StructLogs() []vm.StructLog { + return self.logs +} + +func (self *VMEnv) AddStructLog(log vm.StructLog) { + self.logs = append(self.logs, log) +} diff --git a/core/vm_logger.go b/core/vm_logger.go new file mode 100644 index 000000000..84fa71b24 --- /dev/null +++ b/core/vm_logger.go @@ -0,0 +1,40 @@ +package core + +import ( + "fmt" + "os" + "unicode/utf8" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" +) + +func VmStdErrFormat(logs []vm.StructLog) { + fmt.Fprintf(os.Stderr, "VM Stats %d ops\n", len(logs)) + for _, log := range logs { + fmt.Fprintf(os.Stderr, "PC %-3d - %-14s\n", log.Pc, log.Op) + fmt.Fprintln(os.Stderr, "STACK =", len(log.Stack)) + for i, item := range log.Stack { + fmt.Fprintf(os.Stderr, "%04d: %x\n", i, common.LeftPadBytes(item.Bytes(), 32)) + } + + const maxMem = 10 + addr := 0 + fmt.Fprintln(os.Stderr, "MEM =", len(log.Memory)) + for i := 0; i+16 <= len(log.Memory) && addr < maxMem; i += 16 { + data := log.Memory[i : i+16] + str := fmt.Sprintf("%04d: % x ", addr*16, data) + for _, r := range data { + if r == 0 { + str += "." + } else if utf8.ValidRune(rune(r)) { + str += fmt.Sprintf("%s", string(r)) + } else { + str += "?" 
+ } + } + addr++ + fmt.Fprintln(os.Stderr, str) + } + } +} diff --git a/tests/helper/vm.go b/tests/helper/vm.go index 5f1a3e345..87b2e070d 100644 --- a/tests/helper/vm.go +++ b/tests/helper/vm.go @@ -27,9 +27,8 @@ type Env struct { difficulty *big.Int gasLimit *big.Int - logs state.Logs - vmTest bool + logs []vm.StructLog } func NewEnv(state *state.StateDB) *Env { @@ -38,6 +37,14 @@ func NewEnv(state *state.StateDB) *Env { } } +func (self *Env) StructLogs() []vm.StructLog { + return self.logs +} + +func (self *Env) AddStructLog(log vm.StructLog) { + self.logs = append(self.logs, log) +} + func NewEnvFromMap(state *state.StateDB, envValues map[string]string, exeValues map[string]string) *Env { env := NewEnv(state) -- cgit v1.2.3 From 6fb6e6679eb7c329ac9013d0c879a7c4b17daca5 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 12:57:37 +0200 Subject: core/vm, core/state: added storage to structured vm logging --- core/state/state_object.go | 16 ++++++++++++++++ core/vm/environment.go | 11 ++++++----- core/vm/vm.go | 13 ++++++++++--- core/vm_logger.go | 8 +++++++- 4 files changed, 39 insertions(+), 9 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index bfc4ebc6c..6d2455d79 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -336,6 +336,22 @@ func (self *StateObject) Nonce() uint64 { return self.nonce } +func (self *StateObject) EachStorage(cb func(key, value []byte)) { + // When iterating over the storage check the cache first + for h, v := range self.storage { + cb([]byte(h), v.Bytes()) + } + + it := self.State.trie.Iterator() + for it.Next() { + // ignore cached values + key := self.State.trie.GetKey(it.Key) + if _, ok := self.storage[string(key)]; !ok { + cb(key, it.Value) + } + } +} + // // Encoding // diff --git a/core/vm/environment.go b/core/vm/environment.go index 31d5d5ea6..25bd2515e 100644 --- a/core/vm/environment.go +++ b/core/vm/environment.go @@ -34,11 +34,12 @@ type Environment interface { } type StructLog struct { - Pc uint64 - Op OpCode - Gas *big.Int - Memory []byte - Stack []*big.Int + Pc uint64 + Op OpCode + Gas *big.Int + Memory []byte + Stack []*big.Int + Storage map[common.Hash][]byte } type Account interface { diff --git a/core/vm/vm.go b/core/vm/vm.go index 7c4a7ce6d..e4f6e9268 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -95,7 +95,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { // Get the memory location of pc op = context.GetOp(pc) - self.log(pc, op, context.Gas, mem, stack) + self.log(pc, op, context.Gas, mem, stack, context) newMemSize, gas, err := self.calculateGasAndSize(context, caller, op, statedb, mem, stack) if err != nil { @@ -778,13 +778,20 @@ func (self *Vm) RunPrecompiled(p *PrecompiledAccount, callData []byte, context * } } -func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *Stack) { +func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *Stack, context *Context) { if Debug { mem := make([]byte, len(memory.Data())) copy(mem, memory.Data()) stck := make([]*big.Int, len(stack.Data())) copy(stck, stack.Data()) - self.env.AddStructLog(StructLog{pc, op, new(big.Int).Set(gas), mem, stck}) + + object := context.self.(*state.StateObject) + storage := make(map[common.Hash][]byte) + object.EachStorage(func(k, v []byte) { + storage[common.BytesToHash(k)] = v + }) + + self.env.AddStructLog(StructLog{pc, op, new(big.Int).Set(gas), mem, stck, storage}) } } diff --git a/core/vm_logger.go b/core/vm_logger.go 
index 84fa71b24..d0742380e 100644 --- a/core/vm_logger.go +++ b/core/vm_logger.go @@ -12,7 +12,7 @@ import ( func VmStdErrFormat(logs []vm.StructLog) { fmt.Fprintf(os.Stderr, "VM Stats %d ops\n", len(logs)) for _, log := range logs { - fmt.Fprintf(os.Stderr, "PC %-3d - %-14s\n", log.Pc, log.Op) + fmt.Fprintf(os.Stderr, "PC %08d: %s\n", log.Pc, log.Op) fmt.Fprintln(os.Stderr, "STACK =", len(log.Stack)) for i, item := range log.Stack { fmt.Fprintf(os.Stderr, "%04d: %x\n", i, common.LeftPadBytes(item.Bytes(), 32)) @@ -36,5 +36,11 @@ func VmStdErrFormat(logs []vm.StructLog) { addr++ fmt.Fprintln(os.Stderr, str) } + + fmt.Fprintln(os.Stderr, "STORAGE =", len(log.Storage)) + for h, item := range log.Storage { + fmt.Fprintf(os.Stderr, "%x: %x\n", h, common.LeftPadBytes(item, 32)) + } + } } -- cgit v1.2.3 From 1774c494560507735d7b616456be60874063101f Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 12:57:58 +0200 Subject: core: log tx count for each set of blocks we're importing --- core/chain_manager.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/chain_manager.go b/core/chain_manager.go index 6897c453c..be64b54f4 100644 --- a/core/chain_manager.go +++ b/core/chain_manager.go @@ -567,6 +567,7 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { go verifyNonces(self.pow, chain, nonceQuit, nonceDone) defer close(nonceQuit) + txcount := 0 for i, block := range chain { bstart := time.Now() // Wait for block i's nonce to be verified before processing @@ -625,6 +626,8 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { return i, err } + txcount += len(block.Transactions()) + cblock := self.currentBlock // Compare the TD of the last known block in the canonical chain to make sure it's greater. // At this point it's possible that a different chain (fork) becomes the new canonical chain. @@ -683,7 +686,7 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { tend := time.Since(tstart) start, end := chain[0], chain[len(chain)-1] - glog.Infof("imported %d block(s) (%d queued %d ignored) in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4]) + glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. 
#%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4]) } go self.eventMux.Post(queueEvent) -- cgit v1.2.3 From 261a8077c410c50999d662bd1ca871b7ef414909 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 10 Jun 2015 13:07:30 +0200 Subject: p2p/discover: deflake TestUDP_successfulPing --- p2p/discover/table.go | 8 +++++++ p2p/discover/udp_test.go | 55 +++++++++++++++++++----------------------------- 2 files changed, 30 insertions(+), 33 deletions(-) diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 4b7ddb775..f71320425 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -40,6 +40,8 @@ type Table struct { bonding map[NodeID]*bondproc bondslots chan struct{} // limits total number of active bonding processes + nodeAddedHook func(*Node) // for testing + net transport self *Node // metadata of the local node } @@ -431,6 +433,9 @@ func (tab *Table) pingreplace(new *Node, b *bucket) { } copy(b.entries[1:], b.entries) b.entries[0] = new + if tab.nodeAddedHook != nil { + tab.nodeAddedHook(new) + } } // ping a remote endpoint and wait for a reply, also updating the node database @@ -466,6 +471,9 @@ outer: } if len(bucket.entries) < bucketSize { bucket.entries = append(bucket.entries, n) + if tab.nodeAddedHook != nil { + tab.nodeAddedHook(n) + } } } } diff --git a/p2p/discover/udp_test.go b/p2p/discover/udp_test.go index 11fa31d7c..b5d035a98 100644 --- a/p2p/discover/udp_test.go +++ b/p2p/discover/udp_test.go @@ -234,14 +234,12 @@ func TestUDP_findnodeMultiReply(t *testing.T) { func TestUDP_successfulPing(t *testing.T) { test := newUDPTest(t) + added := make(chan *Node, 1) + test.table.nodeAddedHook = func(n *Node) { added <- n } defer test.table.Close() - done := make(chan struct{}) - go func() { - // The remote side sends a ping packet to initiate the exchange. - test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version, Expiration: futureExp}) - close(done) - }() + // The remote side sends a ping packet to initiate the exchange. + go test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version, Expiration: futureExp}) // the ping is replied to. test.waitPacketOut(func(p *pong) { @@ -277,35 +275,26 @@ func TestUDP_successfulPing(t *testing.T) { }) test.packetIn(nil, pongPacket, &pong{Expiration: futureExp}) - // ping should return shortly after getting the pong packet. - <-done - - // check that the node was added. - rid := PubkeyID(&test.remotekey.PublicKey) - rnode := find(test.table, rid) - if rnode == nil { - t.Fatalf("node %v not found in table", rid) - } - if !bytes.Equal(rnode.IP, test.remoteaddr.IP) { - t.Errorf("node has wrong IP: got %v, want: %v", rnode.IP, test.remoteaddr.IP) - } - if int(rnode.UDP) != test.remoteaddr.Port { - t.Errorf("node has wrong UDP port: got %v, want: %v", rnode.UDP, test.remoteaddr.Port) - } - if rnode.TCP != testRemote.TCP { - t.Errorf("node has wrong TCP port: got %v, want: %v", rnode.TCP, testRemote.TCP) - } -} - -func find(tab *Table, id NodeID) *Node { - for _, b := range tab.buckets { - for _, e := range b.entries { - if e.ID == id { - return e - } + // the node should be added to the table shortly after getting the + // pong packet. 
+ select { + case n := <-added: + rid := PubkeyID(&test.remotekey.PublicKey) + if n.ID != rid { + t.Errorf("node has wrong ID: got %v, want %v", n.ID, rid) } + if !bytes.Equal(n.IP, test.remoteaddr.IP) { + t.Errorf("node has wrong IP: got %v, want: %v", n.IP, test.remoteaddr.IP) + } + if int(n.UDP) != test.remoteaddr.Port { + t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP, test.remoteaddr.Port) + } + if n.TCP != testRemote.TCP { + t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP, testRemote.TCP) + } + case <-time.After(2 * time.Second): + t.Errorf("node was not added within 2 seconds") } - return nil } // dgramPipe is a fake UDP socket. It queues all sent datagrams. -- cgit v1.2.3 From cf3aabb9d3dd7554d5859b36ed290f2e031ba33a Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 13:52:38 +0200 Subject: miner: update gas used after tx proc for pending block --- miner/worker.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/miner/worker.go b/miner/worker.go index 611445529..bd4bc0e3c 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -270,7 +270,6 @@ func (self *worker) wait() { func (self *worker) push() { if atomic.LoadInt32(&self.mining) == 1 { - self.current.block.Header().GasUsed = self.current.totalUsedGas self.current.block.SetRoot(self.current.state.Root()) // push new work to agents @@ -510,6 +509,8 @@ func (self *worker) commitTransactions(transactions types.Transactions) { current.tcount++ } } + + self.current.block.Header().GasUsed = self.current.totalUsedGas } func (self *worker) commitTransaction(tx *types.Transaction) error { -- cgit v1.2.3 From a8e4cb6dfe9dbaf3f5bf19807406577ee116cc2a Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Wed, 10 Jun 2015 15:15:35 +0200 Subject: p2p/discover: use separate rand.Source instances in tests rand.Source isn't safe for concurrent use. --- p2p/discover/node_test.go | 25 ++++++++++++++----------- p2p/discover/table_test.go | 9 +++++---- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/p2p/discover/node_test.go b/p2p/discover/node_test.go index b1babd989..795460c49 100644 --- a/p2p/discover/node_test.go +++ b/p2p/discover/node_test.go @@ -13,11 +13,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) -var ( - quickrand = rand.New(rand.NewSource(time.Now().Unix())) - quickcfg = &quick.Config{MaxCount: 5000, Rand: quickrand} -) - var parseNodeTests = []struct { rawurl string wantError string @@ -176,7 +171,7 @@ func TestNodeID_distcmp(t *testing.T) { bbig := new(big.Int).SetBytes(b[:]) return new(big.Int).Xor(tbig, abig).Cmp(new(big.Int).Xor(tbig, bbig)) } - if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg); err != nil { + if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg()); err != nil { t.Error(err) } } @@ -195,7 +190,7 @@ func TestNodeID_logdist(t *testing.T) { abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:]) return new(big.Int).Xor(abig, bbig).BitLen() } - if err := quick.CheckEqual(logdist, logdistBig, quickcfg); err != nil { + if err := quick.CheckEqual(logdist, logdistBig, quickcfg()); err != nil { t.Error(err) } } @@ -211,9 +206,10 @@ func TestNodeID_logdistEqual(t *testing.T) { func TestNodeID_hashAtDistance(t *testing.T) { // we don't use quick.Check here because its output isn't // very helpful when the test fails. 
- for i := 0; i < quickcfg.MaxCount; i++ { - a := gen(common.Hash{}, quickrand).(common.Hash) - dist := quickrand.Intn(len(common.Hash{}) * 8) + cfg := quickcfg() + for i := 0; i < cfg.MaxCount; i++ { + a := gen(common.Hash{}, cfg.Rand).(common.Hash) + dist := cfg.Rand.Intn(len(common.Hash{}) * 8) result := hashAtDistance(a, dist) actualdist := logdist(result, a) @@ -225,7 +221,14 @@ func TestNodeID_hashAtDistance(t *testing.T) { } } -// TODO: this can be dropped when we require Go >= 1.5 +func quickcfg() *quick.Config { + return &quick.Config{ + MaxCount: 5000, + Rand: rand.New(rand.NewSource(time.Now().Unix())), + } +} + +// TODO: The Generate method can be dropped when we require Go >= 1.5 // because testing/quick learned to generate arrays in 1.5. func (NodeID) Generate(rand *rand.Rand, size int) reflect.Value { diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index da398d137..829899916 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -9,6 +9,7 @@ import ( "reflect" "testing" "testing/quick" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -74,7 +75,7 @@ func TestBucket_bumpNoDuplicates(t *testing.T) { t.Parallel() cfg := &quick.Config{ MaxCount: 1000, - Rand: quickrand, + Rand: rand.New(rand.NewSource(time.Now().Unix())), Values: func(args []reflect.Value, rand *rand.Rand) { // generate a random list of nodes. this will be the content of the bucket. n := rand.Intn(bucketSize-1) + 1 @@ -205,7 +206,7 @@ func TestTable_closest(t *testing.T) { } return true } - if err := quick.Check(test, quickcfg); err != nil { + if err := quick.Check(test, quickcfg()); err != nil { t.Error(err) } } @@ -213,7 +214,7 @@ func TestTable_closest(t *testing.T) { func TestTable_ReadRandomNodesGetAll(t *testing.T) { cfg := &quick.Config{ MaxCount: 200, - Rand: quickrand, + Rand: rand.New(rand.NewSource(time.Now().Unix())), Values: func(args []reflect.Value, rand *rand.Rand) { args[0] = reflect.ValueOf(make([]*Node, rand.Intn(1000))) }, @@ -221,7 +222,7 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) { test := func(buf []*Node) bool { tab := newTable(nil, NodeID{}, &net.UDPAddr{}, "") for i := 0; i < len(buf); i++ { - ld := quickrand.Intn(len(tab.buckets)) + ld := cfg.Rand.Intn(len(tab.buckets)) tab.add([]*Node{nodeAtDistance(tab.self.sha, ld)}) } gotN := tab.ReadRandomNodes(buf) -- cgit v1.2.3 From f249ccaa89afa3340c3abd8578516b648e633e96 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 16:46:43 +0200 Subject: cmd/evm: implements vm.Environment --- cmd/evm/main.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 561f1a943..599721c89 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -104,6 +104,7 @@ type VMEnv struct { depth int Gas *big.Int time int64 + logs []vm.StructLog } func NewEnv(state *state.StateDB, transactor common.Address, value *big.Int) *VMEnv { @@ -133,6 +134,12 @@ func (self *VMEnv) GetHash(n uint64) common.Hash { } return common.Hash{} } +func (self *VMEnv) AddStructLog(log vm.StructLog) { + self.logs = append(self.logs, log) +} +func (self *VMEnv) StructLogs() []vm.StructLog { + return self.logs +} func (self *VMEnv) AddLog(log *state.Log) { self.state.AddLog(log) } -- cgit v1.2.3 From 271fb20ecb48944692bdb4e7f91be9b90506981b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 10 Jun 2015 18:01:05 +0300 Subject: cmd/geth, eth/downloader: rough guess at the import eta --- cmd/geth/admin.go | 9 +++++++-- 
eth/downloader/downloader.go | 15 +++++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/cmd/geth/admin.go b/cmd/geth/admin.go index ea8a70923..7bf23829a 100644 --- a/cmd/geth/admin.go +++ b/cmd/geth/admin.go @@ -325,8 +325,13 @@ func (js *jsre) setHead(call otto.FunctionCall) otto.Value { } func (js *jsre) syncProgress(call otto.FunctionCall) otto.Value { - pending, cached, importing := js.ethereum.Downloader().Stats() - v, _ := call.Otto.ToValue(map[string]interface{}{"pending": pending, "cached": cached, "importing": importing}) + pending, cached, importing, eta := js.ethereum.Downloader().Stats() + v, _ := call.Otto.ToValue(map[string]interface{}{ + "pending": pending, + "cached": cached, + "importing": importing, + "estimate": eta.String(), + }) return v } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index efb94e5e3..c3234ecb1 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -79,7 +79,9 @@ type Downloader struct { banned *set.Set // Set of hashes we've received and banned // Statistics + importStart time.Time // Instance when the last blocks were taken from the cache importQueue []common.Hash // Hashes of the previously taken blocks to check import progress + importDone int // Number of taken blocks already imported from the last batch importLock sync.Mutex // Callbacks @@ -126,19 +128,25 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa } // Stats retrieves the current status of the downloader. -func (d *Downloader) Stats() (pending int, cached int, importing int) { +func (d *Downloader) Stats() (pending int, cached int, importing int, estimate time.Duration) { // Fetch the download status pending, cached = d.queue.Size() - // Generate the import status + // Figure out the import progress d.importLock.Lock() defer d.importLock.Unlock() for len(d.importQueue) > 0 && d.hasBlock(d.importQueue[0]) { d.importQueue = d.importQueue[1:] + d.importDone++ } importing = len(d.importQueue) + // Make an estimate on the total sync + estimate = 0 + if d.importDone > 0 { + estimate = time.Since(d.importStart) / time.Duration(d.importDone) * time.Duration(pending+cached+importing) + } return } @@ -226,7 +234,9 @@ func (d *Downloader) TakeBlocks() []*Block { hashes[i] = block.RawBlock.Hash() } d.importLock.Lock() + d.importStart = time.Now() d.importQueue = hashes + d.importDone = 0 d.importLock.Unlock() } return blocks @@ -287,6 +297,7 @@ func (d *Downloader) Cancel() bool { d.importLock.Lock() d.importQueue = nil + d.importDone = 0 d.importLock.Unlock() return true -- cgit v1.2.3 From c4af70d0cc213b5fd906a385cdb2895268525ef7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 10 Jun 2015 18:07:23 +0300 Subject: cmd/geth: round the import ETA before converting to string --- cmd/geth/admin.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/admin.go b/cmd/geth/admin.go index 7bf23829a..4f22110ad 100644 --- a/cmd/geth/admin.go +++ b/cmd/geth/admin.go @@ -330,7 +330,7 @@ func (js *jsre) syncProgress(call otto.FunctionCall) otto.Value { "pending": pending, "cached": cached, "importing": importing, - "estimate": eta.String(), + "estimate": (eta / time.Second * time.Second).String(), }) return v } -- cgit v1.2.3 From 65a48f9cd8461917d8047b1cd4901d068b61ff00 Mon Sep 17 00:00:00 2001 From: obscuren Date: Tue, 9 Jun 2015 23:46:56 +0200 Subject: core: fixed race condition in the transaction pool Removed `Stop/Start` mechanism from the 
transaction pool. --- core/transaction_pool.go | 60 ++++++++++++++++++++++++------------------------ eth/backend.go | 5 ---- 2 files changed, 30 insertions(+), 35 deletions(-) diff --git a/core/transaction_pool.go b/core/transaction_pool.go index a2f970195..b63a4dcab 100644 --- a/core/transaction_pool.go +++ b/core/transaction_pool.go @@ -50,7 +50,7 @@ type TxPool struct { } func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool { - return &TxPool{ + pool := &TxPool{ pending: make(map[common.Hash]*types.Transaction), queue: make(map[common.Address]map[common.Hash]*types.Transaction), quit: make(chan bool), @@ -59,9 +59,12 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func( gasLimit: gasLimitFn, pendingState: state.ManageState(currentStateFn()), } + go pool.eventLoop() + + return pool } -func (pool *TxPool) Start() { +func (pool *TxPool) eventLoop() { // Track chain events. When a chain events occurs (new chain canon block) // we need to know the new state. The new state will help us determine // the nonces in the managed state @@ -169,15 +172,10 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error { return nil } +// validate and queue transactions. func (self *TxPool) add(tx *types.Transaction) error { hash := tx.Hash() - /* XXX I'm unsure about this. This is extremely dangerous and may result - in total black listing of certain transactions - if self.invalidHashes.Has(hash) { - return fmt.Errorf("Invalid transaction (%x)", hash[:4]) - } - */ if self.pending[hash] != nil { return fmt.Errorf("Known transaction (%x)", hash[:4]) } @@ -207,6 +205,30 @@ func (self *TxPool) add(tx *types.Transaction) error { return nil } +// queueTx will queue an unknown transaction +func (self *TxPool) queueTx(hash common.Hash, tx *types.Transaction) { + from, _ := tx.From() // already validated + if self.queue[from] == nil { + self.queue[from] = make(map[common.Hash]*types.Transaction) + } + self.queue[from][hash] = tx +} + +// addTx will add a transaction to the pending (processable queue) list of transactions +func (pool *TxPool) addTx(hash common.Hash, addr common.Address, tx *types.Transaction) { + if _, ok := pool.pending[hash]; !ok { + pool.pending[hash] = tx + + // Increment the nonce on the pending state. This can only happen if + // the nonce is +1 to the previous one. + pool.pendingState.SetNonce(addr, tx.AccountNonce+1) + // Notify the subscribers. This event is posted in a goroutine + // because it's possible that somewhere during the post "Remove transaction" + // gets called which will then wait for the global tx pool lock and deadlock. + go pool.eventMux.Post(TxPreEvent{tx}) + } +} + // Add queues a single transaction in the pool if it is valid. func (self *TxPool) Add(tx *types.Transaction) error { self.mu.Lock() @@ -290,28 +312,6 @@ func (self *TxPool) RemoveTransactions(txs types.Transactions) { } } -func (self *TxPool) queueTx(hash common.Hash, tx *types.Transaction) { - from, _ := tx.From() // already validated - if self.queue[from] == nil { - self.queue[from] = make(map[common.Hash]*types.Transaction) - } - self.queue[from][hash] = tx -} - -func (pool *TxPool) addTx(hash common.Hash, addr common.Address, tx *types.Transaction) { - if _, ok := pool.pending[hash]; !ok { - pool.pending[hash] = tx - - // Increment the nonce on the pending state. This can only happen if - // the nonce is +1 to the previous one. - pool.pendingState.SetNonce(addr, tx.AccountNonce+1) - // Notify the subscribers. 
This event is posted in a goroutine - // because it's possible that somewhere during the post "Remove transaction" - // gets called which will then wait for the global tx pool lock and deadlock. - go pool.eventMux.Post(TxPreEvent{tx}) - } -} - // checkQueue moves transactions that have become processable to main pool. func (pool *TxPool) checkQueue() { state := pool.pendingState diff --git a/eth/backend.go b/eth/backend.go index fcbea04a2..60e9359dc 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -466,8 +466,6 @@ func (s *Ethereum) Start() error { s.StartAutoDAG() } - // Start services - go s.txPool.Start() s.protocolManager.Start() if s.whisper != nil { @@ -513,9 +511,6 @@ func (s *Ethereum) StartForTest() { ClientString: s.net.Name, ProtocolVersion: ProtocolVersion, }) - - // Start services - s.txPool.Start() } // AddPeer connects to the given node and maintains the connection until the -- cgit v1.2.3 From 29f7902fee0750c571adad06bd614365ad86e8f5 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 13:54:43 +0200 Subject: core: fixed test --- core/helper_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/core/helper_test.go b/core/helper_test.go index 1e0ed178b..a308153aa 100644 --- a/core/helper_test.go +++ b/core/helper_test.go @@ -6,8 +6,8 @@ import ( "github.com/ethereum/go-ethereum/core/types" // "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" ) @@ -76,8 +76,5 @@ func NewTestManager() *TestManager { // testManager.blockChain = NewChainManager(testManager) // testManager.stateManager = NewStateManager(testManager) - // Start the tx pool - testManager.txPool.Start() - return testManager } -- cgit v1.2.3 From 4407524d13994759230ce6d31d828914597e8f6c Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 13:59:45 +0200 Subject: core: fixed race condition for subscriptions --- core/transaction_pool.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/transaction_pool.go b/core/transaction_pool.go index b63a4dcab..4a0594228 100644 --- a/core/transaction_pool.go +++ b/core/transaction_pool.go @@ -58,6 +58,7 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func( currentState: currentStateFn, gasLimit: gasLimitFn, pendingState: state.ManageState(currentStateFn()), + events: eventMux.Subscribe(ChainEvent{}), } go pool.eventLoop() @@ -68,7 +69,6 @@ func (pool *TxPool) eventLoop() { // Track chain events. When a chain events occurs (new chain canon block) // we need to know the new state. 
The new state will help us determine // the nonces in the managed state - pool.events = pool.eventMux.Subscribe(ChainEvent{}) for _ = range pool.events.Chan() { pool.mu.Lock() @@ -103,7 +103,6 @@ func (pool *TxPool) resetState() { } func (pool *TxPool) Stop() { - pool.pending = make(map[common.Hash]*types.Transaction) close(pool.quit) pool.events.Unsubscribe() glog.V(logger.Info).Infoln("TX Pool stopped") -- cgit v1.2.3 From 065aff9ffa2bee1008d1f406328dd12a073cb239 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 17:40:13 +0200 Subject: core/vm: documentation and name changes --- core/vm/environment.go | 4 ++++ core/vm/vm.go | 48 ++++++++++++++++++++++++++++++------------------ 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/core/vm/environment.go b/core/vm/environment.go index 25bd2515e..e61676409 100644 --- a/core/vm/environment.go +++ b/core/vm/environment.go @@ -8,6 +8,8 @@ import ( "github.com/ethereum/go-ethereum/core/state" ) +// Environment is required by the virtual machine to get information from +// its own isolated environment. For an example see `core.VMEnv` type Environment interface { State() *state.StateDB @@ -33,6 +35,8 @@ type Environment interface { Create(me ContextRef, data []byte, gas, price, value *big.Int) ([]byte, error, ContextRef) } +// StructLog is emitted to the Environment each cycle and lists information about the current internal state +// prior to the execution of the statement. type StructLog struct { Pc uint64 Op OpCode diff --git a/core/vm/vm.go b/core/vm/vm.go index e4f6e9268..bf8bbcdc2 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/params" ) +// Vm implements VirtualMachine type Vm struct { env Environment @@ -27,11 +28,13 @@ type Vm struct { After func(*Context, error) } +// New returns a new Virtual Machine func New(env Environment) *Vm { return &Vm{env: env, debug: Debug, Recoverable: true} } -func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { +// Run loops and evaluates the contract's code with the given input data +func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { self.env.SetDepth(self.env.Depth() + 1) defer self.env.SetDepth(self.env.Depth() - 1) @@ -59,7 +62,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { if context.CodeAddr != nil { if p := Precompiled[context.CodeAddr.Str()]; p != nil { - return self.RunPrecompiled(p, callData, context) + return self.RunPrecompiled(p, input, context) } } @@ -69,13 +72,15 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { } var ( - op OpCode - codehash = crypto.Sha3Hash(code) - mem = NewMemory() - stack = newStack() - pc = uint64(0) - statedb = self.env.State() - + op OpCode // current opcode + codehash = crypto.Sha3Hash(code) // codehash is used when doing jump dest caching + mem = NewMemory() // bound memory + stack = newStack() // local stack + pc = uint64(0) // program counter + statedb = self.env.State() // current state + + // jump evaluates and checks whether the given jump destination is a valid one + // if valid move the `pc` otherwise return an error. 
jump = func(from uint64, to *big.Int) error { if !context.jumpdests.has(codehash, code, to) { nop := context.GetOp(to.Uint64()) @@ -97,20 +102,22 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { self.log(pc, op, context.Gas, mem, stack, context) + // calculate the new memory size and gas price for the current executing opcode newMemSize, gas, err := self.calculateGasAndSize(context, caller, op, statedb, mem, stack) if err != nil { return nil, err } + // Use the calculated gas. When insufficient gas is present, use all gas and return an + // Out Of Gas error if !context.UseGas(gas) { - tmp := new(big.Int).Set(context.Gas) context.UseGas(context.Gas) return context.Return(nil), OOG(gas, tmp) } - + // Resize the memory calculated previously mem.Resize(newMemSize.Uint64()) switch op { @@ -364,11 +371,11 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { stack.push(value) case CALLDATALOAD: - data := getData(callData, stack.pop(), common.Big32) + data := getData(input, stack.pop(), common.Big32) stack.push(common.Bytes2Big(data)) case CALLDATASIZE: - l := int64(len(callData)) + l := int64(len(input)) stack.push(big.NewInt(l)) case CALLDATACOPY: @@ -377,7 +384,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { cOff = stack.pop() l = stack.pop() ) - data := getData(callData, cOff, l) + data := getData(input, cOff, l) mem.Set(mOff.Uint64(), l.Uint64(), data) @@ -623,6 +630,8 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { } } +// calculateGasAndSize calculates the required given the opcode and stack items calculates the new memorysize for +// the operation. This does not reduce gas or resizes the memory. func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCode, statedb *state.StateDB, mem *Memory, stack *Stack) (*big.Int, *big.Int, error) { var ( gas = new(big.Int) @@ -764,20 +773,22 @@ func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCo return newMemSize, gas, nil } -func (self *Vm) RunPrecompiled(p *PrecompiledAccount, callData []byte, context *Context) (ret []byte, err error) { - gas := p.Gas(len(callData)) +// RunPrecompile runs and evaluate the output of a precompiled contract defined in contracts.go +func (self *Vm) RunPrecompiled(p *PrecompiledAccount, input []byte, context *Context) (ret []byte, err error) { + gas := p.Gas(len(input)) if context.UseGas(gas) { - ret = p.Call(callData) + ret = p.Call(input) return context.Return(ret), nil } else { - tmp := new(big.Int).Set(context.Gas) return nil, OOG(gas, tmp) } } +// log emits a log event to the environment for each opcode encountered. This is not to be confused with the +// LOG* opcode. func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *Stack, context *Context) { if Debug { mem := make([]byte, len(memory.Data())) @@ -795,6 +806,7 @@ func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *S } } +// Environment returns the current workable state of the VM func (self *Vm) Env() Environment { return self.env } -- cgit v1.2.3 From fc2a061d510fbe09534ee1ade167d66c40ba7bf1 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 17:45:21 +0200 Subject: core/vm: unexported stack again. 
No longer required --- core/vm/gas.go | 2 +- core/vm/stack.go | 24 ++++++++++++------------ core/vm/vm.go | 6 +++--- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/core/vm/gas.go b/core/vm/gas.go index 1c29ccb65..32f5fec04 100644 --- a/core/vm/gas.go +++ b/core/vm/gas.go @@ -21,7 +21,7 @@ var ( GasContractByte = big.NewInt(200) ) -func baseCheck(op OpCode, stack *Stack, gas *big.Int) error { +func baseCheck(op OpCode, stack *stack, gas *big.Int) error { // PUSH and DUP are a bit special. They all cost the same but we do want to have checking on stack push limit // PUSH is also allowed to calculate the same price for all PUSHes // DUP requirements are handled elsewhere (except for the stack limit check) diff --git a/core/vm/stack.go b/core/vm/stack.go index 1d0a018c6..b551de272 100644 --- a/core/vm/stack.go +++ b/core/vm/stack.go @@ -5,20 +5,20 @@ import ( "math/big" ) -func newStack() *Stack { - return &Stack{} +func newstack() *stack { + return &stack{} } -type Stack struct { +type stack struct { data []*big.Int ptr int } -func (st *Stack) Data() []*big.Int { +func (st *stack) Data() []*big.Int { return st.data } -func (st *Stack) push(d *big.Int) { +func (st *stack) push(d *big.Int) { // NOTE push limit (1024) is checked in baseCheck stackItem := new(big.Int).Set(d) if len(st.data) > st.ptr { @@ -29,36 +29,36 @@ func (st *Stack) push(d *big.Int) { st.ptr++ } -func (st *Stack) pop() (ret *big.Int) { +func (st *stack) pop() (ret *big.Int) { st.ptr-- ret = st.data[st.ptr] return } -func (st *Stack) len() int { +func (st *stack) len() int { return st.ptr } -func (st *Stack) swap(n int) { +func (st *stack) swap(n int) { st.data[st.len()-n], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-n] } -func (st *Stack) dup(n int) { +func (st *stack) dup(n int) { st.push(st.data[st.len()-n]) } -func (st *Stack) peek() *big.Int { +func (st *stack) peek() *big.Int { return st.data[st.len()-1] } -func (st *Stack) require(n int) error { +func (st *stack) require(n int) error { if st.len() < n { return fmt.Errorf("stack underflow (%d <=> %d)", len(st.data), n) } return nil } -func (st *Stack) Print() { +func (st *stack) Print() { fmt.Println("### stack ###") if len(st.data) > 0 { for i, val := range st.data { diff --git a/core/vm/vm.go b/core/vm/vm.go index bf8bbcdc2..fe380d79d 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -75,7 +75,7 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { op OpCode // current opcode codehash = crypto.Sha3Hash(code) // codehash is used when doing jump dest caching mem = NewMemory() // bound memory - stack = newStack() // local stack + stack = newstack() // local stack pc = uint64(0) // program counter statedb = self.env.State() // current state @@ -632,7 +632,7 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { // calculateGasAndSize calculates the required given the opcode and stack items calculates the new memorysize for // the operation. This does not reduce gas or resizes the memory. 
-func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCode, statedb *state.StateDB, mem *Memory, stack *Stack) (*big.Int, *big.Int, error) { +func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCode, statedb *state.StateDB, mem *Memory, stack *stack) (*big.Int, *big.Int, error) { var ( gas = new(big.Int) newMemSize *big.Int = new(big.Int) @@ -789,7 +789,7 @@ func (self *Vm) RunPrecompiled(p *PrecompiledAccount, input []byte, context *Con // log emits a log event to the environment for each opcode encountered. This is not to be confused with the // LOG* opcode. -func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *Stack, context *Context) { +func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *stack, context *Context) { if Debug { mem := make([]byte, len(memory.Data())) copy(mem, memory.Data()) -- cgit v1.2.3 From 2e0694b606ba6cb2fc78dbda641deb1e8d156024 Mon Sep 17 00:00:00 2001 From: Taylor Gerring Date: Tue, 9 Jun 2015 09:47:21 -0400 Subject: Updated test --- rpc/args_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/rpc/args_test.go b/rpc/args_test.go index fc10d68cf..81a2972cd 100644 --- a/rpc/args_test.go +++ b/rpc/args_test.go @@ -2519,6 +2519,14 @@ func TestSigArgs(t *testing.T) { if err := json.Unmarshal([]byte(input), &args); err != nil { t.Error(err) } + + if expected.From != args.From { + t.Errorf("From should be %v but is %v", expected.From, args.From) + } + + if expected.Data != args.Data { + t.Errorf("Data should be %v but is %v", expected.Data, args.Data) + } } func TestSigArgsEmptyData(t *testing.T) { -- cgit v1.2.3 From b9affbf9fe1ca8e76aef5efdc456a43dff38dc8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 10 Jun 2015 12:17:06 +0300 Subject: eth: discard fetched blocks that don't fit (no goroutine) --- eth/sync.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/eth/sync.go b/eth/sync.go index a25d4d4fd..7a3b5fcd4 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -200,23 +200,23 @@ func (pm *ProtocolManager) fetcher() { case <-pm.quitSync: return } - // If any explicit fetches were replied to, import them - if count := len(explicit); count > 0 { - glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", count) - - // Create a closure with the retrieved blocks and origin peers - peers := make([]*peer, 0, count) - blocks := make([]*types.Block, 0, count) - for _, block := range explicit { - hash := block.Hash() - if announce := pending[hash]; announce != nil { + // Create a closure with the retrieved blocks and origin peers + peers := make([]*peer, 0, len(explicit)) + blocks = make([]*types.Block, 0, len(explicit)) + for _, block := range explicit { + hash := block.Hash() + if announce := pending[hash]; announce != nil { + // Filter out blocks too new to import anyway + if !pm.chainman.HasBlock(hash) && pm.chainman.HasBlock(block.ParentHash()) { peers = append(peers, announce.peer) blocks = append(blocks, block) - - delete(pending, hash) } + delete(pending, hash) } - // Run the importer on a new thread + } + // If any explicit fetches were replied to, import them + if count := len(blocks); count > 0 { + glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", len(blocks)) go func() { for i := 0; i < len(blocks); i++ { if err := pm.importBlock(peers[i], blocks[i], nil); err != nil { -- cgit v1.2.3 From 355b1e3bb1c749d8ecb19e967db988a34aa36788 Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 10 Jun 2015 12:30:35 +0300 Subject: eth: randomly fetch announced block (don't hammer origin) --- eth/sync.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/eth/sync.go b/eth/sync.go index 7a3b5fcd4..7817266f8 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -127,7 +127,7 @@ func (pm *ProtocolManager) txsyncLoop() { // fetcher is responsible for collecting hash notifications, and periodically // checking all unknown ones and individually fetching them. func (pm *ProtocolManager) fetcher() { - announces := make(map[common.Hash]*blockAnnounce) + announces := make(map[common.Hash][]*blockAnnounce) request := make(map[*peer][]common.Hash) pending := make(map[common.Hash]*blockAnnounce) cycle := time.Tick(notifyCheckCycle) @@ -139,7 +139,7 @@ func (pm *ProtocolManager) fetcher() { // A batch of hashes the notified, schedule them for retrieval glog.V(logger.Debug).Infof("Scheduling %d hash announcements from %s", len(notifications), notifications[0].peer.id) for _, announce := range notifications { - announces[announce.hash] = announce + announces[announce.hash] = append(announces[announce.hash], announce) } case <-cycle: @@ -150,8 +150,9 @@ func (pm *ProtocolManager) fetcher() { } } // Check if any notified blocks failed to arrive - for hash, announce := range announces { - if time.Since(announce.time) > notifyArriveTimeout { + for hash, all := range announces { + if time.Since(all[0].time) > notifyArriveTimeout { + announce := all[rand.Intn(len(all))] if !pm.chainman.HasBlock(hash) { request[announce.peer] = append(request[announce.peer], hash) pending[hash] = announce -- cgit v1.2.3 From e61db7145a07fedc16fadc5c438462ad42a7461c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 10 Jun 2015 13:08:00 +0300 Subject: eth: dedup fetches to ensure no blocks are pulled twice --- eth/sync.go | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/eth/sync.go b/eth/sync.go index 7817266f8..8fee21d7b 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -131,6 +131,7 @@ func (pm *ProtocolManager) fetcher() { request := make(map[*peer][]common.Hash) pending := make(map[common.Hash]*blockAnnounce) cycle := time.Tick(notifyCheckCycle) + done := make(chan common.Hash) // Iterate the block fetching until a quit is requested for { @@ -139,9 +140,18 @@ func (pm *ProtocolManager) fetcher() { // A batch of hashes the notified, schedule them for retrieval glog.V(logger.Debug).Infof("Scheduling %d hash announcements from %s", len(notifications), notifications[0].peer.id) for _, announce := range notifications { + // Skip if it's already pending fetch + if _, ok := pending[announce.hash]; ok { + continue + } + // Otherwise queue up the peer as a potential source announces[announce.hash] = append(announces[announce.hash], announce) } + case hash := <-done: + // A pending import finished, remove all traces + delete(pending, hash) + case <-cycle: // Clean up any expired block fetches for hash, announce := range pending { @@ -207,18 +217,26 @@ func (pm *ProtocolManager) fetcher() { for _, block := range explicit { hash := block.Hash() if announce := pending[hash]; announce != nil { - // Filter out blocks too new to import anyway - if !pm.chainman.HasBlock(hash) && pm.chainman.HasBlock(block.ParentHash()) { - peers = append(peers, announce.peer) - blocks = append(blocks, block) + // Drop the block if it surely cannot fit + if pm.chainman.HasBlock(hash) || 
!pm.chainman.HasBlock(block.ParentHash()) { + delete(pending, hash) + continue } - delete(pending, hash) + // Otherwise accumulate for import + peers = append(peers, announce.peer) + blocks = append(blocks, block) } } // If any explicit fetches were replied to, import them if count := len(blocks); count > 0 { glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", len(blocks)) go func() { + // Make sure all hashes are cleaned up + for _, block := range blocks { + hash := block.Hash() + defer func() { done <- hash }() + } + // Try and actually import the blocks for i := 0; i < len(blocks); i++ { if err := pm.importBlock(peers[i], blocks[i], nil); err != nil { glog.V(logger.Detail).Infof("Failed to import explicitly fetched block: %v", err) -- cgit v1.2.3 From 3c1cccc801ec7c546a0d840fe3a08dbf9a302d2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 10 Jun 2015 20:12:22 +0300 Subject: eth/downloader: fetch the block hashes on the fly, when needed --- eth/downloader/downloader.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index c3234ecb1..f0a515d12 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -79,9 +79,9 @@ type Downloader struct { banned *set.Set // Set of hashes we've received and banned // Statistics - importStart time.Time // Instance when the last blocks were taken from the cache - importQueue []common.Hash // Hashes of the previously taken blocks to check import progress - importDone int // Number of taken blocks already imported from the last batch + importStart time.Time // Instance when the last blocks were taken from the cache + importQueue []*Block // Previously taken blocks to check import progress + importDone int // Number of taken blocks already imported from the last batch importLock sync.Mutex // Callbacks @@ -136,7 +136,7 @@ func (d *Downloader) Stats() (pending int, cached int, importing int, estimate t d.importLock.Lock() defer d.importLock.Unlock() - for len(d.importQueue) > 0 && d.hasBlock(d.importQueue[0]) { + for len(d.importQueue) > 0 && d.hasBlock(d.importQueue[0].RawBlock.Hash()) { d.importQueue = d.importQueue[1:] d.importDone++ } @@ -229,13 +229,9 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error { func (d *Downloader) TakeBlocks() []*Block { blocks := d.queue.TakeBlocks() if len(blocks) > 0 { - hashes := make([]common.Hash, len(blocks)) - for i, block := range blocks { - hashes[i] = block.RawBlock.Hash() - } d.importLock.Lock() d.importStart = time.Now() - d.importQueue = hashes + d.importQueue = blocks d.importDone = 0 d.importLock.Unlock() } -- cgit v1.2.3 From 10af69b57c8022bb400e1f00bb3c6413e640a7e1 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 19:56:40 +0200 Subject: core, core/vm: moved logger and added gas cost to struct logging --- core/state_transition.go | 2 +- core/vm/environment.go | 1 + core/vm/logger.go | 45 +++++++++++++++++++++++++++++++++++++++++++++ core/vm/vm.go | 8 ++++---- core/vm_logger.go | 46 ---------------------------------------------- 5 files changed, 51 insertions(+), 51 deletions(-) create mode 100644 core/vm/logger.go delete mode 100644 core/vm_logger.go diff --git a/core/state_transition.go b/core/state_transition.go index 3dbc789f8..fedea8021 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -224,7 +224,7 @@ func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, er } if 
vm.Debug { - VmStdErrFormat(vmenv.StructLogs()) + vm.StdErrFormat(vmenv.StructLogs()) } self.refundGas() diff --git a/core/vm/environment.go b/core/vm/environment.go index e61676409..5c04e7022 100644 --- a/core/vm/environment.go +++ b/core/vm/environment.go @@ -41,6 +41,7 @@ type StructLog struct { Pc uint64 Op OpCode Gas *big.Int + GasCost *big.Int Memory []byte Stack []*big.Int Storage map[common.Hash][]byte diff --git a/core/vm/logger.go b/core/vm/logger.go new file mode 100644 index 000000000..6d08cbebe --- /dev/null +++ b/core/vm/logger.go @@ -0,0 +1,45 @@ +package vm + +import ( + "fmt" + "os" + "unicode/utf8" + + "github.com/ethereum/go-ethereum/common" +) + +func StdErrFormat(logs []StructLog) { + fmt.Fprintf(os.Stderr, "VM Stats %d ops\n", len(logs)) + for _, log := range logs { + fmt.Fprintf(os.Stderr, "PC %08d: %s GAS: %v COST: %v\n", log.Pc, log.Op, log.Gas, log.GasCost) + fmt.Fprintln(os.Stderr, "STACK =", len(log.Stack)) + for i, item := range log.Stack { + fmt.Fprintf(os.Stderr, "%04d: %x\n", i, common.LeftPadBytes(item.Bytes(), 32)) + } + + const maxMem = 10 + addr := 0 + fmt.Fprintln(os.Stderr, "MEM =", len(log.Memory)) + for i := 0; i+16 <= len(log.Memory) && addr < maxMem; i += 16 { + data := log.Memory[i : i+16] + str := fmt.Sprintf("%04d: % x ", addr*16, data) + for _, r := range data { + if r == 0 { + str += "." + } else if utf8.ValidRune(rune(r)) { + str += fmt.Sprintf("%s", string(r)) + } else { + str += "?" + } + } + addr++ + fmt.Fprintln(os.Stderr, str) + } + + fmt.Fprintln(os.Stderr, "STORAGE =", len(log.Storage)) + for h, item := range log.Storage { + fmt.Fprintf(os.Stderr, "%x: %x\n", h, common.LeftPadBytes(item, 32)) + } + fmt.Fprintln(os.Stderr) + } +} diff --git a/core/vm/vm.go b/core/vm/vm.go index fe380d79d..117331389 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -100,14 +100,14 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { // Get the memory location of pc op = context.GetOp(pc) - self.log(pc, op, context.Gas, mem, stack, context) - // calculate the new memory size and gas price for the current executing opcode newMemSize, gas, err := self.calculateGasAndSize(context, caller, op, statedb, mem, stack) if err != nil { return nil, err } + self.log(pc, op, context.Gas, gas, mem, stack, context) + // Use the calculated gas. When insufficient gas is present, use all gas and return an // Out Of Gas error if !context.UseGas(gas) { @@ -789,7 +789,7 @@ func (self *Vm) RunPrecompiled(p *PrecompiledAccount, input []byte, context *Con // log emits a log event to the environment for each opcode encountered. This is not to be confused with the // LOG* opcode. 
-func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *stack, context *Context) { +func (self *Vm) log(pc uint64, op OpCode, gas, cost *big.Int, memory *Memory, stack *stack, context *Context) { if Debug { mem := make([]byte, len(memory.Data())) copy(mem, memory.Data()) @@ -802,7 +802,7 @@ func (self *Vm) log(pc uint64, op OpCode, gas *big.Int, memory *Memory, stack *s storage[common.BytesToHash(k)] = v }) - self.env.AddStructLog(StructLog{pc, op, new(big.Int).Set(gas), mem, stck, storage}) + self.env.AddStructLog(StructLog{pc, op, new(big.Int).Set(gas), cost, mem, stck, storage}) } } diff --git a/core/vm_logger.go b/core/vm_logger.go deleted file mode 100644 index d0742380e..000000000 --- a/core/vm_logger.go +++ /dev/null @@ -1,46 +0,0 @@ -package core - -import ( - "fmt" - "os" - "unicode/utf8" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/vm" -) - -func VmStdErrFormat(logs []vm.StructLog) { - fmt.Fprintf(os.Stderr, "VM Stats %d ops\n", len(logs)) - for _, log := range logs { - fmt.Fprintf(os.Stderr, "PC %08d: %s\n", log.Pc, log.Op) - fmt.Fprintln(os.Stderr, "STACK =", len(log.Stack)) - for i, item := range log.Stack { - fmt.Fprintf(os.Stderr, "%04d: %x\n", i, common.LeftPadBytes(item.Bytes(), 32)) - } - - const maxMem = 10 - addr := 0 - fmt.Fprintln(os.Stderr, "MEM =", len(log.Memory)) - for i := 0; i+16 <= len(log.Memory) && addr < maxMem; i += 16 { - data := log.Memory[i : i+16] - str := fmt.Sprintf("%04d: % x ", addr*16, data) - for _, r := range data { - if r == 0 { - str += "." - } else if utf8.ValidRune(rune(r)) { - str += fmt.Sprintf("%s", string(r)) - } else { - str += "?" - } - } - addr++ - fmt.Fprintln(os.Stderr, str) - } - - fmt.Fprintln(os.Stderr, "STORAGE =", len(log.Storage)) - for h, item := range log.Storage { - fmt.Fprintf(os.Stderr, "%x: %x\n", h, common.LeftPadBytes(item, 32)) - } - - } -} -- cgit v1.2.3 From f94c5473ad7139e42e22db8e099792638b73de77 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 21:08:04 +0200 Subject: core/vm: fixed a bug where `Data` ignored the stack ptr --- core/vm/stack.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vm/stack.go b/core/vm/stack.go index b551de272..2be5c3dbe 100644 --- a/core/vm/stack.go +++ b/core/vm/stack.go @@ -15,7 +15,7 @@ type stack struct { } func (st *stack) Data() []*big.Int { - return st.data + return st.data[:st.ptr] } func (st *stack) push(d *big.Int) { -- cgit v1.2.3 From e7627623b96d06f4963ae424d2cb41cf9ba86e72 Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 21:08:54 +0200 Subject: core/vm: reverse loop stack --- core/vm/logger.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/vm/logger.go b/core/vm/logger.go index 6d08cbebe..96d07dab5 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -13,8 +13,9 @@ func StdErrFormat(logs []StructLog) { for _, log := range logs { fmt.Fprintf(os.Stderr, "PC %08d: %s GAS: %v COST: %v\n", log.Pc, log.Op, log.Gas, log.GasCost) fmt.Fprintln(os.Stderr, "STACK =", len(log.Stack)) - for i, item := range log.Stack { - fmt.Fprintf(os.Stderr, "%04d: %x\n", i, common.LeftPadBytes(item.Bytes(), 32)) + + for i := len(log.Stack) - 1; i >= 0; i-- { + fmt.Fprintf(os.Stderr, "%04d: %x\n", len(log.Stack)-i-1, common.LeftPadBytes(log.Stack[i].Bytes(), 32)) } const maxMem = 10 -- cgit v1.2.3 From 5cfae0536f8499634c2fa2eba9a71fec1c0d417b Mon Sep 17 00:00:00 2001 From: obscuren Date: Wed, 10 Jun 2015 21:09:12 +0200 Subject: cmd/evm: print trace when 
running programs --- cmd/evm/main.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 599721c89..7c9d27fac 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -59,6 +59,7 @@ func main() { logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.LogLevel(*loglevel))) + vm.Debug = true db, _ := ethdb.NewMemDatabase() statedb := state.New(common.Hash{}, db) sender := statedb.CreateAccount(common.StringToAddress("sender")) @@ -80,6 +81,8 @@ func main() { fmt.Println(string(statedb.Dump())) } + vm.StdErrFormat(vmenv.StructLogs()) + var mem runtime.MemStats runtime.ReadMemStats(&mem) fmt.Printf("vm took %v\n", time.Since(tstart)) -- cgit v1.2.3 From 9e9bd3555789f069b6cb1950fe329d307f951eab Mon Sep 17 00:00:00 2001 From: obscuren Date: Thu, 11 Jun 2015 11:44:39 +0200 Subject: cmd/geth: Added optional debug flag for reprocess block --- cmd/geth/admin.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/geth/admin.go b/cmd/geth/admin.go index 13d10de32..b0b4e0954 100644 --- a/cmd/geth/admin.go +++ b/cmd/geth/admin.go @@ -271,9 +271,12 @@ func (js *jsre) debugBlock(call otto.FunctionCall) otto.Value { } tstart := time.Now() - old := vm.Debug - vm.Debug = true + + if len(call.ArgumentList) > 1 { + vm.Debug, _ = call.Argument(1).ToBoolean() + } + _, err = js.ethereum.BlockProcessor().RetryProcess(block) if err != nil { fmt.Println(err) -- cgit v1.2.3 From f599a1b5f143503817c9fa411854b9a8dac6ba72 Mon Sep 17 00:00:00 2001 From: obscuren Date: Thu, 11 Jun 2015 11:59:30 +0200 Subject: core/vm: added a comment regarding the uint64 vs *big.Int --- core/vm/vm.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/vm/vm.go b/core/vm/vm.go index 117331389..4c0ab0f47 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -76,8 +76,10 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { codehash = crypto.Sha3Hash(code) // codehash is used when doing jump dest caching mem = NewMemory() // bound memory stack = newstack() // local stack - pc = uint64(0) // program counter statedb = self.env.State() // current state + // For optimisation reasons we're using uint64 as the program counter. + // It's theoretically possible to go above 2^64. The YP defines the PC to be uint256. In practice it is much less feasible. + pc = uint64(0) // program counter // jump evaluates and checks whether the given jump destination is a valid one // if valid move the `pc` otherwise return an error. -- cgit v1.2.3 From 37111aa4bd215cfc8bcfb97cdc7e223649306196 Mon Sep 17 00:00:00 2001 From: obscuren Date: Thu, 11 Jun 2015 12:06:05 +0200 Subject: core: retry block now also parallelises nonce checks --- core/block_processor.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/core/block_processor.go b/core/block_processor.go index 190e72694..3ec3c585f 100644 --- a/core/block_processor.go +++ b/core/block_processor.go @@ -151,11 +151,17 @@ func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs state.Logs, err return nil, ParentError(header.ParentHash) } parent := sm.bc.GetBlock(header.ParentHash) - if !sm.Pow.Verify(block) { + + // FIXME Change to full header validation. 
See #1225 + errch := make(chan bool) + go func() { errch <- sm.Pow.Verify(block) }() + + logs, err = sm.processWithParent(block, parent) + if !<-errch { return nil, ValidationError("Block's nonce is invalid (= %x)", block.Nonce) } - return sm.processWithParent(block, parent) + return logs, err } // Process block will attempt to process the given block's transactions and applies them -- cgit v1.2.3 From 2f55a1d79853c1348fb1a4332fff98110167da80 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 10:23:54 +0200 Subject: restructured eth rpc API --- rpc/api/api.go | 12 + rpc/api/api_test.go | 42 +++ rpc/api/eth.go | 523 ++++++++++++++++++++++++++++++++ rpc/api/eth_args.go | 835 +++++++++++++++++++++++++++++++++++++++++++++++++++ rpc/api/eth_js.go | 3 + rpc/api/parsing.go | 460 ++++++++++++++++++++++++++++ rpc/api/utils.go | 36 +++ rpc/codec/codec.go | 47 +++ rpc/codec/json.go | 75 +++++ rpc/shared/errors.go | 96 ++++++ rpc/shared/types.go | 38 +++ 11 files changed, 2167 insertions(+) create mode 100644 rpc/api/api.go create mode 100644 rpc/api/api_test.go create mode 100644 rpc/api/eth.go create mode 100644 rpc/api/eth_args.go create mode 100644 rpc/api/eth_js.go create mode 100644 rpc/api/parsing.go create mode 100644 rpc/api/utils.go create mode 100644 rpc/codec/codec.go create mode 100644 rpc/codec/json.go create mode 100644 rpc/shared/errors.go create mode 100644 rpc/shared/types.go diff --git a/rpc/api/api.go b/rpc/api/api.go new file mode 100644 index 000000000..758e056ed --- /dev/null +++ b/rpc/api/api.go @@ -0,0 +1,12 @@ +package api + +import "github.com/ethereum/go-ethereum/rpc/shared" + +// Ethereum RPC API interface +type EthereumApi interface { + // Execute the given request and returns the response or an error + Execute(*shared.Request) (interface{}, error) + + // List of supported RCP methods this API provides + Methods() []string +} diff --git a/rpc/api/api_test.go b/rpc/api/api_test.go new file mode 100644 index 000000000..f1a47944d --- /dev/null +++ b/rpc/api/api_test.go @@ -0,0 +1,42 @@ +package api + +import ( + "testing" + + "github.com/ethereum/go-ethereum/rpc/codec" +) + +func TestParseApiString(t *testing.T) { + apis, err := ParseApiString("", codec.JSON, nil, nil) + if err == nil { + t.Errorf("Expected an err from parsing empty API string but got nil") + } + + if len(apis) != 0 { + t.Errorf("Expected 0 apis from empty API string") + } + + apis, err = ParseApiString("eth", codec.JSON, nil, nil) + if err != nil { + t.Errorf("Expected nil err from parsing empty API string but got %v", err) + } + + if len(apis) != 1 { + t.Errorf("Expected 1 apis but got %d - %v", apis, apis) + } + + apis, err = ParseApiString("eth,eth", codec.JSON, nil, nil) + if err != nil { + t.Errorf("Expected nil err from parsing empty API string but got \"%v\"", err) + } + + if len(apis) != 2 { + t.Errorf("Expected 2 apis but got %d - %v", apis, apis) + } + + apis, err = ParseApiString("eth,invalid", codec.JSON, nil, nil) + if err == nil { + t.Errorf("Expected an err but got no err") + } + +} diff --git a/rpc/api/eth.go b/rpc/api/eth.go new file mode 100644 index 000000000..fa14aa41e --- /dev/null +++ b/rpc/api/eth.go @@ -0,0 +1,523 @@ +package api + +import ( + "bytes" + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "github.com/ethereum/go-ethereum/xeth" +) + +// eth api provider +// See https://github.com/ethereum/wiki/wiki/JSON-RPC +type EthApi struct { + xeth 
*xeth.XEth + methods map[string]ethhandler + codec codec.ApiCoder +} + +// eth callback handler +type ethhandler func(*EthApi, *shared.Request) (interface{}, error) + +var ( + ethMapping = map[string]ethhandler{ + "eth_accounts": (*EthApi).Accounts, + "eth_blockNumber": (*EthApi).BlockNumber, + "eth_getBalance": (*EthApi).GetBalance, + "eth_protocolVersion": (*EthApi).ProtocolVersion, + "eth_coinbase": (*EthApi).Coinbase, + "eth_mining": (*EthApi).IsMining, + "eth_gasPrice": (*EthApi).GasPrice, + "eth_getStorage": (*EthApi).GetStorage, + "eth_storageAt": (*EthApi).GetStorage, + "eth_getStorageAt": (*EthApi).GetStorageAt, + "eth_getTransactionCount": (*EthApi).GetTransactionCount, + "eth_getBlockTransactionCountByHash": (*EthApi).GetBlockTransactionCountByHash, + "eth_getBlockTransactionCountByNumber": (*EthApi).GetBlockTransactionCountByNumber, + "eth_getUncleCountByBlockHash": (*EthApi).GetUncleCountByBlockHash, + "eth_getUncleCountByBlockNumber": (*EthApi).GetUncleCountByBlockNumber, + "eth_getData": (*EthApi).GetData, + "eth_getCode": (*EthApi).GetData, + "eth_sign": (*EthApi).Sign, + "eth_sendTransaction": (*EthApi).SendTransaction, + "eth_transact": (*EthApi).SendTransaction, + "eth_estimateGas": (*EthApi).EstimateGas, + "eth_call": (*EthApi).Call, + "eth_flush": (*EthApi).Flush, + "eth_getBlockByHash": (*EthApi).GetBlockByHash, + "eth_getBlockByNumber": (*EthApi).GetBlockByNumber, + "eth_getTransactionByHash": (*EthApi).GetTransactionByHash, + "eth_getTransactionByBlockHashAndIndex": (*EthApi).GetTransactionByBlockHashAndIndex, + "eth_getUncleByBlockHashAndIndex": (*EthApi).GetUncleByBlockHashAndIndex, + "eth_getUncleByBlockNumberAndIndex": (*EthApi).GetUncleByBlockNumberAndIndex, + "eth_getCompilers": (*EthApi).GetCompilers, + "eth_compileSolidity": (*EthApi).CompileSolidity, + "eth_newFilter": (*EthApi).NewFilter, + "eth_newBlockFilter": (*EthApi).NewBlockFilter, + "eth_newPendingTransactionFilter": (*EthApi).NewPendingTransactionFilter, + "eth_uninstallFilter": (*EthApi).UninstallFilter, + "eth_getFilterChanges": (*EthApi).GetFilterChanges, + "eth_getFilterLogs": (*EthApi).GetFilterLogs, + "eth_getLogs": (*EthApi).GetLogs, + "eth_hashrate": (*EthApi).Hashrate, + "eth_getWork": (*EthApi).GetWork, + "eth_submitWork": (*EthApi).SubmitWork, + } +) + +// create new EthApi instance +func NewEthApi(xeth *xeth.XEth, codec codec.Codec) *EthApi { + return &EthApi{xeth, ethMapping, codec.New(nil)} +} + +// collection with supported methods +func (self *EthApi) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +// Execute given request +func (self *EthApi) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, shared.NewNotImplementedError(req.Method) +} + +func (self *EthApi) Accounts(req *shared.Request) (interface{}, error) { + return self.xeth.Accounts(), nil +} + +func (self *EthApi) Hashrate(req *shared.Request) (interface{}, error) { + return newHexNum(self.xeth.HashRate()), nil +} + +func (self *EthApi) BlockNumber(req *shared.Request) (interface{}, error) { + return self.xeth.CurrentBlock().Number(), nil +} + +func (self *EthApi) GetBalance(req *shared.Request) (interface{}, error) { + args := new(GetBalanceArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + return 
self.xeth.AtStateNum(args.BlockNumber).BalanceAt(args.Address), nil +} + +func (self *EthApi) ProtocolVersion(req *shared.Request) (interface{}, error) { + return self.xeth.EthVersion(), nil +} + +func (self *EthApi) Coinbase(req *shared.Request) (interface{}, error) { + return newHexData(self.xeth.Coinbase()), nil +} + +func (self *EthApi) IsMining(req *shared.Request) (interface{}, error) { + return self.xeth.IsMining(), nil +} + +func (self *EthApi) GasPrice(req *shared.Request) (interface{}, error) { + return newHexNum(xeth.DefaultGasPrice().Bytes()), nil +} + +func (self *EthApi) GetStorage(req *shared.Request) (interface{}, error) { + args := new(GetStorageArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + return self.xeth.AtStateNum(args.BlockNumber).State().SafeGet(args.Address).Storage(), nil +} + +func (self *EthApi) GetStorageAt(req *shared.Request) (interface{}, error) { + args := new(GetStorageAtArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + return self.xeth.AtStateNum(args.BlockNumber).StorageAt(args.Address, args.Key), nil +} + +func (self *EthApi) GetTransactionCount(req *shared.Request) (interface{}, error) { + args := new(GetTxCountArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + count := self.xeth.AtStateNum(args.BlockNumber).TxCountAt(args.Address) + return newHexNum(big.NewInt(int64(count)).Bytes()), nil +} + +func (self *EthApi) GetBlockTransactionCountByHash(req *shared.Request) (interface{}, error) { + args := new(HashArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := NewBlockRes(self.xeth.EthBlockByHash(args.Hash), false) + if block == nil { + return nil, nil + } else { + return newHexNum(big.NewInt(int64(len(block.Transactions))).Bytes()), nil + } +} + +func (self *EthApi) GetBlockTransactionCountByNumber(req *shared.Request) (interface{}, error) { + args := new(BlockNumArg) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := NewBlockRes(self.xeth.EthBlockByNumber(args.BlockNumber), false) + if block == nil { + return nil, nil + } else { + return newHexNum(big.NewInt(int64(len(block.Transactions))).Bytes()), nil + } +} + +func (self *EthApi) GetUncleCountByBlockHash(req *shared.Request) (interface{}, error) { + args := new(HashArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByHash(args.Hash) + br := NewBlockRes(block, false) + if br == nil { + return nil, nil + } + return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil +} + +func (self *EthApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{}, error) { + args := new(BlockNumArg) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + br := NewBlockRes(block, false) + if br == nil { + return nil, nil + } + return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil +} + +func (self *EthApi) GetData(req *shared.Request) (interface{}, error) { + args := new(GetDataArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, 
shared.NewDecodeParamError(err.Error()) + } + v := self.xeth.AtStateNum(args.BlockNumber).CodeAtBytes(args.Address) + return newHexData(v), nil +} + +func (self *EthApi) Sign(req *shared.Request) (interface{}, error) { + args := new(NewSignArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + v, err := self.xeth.Sign(args.From, args.Data, false) + if err != nil { + return nil, err + } + return v, nil +} + +func (self *EthApi) SendTransaction(req *shared.Request) (interface{}, error) { + args := new(NewTxArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + // nonce may be nil ("guess" mode) + var nonce string + if args.Nonce != nil { + nonce = args.Nonce.String() + } + + v, err := self.xeth.Transact(args.From, args.To, nonce, args.Value.String(), args.Gas.String(), args.GasPrice.String(), args.Data) + if err != nil { + return nil, err + } + return v, nil +} + +func (self *EthApi) EstimateGas(req *shared.Request) (interface{}, error) { + _, gas, err := self.doCall(req.Params) + if err != nil { + return nil, err + } + + // TODO unwrap the parent method's ToHex call + if len(gas) == 0 { + return newHexNum(0), nil + } else { + return newHexNum(gas), nil + } +} + +func (self *EthApi) Call(req *shared.Request) (interface{}, error) { + v, _, err := self.doCall(req.Params) + if err != nil { + return nil, err + } + + // TODO unwrap the parent method's ToHex call + if v == "0x0" { + return newHexData([]byte{}), nil + } else { + return newHexData(common.FromHex(v)), nil + } +} + +func (self *EthApi) Flush(req *shared.Request) (interface{}, error) { + return nil, shared.NewNotImplementedError(req.Method) +} + +func (self *EthApi) doCall(params json.RawMessage) (string, string, error) { + args := new(CallArgs) + if err := self.codec.Decode(params, &args); err != nil { + return "", "", err + } + + return self.xeth.AtStateNum(args.BlockNumber).Call(args.From, args.To, args.Value.String(), args.Gas.String(), args.GasPrice.String(), args.Data) +} + +func (self *EthApi) GetBlockByHash(req *shared.Request) (interface{}, error) { + args := new(GetBlockByHashArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByHash(args.BlockHash) + return NewBlockRes(block, args.IncludeTxs), nil +} + +func (self *EthApi) GetBlockByNumber(req *shared.Request) (interface{}, error) { + args := new(GetBlockByNumberArgs) + if err := json.Unmarshal(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + br := NewBlockRes(block, args.IncludeTxs) + // If request was for "pending", nil nonsensical fields + if args.BlockNumber == -2 { + br.BlockHash = nil + br.BlockNumber = nil + br.Miner = nil + br.Nonce = nil + br.LogsBloom = nil + } + return br, nil +} + +func (self *EthApi) GetTransactionByHash(req *shared.Request) (interface{}, error) { + args := new(HashArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + tx, bhash, bnum, txi := self.xeth.EthTransactionByHash(args.Hash) + if tx != nil { + v := NewTransactionRes(tx) + // if the blockhash is 0, assume this is a pending transaction + if bytes.Compare(bhash.Bytes(), bytes.Repeat([]byte{0}, 32)) != 0 { + v.BlockHash = newHexData(bhash) + v.BlockNumber = 
newHexNum(bnum) + v.TxIndex = newHexNum(txi) + } + return v, nil + } + return nil, nil +} + +func (self *EthApi) GetTransactionByBlockHashAndIndex(req *shared.Request) (interface{}, error) { + args := new(HashIndexArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByHash(args.Hash) + br := NewBlockRes(block, true) + if br == nil { + return nil, nil + } + + if args.Index >= int64(len(br.Transactions)) || args.Index < 0 { + return nil, nil + } else { + return br.Transactions[args.Index], nil + } +} + +func (self *EthApi) GetTransactionByBlockNumberAndIndex(req *shared.Request) (interface{}, error) { + args := new(BlockNumIndexArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + v := NewBlockRes(block, true) + if v == nil { + return nil, nil + } + + if args.Index >= int64(len(v.Transactions)) || args.Index < 0 { + // return NewValidationError("Index", "does not exist") + return nil, nil + } + return v.Transactions[args.Index], nil +} + +func (self *EthApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{}, error) { + args := new(HashIndexArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + br := NewBlockRes(self.xeth.EthBlockByHash(args.Hash), false) + if br == nil { + return nil, nil + } + + if args.Index >= int64(len(br.Uncles)) || args.Index < 0 { + // return NewValidationError("Index", "does not exist") + return nil, nil + } + + return br.Uncles[args.Index], nil +} + +func (self *EthApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interface{}, error) { + args := new(BlockNumIndexArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + v := NewBlockRes(block, true) + + if v == nil { + return nil, nil + } + + if args.Index >= int64(len(v.Uncles)) || args.Index < 0 { + return nil, nil + } else { + return v.Uncles[args.Index], nil + } +} + +func (self *EthApi) GetCompilers(req *shared.Request) (interface{}, error) { + var lang string + if solc, _ := self.xeth.Solc(); solc != nil { + lang = "Solidity" + } + c := []string{lang} + return c, nil +} + +func (self *EthApi) CompileSolidity(req *shared.Request) (interface{}, error) { + solc, _ := self.xeth.Solc() + if solc == nil { + return nil, shared.NewNotAvailableError(req.Method, "solc (solidity compiler) not found") + } + + args := new(SourceArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + contracts, err := solc.Compile(args.Source) + if err != nil { + return nil, err + } + return contracts, nil +} + +func (self *EthApi) NewFilter(req *shared.Request) (interface{}, error) { + args := new(BlockFilterArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + id := self.xeth.NewLogFilter(args.Earliest, args.Latest, args.Skip, args.Max, args.Address, args.Topics) + return newHexNum(big.NewInt(int64(id)).Bytes()), nil +} + +func (self *EthApi) NewBlockFilter(req *shared.Request) (interface{}, error) { + return newHexNum(self.xeth.NewBlockFilter()), nil +} + +func (self *EthApi) NewPendingTransactionFilter(req *shared.Request) 
(interface{}, error) { + return newHexNum(self.xeth.NewTransactionFilter()), nil +} + +func (self *EthApi) UninstallFilter(req *shared.Request) (interface{}, error) { + args := new(FilterIdArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + return self.xeth.UninstallFilter(args.Id), nil +} + +func (self *EthApi) GetFilterChanges(req *shared.Request) (interface{}, error) { + args := new(FilterIdArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + switch self.xeth.GetFilterType(args.Id) { + case xeth.BlockFilterTy: + return NewHashesRes(self.xeth.BlockFilterChanged(args.Id)), nil + case xeth.TransactionFilterTy: + return NewHashesRes(self.xeth.TransactionFilterChanged(args.Id)), nil + case xeth.LogFilterTy: + return NewLogsRes(self.xeth.LogFilterChanged(args.Id)), nil + default: + return []string{}, nil // reply empty string slice + } +} + +func (self *EthApi) GetFilterLogs(req *shared.Request) (interface{}, error) { + args := new(FilterIdArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + return NewLogsRes(self.xeth.Logs(args.Id)), nil +} + +func (self *EthApi) GetLogs(req *shared.Request) (interface{}, error) { + args := new(BlockFilterArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + return NewLogsRes(self.xeth.AllLogs(args.Earliest, args.Latest, args.Skip, args.Max, args.Address, args.Topics)), nil +} + +func (self *EthApi) GetWork(req *shared.Request) (interface{}, error) { + self.xeth.SetMining(true, 0) + return self.xeth.RemoteMining().GetWork(), nil +} + +func (self *EthApi) SubmitWork(req *shared.Request) (interface{}, error) { + args := new(SubmitWorkArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + return self.xeth.RemoteMining().SubmitWork(args.Nonce, common.HexToHash(args.Digest), common.HexToHash(args.Header)), nil +} diff --git a/rpc/api/eth_args.go b/rpc/api/eth_args.go new file mode 100644 index 000000000..1ef6f9efb --- /dev/null +++ b/rpc/api/eth_args.go @@ -0,0 +1,835 @@ +package api + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/rpc/shared" +) + +const ( + defaultLogLimit = 100 + defaultLogOffset = 0 +) + +type GetBalanceArgs struct { + Address string + BlockNumber int64 +} + +func (args *GetBalanceArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + addstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("address", "not a string") + } + args.Address = addstr + + if len(obj) > 1 { + if err := blockHeight(obj[1], &args.BlockNumber); err != nil { + return err + } + } else { + args.BlockNumber = -1 + } + + return nil +} + +type GetStorageArgs struct { + Address string + BlockNumber int64 +} + +func (args *GetStorageArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return 
shared.NewInsufficientParamsError(len(obj), 1) + } + + addstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("address", "not a string") + } + args.Address = addstr + + if len(obj) > 1 { + if err := blockHeight(obj[1], &args.BlockNumber); err != nil { + return err + } + } else { + args.BlockNumber = -1 + } + + return nil +} + +type GetStorageAtArgs struct { + Address string + BlockNumber int64 + Key string +} + +func (args *GetStorageAtArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 2 { + return shared.NewInsufficientParamsError(len(obj), 2) + } + + addstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("address", "not a string") + } + args.Address = addstr + + keystr, ok := obj[1].(string) + if !ok { + return shared.NewInvalidTypeError("key", "not a string") + } + args.Key = keystr + + if len(obj) > 2 { + if err := blockHeight(obj[2], &args.BlockNumber); err != nil { + return err + } + } else { + args.BlockNumber = -1 + } + + return nil +} + +type GetTxCountArgs struct { + Address string + BlockNumber int64 +} + +func (args *GetTxCountArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + addstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("address", "not a string") + } + args.Address = addstr + + if len(obj) > 1 { + if err := blockHeight(obj[1], &args.BlockNumber); err != nil { + return err + } + } else { + args.BlockNumber = -1 + } + + return nil +} + +type HashArgs struct { + Hash string +} + +func (args *HashArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + arg0, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("hash", "not a string") + } + args.Hash = arg0 + + return nil +} + +type BlockNumArg struct { + BlockNumber int64 +} + +func (args *BlockNumArg) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + if err := blockHeight(obj[0], &args.BlockNumber); err != nil { + return err + } + + return nil +} + +type GetDataArgs struct { + Address string + BlockNumber int64 +} + +func (args *GetDataArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + addstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("address", "not a string") + } + args.Address = addstr + + if len(obj) > 1 { + if err := blockHeight(obj[1], &args.BlockNumber); err != nil { + return err + } + } else { + args.BlockNumber = -1 + } + + return nil +} + +type NewSignArgs struct { + From string + Data string +} + +func (args *NewSignArgs) UnmarshalJSON(b []byte) (err error) { + var obj []json.RawMessage + var ext struct { + From string + Data string + } + + // Decode byte slice to array 
of RawMessages + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + // Check for sufficient params + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + // Decode 0th RawMessage to temporary struct + if err := json.Unmarshal(obj[0], &ext); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(ext.From) == 0 { + return shared.NewValidationError("from", "is required") + } + + if len(ext.Data) == 0 { + return shared.NewValidationError("data", "is required") + } + + args.From = ext.From + args.Data = ext.Data + return nil +} + +type NewTxArgs struct { + From string + To string + Nonce *big.Int + Value *big.Int + Gas *big.Int + GasPrice *big.Int + Data string + + BlockNumber int64 +} + +func (args *NewTxArgs) UnmarshalJSON(b []byte) (err error) { + var obj []json.RawMessage + var ext struct { + From string + To string + Nonce interface{} + Value interface{} + Gas interface{} + GasPrice interface{} + Data string + } + + // Decode byte slice to array of RawMessages + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + // Check for sufficient params + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + // Decode 0th RawMessage to temporary struct + if err := json.Unmarshal(obj[0], &ext); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(ext.From) == 0 { + return shared.NewValidationError("from", "is required") + } + + args.From = ext.From + args.To = ext.To + args.Data = ext.Data + + var num *big.Int + if ext.Nonce != nil { + num, err = numString(ext.Nonce) + if err != nil { + return err + } + } + args.Nonce = num + + if ext.Value == nil { + num = big.NewInt(0) + } else { + num, err = numString(ext.Value) + if err != nil { + return err + } + } + args.Value = num + + num = nil + if ext.Gas == nil { + num = big.NewInt(0) + } else { + if num, err = numString(ext.Gas); err != nil { + return err + } + } + args.Gas = num + + num = nil + if ext.GasPrice == nil { + num = big.NewInt(0) + } else { + if num, err = numString(ext.GasPrice); err != nil { + return err + } + } + args.GasPrice = num + + // Check for optional BlockNumber param + if len(obj) > 1 { + if err := blockHeightFromJson(obj[1], &args.BlockNumber); err != nil { + return err + } + } else { + args.BlockNumber = -1 + } + + return nil +} + +type SourceArgs struct { + Source string +} + +func (args *SourceArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + arg0, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("source code", "not a string") + } + args.Source = arg0 + + return nil +} + +type CallArgs struct { + From string + To string + Value *big.Int + Gas *big.Int + GasPrice *big.Int + Data string + + BlockNumber int64 +} + +func (args *CallArgs) UnmarshalJSON(b []byte) (err error) { + var obj []json.RawMessage + var ext struct { + From string + To string + Value interface{} + Gas interface{} + GasPrice interface{} + Data string + } + + // Decode byte slice to array of RawMessages + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + // Check for sufficient params + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + // Decode 0th 
RawMessage to temporary struct + if err := json.Unmarshal(obj[0], &ext); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + args.From = ext.From + + if len(ext.To) == 0 { + return shared.NewValidationError("to", "is required") + } + args.To = ext.To + + var num *big.Int + if ext.Value == nil { + num = big.NewInt(0) + } else { + if num, err = numString(ext.Value); err != nil { + return err + } + } + args.Value = num + + if ext.Gas == nil { + num = big.NewInt(0) + } else { + if num, err = numString(ext.Gas); err != nil { + return err + } + } + args.Gas = num + + if ext.GasPrice == nil { + num = big.NewInt(0) + } else { + if num, err = numString(ext.GasPrice); err != nil { + return err + } + } + args.GasPrice = num + + args.Data = ext.Data + + // Check for optional BlockNumber param + if len(obj) > 1 { + if err := blockHeightFromJson(obj[1], &args.BlockNumber); err != nil { + return err + } + } else { + args.BlockNumber = -1 + } + + return nil +} + +type HashIndexArgs struct { + Hash string + Index int64 +} + +func (args *HashIndexArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 2 { + return shared.NewInsufficientParamsError(len(obj), 2) + } + + arg0, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("hash", "not a string") + } + args.Hash = arg0 + + arg1, ok := obj[1].(string) + if !ok { + return shared.NewInvalidTypeError("index", "not a string") + } + args.Index = common.Big(arg1).Int64() + + return nil +} + +type BlockNumIndexArgs struct { + BlockNumber int64 + Index int64 +} + +func (args *BlockNumIndexArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 2 { + return shared.NewInsufficientParamsError(len(obj), 2) + } + + if err := blockHeight(obj[0], &args.BlockNumber); err != nil { + return err + } + + var arg1 *big.Int + if arg1, err = numString(obj[1]); err != nil { + return err + } + args.Index = arg1.Int64() + + return nil +} + +type GetBlockByHashArgs struct { + BlockHash string + IncludeTxs bool +} + +func (args *GetBlockByHashArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 2 { + return shared.NewInsufficientParamsError(len(obj), 2) + } + + argstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("blockHash", "not a string") + } + args.BlockHash = argstr + + args.IncludeTxs = obj[1].(bool) + + return nil +} + +type GetBlockByNumberArgs struct { + BlockNumber int64 + IncludeTxs bool +} + +func (args *GetBlockByNumberArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 2 { + return shared.NewInsufficientParamsError(len(obj), 2) + } + + if err := blockHeight(obj[0], &args.BlockNumber); err != nil { + return err + } + + args.IncludeTxs = obj[1].(bool) + + return nil +} + +type BlockFilterArgs struct { + Earliest int64 + Latest int64 + Address []string + Topics [][]string + Skip int + Max int +} + +func (args *BlockFilterArgs) UnmarshalJSON(b []byte) (err error) { + var obj []struct { + FromBlock interface{} `json:"fromBlock"` + ToBlock interface{} `json:"toBlock"` + Limit interface{} 
`json:"limit"` + Offset interface{} `json:"offset"` + Address interface{} `json:"address"` + Topics interface{} `json:"topics"` + } + + if err = json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + // args.Earliest, err = toNumber(obj[0].ToBlock) + // if err != nil { + // return shared.NewDecodeParamError(fmt.Sprintf("FromBlock %v", err)) + // } + // args.Latest, err = toNumber(obj[0].FromBlock) + // if err != nil { + // return shared.NewDecodeParamError(fmt.Sprintf("ToBlock %v", err)) + + var num int64 + var numBig *big.Int + + // if blank then latest + if obj[0].FromBlock == nil { + num = -1 + } else { + if err := blockHeight(obj[0].FromBlock, &num); err != nil { + return err + } + } + // if -2 or other "silly" number, use latest + if num < 0 { + args.Earliest = -1 //latest block + } else { + args.Earliest = num + } + + // if blank than latest + if obj[0].ToBlock == nil { + num = -1 + } else { + if err := blockHeight(obj[0].ToBlock, &num); err != nil { + return err + } + } + args.Latest = num + + if obj[0].Limit == nil { + numBig = big.NewInt(defaultLogLimit) + } else { + if numBig, err = numString(obj[0].Limit); err != nil { + return err + } + } + args.Max = int(numBig.Int64()) + + if obj[0].Offset == nil { + numBig = big.NewInt(defaultLogOffset) + } else { + if numBig, err = numString(obj[0].Offset); err != nil { + return err + } + } + args.Skip = int(numBig.Int64()) + + if obj[0].Address != nil { + marg, ok := obj[0].Address.([]interface{}) + if ok { + v := make([]string, len(marg)) + for i, arg := range marg { + argstr, ok := arg.(string) + if !ok { + return shared.NewInvalidTypeError(fmt.Sprintf("address[%d]", i), "is not a string") + } + v[i] = argstr + } + args.Address = v + } else { + argstr, ok := obj[0].Address.(string) + if ok { + v := make([]string, 1) + v[0] = argstr + args.Address = v + } else { + return shared.NewInvalidTypeError("address", "is not a string or array") + } + } + } + + if obj[0].Topics != nil { + other, ok := obj[0].Topics.([]interface{}) + if ok { + topicdbl := make([][]string, len(other)) + for i, iv := range other { + if argstr, ok := iv.(string); ok { + // Found a string, push into first element of array + topicsgl := make([]string, 1) + topicsgl[0] = argstr + topicdbl[i] = topicsgl + } else if argarray, ok := iv.([]interface{}); ok { + // Found an array of other + topicdbl[i] = make([]string, len(argarray)) + for j, jv := range argarray { + if v, ok := jv.(string); ok { + topicdbl[i][j] = v + } else if jv == nil { + topicdbl[i][j] = "" + } else { + return shared.NewInvalidTypeError(fmt.Sprintf("topic[%d][%d]", i, j), "is not a string") + } + } + } else if iv == nil { + topicdbl[i] = []string{""} + } else { + return shared.NewInvalidTypeError(fmt.Sprintf("topic[%d]", i), "not a string or array") + } + } + args.Topics = topicdbl + return nil + } else { + return shared.NewInvalidTypeError("topic", "is not a string or array") + } + } + + return nil +} + +type FilterIdArgs struct { + Id int +} + +func (args *FilterIdArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + var num *big.Int + if num, err = numString(obj[0]); err != nil { + return err + } + args.Id = int(num.Int64()) + + return nil +} + +type LogRes struct { + Address *hexdata 
`json:"address"` + Topics []*hexdata `json:"topics"` + Data *hexdata `json:"data"` + BlockNumber *hexnum `json:"blockNumber"` + LogIndex *hexnum `json:"logIndex"` + BlockHash *hexdata `json:"blockHash"` + TransactionHash *hexdata `json:"transactionHash"` + TransactionIndex *hexnum `json:"transactionIndex"` +} + +func NewLogRes(log *state.Log) LogRes { + var l LogRes + l.Topics = make([]*hexdata, len(log.Topics)) + for j, topic := range log.Topics { + l.Topics[j] = newHexData(topic) + } + l.Address = newHexData(log.Address) + l.Data = newHexData(log.Data) + l.BlockNumber = newHexNum(log.Number) + l.LogIndex = newHexNum(log.Index) + l.TransactionHash = newHexData(log.TxHash) + l.TransactionIndex = newHexNum(log.TxIndex) + l.BlockHash = newHexData(log.BlockHash) + + return l +} + +func NewLogsRes(logs state.Logs) (ls []LogRes) { + ls = make([]LogRes, len(logs)) + + for i, log := range logs { + ls[i] = NewLogRes(log) + } + + return +} + +func NewHashesRes(hs []common.Hash) []string { + hashes := make([]string, len(hs)) + + for i, hash := range hs { + hashes[i] = hash.Hex() + } + + return hashes +} + +type SubmitWorkArgs struct { + Nonce uint64 + Header string + Digest string +} + +func (args *SubmitWorkArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err = json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 3 { + return shared.NewInsufficientParamsError(len(obj), 3) + } + + var objstr string + var ok bool + if objstr, ok = obj[0].(string); !ok { + return shared.NewInvalidTypeError("nonce", "not a string") + } + + args.Nonce = common.String2Big(objstr).Uint64() + if objstr, ok = obj[1].(string); !ok { + return shared.NewInvalidTypeError("header", "not a string") + } + + args.Header = objstr + + if objstr, ok = obj[2].(string); !ok { + return shared.NewInvalidTypeError("digest", "not a string") + } + + args.Digest = objstr + + return nil +} diff --git a/rpc/api/eth_js.go b/rpc/api/eth_js.go new file mode 100644 index 000000000..f7630bdd5 --- /dev/null +++ b/rpc/api/eth_js.go @@ -0,0 +1,3 @@ +package api + +// JS api provided by web3.js diff --git a/rpc/api/parsing.go b/rpc/api/parsing.go new file mode 100644 index 000000000..85a9165e5 --- /dev/null +++ b/rpc/api/parsing.go @@ -0,0 +1,460 @@ +package api + +import ( + "encoding/binary" + "encoding/hex" + "encoding/json" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc/shared" +) + +type hexdata struct { + data []byte + isNil bool +} + +func (d *hexdata) String() string { + return "0x" + common.Bytes2Hex(d.data) +} + +func (d *hexdata) MarshalJSON() ([]byte, error) { + if d.isNil { + return json.Marshal(nil) + } + return json.Marshal(d.String()) +} + +func newHexData(input interface{}) *hexdata { + d := new(hexdata) + + if input == nil { + d.isNil = true + return d + } + switch input := input.(type) { + case []byte: + d.data = input + case common.Hash: + d.data = input.Bytes() + case *common.Hash: + if input == nil { + d.isNil = true + } else { + d.data = input.Bytes() + } + case common.Address: + d.data = input.Bytes() + case *common.Address: + if input == nil { + d.isNil = true + } else { + d.data = input.Bytes() + } + case types.Bloom: + d.data = input.Bytes() + case *types.Bloom: + if input == nil { + d.isNil = true + } else { + d.data = input.Bytes() + } + case *big.Int: + if input == nil { + d.isNil = true + } else { + d.data = input.Bytes() + } + case 
int64: + d.data = big.NewInt(input).Bytes() + case uint64: + buff := make([]byte, 8) + binary.BigEndian.PutUint64(buff, input) + d.data = buff + case int: + d.data = big.NewInt(int64(input)).Bytes() + case uint: + d.data = big.NewInt(int64(input)).Bytes() + case int8: + d.data = big.NewInt(int64(input)).Bytes() + case uint8: + d.data = big.NewInt(int64(input)).Bytes() + case int16: + d.data = big.NewInt(int64(input)).Bytes() + case uint16: + buff := make([]byte, 2) + binary.BigEndian.PutUint16(buff, input) + d.data = buff + case int32: + d.data = big.NewInt(int64(input)).Bytes() + case uint32: + buff := make([]byte, 4) + binary.BigEndian.PutUint32(buff, input) + d.data = buff + case string: // hexstring + // aaargh ffs TODO: avoid back-and-forth hex encodings where unneeded + bytes, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + d.isNil = true + } else { + d.data = bytes + } + default: + d.isNil = true + } + + return d +} + +type hexnum struct { + data []byte + isNil bool +} + +func (d *hexnum) String() string { + // Get hex string from bytes + out := common.Bytes2Hex(d.data) + // Trim leading 0s + out = strings.TrimLeft(out, "0") + // Output "0x0" when value is 0 + if len(out) == 0 { + out = "0" + } + return "0x" + out +} + +func (d *hexnum) MarshalJSON() ([]byte, error) { + if d.isNil { + return json.Marshal(nil) + } + return json.Marshal(d.String()) +} + +func newHexNum(input interface{}) *hexnum { + d := new(hexnum) + + d.data = newHexData(input).data + + return d +} + +type BlockRes struct { + fullTx bool + + BlockNumber *hexnum `json:"number"` + BlockHash *hexdata `json:"hash"` + ParentHash *hexdata `json:"parentHash"` + Nonce *hexdata `json:"nonce"` + Sha3Uncles *hexdata `json:"sha3Uncles"` + LogsBloom *hexdata `json:"logsBloom"` + TransactionRoot *hexdata `json:"transactionsRoot"` + StateRoot *hexdata `json:"stateRoot"` + Miner *hexdata `json:"miner"` + Difficulty *hexnum `json:"difficulty"` + TotalDifficulty *hexnum `json:"totalDifficulty"` + Size *hexnum `json:"size"` + ExtraData *hexdata `json:"extraData"` + GasLimit *hexnum `json:"gasLimit"` + GasUsed *hexnum `json:"gasUsed"` + UnixTimestamp *hexnum `json:"timestamp"` + Transactions []*TransactionRes `json:"transactions"` + Uncles []*UncleRes `json:"uncles"` +} + +func (b *BlockRes) MarshalJSON() ([]byte, error) { + if b.fullTx { + var ext struct { + BlockNumber *hexnum `json:"number"` + BlockHash *hexdata `json:"hash"` + ParentHash *hexdata `json:"parentHash"` + Nonce *hexdata `json:"nonce"` + Sha3Uncles *hexdata `json:"sha3Uncles"` + LogsBloom *hexdata `json:"logsBloom"` + TransactionRoot *hexdata `json:"transactionsRoot"` + StateRoot *hexdata `json:"stateRoot"` + Miner *hexdata `json:"miner"` + Difficulty *hexnum `json:"difficulty"` + TotalDifficulty *hexnum `json:"totalDifficulty"` + Size *hexnum `json:"size"` + ExtraData *hexdata `json:"extraData"` + GasLimit *hexnum `json:"gasLimit"` + GasUsed *hexnum `json:"gasUsed"` + UnixTimestamp *hexnum `json:"timestamp"` + Transactions []*TransactionRes `json:"transactions"` + Uncles []*hexdata `json:"uncles"` + } + + ext.BlockNumber = b.BlockNumber + ext.BlockHash = b.BlockHash + ext.ParentHash = b.ParentHash + ext.Nonce = b.Nonce + ext.Sha3Uncles = b.Sha3Uncles + ext.LogsBloom = b.LogsBloom + ext.TransactionRoot = b.TransactionRoot + ext.StateRoot = b.StateRoot + ext.Miner = b.Miner + ext.Difficulty = b.Difficulty + ext.TotalDifficulty = b.TotalDifficulty + ext.Size = b.Size + ext.ExtraData = b.ExtraData + ext.GasLimit = b.GasLimit + ext.GasUsed = 
b.GasUsed + ext.UnixTimestamp = b.UnixTimestamp + ext.Transactions = b.Transactions + ext.Uncles = make([]*hexdata, len(b.Uncles)) + for i, u := range b.Uncles { + ext.Uncles[i] = u.BlockHash + } + return json.Marshal(ext) + } else { + var ext struct { + BlockNumber *hexnum `json:"number"` + BlockHash *hexdata `json:"hash"` + ParentHash *hexdata `json:"parentHash"` + Nonce *hexdata `json:"nonce"` + Sha3Uncles *hexdata `json:"sha3Uncles"` + LogsBloom *hexdata `json:"logsBloom"` + TransactionRoot *hexdata `json:"transactionsRoot"` + StateRoot *hexdata `json:"stateRoot"` + Miner *hexdata `json:"miner"` + Difficulty *hexnum `json:"difficulty"` + TotalDifficulty *hexnum `json:"totalDifficulty"` + Size *hexnum `json:"size"` + ExtraData *hexdata `json:"extraData"` + GasLimit *hexnum `json:"gasLimit"` + GasUsed *hexnum `json:"gasUsed"` + UnixTimestamp *hexnum `json:"timestamp"` + Transactions []*hexdata `json:"transactions"` + Uncles []*hexdata `json:"uncles"` + } + + ext.BlockNumber = b.BlockNumber + ext.BlockHash = b.BlockHash + ext.ParentHash = b.ParentHash + ext.Nonce = b.Nonce + ext.Sha3Uncles = b.Sha3Uncles + ext.LogsBloom = b.LogsBloom + ext.TransactionRoot = b.TransactionRoot + ext.StateRoot = b.StateRoot + ext.Miner = b.Miner + ext.Difficulty = b.Difficulty + ext.TotalDifficulty = b.TotalDifficulty + ext.Size = b.Size + ext.ExtraData = b.ExtraData + ext.GasLimit = b.GasLimit + ext.GasUsed = b.GasUsed + ext.UnixTimestamp = b.UnixTimestamp + ext.Transactions = make([]*hexdata, len(b.Transactions)) + for i, tx := range b.Transactions { + ext.Transactions[i] = tx.Hash + } + ext.Uncles = make([]*hexdata, len(b.Uncles)) + for i, u := range b.Uncles { + ext.Uncles[i] = u.BlockHash + } + return json.Marshal(ext) + } +} + +func NewBlockRes(block *types.Block, fullTx bool) *BlockRes { + if block == nil { + return nil + } + + res := new(BlockRes) + res.fullTx = fullTx + res.BlockNumber = newHexNum(block.Number()) + res.BlockHash = newHexData(block.Hash()) + res.ParentHash = newHexData(block.ParentHash()) + res.Nonce = newHexData(block.Nonce()) + res.Sha3Uncles = newHexData(block.Header().UncleHash) + res.LogsBloom = newHexData(block.Bloom()) + res.TransactionRoot = newHexData(block.Header().TxHash) + res.StateRoot = newHexData(block.Root()) + res.Miner = newHexData(block.Header().Coinbase) + res.Difficulty = newHexNum(block.Difficulty()) + res.TotalDifficulty = newHexNum(block.Td) + res.Size = newHexNum(block.Size().Int64()) + res.ExtraData = newHexData(block.Header().Extra) + res.GasLimit = newHexNum(block.GasLimit()) + res.GasUsed = newHexNum(block.GasUsed()) + res.UnixTimestamp = newHexNum(block.Time()) + + res.Transactions = make([]*TransactionRes, len(block.Transactions())) + for i, tx := range block.Transactions() { + res.Transactions[i] = NewTransactionRes(tx) + res.Transactions[i].BlockHash = res.BlockHash + res.Transactions[i].BlockNumber = res.BlockNumber + res.Transactions[i].TxIndex = newHexNum(i) + } + + res.Uncles = make([]*UncleRes, len(block.Uncles())) + for i, uncle := range block.Uncles() { + res.Uncles[i] = NewUncleRes(uncle) + } + + return res +} + +type TransactionRes struct { + Hash *hexdata `json:"hash"` + Nonce *hexnum `json:"nonce"` + BlockHash *hexdata `json:"blockHash"` + BlockNumber *hexnum `json:"blockNumber"` + TxIndex *hexnum `json:"transactionIndex"` + From *hexdata `json:"from"` + To *hexdata `json:"to"` + Value *hexnum `json:"value"` + Gas *hexnum `json:"gas"` + GasPrice *hexnum `json:"gasPrice"` + Input *hexdata `json:"input"` +} + +func NewTransactionRes(tx 
*types.Transaction) *TransactionRes { + if tx == nil { + return nil + } + + var v = new(TransactionRes) + v.Hash = newHexData(tx.Hash()) + v.Nonce = newHexNum(tx.Nonce()) + // v.BlockHash = + // v.BlockNumber = + // v.TxIndex = + from, _ := tx.From() + v.From = newHexData(from) + v.To = newHexData(tx.To()) + v.Value = newHexNum(tx.Value()) + v.Gas = newHexNum(tx.Gas()) + v.GasPrice = newHexNum(tx.GasPrice()) + v.Input = newHexData(tx.Data()) + return v +} + +type UncleRes struct { + BlockNumber *hexnum `json:"number"` + BlockHash *hexdata `json:"hash"` + ParentHash *hexdata `json:"parentHash"` + Nonce *hexdata `json:"nonce"` + Sha3Uncles *hexdata `json:"sha3Uncles"` + ReceiptHash *hexdata `json:"receiptHash"` + LogsBloom *hexdata `json:"logsBloom"` + TransactionRoot *hexdata `json:"transactionsRoot"` + StateRoot *hexdata `json:"stateRoot"` + Miner *hexdata `json:"miner"` + Difficulty *hexnum `json:"difficulty"` + ExtraData *hexdata `json:"extraData"` + GasLimit *hexnum `json:"gasLimit"` + GasUsed *hexnum `json:"gasUsed"` + UnixTimestamp *hexnum `json:"timestamp"` +} + +func NewUncleRes(h *types.Header) *UncleRes { + if h == nil { + return nil + } + + var v = new(UncleRes) + v.BlockNumber = newHexNum(h.Number) + v.BlockHash = newHexData(h.Hash()) + v.ParentHash = newHexData(h.ParentHash) + v.Sha3Uncles = newHexData(h.UncleHash) + v.Nonce = newHexData(h.Nonce[:]) + v.LogsBloom = newHexData(h.Bloom) + v.TransactionRoot = newHexData(h.TxHash) + v.StateRoot = newHexData(h.Root) + v.Miner = newHexData(h.Coinbase) + v.Difficulty = newHexNum(h.Difficulty) + v.ExtraData = newHexData(h.Extra) + v.GasLimit = newHexNum(h.GasLimit) + v.GasUsed = newHexNum(h.GasUsed) + v.UnixTimestamp = newHexNum(h.Time) + v.ReceiptHash = newHexData(h.ReceiptHash) + + return v +} + +// type FilterLogRes struct { +// Hash string `json:"hash"` +// Address string `json:"address"` +// Data string `json:"data"` +// BlockNumber string `json:"blockNumber"` +// TransactionHash string `json:"transactionHash"` +// BlockHash string `json:"blockHash"` +// TransactionIndex string `json:"transactionIndex"` +// LogIndex string `json:"logIndex"` +// } + +// type FilterWhisperRes struct { +// Hash string `json:"hash"` +// From string `json:"from"` +// To string `json:"to"` +// Expiry string `json:"expiry"` +// Sent string `json:"sent"` +// Ttl string `json:"ttl"` +// Topics string `json:"topics"` +// Payload string `json:"payload"` +// WorkProved string `json:"workProved"` +// } + +func numString(raw interface{}) (*big.Int, error) { + var number *big.Int + // Parse as integer + num, ok := raw.(float64) + if ok { + number = big.NewInt(int64(num)) + return number, nil + } + + // Parse as string/hexstring + str, ok := raw.(string) + if ok { + number = common.String2Big(str) + return number, nil + } + + return nil, shared.NewInvalidTypeError("", "not a number or string") +} + +func blockHeight(raw interface{}, number *int64) error { + // Parse as integer + num, ok := raw.(float64) + if ok { + *number = int64(num) + return nil + } + + // Parse as string/hexstring + str, ok := raw.(string) + if !ok { + return shared.NewInvalidTypeError("", "not a number or string") + } + + switch str { + case "earliest": + *number = 0 + case "latest": + *number = -1 + case "pending": + *number = -2 + default: + if common.HasHexPrefix(str) { + *number = common.String2Big(str).Int64() + } else { + return shared.NewInvalidTypeError("blockNumber", "is not a valid string") + } + } + + return nil +} + +func blockHeightFromJson(msg json.RawMessage, number *int64) 
error { + var raw interface{} + if err := json.Unmarshal(msg, &raw); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + return blockHeight(raw, number) +} diff --git a/rpc/api/utils.go b/rpc/api/utils.go new file mode 100644 index 000000000..76f00c251 --- /dev/null +++ b/rpc/api/utils.go @@ -0,0 +1,36 @@ +package api + +import ( + "strings" + + "fmt" + + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/xeth" +) + +const ( + EthApiName = "eth" +) + +// Parse a comma separated API string to individual api's +func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth.Ethereum) ([]EthereumApi, error) { + if len(strings.TrimSpace(apistr)) == 0 { + return nil, fmt.Errorf("Empty apistr provided") + } + + names := strings.Split(apistr, ",") + apis := make([]EthereumApi, len(names)) + + for i, name := range names { + switch strings.ToLower(strings.TrimSpace(name)) { + case EthApiName: + apis[i] = NewEthApi(xeth, codec) + default: + return nil, fmt.Errorf("Unknown API '%s'", name) + } + } + + return apis, nil +} diff --git a/rpc/codec/codec.go b/rpc/codec/codec.go new file mode 100644 index 000000000..5e8f38438 --- /dev/null +++ b/rpc/codec/codec.go @@ -0,0 +1,47 @@ +package codec + +import ( + "net" + "strconv" + + "github.com/ethereum/go-ethereum/rpc/shared" +) + +type Codec int + +// (de)serialization support for rpc interface +type ApiCoder interface { + // Parse message to request from underlying stream + ReadRequest() (*shared.Request, error) + // Parse response message from underlying stream + ReadResponse() (interface{}, error) + // Encode response to encoded form in underlying stream + WriteResponse(interface{}) error + // Decode single message from data + Decode([]byte, interface{}) error + // Encode msg to encoded form + Encode(msg interface{}) ([]byte, error) + // close the underlying stream + Close() +} + +// supported codecs +const ( + JSON Codec = iota + nCodecs +) + +var ( + // collection with supported coders + coders = make([]func(net.Conn) ApiCoder, nCodecs) +) + +// create a new coder instance +func (c Codec) New(conn net.Conn) ApiCoder { + switch c { + case JSON: + return NewJsonCoder(conn) + } + + panic("codec: request for codec #" + strconv.Itoa(int(c)) + " is unavailable") +} diff --git a/rpc/codec/json.go b/rpc/codec/json.go new file mode 100644 index 000000000..31024ee74 --- /dev/null +++ b/rpc/codec/json.go @@ -0,0 +1,75 @@ +package codec + +import ( + "encoding/json" + "net" + + "github.com/ethereum/go-ethereum/rpc/shared" +) + +const ( + MAX_RESPONSE_SIZE = 64 * 1024 +) + +// Json serialization support +type JsonCodec struct { + c net.Conn + d *json.Decoder + e *json.Encoder +} + +// Create new JSON coder instance +func NewJsonCoder(conn net.Conn) ApiCoder { + return &JsonCodec{ + c: conn, + d: json.NewDecoder(conn), + e: json.NewEncoder(conn), + } +} + +// Serialize obj to JSON and write it to conn +func (self *JsonCodec) ReadRequest() (*shared.Request, error) { + req := shared.Request{} + err := self.d.Decode(&req) + if err == nil { + return &req, nil + } + return nil, err +} + +func (self *JsonCodec) ReadResponse() (interface{}, error) { + var err error + buf := make([]byte, MAX_RESPONSE_SIZE) + n, _ := self.c.Read(buf) + + var failure shared.ErrorResponse + if err = json.Unmarshal(buf[:n], &failure); err == nil && failure.Error != nil { + return failure, nil + } + + var success shared.SuccessResponse + if err = json.Unmarshal(buf[:n], &success); err == nil { + 
return success, nil + } + + return nil, err +} + +// Encode response to encoded form in underlying stream +func (self *JsonCodec) Decode(data []byte, msg interface{}) error { + return json.Unmarshal(data, msg) +} + +func (self *JsonCodec) Encode(msg interface{}) ([]byte, error) { + return json.Marshal(msg) +} + +// Parse JSON data from conn to obj +func (self *JsonCodec) WriteResponse(res interface{}) error { + return self.e.Encode(&res) +} + +// Close decoder and encoder +func (self *JsonCodec) Close() { + self.c.Close() +} diff --git a/rpc/shared/errors.go b/rpc/shared/errors.go new file mode 100644 index 000000000..bd10b33a0 --- /dev/null +++ b/rpc/shared/errors.go @@ -0,0 +1,96 @@ +package shared + +import "fmt" + +type InvalidTypeError struct { + method string + msg string +} + +func (e *InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type on field %s: %s", e.method, e.msg) +} + +func NewInvalidTypeError(method, msg string) *InvalidTypeError { + return &InvalidTypeError{ + method: method, + msg: msg, + } +} + +type InsufficientParamsError struct { + have int + want int +} + +func (e *InsufficientParamsError) Error() string { + return fmt.Sprintf("insufficient params, want %d have %d", e.want, e.have) +} + +func NewInsufficientParamsError(have int, want int) *InsufficientParamsError { + return &InsufficientParamsError{ + have: have, + want: want, + } +} + +type NotImplementedError struct { + Method string +} + +func (e *NotImplementedError) Error() string { + return fmt.Sprintf("%s method not implemented", e.Method) +} + +func NewNotImplementedError(method string) *NotImplementedError { + return &NotImplementedError{ + Method: method, + } +} + +type DecodeParamError struct { + err string +} + +func (e *DecodeParamError) Error() string { + return fmt.Sprintf("could not decode, %s", e.err) + +} + +func NewDecodeParamError(errstr string) error { + return &DecodeParamError{ + err: errstr, + } +} + +type ValidationError struct { + ParamName string + msg string +} + +func (e *ValidationError) Error() string { + return fmt.Sprintf("%s not valid, %s", e.ParamName, e.msg) +} + +func NewValidationError(param string, msg string) error { + return &ValidationError{ + ParamName: param, + msg: msg, + } +} + +type NotAvailableError struct { + Method string + Reason string +} + +func (e *NotAvailableError) Error() string { + return fmt.Sprintf("%s method not available: %s", e.Method, e.Reason) +} + +func NewNotAvailableError(method string, reason string) *NotAvailableError { + return &NotAvailableError{ + Method: method, + Reason: reason, + } +} diff --git a/rpc/shared/types.go b/rpc/shared/types.go new file mode 100644 index 000000000..46fd5552c --- /dev/null +++ b/rpc/shared/types.go @@ -0,0 +1,38 @@ +package shared + +import "encoding/json" + +// RPC request +type Request struct { + Id interface{} `json:"id"` + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` +} + +// RPC response +type Response struct { + Id interface{} `json:"id"` + Jsonrpc string `json:"jsonrpc"` +} + +// RPC success response +type SuccessResponse struct { + Id interface{} `json:"id"` + Jsonrpc string `json:"jsonrpc"` + Result interface{} `json:"result"` +} + +// RPC error response +type ErrorResponse struct { + Id interface{} `json:"id"` + Jsonrpc string `json:"jsonrpc"` + Error *ErrorObject `json:"error"` +} + +// RPC error response details +type ErrorObject struct { + Code int `json:"code"` + Message string `json:"message"` + // Data interface{} 
`json:"data"` +} -- cgit v1.2.3 From 8ebf2d8fad729a8261f237bb05b6073e6c1b652f Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 10:41:04 +0200 Subject: added RPC/IPC support --- rpc/api/utils.go | 41 +++ rpc/comms/comms.go | 7 + rpc/comms/ipc.go | 37 +++ rpc/comms/ipc_unix.go | 77 ++++++ rpc/comms/ipc_windows.go | 699 +++++++++++++++++++++++++++++++++++++++++++++++ rpc/shared/types.go | 27 +- 6 files changed, 887 insertions(+), 1 deletion(-) create mode 100644 rpc/comms/comms.go create mode 100644 rpc/comms/ipc.go create mode 100644 rpc/comms/ipc_unix.go create mode 100644 rpc/comms/ipc_windows.go diff --git a/rpc/api/utils.go b/rpc/api/utils.go index 76f00c251..a62058140 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/rpc/codec" "github.com/ethereum/go-ethereum/xeth" + "github.com/ethereum/go-ethereum/rpc/shared" ) const ( @@ -34,3 +35,43 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. return apis, nil } + +// combines multiple API's +type mergedApi struct { + apis map[string]EthereumApi +} + +// create new merged api instance +func newMergedApi(apis ...EthereumApi) *mergedApi { + mergedApi := new(mergedApi) + mergedApi.apis = make(map[string]EthereumApi) + + for _, api := range apis { + for _, method := range api.Methods() { + mergedApi.apis[method] = api + } + } + return mergedApi +} + +// Supported RPC methods +func (self *mergedApi) Methods() []string { + all := make([]string, len(self.apis)) + for method, _ := range self.apis { + all = append(all, method) + } + return all +} + +// Call the correct API's Execute method for the given request +func (self *mergedApi) Execute(req *shared.Request) (interface{}, error) { + if api, found := self.apis[req.Method]; found { + return api.Execute(req) + } + return nil, shared.NewNotImplementedError(req.Method) +} + +// Merge multiple API's to a single API instance +func Merge(apis ...EthereumApi) EthereumApi { + return newMergedApi(apis...) +} diff --git a/rpc/comms/comms.go b/rpc/comms/comms.go new file mode 100644 index 000000000..244f5a7a6 --- /dev/null +++ b/rpc/comms/comms.go @@ -0,0 +1,7 @@ +package comms + +type EthereumClient interface { + Close() + Send(interface{}) error + Recv() (interface{}, error) +} diff --git a/rpc/comms/ipc.go b/rpc/comms/ipc.go new file mode 100644 index 000000000..a75039d17 --- /dev/null +++ b/rpc/comms/ipc.go @@ -0,0 +1,37 @@ +package comms + +import ( + "github.com/ethereum/go-ethereum/rpc/api" + "github.com/ethereum/go-ethereum/rpc/codec" +) + +type IpcConfig struct { + Endpoint string +} + +type ipcClient struct { + c codec.ApiCoder +} + +func (self *ipcClient) Close() { + self.c.Close() +} + +func (self *ipcClient) Send(req interface{}) error { + return self.c.WriteResponse(req) +} + +func (self *ipcClient) Recv() (interface{}, error) { + return self.c.ReadResponse() +} + +// Create a new IPC client, UNIX domain socket on posix, named pipe on Windows +func NewIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) { + return newIpcClient(cfg, codec) +} + +// Start IPC server +func StartIpc(cfg IpcConfig, codec codec.Codec, apis ...api.EthereumApi) error { + offeredApi := api.Merge(apis...) 
+ return startIpc(cfg, codec, offeredApi) +} diff --git a/rpc/comms/ipc_unix.go b/rpc/comms/ipc_unix.go new file mode 100644 index 000000000..bb09d9547 --- /dev/null +++ b/rpc/comms/ipc_unix.go @@ -0,0 +1,77 @@ +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris + +package comms + +import ( + "io" + "net" + "os" + + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rpc/api" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" +) + +func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) { + c, err := net.DialUnix("unix", nil, &net.UnixAddr{cfg.Endpoint, "unix"}) + if err != nil { + return nil, err + } + + return &ipcClient{codec.New(c)}, nil +} + +func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { + os.Remove(cfg.Endpoint) // in case it still exists from a previous run + + l, err := net.ListenUnix("unix", &net.UnixAddr{Name: cfg.Endpoint, Net: "unix"}) + if err != nil { + return err + } + os.Chmod(cfg.Endpoint, 0600) + + go func() { + for { + conn, err := l.AcceptUnix() + if err != nil { + glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err) + continue + } + + go func(conn net.Conn) { + codec := codec.New(conn) + + for { + req, err := codec.ReadRequest() + if err == io.EOF { + codec.Close() + return + } else if err != nil { + glog.V(logger.Error).Infof("IPC recv err - %v\n", err) + codec.Close() + return + } + + var rpcResponse interface{} + res, err := api.Execute(req) + + rpcResponse = shared.NewRpcResponse(req.Id, req.Jsonrpc, res, err) + err = codec.WriteResponse(rpcResponse) + if err != nil { + glog.V(logger.Error).Infof("IPC send err - %v\n", err) + codec.Close() + return + } + } + }(conn) + } + + os.Remove(cfg.Endpoint) + }() + + glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint) + + return nil +} diff --git a/rpc/comms/ipc_windows.go b/rpc/comms/ipc_windows.go new file mode 100644 index 000000000..ff9015d03 --- /dev/null +++ b/rpc/comms/ipc_windows.go @@ -0,0 +1,699 @@ +// +build windows + +package comms + +import ( + "fmt" + "io" + "net" + "os" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rpc/api" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") +) + +func createNamedPipe(name *uint16, openMode uint32, pipeMode uint32, maxInstances uint32, outBufSize uint32, inBufSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(openMode), uintptr(pipeMode), uintptr(maxInstances), uintptr(outBufSize), uintptr(inBufSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = 
error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func cancelIoEx(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connectNamedPipe(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func disconnectNamedPipe(handle syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) { + var _p0 uint32 + if manualReset { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if initialState { + _p1 = 1 + } else { + _p1 = 0 + } + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(sa)), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(name)), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, transferred *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transferred)), uintptr(_p0), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + + +const ( + // openMode + pipe_access_duplex = 0x3 + pipe_access_inbound = 0x1 + pipe_access_outbound = 0x2 + + // openMode write flags + file_flag_first_pipe_instance = 0x00080000 + file_flag_write_through = 0x80000000 + file_flag_overlapped = 0x40000000 + + // openMode ACL flags + write_dac = 0x00040000 + write_owner = 0x00080000 + access_system_security = 0x01000000 + + // pipeMode + pipe_type_byte = 0x0 + pipe_type_message = 0x4 + + // pipeMode read mode flags + pipe_readmode_byte = 0x0 + pipe_readmode_message = 0x2 + + // pipeMode wait mode flags + pipe_wait = 0x0 + pipe_nowait = 0x1 + + // pipeMode remote-client mode flags + pipe_accept_remote_clients = 0x0 + pipe_reject_remote_clients = 0x8 + + pipe_unlimited_instances = 255 + + nmpwait_wait_forever = 0xFFFFFFFF + + // the two not-an-errors below occur if a client connects to the pipe between + // the server's CreateNamedPipe and ConnectNamedPipe calls. 
+ error_no_data syscall.Errno = 0xE8 + error_pipe_connected syscall.Errno = 0x217 + error_pipe_busy syscall.Errno = 0xE7 + error_sem_timeout syscall.Errno = 0x79 + + error_bad_pathname syscall.Errno = 0xA1 + error_invalid_name syscall.Errno = 0x7B + + error_io_incomplete syscall.Errno = 0x3e4 +) + +var _ net.Conn = (*PipeConn)(nil) +var _ net.Listener = (*PipeListener)(nil) + +// ErrClosed is the error returned by PipeListener.Accept when Close is called +// on the PipeListener. +var ErrClosed = PipeError{"Pipe has been closed.", false} + +// PipeError is an error related to a call to a pipe +type PipeError struct { + msg string + timeout bool +} + +// Error implements the error interface +func (e PipeError) Error() string { + return e.msg +} + +// Timeout implements net.AddrError.Timeout() +func (e PipeError) Timeout() bool { + return e.timeout +} + +// Temporary implements net.AddrError.Temporary() +func (e PipeError) Temporary() bool { + return false +} + +// Dial connects to a named pipe with the given address. If the specified pipe is not available, +// it will wait indefinitely for the pipe to become available. +// +// The address must be of the form \\.\\pipe\ for local pipes and \\\pipe\ +// for remote pipes. +// +// Dial will return a PipeError if you pass in a badly formatted pipe name. +// +// Examples: +// // local pipe +// conn, err := Dial(`\\.\pipe\mypipename`) +// +// // remote pipe +// conn, err := Dial(`\\othercomp\pipe\mypipename`) +func Dial(address string) (*PipeConn, error) { + for { + conn, err := dial(address, nmpwait_wait_forever) + if err == nil { + return conn, nil + } + if isPipeNotReady(err) { + <-time.After(100 * time.Millisecond) + continue + } + return nil, err + } +} + +// DialTimeout acts like Dial, but will time out after the duration of timeout +func DialTimeout(address string, timeout time.Duration) (*PipeConn, error) { + deadline := time.Now().Add(timeout) + + now := time.Now() + for now.Before(deadline) { + millis := uint32(deadline.Sub(now) / time.Millisecond) + conn, err := dial(address, millis) + if err == nil { + return conn, nil + } + if err == error_sem_timeout { + // This is WaitNamedPipe's timeout error, so we know we're done + return nil, PipeError{fmt.Sprintf( + "Timed out waiting for pipe '%s' to come available", address), true} + } + if isPipeNotReady(err) { + left := deadline.Sub(time.Now()) + retry := 100 * time.Millisecond + if left > retry { + <-time.After(retry) + } else { + <-time.After(left - time.Millisecond) + } + now = time.Now() + continue + } + return nil, err + } + return nil, PipeError{fmt.Sprintf( + "Timed out waiting for pipe '%s' to come available", address), true} +} + +// isPipeNotReady checks the error to see if it indicates the pipe is not ready +func isPipeNotReady(err error) bool { + // Pipe Busy means another client just grabbed the open pipe end, + // and the server hasn't made a new one yet. + // File Not Found means the server hasn't created the pipe yet. + // Neither is a fatal error. + + return err == syscall.ERROR_FILE_NOT_FOUND || err == error_pipe_busy +} + +// newOverlapped creates a structure used to track asynchronous +// I/O requests that have been issued. +func newOverlapped() (*syscall.Overlapped, error) { + event, err := createEvent(nil, true, true, nil) + if err != nil { + return nil, err + } + return &syscall.Overlapped{HEvent: event}, nil +} + +// waitForCompletion waits for an asynchronous I/O request referred to by overlapped to complete. 
+// This function returns the number of bytes transferred by the operation and an error code if +// applicable (nil otherwise). +func waitForCompletion(handle syscall.Handle, overlapped *syscall.Overlapped) (uint32, error) { + _, err := syscall.WaitForSingleObject(overlapped.HEvent, syscall.INFINITE) + if err != nil { + return 0, err + } + var transferred uint32 + err = getOverlappedResult(handle, overlapped, &transferred, true) + return transferred, err +} + +// dial is a helper to initiate a connection to a named pipe that has been started by a server. +// The timeout is only enforced if the pipe server has already created the pipe, otherwise +// this function will return immediately. +func dial(address string, timeout uint32) (*PipeConn, error) { + name, err := syscall.UTF16PtrFromString(string(address)) + if err != nil { + return nil, err + } + // If at least one instance of the pipe has been created, this function + // will wait timeout milliseconds for it to become available. + // It will return immediately regardless of timeout, if no instances + // of the named pipe have been created yet. + // If this returns with no error, there is a pipe available. + if err := waitNamedPipe(name, timeout); err != nil { + if err == error_bad_pathname { + // badly formatted pipe name + return nil, badAddr(address) + } + return nil, err + } + pathp, err := syscall.UTF16PtrFromString(address) + if err != nil { + return nil, err + } + handle, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, + uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, err + } + return &PipeConn{handle: handle, addr: PipeAddr(address)}, nil +} + +// Listen returns a new PipeListener that will listen on a pipe with the given +// address. The address must be of the form \\.\pipe\ +// +// Listen will return a PipeError for an incorrectly formatted pipe name. +func Listen(address string) (*PipeListener, error) { + handle, err := createPipe(address, true) + if err == error_invalid_name { + return nil, badAddr(address) + } + if err != nil { + return nil, err + } + return &PipeListener{ + addr: PipeAddr(address), + handle: handle, + }, nil +} + +// PipeListener is a named pipe listener. Clients should typically +// use variables of type net.Listener instead of assuming named pipe. +type PipeListener struct { + addr PipeAddr + handle syscall.Handle + closed bool + + // acceptHandle contains the current handle waiting for + // an incoming connection or nil. + acceptHandle syscall.Handle + // acceptOverlapped is set before waiting on a connection. + // If not waiting, it is nil. + acceptOverlapped *syscall.Overlapped + // acceptMutex protects the handle and overlapped structure. + acceptMutex sync.Mutex +} + +// Accept implements the Accept method in the net.Listener interface; it +// waits for the next call and returns a generic net.Conn. +func (l *PipeListener) Accept() (net.Conn, error) { + c, err := l.AcceptPipe() + for err == error_no_data { + // Ignore clients that connect and immediately disconnect. + c, err = l.AcceptPipe() + } + if err != nil { + return nil, err + } + return c, nil +} + +// AcceptPipe accepts the next incoming call and returns the new connection. +// It might return an error if a client connected and immediately cancelled +// the connection. 
+func (l *PipeListener) AcceptPipe() (*PipeConn, error) { + if l == nil || l.addr == "" || l.closed { + return nil, syscall.EINVAL + } + + // the first time we call accept, the handle will have been created by the Listen + // call. This is to prevent race conditions where the client thinks the server + // isn't listening because it hasn't actually called create yet. After the first time, we'll + // have to create a new handle each time + handle := l.handle + if handle == 0 { + var err error + handle, err = createPipe(string(l.addr), false) + if err != nil { + return nil, err + } + } else { + l.handle = 0 + } + + overlapped, err := newOverlapped() + if err != nil { + return nil, err + } + defer syscall.CloseHandle(overlapped.HEvent) + if err := connectNamedPipe(handle, overlapped); err != nil && err != error_pipe_connected { + if err == error_io_incomplete || err == syscall.ERROR_IO_PENDING { + l.acceptMutex.Lock() + l.acceptOverlapped = overlapped + l.acceptHandle = handle + l.acceptMutex.Unlock() + defer func() { + l.acceptMutex.Lock() + l.acceptOverlapped = nil + l.acceptHandle = 0 + l.acceptMutex.Unlock() + }() + + _, err = waitForCompletion(handle, overlapped) + } + if err == syscall.ERROR_OPERATION_ABORTED { + // Return error compatible to net.Listener.Accept() in case the + // listener was closed. + return nil, ErrClosed + } + if err != nil { + return nil, err + } + } + return &PipeConn{handle: handle, addr: l.addr}, nil +} + +// Close stops listening on the address. +// Already Accepted connections are not closed. +func (l *PipeListener) Close() error { + if l.closed { + return nil + } + l.closed = true + if l.handle != 0 { + err := disconnectNamedPipe(l.handle) + if err != nil { + return err + } + err = syscall.CloseHandle(l.handle) + if err != nil { + return err + } + l.handle = 0 + } + l.acceptMutex.Lock() + defer l.acceptMutex.Unlock() + if l.acceptOverlapped != nil && l.acceptHandle != 0 { + // Cancel the pending IO. This call does not block, so it is safe + // to hold onto the mutex above. + if err := cancelIoEx(l.acceptHandle, l.acceptOverlapped); err != nil { + return err + } + err := syscall.CloseHandle(l.acceptOverlapped.HEvent) + if err != nil { + return err + } + l.acceptOverlapped.HEvent = 0 + err = syscall.CloseHandle(l.acceptHandle) + if err != nil { + return err + } + l.acceptHandle = 0 + } + return nil +} + +// Addr returns the listener's network address, a PipeAddr. +func (l *PipeListener) Addr() net.Addr { return l.addr } + +// PipeConn is the implementation of the net.Conn interface for named pipe connections. +type PipeConn struct { + handle syscall.Handle + addr PipeAddr + + // these aren't actually used yet + readDeadline *time.Time + writeDeadline *time.Time +} + +type iodata struct { + n uint32 + err error +} + +// completeRequest looks at iodata to see if a request is pending. If so, it waits for it to either complete or to +// abort due to hitting the specified deadline. Deadline may be set to nil to wait forever. If no request is pending, +// the content of iodata is returned. 
+func (c *PipeConn) completeRequest(data iodata, deadline *time.Time, overlapped *syscall.Overlapped) (int, error) { + if data.err == error_io_incomplete || data.err == syscall.ERROR_IO_PENDING { + var timer <-chan time.Time + if deadline != nil { + if timeDiff := deadline.Sub(time.Now()); timeDiff > 0 { + timer = time.After(timeDiff) + } + } + done := make(chan iodata) + go func() { + n, err := waitForCompletion(c.handle, overlapped) + done <- iodata{n, err} + }() + select { + case data = <-done: + case <-timer: + syscall.CancelIoEx(c.handle, overlapped) + data = iodata{0, timeout(c.addr.String())} + } + } + // Windows will produce ERROR_BROKEN_PIPE upon closing + // a handle on the other end of a connection. Go RPC + // expects an io.EOF error in this case. + if data.err == syscall.ERROR_BROKEN_PIPE { + data.err = io.EOF + } + return int(data.n), data.err +} + +// Read implements the net.Conn Read method. +func (c *PipeConn) Read(b []byte) (int, error) { + // Use ReadFile() rather than Read() because the latter + // contains a workaround that eats ERROR_BROKEN_PIPE. + overlapped, err := newOverlapped() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(overlapped.HEvent) + var n uint32 + err = syscall.ReadFile(c.handle, b, &n, overlapped) + return c.completeRequest(iodata{n, err}, c.readDeadline, overlapped) +} + +// Write implements the net.Conn Write method. +func (c *PipeConn) Write(b []byte) (int, error) { + overlapped, err := newOverlapped() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(overlapped.HEvent) + var n uint32 + err = syscall.WriteFile(c.handle, b, &n, overlapped) + return c.completeRequest(iodata{n, err}, c.writeDeadline, overlapped) +} + +// Close closes the connection. +func (c *PipeConn) Close() error { + return syscall.CloseHandle(c.handle) +} + +// LocalAddr returns the local network address. +func (c *PipeConn) LocalAddr() net.Addr { + return c.addr +} + +// RemoteAddr returns the remote network address. +func (c *PipeConn) RemoteAddr() net.Addr { + // not sure what to do here, we don't have remote addr.... + return c.addr +} + +// SetDeadline implements the net.Conn SetDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetDeadline(t time.Time) error { + c.SetReadDeadline(t) + c.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetReadDeadline(t time.Time) error { + c.readDeadline = &t + return nil +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = &t + return nil +} + +// PipeAddr represents the address of a named pipe. +type PipeAddr string + +// Network returns the address's network name, "pipe". +func (a PipeAddr) Network() string { return "pipe" } + +// String returns the address of the pipe +func (a PipeAddr) String() string { + return string(a) +} + +// createPipe is a helper function to make sure we always create pipes +// with the same arguments, since subsequent calls to create pipe need +// to use the same arguments as the first one. If first is set, fail +// if the pipe already exists. 
+func createPipe(address string, first bool) (syscall.Handle, error) { + n, err := syscall.UTF16PtrFromString(address) + if err != nil { + return 0, err + } + mode := uint32(pipe_access_duplex | syscall.FILE_FLAG_OVERLAPPED) + if first { + mode |= file_flag_first_pipe_instance + } + return createNamedPipe(n, + mode, + pipe_type_byte, + pipe_unlimited_instances, + 512, 512, 0, nil) +} + +func badAddr(addr string) PipeError { + return PipeError{fmt.Sprintf("Invalid pipe address '%s'.", addr), false} +} +func timeout(addr string) PipeError { + return PipeError{fmt.Sprintf("Pipe IO timed out waiting for '%s'", addr), true} +} + + + +func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) { + c, err := Dial(cfg.Endpoint) + if err != nil { + return nil, err + } + + return &ipcClient{codec.New(c)}, nil +} + +func startIpc(cfg IpcConfig, codec codec.Codec, api api.Ethereum) error { + os.Remove(cfg.Endpoint) // in case it still exists from a previous run + + l, err := Listen(cfg.Endpoint) + if err != nil { + return err + } + os.Chmod(cfg.Endpoint, 0600) + + go func() { + for { + conn, err := l.Accept() + if err != nil { + glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err) + continue + } + + go func(conn net.Conn) { + codec := codec.New(conn) + + for { + req, err := codec.ReadRequest() + if err == io.EOF { + codec.Close() + return + } else if err != nil { + glog.V(logger.Error).Infof("IPC recv err - %v\n", err) + codec.Close() + return + } + + var rpcResponse interface{} + res, err := api.Execute(req) + + rpcResponse = shared.NewRpcResponse(req.Id, req.Jsonrpc, res, err) + err = codec.WriteResponse(rpcResponse) + if err != nil { + glog.V(logger.Error).Infof("IPC send err - %v\n", err) + codec.Close() + return + } + } + }(conn) + } + }() + + glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint) + + return nil +} diff --git a/rpc/shared/types.go b/rpc/shared/types.go index 46fd5552c..600d39541 100644 --- a/rpc/shared/types.go +++ b/rpc/shared/types.go @@ -1,6 +1,10 @@ package shared -import "encoding/json" +import ( + "encoding/json" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" +) // RPC request type Request struct { @@ -36,3 +40,24 @@ type ErrorObject struct { Message string `json:"message"` // Data interface{} `json:"data"` } + +func NewRpcResponse(id interface{}, jsonrpcver string, reply interface{}, err error) *interface{} { + var response interface{} + + switch err.(type) { + case nil: + response = &SuccessResponse{Jsonrpc: jsonrpcver, Id: id, Result: reply} + case *NotImplementedError: + jsonerr := &ErrorObject{-32601, err.Error()} + response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr} + case *DecodeParamError, *InsufficientParamsError, *ValidationError, *InvalidTypeError: + jsonerr := &ErrorObject{-32602, err.Error()} + response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr} + default: + jsonerr := &ErrorObject{-32603, err.Error()} + response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr} + } + + glog.V(logger.Detail).Infof("Generated response: %T %s", response, response) + return &response +} -- cgit v1.2.3 From 2a0d888326036be9cabe6680617ce2d1a27761d3 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 11:01:02 +0200 Subject: added API/IPC commandline flags --- cmd/geth/main.go | 8 ++++++++ cmd/utils/flags.go | 33 +++++++++++++++++++++++++++++++++ common/path.go | 4 ++++ rpc/api/api.go | 5 +++++ 4 files changed, 50 insertions(+) 
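The previous commit introduces the rpc/comms and rpc/codec packages; the diff below only wires them into geth's command line. For orientation, here is a minimal client-side sketch (not part of these patches) of how that plumbing could be exercised, assuming the shared.Request type and JSON codec defined above and an already-running geth exposing an IPC socket:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/ethereum/go-ethereum/rpc/codec"
        "github.com/ethereum/go-ethereum/rpc/comms"
        "github.com/ethereum/go-ethereum/rpc/shared"
    )

    func main() {
        // The endpoint path is an assumption; geth derives it from the
        // --ipcpath flag added in this commit (default <datadir>/geth.ipc).
        cfg := comms.IpcConfig{Endpoint: "/tmp/geth.ipc"}

        client, err := comms.NewIpcClient(cfg, codec.JSON)
        if err != nil {
            fmt.Println("dial error:", err)
            return
        }
        defer client.Close()

        // The method name is illustrative; any method offered by the APIs
        // configured via --ipcapi ("eth" by default) takes the same shape.
        req := shared.Request{
            Id:      1,
            Jsonrpc: "2.0",
            Method:  "eth_protocolVersion",
            Params:  json.RawMessage("[]"),
        }
        if err := client.Send(req); err != nil {
            fmt.Println("send error:", err)
            return
        }

        // Recv returns a shared.SuccessResponse or shared.ErrorResponse,
        // as decoded by rpc/codec/json.go.
        res, err := client.Recv()
        fmt.Println(res, err)
    }

Because the codec abstraction only needs a net.Conn, the same JSON wire format can serve both the existing HTTP-RPC server and the UNIX domain socket / named pipe transport added in this series.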
diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 4d7d57220..d180c269f 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -239,6 +239,9 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso utils.RPCEnabledFlag, utils.RPCListenAddrFlag, utils.RPCPortFlag, + utils.IPCDisabledFlag, + utils.IPCApiFlag, + utils.IPCPathFlag, utils.WhisperEnabledFlag, utils.VMDebugFlag, utils.ProtocolVersionFlag, @@ -382,6 +385,11 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) { } } // Start auxiliary services if enabled. + if !ctx.GlobalBool(utils.IPCDisabledFlag.Name) { + if err := utils.StartIPC(eth, ctx); err != nil { + utils.Fatalf("Error string IPC: %v", err) + } + } if ctx.GlobalBool(utils.RPCEnabledFlag.Name) { if err := utils.StartRPC(eth, ctx); err != nil { utils.Fatalf("Error starting RPC: %v", err) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ab7eaf023..4c3690d49 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -24,6 +24,9 @@ import ( "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/xeth" + "github.com/ethereum/go-ethereum/rpc/api" + "github.com/ethereum/go-ethereum/rpc/comms" + "github.com/ethereum/go-ethereum/rpc/codec" ) func init() { @@ -206,6 +209,20 @@ var ( Usage: "Domain on which to send Access-Control-Allow-Origin header", Value: "", } + IPCDisabledFlag = cli.BoolFlag{ + Name: "ipcdisable", + Usage: "Disable the IPC-RPC server", + } + IPCApiFlag = cli.StringFlag{ + Name: "ipcapi", + Usage: "Specify the API's which are offered over this interface", + Value: api.DefaultIpcApis, + } + IPCPathFlag = DirectoryFlag{ + Name: "ipcpath", + Usage: "Filename for IPC socket/pipe", + Value: DirectoryString{common.DefaultIpcPath()}, + } // Network Settings MaxPeersFlag = cli.IntFlag{ Name: "maxpeers", @@ -368,6 +385,22 @@ func MakeAccountManager(ctx *cli.Context) *accounts.Manager { return accounts.NewManager(ks) } +func StartIPC(eth *eth.Ethereum, ctx *cli.Context) error { + config := comms.IpcConfig{ + Endpoint: ctx.GlobalString(IPCPathFlag.Name), + } + + xeth := xeth.New(eth, nil) + codec := codec.JSON + + apis, err := api.ParseApiString(ctx.GlobalString(IPCApiFlag.Name), codec, xeth, eth) + if err != nil { + return err + } + + return comms.StartIpc(config, codec, apis...) 
+} + func StartRPC(eth *eth.Ethereum, ctx *cli.Context) error { config := rpc.RpcConfig{ ListenAddress: ctx.GlobalString(RPCListenAddrFlag.Name), diff --git a/common/path.go b/common/path.go index 3468b3366..63a23abcd 100644 --- a/common/path.go +++ b/common/path.go @@ -94,6 +94,10 @@ func DefaultDataDir() string { } } +func DefaultIpcPath() string { + return filepath.Join(DefaultDataDir(), "geth.ipc") +} + func IsWindows() bool { return runtime.GOOS == "windows" } diff --git a/rpc/api/api.go b/rpc/api/api.go index 758e056ed..93dc3058c 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -2,6 +2,11 @@ package api import "github.com/ethereum/go-ethereum/rpc/shared" +const ( + // List with all API's which are offered over the IPC interface by default + DefaultIpcApis = "eth" +) + // Ethereum RPC API interface type EthereumApi interface { // Execute the given request and returns the response or an error -- cgit v1.2.3 From a1a475fb9296e214292840d89811123292c7953c Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 12:43:58 +0200 Subject: added console command --- Makefile | 5 + cmd/geth/js.go | 10 +- cmd/geth/main.go | 2 + jsre/ethereum_js.go | 4853 ++++++++++++++++++++++++++++++++++++++++++++--- rpc/api/api.go | 14 +- rpc/api/eth.go | 4 + rpc/api/mergedapi.go | 56 + rpc/api/mergedapi_js.go | 1 + rpc/api/utils.go | 47 +- rpc/api/web3.go | 84 + rpc/api/web3_args.go | 5 + rpc/jeth.go | 63 +- 12 files changed, 4857 insertions(+), 287 deletions(-) create mode 100644 rpc/api/mergedapi.go create mode 100644 rpc/api/mergedapi_js.go create mode 100644 rpc/api/web3.go create mode 100644 rpc/api/web3_args.go diff --git a/Makefile b/Makefile index 03e3bf4c6..b243d2679 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,11 @@ geth: @echo "Done building." @echo "Run \"$(GOBIN)/geth\" to launch geth." +console: + build/env.sh go install -v $(shell build/ldflags.sh) ./cmd/console + @echo "Done building." + @echo "Run \"$(GOBIN)/console\" to launch the console." + mist: build/env.sh go install -v $(shell build/ldflags.sh) ./cmd/mist @echo "Done building." 
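Taken together, the flags introduced in this commit are --ipcdisable (switch the IPC server off), --ipcapi (the API modules exposed over the socket, defaulting to api.DefaultIpcApis, i.e. "eth") and --ipcpath, which falls back to common.DefaultIpcPath(), i.e. geth.ipc inside the data directory. The --ipcapi value is handed to api.ParseApiString as a single string; purely as a rough illustration (the authoritative grammar lives in rpc/api/utils.go, which this patch does not show), splitting a comma-separated module list could look like the hypothetical helper below, which is not part of any commit in this series.

package main

import (
	"fmt"
	"strings"
)

// splitApiSpec is a hypothetical helper, not the real api.ParseApiString:
// it merely splits a comma-separated module list such as "eth,web3".
func splitApiSpec(spec string) []string {
	var modules []string
	for _, name := range strings.Split(spec, ",") {
		if name = strings.TrimSpace(name); name != "" {
			modules = append(modules, name)
		}
	}
	return modules
}

func main() {
	fmt.Println(splitApiSpec("eth"))       // [eth]
	fmt.Println(splitApiSpec("eth, web3")) // [eth web3]
}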
diff --git a/cmd/geth/js.go b/cmd/geth/js.go index 706bc6554..d1a6cc29d 100644 --- a/cmd/geth/js.go +++ b/cmd/geth/js.go @@ -73,7 +73,7 @@ type jsre struct { prompter } -func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain string, interactive bool, f xeth.Frontend) *jsre { +func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain, ipcpath string, interactive bool, f xeth.Frontend) *jsre { js := &jsre{ethereum: ethereum, ps1: "> "} // set default cors domain used by startRpc from CLI flag js.corsDomain = corsDomain @@ -84,7 +84,7 @@ func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain string, interactive boo js.wait = js.xeth.UpdateState() // update state in separare forever blocks js.re = re.New(libPath) - js.apiBindings(f) + js.apiBindings(ipcpath, f) js.adminBindings() if !liner.TerminalSupported() || !interactive { @@ -103,10 +103,10 @@ func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain string, interactive boo return js } -func (js *jsre) apiBindings(f xeth.Frontend) { +func (js *jsre) apiBindings(ipcpath string, f xeth.Frontend) { xe := xeth.New(js.ethereum, f) ethApi := rpc.NewEthereumApi(xe) - jeth := rpc.NewJeth(ethApi, js.re) + jeth := rpc.NewJeth(ethApi, js.re, ipcpath) js.re.Set("jeth", struct{}{}) t, _ := js.re.Get("jeth") @@ -119,7 +119,7 @@ func (js *jsre) apiBindings(f xeth.Frontend) { utils.Fatalf("Error loading bignumber.js: %v", err) } - err = js.re.Compile("ethereum.js", re.Ethereum_JS) + err = js.re.Compile("ethereum.js", re.Web3_JS) if err != nil { utils.Fatalf("Error loading ethereum.js: %v", err) } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index d180c269f..5d7e102c4 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -307,6 +307,7 @@ func console(ctx *cli.Context) { repl := newJSRE( ethereum, ctx.String(utils.JSpathFlag.Name), + ctx.GlobalString(utils.IPCPathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), true, nil, @@ -328,6 +329,7 @@ func execJSFiles(ctx *cli.Context) { repl := newJSRE( ethereum, ctx.String(utils.JSpathFlag.Name), + ctx.GlobalString(utils.IPCPathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), false, nil, diff --git a/jsre/ethereum_js.go b/jsre/ethereum_js.go index 74f6b2acd..5aa673906 100644 --- a/jsre/ethereum_js.go +++ b/jsre/ethereum_js.go @@ -1,7 +1,6 @@ package jsre -const Ethereum_JS = ` -require=(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o. 
*/ -/** +/** * @file coder.js * @author Marek Kotewicz * @date 2015 @@ -73,7 +72,7 @@ SolidityType.prototype.isType = function (name) { * @method formatInput * @param {Object} param - plain object, or an array of objects * @param {Bool} arrayType - true if a param should be encoded as an array - * @return {SolidityParam} encoded param wrapped in SolidityParam object + * @return {SolidityParam} encoded param wrapped in SolidityParam object */ SolidityType.prototype.formatInput = function (param, arrayType) { if (utils.isArray(param) && arrayType) { // TODO: should fail if this two are not the same @@ -83,7 +82,7 @@ SolidityType.prototype.formatInput = function (param, arrayType) { }).reduce(function (acc, current) { return acc.combine(current); }, f.formatInputInt(param.length)).withOffset(32); - } + } return this._inputFormatter(param); }; @@ -97,7 +96,7 @@ SolidityType.prototype.formatInput = function (param, arrayType) { */ SolidityType.prototype.formatOutput = function (param, arrayType) { if (arrayType) { - // let's assume, that we solidity will never return long arrays :P + // let's assume, that we solidity will never return long arrays :P var result = []; var length = new BigNumber(param.dynamicPart().slice(0, 64), 16); for (var i = 0; i < length * 64; i += 64) { @@ -138,7 +137,7 @@ var SolidityCoder = function (types) { * * @method _requireType * @param {String} type - * @returns {SolidityType} + * @returns {SolidityType} * @throws {Error} throws if no matching type is found */ SolidityCoder.prototype._requireType = function (type) { @@ -285,7 +284,7 @@ var coder = new SolidityCoder([ module.exports = coder; -},{"../utils/utils":6,"./formatters":2,"./param":3,"bignumber.js":"bignumber.js"}],2:[function(require,module,exports){ +},{"../utils/utils":7,"./formatters":2,"./param":3,"bignumber.js":"bignumber.js"}],2:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -302,7 +301,7 @@ module.exports = coder; You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . */ -/** +/** * @file formatters.js * @author Marek Kotewicz * @date 2015 @@ -427,7 +426,7 @@ var formatOutputUInt = function (param) { * @returns {BigNumber} input bytes formatted to real */ var formatOutputReal = function (param) { - return formatOutputInt(param).dividedBy(new BigNumber(2).pow(128)); + return formatOutputInt(param).dividedBy(new BigNumber(2).pow(128)); }; /** @@ -438,7 +437,7 @@ var formatOutputReal = function (param) { * @returns {BigNumber} input bytes formatted to ureal */ var formatOutputUReal = function (param) { - return formatOutputUInt(param).dividedBy(new BigNumber(2).pow(128)); + return formatOutputUInt(param).dividedBy(new BigNumber(2).pow(128)); }; /** @@ -505,7 +504,7 @@ module.exports = { }; -},{"../utils/config":5,"../utils/utils":6,"./param":3,"bignumber.js":"bignumber.js"}],3:[function(require,module,exports){ +},{"../utils/config":5,"../utils/utils":7,"./param":3,"bignumber.js":"bignumber.js"}],3:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -522,7 +521,7 @@ module.exports = { You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . 
*/ -/** +/** * @file param.js * @author Marek Kotewicz * @date 2015 @@ -541,7 +540,7 @@ var SolidityParam = function (value, offset) { /** * This method should be used to get length of params's dynamic part - * + * * @method dynamicPartLength * @returns {Number} length of dynamic part (in bytes) */ @@ -569,7 +568,7 @@ SolidityParam.prototype.withOffset = function (offset) { * @param {SolidityParam} result of combination */ SolidityParam.prototype.combine = function (param) { - return new SolidityParam(this.value + param.value); + return new SolidityParam(this.value + param.value); }; /** @@ -601,8 +600,8 @@ SolidityParam.prototype.offsetAsBytes = function () { */ SolidityParam.prototype.staticPart = function () { if (!this.isDynamic()) { - return this.value; - } + return this.value; + } return this.offsetAsBytes(); }; @@ -634,7 +633,7 @@ SolidityParam.prototype.encode = function () { * @returns {String} */ SolidityParam.encodeList = function (params) { - + // updating offsets var totalOffset = params.length * 32; var offsetParams = params.map(function (param) { @@ -664,7 +663,7 @@ SolidityParam.encodeList = function (params) { */ SolidityParam.decodeParam = function (bytes, index) { index = index || 0; - return new SolidityParam(bytes.substr(index * 64, 64)); + return new SolidityParam(bytes.substr(index * 64, 64)); }; /** @@ -717,7 +716,7 @@ SolidityParam.decodeArray = function (bytes, index) { module.exports = SolidityParam; -},{"../utils/utils":6}],4:[function(require,module,exports){ +},{"../utils/utils":7}],4:[function(require,module,exports){ 'use strict'; // go env doesn't have and need XMLHttpRequest @@ -753,13 +752,13 @@ if (typeof XMLHttpRequest === 'undefined') { /** * Utils - * + * * @module utils */ /** * Utility functions - * + * * @class [utils] config * @constructor */ @@ -767,26 +766,34 @@ if (typeof XMLHttpRequest === 'undefined') { /// required to define ETH_BIGNUMBER_ROUNDING_MODE var BigNumber = require('bignumber.js'); -var ETH_UNITS = [ - 'wei', - 'Kwei', - 'Mwei', - 'Gwei', - 'szabo', - 'finney', - 'ether', - 'grand', - 'Mether', - 'Gether', - 'Tether', - 'Pether', - 'Eether', - 'Zether', - 'Yether', - 'Nether', - 'Dether', - 'Vether', - 'Uether' +var ETH_UNITS = [ + 'wei', + 'kwei', + 'Mwei', + 'Gwei', + 'szabo', + 'finney', + 'femtoether', + 'picoether', + 'nanoether', + 'microether', + 'milliether', + 'nano', + 'micro', + 'milli', + 'ether', + 'grand', + 'Mether', + 'Gether', + 'Tether', + 'Pether', + 'Eether', + 'Zether', + 'Yether', + 'Nether', + 'Dether', + 'Vether', + 'Uether' ]; module.exports = { @@ -817,7 +824,48 @@ module.exports = { You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . */ -/** +/** + * @file sha3.js + * @author Marek Kotewicz + * @date 2015 + */ + +var utils = require('./utils'); +var sha3 = require('crypto-js/sha3'); + +module.exports = function (str, isNew) { + if (str.substr(0, 2) === '0x' && !isNew) { + console.warn('requirement of using web3.fromAscii before sha3 is deprecated'); + console.warn('new usage: \'web3.sha3("hello")\''); + console.warn('see https://github.com/ethereum/web3.js/pull/205'); + console.warn('if you need to hash hex value, you can do \'sha3("0xfff", true)\''); + str = utils.toAscii(str); + } + + return sha3(str, { + outputLength: 256 + }).toString(); +}; + + +},{"./utils":7,"crypto-js/sha3":33}],7:[function(require,module,exports){ +/* + This file is part of ethereum.js. 
+ + ethereum.js is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + ethereum.js is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with ethereum.js. If not, see . +*/ +/** * @file utils.js * @author Marek Kotewicz * @date 2015 @@ -825,13 +873,13 @@ module.exports = { /** * Utils - * + * * @module utils */ /** * Utility functions - * + * * @class [utils] utils * @constructor */ @@ -839,22 +887,30 @@ module.exports = { var BigNumber = require('bignumber.js'); var unitMap = { - 'wei': '1', - 'kwei': '1000', - 'ada': '1000', - 'mwei': '1000000', - 'babbage': '1000000', - 'gwei': '1000000000', - 'shannon': '1000000000', - 'szabo': '1000000000000', - 'finney': '1000000000000000', - 'ether': '1000000000000000000', - 'kether': '1000000000000000000000', - 'grand': '1000000000000000000000', - 'einstein': '1000000000000000000000', - 'mether': '1000000000000000000000000', - 'gether': '1000000000000000000000000000', - 'tether': '1000000000000000000000000000000' + 'wei': '1', + 'kwei': '1000', + 'ada': '1000', + 'femtoether': '1000', + 'mwei': '1000000', + 'babbage': '1000000', + 'picoether': '1000000', + 'gwei': '1000000000', + 'shannon': '1000000000', + 'nanoether': '1000000000', + 'nano': '1000000000', + 'szabo': '1000000000000', + 'microether': '1000000000000', + 'micro': '1000000000000', + 'finney': '1000000000000000', + 'milliether': '1000000000000000', + 'milli': '1000000000000000', + 'ether': '1000000000000000000', + 'kether': '1000000000000000000000', + 'grand': '1000000000000000000000', + 'einstein': '1000000000000000000000', + 'mether': '1000000000000000000000000', + 'gether': '1000000000000000000000000000', + 'tether': '1000000000000000000000000000000' }; /** @@ -870,7 +926,7 @@ var padLeft = function (string, chars, sign) { return new Array(chars - string.length + 1).join(sign ? sign : "0") + string; }; -/** +/** * Should be called to get sting from it's hex representation * * @method toAscii @@ -895,9 +951,9 @@ var toAscii = function(hex) { return str; }; - + /** - * Shold be called to get hex representation (prefixed by 0x) of ascii string + * Shold be called to get hex representation (prefixed by 0x) of ascii string * * @method toHexNative * @param {String} string @@ -914,7 +970,7 @@ var toHexNative = function(str) { }; /** - * Shold be called to get hex representation (prefixed by 0x) of ascii string + * Shold be called to get hex representation (prefixed by 0x) of ascii string * * @method fromAscii * @param {String} string @@ -947,13 +1003,13 @@ var transformToFullName = function (json) { /** * Should be called to get display name of contract function - * + * * @method extractDisplayName * @param {String} name of function/event * @returns {String} display name for function/event eg. multiply(uint256) -> multiply */ var extractDisplayName = function (name) { - var length = name.indexOf('('); + var length = name.indexOf('('); return length !== -1 ? name.substr(0, length) : name; }; @@ -1042,13 +1098,14 @@ var getValueOfUnit = function (unit) { * Takes a number of wei and converts it to any other ether unit. 
* * Possible units are: - * - kwei/ada - * - mwei/babbage - * - gwei/shannon - * - szabo - * - finney - * - ether - * - kether/grand/einstein + * SI Short SI Full Effigy Other + * - kwei femtoether ada + * - mwei picoether babbage + * - gwei nanoether shannon nano + * - -- microether szabo micro + * - -- milliether finney milli + * - ether -- -- + * - kether einstein grand * - mether * - gether * - tether @@ -1061,20 +1118,21 @@ var getValueOfUnit = function (unit) { var fromWei = function(number, unit) { var returnValue = toBigNumber(number).dividedBy(getValueOfUnit(unit)); - return isBigNumber(number) ? returnValue : returnValue.toString(10); + return isBigNumber(number) ? returnValue : returnValue.toString(10); }; /** * Takes a number of a unit and converts it to wei. * * Possible units are: - * - kwei/ada - * - mwei/babbage - * - gwei/shannon - * - szabo - * - finney - * - ether - * - kether/grand/einstein + * SI Short SI Full Effigy Other + * - kwei femtoether ada + * - mwei picoether babbage + * - gwei nanoether shannon nano + * - -- microether szabo micro + * - -- milliether finney milli + * - ether -- -- + * - kether einstein grand * - mether * - gether * - tether @@ -1087,7 +1145,7 @@ var fromWei = function(number, unit) { var toWei = function(number, unit) { var returnValue = toBigNumber(number).times(getValueOfUnit(unit)); - return isBigNumber(number) ? returnValue : returnValue.toString(10); + return isBigNumber(number) ? returnValue : returnValue.toString(10); }; /** @@ -1106,7 +1164,7 @@ var toBigNumber = function(number) { if (isString(number) && (number.indexOf('0x') === 0 || number.indexOf('-0x') === 0)) { return new BigNumber(number.replace('0x',''), 16); } - + return new BigNumber(number.toString(10), 10); }; @@ -1158,7 +1216,7 @@ var toAddress = function (address) { if (isStrictAddress(address)) { return address; } - + if (/^[0-9a-f]{40}$/.test(address)) { return '0x' + address; } @@ -1172,7 +1230,7 @@ var toAddress = function (address) { * * @method isBigNumber * @param {Object} - * @return {Boolean} + * @return {Boolean} */ var isBigNumber = function (object) { return object instanceof BigNumber || @@ -1181,7 +1239,7 @@ var isBigNumber = function (object) { /** * Returns true if object is string, otherwise false - * + * * @method isString * @param {Object} * @return {Boolean} @@ -1232,12 +1290,12 @@ var isBoolean = function (object) { * @return {Boolean} */ var isArray = function (object) { - return object instanceof Array; + return object instanceof Array; }; /** * Returns true if given string is valid json object - * + * * @method isJson * @param {String} * @return {Boolean} @@ -1250,6 +1308,18 @@ var isJson = function (str) { } }; +/** + * This method should be called to check if string is valid ethereum IBAN number + * Supports direct and indirect IBANs + * + * @method isIBAN + * @param {String} + * @return {Boolean} + */ +var isIBAN = function (iban) { + return /^XE[0-9]{2}(ETH[0-9A-Z]{13}|[0-9A-Z]{30})$/.test(iban); +}; + module.exports = { padLeft: padLeft, toHex: toHex, @@ -1273,16 +1343,17 @@ module.exports = { isObject: isObject, isBoolean: isBoolean, isArray: isArray, - isJson: isJson + isJson: isJson, + isIBAN: isIBAN }; -},{"bignumber.js":"bignumber.js"}],7:[function(require,module,exports){ +},{"bignumber.js":"bignumber.js"}],8:[function(require,module,exports){ module.exports={ - "version": "0.4.3" + "version": "0.5.0" } -},{}],8:[function(require,module,exports){ +},{}],9:[function(require,module,exports){ /* This file is part of ethereum.js. 
@@ -1320,17 +1391,9 @@ var utils = require('./utils/utils'); var formatters = require('./web3/formatters'); var RequestManager = require('./web3/requestmanager'); var c = require('./utils/config'); -var Method = require('./web3/method'); var Property = require('./web3/property'); var Batch = require('./web3/batch'); - -var web3Methods = [ - new Method({ - name: 'sha3', - call: 'web3_sha3', - params: 1 - }) -]; +var sha3 = require('./utils/sha3'); var web3Properties = [ new Property({ @@ -1415,6 +1478,8 @@ web3.toBigNumber = utils.toBigNumber; web3.toWei = utils.toWei; web3.fromWei = utils.fromWei; web3.isAddress = utils.isAddress; +web3.isIBAN = utils.isIBAN; +web3.sha3 = sha3; web3.createBatch = function () { return new Batch(); }; @@ -1440,8 +1505,24 @@ Object.defineProperty(web3.eth, 'defaultAccount', { } }); + +// EXTEND +web3.extend = function(extension){ + /*jshint maxcomplexity: 6 */ + + if(extension.property && !web3[extension.property]) + web3[extension.property] = {}; + + setupMethods(web3[extension.property] || web3, extension.methods || []); + setupProperties(web3[extension.property] || web3, extension.properties || []); +}; +web3.extend.formatters = formatters; +web3.extend.utils = utils; +web3.extend.Method = require('./web3/method'); +web3.extend.Property = require('./web3/property'); + + /// setups all api methods -setupMethods(web3, web3Methods); setupProperties(web3, web3Properties); setupMethods(web3.net, net.methods); setupProperties(web3.net, net.properties); @@ -1453,7 +1534,7 @@ setupMethods(web3.shh, shh.methods); module.exports = web3; -},{"./utils/config":5,"./utils/utils":6,"./version.json":7,"./web3/batch":9,"./web3/db":11,"./web3/eth":13,"./web3/filter":15,"./web3/formatters":16,"./web3/method":20,"./web3/net":21,"./web3/property":22,"./web3/requestmanager":24,"./web3/shh":25,"./web3/watches":26}],9:[function(require,module,exports){ +},{"./utils/config":5,"./utils/sha3":6,"./utils/utils":7,"./version.json":8,"./web3/batch":10,"./web3/db":12,"./web3/eth":14,"./web3/filter":16,"./web3/formatters":17,"./web3/method":22,"./web3/net":24,"./web3/property":25,"./web3/requestmanager":27,"./web3/shh":28,"./web3/watches":30}],10:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -1470,7 +1551,7 @@ module.exports = web3; You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . */ -/** +/** * @file batch.js * @author Marek Kotewicz * @date 2015 @@ -1510,13 +1591,13 @@ Batch.prototype.execute = function () { requests[index].callback(err, result); } }); - }); + }); }; module.exports = Batch; -},{"./requestmanager":24}],10:[function(require,module,exports){ +},{"./requestmanager":27}],11:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -1533,13 +1614,13 @@ module.exports = Batch; You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . 
*/ -/** +/** * @file contract.js * @author Marek Kotewicz * @date 2014 */ -var web3 = require('../web3'); +var web3 = require('../web3'); var utils = require('../utils/utils'); var coder = require('../solidity/coder'); var SolidityEvent = require('./event'); @@ -1621,7 +1702,7 @@ var ContractFactory = function (abi) { /** * Should be called to create new contract on a blockchain - * + * * @method new * @param {Any} contract constructor param1 (optional) * @param {Any} contract constructor param2 (optional) @@ -1654,14 +1735,14 @@ ContractFactory.prototype.new = function () { var address = web3.eth.sendTransaction(options); return this.at(address); } - + var self = this; web3.eth.sendTransaction(options, function (err, address) { if (err) { callback(err); } - self.at(address, callback); - }); + self.at(address, callback); + }); }; /** @@ -1675,10 +1756,10 @@ ContractFactory.prototype.new = function () { */ ContractFactory.prototype.at = function (address, callback) { // TODO: address is required - + if (callback) { callback(null, new Contract(this.abi, address)); - } + } return new Contract(this.abi, address); }; @@ -1698,7 +1779,7 @@ var Contract = function (abi, address) { module.exports = contract; -},{"../solidity/coder":1,"../utils/utils":6,"../web3":8,"./event":14,"./function":17}],11:[function(require,module,exports){ +},{"../solidity/coder":1,"../utils/utils":7,"../web3":9,"./event":15,"./function":18}],12:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -1756,7 +1837,7 @@ module.exports = { methods: methods }; -},{"./method":20}],12:[function(require,module,exports){ +},{"./method":22}],13:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -1773,7 +1854,7 @@ module.exports = { You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . */ -/** +/** * @file errors.js * @author Marek Kotewicz * @date 2015 @@ -1796,7 +1877,7 @@ module.exports = { }; -},{}],13:[function(require,module,exports){ +},{}],14:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -2073,7 +2154,7 @@ module.exports = { }; -},{"../utils/utils":6,"./formatters":16,"./method":20,"./property":22}],14:[function(require,module,exports){ +},{"../utils/utils":7,"./formatters":17,"./method":22,"./property":25}],15:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -2090,7 +2171,7 @@ module.exports = { You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . 
*/ -/** +/** * @file event.js * @author Marek Kotewicz * @date 2014 @@ -2100,6 +2181,7 @@ var utils = require('../utils/utils'); var coder = require('../solidity/coder'); var web3 = require('../web3'); var formatters = require('./formatters'); +var sha3 = require('../utils/sha3'); /** * This prototype should be used to create event filters @@ -2153,12 +2235,12 @@ SolidityEvent.prototype.typeName = function () { * @return {String} event signature */ SolidityEvent.prototype.signature = function () { - return web3.sha3(web3.fromAscii(this._name)).slice(2); + return sha3(this._name); }; /** * Should be used to encode indexed params and options to one final object - * + * * @method encode * @param {Object} indexed * @param {Object} options @@ -2189,7 +2271,7 @@ SolidityEvent.prototype.encode = function (indexed, options) { if (value === undefined || value === null) { return null; } - + if (utils.isArray(value)) { return value.map(function (v) { return '0x' + coder.encodeParam(i.type, v); @@ -2211,17 +2293,17 @@ SolidityEvent.prototype.encode = function (indexed, options) { * @return {Object} result object with decoded indexed && not indexed params */ SolidityEvent.prototype.decode = function (data) { - + data.data = data.data || ''; data.topics = data.topics || []; var argTopics = this._anonymous ? data.topics : data.topics.slice(1); var indexedData = argTopics.map(function (topics) { return topics.slice(2); }).join(""); - var indexedParams = coder.decodeParams(this.types(true), indexedData); + var indexedParams = coder.decodeParams(this.types(true), indexedData); var notIndexedData = data.data.slice(2); var notIndexedParams = coder.decodeParams(this.types(false), notIndexedData); - + var result = formatters.outputLogFormatter(data); result.event = this.displayName(); result.address = data.address; @@ -2269,7 +2351,7 @@ SolidityEvent.prototype.attachToContract = function (contract) { module.exports = SolidityEvent; -},{"../solidity/coder":1,"../utils/utils":6,"../web3":8,"./formatters":16}],15:[function(require,module,exports){ +},{"../solidity/coder":1,"../utils/sha3":6,"../utils/utils":7,"../web3":9,"./formatters":17}],16:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -2326,7 +2408,7 @@ var getOptions = function (options) { if (utils.isString(options)) { return options; - } + } options = options || {}; @@ -2342,8 +2424,8 @@ var getOptions = function (options) { to: options.to, address: options.address, fromBlock: formatters.inputBlockNumberFormatter(options.fromBlock), - toBlock: formatters.inputBlockNumberFormatter(options.toBlock) - }; + toBlock: formatters.inputBlockNumberFormatter(options.toBlock) + }; }; var Filter = function (options, methods, formatter) { @@ -2426,7 +2508,7 @@ Filter.prototype.get = function (callback) { module.exports = Filter; -},{"../utils/utils":6,"./formatters":16,"./requestmanager":24}],16:[function(require,module,exports){ +},{"../utils/utils":7,"./formatters":17,"./requestmanager":27}],17:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -2443,7 +2525,7 @@ module.exports = Filter; You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . 
*/ -/** +/** * @file formatters.js * @author Marek Kotewicz * @author Fabian Vogelsteller @@ -2507,12 +2589,12 @@ var inputTransactionFormatter = function (options){ options[key] = utils.fromDecimal(options[key]); }); - return options; + return options; }; /** * Formats the output of a transaction to its proper values - * + * * @method outputTransactionFormatter * @param {Object} transaction * @returns {Object} transaction @@ -2531,7 +2613,7 @@ var outputTransactionFormatter = function (tx){ * Formats the output of a block to its proper values * * @method outputBlockFormatter - * @param {Object} block object + * @param {Object} block object * @returns {Object} block object */ var outputBlockFormatter = function(block) { @@ -2558,7 +2640,7 @@ var outputBlockFormatter = function(block) { /** * Formats the output of a log - * + * * @method outputLogFormatter * @param {Object} log object * @returns {Object} log @@ -2599,7 +2681,7 @@ var inputPostFormatter = function(post) { return utils.fromAscii(topic); }); - return post; + return post; }; /** @@ -2646,7 +2728,7 @@ module.exports = { }; -},{"../utils/config":5,"../utils/utils":6}],17:[function(require,module,exports){ +},{"../utils/config":5,"../utils/utils":7}],18:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -2672,6 +2754,7 @@ module.exports = { var web3 = require('../web3'); var coder = require('../solidity/coder'); var utils = require('../utils/utils'); +var sha3 = require('../utils/sha3'); /** * This prototype should be used to call/sendTransaction to solidity functions @@ -2718,12 +2801,12 @@ SolidityFunction.prototype.toPayload = function (args) { * @return {String} function signature */ SolidityFunction.prototype.signature = function () { - return web3.sha3(web3.fromAscii(this._name)).slice(2, 10); + return sha3(this._name).slice(0, 8); }; SolidityFunction.prototype.unpackOutput = function (output) { - if (output === null) { + if (!output) { return; } @@ -2743,15 +2826,15 @@ SolidityFunction.prototype.unpackOutput = function (output) { * @return {String} output bytes */ SolidityFunction.prototype.call = function () { - var args = Array.prototype.slice.call(arguments); + var args = Array.prototype.slice.call(arguments).filter(function (a) {return a !== undefined; }); var callback = this.extractCallback(args); var payload = this.toPayload(args); if (!callback) { var output = web3.eth.call(payload); return this.unpackOutput(output); - } - + } + var self = this; web3.eth.call(payload, function (error, output) { callback(error, self.unpackOutput(output)); @@ -2765,18 +2848,35 @@ SolidityFunction.prototype.call = function () { * @param {Object} options */ SolidityFunction.prototype.sendTransaction = function () { - var args = Array.prototype.slice.call(arguments); + var args = Array.prototype.slice.call(arguments).filter(function (a) {return a !== undefined; }); var callback = this.extractCallback(args); var payload = this.toPayload(args); if (!callback) { - web3.eth.sendTransaction(payload); - return; + return web3.eth.sendTransaction(payload); } web3.eth.sendTransaction(payload, callback); }; +/** + * Should be used to estimateGas of solidity function + * + * @method estimateGas + * @param {Object} options + */ +SolidityFunction.prototype.estimateGas = function () { + var args = Array.prototype.slice.call(arguments); + var callback = this.extractCallback(args); + var payload = this.toPayload(args); + + if (!callback) { + return web3.eth.estimateGas(payload); + } + + web3.eth.estimateGas(payload, callback); +}; 
+ /** * Should be used to get function display name * @@ -2808,10 +2908,10 @@ SolidityFunction.prototype.request = function () { var callback = this.extractCallback(args); var payload = this.toPayload(args); var format = this.unpackOutput.bind(this); - + return { callback: callback, - payload: payload, + payload: payload, format: format }; }; @@ -2844,6 +2944,7 @@ SolidityFunction.prototype.attachToContract = function (contract) { execute.request = this.request.bind(this); execute.call = this.call.bind(this); execute.sendTransaction = this.sendTransaction.bind(this); + execute.estimateGas = this.estimateGas.bind(this); var displayName = this.displayName(); if (!contract[displayName]) { contract[displayName] = execute; @@ -2854,7 +2955,7 @@ SolidityFunction.prototype.attachToContract = function (contract) { module.exports = SolidityFunction; -},{"../solidity/coder":1,"../utils/utils":6,"../web3":8}],18:[function(require,module,exports){ +},{"../solidity/coder":1,"../utils/sha3":6,"../utils/utils":7,"../web3":9}],19:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -2892,7 +2993,7 @@ HttpProvider.prototype.send = function (payload) { var request = new XMLHttpRequest(); request.open('POST', this.host, false); - + try { request.send(JSON.stringify(payload)); } catch(error) { @@ -2911,7 +3012,7 @@ HttpProvider.prototype.send = function (payload) { try { result = JSON.parse(result); } catch(e) { - throw errors.InvalidResponse(result); + throw errors.InvalidResponse(result); } return result; @@ -2927,7 +3028,7 @@ HttpProvider.prototype.sendAsync = function (payload, callback) { try { result = JSON.parse(result); } catch(e) { - error = errors.InvalidResponse(result); + error = errors.InvalidResponse(result); } callback(error, result); @@ -2946,7 +3047,117 @@ HttpProvider.prototype.sendAsync = function (payload, callback) { module.exports = HttpProvider; -},{"./errors":12,"xmlhttprequest":4}],19:[function(require,module,exports){ +},{"./errors":13,"xmlhttprequest":4}],20:[function(require,module,exports){ +/* + This file is part of ethereum.js. + + ethereum.js is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + ethereum.js is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with ethereum.js. If not, see . 
+*/ +/** + * @file icap.js + * @author Marek Kotewicz + * @date 2015 + */ + +var utils = require('../utils/utils'); + +/** + * This prototype should be used to extract necessary information from iban address + * + * @param {String} iban + */ +var ICAP = function (iban) { + this._iban = iban; +}; + +/** + * Should be called to check if icap is correct + * + * @method isValid + * @returns {Boolean} true if it is, otherwise false + */ +ICAP.prototype.isValid = function () { + return utils.isIBAN(this._iban); +}; + +/** + * Should be called to check if iban number is direct + * + * @method isDirect + * @returns {Boolean} true if it is, otherwise false + */ +ICAP.prototype.isDirect = function () { + return this._iban.length === 34; +}; + +/** + * Should be called to check if iban number if indirect + * + * @method isIndirect + * @returns {Boolean} true if it is, otherwise false + */ +ICAP.prototype.isIndirect = function () { + return this._iban.length === 20; +}; + +/** + * Should be called to get iban checksum + * Uses the mod-97-10 checksumming protocol (ISO/IEC 7064:2003) + * + * @method checksum + * @returns {String} checksum + */ +ICAP.prototype.checksum = function () { + return this._iban.substr(2, 2); +}; + +/** + * Should be called to get institution identifier + * eg. XREG + * + * @method institution + * @returns {String} institution identifier + */ +ICAP.prototype.institution = function () { + return this.isIndirect() ? this._iban.substr(7, 4) : ''; +}; + +/** + * Should be called to get client identifier within institution + * eg. GAVOFYORK + * + * @method client + * @returns {String} client identifier + */ +ICAP.prototype.client = function () { + return this.isIndirect() ? this._iban.substr(11) : ''; +}; + +/** + * Should be called to get client direct address + * + * @method address + * @returns {String} client direct address + */ +ICAP.prototype.address = function () { + return this.isDirect() ? this._iban.substr(4) : ''; +}; + +module.exports = ICAP; + + +},{"../utils/utils":7}],21:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -3039,7 +3250,7 @@ Jsonrpc.prototype.toBatchPayload = function (messages) { module.exports = Jsonrpc; -},{}],20:[function(require,module,exports){ +},{}],22:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -3100,7 +3311,7 @@ Method.prototype.extractCallback = function (args) { /** * Should be called to check if the number of arguments is correct - * + * * @method validateArgs * @param {Array} arguments * @throws {Error} if it is not @@ -3113,7 +3324,7 @@ Method.prototype.validateArgs = function (args) { /** * Should be called to format input args of method - * + * * @method formatInput * @param {Array} * @return {Array} @@ -3141,7 +3352,7 @@ Method.prototype.formatOutput = function (result) { /** * Should attach function to method - * + * * @method attachToObject * @param {Object} * @param {Function} @@ -3155,7 +3366,7 @@ Method.prototype.attachToObject = function (obj) { obj[name[0]] = obj[name[0]] || {}; obj[name[0]][name[1]] = func; } else { - obj[name[0]] = func; + obj[name[0]] = func; } }; @@ -3213,7 +3424,55 @@ Method.prototype.send = function () { module.exports = Method; -},{"../utils/utils":6,"./errors":12,"./requestmanager":24}],21:[function(require,module,exports){ +},{"../utils/utils":7,"./errors":13,"./requestmanager":27}],23:[function(require,module,exports){ +/* + This file is part of ethereum.js. 
+ + ethereum.js is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + ethereum.js is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with ethereum.js. If not, see . +*/ +/** + * @file namereg.js + * @author Marek Kotewicz + * @date 2015 + */ + +var contract = require('./contract'); + +var address = '0xc6d9d2cd449a754c494264e1809c50e34d64562b'; + +var abi = [ + {"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"name","outputs":[{"name":"o_name","type":"bytes32"}],"type":"function"}, + {"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"owner","outputs":[{"name":"","type":"address"}],"type":"function"}, + {"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"content","outputs":[{"name":"","type":"bytes32"}],"type":"function"}, + {"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"addr","outputs":[{"name":"","type":"address"}],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[],"type":"function"}, + {"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"subRegistrar","outputs":[{"name":"o_subRegistrar","type":"address"}],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_newOwner","type":"address"}],"name":"transfer","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_registrar","type":"address"}],"name":"setSubRegistrar","outputs":[],"type":"function"}, + {"constant":false,"inputs":[],"name":"Registrar","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_a","type":"address"},{"name":"_primary","type":"bool"}],"name":"setAddress","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_content","type":"bytes32"}],"name":"setContent","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"disown","outputs":[],"type":"function"}, + {"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"register","outputs":[{"name":"","type":"address"}],"type":"function"}, + {"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"}],"name":"Changed","type":"event"}, + {"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"addr","type":"address"}],"name":"PrimaryChanged","type":"event"} +]; + +module.exports = contract(abi).at(address); + + +},{"./contract":11}],24:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -3263,7 +3522,7 @@ module.exports = { }; -},{"../utils/utils":6,"./property":22}],22:[function(require,module,exports){ +},{"../utils/utils":7,"./property":25}],25:[function(require,module,exports){ /* This file is part of ethereum.js. 
@@ -3299,7 +3558,7 @@ var Property = function (options) { /** * Should be called to format input args of method - * + * * @method formatInput * @param {Array} * @return {Array} @@ -3321,7 +3580,7 @@ Property.prototype.formatOutput = function (result) { /** * Should attach function to method - * + * * @method attachToObject * @param {Object} * @param {Function} @@ -3338,7 +3597,7 @@ Property.prototype.attachToObject = function (obj) { obj = obj[names[0]]; name = names[1]; } - + Object.defineProperty(obj, name, proto); var toAsyncName = function (prefix, name) { @@ -3381,7 +3640,7 @@ Property.prototype.getAsync = function (callback) { module.exports = Property; -},{"./requestmanager":24}],23:[function(require,module,exports){ +},{"./requestmanager":27}],26:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -3416,7 +3675,7 @@ QtSyncProvider.prototype.send = function (payload) { module.exports = QtSyncProvider; -},{}],24:[function(require,module,exports){ +},{}],27:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -3433,7 +3692,7 @@ module.exports = QtSyncProvider; You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . */ -/** +/** * @file requestmanager.js * @author Jeffrey Wilcke * @author Marek Kotewicz @@ -3515,7 +3774,7 @@ RequestManager.prototype.sendAsync = function (data, callback) { if (err) { return callback(err); } - + if (!Jsonrpc.getInstance().isValidResponse(result)) { return callback(errors.InvalidResponse(result)); } @@ -3548,7 +3807,7 @@ RequestManager.prototype.sendBatch = function (data, callback) { } callback(err, results); - }); + }); }; /** @@ -3601,7 +3860,7 @@ RequestManager.prototype.stopPolling = function (pollId) { */ RequestManager.prototype.reset = function () { this.polls.forEach(function (poll) { - poll.uninstall(poll.id); + poll.uninstall(poll.id); }); this.polls = []; @@ -3639,7 +3898,7 @@ RequestManager.prototype.poll = function () { if (error) { return; } - + if (!utils.isArray(results)) { throw errors.InvalidResponse(results); } @@ -3664,7 +3923,7 @@ RequestManager.prototype.poll = function () { module.exports = RequestManager; -},{"../utils/config":5,"../utils/utils":6,"./errors":12,"./jsonrpc":19}],25:[function(require,module,exports){ +},{"../utils/config":5,"../utils/utils":7,"./errors":13,"./jsonrpc":21}],28:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -3691,8 +3950,8 @@ var Method = require('./method'); var formatters = require('./formatters'); var post = new Method({ - name: 'post', - call: 'shh_post', + name: 'post', + call: 'shh_post', params: 1, inputFormatter: [formatters.inputPostFormatter] }); @@ -3734,7 +3993,7 @@ module.exports = { }; -},{"./formatters":16,"./method":20}],26:[function(require,module,exports){ +},{"./formatters":17,"./method":22}],29:[function(require,module,exports){ /* This file is part of ethereum.js. @@ -3751,54 +4010,150 @@ module.exports = { You should have received a copy of the GNU Lesser General Public License along with ethereum.js. If not, see . 
*/ -/** @file watches.js - * @authors: - * Marek Kotewicz +/** + * @file transfer.js + * @author Marek Kotewicz * @date 2015 */ -var Method = require('./method'); +var web3 = require('../web3'); +var ICAP = require('./icap'); +var namereg = require('./namereg'); +var contract = require('./contract'); -/// @returns an array of objects describing web3.eth.filter api methods -var eth = function () { - var newFilterCall = function (args) { - var type = args[0]; +/** + * Should be used to make ICAP transfer + * + * @method transfer + * @param {String} iban number + * @param {String} from (address) + * @param {Value} value to be tranfered + * @param {Function} callback, callback + */ +var transfer = function (from, iban, value, callback) { + var icap = new ICAP(iban); + if (!icap.isValid()) { + throw new Error('invalid iban address'); + } - switch(type) { - case 'latest': - args.pop(); - this.params = 0; - return 'eth_newBlockFilter'; - case 'pending': - args.pop(); - this.params = 0; - return 'eth_newPendingTransactionFilter'; - default: - return 'eth_newFilter'; - } - }; + if (icap.isDirect()) { + return transferToAddress(from, icap.address(), value, callback); + } - var newFilter = new Method({ - name: 'newFilter', - call: newFilterCall, - params: 1 - }); + if (!callback) { + var address = namereg.addr(icap.institution()); + return deposit(from, address, value, icap.client()); + } - var uninstallFilter = new Method({ - name: 'uninstallFilter', - call: 'eth_uninstallFilter', - params: 1 + namereg.addr(icap.insitution(), function (err, address) { + return deposit(from, address, value, icap.client(), callback); }); - var getLogs = new Method({ - name: 'getLogs', - call: 'eth_getFilterLogs', - params: 1 - }); +}; - var poll = new Method({ - name: 'poll', - call: 'eth_getFilterChanges', +/** + * Should be used to transfer funds to certain address + * + * @method transferToAddress + * @param {String} address + * @param {String} from (address) + * @param {Value} value to be tranfered + * @param {Function} callback, callback + */ +var transferToAddress = function (from, address, value, callback) { + return web3.eth.sendTransaction({ + address: address, + from: from, + value: value + }, callback); +}; + +/** + * Should be used to deposit funds to generic Exchange contract (must implement deposit(bytes32) method!) + * + * @method deposit + * @param {String} address + * @param {String} from (address) + * @param {Value} value to be tranfered + * @param {String} client unique identifier + * @param {Function} callback, callback + */ +var deposit = function (from, address, value, client, callback) { + var abi = [{"constant":false,"inputs":[{"name":"name","type":"bytes32"}],"name":"deposit","outputs":[],"type":"function"}]; + return contract(abi).at(address).deposit(client, { + from: from, + value: value + }, callback); +}; + +module.exports = transfer; + + +},{"../web3":9,"./contract":11,"./icap":20,"./namereg":23}],30:[function(require,module,exports){ +/* + This file is part of ethereum.js. + + ethereum.js is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + ethereum.js is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public License + along with ethereum.js. If not, see . +*/ +/** @file watches.js + * @authors: + * Marek Kotewicz + * @date 2015 + */ + +var Method = require('./method'); + +/// @returns an array of objects describing web3.eth.filter api methods +var eth = function () { + var newFilterCall = function (args) { + var type = args[0]; + + switch(type) { + case 'latest': + args.pop(); + this.params = 0; + return 'eth_newBlockFilter'; + case 'pending': + args.pop(); + this.params = 0; + return 'eth_newPendingTransactionFilter'; + default: + return 'eth_newFilter'; + } + }; + + var newFilter = new Method({ + name: 'newFilter', + call: newFilterCall, + params: 1 + }); + + var uninstallFilter = new Method({ + name: 'uninstallFilter', + call: 'eth_uninstallFilter', + params: 1 + }); + + var getLogs = new Method({ + name: 'getLogs', + call: 'eth_getFilterLogs', + params: 1 + }); + + var poll = new Method({ + name: 'poll', + call: 'eth_getFilterChanges', params: 1 }); @@ -3850,19 +4205,4075 @@ module.exports = { }; -},{"./method":20}],27:[function(require,module,exports){ +},{"./method":22}],31:[function(require,module,exports){ + +},{}],32:[function(require,module,exports){ +;(function (root, factory) { + if (typeof exports === "object") { + // CommonJS + module.exports = exports = factory(); + } + else if (typeof define === "function" && define.amd) { + // AMD + define([], factory); + } + else { + // Global (browser) + root.CryptoJS = factory(); + } +}(this, function () { + + /** + * CryptoJS core components. + */ + var CryptoJS = CryptoJS || (function (Math, undefined) { + /** + * CryptoJS namespace. + */ + var C = {}; + + /** + * Library namespace. + */ + var C_lib = C.lib = {}; + + /** + * Base object for prototypal inheritance. + */ + var Base = C_lib.Base = (function () { + function F() {} + + return { + /** + * Creates a new object that inherits from this object. + * + * @param {Object} overrides Properties to copy into the new object. + * + * @return {Object} The new object. + * + * @static + * + * @example + * + * var MyType = CryptoJS.lib.Base.extend({ + * field: 'value', + * + * method: function () { + * } + * }); + */ + extend: function (overrides) { + // Spawn + F.prototype = this; + var subtype = new F(); + + // Augment + if (overrides) { + subtype.mixIn(overrides); + } + + // Create default initializer + if (!subtype.hasOwnProperty('init')) { + subtype.init = function () { + subtype.$super.init.apply(this, arguments); + }; + } + + // Initializer's prototype is the subtype object + subtype.init.prototype = subtype; + + // Reference supertype + subtype.$super = this; + + return subtype; + }, + + /** + * Extends this object and runs the init method. + * Arguments to create() will be passed to init(). + * + * @return {Object} The new object. + * + * @static + * + * @example + * + * var instance = MyType.create(); + */ + create: function () { + var instance = this.extend(); + instance.init.apply(instance, arguments); + + return instance; + }, + + /** + * Initializes a newly created object. + * Override this method to add some logic when your objects are created. + * + * @example + * + * var MyType = CryptoJS.lib.Base.extend({ + * init: function () { + * // ... + * } + * }); + */ + init: function () { + }, + + /** + * Copies properties into this object. + * + * @param {Object} properties The properties to mix in. 
+ * + * @example + * + * MyType.mixIn({ + * field: 'value' + * }); + */ + mixIn: function (properties) { + for (var propertyName in properties) { + if (properties.hasOwnProperty(propertyName)) { + this[propertyName] = properties[propertyName]; + } + } + + // IE won't copy toString using the loop above + if (properties.hasOwnProperty('toString')) { + this.toString = properties.toString; + } + }, + + /** + * Creates a copy of this object. + * + * @return {Object} The clone. + * + * @example + * + * var clone = instance.clone(); + */ + clone: function () { + return this.init.prototype.extend(this); + } + }; + }()); + + /** + * An array of 32-bit words. + * + * @property {Array} words The array of 32-bit words. + * @property {number} sigBytes The number of significant bytes in this word array. + */ + var WordArray = C_lib.WordArray = Base.extend({ + /** + * Initializes a newly created word array. + * + * @param {Array} words (Optional) An array of 32-bit words. + * @param {number} sigBytes (Optional) The number of significant bytes in the words. + * + * @example + * + * var wordArray = CryptoJS.lib.WordArray.create(); + * var wordArray = CryptoJS.lib.WordArray.create([0x00010203, 0x04050607]); + * var wordArray = CryptoJS.lib.WordArray.create([0x00010203, 0x04050607], 6); + */ + init: function (words, sigBytes) { + words = this.words = words || []; + + if (sigBytes != undefined) { + this.sigBytes = sigBytes; + } else { + this.sigBytes = words.length * 4; + } + }, + + /** + * Converts this word array to a string. + * + * @param {Encoder} encoder (Optional) The encoding strategy to use. Default: CryptoJS.enc.Hex + * + * @return {string} The stringified word array. + * + * @example + * + * var string = wordArray + ''; + * var string = wordArray.toString(); + * var string = wordArray.toString(CryptoJS.enc.Utf8); + */ + toString: function (encoder) { + return (encoder || Hex).stringify(this); + }, + + /** + * Concatenates a word array to this word array. + * + * @param {WordArray} wordArray The word array to append. + * + * @return {WordArray} This word array. + * + * @example + * + * wordArray1.concat(wordArray2); + */ + concat: function (wordArray) { + // Shortcuts + var thisWords = this.words; + var thatWords = wordArray.words; + var thisSigBytes = this.sigBytes; + var thatSigBytes = wordArray.sigBytes; + + // Clamp excess bits + this.clamp(); + + // Concat + if (thisSigBytes % 4) { + // Copy one byte at a time + for (var i = 0; i < thatSigBytes; i++) { + var thatByte = (thatWords[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff; + thisWords[(thisSigBytes + i) >>> 2] |= thatByte << (24 - ((thisSigBytes + i) % 4) * 8); + } + } else if (thatWords.length > 0xffff) { + // Copy one word at a time + for (var i = 0; i < thatSigBytes; i += 4) { + thisWords[(thisSigBytes + i) >>> 2] = thatWords[i >>> 2]; + } + } else { + // Copy all words at once + thisWords.push.apply(thisWords, thatWords); + } + this.sigBytes += thatSigBytes; + + // Chainable + return this; + }, + + /** + * Removes insignificant bits. + * + * @example + * + * wordArray.clamp(); + */ + clamp: function () { + // Shortcuts + var words = this.words; + var sigBytes = this.sigBytes; + + // Clamp + words[sigBytes >>> 2] &= 0xffffffff << (32 - (sigBytes % 4) * 8); + words.length = Math.ceil(sigBytes / 4); + }, + + /** + * Creates a copy of this word array. + * + * @return {WordArray} The clone. 
+ * + * @example + * + * var clone = wordArray.clone(); + */ + clone: function () { + var clone = Base.clone.call(this); + clone.words = this.words.slice(0); + + return clone; + }, + + /** + * Creates a word array filled with random bytes. + * + * @param {number} nBytes The number of random bytes to generate. + * + * @return {WordArray} The random word array. + * + * @static + * + * @example + * + * var wordArray = CryptoJS.lib.WordArray.random(16); + */ + random: function (nBytes) { + var words = []; + + var r = (function (m_w) { + var m_w = m_w; + var m_z = 0x3ade68b1; + var mask = 0xffffffff; + + return function () { + m_z = (0x9069 * (m_z & 0xFFFF) + (m_z >> 0x10)) & mask; + m_w = (0x4650 * (m_w & 0xFFFF) + (m_w >> 0x10)) & mask; + var result = ((m_z << 0x10) + m_w) & mask; + result /= 0x100000000; + result += 0.5; + return result * (Math.random() > .5 ? 1 : -1); + } + }); + + for (var i = 0, rcache; i < nBytes; i += 4) { + var _r = r((rcache || Math.random()) * 0x100000000); + + rcache = _r() * 0x3ade67b7; + words.push((_r() * 0x100000000) | 0); + } + + return new WordArray.init(words, nBytes); + } + }); + + /** + * Encoder namespace. + */ + var C_enc = C.enc = {}; + + /** + * Hex encoding strategy. + */ + var Hex = C_enc.Hex = { + /** + * Converts a word array to a hex string. + * + * @param {WordArray} wordArray The word array. + * + * @return {string} The hex string. + * + * @static + * + * @example + * + * var hexString = CryptoJS.enc.Hex.stringify(wordArray); + */ + stringify: function (wordArray) { + // Shortcuts + var words = wordArray.words; + var sigBytes = wordArray.sigBytes; + + // Convert + var hexChars = []; + for (var i = 0; i < sigBytes; i++) { + var bite = (words[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff; + hexChars.push((bite >>> 4).toString(16)); + hexChars.push((bite & 0x0f).toString(16)); + } + + return hexChars.join(''); + }, + + /** + * Converts a hex string to a word array. + * + * @param {string} hexStr The hex string. + * + * @return {WordArray} The word array. + * + * @static + * + * @example + * + * var wordArray = CryptoJS.enc.Hex.parse(hexString); + */ + parse: function (hexStr) { + // Shortcut + var hexStrLength = hexStr.length; + + // Convert + var words = []; + for (var i = 0; i < hexStrLength; i += 2) { + words[i >>> 3] |= parseInt(hexStr.substr(i, 2), 16) << (24 - (i % 8) * 4); + } + + return new WordArray.init(words, hexStrLength / 2); + } + }; + + /** + * Latin1 encoding strategy. + */ + var Latin1 = C_enc.Latin1 = { + /** + * Converts a word array to a Latin1 string. + * + * @param {WordArray} wordArray The word array. + * + * @return {string} The Latin1 string. + * + * @static + * + * @example + * + * var latin1String = CryptoJS.enc.Latin1.stringify(wordArray); + */ + stringify: function (wordArray) { + // Shortcuts + var words = wordArray.words; + var sigBytes = wordArray.sigBytes; + + // Convert + var latin1Chars = []; + for (var i = 0; i < sigBytes; i++) { + var bite = (words[i >>> 2] >>> (24 - (i % 4) * 8)) & 0xff; + latin1Chars.push(String.fromCharCode(bite)); + } + + return latin1Chars.join(''); + }, + + /** + * Converts a Latin1 string to a word array. + * + * @param {string} latin1Str The Latin1 string. + * + * @return {WordArray} The word array. 
+ * + * @static + * + * @example + * + * var wordArray = CryptoJS.enc.Latin1.parse(latin1String); + */ + parse: function (latin1Str) { + // Shortcut + var latin1StrLength = latin1Str.length; + + // Convert + var words = []; + for (var i = 0; i < latin1StrLength; i++) { + words[i >>> 2] |= (latin1Str.charCodeAt(i) & 0xff) << (24 - (i % 4) * 8); + } + + return new WordArray.init(words, latin1StrLength); + } + }; + + /** + * UTF-8 encoding strategy. + */ + var Utf8 = C_enc.Utf8 = { + /** + * Converts a word array to a UTF-8 string. + * + * @param {WordArray} wordArray The word array. + * + * @return {string} The UTF-8 string. + * + * @static + * + * @example + * + * var utf8String = CryptoJS.enc.Utf8.stringify(wordArray); + */ + stringify: function (wordArray) { + try { + return decodeURIComponent(escape(Latin1.stringify(wordArray))); + } catch (e) { + throw new Error('Malformed UTF-8 data'); + } + }, + + /** + * Converts a UTF-8 string to a word array. + * + * @param {string} utf8Str The UTF-8 string. + * + * @return {WordArray} The word array. + * + * @static + * + * @example + * + * var wordArray = CryptoJS.enc.Utf8.parse(utf8String); + */ + parse: function (utf8Str) { + return Latin1.parse(unescape(encodeURIComponent(utf8Str))); + } + }; + + /** + * Abstract buffered block algorithm template. + * + * The property blockSize must be implemented in a concrete subtype. + * + * @property {number} _minBufferSize The number of blocks that should be kept unprocessed in the buffer. Default: 0 + */ + var BufferedBlockAlgorithm = C_lib.BufferedBlockAlgorithm = Base.extend({ + /** + * Resets this block algorithm's data buffer to its initial state. + * + * @example + * + * bufferedBlockAlgorithm.reset(); + */ + reset: function () { + // Initial values + this._data = new WordArray.init(); + this._nDataBytes = 0; + }, + + /** + * Adds new data to this block algorithm's buffer. + * + * @param {WordArray|string} data The data to append. Strings are converted to a WordArray using UTF-8. + * + * @example + * + * bufferedBlockAlgorithm._append('data'); + * bufferedBlockAlgorithm._append(wordArray); + */ + _append: function (data) { + // Convert string to WordArray, else assume WordArray already + if (typeof data == 'string') { + data = Utf8.parse(data); + } + + // Append + this._data.concat(data); + this._nDataBytes += data.sigBytes; + }, + + /** + * Processes available data blocks. + * + * This method invokes _doProcessBlock(offset), which must be implemented by a concrete subtype. + * + * @param {boolean} doFlush Whether all blocks and partial blocks should be processed. + * + * @return {WordArray} The processed data. 
+ * + * @example + * + * var processedData = bufferedBlockAlgorithm._process(); + * var processedData = bufferedBlockAlgorithm._process(!!'flush'); + */ + _process: function (doFlush) { + // Shortcuts + var data = this._data; + var dataWords = data.words; + var dataSigBytes = data.sigBytes; + var blockSize = this.blockSize; + var blockSizeBytes = blockSize * 4; + + // Count blocks ready + var nBlocksReady = dataSigBytes / blockSizeBytes; + if (doFlush) { + // Round up to include partial blocks + nBlocksReady = Math.ceil(nBlocksReady); + } else { + // Round down to include only full blocks, + // less the number of blocks that must remain in the buffer + nBlocksReady = Math.max((nBlocksReady | 0) - this._minBufferSize, 0); + } + + // Count words ready + var nWordsReady = nBlocksReady * blockSize; + + // Count bytes ready + var nBytesReady = Math.min(nWordsReady * 4, dataSigBytes); + + // Process blocks + if (nWordsReady) { + for (var offset = 0; offset < nWordsReady; offset += blockSize) { + // Perform concrete-algorithm logic + this._doProcessBlock(dataWords, offset); + } + + // Remove processed words + var processedWords = dataWords.splice(0, nWordsReady); + data.sigBytes -= nBytesReady; + } + + // Return processed words + return new WordArray.init(processedWords, nBytesReady); + }, + + /** + * Creates a copy of this object. + * + * @return {Object} The clone. + * + * @example + * + * var clone = bufferedBlockAlgorithm.clone(); + */ + clone: function () { + var clone = Base.clone.call(this); + clone._data = this._data.clone(); + + return clone; + }, + + _minBufferSize: 0 + }); + + /** + * Abstract hasher template. + * + * @property {number} blockSize The number of 32-bit words this hasher operates on. Default: 16 (512 bits) + */ + var Hasher = C_lib.Hasher = BufferedBlockAlgorithm.extend({ + /** + * Configuration options. + */ + cfg: Base.extend(), + + /** + * Initializes a newly created hasher. + * + * @param {Object} cfg (Optional) The configuration options to use for this hash computation. + * + * @example + * + * var hasher = CryptoJS.algo.SHA256.create(); + */ + init: function (cfg) { + // Apply config defaults + this.cfg = this.cfg.extend(cfg); + + // Set initial values + this.reset(); + }, + + /** + * Resets this hasher to its initial state. + * + * @example + * + * hasher.reset(); + */ + reset: function () { + // Reset data buffer + BufferedBlockAlgorithm.reset.call(this); + + // Perform concrete-hasher logic + this._doReset(); + }, + + /** + * Updates this hasher with a message. + * + * @param {WordArray|string} messageUpdate The message to append. + * + * @return {Hasher} This hasher. + * + * @example + * + * hasher.update('message'); + * hasher.update(wordArray); + */ + update: function (messageUpdate) { + // Append + this._append(messageUpdate); + + // Update the hash + this._process(); + + // Chainable + return this; + }, + + /** + * Finalizes the hash computation. + * Note that the finalize operation is effectively a destructive, read-once operation. + * + * @param {WordArray|string} messageUpdate (Optional) A final message update. + * + * @return {WordArray} The hash. 
+ * + * @example + * + * var hash = hasher.finalize(); + * var hash = hasher.finalize('message'); + * var hash = hasher.finalize(wordArray); + */ + finalize: function (messageUpdate) { + // Final message update + if (messageUpdate) { + this._append(messageUpdate); + } + + // Perform concrete-hasher logic + var hash = this._doFinalize(); + + return hash; + }, + + blockSize: 512/32, + + /** + * Creates a shortcut function to a hasher's object interface. + * + * @param {Hasher} hasher The hasher to create a helper for. + * + * @return {Function} The shortcut function. + * + * @static + * + * @example + * + * var SHA256 = CryptoJS.lib.Hasher._createHelper(CryptoJS.algo.SHA256); + */ + _createHelper: function (hasher) { + return function (message, cfg) { + return new hasher.init(cfg).finalize(message); + }; + }, + + /** + * Creates a shortcut function to the HMAC's object interface. + * + * @param {Hasher} hasher The hasher to use in this HMAC helper. + * + * @return {Function} The shortcut function. + * + * @static + * + * @example + * + * var HmacSHA256 = CryptoJS.lib.Hasher._createHmacHelper(CryptoJS.algo.SHA256); + */ + _createHmacHelper: function (hasher) { + return function (message, key) { + return new C_algo.HMAC.init(hasher, key).finalize(message); + }; + } + }); + + /** + * Algorithm namespace. + */ + var C_algo = C.algo = {}; + + return C; + }(Math)); + + + return CryptoJS; + +})); +},{}],33:[function(require,module,exports){ +;(function (root, factory, undef) { + if (typeof exports === "object") { + // CommonJS + module.exports = exports = factory(require("./core"), require("./x64-core")); + } + else if (typeof define === "function" && define.amd) { + // AMD + define(["./core", "./x64-core"], factory); + } + else { + // Global (browser) + factory(root.CryptoJS); + } +}(this, function (CryptoJS) { + + (function (Math) { + // Shortcuts + var C = CryptoJS; + var C_lib = C.lib; + var WordArray = C_lib.WordArray; + var Hasher = C_lib.Hasher; + var C_x64 = C.x64; + var X64Word = C_x64.Word; + var C_algo = C.algo; + + // Constants tables + var RHO_OFFSETS = []; + var PI_INDEXES = []; + var ROUND_CONSTANTS = []; + + // Compute Constants + (function () { + // Compute rho offset constants + var x = 1, y = 0; + for (var t = 0; t < 24; t++) { + RHO_OFFSETS[x + 5 * y] = ((t + 1) * (t + 2) / 2) % 64; + + var newX = y % 5; + var newY = (2 * x + 3 * y) % 5; + x = newX; + y = newY; + } + + // Compute pi index constants + for (var x = 0; x < 5; x++) { + for (var y = 0; y < 5; y++) { + PI_INDEXES[x + 5 * y] = y + ((2 * x + 3 * y) % 5) * 5; + } + } + + // Compute round constants + var LFSR = 0x01; + for (var i = 0; i < 24; i++) { + var roundConstantMsw = 0; + var roundConstantLsw = 0; + + for (var j = 0; j < 7; j++) { + if (LFSR & 0x01) { + var bitPosition = (1 << j) - 1; + if (bitPosition < 32) { + roundConstantLsw ^= 1 << bitPosition; + } else /* if (bitPosition >= 32) */ { + roundConstantMsw ^= 1 << (bitPosition - 32); + } + } + + // Compute next LFSR + if (LFSR & 0x80) { + // Primitive polynomial over GF(2): x^8 + x^6 + x^5 + x^4 + 1 + LFSR = (LFSR << 1) ^ 0x71; + } else { + LFSR <<= 1; + } + } + + ROUND_CONSTANTS[i] = X64Word.create(roundConstantMsw, roundConstantLsw); + } + }()); + + // Reusable objects for temporary values + var T = []; + (function () { + for (var i = 0; i < 25; i++) { + T[i] = X64Word.create(); + } + }()); + + /** + * SHA-3 hash algorithm. + */ + var SHA3 = C_algo.SHA3 = Hasher.extend({ + /** + * Configuration options. 
+ * + * @property {number} outputLength + * The desired number of bits in the output hash. + * Only values permitted are: 224, 256, 384, 512. + * Default: 512 + */ + cfg: Hasher.cfg.extend({ + outputLength: 512 + }), + + _doReset: function () { + var state = this._state = [] + for (var i = 0; i < 25; i++) { + state[i] = new X64Word.init(); + } + + this.blockSize = (1600 - 2 * this.cfg.outputLength) / 32; + }, + + _doProcessBlock: function (M, offset) { + // Shortcuts + var state = this._state; + var nBlockSizeLanes = this.blockSize / 2; + + // Absorb + for (var i = 0; i < nBlockSizeLanes; i++) { + // Shortcuts + var M2i = M[offset + 2 * i]; + var M2i1 = M[offset + 2 * i + 1]; + + // Swap endian + M2i = ( + (((M2i << 8) | (M2i >>> 24)) & 0x00ff00ff) | + (((M2i << 24) | (M2i >>> 8)) & 0xff00ff00) + ); + M2i1 = ( + (((M2i1 << 8) | (M2i1 >>> 24)) & 0x00ff00ff) | + (((M2i1 << 24) | (M2i1 >>> 8)) & 0xff00ff00) + ); + + // Absorb message into state + var lane = state[i]; + lane.high ^= M2i1; + lane.low ^= M2i; + } + + // Rounds + for (var round = 0; round < 24; round++) { + // Theta + for (var x = 0; x < 5; x++) { + // Mix column lanes + var tMsw = 0, tLsw = 0; + for (var y = 0; y < 5; y++) { + var lane = state[x + 5 * y]; + tMsw ^= lane.high; + tLsw ^= lane.low; + } + + // Temporary values + var Tx = T[x]; + Tx.high = tMsw; + Tx.low = tLsw; + } + for (var x = 0; x < 5; x++) { + // Shortcuts + var Tx4 = T[(x + 4) % 5]; + var Tx1 = T[(x + 1) % 5]; + var Tx1Msw = Tx1.high; + var Tx1Lsw = Tx1.low; + + // Mix surrounding columns + var tMsw = Tx4.high ^ ((Tx1Msw << 1) | (Tx1Lsw >>> 31)); + var tLsw = Tx4.low ^ ((Tx1Lsw << 1) | (Tx1Msw >>> 31)); + for (var y = 0; y < 5; y++) { + var lane = state[x + 5 * y]; + lane.high ^= tMsw; + lane.low ^= tLsw; + } + } + + // Rho Pi + for (var laneIndex = 1; laneIndex < 25; laneIndex++) { + // Shortcuts + var lane = state[laneIndex]; + var laneMsw = lane.high; + var laneLsw = lane.low; + var rhoOffset = RHO_OFFSETS[laneIndex]; + + // Rotate lanes + if (rhoOffset < 32) { + var tMsw = (laneMsw << rhoOffset) | (laneLsw >>> (32 - rhoOffset)); + var tLsw = (laneLsw << rhoOffset) | (laneMsw >>> (32 - rhoOffset)); + } else /* if (rhoOffset >= 32) */ { + var tMsw = (laneLsw << (rhoOffset - 32)) | (laneMsw >>> (64 - rhoOffset)); + var tLsw = (laneMsw << (rhoOffset - 32)) | (laneLsw >>> (64 - rhoOffset)); + } + + // Transpose lanes + var TPiLane = T[PI_INDEXES[laneIndex]]; + TPiLane.high = tMsw; + TPiLane.low = tLsw; + } + + // Rho pi at x = y = 0 + var T0 = T[0]; + var state0 = state[0]; + T0.high = state0.high; + T0.low = state0.low; + + // Chi + for (var x = 0; x < 5; x++) { + for (var y = 0; y < 5; y++) { + // Shortcuts + var laneIndex = x + 5 * y; + var lane = state[laneIndex]; + var TLane = T[laneIndex]; + var Tx1Lane = T[((x + 1) % 5) + 5 * y]; + var Tx2Lane = T[((x + 2) % 5) + 5 * y]; + + // Mix rows + lane.high = TLane.high ^ (~Tx1Lane.high & Tx2Lane.high); + lane.low = TLane.low ^ (~Tx1Lane.low & Tx2Lane.low); + } + } + + // Iota + var lane = state[0]; + var roundConstant = ROUND_CONSTANTS[round]; + lane.high ^= roundConstant.high; + lane.low ^= roundConstant.low;; + } + }, + + _doFinalize: function () { + // Shortcuts + var data = this._data; + var dataWords = data.words; + var nBitsTotal = this._nDataBytes * 8; + var nBitsLeft = data.sigBytes * 8; + var blockSizeBits = this.blockSize * 32; + + // Add padding + dataWords[nBitsLeft >>> 5] |= 0x1 << (24 - nBitsLeft % 32); + dataWords[((Math.ceil((nBitsLeft + 1) / blockSizeBits) * blockSizeBits) >>> 5) - 1] |= 0x80; + 
data.sigBytes = dataWords.length * 4; + + // Hash final blocks + this._process(); + + // Shortcuts + var state = this._state; + var outputLengthBytes = this.cfg.outputLength / 8; + var outputLengthLanes = outputLengthBytes / 8; + + // Squeeze + var hashWords = []; + for (var i = 0; i < outputLengthLanes; i++) { + // Shortcuts + var lane = state[i]; + var laneMsw = lane.high; + var laneLsw = lane.low; + + // Swap endian + laneMsw = ( + (((laneMsw << 8) | (laneMsw >>> 24)) & 0x00ff00ff) | + (((laneMsw << 24) | (laneMsw >>> 8)) & 0xff00ff00) + ); + laneLsw = ( + (((laneLsw << 8) | (laneLsw >>> 24)) & 0x00ff00ff) | + (((laneLsw << 24) | (laneLsw >>> 8)) & 0xff00ff00) + ); + + // Squeeze state to retrieve hash + hashWords.push(laneLsw); + hashWords.push(laneMsw); + } + + // Return final computed hash + return new WordArray.init(hashWords, outputLengthBytes); + }, + + clone: function () { + var clone = Hasher.clone.call(this); + + var state = clone._state = this._state.slice(0); + for (var i = 0; i < 25; i++) { + state[i] = state[i].clone(); + } + + return clone; + } + }); + + /** + * Shortcut function to the hasher's object interface. + * + * @param {WordArray|string} message The message to hash. + * + * @return {WordArray} The hash. + * + * @static + * + * @example + * + * var hash = CryptoJS.SHA3('message'); + * var hash = CryptoJS.SHA3(wordArray); + */ + C.SHA3 = Hasher._createHelper(SHA3); + + /** + * Shortcut function to the HMAC's object interface. + * + * @param {WordArray|string} message The message to hash. + * @param {WordArray|string} key The secret key. + * + * @return {WordArray} The HMAC. + * + * @static + * + * @example + * + * var hmac = CryptoJS.HmacSHA3(message, key); + */ + C.HmacSHA3 = Hasher._createHmacHelper(SHA3); + }(Math)); + + + return CryptoJS.SHA3; + +})); +},{"./core":32,"./x64-core":34}],34:[function(require,module,exports){ +;(function (root, factory) { + if (typeof exports === "object") { + // CommonJS + module.exports = exports = factory(require("./core")); + } + else if (typeof define === "function" && define.amd) { + // AMD + define(["./core"], factory); + } + else { + // Global (browser) + factory(root.CryptoJS); + } +}(this, function (CryptoJS) { + + (function (undefined) { + // Shortcuts + var C = CryptoJS; + var C_lib = C.lib; + var Base = C_lib.Base; + var X32WordArray = C_lib.WordArray; + + /** + * x64 namespace. + */ + var C_x64 = C.x64 = {}; + + /** + * A 64-bit word. + */ + var X64Word = C_x64.Word = Base.extend({ + /** + * Initializes a newly created 64-bit word. + * + * @param {number} high The high 32 bits. + * @param {number} low The low 32 bits. + * + * @example + * + * var x64Word = CryptoJS.x64.Word.create(0x00010203, 0x04050607); + */ + init: function (high, low) { + this.high = high; + this.low = low; + } + + /** + * Bitwise NOTs this word. + * + * @return {X64Word} A new x64-Word object after negating. + * + * @example + * + * var negated = x64Word.not(); + */ + // not: function () { + // var high = ~this.high; + // var low = ~this.low; + + // return X64Word.create(high, low); + // }, + + /** + * Bitwise ANDs this word with the passed word. + * + * @param {X64Word} word The x64-Word to AND with this word. + * + * @return {X64Word} A new x64-Word object after ANDing. + * + * @example + * + * var anded = x64Word.and(anotherX64Word); + */ + // and: function (word) { + // var high = this.high & word.high; + // var low = this.low & word.low; + + // return X64Word.create(high, low); + // }, + + /** + * Bitwise ORs this word with the passed word. 
+ * + * @param {X64Word} word The x64-Word to OR with this word. + * + * @return {X64Word} A new x64-Word object after ORing. + * + * @example + * + * var ored = x64Word.or(anotherX64Word); + */ + // or: function (word) { + // var high = this.high | word.high; + // var low = this.low | word.low; + + // return X64Word.create(high, low); + // }, + + /** + * Bitwise XORs this word with the passed word. + * + * @param {X64Word} word The x64-Word to XOR with this word. + * + * @return {X64Word} A new x64-Word object after XORing. + * + * @example + * + * var xored = x64Word.xor(anotherX64Word); + */ + // xor: function (word) { + // var high = this.high ^ word.high; + // var low = this.low ^ word.low; + + // return X64Word.create(high, low); + // }, + + /** + * Shifts this word n bits to the left. + * + * @param {number} n The number of bits to shift. + * + * @return {X64Word} A new x64-Word object after shifting. + * + * @example + * + * var shifted = x64Word.shiftL(25); + */ + // shiftL: function (n) { + // if (n < 32) { + // var high = (this.high << n) | (this.low >>> (32 - n)); + // var low = this.low << n; + // } else { + // var high = this.low << (n - 32); + // var low = 0; + // } + + // return X64Word.create(high, low); + // }, + + /** + * Shifts this word n bits to the right. + * + * @param {number} n The number of bits to shift. + * + * @return {X64Word} A new x64-Word object after shifting. + * + * @example + * + * var shifted = x64Word.shiftR(7); + */ + // shiftR: function (n) { + // if (n < 32) { + // var low = (this.low >>> n) | (this.high << (32 - n)); + // var high = this.high >>> n; + // } else { + // var low = this.high >>> (n - 32); + // var high = 0; + // } + + // return X64Word.create(high, low); + // }, + + /** + * Rotates this word n bits to the left. + * + * @param {number} n The number of bits to rotate. + * + * @return {X64Word} A new x64-Word object after rotating. + * + * @example + * + * var rotated = x64Word.rotL(25); + */ + // rotL: function (n) { + // return this.shiftL(n).or(this.shiftR(64 - n)); + // }, + + /** + * Rotates this word n bits to the right. + * + * @param {number} n The number of bits to rotate. + * + * @return {X64Word} A new x64-Word object after rotating. + * + * @example + * + * var rotated = x64Word.rotR(7); + */ + // rotR: function (n) { + // return this.shiftR(n).or(this.shiftL(64 - n)); + // }, + + /** + * Adds this word with the passed word. + * + * @param {X64Word} word The x64-Word to add with this word. + * + * @return {X64Word} A new x64-Word object after adding. + * + * @example + * + * var added = x64Word.add(anotherX64Word); + */ + // add: function (word) { + // var low = (this.low + word.low) | 0; + // var carry = (low >>> 0) < (this.low >>> 0) ? 1 : 0; + // var high = (this.high + word.high + carry) | 0; + + // return X64Word.create(high, low); + // } + }); + + /** + * An array of 64-bit words. + * + * @property {Array} words The array of CryptoJS.x64.Word objects. + * @property {number} sigBytes The number of significant bytes in this word array. + */ + var X64WordArray = C_x64.WordArray = Base.extend({ + /** + * Initializes a newly created word array. + * + * @param {Array} words (Optional) An array of CryptoJS.x64.Word objects. + * @param {number} sigBytes (Optional) The number of significant bytes in the words. 
+ * + * @example + * + * var wordArray = CryptoJS.x64.WordArray.create(); + * + * var wordArray = CryptoJS.x64.WordArray.create([ + * CryptoJS.x64.Word.create(0x00010203, 0x04050607), + * CryptoJS.x64.Word.create(0x18191a1b, 0x1c1d1e1f) + * ]); + * + * var wordArray = CryptoJS.x64.WordArray.create([ + * CryptoJS.x64.Word.create(0x00010203, 0x04050607), + * CryptoJS.x64.Word.create(0x18191a1b, 0x1c1d1e1f) + * ], 10); + */ + init: function (words, sigBytes) { + words = this.words = words || []; + + if (sigBytes != undefined) { + this.sigBytes = sigBytes; + } else { + this.sigBytes = words.length * 8; + } + }, + + /** + * Converts this 64-bit word array to a 32-bit word array. + * + * @return {CryptoJS.lib.WordArray} This word array's data as a 32-bit word array. + * + * @example + * + * var x32WordArray = x64WordArray.toX32(); + */ + toX32: function () { + // Shortcuts + var x64Words = this.words; + var x64WordsLength = x64Words.length; + + // Convert + var x32Words = []; + for (var i = 0; i < x64WordsLength; i++) { + var x64Word = x64Words[i]; + x32Words.push(x64Word.high); + x32Words.push(x64Word.low); + } + + return X32WordArray.create(x32Words, this.sigBytes); + }, + + /** + * Creates a copy of this word array. + * + * @return {X64WordArray} The clone. + * + * @example + * + * var clone = x64WordArray.clone(); + */ + clone: function () { + var clone = Base.clone.call(this); + + // Clone "words" array + var words = clone.words = this.words.slice(0); + + // Clone each X64Word object + var wordsLength = words.length; + for (var i = 0; i < wordsLength; i++) { + words[i] = words[i].clone(); + } + + return clone; + } + }); + }()); + + + return CryptoJS; + +})); +},{"./core":32}],"bignumber.js":[function(require,module,exports){ +/*! bignumber.js v2.0.7 https://github.com/MikeMcl/bignumber.js/LICENCE */ + +;(function (global) { + 'use strict'; + + /* + bignumber.js v2.0.7 + A JavaScript library for arbitrary-precision arithmetic. + https://github.com/MikeMcl/bignumber.js + Copyright (c) 2015 Michael Mclaughlin + MIT Expat Licence + */ + + + var BigNumber, crypto, parseNumeric, + isNumeric = /^-?(\d+(\.\d*)?|\.\d+)(e[+-]?\d+)?$/i, + mathceil = Math.ceil, + mathfloor = Math.floor, + notBool = ' not a boolean or binary digit', + roundingMode = 'rounding mode', + tooManyDigits = 'number type has more than 15 significant digits', + ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ$_', + BASE = 1e14, + LOG_BASE = 14, + MAX_SAFE_INTEGER = 0x1fffffffffffff, // 2^53 - 1 + // MAX_INT32 = 0x7fffffff, // 2^31 - 1 + POWS_TEN = [1, 10, 100, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13], + SQRT_BASE = 1e7, + + /* + * The limit on the value of DECIMAL_PLACES, TO_EXP_NEG, TO_EXP_POS, MIN_EXP, MAX_EXP, and + * the arguments to toExponential, toFixed, toFormat, and toPrecision, beyond which an + * exception is thrown (if ERRORS is true). + */ + MAX = 1E9; // 0 to MAX_INT32 + + + /* + * Create and return a BigNumber constructor. + */ + function another(configObj) { + var div, + + // id tracks the caller function, so its name can be included in error messages. + id = 0, + P = BigNumber.prototype, + ONE = new BigNumber(1), + + + /********************************* EDITABLE DEFAULTS **********************************/ + + + /* + * The default values below must be integers within the inclusive ranges stated. + * The values can also be changed at run-time using BigNumber.config. + */ + + // The maximum number of decimal places for operations involving division. 
+ DECIMAL_PLACES = 20, // 0 to MAX + + /* + * The rounding mode used when rounding to the above decimal places, and when using + * toExponential, toFixed, toFormat and toPrecision, and round (default value). + * UP 0 Away from zero. + * DOWN 1 Towards zero. + * CEIL 2 Towards +Infinity. + * FLOOR 3 Towards -Infinity. + * HALF_UP 4 Towards nearest neighbour. If equidistant, up. + * HALF_DOWN 5 Towards nearest neighbour. If equidistant, down. + * HALF_EVEN 6 Towards nearest neighbour. If equidistant, towards even neighbour. + * HALF_CEIL 7 Towards nearest neighbour. If equidistant, towards +Infinity. + * HALF_FLOOR 8 Towards nearest neighbour. If equidistant, towards -Infinity. + */ + ROUNDING_MODE = 4, // 0 to 8 + + // EXPONENTIAL_AT : [TO_EXP_NEG , TO_EXP_POS] + + // The exponent value at and beneath which toString returns exponential notation. + // Number type: -7 + TO_EXP_NEG = -7, // 0 to -MAX + + // The exponent value at and above which toString returns exponential notation. + // Number type: 21 + TO_EXP_POS = 21, // 0 to MAX + + // RANGE : [MIN_EXP, MAX_EXP] + + // The minimum exponent value, beneath which underflow to zero occurs. + // Number type: -324 (5e-324) + MIN_EXP = -1e7, // -1 to -MAX + + // The maximum exponent value, above which overflow to Infinity occurs. + // Number type: 308 (1.7976931348623157e+308) + // For MAX_EXP > 1e7, e.g. new BigNumber('1e100000000').plus(1) may be slow. + MAX_EXP = 1e7, // 1 to MAX + + // Whether BigNumber Errors are ever thrown. + ERRORS = true, // true or false + + // Change to intValidatorNoErrors if ERRORS is false. + isValidInt = intValidatorWithErrors, // intValidatorWithErrors/intValidatorNoErrors + + // Whether to use cryptographically-secure random number generation, if available. + CRYPTO = false, // true or false + + /* + * The modulo mode used when calculating the modulus: a mod n. + * The quotient (q = a / n) is calculated according to the corresponding rounding mode. + * The remainder (r) is calculated as: r = a - n * q. + * + * UP 0 The remainder is positive if the dividend is negative, else is negative. + * DOWN 1 The remainder has the same sign as the dividend. + * This modulo mode is commonly known as 'truncated division' and is + * equivalent to (a % n) in JavaScript. + * FLOOR 3 The remainder has the same sign as the divisor (Python %). + * HALF_EVEN 6 This modulo mode implements the IEEE 754 remainder function. + * EUCLID 9 Euclidian division. q = sign(n) * floor(a / abs(n)). + * The remainder is always positive. + * + * The truncated division, floored division, Euclidian division and IEEE 754 remainder + * modes are commonly used for the modulus operation. + * Although the other rounding modes can also be used, they may not give useful results. + */ + MODULO_MODE = 1, // 0 to 9 + + // The maximum number of significant digits of the result of the toPower operation. + // If POW_PRECISION is 0, there will be unlimited significant digits. + POW_PRECISION = 100, // 0 to MAX + + // The format specification used by the BigNumber.prototype.toFormat method. + FORMAT = { + decimalSeparator: '.', + groupSeparator: ',', + groupSize: 3, + secondaryGroupSize: 0, + fractionGroupSeparator: '\xA0', // non-breaking space + fractionGroupSize: 0 + }; + + + /******************************************************************************************/ + + + // CONSTRUCTOR + + + /* + * The BigNumber constructor and exported function. + * Create and return a new instance of a BigNumber object. + * + * n {number|string|BigNumber} A numeric value. 
+ * [b] {number} The base of n. Integer, 2 to 64 inclusive. + */ + function BigNumber( n, b ) { + var c, e, i, num, len, str, + x = this; + + // Enable constructor usage without new. + if ( !( x instanceof BigNumber ) ) { + + // 'BigNumber() constructor call without new: {n}' + if (ERRORS) raise( 26, 'constructor call without new', n ); + return new BigNumber( n, b ); + } -},{}],"bignumber.js":[function(require,module,exports){ -'use strict'; + // 'new BigNumber() base not an integer: {b}' + // 'new BigNumber() base out of range: {b}' + if ( b == null || !isValidInt( b, 2, 64, id, 'base' ) ) { + + // Duplicate. + if ( n instanceof BigNumber ) { + x.s = n.s; + x.e = n.e; + x.c = ( n = n.c ) ? n.slice() : n; + id = 0; + return; + } + + if ( ( num = typeof n == 'number' ) && n * 0 == 0 ) { + x.s = 1 / n < 0 ? ( n = -n, -1 ) : 1; + + // Fast path for integers. + if ( n === ~~n ) { + for ( e = 0, i = n; i >= 10; i /= 10, e++ ); + x.e = e; + x.c = [n]; + id = 0; + return; + } + + str = n + ''; + } else { + if ( !isNumeric.test( str = n + '' ) ) return parseNumeric( x, str, num ); + x.s = str.charCodeAt(0) === 45 ? ( str = str.slice(1), -1 ) : 1; + } + } else { + b = b | 0; + str = n + ''; + + // Ensure return value is rounded to DECIMAL_PLACES as with other bases. + // Allow exponential notation to be used with base 10 argument. + if ( b == 10 ) { + x = new BigNumber( n instanceof BigNumber ? n : str ); + return round( x, DECIMAL_PLACES + x.e + 1, ROUNDING_MODE ); + } + + // Avoid potential interpretation of Infinity and NaN as base 44+ values. + // Any number in exponential form will fail due to the [Ee][+-]. + if ( ( num = typeof n == 'number' ) && n * 0 != 0 || + !( new RegExp( '^-?' + ( c = '[' + ALPHABET.slice( 0, b ) + ']+' ) + + '(?:\\.' + c + ')?$',b < 37 ? 'i' : '' ) ).test(str) ) { + return parseNumeric( x, str, num, b ); + } + + if (num) { + x.s = 1 / n < 0 ? ( str = str.slice(1), -1 ) : 1; + + if ( ERRORS && str.replace( /^0\.0*|\./, '' ).length > 15 ) { + + // 'new BigNumber() number type has more than 15 significant digits: {n}' + raise( id, tooManyDigits, n ); + } + + // Prevent later check for length on converted number. + num = false; + } else { + x.s = str.charCodeAt(0) === 45 ? ( str = str.slice(1), -1 ) : 1; + } + + str = convertBase( str, 10, b, x.s ); + } + + // Decimal point? + if ( ( e = str.indexOf('.') ) > -1 ) str = str.replace( '.', '' ); + + // Exponential form? + if ( ( i = str.search( /e/i ) ) > 0 ) { + + // Determine exponent. + if ( e < 0 ) e = i; + e += +str.slice( i + 1 ); + str = str.substring( 0, i ); + } else if ( e < 0 ) { + + // Integer. + e = str.length; + } + + // Determine leading zeros. + for ( i = 0; str.charCodeAt(i) === 48; i++ ); + + // Determine trailing zeros. + for ( len = str.length; str.charCodeAt(--len) === 48; ); + str = str.slice( i, len + 1 ); + + if (str) { + len = str.length; + + // Disallow numbers with over 15 significant digits if number type. + // 'new BigNumber() number type has more than 15 significant digits: {n}' + if ( num && ERRORS && len > 15 ) raise( id, tooManyDigits, x.s * n ); + + e = e - i - 1; + + // Overflow? + if ( e > MAX_EXP ) { -module.exports = BigNumber; // jshint ignore:line + // Infinity. + x.c = x.e = null; + // Underflow? + } else if ( e < MIN_EXP ) { + + // Zero. + x.c = [ x.e = 0 ]; + } else { + x.e = e; + x.c = []; + + // Transform base + + // e is the base 10 exponent. + // i is where to slice str to get the first element of the coefficient array. 
+ i = ( e + 1 ) % LOG_BASE; + if ( e < 0 ) i += LOG_BASE; + + if ( i < len ) { + if (i) x.c.push( +str.slice( 0, i ) ); + + for ( len -= LOG_BASE; i < len; ) { + x.c.push( +str.slice( i, i += LOG_BASE ) ); + } + + str = str.slice(i); + i = LOG_BASE - str.length; + } else { + i -= len; + } + + for ( ; i--; str += '0' ); + x.c.push( +str ); + } + } else { + + // Zero. + x.c = [ x.e = 0 ]; + } + + id = 0; + } + + + // CONSTRUCTOR PROPERTIES + + + BigNumber.another = another; + + BigNumber.ROUND_UP = 0; + BigNumber.ROUND_DOWN = 1; + BigNumber.ROUND_CEIL = 2; + BigNumber.ROUND_FLOOR = 3; + BigNumber.ROUND_HALF_UP = 4; + BigNumber.ROUND_HALF_DOWN = 5; + BigNumber.ROUND_HALF_EVEN = 6; + BigNumber.ROUND_HALF_CEIL = 7; + BigNumber.ROUND_HALF_FLOOR = 8; + BigNumber.EUCLID = 9; + + + /* + * Configure infrequently-changing library-wide settings. + * + * Accept an object or an argument list, with one or many of the following properties or + * parameters respectively: + * + * DECIMAL_PLACES {number} Integer, 0 to MAX inclusive + * ROUNDING_MODE {number} Integer, 0 to 8 inclusive + * EXPONENTIAL_AT {number|number[]} Integer, -MAX to MAX inclusive or + * [integer -MAX to 0 incl., 0 to MAX incl.] + * RANGE {number|number[]} Non-zero integer, -MAX to MAX inclusive or + * [integer -MAX to -1 incl., integer 1 to MAX incl.] + * ERRORS {boolean|number} true, false, 1 or 0 + * CRYPTO {boolean|number} true, false, 1 or 0 + * MODULO_MODE {number} 0 to 9 inclusive + * POW_PRECISION {number} 0 to MAX inclusive + * FORMAT {object} See BigNumber.prototype.toFormat + * decimalSeparator {string} + * groupSeparator {string} + * groupSize {number} + * secondaryGroupSize {number} + * fractionGroupSeparator {string} + * fractionGroupSize {number} + * + * (The values assigned to the above FORMAT object properties are not checked for validity.) + * + * E.g. + * BigNumber.config(20, 4) is equivalent to + * BigNumber.config({ DECIMAL_PLACES : 20, ROUNDING_MODE : 4 }) + * + * Ignore properties/parameters set to null or undefined. + * Return an object with the properties current values. + */ + BigNumber.config = function () { + var v, p, + i = 0, + r = {}, + a = arguments, + o = a[0], + has = o && typeof o == 'object' + ? function () { if ( o.hasOwnProperty(p) ) return ( v = o[p] ) != null; } + : function () { if ( a.length > i ) return ( v = a[i++] ) != null; }; + + // DECIMAL_PLACES {number} Integer, 0 to MAX inclusive. + // 'config() DECIMAL_PLACES not an integer: {v}' + // 'config() DECIMAL_PLACES out of range: {v}' + if ( has( p = 'DECIMAL_PLACES' ) && isValidInt( v, 0, MAX, 2, p ) ) { + DECIMAL_PLACES = v | 0; + } + r[p] = DECIMAL_PLACES; + + // ROUNDING_MODE {number} Integer, 0 to 8 inclusive. + // 'config() ROUNDING_MODE not an integer: {v}' + // 'config() ROUNDING_MODE out of range: {v}' + if ( has( p = 'ROUNDING_MODE' ) && isValidInt( v, 0, 8, 2, p ) ) { + ROUNDING_MODE = v | 0; + } + r[p] = ROUNDING_MODE; + + // EXPONENTIAL_AT {number|number[]} + // Integer, -MAX to MAX inclusive or [integer -MAX to 0 inclusive, 0 to MAX inclusive]. + // 'config() EXPONENTIAL_AT not an integer: {v}' + // 'config() EXPONENTIAL_AT out of range: {v}' + if ( has( p = 'EXPONENTIAL_AT' ) ) { + + if ( isArray(v) ) { + if ( isValidInt( v[0], -MAX, 0, 2, p ) && isValidInt( v[1], 0, MAX, 2, p ) ) { + TO_EXP_NEG = v[0] | 0; + TO_EXP_POS = v[1] | 0; + } + } else if ( isValidInt( v, -MAX, MAX, 2, p ) ) { + TO_EXP_NEG = -( TO_EXP_POS = ( v < 0 ? 
-v : v ) | 0 ); + } + } + r[p] = [ TO_EXP_NEG, TO_EXP_POS ]; + + // RANGE {number|number[]} Non-zero integer, -MAX to MAX inclusive or + // [integer -MAX to -1 inclusive, integer 1 to MAX inclusive]. + // 'config() RANGE not an integer: {v}' + // 'config() RANGE cannot be zero: {v}' + // 'config() RANGE out of range: {v}' + if ( has( p = 'RANGE' ) ) { + + if ( isArray(v) ) { + if ( isValidInt( v[0], -MAX, -1, 2, p ) && isValidInt( v[1], 1, MAX, 2, p ) ) { + MIN_EXP = v[0] | 0; + MAX_EXP = v[1] | 0; + } + } else if ( isValidInt( v, -MAX, MAX, 2, p ) ) { + if ( v | 0 ) MIN_EXP = -( MAX_EXP = ( v < 0 ? -v : v ) | 0 ); + else if (ERRORS) raise( 2, p + ' cannot be zero', v ); + } + } + r[p] = [ MIN_EXP, MAX_EXP ]; + + // ERRORS {boolean|number} true, false, 1 or 0. + // 'config() ERRORS not a boolean or binary digit: {v}' + if ( has( p = 'ERRORS' ) ) { + + if ( v === !!v || v === 1 || v === 0 ) { + id = 0; + isValidInt = ( ERRORS = !!v ) ? intValidatorWithErrors : intValidatorNoErrors; + } else if (ERRORS) { + raise( 2, p + notBool, v ); + } + } + r[p] = ERRORS; + + // CRYPTO {boolean|number} true, false, 1 or 0. + // 'config() CRYPTO not a boolean or binary digit: {v}' + // 'config() crypto unavailable: {crypto}' + if ( has( p = 'CRYPTO' ) ) { + + if ( v === !!v || v === 1 || v === 0 ) { + CRYPTO = !!( v && crypto && typeof crypto == 'object' ); + if ( v && !CRYPTO && ERRORS ) raise( 2, 'crypto unavailable', crypto ); + } else if (ERRORS) { + raise( 2, p + notBool, v ); + } + } + r[p] = CRYPTO; + + // MODULO_MODE {number} Integer, 0 to 9 inclusive. + // 'config() MODULO_MODE not an integer: {v}' + // 'config() MODULO_MODE out of range: {v}' + if ( has( p = 'MODULO_MODE' ) && isValidInt( v, 0, 9, 2, p ) ) { + MODULO_MODE = v | 0; + } + r[p] = MODULO_MODE; + + // POW_PRECISION {number} Integer, 0 to MAX inclusive. + // 'config() POW_PRECISION not an integer: {v}' + // 'config() POW_PRECISION out of range: {v}' + if ( has( p = 'POW_PRECISION' ) && isValidInt( v, 0, MAX, 2, p ) ) { + POW_PRECISION = v | 0; + } + r[p] = POW_PRECISION; + + // FORMAT {object} + // 'config() FORMAT not an object: {v}' + if ( has( p = 'FORMAT' ) ) { + + if ( typeof v == 'object' ) { + FORMAT = v; + } else if (ERRORS) { + raise( 2, p + ' not an object', v ); + } + } + r[p] = FORMAT; + + return r; + }; + + + /* + * Return a new BigNumber whose value is the maximum of the arguments. + * + * arguments {number|string|BigNumber} + */ + BigNumber.max = function () { return maxOrMin( arguments, P.lt ); }; + + + /* + * Return a new BigNumber whose value is the minimum of the arguments. + * + * arguments {number|string|BigNumber} + */ + BigNumber.min = function () { return maxOrMin( arguments, P.gt ); }; + + + /* + * Return a new BigNumber with a random value equal to or greater than 0 and less than 1, + * and with dp, or DECIMAL_PLACES if dp is omitted, decimal places (or less if trailing + * zeros are produced). + * + * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. + * + * 'random() decimal places not an integer: {dp}' + * 'random() decimal places out of range: {dp}' + * 'random() crypto unavailable: {crypto}' + */ + BigNumber.random = (function () { + var pow2_53 = 0x20000000000000; + + // Return a 53 bit integer n, where 0 <= n < 9007199254740992. + // Check if Math.random() produces more than 32 bits of randomness. + // If it does, assume at least 53 bits are produced, otherwise assume at least 30 bits. + // 0x40000000 is 2^30, 0x800000 is 2^23, 0x1fffff is 2^21 - 1. 
+ var random53bitInt = (Math.random() * pow2_53) & 0x1fffff + ? function () { return mathfloor( Math.random() * pow2_53 ); } + : function () { return ((Math.random() * 0x40000000 | 0) * 0x800000) + + (Math.random() * 0x800000 | 0); }; + + return function (dp) { + var a, b, e, k, v, + i = 0, + c = [], + rand = new BigNumber(ONE); + + dp = dp == null || !isValidInt( dp, 0, MAX, 14 ) ? DECIMAL_PLACES : dp | 0; + k = mathceil( dp / LOG_BASE ); + + if (CRYPTO) { + + // Browsers supporting crypto.getRandomValues. + if ( crypto && crypto.getRandomValues ) { + + a = crypto.getRandomValues( new Uint32Array( k *= 2 ) ); + + for ( ; i < k; ) { + + // 53 bits: + // ((Math.pow(2, 32) - 1) * Math.pow(2, 21)).toString(2) + // 11111 11111111 11111111 11111111 11100000 00000000 00000000 + // ((Math.pow(2, 32) - 1) >>> 11).toString(2) + // 11111 11111111 11111111 + // 0x20000 is 2^21. + v = a[i] * 0x20000 + (a[i + 1] >>> 11); + + // Rejection sampling: + // 0 <= v < 9007199254740992 + // Probability that v >= 9e15, is + // 7199254740992 / 9007199254740992 ~= 0.0008, i.e. 1 in 1251 + if ( v >= 9e15 ) { + b = crypto.getRandomValues( new Uint32Array(2) ); + a[i] = b[0]; + a[i + 1] = b[1]; + } else { + + // 0 <= v <= 8999999999999999 + // 0 <= (v % 1e14) <= 99999999999999 + c.push( v % 1e14 ); + i += 2; + } + } + i = k / 2; + + // Node.js supporting crypto.randomBytes. + } else if ( crypto && crypto.randomBytes ) { + + // buffer + a = crypto.randomBytes( k *= 7 ); + + for ( ; i < k; ) { + + // 0x1000000000000 is 2^48, 0x10000000000 is 2^40 + // 0x100000000 is 2^32, 0x1000000 is 2^24 + // 11111 11111111 11111111 11111111 11111111 11111111 11111111 + // 0 <= v < 9007199254740992 + v = ( ( a[i] & 31 ) * 0x1000000000000 ) + ( a[i + 1] * 0x10000000000 ) + + ( a[i + 2] * 0x100000000 ) + ( a[i + 3] * 0x1000000 ) + + ( a[i + 4] << 16 ) + ( a[i + 5] << 8 ) + a[i + 6]; + + if ( v >= 9e15 ) { + crypto.randomBytes(7).copy( a, i ); + } else { + + // 0 <= (v % 1e14) <= 99999999999999 + c.push( v % 1e14 ); + i += 7; + } + } + i = k / 7; + } else if (ERRORS) { + raise( 14, 'crypto unavailable', crypto ); + } + } + + // Use Math.random: CRYPTO is false or crypto is unavailable and ERRORS is false. + if (!i) { + + for ( ; i < k; ) { + v = random53bitInt(); + if ( v < 9e15 ) c[i++] = v % 1e14; + } + } + + k = c[--i]; + dp %= LOG_BASE; + + // Convert trailing digits to zeros according to dp. + if ( k && dp ) { + v = POWS_TEN[LOG_BASE - dp]; + c[i] = mathfloor( k / v ) * v; + } + + // Remove trailing elements which are zero. + for ( ; c[i] === 0; c.pop(), i-- ); + + // Zero? + if ( i < 0 ) { + c = [ e = 0 ]; + } else { + + // Remove leading elements which are zero and adjust exponent accordingly. + for ( e = -1 ; c[0] === 0; c.shift(), e -= LOG_BASE); + + // Count the digits of the first element of c to determine leading zeros, and... + for ( i = 1, v = c[0]; v >= 10; v /= 10, i++); + + // adjust the exponent accordingly. + if ( i < LOG_BASE ) e -= LOG_BASE - i; + } + + rand.e = e; + rand.c = c; + return rand; + }; + })(); + + + // PRIVATE FUNCTIONS + + + // Convert a numeric string of baseIn to a numeric string of baseOut. + function convertBase( str, baseOut, baseIn, sign ) { + var d, e, k, r, x, xc, y, + i = str.indexOf( '.' ), + dp = DECIMAL_PLACES, + rm = ROUNDING_MODE; + + if ( baseIn < 37 ) str = str.toLowerCase(); + + // Non-integer. + if ( i >= 0 ) { + k = POW_PRECISION; + + // Unlimited precision. 
+ POW_PRECISION = 0; + str = str.replace( '.', '' ); + y = new BigNumber(baseIn); + x = y.pow( str.length - i ); + POW_PRECISION = k; + + // Convert str as if an integer, then restore the fraction part by dividing the + // result by its base raised to a power. + y.c = toBaseOut( toFixedPoint( coeffToString( x.c ), x.e ), 10, baseOut ); + y.e = y.c.length; + } + + // Convert the number as integer. + xc = toBaseOut( str, baseIn, baseOut ); + e = k = xc.length; + + // Remove trailing zeros. + for ( ; xc[--k] == 0; xc.pop() ); + if ( !xc[0] ) return '0'; + + if ( i < 0 ) { + --e; + } else { + x.c = xc; + x.e = e; + + // sign is needed for correct rounding. + x.s = sign; + x = div( x, y, dp, rm, baseOut ); + xc = x.c; + r = x.r; + e = x.e; + } + + d = e + dp + 1; + + // The rounding digit, i.e. the digit to the right of the digit that may be rounded up. + i = xc[d]; + k = baseOut / 2; + r = r || d < 0 || xc[d + 1] != null; + + r = rm < 4 ? ( i != null || r ) && ( rm == 0 || rm == ( x.s < 0 ? 3 : 2 ) ) + : i > k || i == k &&( rm == 4 || r || rm == 6 && xc[d - 1] & 1 || + rm == ( x.s < 0 ? 8 : 7 ) ); + + if ( d < 1 || !xc[0] ) { + + // 1^-dp or 0. + str = r ? toFixedPoint( '1', -dp ) : '0'; + } else { + xc.length = d; + + if (r) { + + // Rounding up may mean the previous digit has to be rounded up and so on. + for ( --baseOut; ++xc[--d] > baseOut; ) { + xc[d] = 0; + + if ( !d ) { + ++e; + xc.unshift(1); + } + } + } + + // Determine trailing zeros. + for ( k = xc.length; !xc[--k]; ); + + // E.g. [4, 11, 15] becomes 4bf. + for ( i = 0, str = ''; i <= k; str += ALPHABET.charAt( xc[i++] ) ); + str = toFixedPoint( str, e ); + } + + // The caller will add the sign. + return str; + } + + + // Perform division in the specified base. Called by div and convertBase. + div = (function () { + + // Assume non-zero x and k. + function multiply( x, k, base ) { + var m, temp, xlo, xhi, + carry = 0, + i = x.length, + klo = k % SQRT_BASE, + khi = k / SQRT_BASE | 0; + + for ( x = x.slice(); i--; ) { + xlo = x[i] % SQRT_BASE; + xhi = x[i] / SQRT_BASE | 0; + m = khi * xlo + xhi * klo; + temp = klo * xlo + ( ( m % SQRT_BASE ) * SQRT_BASE ) + carry; + carry = ( temp / base | 0 ) + ( m / SQRT_BASE | 0 ) + khi * xhi; + x[i] = temp % base; + } + + if (carry) x.unshift(carry); + + return x; + } + + function compare( a, b, aL, bL ) { + var i, cmp; + + if ( aL != bL ) { + cmp = aL > bL ? 1 : -1; + } else { + + for ( i = cmp = 0; i < aL; i++ ) { + + if ( a[i] != b[i] ) { + cmp = a[i] > b[i] ? 1 : -1; + break; + } + } + } + return cmp; + } + + function subtract( a, b, aL, base ) { + var i = 0; + + // Subtract b from a. + for ( ; aL--; ) { + a[aL] -= i; + i = a[aL] < b[aL] ? 1 : 0; + a[aL] = i * base + a[aL] - b[aL]; + } + + // Remove leading zeros. + for ( ; !a[0] && a.length > 1; a.shift() ); + } + + // x: dividend, y: divisor. + return function ( x, y, dp, rm, base ) { + var cmp, e, i, more, n, prod, prodL, q, qc, rem, remL, rem0, xi, xL, yc0, + yL, yz, + s = x.s == y.s ? 1 : -1, + xc = x.c, + yc = y.c; + + // Either NaN, Infinity or 0? + if ( !xc || !xc[0] || !yc || !yc[0] ) { + + return new BigNumber( + + // Return NaN if either NaN, or both Infinity or 0. + !x.s || !y.s || ( xc ? yc && xc[0] == yc[0] : !yc ) ? NaN : + + // Return ±0 if x is ±0 or y is ±Infinity, or return ±Infinity as y is ±0. + xc && xc[0] == 0 || !yc ? 
s * 0 : s / 0 + ); + } + + q = new BigNumber(s); + qc = q.c = []; + e = x.e - y.e; + s = dp + e + 1; + + if ( !base ) { + base = BASE; + e = bitFloor( x.e / LOG_BASE ) - bitFloor( y.e / LOG_BASE ); + s = s / LOG_BASE | 0; + } + + // Result exponent may be one less then the current value of e. + // The coefficients of the BigNumbers from convertBase may have trailing zeros. + for ( i = 0; yc[i] == ( xc[i] || 0 ); i++ ); + if ( yc[i] > ( xc[i] || 0 ) ) e--; + + if ( s < 0 ) { + qc.push(1); + more = true; + } else { + xL = xc.length; + yL = yc.length; + i = 0; + s += 2; + + // Normalise xc and yc so highest order digit of yc is >= base / 2. + + n = mathfloor( base / ( yc[0] + 1 ) ); + + // Not necessary, but to handle odd bases where yc[0] == ( base / 2 ) - 1. + // if ( n > 1 || n++ == 1 && yc[0] < base / 2 ) { + if ( n > 1 ) { + yc = multiply( yc, n, base ); + xc = multiply( xc, n, base ); + yL = yc.length; + xL = xc.length; + } + + xi = yL; + rem = xc.slice( 0, yL ); + remL = rem.length; + + // Add zeros to make remainder as long as divisor. + for ( ; remL < yL; rem[remL++] = 0 ); + yz = yc.slice(); + yz.unshift(0); + yc0 = yc[0]; + if ( yc[1] >= base / 2 ) yc0++; + // Not necessary, but to prevent trial digit n > base, when using base 3. + // else if ( base == 3 && yc0 == 1 ) yc0 = 1 + 1e-15; + + do { + n = 0; + + // Compare divisor and remainder. + cmp = compare( yc, rem, yL, remL ); + + // If divisor < remainder. + if ( cmp < 0 ) { + + // Calculate trial digit, n. + + rem0 = rem[0]; + if ( yL != remL ) rem0 = rem0 * base + ( rem[1] || 0 ); + + // n is how many times the divisor goes into the current remainder. + n = mathfloor( rem0 / yc0 ); + + // Algorithm: + // 1. product = divisor * trial digit (n) + // 2. if product > remainder: product -= divisor, n-- + // 3. remainder -= product + // 4. if product was < remainder at 2: + // 5. compare new remainder and divisor + // 6. If remainder > divisor: remainder -= divisor, n++ + + if ( n > 1 ) { + + // n may be > base only when base is 3. + if (n >= base) n = base - 1; + + // product = divisor * trial digit. + prod = multiply( yc, n, base ); + prodL = prod.length; + remL = rem.length; + + // Compare product and remainder. + // If product > remainder. + // Trial digit n too high. + // n is 1 too high about 5% of the time, and is not known to have + // ever been more than 1 too high. + while ( compare( prod, rem, prodL, remL ) == 1 ) { + n--; + + // Subtract divisor from product. + subtract( prod, yL < prodL ? yz : yc, prodL, base ); + prodL = prod.length; + cmp = 1; + } + } else { + + // n is 0 or 1, cmp is -1. + // If n is 0, there is no need to compare yc and rem again below, + // so change cmp to 1 to avoid it. + // If n is 1, leave cmp as -1, so yc and rem are compared again. + if ( n == 0 ) { + + // divisor < remainder, so n must be at least 1. + cmp = n = 1; + } + + // product = divisor + prod = yc.slice(); + prodL = prod.length; + } + + if ( prodL < remL ) prod.unshift(0); + + // Subtract product from remainder. + subtract( rem, prod, remL, base ); + remL = rem.length; + + // If product was < remainder. + if ( cmp == -1 ) { + + // Compare divisor and new remainder. + // If divisor < new remainder, subtract divisor from remainder. + // Trial digit n too low. + // n is 1 too low about 5% of the time, and very rarely 2 too low. + while ( compare( yc, rem, yL, remL ) < 1 ) { + n++; + + // Subtract divisor from remainder. + subtract( rem, yL < remL ? 
yz : yc, remL, base ); + remL = rem.length; + } + } + } else if ( cmp === 0 ) { + n++; + rem = [0]; + } // else cmp === 1 and n will be 0 + + // Add the next digit, n, to the result array. + qc[i++] = n; + + // Update the remainder. + if ( rem[0] ) { + rem[remL++] = xc[xi] || 0; + } else { + rem = [ xc[xi] ]; + remL = 1; + } + } while ( ( xi++ < xL || rem[0] != null ) && s-- ); + + more = rem[0] != null; + + // Leading zero? + if ( !qc[0] ) qc.shift(); + } + + if ( base == BASE ) { + + // To calculate q.e, first get the number of digits of qc[0]. + for ( i = 1, s = qc[0]; s >= 10; s /= 10, i++ ); + round( q, dp + ( q.e = i + e * LOG_BASE - 1 ) + 1, rm, more ); + + // Caller is convertBase. + } else { + q.e = e; + q.r = +more; + } + + return q; + }; + })(); + + + /* + * Return a string representing the value of BigNumber n in fixed-point or exponential + * notation rounded to the specified decimal places or significant digits. + * + * n is a BigNumber. + * i is the index of the last digit required (i.e. the digit that may be rounded up). + * rm is the rounding mode. + * caller is caller id: toExponential 19, toFixed 20, toFormat 21, toPrecision 24. + */ + function format( n, i, rm, caller ) { + var c0, e, ne, len, str; + + rm = rm != null && isValidInt( rm, 0, 8, caller, roundingMode ) + ? rm | 0 : ROUNDING_MODE; + + if ( !n.c ) return n.toString(); + c0 = n.c[0]; + ne = n.e; + + if ( i == null ) { + str = coeffToString( n.c ); + str = caller == 19 || caller == 24 && ne <= TO_EXP_NEG + ? toExponential( str, ne ) + : toFixedPoint( str, ne ); + } else { + n = round( new BigNumber(n), i, rm ); + + // n.e may have changed if the value was rounded up. + e = n.e; + + str = coeffToString( n.c ); + len = str.length; + + // toPrecision returns exponential notation if the number of significant digits + // specified is less than the number of digits necessary to represent the integer + // part of the value in fixed-point notation. + + // Exponential notation. + if ( caller == 19 || caller == 24 && ( i <= e || e <= TO_EXP_NEG ) ) { + + // Append zeros? + for ( ; len < i; str += '0', len++ ); + str = toExponential( str, e ); + + // Fixed-point notation. + } else { + i -= ne; + str = toFixedPoint( str, e ); + + // Append zeros? + if ( e + 1 > len ) { + if ( --i > 0 ) for ( str += '.'; i--; str += '0' ); + } else { + i += e - len; + if ( i > 0 ) { + if ( e + 1 == len ) str += '.'; + for ( ; i--; str += '0' ); + } + } + } + } + + return n.s < 0 && c0 ? '-' + str : str; + } + + + // Handle BigNumber.max and BigNumber.min. + function maxOrMin( args, method ) { + var m, n, + i = 0; + + if ( isArray( args[0] ) ) args = args[0]; + m = new BigNumber( args[0] ); + + for ( ; ++i < args.length; ) { + n = new BigNumber( args[i] ); + + // If any number is NaN, return NaN. + if ( !n.s ) { + m = n; + break; + } else if ( method.call( m, n ) ) { + m = n; + } + } + + return m; + } + + + /* + * Return true if n is an integer in range, otherwise throw. + * Use for argument validation when ERRORS is true. + */ + function intValidatorWithErrors( n, min, max, caller, name ) { + if ( n < min || n > max || n != truncate(n) ) { + raise( caller, ( name || 'decimal places' ) + + ( n < min || n > max ? ' out of range' : ' not an integer' ), n ); + } + + return true; + } + + + /* + * Strip trailing zeros, calculate base 10 exponent and check against MIN_EXP and MAX_EXP. + * Called by minus, plus and times. + */ + function normalise( n, c, e ) { + var i = 1, + j = c.length; + + // Remove trailing zeros. 
+ for ( ; !c[--j]; c.pop() ); + + // Calculate the base 10 exponent. First get the number of digits of c[0]. + for ( j = c[0]; j >= 10; j /= 10, i++ ); + + // Overflow? + if ( ( e = i + e * LOG_BASE - 1 ) > MAX_EXP ) { + + // Infinity. + n.c = n.e = null; + + // Underflow? + } else if ( e < MIN_EXP ) { + + // Zero. + n.c = [ n.e = 0 ]; + } else { + n.e = e; + n.c = c; + } + + return n; + } + + + // Handle values that fail the validity test in BigNumber. + parseNumeric = (function () { + var basePrefix = /^(-?)0([xbo])/i, + dotAfter = /^([^.]+)\.$/, + dotBefore = /^\.([^.]+)$/, + isInfinityOrNaN = /^-?(Infinity|NaN)$/, + whitespaceOrPlus = /^\s*\+|^\s+|\s+$/g; + + return function ( x, str, num, b ) { + var base, + s = num ? str : str.replace( whitespaceOrPlus, '' ); + + // No exception on ±Infinity or NaN. + if ( isInfinityOrNaN.test(s) ) { + x.s = isNaN(s) ? null : s < 0 ? -1 : 1; + } else { + if ( !num ) { + + // basePrefix = /^(-?)0([xbo])(?=\w[\w.]*$)/i + s = s.replace( basePrefix, function ( m, p1, p2 ) { + base = ( p2 = p2.toLowerCase() ) == 'x' ? 16 : p2 == 'b' ? 2 : 8; + return !b || b == base ? p1 : m; + }); + + if (b) { + base = b; + + // E.g. '1.' to '1', '.1' to '0.1' + s = s.replace( dotAfter, '$1' ).replace( dotBefore, '0.$1' ); + } + + if ( str != s ) return new BigNumber( s, base ); + } + + // 'new BigNumber() not a number: {n}' + // 'new BigNumber() not a base {b} number: {n}' + if (ERRORS) raise( id, 'not a' + ( b ? ' base ' + b : '' ) + ' number', str ); + x.s = null; + } + + x.c = x.e = null; + id = 0; + } + })(); + + + // Throw a BigNumber Error. + function raise( caller, msg, val ) { + var error = new Error( [ + 'new BigNumber', // 0 + 'cmp', // 1 + 'config', // 2 + 'div', // 3 + 'divToInt', // 4 + 'eq', // 5 + 'gt', // 6 + 'gte', // 7 + 'lt', // 8 + 'lte', // 9 + 'minus', // 10 + 'mod', // 11 + 'plus', // 12 + 'precision', // 13 + 'random', // 14 + 'round', // 15 + 'shift', // 16 + 'times', // 17 + 'toDigits', // 18 + 'toExponential', // 19 + 'toFixed', // 20 + 'toFormat', // 21 + 'toFraction', // 22 + 'pow', // 23 + 'toPrecision', // 24 + 'toString', // 25 + 'BigNumber' // 26 + ][caller] + '() ' + msg + ': ' + val ); + + error.name = 'BigNumber Error'; + id = 0; + throw error; + } + + + /* + * Round x to sd significant digits using rounding mode rm. Check for over/under-flow. + * If r is truthy, it is known that there are more digits after the rounding digit. + */ + function round( x, sd, rm, r ) { + var d, i, j, k, n, ni, rd, + xc = x.c, + pows10 = POWS_TEN; + + // if x is not Infinity or NaN... + if (xc) { + + // rd is the rounding digit, i.e. the digit after the digit that may be rounded up. + // n is a base 1e14 number, the value of the element of array x.c containing rd. + // ni is the index of n within x.c. + // d is the number of digits of n. + // i is the index of rd within n including leading zeros. + // j is the actual index of rd within n (if < 0, rd is a leading zero). + out: { + + // Get the number of digits of the first element of xc. + for ( d = 1, k = xc[0]; k >= 10; k /= 10, d++ ); + i = sd - d; + + // If the rounding digit is in the first element of xc... + if ( i < 0 ) { + i += LOG_BASE; + j = sd; + n = xc[ ni = 0 ]; + + // Get the rounding digit at index j of n. + rd = n / pows10[ d - j - 1 ] % 10 | 0; + } else { + ni = mathceil( ( i + 1 ) / LOG_BASE ); + + if ( ni >= xc.length ) { + + if (r) { + + // Needed by sqrt. 
+ for ( ; xc.length <= ni; xc.push(0) ); + n = rd = 0; + d = 1; + i %= LOG_BASE; + j = i - LOG_BASE + 1; + } else { + break out; + } + } else { + n = k = xc[ni]; + + // Get the number of digits of n. + for ( d = 1; k >= 10; k /= 10, d++ ); + + // Get the index of rd within n. + i %= LOG_BASE; + + // Get the index of rd within n, adjusted for leading zeros. + // The number of leading zeros of n is given by LOG_BASE - d. + j = i - LOG_BASE + d; + + // Get the rounding digit at index j of n. + rd = j < 0 ? 0 : n / pows10[ d - j - 1 ] % 10 | 0; + } + } + + r = r || sd < 0 || + + // Are there any non-zero digits after the rounding digit? + // The expression n % pows10[ d - j - 1 ] returns all digits of n to the right + // of the digit at j, e.g. if n is 908714 and j is 2, the expression gives 714. + xc[ni + 1] != null || ( j < 0 ? n : n % pows10[ d - j - 1 ] ); + + r = rm < 4 + ? ( rd || r ) && ( rm == 0 || rm == ( x.s < 0 ? 3 : 2 ) ) + : rd > 5 || rd == 5 && ( rm == 4 || r || rm == 6 && + + // Check whether the digit to the left of the rounding digit is odd. + ( ( i > 0 ? j > 0 ? n / pows10[ d - j ] : 0 : xc[ni - 1] ) % 10 ) & 1 || + rm == ( x.s < 0 ? 8 : 7 ) ); + + if ( sd < 1 || !xc[0] ) { + xc.length = 0; + + if (r) { + + // Convert sd to decimal places. + sd -= x.e + 1; + + // 1, 0.1, 0.01, 0.001, 0.0001 etc. + xc[0] = pows10[ sd % LOG_BASE ]; + x.e = -sd || 0; + } else { + + // Zero. + xc[0] = x.e = 0; + } + + return x; + } + + // Remove excess digits. + if ( i == 0 ) { + xc.length = ni; + k = 1; + ni--; + } else { + xc.length = ni + 1; + k = pows10[ LOG_BASE - i ]; + + // E.g. 56700 becomes 56000 if 7 is the rounding digit. + // j > 0 means i > number of leading zeros of n. + xc[ni] = j > 0 ? mathfloor( n / pows10[ d - j ] % pows10[j] ) * k : 0; + } + + // Round up? + if (r) { + + for ( ; ; ) { + + // If the digit to be rounded up is in the first element of xc... + if ( ni == 0 ) { + + // i will be the length of xc[0] before k is added. + for ( i = 1, j = xc[0]; j >= 10; j /= 10, i++ ); + j = xc[0] += k; + for ( k = 1; j >= 10; j /= 10, k++ ); + + // if i != k the length has increased. + if ( i != k ) { + x.e++; + if ( xc[0] == BASE ) xc[0] = 1; + } + + break; + } else { + xc[ni] += k; + if ( xc[ni] != BASE ) break; + xc[ni--] = 0; + k = 1; + } + } + } + + // Remove trailing zeros. + for ( i = xc.length; xc[--i] === 0; xc.pop() ); + } + + // Overflow? Infinity. + if ( x.e > MAX_EXP ) { + x.c = x.e = null; + + // Underflow? Zero. + } else if ( x.e < MIN_EXP ) { + x.c = [ x.e = 0 ]; + } + } + + return x; + } + + + // PROTOTYPE/INSTANCE METHODS + + + /* + * Return a new BigNumber whose value is the absolute value of this BigNumber. + */ + P.absoluteValue = P.abs = function () { + var x = new BigNumber(this); + if ( x.s < 0 ) x.s = 1; + return x; + }; + + + /* + * Return a new BigNumber whose value is the value of this BigNumber rounded to a whole + * number in the direction of Infinity. + */ + P.ceil = function () { + return round( new BigNumber(this), this.e + 1, 2 ); + }; + + + /* + * Return + * 1 if the value of this BigNumber is greater than the value of BigNumber(y, b), + * -1 if the value of this BigNumber is less than the value of BigNumber(y, b), + * 0 if they have the same value, + * or null if the value of either is NaN. + */ + P.comparedTo = P.cmp = function ( y, b ) { + id = 1; + return compare( this, new BigNumber( y, b ) ); + }; + + + /* + * Return the number of decimal places of the value of this BigNumber, or null if the value + * of this BigNumber is ±Infinity or NaN. 
+ */ + P.decimalPlaces = P.dp = function () { + var n, v, + c = this.c; + + if ( !c ) return null; + n = ( ( v = c.length - 1 ) - bitFloor( this.e / LOG_BASE ) ) * LOG_BASE; + + // Subtract the number of trailing zeros of the last number. + if ( v = c[v] ) for ( ; v % 10 == 0; v /= 10, n-- ); + if ( n < 0 ) n = 0; + + return n; + }; + + + /* + * n / 0 = I + * n / N = N + * n / I = 0 + * 0 / n = 0 + * 0 / 0 = N + * 0 / N = N + * 0 / I = 0 + * N / n = N + * N / 0 = N + * N / N = N + * N / I = N + * I / n = I + * I / 0 = I + * I / N = N + * I / I = N + * + * Return a new BigNumber whose value is the value of this BigNumber divided by the value of + * BigNumber(y, b), rounded according to DECIMAL_PLACES and ROUNDING_MODE. + */ + P.dividedBy = P.div = function ( y, b ) { + id = 3; + return div( this, new BigNumber( y, b ), DECIMAL_PLACES, ROUNDING_MODE ); + }; + + + /* + * Return a new BigNumber whose value is the integer part of dividing the value of this + * BigNumber by the value of BigNumber(y, b). + */ + P.dividedToIntegerBy = P.divToInt = function ( y, b ) { + id = 4; + return div( this, new BigNumber( y, b ), 0, 1 ); + }; + + + /* + * Return true if the value of this BigNumber is equal to the value of BigNumber(y, b), + * otherwise returns false. + */ + P.equals = P.eq = function ( y, b ) { + id = 5; + return compare( this, new BigNumber( y, b ) ) === 0; + }; + + + /* + * Return a new BigNumber whose value is the value of this BigNumber rounded to a whole + * number in the direction of -Infinity. + */ + P.floor = function () { + return round( new BigNumber(this), this.e + 1, 3 ); + }; + + + /* + * Return true if the value of this BigNumber is greater than the value of BigNumber(y, b), + * otherwise returns false. + */ + P.greaterThan = P.gt = function ( y, b ) { + id = 6; + return compare( this, new BigNumber( y, b ) ) > 0; + }; + + + /* + * Return true if the value of this BigNumber is greater than or equal to the value of + * BigNumber(y, b), otherwise returns false. + */ + P.greaterThanOrEqualTo = P.gte = function ( y, b ) { + id = 7; + return ( b = compare( this, new BigNumber( y, b ) ) ) === 1 || b === 0; + + }; + + + /* + * Return true if the value of this BigNumber is a finite number, otherwise returns false. + */ + P.isFinite = function () { + return !!this.c; + }; + + + /* + * Return true if the value of this BigNumber is an integer, otherwise return false. + */ + P.isInteger = P.isInt = function () { + return !!this.c && bitFloor( this.e / LOG_BASE ) > this.c.length - 2; + }; + + + /* + * Return true if the value of this BigNumber is NaN, otherwise returns false. + */ + P.isNaN = function () { + return !this.s; + }; + + + /* + * Return true if the value of this BigNumber is negative, otherwise returns false. + */ + P.isNegative = P.isNeg = function () { + return this.s < 0; + }; + + + /* + * Return true if the value of this BigNumber is 0 or -0, otherwise returns false. + */ + P.isZero = function () { + return !!this.c && this.c[0] == 0; + }; + + + /* + * Return true if the value of this BigNumber is less than the value of BigNumber(y, b), + * otherwise returns false. + */ + P.lessThan = P.lt = function ( y, b ) { + id = 8; + return compare( this, new BigNumber( y, b ) ) < 0; + }; + + + /* + * Return true if the value of this BigNumber is less than or equal to the value of + * BigNumber(y, b), otherwise returns false. 
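+     *
+     * E.g. new BigNumber(0.1).lte('0.2') and new BigNumber(1).lte(1) both return true.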
+ */ + P.lessThanOrEqualTo = P.lte = function ( y, b ) { + id = 9; + return ( b = compare( this, new BigNumber( y, b ) ) ) === -1 || b === 0; + }; + + + /* + * n - 0 = n + * n - N = N + * n - I = -I + * 0 - n = -n + * 0 - 0 = 0 + * 0 - N = N + * 0 - I = -I + * N - n = N + * N - 0 = N + * N - N = N + * N - I = N + * I - n = I + * I - 0 = I + * I - N = N + * I - I = N + * + * Return a new BigNumber whose value is the value of this BigNumber minus the value of + * BigNumber(y, b). + */ + P.minus = P.sub = function ( y, b ) { + var i, j, t, xLTy, + x = this, + a = x.s; + + id = 10; + y = new BigNumber( y, b ); + b = y.s; + + // Either NaN? + if ( !a || !b ) return new BigNumber(NaN); + + // Signs differ? + if ( a != b ) { + y.s = -b; + return x.plus(y); + } + + var xe = x.e / LOG_BASE, + ye = y.e / LOG_BASE, + xc = x.c, + yc = y.c; + + if ( !xe || !ye ) { + + // Either Infinity? + if ( !xc || !yc ) return xc ? ( y.s = -b, y ) : new BigNumber( yc ? x : NaN ); + + // Either zero? + if ( !xc[0] || !yc[0] ) { + + // Return y if y is non-zero, x if x is non-zero, or zero if both are zero. + return yc[0] ? ( y.s = -b, y ) : new BigNumber( xc[0] ? x : + + // IEEE 754 (2008) 6.3: n - n = -0 when rounding to -Infinity + ROUNDING_MODE == 3 ? -0 : 0 ); + } + } + + xe = bitFloor(xe); + ye = bitFloor(ye); + xc = xc.slice(); + + // Determine which is the bigger number. + if ( a = xe - ye ) { + + if ( xLTy = a < 0 ) { + a = -a; + t = xc; + } else { + ye = xe; + t = yc; + } + + t.reverse(); + + // Prepend zeros to equalise exponents. + for ( b = a; b--; t.push(0) ); + t.reverse(); + } else { + + // Exponents equal. Check digit by digit. + j = ( xLTy = ( a = xc.length ) < ( b = yc.length ) ) ? a : b; + + for ( a = b = 0; b < j; b++ ) { + + if ( xc[b] != yc[b] ) { + xLTy = xc[b] < yc[b]; + break; + } + } + } + + // x < y? Point xc to the array of the bigger number. + if (xLTy) t = xc, xc = yc, yc = t, y.s = -y.s; + + b = ( j = yc.length ) - ( i = xc.length ); + + // Append zeros to xc if shorter. + // No need to add zeros to yc if shorter as subtract only needs to start at yc.length. + if ( b > 0 ) for ( ; b--; xc[i++] = 0 ); + b = BASE - 1; + + // Subtract yc from xc. + for ( ; j > a; ) { + + if ( xc[--j] < yc[j] ) { + for ( i = j; i && !xc[--i]; xc[i] = b ); + --xc[i]; + xc[j] += BASE; + } + + xc[j] -= yc[j]; + } + + // Remove leading zeros and adjust exponent accordingly. + for ( ; xc[0] == 0; xc.shift(), --ye ); + + // Zero? + if ( !xc[0] ) { + + // Following IEEE 754 (2008) 6.3, + // n - n = +0 but n - n = -0 when rounding towards -Infinity. + y.s = ROUNDING_MODE == 3 ? -1 : 1; + y.c = [ y.e = 0 ]; + return y; + } + + // No need to check for Infinity as +x - +y != Infinity && -x - -y != Infinity + // for finite x and y. + return normalise( y, xc, ye ); + }; + + + /* + * n % 0 = N + * n % N = N + * n % I = n + * 0 % n = 0 + * -0 % n = -0 + * 0 % 0 = N + * 0 % N = N + * 0 % I = 0 + * N % n = N + * N % 0 = N + * N % N = N + * N % I = N + * I % n = N + * I % 0 = N + * I % N = N + * I % I = N + * + * Return a new BigNumber whose value is the value of this BigNumber modulo the value of + * BigNumber(y, b). The result depends on the value of MODULO_MODE. + */ + P.modulo = P.mod = function ( y, b ) { + var q, s, + x = this; + + id = 11; + y = new BigNumber( y, b ); + + // Return NaN if x is Infinity or NaN, or y is NaN or zero. + if ( !x.c || !y.s || y.c && !y.c[0] ) { + return new BigNumber(NaN); + + // Return x if y is Infinity or x is zero. 
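+            // (Per the table above, n % I = n and 0 % n = 0, so e.g. 5 mod Infinity is 5
+            // and 0 mod 7 is 0.)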
+ } else if ( !y.c || x.c && !x.c[0] ) { + return new BigNumber(x); + } + + if ( MODULO_MODE == 9 ) { + + // Euclidian division: q = sign(y) * floor(x / abs(y)) + // r = x - qy where 0 <= r < abs(y) + s = y.s; + y.s = 1; + q = div( x, y, 0, 3 ); + y.s = s; + q.s *= s; + } else { + q = div( x, y, 0, MODULO_MODE ); + } + + return x.minus( q.times(y) ); + }; + + + /* + * Return a new BigNumber whose value is the value of this BigNumber negated, + * i.e. multiplied by -1. + */ + P.negated = P.neg = function () { + var x = new BigNumber(this); + x.s = -x.s || null; + return x; + }; + + + /* + * n + 0 = n + * n + N = N + * n + I = I + * 0 + n = n + * 0 + 0 = 0 + * 0 + N = N + * 0 + I = I + * N + n = N + * N + 0 = N + * N + N = N + * N + I = N + * I + n = I + * I + 0 = I + * I + N = N + * I + I = I + * + * Return a new BigNumber whose value is the value of this BigNumber plus the value of + * BigNumber(y, b). + */ + P.plus = P.add = function ( y, b ) { + var t, + x = this, + a = x.s; + + id = 12; + y = new BigNumber( y, b ); + b = y.s; + + // Either NaN? + if ( !a || !b ) return new BigNumber(NaN); + + // Signs differ? + if ( a != b ) { + y.s = -b; + return x.minus(y); + } + + var xe = x.e / LOG_BASE, + ye = y.e / LOG_BASE, + xc = x.c, + yc = y.c; + + if ( !xe || !ye ) { + + // Return ±Infinity if either ±Infinity. + if ( !xc || !yc ) return new BigNumber( a / 0 ); + + // Either zero? + // Return y if y is non-zero, x if x is non-zero, or zero if both are zero. + if ( !xc[0] || !yc[0] ) return yc[0] ? y : new BigNumber( xc[0] ? x : a * 0 ); + } + + xe = bitFloor(xe); + ye = bitFloor(ye); + xc = xc.slice(); + + // Prepend zeros to equalise exponents. Faster to use reverse then do unshifts. + if ( a = xe - ye ) { + if ( a > 0 ) { + ye = xe; + t = yc; + } else { + a = -a; + t = xc; + } + + t.reverse(); + for ( ; a--; t.push(0) ); + t.reverse(); + } + + a = xc.length; + b = yc.length; + + // Point xc to the longer array, and b to the shorter length. + if ( a - b < 0 ) t = yc, yc = xc, xc = t, b = a; + + // Only start adding at yc.length - 1 as the further digits of xc can be ignored. + for ( a = 0; b; ) { + a = ( xc[--b] = xc[b] + yc[b] + a ) / BASE | 0; + xc[b] %= BASE; + } + + if (a) { + xc.unshift(a); + ++ye; + } + + // No need to check for zero, as +x + +y != 0 && -x + -y != 0 + // ye = MAX_EXP + 1 possible + return normalise( y, xc, ye ); + }; + + + /* + * Return the number of significant digits of the value of this BigNumber. + * + * [z] {boolean|number} Whether to count integer-part trailing zeros: true, false, 1 or 0. + */ + P.precision = P.sd = function (z) { + var n, v, + x = this, + c = x.c; + + // 'precision() argument not a boolean or binary digit: {z}' + if ( z != null && z !== !!z && z !== 1 && z !== 0 ) { + if (ERRORS) raise( 13, 'argument' + notBool, z ); + if ( z != !!z ) z = null; + } + + if ( !c ) return null; + v = c.length - 1; + n = v * LOG_BASE + 1; + + if ( v = c[v] ) { + + // Subtract the number of trailing zeros of the last element. + for ( ; v % 10 == 0; v /= 10, n-- ); + + // Add the number of digits of the first element. + for ( v = c[0]; v >= 10; v /= 10, n++ ); + } + + if ( z && x.e + 1 > n ) n = x.e + 1; + + return n; + }; + + + /* + * Return a new BigNumber whose value is the value of this BigNumber rounded to a maximum of + * dp decimal places using rounding mode rm, or to 0 and ROUNDING_MODE respectively if + * omitted. + * + * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. + * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. 
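+     *
+     * E.g. new BigNumber('3.14159').round(2) gives '3.14' with the default (half-up)
+     * rounding mode, while round(2, 0) rounds away from zero and gives '3.15'.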
+ * + * 'round() decimal places out of range: {dp}' + * 'round() decimal places not an integer: {dp}' + * 'round() rounding mode not an integer: {rm}' + * 'round() rounding mode out of range: {rm}' + */ + P.round = function ( dp, rm ) { + var n = new BigNumber(this); + + if ( dp == null || isValidInt( dp, 0, MAX, 15 ) ) { + round( n, ~~dp + this.e + 1, rm == null || + !isValidInt( rm, 0, 8, 15, roundingMode ) ? ROUNDING_MODE : rm | 0 ); + } + + return n; + }; + + + /* + * Return a new BigNumber whose value is the value of this BigNumber shifted by k places + * (powers of 10). Shift to the right if n > 0, and to the left if n < 0. + * + * k {number} Integer, -MAX_SAFE_INTEGER to MAX_SAFE_INTEGER inclusive. + * + * If k is out of range and ERRORS is false, the result will be ±0 if k < 0, or ±Infinity + * otherwise. + * + * 'shift() argument not an integer: {k}' + * 'shift() argument out of range: {k}' + */ + P.shift = function (k) { + var n = this; + return isValidInt( k, -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER, 16, 'argument' ) + + // k < 1e+21, or truncate(k) will produce exponential notation. + ? n.times( '1e' + truncate(k) ) + : new BigNumber( n.c && n.c[0] && ( k < -MAX_SAFE_INTEGER || k > MAX_SAFE_INTEGER ) + ? n.s * ( k < 0 ? 0 : 1 / 0 ) + : n ); + }; + + + /* + * sqrt(-n) = N + * sqrt( N) = N + * sqrt(-I) = N + * sqrt( I) = I + * sqrt( 0) = 0 + * sqrt(-0) = -0 + * + * Return a new BigNumber whose value is the square root of the value of this BigNumber, + * rounded according to DECIMAL_PLACES and ROUNDING_MODE. + */ + P.squareRoot = P.sqrt = function () { + var m, n, r, rep, t, + x = this, + c = x.c, + s = x.s, + e = x.e, + dp = DECIMAL_PLACES + 4, + half = new BigNumber('0.5'); + + // Negative/NaN/Infinity/zero? + if ( s !== 1 || !c || !c[0] ) { + return new BigNumber( !s || s < 0 && ( !c || c[0] ) ? NaN : c ? x : 1 / 0 ); + } + + // Initial estimate. + s = Math.sqrt( +x ); + + // Math.sqrt underflow/overflow? + // Pass x to Math.sqrt as integer, then adjust the exponent of the result. + if ( s == 0 || s == 1 / 0 ) { + n = coeffToString(c); + if ( ( n.length + e ) % 2 == 0 ) n += '0'; + s = Math.sqrt(n); + e = bitFloor( ( e + 1 ) / 2 ) - ( e < 0 || e % 2 ); + + if ( s == 1 / 0 ) { + n = '1e' + e; + } else { + n = s.toExponential(); + n = n.slice( 0, n.indexOf('e') + 1 ) + e; + } + + r = new BigNumber(n); + } else { + r = new BigNumber( s + '' ); + } + + // Check for zero. + // r could be zero if MIN_EXP is changed after the this value was created. + // This would cause a division by zero (x/t) and hence Infinity below, which would cause + // coeffToString to throw. + if ( r.c[0] ) { + e = r.e; + s = e + dp; + if ( s < 3 ) s = 0; + + // Newton-Raphson iteration. + for ( ; ; ) { + t = r; + r = half.times( t.plus( div( x, t, dp, 1 ) ) ); + + if ( coeffToString( t.c ).slice( 0, s ) === ( n = + coeffToString( r.c ) ).slice( 0, s ) ) { + + // The exponent of r may here be one less than the final result exponent, + // e.g 0.0009999 (e-4) --> 0.001 (e-3), so adjust s so the rounding digits + // are indexed correctly. + if ( r.e < e ) --s; + n = n.slice( s - 3, s + 1 ); + + // The 4th rounding digit may be in error by -1 so if the 4 rounding digits + // are 9999 or 4999 (i.e. approaching a rounding boundary) continue the + // iteration. + if ( n == '9999' || !rep && n == '4999' ) { + + // On the first iteration only, check to see if rounding up gives the + // exact result as the nines may infinitely repeat. 
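+                        // (E.g. an approximation such as 1.999999... may stand for an exact 2;
+                        // rounding t up and squaring it detects that case.)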
+ if ( !rep ) { + round( t, t.e + DECIMAL_PLACES + 2, 0 ); + + if ( t.times(t).eq(x) ) { + r = t; + break; + } + } + + dp += 4; + s += 4; + rep = 1; + } else { + + // If rounding digits are null, 0{0,4} or 50{0,3}, check for exact + // result. If not, then there are further digits and m will be truthy. + if ( !+n || !+n.slice(1) && n.charAt(0) == '5' ) { + + // Truncate to the first rounding digit. + round( r, r.e + DECIMAL_PLACES + 2, 1 ); + m = !r.times(r).eq(x); + } + + break; + } + } + } + } + + return round( r, r.e + DECIMAL_PLACES + 1, ROUNDING_MODE, m ); + }; + + + /* + * n * 0 = 0 + * n * N = N + * n * I = I + * 0 * n = 0 + * 0 * 0 = 0 + * 0 * N = N + * 0 * I = N + * N * n = N + * N * 0 = N + * N * N = N + * N * I = N + * I * n = I + * I * 0 = N + * I * N = N + * I * I = I + * + * Return a new BigNumber whose value is the value of this BigNumber times the value of + * BigNumber(y, b). + */ + P.times = P.mul = function ( y, b ) { + var c, e, i, j, k, m, xcL, xlo, xhi, ycL, ylo, yhi, zc, + base, sqrtBase, + x = this, + xc = x.c, + yc = ( id = 17, y = new BigNumber( y, b ) ).c; + + // Either NaN, ±Infinity or ±0? + if ( !xc || !yc || !xc[0] || !yc[0] ) { + + // Return NaN if either is NaN, or one is 0 and the other is Infinity. + if ( !x.s || !y.s || xc && !xc[0] && !yc || yc && !yc[0] && !xc ) { + y.c = y.e = y.s = null; + } else { + y.s *= x.s; + + // Return ±Infinity if either is ±Infinity. + if ( !xc || !yc ) { + y.c = y.e = null; + + // Return ±0 if either is ±0. + } else { + y.c = [0]; + y.e = 0; + } + } + + return y; + } + + e = bitFloor( x.e / LOG_BASE ) + bitFloor( y.e / LOG_BASE ); + y.s *= x.s; + xcL = xc.length; + ycL = yc.length; + + // Ensure xc points to longer array and xcL to its length. + if ( xcL < ycL ) zc = xc, xc = yc, yc = zc, i = xcL, xcL = ycL, ycL = i; + + // Initialise the result array with zeros. + for ( i = xcL + ycL, zc = []; i--; zc.push(0) ); + + base = BASE; + sqrtBase = SQRT_BASE; + + for ( i = ycL; --i >= 0; ) { + c = 0; + ylo = yc[i] % sqrtBase; + yhi = yc[i] / sqrtBase | 0; + + for ( k = xcL, j = i + k; j > i; ) { + xlo = xc[--k] % sqrtBase; + xhi = xc[k] / sqrtBase | 0; + m = yhi * xlo + xhi * ylo; + xlo = ylo * xlo + ( ( m % sqrtBase ) * sqrtBase ) + zc[j] + c; + c = ( xlo / base | 0 ) + ( m / sqrtBase | 0 ) + yhi * xhi; + zc[j--] = xlo % base; + } + + zc[j] = c; + } + + if (c) { + ++e; + } else { + zc.shift(); + } + + return normalise( y, zc, e ); + }; + + + /* + * Return a new BigNumber whose value is the value of this BigNumber rounded to a maximum of + * sd significant digits using rounding mode rm, or ROUNDING_MODE if rm is omitted. + * + * [sd] {number} Significant digits. Integer, 1 to MAX inclusive. + * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. + * + * 'toDigits() precision out of range: {sd}' + * 'toDigits() precision not an integer: {sd}' + * 'toDigits() rounding mode not an integer: {rm}' + * 'toDigits() rounding mode out of range: {rm}' + */ + P.toDigits = function ( sd, rm ) { + var n = new BigNumber(this); + sd = sd == null || !isValidInt( sd, 1, MAX, 18, 'precision' ) ? null : sd | 0; + rm = rm == null || !isValidInt( rm, 0, 8, 18, roundingMode ) ? ROUNDING_MODE : rm | 0; + return sd ? round( n, sd, rm ) : n; + }; + + + /* + * Return a string representing the value of this BigNumber in exponential notation and + * rounded using ROUNDING_MODE to dp fixed decimal places. + * + * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. + * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. 
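+     *
+     * E.g. new BigNumber(45.6).toExponential(2) returns '4.56e+1'.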
+ * + * 'toExponential() decimal places not an integer: {dp}' + * 'toExponential() decimal places out of range: {dp}' + * 'toExponential() rounding mode not an integer: {rm}' + * 'toExponential() rounding mode out of range: {rm}' + */ + P.toExponential = function ( dp, rm ) { + return format( this, + dp != null && isValidInt( dp, 0, MAX, 19 ) ? ~~dp + 1 : null, rm, 19 ); + }; + + + /* + * Return a string representing the value of this BigNumber in fixed-point notation rounding + * to dp fixed decimal places using rounding mode rm, or ROUNDING_MODE if rm is omitted. + * + * Note: as with JavaScript's number type, (-0).toFixed(0) is '0', + * but e.g. (-0.00001).toFixed(0) is '-0'. + * + * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. + * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. + * + * 'toFixed() decimal places not an integer: {dp}' + * 'toFixed() decimal places out of range: {dp}' + * 'toFixed() rounding mode not an integer: {rm}' + * 'toFixed() rounding mode out of range: {rm}' + */ + P.toFixed = function ( dp, rm ) { + return format( this, dp != null && isValidInt( dp, 0, MAX, 20 ) + ? ~~dp + this.e + 1 : null, rm, 20 ); + }; + + + /* + * Return a string representing the value of this BigNumber in fixed-point notation rounded + * using rm or ROUNDING_MODE to dp decimal places, and formatted according to the properties + * of the FORMAT object (see BigNumber.config). + * + * FORMAT = { + * decimalSeparator : '.', + * groupSeparator : ',', + * groupSize : 3, + * secondaryGroupSize : 0, + * fractionGroupSeparator : '\xA0', // non-breaking space + * fractionGroupSize : 0 + * }; + * + * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. + * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. + * + * 'toFormat() decimal places not an integer: {dp}' + * 'toFormat() decimal places out of range: {dp}' + * 'toFormat() rounding mode not an integer: {rm}' + * 'toFormat() rounding mode out of range: {rm}' + */ + P.toFormat = function ( dp, rm ) { + var str = format( this, dp != null && isValidInt( dp, 0, MAX, 21 ) + ? ~~dp + this.e + 1 : null, rm, 21 ); + + if ( this.c ) { + var i, + arr = str.split('.'), + g1 = +FORMAT.groupSize, + g2 = +FORMAT.secondaryGroupSize, + groupSeparator = FORMAT.groupSeparator, + intPart = arr[0], + fractionPart = arr[1], + isNeg = this.s < 0, + intDigits = isNeg ? intPart.slice(1) : intPart, + len = intDigits.length; + + if (g2) i = g1, g1 = g2, g2 = i, len -= i; + + if ( g1 > 0 && len > 0 ) { + i = len % g1 || g1; + intPart = intDigits.substr( 0, i ); + + for ( ; i < len; i += g1 ) { + intPart += groupSeparator + intDigits.substr( i, g1 ); + } + + if ( g2 > 0 ) intPart += groupSeparator + intDigits.slice(i); + if (isNeg) intPart = '-' + intPart; + } + + str = fractionPart + ? intPart + FORMAT.decimalSeparator + ( ( g2 = +FORMAT.fractionGroupSize ) + ? fractionPart.replace( new RegExp( '\\d{' + g2 + '}\\B', 'g' ), + '$&' + FORMAT.fractionGroupSeparator ) + : fractionPart ) + : intPart; + } + + return str; + }; + + + /* + * Return a string array representing the value of this BigNumber as a simple fraction with + * an integer numerator and an integer denominator. The denominator will be a positive + * non-zero value less than or equal to the specified maximum denominator. If a maximum + * denominator is not specified, the denominator will be the lowest value necessary to + * represent the number exactly. + * + * [md] {number|string|BigNumber} Integer >= 1 and < Infinity. The maximum denominator. 
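+     *
+     * E.g. new BigNumber(1.75).toFraction() returns ['7', '4'], and
+     * new BigNumber(0.375).toFraction() returns ['3', '8'].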
+ * + * 'toFraction() max denominator not an integer: {md}' + * 'toFraction() max denominator out of range: {md}' + */ + P.toFraction = function (md) { + var arr, d0, d2, e, exp, n, n0, q, s, + k = ERRORS, + x = this, + xc = x.c, + d = new BigNumber(ONE), + n1 = d0 = new BigNumber(ONE), + d1 = n0 = new BigNumber(ONE); + + if ( md != null ) { + ERRORS = false; + n = new BigNumber(md); + ERRORS = k; + + if ( !( k = n.isInt() ) || n.lt(ONE) ) { + + if (ERRORS) { + raise( 22, + 'max denominator ' + ( k ? 'out of range' : 'not an integer' ), md ); + } + + // ERRORS is false: + // If md is a finite non-integer >= 1, round it to an integer and use it. + md = !k && n.c && round( n, n.e + 1, 1 ).gte(ONE) ? n : null; + } + } + + if ( !xc ) return x.toString(); + s = coeffToString(xc); + + // Determine initial denominator. + // d is a power of 10 and the minimum max denominator that specifies the value exactly. + e = d.e = s.length - x.e - 1; + d.c[0] = POWS_TEN[ ( exp = e % LOG_BASE ) < 0 ? LOG_BASE + exp : exp ]; + md = !md || n.cmp(d) > 0 ? ( e > 0 ? d : n1 ) : n; + + exp = MAX_EXP; + MAX_EXP = 1 / 0; + n = new BigNumber(s); + + // n0 = d1 = 0 + n0.c[0] = 0; + + for ( ; ; ) { + q = div( n, d, 0, 1 ); + d2 = d0.plus( q.times(d1) ); + if ( d2.cmp(md) == 1 ) break; + d0 = d1; + d1 = d2; + n1 = n0.plus( q.times( d2 = n1 ) ); + n0 = d2; + d = n.minus( q.times( d2 = d ) ); + n = d2; + } + + d2 = div( md.minus(d0), d1, 0, 1 ); + n0 = n0.plus( d2.times(n1) ); + d0 = d0.plus( d2.times(d1) ); + n0.s = n1.s = x.s; + e *= 2; + + // Determine which fraction is closer to x, n0/d0 or n1/d1 + arr = div( n1, d1, e, ROUNDING_MODE ).minus(x).abs().cmp( + div( n0, d0, e, ROUNDING_MODE ).minus(x).abs() ) < 1 + ? [ n1.toString(), d1.toString() ] + : [ n0.toString(), d0.toString() ]; + + MAX_EXP = exp; + return arr; + }; + + + /* + * Return the value of this BigNumber converted to a number primitive. + */ + P.toNumber = function () { + var x = this; + + // Ensure zero has correct sign. + return +x || ( x.s ? x.s * 0 : NaN ); + }; + + + /* + * Return a BigNumber whose value is the value of this BigNumber raised to the power n. + * If n is negative round according to DECIMAL_PLACES and ROUNDING_MODE. + * If POW_PRECISION is not 0, round to POW_PRECISION using ROUNDING_MODE. + * + * n {number} Integer, -9007199254740992 to 9007199254740992 inclusive. + * (Performs 54 loop iterations for n of 9007199254740992.) + * + * 'pow() exponent not an integer: {n}' + * 'pow() exponent out of range: {n}' + */ + P.toPower = P.pow = function (n) { + var k, y, + i = mathfloor( n < 0 ? -n : +n ), + x = this; + + // Pass ±Infinity to Math.pow if exponent is out of range. + if ( !isValidInt( n, -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER, 23, 'exponent' ) && + ( !isFinite(n) || i > MAX_SAFE_INTEGER && ( n /= 0 ) || + parseFloat(n) != n && !( n = NaN ) ) ) { + return new BigNumber( Math.pow( +x, n ) ); + } + + // Truncating each coefficient array to a length of k after each multiplication equates + // to truncating significant digits to POW_PRECISION + [28, 41], i.e. there will be a + // minimum of 28 guard digits retained. (Using + 1.5 would give [9, 21] guard digits.) + k = POW_PRECISION ? 
mathceil( POW_PRECISION / LOG_BASE + 2 ) : 0; + y = new BigNumber(ONE); + + for ( ; ; ) { + + if ( i % 2 ) { + y = y.times(x); + if ( !y.c ) break; + if ( k && y.c.length > k ) y.c.length = k; + } + + i = mathfloor( i / 2 ); + if ( !i ) break; + + x = x.times(x); + if ( k && x.c && x.c.length > k ) x.c.length = k; + } + + if ( n < 0 ) y = ONE.div(y); + return k ? round( y, POW_PRECISION, ROUNDING_MODE ) : y; + }; + + + /* + * Return a string representing the value of this BigNumber rounded to sd significant digits + * using rounding mode rm or ROUNDING_MODE. If sd is less than the number of digits + * necessary to represent the integer part of the value in fixed-point notation, then use + * exponential notation. + * + * [sd] {number} Significant digits. Integer, 1 to MAX inclusive. + * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. + * + * 'toPrecision() precision not an integer: {sd}' + * 'toPrecision() precision out of range: {sd}' + * 'toPrecision() rounding mode not an integer: {rm}' + * 'toPrecision() rounding mode out of range: {rm}' + */ + P.toPrecision = function ( sd, rm ) { + return format( this, sd != null && isValidInt( sd, 1, MAX, 24, 'precision' ) + ? sd | 0 : null, rm, 24 ); + }; + + + /* + * Return a string representing the value of this BigNumber in base b, or base 10 if b is + * omitted. If a base is specified, including base 10, round according to DECIMAL_PLACES and + * ROUNDING_MODE. If a base is not specified, and this BigNumber has a positive exponent + * that is equal to or greater than TO_EXP_POS, or a negative exponent equal to or less than + * TO_EXP_NEG, return exponential notation. + * + * [b] {number} Integer, 2 to 64 inclusive. + * + * 'toString() base not an integer: {b}' + * 'toString() base out of range: {b}' + */ + P.toString = function (b) { + var str, + n = this, + s = n.s, + e = n.e; + + // Infinity or NaN? + if ( e === null ) { + + if (s) { + str = 'Infinity'; + if ( s < 0 ) str = '-' + str; + } else { + str = 'NaN'; + } + } else { + str = coeffToString( n.c ); + + if ( b == null || !isValidInt( b, 2, 64, 25, 'base' ) ) { + str = e <= TO_EXP_NEG || e >= TO_EXP_POS + ? toExponential( str, e ) + : toFixedPoint( str, e ); + } else { + str = convertBase( toFixedPoint( str, e ), b | 0, 10, s ); + } + + if ( s < 0 && n.c[0] ) str = '-' + str; + } + + return str; + }; + + + /* + * Return a new BigNumber whose value is the value of this BigNumber truncated to a whole + * number. + */ + P.truncated = P.trunc = function () { + return round( new BigNumber(this), this.e + 1, 1 ); + }; + + + + /* + * Return as toString, but do not accept a base argument. + */ + P.valueOf = P.toJSON = function () { + return this.toString(); + }; + + + // Aliases for BigDecimal methods. + //P.add = P.plus; // P.add included above + //P.subtract = P.minus; // P.sub included above + //P.multiply = P.times; // P.mul included above + //P.divide = P.div; + //P.remainder = P.mod; + //P.compareTo = P.cmp; + //P.negate = P.neg; + + + if ( configObj != null ) BigNumber.config(configObj); + + return BigNumber; + } + + + // PRIVATE HELPER FUNCTIONS + + + function bitFloor(n) { + var i = n | 0; + return n > 0 || n === i ? i : i - 1; + } + + + // Return a coefficient array as a string of base 10 digits. + function coeffToString(a) { + var s, z, + i = 1, + j = a.length, + r = a[0] + ''; + + for ( ; i < j; ) { + s = a[i++] + ''; + z = LOG_BASE - s.length; + for ( ; z--; s = '0' + s ); + r += s; + } + + // Determine trailing zeros. 
+ for ( j = r.length; r.charCodeAt(--j) === 48; ); + return r.slice( 0, j + 1 || 1 ); + } + + + // Compare the value of BigNumbers x and y. + function compare( x, y ) { + var a, b, + xc = x.c, + yc = y.c, + i = x.s, + j = y.s, + k = x.e, + l = y.e; + + // Either NaN? + if ( !i || !j ) return null; + + a = xc && !xc[0]; + b = yc && !yc[0]; + + // Either zero? + if ( a || b ) return a ? b ? 0 : -j : i; + + // Signs differ? + if ( i != j ) return i; + + a = i < 0; + b = k == l; + + // Either Infinity? + if ( !xc || !yc ) return b ? 0 : !xc ^ a ? 1 : -1; + + // Compare exponents. + if ( !b ) return k > l ^ a ? 1 : -1; + + j = ( k = xc.length ) < ( l = yc.length ) ? k : l; + + // Compare digit by digit. + for ( i = 0; i < j; i++ ) if ( xc[i] != yc[i] ) return xc[i] > yc[i] ^ a ? 1 : -1; + + // Compare lengths. + return k == l ? 0 : k > l ^ a ? 1 : -1; + } + + + /* + * Return true if n is a valid number in range, otherwise false. + * Use for argument validation when ERRORS is false. + * Note: parseInt('1e+1') == 1 but parseFloat('1e+1') == 10. + */ + function intValidatorNoErrors( n, min, max ) { + return ( n = truncate(n) ) >= min && n <= max; + } + + + function isArray(obj) { + return Object.prototype.toString.call(obj) == '[object Array]'; + } + + + /* + * Convert string of baseIn to an array of numbers of baseOut. + * Eg. convertBase('255', 10, 16) returns [15, 15]. + * Eg. convertBase('ff', 16, 10) returns [2, 5, 5]. + */ + function toBaseOut( str, baseIn, baseOut ) { + var j, + arr = [0], + arrL, + i = 0, + len = str.length; + + for ( ; i < len; ) { + for ( arrL = arr.length; arrL--; arr[arrL] *= baseIn ); + arr[ j = 0 ] += ALPHABET.indexOf( str.charAt( i++ ) ); + + for ( ; j < arr.length; j++ ) { + + if ( arr[j] > baseOut - 1 ) { + if ( arr[j + 1] == null ) arr[j + 1] = 0; + arr[j + 1] += arr[j] / baseOut | 0; + arr[j] %= baseOut; + } + } + } + + return arr.reverse(); + } + + + function toExponential( str, e ) { + return ( str.length > 1 ? str.charAt(0) + '.' + str.slice(1) : str ) + + ( e < 0 ? 'e' : 'e+' ) + e; + } + + + function toFixedPoint( str, e ) { + var len, z; + + // Negative exponent? + if ( e < 0 ) { + + // Prepend zeros. + for ( z = '0.'; ++e; z += '0' ); + str = z + str; + + // Positive exponent + } else { + len = str.length; + + // Append zeros. + if ( ++e > len ) { + for ( z = '0', e -= len; --e; z += '0' ); + str += z; + } else if ( e < len ) { + str = str.slice( 0, e ) + '.' + str.slice(e); + } + } + + return str; + } + + + function truncate(n) { + n = parseFloat(n); + return n < 0 ? mathceil(n) : mathfloor(n); + } + + + // EXPORT + + + BigNumber = another(); + + // AMD. + if ( typeof define == 'function' && define.amd ) { + define( function () { return BigNumber; } ); + + // Node and other environments that support module.exports. + } else if ( typeof module != 'undefined' && module.exports ) { + module.exports = BigNumber; + if ( !crypto ) try { crypto = require('crypto'); } catch (e) {} + + // Browser. 
+ } else { + global.BigNumber = BigNumber; + } +})(this); -},{}],"web3":[function(require,module,exports){ +},{"crypto":31}],"web3":[function(require,module,exports){ var web3 = require('./lib/web3'); web3.providers.HttpProvider = require('./lib/web3/httpprovider'); web3.providers.QtSyncProvider = require('./lib/web3/qtsync'); web3.eth.contract = require('./lib/web3/contract'); +web3.eth.namereg = require('./lib/web3/namereg'); +web3.eth.sendIBANTransaction = require('./lib/web3/transfer'); // dont override global variable if (typeof window !== 'undefined' && typeof window.web3 === 'undefined') { @@ -3872,8 +8283,8 @@ if (typeof window !== 'undefined' && typeof window.web3 === 'undefined') { module.exports = web3; -},{"./lib/web3":8,"./lib/web3/contract":10,"./lib/web3/httpprovider":18,"./lib/web3/qtsync":23}]},{},["web3"]) +},{"./lib/web3":9,"./lib/web3/contract":11,"./lib/web3/httpprovider":19,"./lib/web3/namereg":23,"./lib/web3/qtsync":26,"./lib/web3/transfer":29}]},{},["web3"]) -//# sourceMappingURL=web3-light.js.map -` +//# sourceMappingURL=web3.js.map +` \ No newline at end of file diff --git a/rpc/api/api.go b/rpc/api/api.go index 93dc3058c..153c73f48 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -4,14 +4,26 @@ import "github.com/ethereum/go-ethereum/rpc/shared" const ( // List with all API's which are offered over the IPC interface by default - DefaultIpcApis = "eth" + DefaultIpcApis = "eth,web3" + + EthApiName = "eth" + MergedApiName = "merged" + Web3ApiName = "web3" ) // Ethereum RPC API interface type EthereumApi interface { + // API identifier + Name() string + // Execute the given request and returns the response or an error Execute(*shared.Request) (interface{}, error) // List of supported RCP methods this API provides Methods() []string } + +// Merge multiple API's to a single API instance +func Merge(apis ...EthereumApi) EthereumApi { + return newMergedApi(apis...) 
+} diff --git a/rpc/api/eth.go b/rpc/api/eth.go index fa14aa41e..0a8cecdbc 100644 --- a/rpc/api/eth.go +++ b/rpc/api/eth.go @@ -93,6 +93,10 @@ func (self *EthApi) Execute(req *shared.Request) (interface{}, error) { return nil, shared.NewNotImplementedError(req.Method) } +func (self *EthApi) Name() string { + return EthApiName +} + func (self *EthApi) Accounts(req *shared.Request) (interface{}, error) { return self.xeth.Accounts(), nil } diff --git a/rpc/api/mergedapi.go b/rpc/api/mergedapi.go new file mode 100644 index 000000000..7784661d7 --- /dev/null +++ b/rpc/api/mergedapi.go @@ -0,0 +1,56 @@ +package api + +import "github.com/ethereum/go-ethereum/rpc/shared" + +// combines multiple API's +type mergedApi struct { + apis []string + methods map[string]EthereumApi +} + +// create new merged api instance +func newMergedApi(apis ...EthereumApi) *mergedApi { + mergedApi := new(mergedApi) + mergedApi.apis = make([]string, len(apis)) + mergedApi.methods = make(map[string]EthereumApi) + + for i, api := range apis { + mergedApi.apis[i] = api.Name() + for _, method := range api.Methods() { + mergedApi.methods[method] = api + } + } + return mergedApi +} + +// Supported RPC methods +func (self *mergedApi) Methods() []string { + all := make([]string, len(self.methods)) + for method, _ := range self.methods { + all = append(all, method) + } + return all +} + +// Call the correct API's Execute method for the given request +func (self *mergedApi) Execute(req *shared.Request) (interface{}, error) { + if res, _ := self.handle(req); res != nil { + return res, nil + } + if api, found := self.methods[req.Method]; found { + return api.Execute(req) + } + return nil, shared.NewNotImplementedError(req.Method) +} + +func (self *mergedApi) Name() string { + return MergedApiName +} + +func (self *mergedApi) handle(req *shared.Request) (interface{}, error) { + if req.Method == "support_apis" { // provided API's + return self.apis, nil + } + + return nil, nil +} diff --git a/rpc/api/mergedapi_js.go b/rpc/api/mergedapi_js.go new file mode 100644 index 000000000..778f64ec1 --- /dev/null +++ b/rpc/api/mergedapi_js.go @@ -0,0 +1 @@ +package api diff --git a/rpc/api/utils.go b/rpc/api/utils.go index a62058140..7024365e4 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -8,11 +8,6 @@ import ( "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/rpc/codec" "github.com/ethereum/go-ethereum/xeth" - "github.com/ethereum/go-ethereum/rpc/shared" -) - -const ( - EthApiName = "eth" ) // Parse a comma separated API string to individual api's @@ -28,6 +23,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. switch strings.ToLower(strings.TrimSpace(name)) { case EthApiName: apis[i] = NewEthApi(xeth, codec) + case Web3ApiName: + apis[i] = NewWeb3(xeth, codec) default: return nil, fmt.Errorf("Unknown API '%s'", name) } @@ -35,43 +32,3 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. 
return apis, nil } - -// combines multiple API's -type mergedApi struct { - apis map[string]EthereumApi -} - -// create new merged api instance -func newMergedApi(apis ...EthereumApi) *mergedApi { - mergedApi := new(mergedApi) - mergedApi.apis = make(map[string]EthereumApi) - - for _, api := range apis { - for _, method := range api.Methods() { - mergedApi.apis[method] = api - } - } - return mergedApi -} - -// Supported RPC methods -func (self *mergedApi) Methods() []string { - all := make([]string, len(self.apis)) - for method, _ := range self.apis { - all = append(all, method) - } - return all -} - -// Call the correct API's Execute method for the given request -func (self *mergedApi) Execute(req *shared.Request) (interface{}, error) { - if api, found := self.apis[req.Method]; found { - return api.Execute(req) - } - return nil, shared.NewNotImplementedError(req.Method) -} - -// Merge multiple API's to a single API instance -func Merge(apis ...EthereumApi) EthereumApi { - return newMergedApi(apis...) -} diff --git a/rpc/api/web3.go b/rpc/api/web3.go new file mode 100644 index 000000000..4c51c4a97 --- /dev/null +++ b/rpc/api/web3.go @@ -0,0 +1,84 @@ +package api + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "github.com/ethereum/go-ethereum/xeth" +) + +const ( + Web3Version = "1.0.0" +) + +var ( +// mapping between methods and handlers + Web3Mapping = map[string]web3handler{ + "web3_sha3": (*web3).Sha3, + "web3_clientVersion": (*web3).ClientVersion, + } +) + +// web3 callback handler +type web3handler func(*web3, *shared.Request) (interface{}, error) + +// web3 api provider +type web3 struct { + xeth *xeth.XEth + methods map[string]web3handler + codec codec.ApiCoder +} + +// create a new web3 api instance +func NewWeb3(xeth *xeth.XEth, coder codec.Codec) *web3 { + return &web3{ + xeth: xeth, + methods: Web3Mapping, + codec: coder.New(nil), + } +} + +// collection with supported methods +func (self *web3) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +// Execute given request +func (self *web3) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, &shared.NotImplementedError{req.Method} +} + +func (self *web3) Name() string { + return Web3ApiName +} + +// Version of the API this instance provides +func (self *web3) Version() string { + return Web3Version +} + +// Calculates the sha3 over req.Params.Data +func (self *web3) Sha3(req *shared.Request) (interface{}, error) { + args := new(Sha3Args) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + + return common.ToHex(crypto.Sha3(common.FromHex(args.Data))), nil +} + +// returns the xeth client vrsion +func (self *web3) ClientVersion(req *shared.Request) (interface{}, error) { + return self.xeth.ClientVersion(), nil +} diff --git a/rpc/api/web3_args.go b/rpc/api/web3_args.go new file mode 100644 index 000000000..5455a6c8e --- /dev/null +++ b/rpc/api/web3_args.go @@ -0,0 +1,5 @@ +package api + +type Sha3Args struct { + Data string +} diff --git a/rpc/jeth.go b/rpc/jeth.go index 61be60dc7..d4f6dd460 100644 --- a/rpc/jeth.go +++ b/rpc/jeth.go @@ -6,15 +6,20 @@ import ( "github.com/ethereum/go-ethereum/jsre" "github.com/robertkrimen/otto" + 
"github.com/ethereum/go-ethereum/rpc/comms" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "reflect" ) type Jeth struct { - ethApi *EthereumApi - re *jsre.JSRE + ethApi *EthereumApi + re *jsre.JSRE + ipcpath string } -func NewJeth(ethApi *EthereumApi, re *jsre.JSRE) *Jeth { - return &Jeth{ethApi, re} +func NewJeth(ethApi *EthereumApi, re *jsre.JSRE, ipcpath string) *Jeth { + return &Jeth{ethApi, re, ipcpath} } func (self *Jeth) err(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) { @@ -34,6 +39,13 @@ func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) { return self.err(call, -32700, err.Error(), nil) } + client, err := comms.NewIpcClient(comms.IpcConfig{self.ipcpath}, codec.JSON) + if err != nil { + fmt.Println("Unable to connect to geth.") + return self.err(call, -32603, err.Error(), -1) + } + defer client.Close() + jsonreq, err := json.Marshal(reqif) var reqs []RpcRequest batch := true @@ -48,22 +60,43 @@ func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) { call.Otto.Run("var ret_response = new Array(response_len);") for i, req := range reqs { - var respif interface{} - err = self.ethApi.GetRequestReply(&req, &respif) + err := client.Send(&req) if err != nil { - fmt.Println("Error response:", err) + fmt.Println("Error send request:", err) return self.err(call, -32603, err.Error(), req.Id) } - call.Otto.Set("ret_jsonrpc", jsonrpcver) - call.Otto.Set("ret_id", req.Id) - res, _ := json.Marshal(respif) + respif, err := client.Recv() + if err != nil { + fmt.Println("Error recv response:", err) + return self.err(call, -32603, err.Error(), req.Id) + } - call.Otto.Set("ret_result", string(res)) - call.Otto.Set("response_idx", i) - response, err = call.Otto.Run(` - ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; - `) + if res, ok := respif.(shared.SuccessResponse); ok { + call.Otto.Set("ret_id", res.Id) + call.Otto.Set("ret_jsonrpc", res.Jsonrpc) + resObj, _ := json.Marshal(res.Result) + call.Otto.Set("ret_result", string(resObj)) + call.Otto.Set("response_idx", i) + + response, err = call.Otto.Run(` + ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; + `) + } else if res, ok := respif.(shared.ErrorResponse); ok { + fmt.Printf("Error: %s (%d)\n", res.Error.Message, res.Error.Code) + + call.Otto.Set("ret_id", res.Id) + call.Otto.Set("ret_jsonrpc", res.Jsonrpc) + call.Otto.Set("ret_error", res.Error) + call.Otto.Set("response_idx", i) + + response, _ = call.Otto.Run(` + ret_response = { jsonrpc: ret_jsonrpc, id: ret_id, error: ret_error }; + `) + return + } else { + fmt.Printf("unexpected response\n", reflect.TypeOf(respif)) + } } if !batch { -- cgit v1.2.3 From 862117e4bdcc5d255fc85fc35e223eec10f0ac7b Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 13:21:24 +0200 Subject: changed send methods for backwards compatability in geth console --- cmd/geth/js.go | 1 + cmd/geth/main.go | 4 ++-- rpc/jeth.go | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 2 deletions(-) diff --git a/cmd/geth/js.go b/cmd/geth/js.go index d1a6cc29d..7e6e10ca9 100644 --- a/cmd/geth/js.go +++ b/cmd/geth/js.go @@ -111,6 +111,7 @@ func (js *jsre) apiBindings(ipcpath string, f xeth.Frontend) { js.re.Set("jeth", struct{}{}) t, _ := js.re.Get("jeth") jethObj := t.Object() + jethObj.Set("send", jeth.Send) jethObj.Set("sendAsync", jeth.Send) diff --git 
a/cmd/geth/main.go b/cmd/geth/main.go index 5d7e102c4..8e55b310c 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -307,8 +307,8 @@ func console(ctx *cli.Context) { repl := newJSRE( ethereum, ctx.String(utils.JSpathFlag.Name), - ctx.GlobalString(utils.IPCPathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), + ctx.GlobalString(utils.IPCPathFlag.Name), true, nil, ) @@ -329,8 +329,8 @@ func execJSFiles(ctx *cli.Context) { repl := newJSRE( ethereum, ctx.String(utils.JSpathFlag.Name), - ctx.GlobalString(utils.IPCPathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), + ctx.GlobalString(utils.IPCPathFlag.Name), false, nil, ) diff --git a/rpc/jeth.go b/rpc/jeth.go index d4f6dd460..0473adc4d 100644 --- a/rpc/jeth.go +++ b/rpc/jeth.go @@ -39,6 +39,60 @@ func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) { return self.err(call, -32700, err.Error(), nil) } + jsonreq, err := json.Marshal(reqif) + var reqs []RpcRequest + batch := true + err = json.Unmarshal(jsonreq, &reqs) + if err != nil { + reqs = make([]RpcRequest, 1) + err = json.Unmarshal(jsonreq, &reqs[0]) + batch = false + } + + call.Otto.Set("response_len", len(reqs)) + call.Otto.Run("var ret_response = new Array(response_len);") + + for i, req := range reqs { + var respif interface{} + err = self.ethApi.GetRequestReply(&req, &respif) + if err != nil { + fmt.Println("Error response:", err) + return self.err(call, -32603, err.Error(), req.Id) + } + call.Otto.Set("ret_jsonrpc", jsonrpcver) + call.Otto.Set("ret_id", req.Id) + + res, _ := json.Marshal(respif) + + call.Otto.Set("ret_result", string(res)) + call.Otto.Set("response_idx", i) + response, err = call.Otto.Run(` + ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; + `) + } + + if !batch { + call.Otto.Run("ret_response = ret_response[0];") + } + + if call.Argument(1).IsObject() { + call.Otto.Set("callback", call.Argument(1)) + call.Otto.Run(` + if (Object.prototype.toString.call(callback) == '[object Function]') { + callback(null, ret_response); + } + `) + } + + return +} + +func (self *Jeth) SendIpc(call otto.FunctionCall) (response otto.Value) { + reqif, err := call.Argument(0).Export() + if err != nil { + return self.err(call, -32700, err.Error(), nil) + } + client, err := comms.NewIpcClient(comms.IpcConfig{self.ipcpath}, codec.JSON) if err != nil { fmt.Println("Unable to connect to geth.") -- cgit v1.2.3 From 0a600a03eeb5a7928233e541f26c0c81c70929fe Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 13:46:53 +0200 Subject: fixed unittest build problem --- cmd/geth/js_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/js_test.go b/cmd/geth/js_test.go index e7285a38d..20bde01f3 100644 --- a/cmd/geth/js_test.go +++ b/cmd/geth/js_test.go @@ -105,7 +105,7 @@ func testJEthRE(t *testing.T) (string, *testjethre, *eth.Ethereum) { t.Errorf("Error creating DocServer: %v", err) } tf := &testjethre{ds: ds, stateDb: ethereum.ChainManager().State().Copy()} - repl := newJSRE(ethereum, assetPath, "", false, tf) + repl := newJSRE(ethereum, assetPath, "", "", false, tf) tf.jsre = repl return tmp, tf, ethereum } -- cgit v1.2.3 From 8aea85e374f3e95a899a923ce84423ddc11eb286 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 13:50:36 +0200 Subject: fixed windows build problem --- rpc/comms/ipc_windows.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/comms/ipc_windows.go b/rpc/comms/ipc_windows.go index ff9015d03..d989f0b2b 100644 
--- a/rpc/comms/ipc_windows.go +++ b/rpc/comms/ipc_windows.go @@ -647,7 +647,7 @@ func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) { return &ipcClient{codec.New(c)}, nil } -func startIpc(cfg IpcConfig, codec codec.Codec, api api.Ethereum) error { +func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { os.Remove(cfg.Endpoint) // in case it still exists from a previous run l, err := Listen(cfg.Endpoint) -- cgit v1.2.3 From cb7f2d43b6da260dff80e4b705271c7088112c34 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 19:14:42 +0200 Subject: added console binary --- cmd/console/admin.go | 9 ++ cmd/console/console | Bin 0 -> 19332472 bytes cmd/console/contracts.go | 6 + cmd/console/history | 7 ++ cmd/console/js.go | 279 +++++++++++++++++++++++++++++++++++++++++++++++ cmd/console/main.go | 101 +++++++++++++++++ 6 files changed, 402 insertions(+) create mode 100644 cmd/console/admin.go create mode 100755 cmd/console/console create mode 100644 cmd/console/contracts.go create mode 100755 cmd/console/history create mode 100644 cmd/console/js.go create mode 100644 cmd/console/main.go diff --git a/cmd/console/admin.go b/cmd/console/admin.go new file mode 100644 index 000000000..dee88e3a0 --- /dev/null +++ b/cmd/console/admin.go @@ -0,0 +1,9 @@ +package main + +/* +node admin bindings +*/ + +func (js *jsre) adminBindings() { + +} diff --git a/cmd/console/console b/cmd/console/console new file mode 100755 index 000000000..113fb5dac Binary files /dev/null and b/cmd/console/console differ diff --git a/cmd/console/contracts.go b/cmd/console/contracts.go new file mode 100644 index 000000000..1f27838d1 --- /dev/null +++ b/cmd/console/contracts.go @@ -0,0 +1,6 @@ +package main + +var ( + globalRegistrar = `var GlobalRegistrar = 
web3.eth.contract([{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"name","outputs":[{"name":"o_name","type":"bytes32"}],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"owner","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"content","outputs":[{"name":"","type":"bytes32"}],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"addr","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"subRegistrar","outputs":[{"name":"o_subRegistrar","type":"address"}],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_newOwner","type":"address"}],"name":"transfer","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_registrar","type":"address"}],"name":"setSubRegistrar","outputs":[],"type":"function"},{"constant":false,"inputs":[],"name":"Registrar","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_a","type":"address"},{"name":"_primary","type":"bool"}],"name":"setAddress","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_content","type":"bytes32"}],"name":"setContent","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"disown","outputs":[],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"register","outputs":[{"name":"","type":"address"}],"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"}],"name":"Changed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"addr","type":"address"}],"name":"PrimaryChanged","type":"event"}]);` + globalRegistrarAddr = "0xc6d9d2cd449a754c494264e1809c50e34d64562b" +) diff --git a/cmd/console/history b/cmd/console/history new file mode 100755 index 000000000..728b6994a --- /dev/null +++ b/cmd/console/history @@ -0,0 +1,7 @@ +eth.accounts +help +eth +eth.getBlock(21) +net +admin +eth diff --git a/cmd/console/js.go b/cmd/console/js.go new file mode 100644 index 000000000..8b9137add --- /dev/null +++ b/cmd/console/js.go @@ -0,0 +1,279 @@ +// Copyright (c) 2013-2014, Jeffrey Wilcke. All rights reserved. +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +// MA 02110-1301 USA + +package main + +import ( + "bufio" + "fmt" + "math/big" + "os" + "os/signal" + "path/filepath" + "strings" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common/docserver" + re "github.com/ethereum/go-ethereum/jsre" + "github.com/ethereum/go-ethereum/rpc" + "github.com/peterh/liner" + "github.com/robertkrimen/otto" +) + +type prompter interface { + AppendHistory(string) + Prompt(p string) (string, error) + PasswordPrompt(p string) (string, error) +} + +type dumbterm struct{ r *bufio.Reader } + +func (r dumbterm) Prompt(p string) (string, error) { + fmt.Print(p) + line, err := r.r.ReadString('\n') + return strings.TrimSuffix(line, "\n"), err +} + +func (r dumbterm) PasswordPrompt(p string) (string, error) { + fmt.Println("!! Unsupported terminal, password will echo.") + fmt.Print(p) + input, err := bufio.NewReader(os.Stdin).ReadString('\n') + fmt.Println() + return input, err +} + +func (r dumbterm) AppendHistory(string) {} + +type jsre struct { + re *re.JSRE + wait chan *big.Int + ps1 string + atexit func() + datadir string + prompter +} + +func newJSRE(libPath, ipcpath string) *jsre { + js := &jsre{ps1: "> "} + js.wait = make(chan *big.Int) + + // update state in separare forever blocks + js.re = re.New(libPath) + js.apiBindings(ipcpath) + + if !liner.TerminalSupported() { + js.prompter = dumbterm{bufio.NewReader(os.Stdin)} + } else { + lr := liner.NewLiner() + js.withHistory(func(hist *os.File) { lr.ReadHistory(hist) }) + lr.SetCtrlCAborts(true) + js.prompter = lr + js.atexit = func() { + js.withHistory(func(hist *os.File) { hist.Truncate(0); lr.WriteHistory(hist) }) + lr.Close() + close(js.wait) + } + } + return js +} + +func (js *jsre) apiBindings(ipcpath string) { + ethApi := rpc.NewEthereumApi(nil) + jeth := rpc.NewJeth(ethApi, js.re, ipcpath) + + js.re.Set("jeth", struct{}{}) + t, _ := js.re.Get("jeth") + jethObj := t.Object() + jethObj.Set("send", jeth.SendIpc) + jethObj.Set("sendAsync", jeth.SendIpc) + + err := js.re.Compile("bignumber.js", re.BigNumber_JS) + if err != nil { + utils.Fatalf("Error loading bignumber.js: %v", err) + } + + err = js.re.Compile("ethereum.js", re.Web3_JS) + if err != nil { + utils.Fatalf("Error loading web3.js: %v", err) + } + + _, err = js.re.Eval("var web3 = require('web3');") + if err != nil { + utils.Fatalf("Error requiring web3: %v", err) + } + + _, err = js.re.Eval("web3.setProvider(jeth)") + if err != nil { + utils.Fatalf("Error setting web3 provider: %v", err) + } + _, err = js.re.Eval(` +var eth = web3.eth; + `) + + if err != nil { + utils.Fatalf("Error setting namespaces: %v", err) + } + + js.re.Eval(globalRegistrar + "registrar = GlobalRegistrar.at(\"" + globalRegistrarAddr + "\");") +} + +var ds, _ = docserver.New("/") + +/* +func (self *jsre) ConfirmTransaction(tx string) bool { + if self.ethereum.NatSpec { + notice := natspec.GetNotice(self.xeth, tx, ds) + fmt.Println(notice) + answer, _ := self.Prompt("Confirm Transaction [y/n]") + return strings.HasPrefix(strings.Trim(answer, " "), "y") + } else { + return true + } +} + +func (self *jsre) UnlockAccount(addr []byte) bool { + fmt.Printf("Please unlock account %x.\n", addr) + pass, err := self.PasswordPrompt("Passphrase: ") + if err != nil { + return false + } + // TODO: allow retry + if err := 
self.ethereum.AccountManager().Unlock(common.BytesToAddress(addr), pass); err != nil { + return false + } else { + fmt.Println("Account is now unlocked for this session.") + return true + } +} +*/ + +func (self *jsre) exec(filename string) error { + if err := self.re.Exec(filename); err != nil { + self.re.Stop(false) + return fmt.Errorf("Javascript Error: %v", err) + } + self.re.Stop(true) + return nil +} + +// show summary of current geth instance +func (self *jsre) welcome() { + self.re.Eval(` + console.log('Connected to ' + web3.version.client); + `) +} + +func (self *jsre) interactive() { + // Read input lines. + prompt := make(chan string) + inputln := make(chan string) + go func() { + defer close(inputln) + for { + line, err := self.Prompt(<-prompt) + if err != nil { + return + } + inputln <- line + } + }() + // Wait for Ctrl-C, too. + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt) + + defer func() { + if self.atexit != nil { + self.atexit() + } + self.re.Stop(false) + }() + for { + prompt <- self.ps1 + select { + case <-sig: + fmt.Println("caught interrupt, exiting") + return + case input, ok := <-inputln: + if !ok || indentCount <= 0 && input == "exit" { + return + } + if input == "" { + continue + } + str += input + "\n" + self.setIndent() + if indentCount <= 0 { + hist := str[:len(str)-1] + self.AppendHistory(hist) + self.parseInput(str) + str = "" + } + } + } +} + +func (self *jsre) withHistory(op func(*os.File)) { + hist, err := os.OpenFile(filepath.Join(self.datadir, "history"), os.O_RDWR|os.O_CREATE, os.ModePerm) + if err != nil { + fmt.Printf("unable to open history file: %v\n", err) + return + } + op(hist) + hist.Close() +} + +func (self *jsre) parseInput(code string) { + defer func() { + if r := recover(); r != nil { + fmt.Println("[native] error", r) + } + }() + value, err := self.re.Run(code) + if err != nil { + if ottoErr, ok := err.(*otto.Error); ok { + fmt.Println(ottoErr.String()) + } else { + fmt.Println(err) + } + return + } + self.printValue(value) +} + +var indentCount = 0 +var str = "" + +func (self *jsre) setIndent() { + open := strings.Count(str, "{") + open += strings.Count(str, "(") + closed := strings.Count(str, "}") + closed += strings.Count(str, ")") + indentCount = open - closed + if indentCount <= 0 { + self.ps1 = "> " + } else { + self.ps1 = strings.Join(make([]string, indentCount*2), "..") + self.ps1 += " " + } +} + +func (self *jsre) printValue(v interface{}) { + val, err := self.re.PrettyPrint(v) + if err == nil { + fmt.Printf("%v", val) + } +} diff --git a/cmd/console/main.go b/cmd/console/main.go new file mode 100644 index 000000000..781f1f8cb --- /dev/null +++ b/cmd/console/main.go @@ -0,0 +1,101 @@ +/* + This file is part of go-ethereum + + go-ethereum is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + go-ethereum is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with go-ethereum. If not, see . 
+*/ +/** + * @authors + * Jeffrey Wilcke + */ +package main + +import ( + "fmt" + "io" + "os" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" + "github.com/codegangsta/cli" + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/logger" +) + +const ( + ClientIdentifier = "Geth console" + Version = "0.9.27" +) + +var ( + gitCommit string // set via linker flag + nodeNameVersion string + app = utils.NewApp(Version, "the ether console") +) + +func init() { + if gitCommit == "" { + nodeNameVersion = Version + } else { + nodeNameVersion = Version + "-" + gitCommit[:8] + } + + app.Action = run + app.Flags = []cli.Flag{ + utils.IPCDisabledFlag, + utils.IPCPathFlag, + utils.VerbosityFlag, + utils.JSpathFlag, + } + + app.Before = func(ctx *cli.Context) error { + utils.SetupLogger(ctx) + return nil + } +} + +func main() { + // Wrap the standard output with a colorified stream (windows) + if isatty.IsTerminal(os.Stdout.Fd()) { + if pr, pw, err := os.Pipe(); err == nil { + go io.Copy(colorable.NewColorableStdout(), pr) + os.Stdout = pw + } + } + + var interrupted = false + utils.RegisterInterrupt(func(os.Signal) { + interrupted = true + }) + utils.HandleInterrupt() + + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, "Error: ", err) + } + + // we need to run the interrupt callbacks in case gui is closed + // this skips if we got here by actual interrupt stopping the GUI + if !interrupted { + utils.RunInterruptCallbacks(os.Interrupt) + } + logger.Flush() +} + +func run(ctx *cli.Context) { + jspath := ctx.GlobalString(utils.JSpathFlag.Name) + ipcpath := ctx.GlobalString(utils.IPCPathFlag.Name) + + repl := newJSRE(jspath, ipcpath) + repl.welcome() + repl.interactive() +} -- cgit v1.2.3 From 4b9b633dfe8c36d3a8909024ff23a1cdedce44d8 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 14:42:15 +0200 Subject: added miner API --- rpc/api/api.go | 3 +- rpc/api/miner.go | 143 ++++++++++++++++++++++++++++++++++++++++++++++++++ rpc/api/miner_args.go | 93 ++++++++++++++++++++++++++++++++ rpc/api/miner_js.go | 74 ++++++++++++++++++++++++++ rpc/api/utils.go | 11 ++++ 5 files changed, 323 insertions(+), 1 deletion(-) create mode 100644 rpc/api/miner.go create mode 100644 rpc/api/miner_args.go create mode 100644 rpc/api/miner_js.go diff --git a/rpc/api/api.go b/rpc/api/api.go index 153c73f48..7b3774b4e 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -4,10 +4,11 @@ import "github.com/ethereum/go-ethereum/rpc/shared" const ( // List with all API's which are offered over the IPC interface by default - DefaultIpcApis = "eth,web3" + DefaultIpcApis = "eth,web3,miner" EthApiName = "eth" MergedApiName = "merged" + MinerApiName = "miner" Web3ApiName = "web3" ) diff --git a/rpc/api/miner.go b/rpc/api/miner.go new file mode 100644 index 000000000..0e2ccf503 --- /dev/null +++ b/rpc/api/miner.go @@ -0,0 +1,143 @@ +package api + +import ( + "github.com/ethereum/ethash" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" +) + +const ( + MinerVersion = "1.0.0" +) + +var ( +// mapping between methods and handlers + MinerMapping = map[string]minerhandler{ + "miner_hashrate": (*miner).Hashrate, + "miner_makeDAG": (*miner).MakeDAG, + "miner_setExtra": (*miner).SetExtra, + "miner_setGasPrice": (*miner).SetGasPrice, + "miner_startAutoDAG": (*miner).StartAutoDAG, + "miner_start": (*miner).StartMiner, + "miner_stopAutoDAG": 
(*miner).StopAutoDAG, + "miner_stop": (*miner).StopMiner, + } +) + +// miner callback handler +type minerhandler func(*miner, *shared.Request) (interface{}, error) + +// miner api provider +type miner struct { + ethereum *eth.Ethereum + methods map[string]minerhandler + codec codec.ApiCoder +} + +// create a new miner api instance +func NewMinerApi(ethereum *eth.Ethereum, coder codec.Codec) *miner { + return &miner{ + ethereum: ethereum, + methods: MinerMapping, + codec: coder.New(nil), + } +} + +// Execute given request +func (self *miner) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, &shared.NotImplementedError{req.Method} +} + +// collection with supported methods +func (self *miner) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +func (self *miner) Name() string { + return MinerApiName +} + +func (self *miner) StartMiner(req *shared.Request) (interface{}, error) { + args := new(StartMinerArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + if args.Threads == -1 { // (not specified by user, use default) + args.Threads = self.ethereum.MinerThreads + } + + self.ethereum.StartAutoDAG() + err := self.ethereum.StartMining(args.Threads) + if err == nil { + return true, nil + } + + return false, err +} + +func (self *miner) StopMiner(req *shared.Request) (interface{}, error) { + self.ethereum.StopMining() + return true, nil +} + +func (self *miner) Hashrate(req *shared.Request) (interface{}, error) { + return self.ethereum.Miner().HashRate(), nil +} + +func (self *miner) SetExtra(req *shared.Request) (interface{}, error) { + args := new(SetExtraArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + self.ethereum.Miner().SetExtra([]byte(args.Data)) + return true, nil +} + +func (self *miner) SetGasPrice(req *shared.Request) (interface{}, error) { + args := new(GasPriceArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return false, err + } + + self.ethereum.Miner().SetGasPrice(common.String2Big(args.Price)) + return true, nil +} + +func (self *miner) StartAutoDAG(req *shared.Request) (interface{}, error) { + self.ethereum.StartAutoDAG() + return true, nil +} + +func (self *miner) StopAutoDAG(req *shared.Request) (interface{}, error) { + self.ethereum.StopAutoDAG() + return true, nil +} + +func (self *miner) MakeDAG(req *shared.Request) (interface{}, error) { + args := new(MakeDAGArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + + if args.BlockNumber < 0 { + return false, shared.NewValidationError("BlockNumber", "BlockNumber must be positive") + } + + err := ethash.MakeDAG(uint64(args.BlockNumber), "") + if err == nil { + return true, nil + } + return false, err +} \ No newline at end of file diff --git a/rpc/api/miner_args.go b/rpc/api/miner_args.go new file mode 100644 index 000000000..8b9114940 --- /dev/null +++ b/rpc/api/miner_args.go @@ -0,0 +1,93 @@ +package api + +import ( + "encoding/json" + + "math/big" + + "github.com/ethereum/go-ethereum/rpc/shared" +) + +type StartMinerArgs struct { + Threads int +} + +func (args *StartMinerArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) == 0 || obj[0] == nil { 
+ args.Threads = -1 + return nil + } + + var num *big.Int + if num, err = numString(obj[0]); err != nil { + return err + } + args.Threads = int(num.Int64()) + return nil +} + +type SetExtraArgs struct { + Data string +} + +func (args *SetExtraArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + extrastr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("Price", "not a string") + } + args.Data = extrastr + + return nil +} + +type GasPriceArgs struct { + Price string +} + +func (args *GasPriceArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + pricestr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("Price", "not a string") + } + args.Price = pricestr + + return nil +} + +type MakeDAGArgs struct { + BlockNumber int64 +} + +func (args *MakeDAGArgs) UnmarshalJSON(b []byte) (err error) { + args.BlockNumber = -1 + var obj []interface{} + + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + if err := blockHeight(obj[0], &args.BlockNumber); err != nil { + return err + } + + return nil +} \ No newline at end of file diff --git a/rpc/api/miner_js.go b/rpc/api/miner_js.go new file mode 100644 index 000000000..40fa3bc3d --- /dev/null +++ b/rpc/api/miner_js.go @@ -0,0 +1,74 @@ +package api + +const Miner_JS = ` +web3.extend({ + property: 'miner', + methods: + [ + new web3.extend.Method({ + name: 'start', + call: 'miner_start', + params: 1, + inputFormatter: [web3.extend.formatters.formatInputInt], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'stop', + call: 'miner_stop', + params: 1, + inputFormatter: [web3.extend.formatters.formatInputInt], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'getHashrate', + call: 'miner_hashrate', + params: 0, + inputFormatter: [], + outputFormatter: web3.extend.utils.toDecimal + }), + new web3.extend.Method({ + name: 'setExtra', + call: 'miner_setExtra', + params: 1, + inputFormatter: [web3.extend.utils.formatInputString], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'setGasPrice', + call: 'miner_setGasPrice', + params: 1, + inputFormatter: [web3.extend.utils.formatInputString], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'startAutoDAG', + call: 'miner_startAutoDAG', + params: 0, + inputFormatter: [], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'stopAutoDAG', + call: 'miner_stopAutoDAG', + params: 0, + inputFormatter: [], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'makeDAG', + call: 'miner_makeDAG', + params: 1, + inputFormatter: [web3.extend.formatters.inputDefaultBlockNumberFormatter], + outputFormatter: web3.extend.formatters.formatOutputBool + }) + ], + properties: + [ + new web3.extend.Property({ + name: 'hashrate', + getter: 'miner_hashrate', + outputFormatter: web3.extend.utils.toDecimal + }) + ] +}); +` \ No newline at end of file diff --git a/rpc/api/utils.go b/rpc/api/utils.go index 7024365e4..488eb1ec6 100644 --- 
a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -23,6 +23,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. switch strings.ToLower(strings.TrimSpace(name)) { case EthApiName: apis[i] = NewEthApi(xeth, codec) + case MinerApiName: + apis[i] = NewMinerApi(eth, codec) case Web3ApiName: apis[i] = NewWeb3(xeth, codec) default: @@ -32,3 +34,12 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. return apis, nil } + +func Javascript(name string) string { + switch strings.ToLower(strings.TrimSpace(name)) { + case MinerApiName: + return Miner_JS + } + + return "" +} -- cgit v1.2.3 From d2a87f6f72b1582fd6e220e2a00d7c3f5a6df335 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 14:50:11 +0200 Subject: added net API --- rpc/api/api.go | 9 +++--- rpc/api/mergedapi.go | 4 +-- rpc/api/miner.go | 4 +-- rpc/api/miner_args.go | 2 +- rpc/api/miner_js.go | 2 +- rpc/api/net.go | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++ rpc/api/net_js.go | 44 ++++++++++++++++++++++++++++ rpc/api/utils.go | 4 +++ rpc/api/web3.go | 2 +- 9 files changed, 141 insertions(+), 11 deletions(-) create mode 100644 rpc/api/net.go create mode 100644 rpc/api/net_js.go diff --git a/rpc/api/api.go b/rpc/api/api.go index 7b3774b4e..e4f0e7446 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -4,12 +4,13 @@ import "github.com/ethereum/go-ethereum/rpc/shared" const ( // List with all API's which are offered over the IPC interface by default - DefaultIpcApis = "eth,web3,miner" + DefaultIpcApis = "eth,miner,net,web3" - EthApiName = "eth" + EthApiName = "eth" MergedApiName = "merged" - MinerApiName = "miner" - Web3ApiName = "web3" + MinerApiName = "miner" + NetApiName = "net" + Web3ApiName = "web3" ) // Ethereum RPC API interface diff --git a/rpc/api/mergedapi.go b/rpc/api/mergedapi.go index 7784661d7..88c301aae 100644 --- a/rpc/api/mergedapi.go +++ b/rpc/api/mergedapi.go @@ -4,7 +4,7 @@ import "github.com/ethereum/go-ethereum/rpc/shared" // combines multiple API's type mergedApi struct { - apis []string + apis []string methods map[string]EthereumApi } @@ -48,7 +48,7 @@ func (self *mergedApi) Name() string { } func (self *mergedApi) handle(req *shared.Request) (interface{}, error) { - if req.Method == "support_apis" { // provided API's + if req.Method == "support_apis" { // provided API's return self.apis, nil } diff --git a/rpc/api/miner.go b/rpc/api/miner.go index 0e2ccf503..b22c4b7ad 100644 --- a/rpc/api/miner.go +++ b/rpc/api/miner.go @@ -13,7 +13,7 @@ const ( ) var ( -// mapping between methods and handlers + // mapping between methods and handlers MinerMapping = map[string]minerhandler{ "miner_hashrate": (*miner).Hashrate, "miner_makeDAG": (*miner).MakeDAG, @@ -140,4 +140,4 @@ func (self *miner) MakeDAG(req *shared.Request) (interface{}, error) { return true, nil } return false, err -} \ No newline at end of file +} diff --git a/rpc/api/miner_args.go b/rpc/api/miner_args.go index 8b9114940..6b3d16d48 100644 --- a/rpc/api/miner_args.go +++ b/rpc/api/miner_args.go @@ -90,4 +90,4 @@ func (args *MakeDAGArgs) UnmarshalJSON(b []byte) (err error) { } return nil -} \ No newline at end of file +} diff --git a/rpc/api/miner_js.go b/rpc/api/miner_js.go index 40fa3bc3d..f1c64c5e8 100644 --- a/rpc/api/miner_js.go +++ b/rpc/api/miner_js.go @@ -71,4 +71,4 @@ web3.extend({ }) ] }); -` \ No newline at end of file +` diff --git a/rpc/api/net.go b/rpc/api/net.go new file mode 100644 index 000000000..6799d68f6 --- /dev/null +++ b/rpc/api/net.go @@ -0,0 +1,81 @@ 
+package api + +import ( + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "github.com/ethereum/go-ethereum/xeth" +) + +var ( + // mapping between methods and handlers + netMapping = map[string]nethandler{ + "net_id": (*net).NetworkVersion, + "net_peerCount": (*net).PeerCount, + "net_listening": (*net).IsListening, + "net_peers": (*net).Peers, + } +) + +// net callback handler +type nethandler func(*net, *shared.Request) (interface{}, error) + +// net api provider +type net struct { + xeth *xeth.XEth + ethereum *eth.Ethereum + methods map[string]nethandler + codec codec.ApiCoder +} + +// create a new net api instance +func NewNetApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *net { + return &net{ + xeth: xeth, + ethereum: eth, + methods: netMapping, + codec: coder.New(nil), + } +} + +// collection with supported methods +func (self *net) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +// Execute given request +func (self *net) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, shared.NewNotImplementedError(req.Method) +} + +func (self *net) Name() string { + return NetApiName +} + +// Network version +func (self *net) NetworkVersion(req *shared.Request) (interface{}, error) { + return self.xeth.NetworkVersion(), nil +} + +// Number of connected peers +func (self *net) PeerCount(req *shared.Request) (interface{}, error) { + return self.xeth.PeerCount(), nil +} + +func (self *net) IsListening(req *shared.Request) (interface{}, error) { + return self.xeth.IsListening(), nil +} + +func (self *net) Peers(req *shared.Request) (interface{}, error) { + return self.ethereum.PeersInfo(), nil +} diff --git a/rpc/api/net_js.go b/rpc/api/net_js.go new file mode 100644 index 000000000..6ba0624d8 --- /dev/null +++ b/rpc/api/net_js.go @@ -0,0 +1,44 @@ +package api + +const Net_JS = ` +web3.extend({ + property: 'network', + methods: + [ + new web3.extend.Method({ + name: 'id', + call: 'net_id', + params: 0, + inputFormatter: [], + outputFormatter: web3.extend.formatters.formatOutputString + }), + new web3.extend.Method({ + name: 'getPeerCount', + call: 'net_peerCount', + params: 0, + inputFormatter: [], + outputFormatter: web3.extend.formatters.formatOutputString + }), + new web3.extend.Method({ + name: 'peers', + call: 'net_peers', + params: 0, + inputFormatter: [], + outputFormatter: function(obj) { return obj; } + }) + ], + properties: + [ + new web3.extend.Property({ + name: 'listening', + getter: 'net_listening', + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Property({ + name: 'peerCount', + getter: 'net_peerCount', + outputFormatter: web3.extend.utils.toDecimal + }) + ] +}); +` diff --git a/rpc/api/utils.go b/rpc/api/utils.go index 488eb1ec6..173a880d4 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -25,6 +25,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. 
apis[i] = NewEthApi(xeth, codec) case MinerApiName: apis[i] = NewMinerApi(eth, codec) + case NetApiName: + apis[i] = NewNetApi(xeth, eth, codec) case Web3ApiName: apis[i] = NewWeb3(xeth, codec) default: @@ -39,6 +41,8 @@ func Javascript(name string) string { switch strings.ToLower(strings.TrimSpace(name)) { case MinerApiName: return Miner_JS + case NetApiName: + return Net_JS } return "" diff --git a/rpc/api/web3.go b/rpc/api/web3.go index 4c51c4a97..c46457ce6 100644 --- a/rpc/api/web3.go +++ b/rpc/api/web3.go @@ -13,7 +13,7 @@ const ( ) var ( -// mapping between methods and handlers + // mapping between methods and handlers Web3Mapping = map[string]web3handler{ "web3_sha3": (*web3).Sha3, "web3_clientVersion": (*web3).ClientVersion, -- cgit v1.2.3 From faab931ce1282dea50c8fdf0577c42ee67f69828 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 19:51:38 +0200 Subject: only load supported api's --- cmd/console/js.go | 82 +++++++++++++++++++++++++++++++++++++++++++++++++---- cmd/console/main.go | 8 +++--- 2 files changed, 81 insertions(+), 9 deletions(-) diff --git a/cmd/console/js.go b/cmd/console/js.go index 8b9137add..ea0961a39 100644 --- a/cmd/console/js.go +++ b/cmd/console/js.go @@ -26,12 +26,18 @@ import ( "path/filepath" "strings" + "encoding/json" + "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common/docserver" re "github.com/ethereum/go-ethereum/jsre" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/comms" + "github.com/ethereum/go-ethereum/rpc/shared" "github.com/peterh/liner" "github.com/robertkrimen/otto" + "github.com/ethereum/go-ethereum/rpc/api" ) type prompter interface { @@ -120,9 +126,27 @@ func (js *jsre) apiBindings(ipcpath string) { if err != nil { utils.Fatalf("Error setting web3 provider: %v", err) } - _, err = js.re.Eval(` -var eth = web3.eth; - `) + + apis, err := js.suportedApis(ipcpath) + if err != nil { + utils.Fatalf("Unable to determine supported api's: %v", err) + } + + // load only supported API's in javascript runtime + shortcuts := "var eth = web3.eth; " + for _, apiName := range apis { + if apiName == api.Web3ApiName || apiName == api.EthApiName { + continue // manually mapped + } + + if err = js.re.Compile(fmt.Sprintf("%s.js", apiName), api.Javascript(apiName)); err == nil { + shortcuts += fmt.Sprintf("var %s = web3.%s; ", apiName, apiName) + } else { + utils.Fatalf("Error loading %s.js: %v", apiName, err) + } + } + + _, err = js.re.Eval(shortcuts) if err != nil { utils.Fatalf("Error setting namespaces: %v", err) @@ -170,11 +194,59 @@ func (self *jsre) exec(filename string) error { return nil } +func (self *jsre) suportedApis(ipcpath string) ([]string, error) { + config := comms.IpcConfig{ + Endpoint: ipcpath, + } + + client, err := comms.NewIpcClient(config, codec.JSON) + if err != nil { + return nil, err + } + + req := shared.Request{ + Id: 1, + Jsonrpc: "2.0", + Method: "support_apis", + } + + err = client.Send(req) + if err != nil { + return nil, err + } + + res, err := client.Recv() + if err != nil { + return nil, err + } + + if sucRes, ok := res.(shared.SuccessResponse); ok { + data, _ := json.Marshal(sucRes.Result) + apis := make([]string, 0) + err = json.Unmarshal(data, &apis) + if err == nil { + return apis, nil + } + } + + return nil, fmt.Errorf("Unable to determine supported API's") +} + // show summary of current geth instance -func (self *jsre) welcome() { +func (self *jsre) welcome(ipcpath string) { self.re.Eval(` - 
console.log('Connected to ' + web3.version.client); + console.log(' Connected to: ' + web3.version.client); `) + + if apis, err := self.suportedApis(ipcpath); err == nil { + apisStr := "" + for _, api := range apis { + apisStr += api + " " + } + self.re.Eval(fmt.Sprintf(`console.log("Available api's: %s");`, apisStr)) + } else { + utils.Fatalf("unable to determine supported api's - %v", err) + } } func (self *jsre) interactive() { diff --git a/cmd/console/main.go b/cmd/console/main.go index 781f1f8cb..9020a12fe 100644 --- a/cmd/console/main.go +++ b/cmd/console/main.go @@ -25,11 +25,11 @@ import ( "io" "os" - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" "github.com/codegangsta/cli" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/logger" + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" ) const ( @@ -40,7 +40,7 @@ const ( var ( gitCommit string // set via linker flag nodeNameVersion string - app = utils.NewApp(Version, "the ether console") + app = utils.NewApp(Version, "the ether console") ) func init() { @@ -96,6 +96,6 @@ func run(ctx *cli.Context) { ipcpath := ctx.GlobalString(utils.IPCPathFlag.Name) repl := newJSRE(jspath, ipcpath) - repl.welcome() + repl.welcome(ipcpath) repl.interactive() } -- cgit v1.2.3 From 09d0d55fc579701191ff34f38cc20b437ee23577 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Tue, 9 Jun 2015 09:48:18 +0200 Subject: added debug API --- rpc/api/api.go | 3 +- rpc/api/debug.go | 169 ++++++ rpc/api/debug_args.go | 47 ++ rpc/api/debug_js.go | 48 ++ rpc/api/utils.go | 4 + rpc/comms/ipc_unix.go | 10 +- rpc/comms/ipc_windows.go | 1395 +++++++++++++++++++++++----------------------- rpc/jeth.go | 7 +- rpc/shared/types.go | 9 +- 9 files changed, 980 insertions(+), 712 deletions(-) create mode 100644 rpc/api/debug.go create mode 100644 rpc/api/debug_args.go create mode 100644 rpc/api/debug_js.go diff --git a/rpc/api/api.go b/rpc/api/api.go index e4f0e7446..067a4d4e8 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -4,9 +4,10 @@ import "github.com/ethereum/go-ethereum/rpc/shared" const ( // List with all API's which are offered over the IPC interface by default - DefaultIpcApis = "eth,miner,net,web3" + DefaultIpcApis = "debug,eth,miner,net,web3" EthApiName = "eth" + DebugApiName = "debug" MergedApiName = "merged" MinerApiName = "miner" NetApiName = "net" diff --git a/rpc/api/debug.go b/rpc/api/debug.go new file mode 100644 index 000000000..26f43fe74 --- /dev/null +++ b/rpc/api/debug.go @@ -0,0 +1,169 @@ +package api + +import ( + "fmt" + + "github.com/ethereum/ethash" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "github.com/ethereum/go-ethereum/xeth" +) + +const ( + DebugVersion = "1.0.0" +) + +var ( + // mapping between methods and handlers + DebugMapping = map[string]debughandler{ + "debug_dumpBlock": (*DebugApi).DumpBlock, + "debug_getBlockRlp": (*DebugApi).GetBlockRlp, + "debug_printBlock": (*DebugApi).PrintBlock, + "debug_processBlock": (*DebugApi).ProcessBlock, + "debug_seedHash": (*DebugApi).SeedHash, + "debug_setHead": (*DebugApi).SetHead, + } +) + +// debug callback handler +type debughandler func(*DebugApi, *shared.Request) (interface{}, error) + +// admin api provider +type DebugApi struct { + xeth *xeth.XEth + ethereum *eth.Ethereum + methods map[string]debughandler + codec 
codec.ApiCoder +} + +// create a new debug api instance +func NewDebugApi(xeth *xeth.XEth, ethereum *eth.Ethereum, coder codec.Codec) *DebugApi { + return &DebugApi{ + xeth: xeth, + ethereum: ethereum, + methods: DebugMapping, + codec: coder.New(nil), + } +} + +// collection with supported methods +func (self *DebugApi) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +// Execute given request +func (self *DebugApi) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, &shared.NotImplementedError{req.Method} +} + +func (self *DebugApi) Name() string { + return DebugApiName +} + +func (self *DebugApi) PrintBlock(req *shared.Request) (interface{}, error) { + args := new(BlockNumArg) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + return fmt.Sprintf("%s", block), nil +} + +func (self *DebugApi) DumpBlock(req *shared.Request) (interface{}, error) { + args := new(BlockNumArg) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + if block == nil { + return nil, fmt.Errorf("block #%d not found", args.BlockNumber) + } + + stateDb := state.New(block.Root(), self.ethereum.StateDb()) + if stateDb == nil { + return nil, nil + } + + return stateDb.Dump(), nil +} + +func (self *DebugApi) GetBlockRlp(req *shared.Request) (interface{}, error) { + args := new(BlockNumArg) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + if block == nil { + return nil, fmt.Errorf("block #%d not found", args.BlockNumber) + } + encoded, err := rlp.EncodeToBytes(block) + return fmt.Sprintf("%x", encoded), err +} + +func (self *DebugApi) SetHead(req *shared.Request) (interface{}, error) { + args := new(BlockNumArg) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + if block == nil { + return nil, fmt.Errorf("block #%d not found", args.BlockNumber) + } + + self.ethereum.ChainManager().SetHead(block) + + return nil, nil +} + +func (self *DebugApi) ProcessBlock(req *shared.Request) (interface{}, error) { + args := new(BlockNumArg) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + block := self.xeth.EthBlockByNumber(args.BlockNumber) + if block == nil { + return nil, fmt.Errorf("block #%d not found", args.BlockNumber) + } + + old := vm.Debug + defer func() { vm.Debug = old }() + vm.Debug = true + + _, err := self.ethereum.BlockProcessor().RetryProcess(block) + if err == nil { + return true, nil + } + return false, err +} + +func (self *DebugApi) SeedHash(req *shared.Request) (interface{}, error) { + args := new(BlockNumArg) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + if hash, err := ethash.GetSeedHash(uint64(args.BlockNumber)); err == nil { + return fmt.Sprintf("0x%x", hash), nil + } else { + return nil, err + } +} diff --git a/rpc/api/debug_args.go 
b/rpc/api/debug_args.go new file mode 100644 index 000000000..b9b5aa27e --- /dev/null +++ b/rpc/api/debug_args.go @@ -0,0 +1,47 @@ +package api + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/rpc/shared" +) + +type WaitForBlockArgs struct { + MinHeight int + Timeout int // in seconds +} + +func (args *WaitForBlockArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) > 2 { + return fmt.Errorf("waitForArgs needs 0, 1, 2 arguments") + } + + // default values when not provided + args.MinHeight = -1 + args.Timeout = -1 + + if len(obj) >= 1 { + var minHeight *big.Int + if minHeight, err = numString(obj[0]); err != nil { + return err + } + args.MinHeight = int(minHeight.Int64()) + } + + if len(obj) >= 2 { + timeout, err := numString(obj[1]) + if err != nil { + return err + } + args.Timeout = int(timeout.Int64()) + } + + return nil +} diff --git a/rpc/api/debug_js.go b/rpc/api/debug_js.go new file mode 100644 index 000000000..43c545b2a --- /dev/null +++ b/rpc/api/debug_js.go @@ -0,0 +1,48 @@ +package api + +const Debug_JS = ` +web3.extend({ + property: 'debug', + methods: + [ + new web3.extend.Method({ + name: 'printBlock', + call: 'debug_printBlock', + params: 1, + inputFormatter: [web3.extend.formatters.formatInputInt], + outputFormatter: web3.extend.formatters.formatOutputString + }), + new web3.extend.Method({ + name: 'getBlockRlp', + call: 'debug_getBlockRlp', + params: 1, + inputFormatter: [web3.extend.formatters.formatInputInt], + outputFormatter: web3.extend.formatters.formatOutputString + }), + new web3.extend.Method({ + name: 'setHead', + call: 'debug_setHead', + params: 1, + inputFormatter: [web3.extend.formatters.formatInputInt], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'processBlock', + call: 'debug_processBlock', + params: 1, + inputFormatter: [web3.extend.formatters.formatInputInt], + outputFormatter: function(obj) { return obj; } + }), + new web3.extend.Method({ + name: 'seedHash', + call: 'debug_seedHash', + params: 1, + inputFormatter: [web3.extend.formatters.formatInputInt], + outputFormatter: web3.extend.formatters.formatOutputString + }) + ], + properties: + [ + ] +}); +` diff --git a/rpc/api/utils.go b/rpc/api/utils.go index 173a880d4..6e6d5c7b0 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -21,6 +21,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. for i, name := range names { switch strings.ToLower(strings.TrimSpace(name)) { + case DebugApiName: + apis[i] = NewDebugApi(xeth, eth, codec) case EthApiName: apis[i] = NewEthApi(xeth, codec) case MinerApiName: @@ -39,6 +41,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. 
func Javascript(name string) string { switch strings.ToLower(strings.TrimSpace(name)) { + case DebugApiName: + return Debug_JS case MinerApiName: return Miner_JS case NetApiName: diff --git a/rpc/comms/ipc_unix.go b/rpc/comms/ipc_unix.go index bb09d9547..5a94fd1e0 100644 --- a/rpc/comms/ipc_unix.go +++ b/rpc/comms/ipc_unix.go @@ -3,9 +3,9 @@ package comms import ( - "io" + "io" "net" - "os" + "os" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" @@ -70,8 +70,8 @@ func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { os.Remove(cfg.Endpoint) }() - - glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint) - + + glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint) + return nil } diff --git a/rpc/comms/ipc_windows.go b/rpc/comms/ipc_windows.go index d989f0b2b..c48dfb7fb 100644 --- a/rpc/comms/ipc_windows.go +++ b/rpc/comms/ipc_windows.go @@ -1,699 +1,696 @@ -// +build windows - -package comms - -import ( - "fmt" - "io" - "net" - "os" - "sync" - "syscall" - "time" - "unsafe" - - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/rpc/api" - "github.com/ethereum/go-ethereum/rpc/codec" - "github.com/ethereum/go-ethereum/rpc/shared" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") - procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") - procCreateEventW = modkernel32.NewProc("CreateEventW") - procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") -) - -func createNamedPipe(name *uint16, openMode uint32, pipeMode uint32, maxInstances uint32, outBufSize uint32, inBufSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(openMode), uintptr(pipeMode), uintptr(maxInstances), uintptr(outBufSize), uintptr(inBufSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func cancelIoEx(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func connectNamedPipe(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func disconnectNamedPipe(handle syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func waitNamedPipe(name *uint16, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - 
} else { - err = syscall.EINVAL - } - } - return -} - -func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) { - var _p0 uint32 - if manualReset { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if initialState { - _p1 = 1 - } else { - _p1 = 0 - } - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(sa)), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(name)), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, transferred *uint32, wait bool) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transferred)), uintptr(_p0), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - - -const ( - // openMode - pipe_access_duplex = 0x3 - pipe_access_inbound = 0x1 - pipe_access_outbound = 0x2 - - // openMode write flags - file_flag_first_pipe_instance = 0x00080000 - file_flag_write_through = 0x80000000 - file_flag_overlapped = 0x40000000 - - // openMode ACL flags - write_dac = 0x00040000 - write_owner = 0x00080000 - access_system_security = 0x01000000 - - // pipeMode - pipe_type_byte = 0x0 - pipe_type_message = 0x4 - - // pipeMode read mode flags - pipe_readmode_byte = 0x0 - pipe_readmode_message = 0x2 - - // pipeMode wait mode flags - pipe_wait = 0x0 - pipe_nowait = 0x1 - - // pipeMode remote-client mode flags - pipe_accept_remote_clients = 0x0 - pipe_reject_remote_clients = 0x8 - - pipe_unlimited_instances = 255 - - nmpwait_wait_forever = 0xFFFFFFFF - - // the two not-an-errors below occur if a client connects to the pipe between - // the server's CreateNamedPipe and ConnectNamedPipe calls. - error_no_data syscall.Errno = 0xE8 - error_pipe_connected syscall.Errno = 0x217 - error_pipe_busy syscall.Errno = 0xE7 - error_sem_timeout syscall.Errno = 0x79 - - error_bad_pathname syscall.Errno = 0xA1 - error_invalid_name syscall.Errno = 0x7B - - error_io_incomplete syscall.Errno = 0x3e4 -) - -var _ net.Conn = (*PipeConn)(nil) -var _ net.Listener = (*PipeListener)(nil) - -// ErrClosed is the error returned by PipeListener.Accept when Close is called -// on the PipeListener. -var ErrClosed = PipeError{"Pipe has been closed.", false} - -// PipeError is an error related to a call to a pipe -type PipeError struct { - msg string - timeout bool -} - -// Error implements the error interface -func (e PipeError) Error() string { - return e.msg -} - -// Timeout implements net.AddrError.Timeout() -func (e PipeError) Timeout() bool { - return e.timeout -} - -// Temporary implements net.AddrError.Temporary() -func (e PipeError) Temporary() bool { - return false -} - -// Dial connects to a named pipe with the given address. If the specified pipe is not available, -// it will wait indefinitely for the pipe to become available. -// -// The address must be of the form \\.\\pipe\ for local pipes and \\\pipe\ -// for remote pipes. -// -// Dial will return a PipeError if you pass in a badly formatted pipe name. 
-// -// Examples: -// // local pipe -// conn, err := Dial(`\\.\pipe\mypipename`) -// -// // remote pipe -// conn, err := Dial(`\\othercomp\pipe\mypipename`) -func Dial(address string) (*PipeConn, error) { - for { - conn, err := dial(address, nmpwait_wait_forever) - if err == nil { - return conn, nil - } - if isPipeNotReady(err) { - <-time.After(100 * time.Millisecond) - continue - } - return nil, err - } -} - -// DialTimeout acts like Dial, but will time out after the duration of timeout -func DialTimeout(address string, timeout time.Duration) (*PipeConn, error) { - deadline := time.Now().Add(timeout) - - now := time.Now() - for now.Before(deadline) { - millis := uint32(deadline.Sub(now) / time.Millisecond) - conn, err := dial(address, millis) - if err == nil { - return conn, nil - } - if err == error_sem_timeout { - // This is WaitNamedPipe's timeout error, so we know we're done - return nil, PipeError{fmt.Sprintf( - "Timed out waiting for pipe '%s' to come available", address), true} - } - if isPipeNotReady(err) { - left := deadline.Sub(time.Now()) - retry := 100 * time.Millisecond - if left > retry { - <-time.After(retry) - } else { - <-time.After(left - time.Millisecond) - } - now = time.Now() - continue - } - return nil, err - } - return nil, PipeError{fmt.Sprintf( - "Timed out waiting for pipe '%s' to come available", address), true} -} - -// isPipeNotReady checks the error to see if it indicates the pipe is not ready -func isPipeNotReady(err error) bool { - // Pipe Busy means another client just grabbed the open pipe end, - // and the server hasn't made a new one yet. - // File Not Found means the server hasn't created the pipe yet. - // Neither is a fatal error. - - return err == syscall.ERROR_FILE_NOT_FOUND || err == error_pipe_busy -} - -// newOverlapped creates a structure used to track asynchronous -// I/O requests that have been issued. -func newOverlapped() (*syscall.Overlapped, error) { - event, err := createEvent(nil, true, true, nil) - if err != nil { - return nil, err - } - return &syscall.Overlapped{HEvent: event}, nil -} - -// waitForCompletion waits for an asynchronous I/O request referred to by overlapped to complete. -// This function returns the number of bytes transferred by the operation and an error code if -// applicable (nil otherwise). -func waitForCompletion(handle syscall.Handle, overlapped *syscall.Overlapped) (uint32, error) { - _, err := syscall.WaitForSingleObject(overlapped.HEvent, syscall.INFINITE) - if err != nil { - return 0, err - } - var transferred uint32 - err = getOverlappedResult(handle, overlapped, &transferred, true) - return transferred, err -} - -// dial is a helper to initiate a connection to a named pipe that has been started by a server. -// The timeout is only enforced if the pipe server has already created the pipe, otherwise -// this function will return immediately. -func dial(address string, timeout uint32) (*PipeConn, error) { - name, err := syscall.UTF16PtrFromString(string(address)) - if err != nil { - return nil, err - } - // If at least one instance of the pipe has been created, this function - // will wait timeout milliseconds for it to become available. - // It will return immediately regardless of timeout, if no instances - // of the named pipe have been created yet. - // If this returns with no error, there is a pipe available. 
- if err := waitNamedPipe(name, timeout); err != nil { - if err == error_bad_pathname { - // badly formatted pipe name - return nil, badAddr(address) - } - return nil, err - } - pathp, err := syscall.UTF16PtrFromString(address) - if err != nil { - return nil, err - } - handle, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, - uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_OVERLAPPED, 0) - if err != nil { - return nil, err - } - return &PipeConn{handle: handle, addr: PipeAddr(address)}, nil -} - -// Listen returns a new PipeListener that will listen on a pipe with the given -// address. The address must be of the form \\.\pipe\ -// -// Listen will return a PipeError for an incorrectly formatted pipe name. -func Listen(address string) (*PipeListener, error) { - handle, err := createPipe(address, true) - if err == error_invalid_name { - return nil, badAddr(address) - } - if err != nil { - return nil, err - } - return &PipeListener{ - addr: PipeAddr(address), - handle: handle, - }, nil -} - -// PipeListener is a named pipe listener. Clients should typically -// use variables of type net.Listener instead of assuming named pipe. -type PipeListener struct { - addr PipeAddr - handle syscall.Handle - closed bool - - // acceptHandle contains the current handle waiting for - // an incoming connection or nil. - acceptHandle syscall.Handle - // acceptOverlapped is set before waiting on a connection. - // If not waiting, it is nil. - acceptOverlapped *syscall.Overlapped - // acceptMutex protects the handle and overlapped structure. - acceptMutex sync.Mutex -} - -// Accept implements the Accept method in the net.Listener interface; it -// waits for the next call and returns a generic net.Conn. -func (l *PipeListener) Accept() (net.Conn, error) { - c, err := l.AcceptPipe() - for err == error_no_data { - // Ignore clients that connect and immediately disconnect. - c, err = l.AcceptPipe() - } - if err != nil { - return nil, err - } - return c, nil -} - -// AcceptPipe accepts the next incoming call and returns the new connection. -// It might return an error if a client connected and immediately cancelled -// the connection. -func (l *PipeListener) AcceptPipe() (*PipeConn, error) { - if l == nil || l.addr == "" || l.closed { - return nil, syscall.EINVAL - } - - // the first time we call accept, the handle will have been created by the Listen - // call. This is to prevent race conditions where the client thinks the server - // isn't listening because it hasn't actually called create yet. 
After the first time, we'll - // have to create a new handle each time - handle := l.handle - if handle == 0 { - var err error - handle, err = createPipe(string(l.addr), false) - if err != nil { - return nil, err - } - } else { - l.handle = 0 - } - - overlapped, err := newOverlapped() - if err != nil { - return nil, err - } - defer syscall.CloseHandle(overlapped.HEvent) - if err := connectNamedPipe(handle, overlapped); err != nil && err != error_pipe_connected { - if err == error_io_incomplete || err == syscall.ERROR_IO_PENDING { - l.acceptMutex.Lock() - l.acceptOverlapped = overlapped - l.acceptHandle = handle - l.acceptMutex.Unlock() - defer func() { - l.acceptMutex.Lock() - l.acceptOverlapped = nil - l.acceptHandle = 0 - l.acceptMutex.Unlock() - }() - - _, err = waitForCompletion(handle, overlapped) - } - if err == syscall.ERROR_OPERATION_ABORTED { - // Return error compatible to net.Listener.Accept() in case the - // listener was closed. - return nil, ErrClosed - } - if err != nil { - return nil, err - } - } - return &PipeConn{handle: handle, addr: l.addr}, nil -} - -// Close stops listening on the address. -// Already Accepted connections are not closed. -func (l *PipeListener) Close() error { - if l.closed { - return nil - } - l.closed = true - if l.handle != 0 { - err := disconnectNamedPipe(l.handle) - if err != nil { - return err - } - err = syscall.CloseHandle(l.handle) - if err != nil { - return err - } - l.handle = 0 - } - l.acceptMutex.Lock() - defer l.acceptMutex.Unlock() - if l.acceptOverlapped != nil && l.acceptHandle != 0 { - // Cancel the pending IO. This call does not block, so it is safe - // to hold onto the mutex above. - if err := cancelIoEx(l.acceptHandle, l.acceptOverlapped); err != nil { - return err - } - err := syscall.CloseHandle(l.acceptOverlapped.HEvent) - if err != nil { - return err - } - l.acceptOverlapped.HEvent = 0 - err = syscall.CloseHandle(l.acceptHandle) - if err != nil { - return err - } - l.acceptHandle = 0 - } - return nil -} - -// Addr returns the listener's network address, a PipeAddr. -func (l *PipeListener) Addr() net.Addr { return l.addr } - -// PipeConn is the implementation of the net.Conn interface for named pipe connections. -type PipeConn struct { - handle syscall.Handle - addr PipeAddr - - // these aren't actually used yet - readDeadline *time.Time - writeDeadline *time.Time -} - -type iodata struct { - n uint32 - err error -} - -// completeRequest looks at iodata to see if a request is pending. If so, it waits for it to either complete or to -// abort due to hitting the specified deadline. Deadline may be set to nil to wait forever. If no request is pending, -// the content of iodata is returned. -func (c *PipeConn) completeRequest(data iodata, deadline *time.Time, overlapped *syscall.Overlapped) (int, error) { - if data.err == error_io_incomplete || data.err == syscall.ERROR_IO_PENDING { - var timer <-chan time.Time - if deadline != nil { - if timeDiff := deadline.Sub(time.Now()); timeDiff > 0 { - timer = time.After(timeDiff) - } - } - done := make(chan iodata) - go func() { - n, err := waitForCompletion(c.handle, overlapped) - done <- iodata{n, err} - }() - select { - case data = <-done: - case <-timer: - syscall.CancelIoEx(c.handle, overlapped) - data = iodata{0, timeout(c.addr.String())} - } - } - // Windows will produce ERROR_BROKEN_PIPE upon closing - // a handle on the other end of a connection. Go RPC - // expects an io.EOF error in this case. 
- if data.err == syscall.ERROR_BROKEN_PIPE { - data.err = io.EOF - } - return int(data.n), data.err -} - -// Read implements the net.Conn Read method. -func (c *PipeConn) Read(b []byte) (int, error) { - // Use ReadFile() rather than Read() because the latter - // contains a workaround that eats ERROR_BROKEN_PIPE. - overlapped, err := newOverlapped() - if err != nil { - return 0, err - } - defer syscall.CloseHandle(overlapped.HEvent) - var n uint32 - err = syscall.ReadFile(c.handle, b, &n, overlapped) - return c.completeRequest(iodata{n, err}, c.readDeadline, overlapped) -} - -// Write implements the net.Conn Write method. -func (c *PipeConn) Write(b []byte) (int, error) { - overlapped, err := newOverlapped() - if err != nil { - return 0, err - } - defer syscall.CloseHandle(overlapped.HEvent) - var n uint32 - err = syscall.WriteFile(c.handle, b, &n, overlapped) - return c.completeRequest(iodata{n, err}, c.writeDeadline, overlapped) -} - -// Close closes the connection. -func (c *PipeConn) Close() error { - return syscall.CloseHandle(c.handle) -} - -// LocalAddr returns the local network address. -func (c *PipeConn) LocalAddr() net.Addr { - return c.addr -} - -// RemoteAddr returns the remote network address. -func (c *PipeConn) RemoteAddr() net.Addr { - // not sure what to do here, we don't have remote addr.... - return c.addr -} - -// SetDeadline implements the net.Conn SetDeadline method. -// Note that timeouts are only supported on Windows Vista/Server 2008 and above -func (c *PipeConn) SetDeadline(t time.Time) error { - c.SetReadDeadline(t) - c.SetWriteDeadline(t) - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -// Note that timeouts are only supported on Windows Vista/Server 2008 and above -func (c *PipeConn) SetReadDeadline(t time.Time) error { - c.readDeadline = &t - return nil -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -// Note that timeouts are only supported on Windows Vista/Server 2008 and above -func (c *PipeConn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = &t - return nil -} - -// PipeAddr represents the address of a named pipe. -type PipeAddr string - -// Network returns the address's network name, "pipe". -func (a PipeAddr) Network() string { return "pipe" } - -// String returns the address of the pipe -func (a PipeAddr) String() string { - return string(a) -} - -// createPipe is a helper function to make sure we always create pipes -// with the same arguments, since subsequent calls to create pipe need -// to use the same arguments as the first one. If first is set, fail -// if the pipe already exists. 
-func createPipe(address string, first bool) (syscall.Handle, error) { - n, err := syscall.UTF16PtrFromString(address) - if err != nil { - return 0, err - } - mode := uint32(pipe_access_duplex | syscall.FILE_FLAG_OVERLAPPED) - if first { - mode |= file_flag_first_pipe_instance - } - return createNamedPipe(n, - mode, - pipe_type_byte, - pipe_unlimited_instances, - 512, 512, 0, nil) -} - -func badAddr(addr string) PipeError { - return PipeError{fmt.Sprintf("Invalid pipe address '%s'.", addr), false} -} -func timeout(addr string) PipeError { - return PipeError{fmt.Sprintf("Pipe IO timed out waiting for '%s'", addr), true} -} - - - -func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) { - c, err := Dial(cfg.Endpoint) - if err != nil { - return nil, err - } - - return &ipcClient{codec.New(c)}, nil -} - -func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { - os.Remove(cfg.Endpoint) // in case it still exists from a previous run - - l, err := Listen(cfg.Endpoint) - if err != nil { - return err - } - os.Chmod(cfg.Endpoint, 0600) - - go func() { - for { - conn, err := l.Accept() - if err != nil { - glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err) - continue - } - - go func(conn net.Conn) { - codec := codec.New(conn) - - for { - req, err := codec.ReadRequest() - if err == io.EOF { - codec.Close() - return - } else if err != nil { - glog.V(logger.Error).Infof("IPC recv err - %v\n", err) - codec.Close() - return - } - - var rpcResponse interface{} - res, err := api.Execute(req) - - rpcResponse = shared.NewRpcResponse(req.Id, req.Jsonrpc, res, err) - err = codec.WriteResponse(rpcResponse) - if err != nil { - glog.V(logger.Error).Infof("IPC send err - %v\n", err) - codec.Close() - return - } - } - }(conn) - } - }() - - glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint) - - return nil -} +// +build windows + +package comms + +import ( + "fmt" + "io" + "net" + "os" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rpc/api" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") +) + +func createNamedPipe(name *uint16, openMode uint32, pipeMode uint32, maxInstances uint32, outBufSize uint32, inBufSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(openMode), uintptr(pipeMode), uintptr(maxInstances), uintptr(outBufSize), uintptr(inBufSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func cancelIoEx(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(handle), 
uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connectNamedPipe(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func disconnectNamedPipe(handle syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) { + var _p0 uint32 + if manualReset { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if initialState { + _p1 = 1 + } else { + _p1 = 0 + } + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(sa)), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(name)), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, transferred *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transferred)), uintptr(_p0), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +const ( + // openMode + pipe_access_duplex = 0x3 + pipe_access_inbound = 0x1 + pipe_access_outbound = 0x2 + + // openMode write flags + file_flag_first_pipe_instance = 0x00080000 + file_flag_write_through = 0x80000000 + file_flag_overlapped = 0x40000000 + + // openMode ACL flags + write_dac = 0x00040000 + write_owner = 0x00080000 + access_system_security = 0x01000000 + + // pipeMode + pipe_type_byte = 0x0 + pipe_type_message = 0x4 + + // pipeMode read mode flags + pipe_readmode_byte = 0x0 + pipe_readmode_message = 0x2 + + // pipeMode wait mode flags + pipe_wait = 0x0 + pipe_nowait = 0x1 + + // pipeMode remote-client mode flags + pipe_accept_remote_clients = 0x0 + pipe_reject_remote_clients = 0x8 + + pipe_unlimited_instances = 255 + + nmpwait_wait_forever = 0xFFFFFFFF + + // the two not-an-errors below occur if a client connects to the pipe between + // the server's CreateNamedPipe and ConnectNamedPipe calls. + error_no_data syscall.Errno = 0xE8 + error_pipe_connected syscall.Errno = 0x217 + error_pipe_busy syscall.Errno = 0xE7 + error_sem_timeout syscall.Errno = 0x79 + + error_bad_pathname syscall.Errno = 0xA1 + error_invalid_name syscall.Errno = 0x7B + + error_io_incomplete syscall.Errno = 0x3e4 +) + +var _ net.Conn = (*PipeConn)(nil) +var _ net.Listener = (*PipeListener)(nil) + +// ErrClosed is the error returned by PipeListener.Accept when Close is called +// on the PipeListener. 
+var ErrClosed = PipeError{"Pipe has been closed.", false}
+
+// PipeError is an error related to a call to a pipe
+type PipeError struct {
+	msg     string
+	timeout bool
+}
+
+// Error implements the error interface
+func (e PipeError) Error() string {
+	return e.msg
+}
+
+// Timeout implements net.AddrError.Timeout()
+func (e PipeError) Timeout() bool {
+	return e.timeout
+}
+
+// Temporary implements net.AddrError.Temporary()
+func (e PipeError) Temporary() bool {
+	return false
+}
+
+// Dial connects to a named pipe with the given address. If the specified pipe is not available,
+// it will wait indefinitely for the pipe to become available.
+//
+// The address must be of the form \\.\pipe\<name> for local pipes and \\<computer>\pipe\<name>
+// for remote pipes.
+//
+// Dial will return a PipeError if you pass in a badly formatted pipe name.
+//
+// Examples:
+//   // local pipe
+//   conn, err := Dial(`\\.\pipe\mypipename`)
+//
+//   // remote pipe
+//   conn, err := Dial(`\\othercomp\pipe\mypipename`)
+func Dial(address string) (*PipeConn, error) {
+	for {
+		conn, err := dial(address, nmpwait_wait_forever)
+		if err == nil {
+			return conn, nil
+		}
+		if isPipeNotReady(err) {
+			<-time.After(100 * time.Millisecond)
+			continue
+		}
+		return nil, err
+	}
+}
+
+// DialTimeout acts like Dial, but will time out after the duration of timeout
+func DialTimeout(address string, timeout time.Duration) (*PipeConn, error) {
+	deadline := time.Now().Add(timeout)
+
+	now := time.Now()
+	for now.Before(deadline) {
+		millis := uint32(deadline.Sub(now) / time.Millisecond)
+		conn, err := dial(address, millis)
+		if err == nil {
+			return conn, nil
+		}
+		if err == error_sem_timeout {
+			// This is WaitNamedPipe's timeout error, so we know we're done
+			return nil, PipeError{fmt.Sprintf(
+				"Timed out waiting for pipe '%s' to come available", address), true}
+		}
+		if isPipeNotReady(err) {
+			left := deadline.Sub(time.Now())
+			retry := 100 * time.Millisecond
+			if left > retry {
+				<-time.After(retry)
+			} else {
+				<-time.After(left - time.Millisecond)
+			}
+			now = time.Now()
+			continue
+		}
+		return nil, err
+	}
+	return nil, PipeError{fmt.Sprintf(
+		"Timed out waiting for pipe '%s' to come available", address), true}
+}
+
+// isPipeNotReady checks the error to see if it indicates the pipe is not ready
+func isPipeNotReady(err error) bool {
+	// Pipe Busy means another client just grabbed the open pipe end,
+	// and the server hasn't made a new one yet.
+	// File Not Found means the server hasn't created the pipe yet.
+	// Neither is a fatal error.
+
+	return err == syscall.ERROR_FILE_NOT_FOUND || err == error_pipe_busy
+}
+
+// newOverlapped creates a structure used to track asynchronous
+// I/O requests that have been issued.
+func newOverlapped() (*syscall.Overlapped, error) {
+	event, err := createEvent(nil, true, true, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &syscall.Overlapped{HEvent: event}, nil
+}
+
+// waitForCompletion waits for an asynchronous I/O request referred to by overlapped to complete.
+// This function returns the number of bytes transferred by the operation and an error code if
+// applicable (nil otherwise).
+func waitForCompletion(handle syscall.Handle, overlapped *syscall.Overlapped) (uint32, error) {
+	_, err := syscall.WaitForSingleObject(overlapped.HEvent, syscall.INFINITE)
+	if err != nil {
+		return 0, err
+	}
+	var transferred uint32
+	err = getOverlappedResult(handle, overlapped, &transferred, true)
+	return transferred, err
+}
+
+// dial is a helper to initiate a connection to a named pipe that has been started by a server.
+// The timeout is only enforced if the pipe server has already created the pipe, otherwise
+// this function will return immediately.
+func dial(address string, timeout uint32) (*PipeConn, error) {
+	name, err := syscall.UTF16PtrFromString(string(address))
+	if err != nil {
+		return nil, err
+	}
+	// If at least one instance of the pipe has been created, this function
+	// will wait timeout milliseconds for it to become available.
+	// It will return immediately regardless of timeout, if no instances
+	// of the named pipe have been created yet.
+	// If this returns with no error, there is a pipe available.
+	if err := waitNamedPipe(name, timeout); err != nil {
+		if err == error_bad_pathname {
+			// badly formatted pipe name
+			return nil, badAddr(address)
+		}
+		return nil, err
+	}
+	pathp, err := syscall.UTF16PtrFromString(address)
+	if err != nil {
+		return nil, err
+	}
+	handle, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE,
+		uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), nil, syscall.OPEN_EXISTING,
+		syscall.FILE_FLAG_OVERLAPPED, 0)
+	if err != nil {
+		return nil, err
+	}
+	return &PipeConn{handle: handle, addr: PipeAddr(address)}, nil
+}
+
+// Listen returns a new PipeListener that will listen on a pipe with the given
+// address. The address must be of the form \\.\pipe\<name>
+//
+// Listen will return a PipeError for an incorrectly formatted pipe name.
+func Listen(address string) (*PipeListener, error) {
+	handle, err := createPipe(address, true)
+	if err == error_invalid_name {
+		return nil, badAddr(address)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &PipeListener{
+		addr:   PipeAddr(address),
+		handle: handle,
+	}, nil
+}
+
+// PipeListener is a named pipe listener. Clients should typically
+// use variables of type net.Listener instead of assuming named pipe.
+type PipeListener struct {
+	addr   PipeAddr
+	handle syscall.Handle
+	closed bool
+
+	// acceptHandle contains the current handle waiting for
+	// an incoming connection or nil.
+	acceptHandle syscall.Handle
+	// acceptOverlapped is set before waiting on a connection.
+	// If not waiting, it is nil.
+	acceptOverlapped *syscall.Overlapped
+	// acceptMutex protects the handle and overlapped structure.
+	acceptMutex sync.Mutex
+}
+
+// Accept implements the Accept method in the net.Listener interface; it
+// waits for the next call and returns a generic net.Conn.
+func (l *PipeListener) Accept() (net.Conn, error) {
+	c, err := l.AcceptPipe()
+	for err == error_no_data {
+		// Ignore clients that connect and immediately disconnect.
+		c, err = l.AcceptPipe()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// AcceptPipe accepts the next incoming call and returns the new connection.
+// It might return an error if a client connected and immediately cancelled
+// the connection.
+func (l *PipeListener) AcceptPipe() (*PipeConn, error) {
+	if l == nil || l.addr == "" || l.closed {
+		return nil, syscall.EINVAL
+	}
+
+	// the first time we call accept, the handle will have been created by the Listen
+	// call.
This is to prevent race conditions where the client thinks the server + // isn't listening because it hasn't actually called create yet. After the first time, we'll + // have to create a new handle each time + handle := l.handle + if handle == 0 { + var err error + handle, err = createPipe(string(l.addr), false) + if err != nil { + return nil, err + } + } else { + l.handle = 0 + } + + overlapped, err := newOverlapped() + if err != nil { + return nil, err + } + defer syscall.CloseHandle(overlapped.HEvent) + if err := connectNamedPipe(handle, overlapped); err != nil && err != error_pipe_connected { + if err == error_io_incomplete || err == syscall.ERROR_IO_PENDING { + l.acceptMutex.Lock() + l.acceptOverlapped = overlapped + l.acceptHandle = handle + l.acceptMutex.Unlock() + defer func() { + l.acceptMutex.Lock() + l.acceptOverlapped = nil + l.acceptHandle = 0 + l.acceptMutex.Unlock() + }() + + _, err = waitForCompletion(handle, overlapped) + } + if err == syscall.ERROR_OPERATION_ABORTED { + // Return error compatible to net.Listener.Accept() in case the + // listener was closed. + return nil, ErrClosed + } + if err != nil { + return nil, err + } + } + return &PipeConn{handle: handle, addr: l.addr}, nil +} + +// Close stops listening on the address. +// Already Accepted connections are not closed. +func (l *PipeListener) Close() error { + if l.closed { + return nil + } + l.closed = true + if l.handle != 0 { + err := disconnectNamedPipe(l.handle) + if err != nil { + return err + } + err = syscall.CloseHandle(l.handle) + if err != nil { + return err + } + l.handle = 0 + } + l.acceptMutex.Lock() + defer l.acceptMutex.Unlock() + if l.acceptOverlapped != nil && l.acceptHandle != 0 { + // Cancel the pending IO. This call does not block, so it is safe + // to hold onto the mutex above. + if err := cancelIoEx(l.acceptHandle, l.acceptOverlapped); err != nil { + return err + } + err := syscall.CloseHandle(l.acceptOverlapped.HEvent) + if err != nil { + return err + } + l.acceptOverlapped.HEvent = 0 + err = syscall.CloseHandle(l.acceptHandle) + if err != nil { + return err + } + l.acceptHandle = 0 + } + return nil +} + +// Addr returns the listener's network address, a PipeAddr. +func (l *PipeListener) Addr() net.Addr { return l.addr } + +// PipeConn is the implementation of the net.Conn interface for named pipe connections. +type PipeConn struct { + handle syscall.Handle + addr PipeAddr + + // these aren't actually used yet + readDeadline *time.Time + writeDeadline *time.Time +} + +type iodata struct { + n uint32 + err error +} + +// completeRequest looks at iodata to see if a request is pending. If so, it waits for it to either complete or to +// abort due to hitting the specified deadline. Deadline may be set to nil to wait forever. If no request is pending, +// the content of iodata is returned. +func (c *PipeConn) completeRequest(data iodata, deadline *time.Time, overlapped *syscall.Overlapped) (int, error) { + if data.err == error_io_incomplete || data.err == syscall.ERROR_IO_PENDING { + var timer <-chan time.Time + if deadline != nil { + if timeDiff := deadline.Sub(time.Now()); timeDiff > 0 { + timer = time.After(timeDiff) + } + } + done := make(chan iodata) + go func() { + n, err := waitForCompletion(c.handle, overlapped) + done <- iodata{n, err} + }() + select { + case data = <-done: + case <-timer: + syscall.CancelIoEx(c.handle, overlapped) + data = iodata{0, timeout(c.addr.String())} + } + } + // Windows will produce ERROR_BROKEN_PIPE upon closing + // a handle on the other end of a connection. 
Go RPC + // expects an io.EOF error in this case. + if data.err == syscall.ERROR_BROKEN_PIPE { + data.err = io.EOF + } + return int(data.n), data.err +} + +// Read implements the net.Conn Read method. +func (c *PipeConn) Read(b []byte) (int, error) { + // Use ReadFile() rather than Read() because the latter + // contains a workaround that eats ERROR_BROKEN_PIPE. + overlapped, err := newOverlapped() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(overlapped.HEvent) + var n uint32 + err = syscall.ReadFile(c.handle, b, &n, overlapped) + return c.completeRequest(iodata{n, err}, c.readDeadline, overlapped) +} + +// Write implements the net.Conn Write method. +func (c *PipeConn) Write(b []byte) (int, error) { + overlapped, err := newOverlapped() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(overlapped.HEvent) + var n uint32 + err = syscall.WriteFile(c.handle, b, &n, overlapped) + return c.completeRequest(iodata{n, err}, c.writeDeadline, overlapped) +} + +// Close closes the connection. +func (c *PipeConn) Close() error { + return syscall.CloseHandle(c.handle) +} + +// LocalAddr returns the local network address. +func (c *PipeConn) LocalAddr() net.Addr { + return c.addr +} + +// RemoteAddr returns the remote network address. +func (c *PipeConn) RemoteAddr() net.Addr { + // not sure what to do here, we don't have remote addr.... + return c.addr +} + +// SetDeadline implements the net.Conn SetDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetDeadline(t time.Time) error { + c.SetReadDeadline(t) + c.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetReadDeadline(t time.Time) error { + c.readDeadline = &t + return nil +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = &t + return nil +} + +// PipeAddr represents the address of a named pipe. +type PipeAddr string + +// Network returns the address's network name, "pipe". +func (a PipeAddr) Network() string { return "pipe" } + +// String returns the address of the pipe +func (a PipeAddr) String() string { + return string(a) +} + +// createPipe is a helper function to make sure we always create pipes +// with the same arguments, since subsequent calls to create pipe need +// to use the same arguments as the first one. If first is set, fail +// if the pipe already exists. 
+func createPipe(address string, first bool) (syscall.Handle, error) { + n, err := syscall.UTF16PtrFromString(address) + if err != nil { + return 0, err + } + mode := uint32(pipe_access_duplex | syscall.FILE_FLAG_OVERLAPPED) + if first { + mode |= file_flag_first_pipe_instance + } + return createNamedPipe(n, + mode, + pipe_type_byte, + pipe_unlimited_instances, + 512, 512, 0, nil) +} + +func badAddr(addr string) PipeError { + return PipeError{fmt.Sprintf("Invalid pipe address '%s'.", addr), false} +} +func timeout(addr string) PipeError { + return PipeError{fmt.Sprintf("Pipe IO timed out waiting for '%s'", addr), true} +} + +func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) { + c, err := Dial(cfg.Endpoint) + if err != nil { + return nil, err + } + + return &ipcClient{codec.New(c)}, nil +} + +func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { + os.Remove(cfg.Endpoint) // in case it still exists from a previous run + + l, err := Listen(cfg.Endpoint) + if err != nil { + return err + } + os.Chmod(cfg.Endpoint, 0600) + + go func() { + for { + conn, err := l.Accept() + if err != nil { + glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err) + continue + } + + go func(conn net.Conn) { + codec := codec.New(conn) + + for { + req, err := codec.ReadRequest() + if err == io.EOF { + codec.Close() + return + } else if err != nil { + glog.V(logger.Error).Infof("IPC recv err - %v\n", err) + codec.Close() + return + } + + var rpcResponse interface{} + res, err := api.Execute(req) + + rpcResponse = shared.NewRpcResponse(req.Id, req.Jsonrpc, res, err) + err = codec.WriteResponse(rpcResponse) + if err != nil { + glog.V(logger.Error).Infof("IPC send err - %v\n", err) + codec.Close() + return + } + } + }(conn) + } + }() + + glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint) + + return nil +} diff --git a/rpc/jeth.go b/rpc/jeth.go index 0473adc4d..e578775bb 100644 --- a/rpc/jeth.go +++ b/rpc/jeth.go @@ -4,12 +4,13 @@ import ( "encoding/json" "fmt" + "reflect" + "github.com/ethereum/go-ethereum/jsre" - "github.com/robertkrimen/otto" - "github.com/ethereum/go-ethereum/rpc/comms" "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/comms" "github.com/ethereum/go-ethereum/rpc/shared" - "reflect" + "github.com/robertkrimen/otto" ) type Jeth struct { diff --git a/rpc/shared/types.go b/rpc/shared/types.go index 600d39541..6a29fa88e 100644 --- a/rpc/shared/types.go +++ b/rpc/shared/types.go @@ -2,6 +2,7 @@ package shared import ( "encoding/json" + "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" ) @@ -45,15 +46,15 @@ func NewRpcResponse(id interface{}, jsonrpcver string, reply interface{}, err er var response interface{} switch err.(type) { - case nil: + case nil: response = &SuccessResponse{Jsonrpc: jsonrpcver, Id: id, Result: reply} - case *NotImplementedError: + case *NotImplementedError: jsonerr := &ErrorObject{-32601, err.Error()} response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr} - case *DecodeParamError, *InsufficientParamsError, *ValidationError, *InvalidTypeError: + case *DecodeParamError, *InsufficientParamsError, *ValidationError, *InvalidTypeError: jsonerr := &ErrorObject{-32602, err.Error()} response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr} - default: + default: jsonerr := &ErrorObject{-32603, err.Error()} response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr} } -- cgit v1.2.3 From 
08d72a9245ce6f1e11f84a6b59d66cb083bea9f9 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Tue, 9 Jun 2015 10:59:44 +0200 Subject: added personal API --- rpc/api/api.go | 31 +++++++++---- rpc/api/personal.go | 118 +++++++++++++++++++++++++++++++++++++++++++++++ rpc/api/personal_args.go | 81 ++++++++++++++++++++++++++++++++ rpc/api/personal_js.go | 34 ++++++++++++++ rpc/api/utils.go | 4 ++ 5 files changed, 259 insertions(+), 9 deletions(-) create mode 100644 rpc/api/personal.go create mode 100644 rpc/api/personal_args.go create mode 100644 rpc/api/personal_js.go diff --git a/rpc/api/api.go b/rpc/api/api.go index 067a4d4e8..d2c548ed1 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -1,17 +1,30 @@ package api -import "github.com/ethereum/go-ethereum/rpc/shared" +import ( + "strings" + + "github.com/ethereum/go-ethereum/rpc/shared" +) const ( + EthApiName = "eth" + DebugApiName = "debug" + MergedApiName = "merged" + MinerApiName = "miner" + NetApiName = "net" + PersonalApiName = "personal" + Web3ApiName = "web3" +) + +var ( // List with all API's which are offered over the IPC interface by default - DefaultIpcApis = "debug,eth,miner,net,web3" - - EthApiName = "eth" - DebugApiName = "debug" - MergedApiName = "merged" - MinerApiName = "miner" - NetApiName = "net" - Web3ApiName = "web3" + DefaultIpcApis = strings.Join([]string{ + EthApiName, + DebugApiName, + MinerApiName, + NetApiName, + PersonalApiName, + }, ",") ) // Ethereum RPC API interface diff --git a/rpc/api/personal.go b/rpc/api/personal.go new file mode 100644 index 000000000..d00363627 --- /dev/null +++ b/rpc/api/personal.go @@ -0,0 +1,118 @@ +package api + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "github.com/ethereum/go-ethereum/xeth" +) + +var ( + // mapping between methods and handlers + personalMapping = map[string]personalhandler{ + "personal_listAccounts": (*personal).ListAccounts, + "personal_newAccount": (*personal).NewAccount, + "personal_deleteAccount": (*personal).DeleteAccount, + "personal_unlockAccount": (*personal).UnlockAccount, + } +) + +// net callback handler +type personalhandler func(*personal, *shared.Request) (interface{}, error) + +// net api provider +type personal struct { + xeth *xeth.XEth + ethereum *eth.Ethereum + methods map[string]personalhandler + codec codec.ApiCoder +} + +// create a new net api instance +func NewPersonal(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *personal { + return &personal{ + xeth: xeth, + ethereum: eth, + methods: personalMapping, + codec: coder.New(nil), + } +} + +// collection with supported methods +func (self *personal) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +// Execute given request +func (self *personal) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, shared.NewNotImplementedError(req.Method) +} + +func (self *personal) Name() string { + return PersonalApiName +} + +func (self *personal) ListAccounts(req *shared.Request) (interface{}, error) { + return self.xeth.Accounts(), nil +} + +func (self *personal) NewAccount(req *shared.Request) (interface{}, error) { + args := new(NewAccountArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, 
shared.NewDecodeParamError(err.Error()) + } + + am := self.ethereum.AccountManager() + acc, err := am.NewAccount(args.Passphrase) + return acc.Address.Hex(), err +} + +func (self *personal) DeleteAccount(req *shared.Request) (interface{}, error) { + args := new(DeleteAccountArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + addr := common.HexToAddress(args.Address) + am := self.ethereum.AccountManager() + if err := am.DeleteAccount(addr, args.Passphrase); err == nil { + return true, nil + } else { + return false, err + } +} + +func (self *personal) UnlockAccount(req *shared.Request) (interface{}, error) { + args := new(UnlockAccountArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + var err error + am := self.ethereum.AccountManager() + addr := common.HexToAddress(args.Address) + + if args.Duration == -1 { + err = am.Unlock(addr, args.Passphrase) + } else { + err = am.TimedUnlock(addr, args.Passphrase, time.Duration(args.Duration)*time.Second) + } + + if err == nil { + return true, nil + } + return false, err +} diff --git a/rpc/api/personal_args.go b/rpc/api/personal_args.go new file mode 100644 index 000000000..b41fc06e7 --- /dev/null +++ b/rpc/api/personal_args.go @@ -0,0 +1,81 @@ +package api + +import ( + "encoding/json" + + "github.com/ethereum/go-ethereum/rpc/shared" +) + +type NewAccountArgs struct { + Passphrase string +} + +func (args *NewAccountArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + passhrase, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("passhrase", "not a string") + } + args.Passphrase = passhrase + + return nil +} + +type DeleteAccountArgs struct { + Address string + Passphrase string +} + +func (args *DeleteAccountArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + addr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("address", "not a string") + } + args.Address = addr + + passhrase, ok := obj[1].(string) + if !ok { + return shared.NewInvalidTypeError("passhrase", "not a string") + } + args.Passphrase = passhrase + + return nil +} + +type UnlockAccountArgs struct { + Address string + Passphrase string + Duration int +} + +func (args *UnlockAccountArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + args.Duration = -1 + + addrstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("address", "not a string") + } + args.Address = addrstr + + passphrasestr, ok := obj[1].(string) + if !ok { + return shared.NewInvalidTypeError("passphrase", "not a string") + } + args.Passphrase = passphrasestr + + return nil +} diff --git a/rpc/api/personal_js.go b/rpc/api/personal_js.go new file mode 100644 index 000000000..7fd9a2dea --- /dev/null +++ b/rpc/api/personal_js.go @@ -0,0 +1,34 @@ +package api + +const Personal_JS = ` +web3.extend({ + property: 'personal', + methods: + [ + new web3.extend.Method({ + name: 'listAccounts', + call: 'personal_listAccounts', + params: 0, + inputFormatter: [], + outputFormatter: function(obj) { return obj; } + }), + new web3.extend.Method({ + name: 'newAccount', + 
call: 'personal_newAccount', + params: 1, + inputFormatter: [web3.extend.formatters.formatInputString], + outputFormatter: web3.extend.formatters.formatOutputString + }), + new web3.extend.Method({ + name: 'unlockAccount', + call: 'personal_unlockAccount', + params: 3, + inputFormatter: [web3.extend.formatters.formatInputString,web3.extend.formatters.formatInputString,web3.extend.formatters.formatInputInt], + outputFormatter: web3.extend.formatters.formatOutputBool + }) + ], + properties: + [ + ] +}); +` diff --git a/rpc/api/utils.go b/rpc/api/utils.go index 6e6d5c7b0..eae23d351 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -29,6 +29,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. apis[i] = NewMinerApi(eth, codec) case NetApiName: apis[i] = NewNetApi(xeth, eth, codec) + case PersonalApiName: + apis[i] = NewPersonal(xeth, eth, codec) case Web3ApiName: apis[i] = NewWeb3(xeth, codec) default: @@ -47,6 +49,8 @@ func Javascript(name string) string { return Miner_JS case NetApiName: return Net_JS + case PersonalApiName: + return Personal_JS } return "" -- cgit v1.2.3 From cc9ae399338557b6671e8fc83bb696c5ddb068fe Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Tue, 9 Jun 2015 16:06:51 +0200 Subject: added admin API --- rpc/api/admin.go | 228 ++++++++++++++++++++++++++++++++++++++++++++++++++ rpc/api/admin_args.go | 107 +++++++++++++++++++++++ rpc/api/admin_js.go | 67 +++++++++++++++ rpc/api/api.go | 7 +- rpc/api/debug.go | 38 ++++----- rpc/api/eth.go | 180 +++++++++++++++++++-------------------- rpc/api/eth_args.go | 30 +++---- rpc/api/mergedapi.go | 14 ++-- rpc/api/miner.go | 46 +++++----- rpc/api/net.go | 30 +++---- rpc/api/net_js.go | 7 ++ rpc/api/personal.go | 30 +++---- rpc/api/utils.go | 8 +- rpc/api/web3.go | 24 +++--- 14 files changed, 613 insertions(+), 203 deletions(-) create mode 100644 rpc/api/admin.go create mode 100644 rpc/api/admin_args.go create mode 100644 rpc/api/admin_js.go diff --git a/rpc/api/admin.go b/rpc/api/admin.go new file mode 100644 index 000000000..c37463604 --- /dev/null +++ b/rpc/api/admin.go @@ -0,0 +1,228 @@ +package api + +import ( + "fmt" + "io" + "os" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "github.com/ethereum/go-ethereum/xeth" +) + +const ( + AdminVersion = "1.0.0" + importBatchSize = 2500 +) + +var ( + // mapping between methods and handlers + AdminMapping = map[string]adminhandler{ + // "admin_startRPC": (*adminApi).StartRPC, + // "admin_stopRPC": (*adminApi).StopRPC, + "admin_addPeer": (*adminApi).AddPeer, + "admin_peers": (*adminApi).Peers, + "admin_nodeInfo": (*adminApi).NodeInfo, + "admin_exportChain": (*adminApi).ExportChain, + "admin_importChain": (*adminApi).ImportChain, + "admin_verbosity": (*adminApi).Verbosity, + "admin_syncStatus": (*adminApi).SyncStatus, + "admin_setSolc": (*adminApi).SetSolc, + } +) + +// admin callback handler +type adminhandler func(*adminApi, *shared.Request) (interface{}, error) + +// admin api provider +type adminApi struct { + xeth *xeth.XEth + ethereum *eth.Ethereum + methods map[string]adminhandler + codec codec.ApiCoder +} + +// create a new admin api instance +func NewAdminApi(xeth *xeth.XEth, ethereum *eth.Ethereum, coder codec.Codec) *adminApi { + return &adminApi{ + xeth: xeth, + ethereum: 
ethereum, + methods: AdminMapping, + codec: coder.New(nil), + } +} + +// collection with supported methods +func (self *adminApi) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +// Execute given request +func (self *adminApi) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, &shared.NotImplementedError{req.Method} +} + +func (self *adminApi) Name() string { + return AdminApiName +} + +func (self *adminApi) AddPeer(req *shared.Request) (interface{}, error) { + args := new(AddPeerArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + err := self.ethereum.AddPeer(args.Url) + if err == nil { + return true, nil + } + return false, err +} + +func (self *adminApi) Peers(req *shared.Request) (interface{}, error) { + return self.ethereum.PeersInfo(), nil +} + +func (self *adminApi) StartRPC(req *shared.Request) (interface{}, error) { + return false, nil + // Enable when http rpc interface is refactored to prevent import cycles + // args := new(StartRpcArgs) + // if err := self.codec.Decode(req.Params, &args); err != nil { + // return nil, shared.NewDecodeParamError(err.Error()) + // } + // + // cfg := rpc.RpcConfig{ + // ListenAddress: args.Address, + // ListenPort: args.Port, + // } + // + // err := rpc.Start(self.xeth, cfg) + // if err == nil { + // return true, nil + // } + // return false, err +} + +func (self *adminApi) StopRPC(req *shared.Request) (interface{}, error) { + return false, nil + // Enable when http rpc interface is refactored to prevent import cycles + // rpc.Stop() + // return true, nil +} + +func (self *adminApi) NodeInfo(req *shared.Request) (interface{}, error) { + return self.ethereum.NodeInfo(), nil +} + +func hasAllBlocks(chain *core.ChainManager, bs []*types.Block) bool { + for _, b := range bs { + if !chain.HasBlock(b.Hash()) { + return false + } + } + return true +} + +func (self *adminApi) ImportChain(req *shared.Request) (interface{}, error) { + args := new(ImportExportChainArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + fh, err := os.Open(args.Filename) + if err != nil { + return false, err + } + defer fh.Close() + stream := rlp.NewStream(fh, 0) + + // Run actual the import. + blocks := make(types.Blocks, importBatchSize) + n := 0 + for batch := 0; ; batch++ { + + i := 0 + for ; i < importBatchSize; i++ { + var b types.Block + if err := stream.Decode(&b); err == io.EOF { + break + } else if err != nil { + return false, fmt.Errorf("at block %d: %v", n, err) + } + blocks[i] = &b + n++ + } + if i == 0 { + break + } + // Import the batch. 
+ if hasAllBlocks(self.ethereum.ChainManager(), blocks[:i]) { + continue + } + if _, err := self.ethereum.ChainManager().InsertChain(blocks[:i]); err != nil { + return false, fmt.Errorf("invalid block %d: %v", n, err) + } + } + return true, nil +} + +func (self *adminApi) ExportChain(req *shared.Request) (interface{}, error) { + args := new(ImportExportChainArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + fh, err := os.OpenFile(args.Filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return false, err + } + defer fh.Close() + if err := self.ethereum.ChainManager().Export(fh); err != nil { + return false, err + } + + return true, nil +} + +func (self *adminApi) Verbosity(req *shared.Request) (interface{}, error) { + args := new(VerbosityArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + glog.SetV(args.Level) + return true, nil +} + +func (self *adminApi) SyncStatus(req *shared.Request) (interface{}, error) { + pending, cached := self.ethereum.Downloader().Stats() + return map[string]interface{}{"available": pending, "waitingForImport": cached}, nil +} + +func (self *adminApi) SetSolc(req *shared.Request) (interface{}, error) { + args := new(SetSolcArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, shared.NewDecodeParamError(err.Error()) + } + + solc, err := self.xeth.SetSolc(args.Path) + if err != nil { + return nil, err + } + return solc.Info(), nil +} diff --git a/rpc/api/admin_args.go b/rpc/api/admin_args.go new file mode 100644 index 000000000..9c0cbdcb6 --- /dev/null +++ b/rpc/api/admin_args.go @@ -0,0 +1,107 @@ +package api + +import ( + "encoding/json" + + "math/big" + + "github.com/ethereum/go-ethereum/rpc/shared" +) + +type AddPeerArgs struct { + Url string +} + +func (args *AddPeerArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) != 1 { + return shared.NewDecodeParamError("Expected enode as argument") + } + + urlstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("url", "not a string") + } + args.Url = urlstr + + return nil +} + +type ImportExportChainArgs struct { + Filename string +} + +func (args *ImportExportChainArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) != 1 { + return shared.NewDecodeParamError("Expected filename as argument") + } + + filename, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("filename", "not a string") + } + args.Filename = filename + + return nil +} + +type VerbosityArgs struct { + Level int +} + +func (args *VerbosityArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) != 1 { + return shared.NewDecodeParamError("Expected enode as argument") + } + + if levelint, ok := obj[0].(int); ok { + args.Level = levelint + } else if levelstr, ok := obj[0].(string); ok { + if !ok { + return shared.NewInvalidTypeError("level", "not a string") + } + level, success := new(big.Int).SetString(levelstr, 0) + if !success { + return shared.NewDecodeParamError("Unable to parse verbosity level") + } + args.Level 
= int(level.Int64()) + } + + return nil +} + +type SetSolcArgs struct { + Path string +} + +func (args *SetSolcArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) != 1 { + return shared.NewDecodeParamError("Expected path as argument") + } + + if pathstr, ok := obj[0].(string); ok { + args.Path = pathstr + return nil + } + + return shared.NewInvalidTypeError("path", "not a string") +} diff --git a/rpc/api/admin_js.go b/rpc/api/admin_js.go new file mode 100644 index 000000000..02a0e93e1 --- /dev/null +++ b/rpc/api/admin_js.go @@ -0,0 +1,67 @@ +package api + +const Admin_JS = ` +web3.extend({ + property: 'admin', + methods: + [ + new web3.extend.Method({ + name: 'addPeer', + call: 'admin_addPeer', + params: 1, + inputFormatter: [web3.extend.utils.formatInputString], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'peers', + call: 'admin_peers', + params: 0, + inputFormatter: [], + outputFormatter: function(obj) { return obj; } + }), + new web3.extend.Method({ + name: 'exportChain', + call: 'admin_exportChain', + params: 1, + inputFormatter: [web3.extend.utils.formatInputString], + outputFormatter: function(obj) { return obj; } + }), + new web3.extend.Method({ + name: 'importChain', + call: 'admin_importChain', + params: 1, + inputFormatter: [web3.extend.utils.formatInputString], + outputFormatter: function(obj) { return obj; } + }), + new web3.extend.Method({ + name: 'verbosity', + call: 'admin_verbosity', + params: 1, + inputFormatter: [web3.extend.utils.formatInputInt], + outputFormatter: web3.extend.formatters.formatOutputBool + }), + new web3.extend.Method({ + name: 'syncStatus', + call: 'admin_syncStatus', + params: 1, + inputFormatter: [web3.extend.utils.formatInputInt], + outputFormatter: function(obj) { return obj; } + }), + new web3.extend.Method({ + name: 'setSolc', + call: 'admin_setSolc', + params: 1, + inputFormatter: [web3.extend.utils.formatInputString], + outputFormatter: web3.extend.formatters.formatOutputString + }) + ], + properties: + [ + new web3.extend.Property({ + name: 'nodeInfo', + getter: 'admin_nodeInfo', + outputFormatter: web3.extend.formatters.formatOutputString + }) + ] +}); +` diff --git a/rpc/api/api.go b/rpc/api/api.go index d2c548ed1..28b824658 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -7,6 +7,7 @@ import ( ) const ( + AdminApiName = "admin" EthApiName = "eth" DebugApiName = "debug" MergedApiName = "merged" @@ -19,11 +20,7 @@ const ( var ( // List with all API's which are offered over the IPC interface by default DefaultIpcApis = strings.Join([]string{ - EthApiName, - DebugApiName, - MinerApiName, - NetApiName, - PersonalApiName, + AdminApiName, EthApiName, DebugApiName, MinerApiName, NetApiName, PersonalApiName, Web3ApiName, }, ",") ) diff --git a/rpc/api/debug.go b/rpc/api/debug.go index 26f43fe74..2930ad870 100644 --- a/rpc/api/debug.go +++ b/rpc/api/debug.go @@ -20,20 +20,20 @@ const ( var ( // mapping between methods and handlers DebugMapping = map[string]debughandler{ - "debug_dumpBlock": (*DebugApi).DumpBlock, - "debug_getBlockRlp": (*DebugApi).GetBlockRlp, - "debug_printBlock": (*DebugApi).PrintBlock, - "debug_processBlock": (*DebugApi).ProcessBlock, - "debug_seedHash": (*DebugApi).SeedHash, - "debug_setHead": (*DebugApi).SetHead, + "debug_dumpBlock": (*debugApi).DumpBlock, + "debug_getBlockRlp": (*debugApi).GetBlockRlp, + "debug_printBlock": 
(*debugApi).PrintBlock, + "debug_processBlock": (*debugApi).ProcessBlock, + "debug_seedHash": (*debugApi).SeedHash, + "debug_setHead": (*debugApi).SetHead, } ) // debug callback handler -type debughandler func(*DebugApi, *shared.Request) (interface{}, error) +type debughandler func(*debugApi, *shared.Request) (interface{}, error) // admin api provider -type DebugApi struct { +type debugApi struct { xeth *xeth.XEth ethereum *eth.Ethereum methods map[string]debughandler @@ -41,8 +41,8 @@ type DebugApi struct { } // create a new debug api instance -func NewDebugApi(xeth *xeth.XEth, ethereum *eth.Ethereum, coder codec.Codec) *DebugApi { - return &DebugApi{ +func NewDebugApi(xeth *xeth.XEth, ethereum *eth.Ethereum, coder codec.Codec) *debugApi { + return &debugApi{ xeth: xeth, ethereum: ethereum, methods: DebugMapping, @@ -51,7 +51,7 @@ func NewDebugApi(xeth *xeth.XEth, ethereum *eth.Ethereum, coder codec.Codec) *De } // collection with supported methods -func (self *DebugApi) Methods() []string { +func (self *debugApi) Methods() []string { methods := make([]string, len(self.methods)) i := 0 for k := range self.methods { @@ -62,7 +62,7 @@ func (self *DebugApi) Methods() []string { } // Execute given request -func (self *DebugApi) Execute(req *shared.Request) (interface{}, error) { +func (self *debugApi) Execute(req *shared.Request) (interface{}, error) { if callback, ok := self.methods[req.Method]; ok { return callback(self, req) } @@ -70,11 +70,11 @@ func (self *DebugApi) Execute(req *shared.Request) (interface{}, error) { return nil, &shared.NotImplementedError{req.Method} } -func (self *DebugApi) Name() string { +func (self *debugApi) Name() string { return DebugApiName } -func (self *DebugApi) PrintBlock(req *shared.Request) (interface{}, error) { +func (self *debugApi) PrintBlock(req *shared.Request) (interface{}, error) { args := new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -84,7 +84,7 @@ func (self *DebugApi) PrintBlock(req *shared.Request) (interface{}, error) { return fmt.Sprintf("%s", block), nil } -func (self *DebugApi) DumpBlock(req *shared.Request) (interface{}, error) { +func (self *debugApi) DumpBlock(req *shared.Request) (interface{}, error) { args := new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -103,7 +103,7 @@ func (self *DebugApi) DumpBlock(req *shared.Request) (interface{}, error) { return stateDb.Dump(), nil } -func (self *DebugApi) GetBlockRlp(req *shared.Request) (interface{}, error) { +func (self *debugApi) GetBlockRlp(req *shared.Request) (interface{}, error) { args := new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -117,7 +117,7 @@ func (self *DebugApi) GetBlockRlp(req *shared.Request) (interface{}, error) { return fmt.Sprintf("%x", encoded), err } -func (self *DebugApi) SetHead(req *shared.Request) (interface{}, error) { +func (self *debugApi) SetHead(req *shared.Request) (interface{}, error) { args := new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -133,7 +133,7 @@ func (self *DebugApi) SetHead(req *shared.Request) (interface{}, error) { return nil, nil } -func (self *DebugApi) ProcessBlock(req *shared.Request) (interface{}, error) { +func (self *debugApi) ProcessBlock(req *shared.Request) (interface{}, error) { args := 
new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -155,7 +155,7 @@ func (self *DebugApi) ProcessBlock(req *shared.Request) (interface{}, error) { return false, err } -func (self *DebugApi) SeedHash(req *shared.Request) (interface{}, error) { +func (self *debugApi) SeedHash(req *shared.Request) (interface{}, error) { args := new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) diff --git a/rpc/api/eth.go b/rpc/api/eth.go index 0a8cecdbc..f27f17f39 100644 --- a/rpc/api/eth.go +++ b/rpc/api/eth.go @@ -13,68 +13,68 @@ import ( // eth api provider // See https://github.com/ethereum/wiki/wiki/JSON-RPC -type EthApi struct { +type ethApi struct { xeth *xeth.XEth methods map[string]ethhandler codec codec.ApiCoder } // eth callback handler -type ethhandler func(*EthApi, *shared.Request) (interface{}, error) +type ethhandler func(*ethApi, *shared.Request) (interface{}, error) var ( ethMapping = map[string]ethhandler{ - "eth_accounts": (*EthApi).Accounts, - "eth_blockNumber": (*EthApi).BlockNumber, - "eth_getBalance": (*EthApi).GetBalance, - "eth_protocolVersion": (*EthApi).ProtocolVersion, - "eth_coinbase": (*EthApi).Coinbase, - "eth_mining": (*EthApi).IsMining, - "eth_gasPrice": (*EthApi).GasPrice, - "eth_getStorage": (*EthApi).GetStorage, - "eth_storageAt": (*EthApi).GetStorage, - "eth_getStorageAt": (*EthApi).GetStorageAt, - "eth_getTransactionCount": (*EthApi).GetTransactionCount, - "eth_getBlockTransactionCountByHash": (*EthApi).GetBlockTransactionCountByHash, - "eth_getBlockTransactionCountByNumber": (*EthApi).GetBlockTransactionCountByNumber, - "eth_getUncleCountByBlockHash": (*EthApi).GetUncleCountByBlockHash, - "eth_getUncleCountByBlockNumber": (*EthApi).GetUncleCountByBlockNumber, - "eth_getData": (*EthApi).GetData, - "eth_getCode": (*EthApi).GetData, - "eth_sign": (*EthApi).Sign, - "eth_sendTransaction": (*EthApi).SendTransaction, - "eth_transact": (*EthApi).SendTransaction, - "eth_estimateGas": (*EthApi).EstimateGas, - "eth_call": (*EthApi).Call, - "eth_flush": (*EthApi).Flush, - "eth_getBlockByHash": (*EthApi).GetBlockByHash, - "eth_getBlockByNumber": (*EthApi).GetBlockByNumber, - "eth_getTransactionByHash": (*EthApi).GetTransactionByHash, - "eth_getTransactionByBlockHashAndIndex": (*EthApi).GetTransactionByBlockHashAndIndex, - "eth_getUncleByBlockHashAndIndex": (*EthApi).GetUncleByBlockHashAndIndex, - "eth_getUncleByBlockNumberAndIndex": (*EthApi).GetUncleByBlockNumberAndIndex, - "eth_getCompilers": (*EthApi).GetCompilers, - "eth_compileSolidity": (*EthApi).CompileSolidity, - "eth_newFilter": (*EthApi).NewFilter, - "eth_newBlockFilter": (*EthApi).NewBlockFilter, - "eth_newPendingTransactionFilter": (*EthApi).NewPendingTransactionFilter, - "eth_uninstallFilter": (*EthApi).UninstallFilter, - "eth_getFilterChanges": (*EthApi).GetFilterChanges, - "eth_getFilterLogs": (*EthApi).GetFilterLogs, - "eth_getLogs": (*EthApi).GetLogs, - "eth_hashrate": (*EthApi).Hashrate, - "eth_getWork": (*EthApi).GetWork, - "eth_submitWork": (*EthApi).SubmitWork, + "eth_accounts": (*ethApi).Accounts, + "eth_blockNumber": (*ethApi).BlockNumber, + "eth_getBalance": (*ethApi).GetBalance, + "eth_protocolVersion": (*ethApi).ProtocolVersion, + "eth_coinbase": (*ethApi).Coinbase, + "eth_mining": (*ethApi).IsMining, + "eth_gasPrice": (*ethApi).GasPrice, + "eth_getStorage": (*ethApi).GetStorage, + "eth_storageAt": (*ethApi).GetStorage, + "eth_getStorageAt": 
(*ethApi).GetStorageAt,
+	"eth_getTransactionCount":              (*ethApi).GetTransactionCount,
+	"eth_getBlockTransactionCountByHash":   (*ethApi).GetBlockTransactionCountByHash,
+	"eth_getBlockTransactionCountByNumber": (*ethApi).GetBlockTransactionCountByNumber,
+	"eth_getUncleCountByBlockHash":         (*ethApi).GetUncleCountByBlockHash,
+	"eth_getUncleCountByBlockNumber":       (*ethApi).GetUncleCountByBlockNumber,
+	"eth_getData":                          (*ethApi).GetData,
+	"eth_getCode":                          (*ethApi).GetData,
+	"eth_sign":                             (*ethApi).Sign,
+	"eth_sendTransaction":                  (*ethApi).SendTransaction,
+	"eth_transact":                         (*ethApi).SendTransaction,
+	"eth_estimateGas":                      (*ethApi).EstimateGas,
+	"eth_call":                             (*ethApi).Call,
+	"eth_flush":                            (*ethApi).Flush,
+	"eth_getBlockByHash":                   (*ethApi).GetBlockByHash,
+	"eth_getBlockByNumber":                 (*ethApi).GetBlockByNumber,
+	"eth_getTransactionByHash":             (*ethApi).GetTransactionByHash,
+	"eth_getTransactionByBlockHashAndIndex": (*ethApi).GetTransactionByBlockHashAndIndex,
+	"eth_getUncleByBlockHashAndIndex":      (*ethApi).GetUncleByBlockHashAndIndex,
+	"eth_getUncleByBlockNumberAndIndex":    (*ethApi).GetUncleByBlockNumberAndIndex,
+	"eth_getCompilers":                     (*ethApi).GetCompilers,
+	"eth_compileSolidity":                  (*ethApi).CompileSolidity,
+	"eth_newFilter":                        (*ethApi).NewFilter,
+	"eth_newBlockFilter":                   (*ethApi).NewBlockFilter,
+	"eth_newPendingTransactionFilter":      (*ethApi).NewPendingTransactionFilter,
+	"eth_uninstallFilter":                  (*ethApi).UninstallFilter,
+	"eth_getFilterChanges":                 (*ethApi).GetFilterChanges,
+	"eth_getFilterLogs":                    (*ethApi).GetFilterLogs,
+	"eth_getLogs":                          (*ethApi).GetLogs,
+	"eth_hashrate":                         (*ethApi).Hashrate,
+	"eth_getWork":                          (*ethApi).GetWork,
+	"eth_submitWork":                       (*ethApi).SubmitWork,
 	}
 )
 
-// create new EthApi instance
-func NewEthApi(xeth *xeth.XEth, codec codec.Codec) *EthApi {
-	return &EthApi{xeth, ethMapping, codec.New(nil)}
+// create new ethApi instance
+func NewEthApi(xeth *xeth.XEth, codec codec.Codec) *ethApi {
+	return &ethApi{xeth, ethMapping, codec.New(nil)}
 }
 
 // collection with supported methods
-func (self *EthApi) Methods() []string {
+func (self *ethApi) Methods() []string {
 	methods := make([]string, len(self.methods))
 	i := 0
 	for k := range self.methods {
@@ -85,7 +85,7 @@ func (self *EthApi) Methods() []string {
 }
 
 // Execute given request
-func (self *EthApi) Execute(req *shared.Request) (interface{}, error) {
+func (self *ethApi) Execute(req *shared.Request) (interface{}, error) {
 	if callback, ok := self.methods[req.Method]; ok {
 		return callback(self, req)
 	}
@@ -93,23 +93,23 @@ func (self *EthApi) Execute(req *shared.Request) (interface{}, error) {
 	return nil, shared.NewNotImplementedError(req.Method)
 }
 
-func (self *EthApi) Name() string {
+func (self *ethApi) Name() string {
 	return EthApiName
 }
 
-func (self *EthApi) Accounts(req *shared.Request) (interface{}, error) {
+func (self *ethApi) Accounts(req *shared.Request) (interface{}, error) {
 	return self.xeth.Accounts(), nil
 }
 
-func (self *EthApi) Hashrate(req *shared.Request) (interface{}, error) {
+func (self *ethApi) Hashrate(req *shared.Request) (interface{}, error) {
 	return newHexNum(self.xeth.HashRate()), nil
 }
 
-func (self *EthApi) BlockNumber(req *shared.Request) (interface{}, error) {
+func (self *ethApi) BlockNumber(req *shared.Request) (interface{}, error) {
 	return self.xeth.CurrentBlock().Number(), nil
 }
 
-func (self *EthApi) GetBalance(req *shared.Request) (interface{}, error) {
+func (self *ethApi) GetBalance(req *shared.Request) (interface{}, error) {
 	args := new(GetBalanceArgs)
 	if err := self.codec.Decode(req.Params, &args); err != nil {
 		return
nil, shared.NewDecodeParamError(err.Error()) @@ -118,23 +118,23 @@ func (self *EthApi) GetBalance(req *shared.Request) (interface{}, error) { return self.xeth.AtStateNum(args.BlockNumber).BalanceAt(args.Address), nil } -func (self *EthApi) ProtocolVersion(req *shared.Request) (interface{}, error) { +func (self *ethApi) ProtocolVersion(req *shared.Request) (interface{}, error) { return self.xeth.EthVersion(), nil } -func (self *EthApi) Coinbase(req *shared.Request) (interface{}, error) { +func (self *ethApi) Coinbase(req *shared.Request) (interface{}, error) { return newHexData(self.xeth.Coinbase()), nil } -func (self *EthApi) IsMining(req *shared.Request) (interface{}, error) { +func (self *ethApi) IsMining(req *shared.Request) (interface{}, error) { return self.xeth.IsMining(), nil } -func (self *EthApi) GasPrice(req *shared.Request) (interface{}, error) { +func (self *ethApi) GasPrice(req *shared.Request) (interface{}, error) { return newHexNum(xeth.DefaultGasPrice().Bytes()), nil } -func (self *EthApi) GetStorage(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetStorage(req *shared.Request) (interface{}, error) { args := new(GetStorageArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -143,7 +143,7 @@ func (self *EthApi) GetStorage(req *shared.Request) (interface{}, error) { return self.xeth.AtStateNum(args.BlockNumber).State().SafeGet(args.Address).Storage(), nil } -func (self *EthApi) GetStorageAt(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetStorageAt(req *shared.Request) (interface{}, error) { args := new(GetStorageAtArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -152,7 +152,7 @@ func (self *EthApi) GetStorageAt(req *shared.Request) (interface{}, error) { return self.xeth.AtStateNum(args.BlockNumber).StorageAt(args.Address, args.Key), nil } -func (self *EthApi) GetTransactionCount(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetTransactionCount(req *shared.Request) (interface{}, error) { args := new(GetTxCountArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -162,7 +162,7 @@ func (self *EthApi) GetTransactionCount(req *shared.Request) (interface{}, error return newHexNum(big.NewInt(int64(count)).Bytes()), nil } -func (self *EthApi) GetBlockTransactionCountByHash(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetBlockTransactionCountByHash(req *shared.Request) (interface{}, error) { args := new(HashArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -176,7 +176,7 @@ func (self *EthApi) GetBlockTransactionCountByHash(req *shared.Request) (interfa } } -func (self *EthApi) GetBlockTransactionCountByNumber(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetBlockTransactionCountByNumber(req *shared.Request) (interface{}, error) { args := new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -190,7 +190,7 @@ func (self *EthApi) GetBlockTransactionCountByNumber(req *shared.Request) (inter } } -func (self *EthApi) GetUncleCountByBlockHash(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetUncleCountByBlockHash(req *shared.Request) (interface{}, error) { args := new(HashArgs) if err := self.codec.Decode(req.Params, &args); 
err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -204,7 +204,7 @@ func (self *EthApi) GetUncleCountByBlockHash(req *shared.Request) (interface{}, return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil } -func (self *EthApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{}, error) { args := new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -218,7 +218,7 @@ func (self *EthApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{} return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil } -func (self *EthApi) GetData(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetData(req *shared.Request) (interface{}, error) { args := new(GetDataArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -227,8 +227,8 @@ func (self *EthApi) GetData(req *shared.Request) (interface{}, error) { return newHexData(v), nil } -func (self *EthApi) Sign(req *shared.Request) (interface{}, error) { - args := new(NewSignArgs) +func (self *ethApi) Sign(req *shared.Request) (interface{}, error) { + args := new(NewSigArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) } @@ -239,7 +239,7 @@ func (self *EthApi) Sign(req *shared.Request) (interface{}, error) { return v, nil } -func (self *EthApi) SendTransaction(req *shared.Request) (interface{}, error) { +func (self *ethApi) SendTransaction(req *shared.Request) (interface{}, error) { args := new(NewTxArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -258,7 +258,7 @@ func (self *EthApi) SendTransaction(req *shared.Request) (interface{}, error) { return v, nil } -func (self *EthApi) EstimateGas(req *shared.Request) (interface{}, error) { +func (self *ethApi) EstimateGas(req *shared.Request) (interface{}, error) { _, gas, err := self.doCall(req.Params) if err != nil { return nil, err @@ -272,7 +272,7 @@ func (self *EthApi) EstimateGas(req *shared.Request) (interface{}, error) { } } -func (self *EthApi) Call(req *shared.Request) (interface{}, error) { +func (self *ethApi) Call(req *shared.Request) (interface{}, error) { v, _, err := self.doCall(req.Params) if err != nil { return nil, err @@ -286,11 +286,11 @@ func (self *EthApi) Call(req *shared.Request) (interface{}, error) { } } -func (self *EthApi) Flush(req *shared.Request) (interface{}, error) { +func (self *ethApi) Flush(req *shared.Request) (interface{}, error) { return nil, shared.NewNotImplementedError(req.Method) } -func (self *EthApi) doCall(params json.RawMessage) (string, string, error) { +func (self *ethApi) doCall(params json.RawMessage) (string, string, error) { args := new(CallArgs) if err := self.codec.Decode(params, &args); err != nil { return "", "", err @@ -299,7 +299,7 @@ func (self *EthApi) doCall(params json.RawMessage) (string, string, error) { return self.xeth.AtStateNum(args.BlockNumber).Call(args.From, args.To, args.Value.String(), args.Gas.String(), args.GasPrice.String(), args.Data) } -func (self *EthApi) GetBlockByHash(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetBlockByHash(req *shared.Request) (interface{}, error) { args := new(GetBlockByHashArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, 
shared.NewDecodeParamError(err.Error()) @@ -309,7 +309,7 @@ func (self *EthApi) GetBlockByHash(req *shared.Request) (interface{}, error) { return NewBlockRes(block, args.IncludeTxs), nil } -func (self *EthApi) GetBlockByNumber(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetBlockByNumber(req *shared.Request) (interface{}, error) { args := new(GetBlockByNumberArgs) if err := json.Unmarshal(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -328,7 +328,7 @@ func (self *EthApi) GetBlockByNumber(req *shared.Request) (interface{}, error) { return br, nil } -func (self *EthApi) GetTransactionByHash(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetTransactionByHash(req *shared.Request) (interface{}, error) { args := new(HashArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -348,7 +348,7 @@ func (self *EthApi) GetTransactionByHash(req *shared.Request) (interface{}, erro return nil, nil } -func (self *EthApi) GetTransactionByBlockHashAndIndex(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetTransactionByBlockHashAndIndex(req *shared.Request) (interface{}, error) { args := new(HashIndexArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -367,7 +367,7 @@ func (self *EthApi) GetTransactionByBlockHashAndIndex(req *shared.Request) (inte } } -func (self *EthApi) GetTransactionByBlockNumberAndIndex(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetTransactionByBlockNumberAndIndex(req *shared.Request) (interface{}, error) { args := new(BlockNumIndexArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -386,7 +386,7 @@ func (self *EthApi) GetTransactionByBlockNumberAndIndex(req *shared.Request) (in return v.Transactions[args.Index], nil } -func (self *EthApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{}, error) { args := new(HashIndexArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -405,7 +405,7 @@ func (self *EthApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{ return br.Uncles[args.Index], nil } -func (self *EthApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interface{}, error) { args := new(BlockNumIndexArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -425,7 +425,7 @@ func (self *EthApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interfac } } -func (self *EthApi) GetCompilers(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetCompilers(req *shared.Request) (interface{}, error) { var lang string if solc, _ := self.xeth.Solc(); solc != nil { lang = "Solidity" @@ -434,7 +434,7 @@ func (self *EthApi) GetCompilers(req *shared.Request) (interface{}, error) { return c, nil } -func (self *EthApi) CompileSolidity(req *shared.Request) (interface{}, error) { +func (self *ethApi) CompileSolidity(req *shared.Request) (interface{}, error) { solc, _ := self.xeth.Solc() if solc == nil { return nil, shared.NewNotAvailableError(req.Method, "solc (solidity compiler) not found") @@ -452,7 +452,7 @@ func (self 
*EthApi) CompileSolidity(req *shared.Request) (interface{}, error) { return contracts, nil } -func (self *EthApi) NewFilter(req *shared.Request) (interface{}, error) { +func (self *ethApi) NewFilter(req *shared.Request) (interface{}, error) { args := new(BlockFilterArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -462,15 +462,15 @@ func (self *EthApi) NewFilter(req *shared.Request) (interface{}, error) { return newHexNum(big.NewInt(int64(id)).Bytes()), nil } -func (self *EthApi) NewBlockFilter(req *shared.Request) (interface{}, error) { +func (self *ethApi) NewBlockFilter(req *shared.Request) (interface{}, error) { return newHexNum(self.xeth.NewBlockFilter()), nil } -func (self *EthApi) NewPendingTransactionFilter(req *shared.Request) (interface{}, error) { +func (self *ethApi) NewPendingTransactionFilter(req *shared.Request) (interface{}, error) { return newHexNum(self.xeth.NewTransactionFilter()), nil } -func (self *EthApi) UninstallFilter(req *shared.Request) (interface{}, error) { +func (self *ethApi) UninstallFilter(req *shared.Request) (interface{}, error) { args := new(FilterIdArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -478,7 +478,7 @@ func (self *EthApi) UninstallFilter(req *shared.Request) (interface{}, error) { return self.xeth.UninstallFilter(args.Id), nil } -func (self *EthApi) GetFilterChanges(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetFilterChanges(req *shared.Request) (interface{}, error) { args := new(FilterIdArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -496,7 +496,7 @@ func (self *EthApi) GetFilterChanges(req *shared.Request) (interface{}, error) { } } -func (self *EthApi) GetFilterLogs(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetFilterLogs(req *shared.Request) (interface{}, error) { args := new(FilterIdArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -505,7 +505,7 @@ func (self *EthApi) GetFilterLogs(req *shared.Request) (interface{}, error) { return NewLogsRes(self.xeth.Logs(args.Id)), nil } -func (self *EthApi) GetLogs(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetLogs(req *shared.Request) (interface{}, error) { args := new(BlockFilterArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -513,12 +513,12 @@ func (self *EthApi) GetLogs(req *shared.Request) (interface{}, error) { return NewLogsRes(self.xeth.AllLogs(args.Earliest, args.Latest, args.Skip, args.Max, args.Address, args.Topics)), nil } -func (self *EthApi) GetWork(req *shared.Request) (interface{}, error) { +func (self *ethApi) GetWork(req *shared.Request) (interface{}, error) { self.xeth.SetMining(true, 0) return self.xeth.RemoteMining().GetWork(), nil } -func (self *EthApi) SubmitWork(req *shared.Request) (interface{}, error) { +func (self *ethApi) SubmitWork(req *shared.Request) (interface{}, error) { args := new(SubmitWorkArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) diff --git a/rpc/api/eth_args.go b/rpc/api/eth_args.go index 1ef6f9efb..ad9a35fa2 100644 --- a/rpc/api/eth_args.go +++ b/rpc/api/eth_args.go @@ -226,19 +226,14 @@ func (args *GetDataArgs) UnmarshalJSON(b []byte) (err error) { return nil } 
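The hunk below renames NewSignArgs to NewSigArgs and switches its UnmarshalJSON to read the eth_sign parameters as a plain two-element JSON array of strings: params[0] is the signing account ("from") and params[1] is the hex data, both required. A rough sketch of a matching JSON-RPC request, using placeholder values that are not part of this patch:

    // Hypothetical eth_sign call shaped for the new NewSigArgs decoder.
    // The address and data below are placeholders, not values from this change.
    var signRequest = {
        jsonrpc: "2.0",
        id: 1,
        method: "eth_sign",
        params: [
            "0x407d73d8a49eeb85d32cf465507dd71d507100c1", // from: account that signs
            "0xdeadbeef"                                  // data: hex payload to sign
        ]
    };

If either element is missing or not a string, the decoder returns a validation or invalid-type error, as the hunk shows.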
-type NewSignArgs struct { +type NewSigArgs struct { From string Data string } -func (args *NewSignArgs) UnmarshalJSON(b []byte) (err error) { - var obj []json.RawMessage - var ext struct { - From string - Data string - } +func (args *NewSigArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} - // Decode byte slice to array of RawMessages if err := json.Unmarshal(b, &obj); err != nil { return shared.NewDecodeParamError(err.Error()) } @@ -248,21 +243,26 @@ func (args *NewSignArgs) UnmarshalJSON(b []byte) (err error) { return shared.NewInsufficientParamsError(len(obj), 1) } - // Decode 0th RawMessage to temporary struct - if err := json.Unmarshal(obj[0], &ext); err != nil { - return shared.NewDecodeParamError(err.Error()) + from, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("from", "not a string") } + args.From = from - if len(ext.From) == 0 { + if len(args.From) == 0 { return shared.NewValidationError("from", "is required") } - if len(ext.Data) == 0 { + data, ok := obj[1].(string) + if !ok { + return shared.NewInvalidTypeError("data", "not a string") + } + args.Data = data + + if len(args.Data) == 0 { return shared.NewValidationError("data", "is required") } - args.From = ext.From - args.Data = ext.Data return nil } diff --git a/rpc/api/mergedapi.go b/rpc/api/mergedapi.go index 88c301aae..dea8d1289 100644 --- a/rpc/api/mergedapi.go +++ b/rpc/api/mergedapi.go @@ -3,14 +3,14 @@ package api import "github.com/ethereum/go-ethereum/rpc/shared" // combines multiple API's -type mergedApi struct { +type MergedApi struct { apis []string methods map[string]EthereumApi } // create new merged api instance -func newMergedApi(apis ...EthereumApi) *mergedApi { - mergedApi := new(mergedApi) +func newMergedApi(apis ...EthereumApi) *MergedApi { + mergedApi := new(MergedApi) mergedApi.apis = make([]string, len(apis)) mergedApi.methods = make(map[string]EthereumApi) @@ -24,7 +24,7 @@ func newMergedApi(apis ...EthereumApi) *mergedApi { } // Supported RPC methods -func (self *mergedApi) Methods() []string { +func (self *MergedApi) Methods() []string { all := make([]string, len(self.methods)) for method, _ := range self.methods { all = append(all, method) @@ -33,7 +33,7 @@ func (self *mergedApi) Methods() []string { } // Call the correct API's Execute method for the given request -func (self *mergedApi) Execute(req *shared.Request) (interface{}, error) { +func (self *MergedApi) Execute(req *shared.Request) (interface{}, error) { if res, _ := self.handle(req); res != nil { return res, nil } @@ -43,11 +43,11 @@ func (self *mergedApi) Execute(req *shared.Request) (interface{}, error) { return nil, shared.NewNotImplementedError(req.Method) } -func (self *mergedApi) Name() string { +func (self *MergedApi) Name() string { return MergedApiName } -func (self *mergedApi) handle(req *shared.Request) (interface{}, error) { +func (self *MergedApi) handle(req *shared.Request) (interface{}, error) { if req.Method == "support_apis" { // provided API's return self.apis, nil } diff --git a/rpc/api/miner.go b/rpc/api/miner.go index b22c4b7ad..496269304 100644 --- a/rpc/api/miner.go +++ b/rpc/api/miner.go @@ -15,30 +15,30 @@ const ( var ( // mapping between methods and handlers MinerMapping = map[string]minerhandler{ - "miner_hashrate": (*miner).Hashrate, - "miner_makeDAG": (*miner).MakeDAG, - "miner_setExtra": (*miner).SetExtra, - "miner_setGasPrice": (*miner).SetGasPrice, - "miner_startAutoDAG": (*miner).StartAutoDAG, - "miner_start": (*miner).StartMiner, - "miner_stopAutoDAG": 
(*miner).StopAutoDAG, - "miner_stop": (*miner).StopMiner, + "miner_hashrate": (*minerApi).Hashrate, + "miner_makeDAG": (*minerApi).MakeDAG, + "miner_setExtra": (*minerApi).SetExtra, + "miner_setGasPrice": (*minerApi).SetGasPrice, + "miner_startAutoDAG": (*minerApi).StartAutoDAG, + "miner_start": (*minerApi).StartMiner, + "miner_stopAutoDAG": (*minerApi).StopAutoDAG, + "miner_stop": (*minerApi).StopMiner, } ) // miner callback handler -type minerhandler func(*miner, *shared.Request) (interface{}, error) +type minerhandler func(*minerApi, *shared.Request) (interface{}, error) // miner api provider -type miner struct { +type minerApi struct { ethereum *eth.Ethereum methods map[string]minerhandler codec codec.ApiCoder } // create a new miner api instance -func NewMinerApi(ethereum *eth.Ethereum, coder codec.Codec) *miner { - return &miner{ +func NewMinerApi(ethereum *eth.Ethereum, coder codec.Codec) *minerApi { + return &minerApi{ ethereum: ethereum, methods: MinerMapping, codec: coder.New(nil), @@ -46,7 +46,7 @@ func NewMinerApi(ethereum *eth.Ethereum, coder codec.Codec) *miner { } // Execute given request -func (self *miner) Execute(req *shared.Request) (interface{}, error) { +func (self *minerApi) Execute(req *shared.Request) (interface{}, error) { if callback, ok := self.methods[req.Method]; ok { return callback(self, req) } @@ -55,7 +55,7 @@ func (self *miner) Execute(req *shared.Request) (interface{}, error) { } // collection with supported methods -func (self *miner) Methods() []string { +func (self *minerApi) Methods() []string { methods := make([]string, len(self.methods)) i := 0 for k := range self.methods { @@ -65,11 +65,11 @@ func (self *miner) Methods() []string { return methods } -func (self *miner) Name() string { +func (self *minerApi) Name() string { return MinerApiName } -func (self *miner) StartMiner(req *shared.Request) (interface{}, error) { +func (self *minerApi) StartMiner(req *shared.Request) (interface{}, error) { args := new(StartMinerArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err @@ -87,16 +87,16 @@ func (self *miner) StartMiner(req *shared.Request) (interface{}, error) { return false, err } -func (self *miner) StopMiner(req *shared.Request) (interface{}, error) { +func (self *minerApi) StopMiner(req *shared.Request) (interface{}, error) { self.ethereum.StopMining() return true, nil } -func (self *miner) Hashrate(req *shared.Request) (interface{}, error) { +func (self *minerApi) Hashrate(req *shared.Request) (interface{}, error) { return self.ethereum.Miner().HashRate(), nil } -func (self *miner) SetExtra(req *shared.Request) (interface{}, error) { +func (self *minerApi) SetExtra(req *shared.Request) (interface{}, error) { args := new(SetExtraArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err @@ -105,7 +105,7 @@ func (self *miner) SetExtra(req *shared.Request) (interface{}, error) { return true, nil } -func (self *miner) SetGasPrice(req *shared.Request) (interface{}, error) { +func (self *minerApi) SetGasPrice(req *shared.Request) (interface{}, error) { args := new(GasPriceArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return false, err @@ -115,17 +115,17 @@ func (self *miner) SetGasPrice(req *shared.Request) (interface{}, error) { return true, nil } -func (self *miner) StartAutoDAG(req *shared.Request) (interface{}, error) { +func (self *minerApi) StartAutoDAG(req *shared.Request) (interface{}, error) { self.ethereum.StartAutoDAG() return true, nil } -func (self *miner) 
StopAutoDAG(req *shared.Request) (interface{}, error) { +func (self *minerApi) StopAutoDAG(req *shared.Request) (interface{}, error) { self.ethereum.StopAutoDAG() return true, nil } -func (self *miner) MakeDAG(req *shared.Request) (interface{}, error) { +func (self *minerApi) MakeDAG(req *shared.Request) (interface{}, error) { args := new(MakeDAGArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err diff --git a/rpc/api/net.go b/rpc/api/net.go index 6799d68f6..efc400785 100644 --- a/rpc/api/net.go +++ b/rpc/api/net.go @@ -10,18 +10,18 @@ import ( var ( // mapping between methods and handlers netMapping = map[string]nethandler{ - "net_id": (*net).NetworkVersion, - "net_peerCount": (*net).PeerCount, - "net_listening": (*net).IsListening, - "net_peers": (*net).Peers, + "net_id": (*netApi).NetworkVersion, + "net_peerCount": (*netApi).PeerCount, + "net_listening": (*netApi).IsListening, + "net_peers": (*netApi).Peers, } ) // net callback handler -type nethandler func(*net, *shared.Request) (interface{}, error) +type nethandler func(*netApi, *shared.Request) (interface{}, error) // net api provider -type net struct { +type netApi struct { xeth *xeth.XEth ethereum *eth.Ethereum methods map[string]nethandler @@ -29,8 +29,8 @@ type net struct { } // create a new net api instance -func NewNetApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *net { - return &net{ +func NewNetApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *netApi { + return &netApi{ xeth: xeth, ethereum: eth, methods: netMapping, @@ -39,7 +39,7 @@ func NewNetApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *net { } // collection with supported methods -func (self *net) Methods() []string { +func (self *netApi) Methods() []string { methods := make([]string, len(self.methods)) i := 0 for k := range self.methods { @@ -50,7 +50,7 @@ func (self *net) Methods() []string { } // Execute given request -func (self *net) Execute(req *shared.Request) (interface{}, error) { +func (self *netApi) Execute(req *shared.Request) (interface{}, error) { if callback, ok := self.methods[req.Method]; ok { return callback(self, req) } @@ -58,24 +58,24 @@ func (self *net) Execute(req *shared.Request) (interface{}, error) { return nil, shared.NewNotImplementedError(req.Method) } -func (self *net) Name() string { +func (self *netApi) Name() string { return NetApiName } // Network version -func (self *net) NetworkVersion(req *shared.Request) (interface{}, error) { +func (self *netApi) NetworkVersion(req *shared.Request) (interface{}, error) { return self.xeth.NetworkVersion(), nil } // Number of connected peers -func (self *net) PeerCount(req *shared.Request) (interface{}, error) { +func (self *netApi) PeerCount(req *shared.Request) (interface{}, error) { return self.xeth.PeerCount(), nil } -func (self *net) IsListening(req *shared.Request) (interface{}, error) { +func (self *netApi) IsListening(req *shared.Request) (interface{}, error) { return self.xeth.IsListening(), nil } -func (self *net) Peers(req *shared.Request) (interface{}, error) { +func (self *netApi) Peers(req *shared.Request) (interface{}, error) { return self.ethereum.PeersInfo(), nil } diff --git a/rpc/api/net_js.go b/rpc/api/net_js.go index 6ba0624d8..2fae69c58 100644 --- a/rpc/api/net_js.go +++ b/rpc/api/net_js.go @@ -5,6 +5,13 @@ web3.extend({ property: 'network', methods: [ + new web3.extend.Method({ + name: 'addPeer', + call: 'net_addPeer', + params: 1, + inputFormatter: [web3.extend.utils.formatInputString], + outputFormatter: 
web3.extend.formatters.formatOutputBool + }), new web3.extend.Method({ name: 'id', call: 'net_id', diff --git a/rpc/api/personal.go b/rpc/api/personal.go index d00363627..08dc4bff5 100644 --- a/rpc/api/personal.go +++ b/rpc/api/personal.go @@ -13,18 +13,18 @@ import ( var ( // mapping between methods and handlers personalMapping = map[string]personalhandler{ - "personal_listAccounts": (*personal).ListAccounts, - "personal_newAccount": (*personal).NewAccount, - "personal_deleteAccount": (*personal).DeleteAccount, - "personal_unlockAccount": (*personal).UnlockAccount, + "personal_listAccounts": (*personalApi).ListAccounts, + "personal_newAccount": (*personalApi).NewAccount, + "personal_deleteAccount": (*personalApi).DeleteAccount, + "personal_unlockAccount": (*personalApi).UnlockAccount, } ) // net callback handler -type personalhandler func(*personal, *shared.Request) (interface{}, error) +type personalhandler func(*personalApi, *shared.Request) (interface{}, error) // net api provider -type personal struct { +type personalApi struct { xeth *xeth.XEth ethereum *eth.Ethereum methods map[string]personalhandler @@ -32,8 +32,8 @@ type personal struct { } // create a new net api instance -func NewPersonal(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *personal { - return &personal{ +func NewPersonalApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *personalApi { + return &personalApi{ xeth: xeth, ethereum: eth, methods: personalMapping, @@ -42,7 +42,7 @@ func NewPersonal(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *persona } // collection with supported methods -func (self *personal) Methods() []string { +func (self *personalApi) Methods() []string { methods := make([]string, len(self.methods)) i := 0 for k := range self.methods { @@ -53,7 +53,7 @@ func (self *personal) Methods() []string { } // Execute given request -func (self *personal) Execute(req *shared.Request) (interface{}, error) { +func (self *personalApi) Execute(req *shared.Request) (interface{}, error) { if callback, ok := self.methods[req.Method]; ok { return callback(self, req) } @@ -61,15 +61,15 @@ func (self *personal) Execute(req *shared.Request) (interface{}, error) { return nil, shared.NewNotImplementedError(req.Method) } -func (self *personal) Name() string { +func (self *personalApi) Name() string { return PersonalApiName } -func (self *personal) ListAccounts(req *shared.Request) (interface{}, error) { +func (self *personalApi) ListAccounts(req *shared.Request) (interface{}, error) { return self.xeth.Accounts(), nil } -func (self *personal) NewAccount(req *shared.Request) (interface{}, error) { +func (self *personalApi) NewAccount(req *shared.Request) (interface{}, error) { args := new(NewAccountArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -80,7 +80,7 @@ func (self *personal) NewAccount(req *shared.Request) (interface{}, error) { return acc.Address.Hex(), err } -func (self *personal) DeleteAccount(req *shared.Request) (interface{}, error) { +func (self *personalApi) DeleteAccount(req *shared.Request) (interface{}, error) { args := new(DeleteAccountArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) @@ -95,7 +95,7 @@ func (self *personal) DeleteAccount(req *shared.Request) (interface{}, error) { } } -func (self *personal) UnlockAccount(req *shared.Request) (interface{}, error) { +func (self *personalApi) UnlockAccount(req *shared.Request) (interface{}, 
error) { args := new(UnlockAccountArgs) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, shared.NewDecodeParamError(err.Error()) diff --git a/rpc/api/utils.go b/rpc/api/utils.go index eae23d351..072abf883 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -21,6 +21,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. for i, name := range names { switch strings.ToLower(strings.TrimSpace(name)) { + case AdminApiName: + apis[i] = NewAdminApi(xeth, eth, codec) case DebugApiName: apis[i] = NewDebugApi(xeth, eth, codec) case EthApiName: @@ -30,9 +32,9 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. case NetApiName: apis[i] = NewNetApi(xeth, eth, codec) case PersonalApiName: - apis[i] = NewPersonal(xeth, eth, codec) + apis[i] = NewPersonalApi(xeth, eth, codec) case Web3ApiName: - apis[i] = NewWeb3(xeth, codec) + apis[i] = NewWeb3Api(xeth, codec) default: return nil, fmt.Errorf("Unknown API '%s'", name) } @@ -43,6 +45,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. func Javascript(name string) string { switch strings.ToLower(strings.TrimSpace(name)) { + case AdminApiName: + return Admin_JS case DebugApiName: return Debug_JS case MinerApiName: diff --git a/rpc/api/web3.go b/rpc/api/web3.go index c46457ce6..42b0b7fd9 100644 --- a/rpc/api/web3.go +++ b/rpc/api/web3.go @@ -15,24 +15,24 @@ const ( var ( // mapping between methods and handlers Web3Mapping = map[string]web3handler{ - "web3_sha3": (*web3).Sha3, - "web3_clientVersion": (*web3).ClientVersion, + "web3_sha3": (*web3Api).Sha3, + "web3_clientVersion": (*web3Api).ClientVersion, } ) // web3 callback handler -type web3handler func(*web3, *shared.Request) (interface{}, error) +type web3handler func(*web3Api, *shared.Request) (interface{}, error) // web3 api provider -type web3 struct { +type web3Api struct { xeth *xeth.XEth methods map[string]web3handler codec codec.ApiCoder } // create a new web3 api instance -func NewWeb3(xeth *xeth.XEth, coder codec.Codec) *web3 { - return &web3{ +func NewWeb3Api(xeth *xeth.XEth, coder codec.Codec) *web3Api { + return &web3Api{ xeth: xeth, methods: Web3Mapping, codec: coder.New(nil), @@ -40,7 +40,7 @@ func NewWeb3(xeth *xeth.XEth, coder codec.Codec) *web3 { } // collection with supported methods -func (self *web3) Methods() []string { +func (self *web3Api) Methods() []string { methods := make([]string, len(self.methods)) i := 0 for k := range self.methods { @@ -51,7 +51,7 @@ func (self *web3) Methods() []string { } // Execute given request -func (self *web3) Execute(req *shared.Request) (interface{}, error) { +func (self *web3Api) Execute(req *shared.Request) (interface{}, error) { if callback, ok := self.methods[req.Method]; ok { return callback(self, req) } @@ -59,17 +59,17 @@ func (self *web3) Execute(req *shared.Request) (interface{}, error) { return nil, &shared.NotImplementedError{req.Method} } -func (self *web3) Name() string { +func (self *web3Api) Name() string { return Web3ApiName } // Version of the API this instance provides -func (self *web3) Version() string { +func (self *web3Api) Version() string { return Web3Version } // Calculates the sha3 over req.Params.Data -func (self *web3) Sha3(req *shared.Request) (interface{}, error) { +func (self *web3Api) Sha3(req *shared.Request) (interface{}, error) { args := new(Sha3Args) if err := self.codec.Decode(req.Params, &args); err != nil { return nil, err @@ -79,6 +79,6 @@ func (self *web3) Sha3(req *shared.Request) 
(interface{}, error) { } // returns the xeth client vrsion -func (self *web3) ClientVersion(req *shared.Request) (interface{}, error) { +func (self *web3Api) ClientVersion(req *shared.Request) (interface{}, error) { return self.xeth.ClientVersion(), nil } -- cgit v1.2.3 From 5f8e5a487588bab49fb3c9321fd8903132426c47 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Wed, 10 Jun 2015 09:42:14 +0200 Subject: upgrade web3.js with _extend support --- jsre/ethereum_js.go | 2901 +++--------------------------------------------- rpc/api/admin_js.go | 38 +- rpc/api/debug_js.go | 30 +- rpc/api/miner_js.go | 48 +- rpc/api/net_js.go | 26 +- rpc/api/personal_js.go | 16 +- 6 files changed, 230 insertions(+), 2829 deletions(-) diff --git a/jsre/ethereum_js.go b/jsre/ethereum_js.go index 5aa673906..8d530a532 100644 --- a/jsre/ethereum_js.go +++ b/jsre/ethereum_js.go @@ -801,7 +801,7 @@ module.exports = { ETH_SIGNATURE_LENGTH: 4, ETH_UNITS: ETH_UNITS, ETH_BIGNUMBER_ROUNDING_MODE: { ROUNDING_MODE: BigNumber.ROUND_DOWN }, - ETH_POLLING_TIMEOUT: 1000, + ETH_POLLING_TIMEOUT: 1000/2, defaultBlock: 'latest', defaultAccount: undefined }; @@ -1449,8 +1449,7 @@ web3.eth.filter = function (fil, eventParams, options, formatter) { return fil(eventParams, options); } - // what outputLogFormatter? that's wrong - //return new Filter(fil, watches.eth(), formatters.outputLogFormatter); + // output logs works for blockFilter and pendingTransaction filters? return new Filter(fil, watches.eth(), formatter || formatters.outputLogFormatter); }; /*jshint maxparams:3 */ @@ -1507,7 +1506,7 @@ Object.defineProperty(web3.eth, 'defaultAccount', { // EXTEND -web3.extend = function(extension){ +web3._extend = function(extension){ /*jshint maxcomplexity: 6 */ if(extension.property && !web3[extension.property]) @@ -1516,10 +1515,10 @@ web3.extend = function(extension){ setupMethods(web3[extension.property] || web3, extension.methods || []); setupProperties(web3[extension.property] || web3, extension.properties || []); }; -web3.extend.formatters = formatters; -web3.extend.utils = utils; -web3.extend.Method = require('./web3/method'); -web3.extend.Property = require('./web3/property'); +web3._extend.formatters = formatters; +web3._extend.utils = utils; +web3._extend.Method = require('./web3/method'); +web3._extend.Property = require('./web3/property'); /// setups all api methods @@ -2428,21 +2427,36 @@ var getOptions = function (options) { }; }; -var Filter = function (options, methods, formatter) { - var implementation = {}; - methods.forEach(function (method) { - method.attachToObject(implementation); - }); - this.options = getOptions(options); - this.implementation = implementation; - this.callbacks = []; - this.formatter = formatter; - this.filterId = this.implementation.newFilter(this.options); +/** +Adds the callback and sets up the methods, to iterate over the results. + +@method getLogsAtStart +@param {Object} self +@param {funciton} +*/ +var getLogsAtStart = function(self, callback){ + // call getFilterLogs for the first watch callback start + if (!utils.isString(self.options)) { + self.get(function (err, messages) { + // don't send all the responses to all the watches again... just to self one + if (err) { + callback(err); + } + + messages.forEach(function (message) { + callback(null, message); + }); + }); + } }; -Filter.prototype.watch = function (callback) { - this.callbacks.push(callback); - var self = this; +/** +Adds the callback and sets up the methods, to iterate over the results. 
+ +@method pollFilter +@param {Object} self +*/ +var pollFilter = function(self) { var onMessage = function (error, messages) { if (error) { @@ -2459,29 +2473,55 @@ Filter.prototype.watch = function (callback) { }); }; - // call getFilterLogs on start - if (!utils.isString(this.options)) { - this.get(function (err, messages) { - // don't send all the responses to all the watches again... just to this one - if (err) { - callback(err); - } + RequestManager.getInstance().startPolling({ + method: self.implementation.poll.call, + params: [self.filterId], + }, self.filterId, onMessage, self.stopWatching.bind(self)); - messages.forEach(function (message) { - callback(null, message); +}; + +var Filter = function (options, methods, formatter) { + var self = this; + var implementation = {}; + methods.forEach(function (method) { + method.attachToObject(implementation); + }); + this.options = getOptions(options); + this.implementation = implementation; + this.callbacks = []; + this.pollFilters = []; + this.formatter = formatter; + this.implementation.newFilter(this.options, function(error, id){ + if(error) { + self.callbacks.forEach(function(callback){ + callback(error); }); - }); + } else { + self.filterId = id; + // get filter logs at start + self.callbacks.forEach(function(callback){ + getLogsAtStart(self, callback); + }); + pollFilter(self); + } + }); +}; + +Filter.prototype.watch = function (callback) { + this.callbacks.push(callback); + + if(this.filterId) { + getLogsAtStart(this, callback); + pollFilter(this); } - RequestManager.getInstance().startPolling({ - method: this.implementation.poll.call, - params: [this.filterId], - }, this.filterId, onMessage, this.stopWatching.bind(this)); + return this; }; Filter.prototype.stopWatching = function () { RequestManager.getInstance().stopPolling(this.filterId); - this.implementation.uninstallFilter(this.filterId); + // remove filter async + this.implementation.uninstallFilter(this.filterId, function(){}); this.callbacks = []; }; @@ -2503,6 +2543,8 @@ Filter.prototype.get = function (callback) { return self.formatter ? 
self.formatter(log) : log; }); } + + return this; }; module.exports = Filter; @@ -2600,8 +2642,10 @@ var inputTransactionFormatter = function (options){ * @returns {Object} transaction */ var outputTransactionFormatter = function (tx){ - tx.blockNumber = utils.toDecimal(tx.blockNumber); - tx.transactionIndex = utils.toDecimal(tx.transactionIndex); + if(tx.blockNumber !== null) + tx.blockNumber = utils.toDecimal(tx.blockNumber); + if(tx.transactionIndex !== null) + tx.transactionIndex = utils.toDecimal(tx.transactionIndex); tx.nonce = utils.toDecimal(tx.nonce); tx.gas = utils.toDecimal(tx.gas); tx.gasPrice = utils.toBigNumber(tx.gasPrice); @@ -2623,7 +2667,8 @@ var outputBlockFormatter = function(block) { block.gasUsed = utils.toDecimal(block.gasUsed); block.size = utils.toDecimal(block.size); block.timestamp = utils.toDecimal(block.timestamp); - block.number = utils.toDecimal(block.number); + if(block.number !== null) + block.number = utils.toDecimal(block.number); block.difficulty = utils.toBigNumber(block.difficulty); block.totalDifficulty = utils.toBigNumber(block.totalDifficulty); @@ -2650,9 +2695,12 @@ var outputLogFormatter = function(log) { return null; } - log.blockNumber = utils.toDecimal(log.blockNumber); - log.transactionIndex = utils.toDecimal(log.transactionIndex); - log.logIndex = utils.toDecimal(log.logIndex); + if(log.blockNumber !== null) + log.blockNumber = utils.toDecimal(log.blockNumber); + if(log.transactionIndex !== null) + log.transactionIndex = utils.toDecimal(log.transactionIndex); + if(log.logIndex !== null) + log.logIndex = utils.toDecimal(log.logIndex); return log; }; @@ -2754,6 +2802,7 @@ module.exports = { var web3 = require('../web3'); var coder = require('../solidity/coder'); var utils = require('../utils/utils'); +var formatters = require('./formatters'); var sha3 = require('../utils/sha3'); /** @@ -2777,6 +2826,12 @@ SolidityFunction.prototype.extractCallback = function (args) { } }; +SolidityFunction.prototype.extractDefaultBlock = function (args) { + if (args.length > this._inputTypes.length && !utils.isObject(args[args.length -1])) { + return formatters.inputDefaultBlockNumberFormatter(args.pop()); // modify the args array! + } +}; + /** * Should be used to create payload from arguments * @@ -2828,15 +2883,17 @@ SolidityFunction.prototype.unpackOutput = function (output) { SolidityFunction.prototype.call = function () { var args = Array.prototype.slice.call(arguments).filter(function (a) {return a !== undefined; }); var callback = this.extractCallback(args); + var defaultBlock = this.extractDefaultBlock(args); var payload = this.toPayload(args); + if (!callback) { - var output = web3.eth.call(payload); + var output = web3.eth.call(payload, defaultBlock); return this.unpackOutput(output); } var self = this; - web3.eth.call(payload, function (error, output) { + web3.eth.call(payload, defaultBlock, function (error, output) { callback(error, self.unpackOutput(output)); }); }; @@ -2955,7 +3012,7 @@ SolidityFunction.prototype.attachToContract = function (contract) { module.exports = SolidityFunction; -},{"../solidity/coder":1,"../utils/sha3":6,"../utils/utils":7,"../web3":9}],19:[function(require,module,exports){ +},{"../solidity/coder":1,"../utils/sha3":6,"../utils/utils":7,"../web3":9,"./formatters":17}],19:[function(require,module,exports){ /* This file is part of ethereum.js. 
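The _extend rename above is the hook the console API modules attach to: each rpc/api/*_js.go file registers its extra RPC methods on the web3 object through it. A minimal sketch of the registration pattern, mirroring the addPeer snippet from rpc/api/net_js.go but with an invented method name purely for illustration:

    // Hypothetical module registration through the renamed web3._extend hook.
    // 'example_echo' is not a real RPC method; it only illustrates the shape.
    web3._extend({
        property: 'example',
        methods: [
            new web3._extend.Method({
                name: 'echo',
                call: 'example_echo',
                params: 1,
                inputFormatter: [web3._extend.utils.formatInputString],
                outputFormatter: web3._extend.formatters.formatOutputBool
            })
        ],
        properties: []
    });

A method registered this way would become available in the console as web3.example.echo(...), the same way the net module exposes web3.network.addPeer.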
@@ -2993,6 +3050,7 @@ HttpProvider.prototype.send = function (payload) { var request = new XMLHttpRequest(); request.open('POST', this.host, false); + request.setRequestHeader('Content-type','application/json'); try { request.send(JSON.stringify(payload)); @@ -3036,6 +3094,7 @@ HttpProvider.prototype.sendAsync = function (payload, callback) { }; request.open('POST', this.host, true); + request.setRequestHeader('Content-type','application/json'); try { request.send(JSON.stringify(payload)); @@ -3721,9 +3780,9 @@ var RequestManager = function (provider) { arguments.callee._singletonInstance = this; this.provider = provider; - this.polls = []; + this.polls = {}; this.timeout = null; - this.poll(); + this.isPolling = false; }; /** @@ -3818,6 +3877,11 @@ RequestManager.prototype.sendBatch = function (data, callback) { */ RequestManager.prototype.setProvider = function (p) { this.provider = p; + + if(this.provider && !this.isPolling) { + this.poll(); + this.isPolling = true; + } }; /*jshint maxparams:4 */ @@ -3834,7 +3898,7 @@ RequestManager.prototype.setProvider = function (p) { * @todo cleanup number of params */ RequestManager.prototype.startPolling = function (data, pollId, callback, uninstall) { - this.polls.push({data: data, id: pollId, callback: callback, uninstall: uninstall}); + this.polls['poll_'+ pollId] = {data: data, id: pollId, callback: callback, uninstall: uninstall}; }; /*jshint maxparams:3 */ @@ -3845,24 +3909,21 @@ RequestManager.prototype.startPolling = function (data, pollId, callback, uninst * @param {Number} pollId */ RequestManager.prototype.stopPolling = function (pollId) { - for (var i = this.polls.length; i--;) { - var poll = this.polls[i]; - if (poll.id === pollId) { - this.polls.splice(i, 1); - } - } + delete this.polls['poll_'+ pollId]; }; /** - * Should be called to reset polling mechanism of request manager + * Should be called to reset the polling mechanism of the request manager * * @method reset */ RequestManager.prototype.reset = function () { - this.polls.forEach(function (poll) { - poll.uninstall(poll.id); - }); - this.polls = []; + for (var key in this.polls) { + if (this.polls.hasOwnProperty(key)) { + this.polls[key].uninstall(); + } + } + this.polls = {}; if (this.timeout) { clearTimeout(this.timeout); @@ -3877,9 +3938,10 @@ RequestManager.prototype.reset = function () { * @method poll */ RequestManager.prototype.poll = function () { + /*jshint maxcomplexity: 6 */ this.timeout = setTimeout(this.poll.bind(this), c.ETH_POLLING_TIMEOUT); - if (!this.polls.length) { + if (this.polls === {}) { return; } @@ -3888,9 +3950,20 @@ RequestManager.prototype.poll = function () { return; } - var payload = Jsonrpc.getInstance().toBatchPayload(this.polls.map(function (data) { - return data.data; - })); + var pollsData = []; + var pollsKeys = []; + for (var key in this.polls) { + if (this.polls.hasOwnProperty(key)) { + pollsData.push(this.polls[key].data); + pollsKeys.push(key); + } + } + + if (pollsData.length === 0) { + return; + } + + var payload = Jsonrpc.getInstance().toBatchPayload(pollsData); var self = this; this.provider.sendAsync(payload, function (error, results) { @@ -3904,8 +3977,15 @@ RequestManager.prototype.poll = function () { } results.map(function (result, index) { - result.callback = self.polls[index].callback; - return result; + var key = pollsKeys[index]; + // make sure the filter is still installed after arrival of the request + if(self.polls[key]) { + result.callback = self.polls[key].callback; + return result; + } else + return false; + 
}).filter(function (result) { + return (!result) ? false : true; }).filter(function (result) { var valid = Jsonrpc.getInstance().isValidResponse(result); if (!valid) { @@ -4121,11 +4201,11 @@ var eth = function () { switch(type) { case 'latest': - args.pop(); + args.shift(); this.params = 0; return 'eth_newBlockFilter'; case 'pending': - args.pop(); + args.shift(); this.params = 0; return 'eth_newPendingTransactionFilter'; default: @@ -5583,2691 +5663,12 @@ module.exports = { })); },{"./core":32}],"bignumber.js":[function(require,module,exports){ -/*! bignumber.js v2.0.7 https://github.com/MikeMcl/bignumber.js/LICENCE */ - -;(function (global) { - 'use strict'; - - /* - bignumber.js v2.0.7 - A JavaScript library for arbitrary-precision arithmetic. - https://github.com/MikeMcl/bignumber.js - Copyright (c) 2015 Michael Mclaughlin - MIT Expat Licence - */ - - - var BigNumber, crypto, parseNumeric, - isNumeric = /^-?(\d+(\.\d*)?|\.\d+)(e[+-]?\d+)?$/i, - mathceil = Math.ceil, - mathfloor = Math.floor, - notBool = ' not a boolean or binary digit', - roundingMode = 'rounding mode', - tooManyDigits = 'number type has more than 15 significant digits', - ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ$_', - BASE = 1e14, - LOG_BASE = 14, - MAX_SAFE_INTEGER = 0x1fffffffffffff, // 2^53 - 1 - // MAX_INT32 = 0x7fffffff, // 2^31 - 1 - POWS_TEN = [1, 10, 100, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13], - SQRT_BASE = 1e7, - - /* - * The limit on the value of DECIMAL_PLACES, TO_EXP_NEG, TO_EXP_POS, MIN_EXP, MAX_EXP, and - * the arguments to toExponential, toFixed, toFormat, and toPrecision, beyond which an - * exception is thrown (if ERRORS is true). - */ - MAX = 1E9; // 0 to MAX_INT32 - - - /* - * Create and return a BigNumber constructor. - */ - function another(configObj) { - var div, - - // id tracks the caller function, so its name can be included in error messages. - id = 0, - P = BigNumber.prototype, - ONE = new BigNumber(1), - - - /********************************* EDITABLE DEFAULTS **********************************/ - - - /* - * The default values below must be integers within the inclusive ranges stated. - * The values can also be changed at run-time using BigNumber.config. - */ - - // The maximum number of decimal places for operations involving division. - DECIMAL_PLACES = 20, // 0 to MAX - - /* - * The rounding mode used when rounding to the above decimal places, and when using - * toExponential, toFixed, toFormat and toPrecision, and round (default value). - * UP 0 Away from zero. - * DOWN 1 Towards zero. - * CEIL 2 Towards +Infinity. - * FLOOR 3 Towards -Infinity. - * HALF_UP 4 Towards nearest neighbour. If equidistant, up. - * HALF_DOWN 5 Towards nearest neighbour. If equidistant, down. - * HALF_EVEN 6 Towards nearest neighbour. If equidistant, towards even neighbour. - * HALF_CEIL 7 Towards nearest neighbour. If equidistant, towards +Infinity. - * HALF_FLOOR 8 Towards nearest neighbour. If equidistant, towards -Infinity. - */ - ROUNDING_MODE = 4, // 0 to 8 - - // EXPONENTIAL_AT : [TO_EXP_NEG , TO_EXP_POS] - - // The exponent value at and beneath which toString returns exponential notation. - // Number type: -7 - TO_EXP_NEG = -7, // 0 to -MAX - - // The exponent value at and above which toString returns exponential notation. - // Number type: 21 - TO_EXP_POS = 21, // 0 to MAX - - // RANGE : [MIN_EXP, MAX_EXP] - - // The minimum exponent value, beneath which underflow to zero occurs. 
- // Number type: -324 (5e-324) - MIN_EXP = -1e7, // -1 to -MAX - - // The maximum exponent value, above which overflow to Infinity occurs. - // Number type: 308 (1.7976931348623157e+308) - // For MAX_EXP > 1e7, e.g. new BigNumber('1e100000000').plus(1) may be slow. - MAX_EXP = 1e7, // 1 to MAX - - // Whether BigNumber Errors are ever thrown. - ERRORS = true, // true or false - - // Change to intValidatorNoErrors if ERRORS is false. - isValidInt = intValidatorWithErrors, // intValidatorWithErrors/intValidatorNoErrors - - // Whether to use cryptographically-secure random number generation, if available. - CRYPTO = false, // true or false - - /* - * The modulo mode used when calculating the modulus: a mod n. - * The quotient (q = a / n) is calculated according to the corresponding rounding mode. - * The remainder (r) is calculated as: r = a - n * q. - * - * UP 0 The remainder is positive if the dividend is negative, else is negative. - * DOWN 1 The remainder has the same sign as the dividend. - * This modulo mode is commonly known as 'truncated division' and is - * equivalent to (a % n) in JavaScript. - * FLOOR 3 The remainder has the same sign as the divisor (Python %). - * HALF_EVEN 6 This modulo mode implements the IEEE 754 remainder function. - * EUCLID 9 Euclidian division. q = sign(n) * floor(a / abs(n)). - * The remainder is always positive. - * - * The truncated division, floored division, Euclidian division and IEEE 754 remainder - * modes are commonly used for the modulus operation. - * Although the other rounding modes can also be used, they may not give useful results. - */ - MODULO_MODE = 1, // 0 to 9 - - // The maximum number of significant digits of the result of the toPower operation. - // If POW_PRECISION is 0, there will be unlimited significant digits. - POW_PRECISION = 100, // 0 to MAX - - // The format specification used by the BigNumber.prototype.toFormat method. - FORMAT = { - decimalSeparator: '.', - groupSeparator: ',', - groupSize: 3, - secondaryGroupSize: 0, - fractionGroupSeparator: '\xA0', // non-breaking space - fractionGroupSize: 0 - }; - - - /******************************************************************************************/ - - - // CONSTRUCTOR - - - /* - * The BigNumber constructor and exported function. - * Create and return a new instance of a BigNumber object. - * - * n {number|string|BigNumber} A numeric value. - * [b] {number} The base of n. Integer, 2 to 64 inclusive. - */ - function BigNumber( n, b ) { - var c, e, i, num, len, str, - x = this; - - // Enable constructor usage without new. - if ( !( x instanceof BigNumber ) ) { - - // 'BigNumber() constructor call without new: {n}' - if (ERRORS) raise( 26, 'constructor call without new', n ); - return new BigNumber( n, b ); - } - - // 'new BigNumber() base not an integer: {b}' - // 'new BigNumber() base out of range: {b}' - if ( b == null || !isValidInt( b, 2, 64, id, 'base' ) ) { - - // Duplicate. - if ( n instanceof BigNumber ) { - x.s = n.s; - x.e = n.e; - x.c = ( n = n.c ) ? n.slice() : n; - id = 0; - return; - } - - if ( ( num = typeof n == 'number' ) && n * 0 == 0 ) { - x.s = 1 / n < 0 ? ( n = -n, -1 ) : 1; - - // Fast path for integers. - if ( n === ~~n ) { - for ( e = 0, i = n; i >= 10; i /= 10, e++ ); - x.e = e; - x.c = [n]; - id = 0; - return; - } - - str = n + ''; - } else { - if ( !isNumeric.test( str = n + '' ) ) return parseNumeric( x, str, num ); - x.s = str.charCodeAt(0) === 45 ? 
( str = str.slice(1), -1 ) : 1; - } - } else { - b = b | 0; - str = n + ''; - - // Ensure return value is rounded to DECIMAL_PLACES as with other bases. - // Allow exponential notation to be used with base 10 argument. - if ( b == 10 ) { - x = new BigNumber( n instanceof BigNumber ? n : str ); - return round( x, DECIMAL_PLACES + x.e + 1, ROUNDING_MODE ); - } - - // Avoid potential interpretation of Infinity and NaN as base 44+ values. - // Any number in exponential form will fail due to the [Ee][+-]. - if ( ( num = typeof n == 'number' ) && n * 0 != 0 || - !( new RegExp( '^-?' + ( c = '[' + ALPHABET.slice( 0, b ) + ']+' ) + - '(?:\\.' + c + ')?$',b < 37 ? 'i' : '' ) ).test(str) ) { - return parseNumeric( x, str, num, b ); - } - - if (num) { - x.s = 1 / n < 0 ? ( str = str.slice(1), -1 ) : 1; - - if ( ERRORS && str.replace( /^0\.0*|\./, '' ).length > 15 ) { - - // 'new BigNumber() number type has more than 15 significant digits: {n}' - raise( id, tooManyDigits, n ); - } - - // Prevent later check for length on converted number. - num = false; - } else { - x.s = str.charCodeAt(0) === 45 ? ( str = str.slice(1), -1 ) : 1; - } - - str = convertBase( str, 10, b, x.s ); - } - - // Decimal point? - if ( ( e = str.indexOf('.') ) > -1 ) str = str.replace( '.', '' ); - - // Exponential form? - if ( ( i = str.search( /e/i ) ) > 0 ) { - - // Determine exponent. - if ( e < 0 ) e = i; - e += +str.slice( i + 1 ); - str = str.substring( 0, i ); - } else if ( e < 0 ) { - - // Integer. - e = str.length; - } - - // Determine leading zeros. - for ( i = 0; str.charCodeAt(i) === 48; i++ ); - - // Determine trailing zeros. - for ( len = str.length; str.charCodeAt(--len) === 48; ); - str = str.slice( i, len + 1 ); - - if (str) { - len = str.length; - - // Disallow numbers with over 15 significant digits if number type. - // 'new BigNumber() number type has more than 15 significant digits: {n}' - if ( num && ERRORS && len > 15 ) raise( id, tooManyDigits, x.s * n ); - - e = e - i - 1; - - // Overflow? - if ( e > MAX_EXP ) { - - // Infinity. - x.c = x.e = null; - - // Underflow? - } else if ( e < MIN_EXP ) { - - // Zero. - x.c = [ x.e = 0 ]; - } else { - x.e = e; - x.c = []; - - // Transform base - - // e is the base 10 exponent. - // i is where to slice str to get the first element of the coefficient array. - i = ( e + 1 ) % LOG_BASE; - if ( e < 0 ) i += LOG_BASE; - - if ( i < len ) { - if (i) x.c.push( +str.slice( 0, i ) ); - - for ( len -= LOG_BASE; i < len; ) { - x.c.push( +str.slice( i, i += LOG_BASE ) ); - } - - str = str.slice(i); - i = LOG_BASE - str.length; - } else { - i -= len; - } - - for ( ; i--; str += '0' ); - x.c.push( +str ); - } - } else { - - // Zero. - x.c = [ x.e = 0 ]; - } - - id = 0; - } - - - // CONSTRUCTOR PROPERTIES - - - BigNumber.another = another; - - BigNumber.ROUND_UP = 0; - BigNumber.ROUND_DOWN = 1; - BigNumber.ROUND_CEIL = 2; - BigNumber.ROUND_FLOOR = 3; - BigNumber.ROUND_HALF_UP = 4; - BigNumber.ROUND_HALF_DOWN = 5; - BigNumber.ROUND_HALF_EVEN = 6; - BigNumber.ROUND_HALF_CEIL = 7; - BigNumber.ROUND_HALF_FLOOR = 8; - BigNumber.EUCLID = 9; - - - /* - * Configure infrequently-changing library-wide settings. - * - * Accept an object or an argument list, with one or many of the following properties or - * parameters respectively: - * - * DECIMAL_PLACES {number} Integer, 0 to MAX inclusive - * ROUNDING_MODE {number} Integer, 0 to 8 inclusive - * EXPONENTIAL_AT {number|number[]} Integer, -MAX to MAX inclusive or - * [integer -MAX to 0 incl., 0 to MAX incl.] 
- * RANGE {number|number[]} Non-zero integer, -MAX to MAX inclusive or - * [integer -MAX to -1 incl., integer 1 to MAX incl.] - * ERRORS {boolean|number} true, false, 1 or 0 - * CRYPTO {boolean|number} true, false, 1 or 0 - * MODULO_MODE {number} 0 to 9 inclusive - * POW_PRECISION {number} 0 to MAX inclusive - * FORMAT {object} See BigNumber.prototype.toFormat - * decimalSeparator {string} - * groupSeparator {string} - * groupSize {number} - * secondaryGroupSize {number} - * fractionGroupSeparator {string} - * fractionGroupSize {number} - * - * (The values assigned to the above FORMAT object properties are not checked for validity.) - * - * E.g. - * BigNumber.config(20, 4) is equivalent to - * BigNumber.config({ DECIMAL_PLACES : 20, ROUNDING_MODE : 4 }) - * - * Ignore properties/parameters set to null or undefined. - * Return an object with the properties current values. - */ - BigNumber.config = function () { - var v, p, - i = 0, - r = {}, - a = arguments, - o = a[0], - has = o && typeof o == 'object' - ? function () { if ( o.hasOwnProperty(p) ) return ( v = o[p] ) != null; } - : function () { if ( a.length > i ) return ( v = a[i++] ) != null; }; - - // DECIMAL_PLACES {number} Integer, 0 to MAX inclusive. - // 'config() DECIMAL_PLACES not an integer: {v}' - // 'config() DECIMAL_PLACES out of range: {v}' - if ( has( p = 'DECIMAL_PLACES' ) && isValidInt( v, 0, MAX, 2, p ) ) { - DECIMAL_PLACES = v | 0; - } - r[p] = DECIMAL_PLACES; - - // ROUNDING_MODE {number} Integer, 0 to 8 inclusive. - // 'config() ROUNDING_MODE not an integer: {v}' - // 'config() ROUNDING_MODE out of range: {v}' - if ( has( p = 'ROUNDING_MODE' ) && isValidInt( v, 0, 8, 2, p ) ) { - ROUNDING_MODE = v | 0; - } - r[p] = ROUNDING_MODE; - - // EXPONENTIAL_AT {number|number[]} - // Integer, -MAX to MAX inclusive or [integer -MAX to 0 inclusive, 0 to MAX inclusive]. - // 'config() EXPONENTIAL_AT not an integer: {v}' - // 'config() EXPONENTIAL_AT out of range: {v}' - if ( has( p = 'EXPONENTIAL_AT' ) ) { - - if ( isArray(v) ) { - if ( isValidInt( v[0], -MAX, 0, 2, p ) && isValidInt( v[1], 0, MAX, 2, p ) ) { - TO_EXP_NEG = v[0] | 0; - TO_EXP_POS = v[1] | 0; - } - } else if ( isValidInt( v, -MAX, MAX, 2, p ) ) { - TO_EXP_NEG = -( TO_EXP_POS = ( v < 0 ? -v : v ) | 0 ); - } - } - r[p] = [ TO_EXP_NEG, TO_EXP_POS ]; - - // RANGE {number|number[]} Non-zero integer, -MAX to MAX inclusive or - // [integer -MAX to -1 inclusive, integer 1 to MAX inclusive]. - // 'config() RANGE not an integer: {v}' - // 'config() RANGE cannot be zero: {v}' - // 'config() RANGE out of range: {v}' - if ( has( p = 'RANGE' ) ) { - - if ( isArray(v) ) { - if ( isValidInt( v[0], -MAX, -1, 2, p ) && isValidInt( v[1], 1, MAX, 2, p ) ) { - MIN_EXP = v[0] | 0; - MAX_EXP = v[1] | 0; - } - } else if ( isValidInt( v, -MAX, MAX, 2, p ) ) { - if ( v | 0 ) MIN_EXP = -( MAX_EXP = ( v < 0 ? -v : v ) | 0 ); - else if (ERRORS) raise( 2, p + ' cannot be zero', v ); - } - } - r[p] = [ MIN_EXP, MAX_EXP ]; - - // ERRORS {boolean|number} true, false, 1 or 0. - // 'config() ERRORS not a boolean or binary digit: {v}' - if ( has( p = 'ERRORS' ) ) { - - if ( v === !!v || v === 1 || v === 0 ) { - id = 0; - isValidInt = ( ERRORS = !!v ) ? intValidatorWithErrors : intValidatorNoErrors; - } else if (ERRORS) { - raise( 2, p + notBool, v ); - } - } - r[p] = ERRORS; - - // CRYPTO {boolean|number} true, false, 1 or 0. 
- // 'config() CRYPTO not a boolean or binary digit: {v}' - // 'config() crypto unavailable: {crypto}' - if ( has( p = 'CRYPTO' ) ) { - - if ( v === !!v || v === 1 || v === 0 ) { - CRYPTO = !!( v && crypto && typeof crypto == 'object' ); - if ( v && !CRYPTO && ERRORS ) raise( 2, 'crypto unavailable', crypto ); - } else if (ERRORS) { - raise( 2, p + notBool, v ); - } - } - r[p] = CRYPTO; - - // MODULO_MODE {number} Integer, 0 to 9 inclusive. - // 'config() MODULO_MODE not an integer: {v}' - // 'config() MODULO_MODE out of range: {v}' - if ( has( p = 'MODULO_MODE' ) && isValidInt( v, 0, 9, 2, p ) ) { - MODULO_MODE = v | 0; - } - r[p] = MODULO_MODE; - - // POW_PRECISION {number} Integer, 0 to MAX inclusive. - // 'config() POW_PRECISION not an integer: {v}' - // 'config() POW_PRECISION out of range: {v}' - if ( has( p = 'POW_PRECISION' ) && isValidInt( v, 0, MAX, 2, p ) ) { - POW_PRECISION = v | 0; - } - r[p] = POW_PRECISION; - - // FORMAT {object} - // 'config() FORMAT not an object: {v}' - if ( has( p = 'FORMAT' ) ) { - - if ( typeof v == 'object' ) { - FORMAT = v; - } else if (ERRORS) { - raise( 2, p + ' not an object', v ); - } - } - r[p] = FORMAT; - - return r; - }; - - - /* - * Return a new BigNumber whose value is the maximum of the arguments. - * - * arguments {number|string|BigNumber} - */ - BigNumber.max = function () { return maxOrMin( arguments, P.lt ); }; - - - /* - * Return a new BigNumber whose value is the minimum of the arguments. - * - * arguments {number|string|BigNumber} - */ - BigNumber.min = function () { return maxOrMin( arguments, P.gt ); }; - - - /* - * Return a new BigNumber with a random value equal to or greater than 0 and less than 1, - * and with dp, or DECIMAL_PLACES if dp is omitted, decimal places (or less if trailing - * zeros are produced). - * - * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. - * - * 'random() decimal places not an integer: {dp}' - * 'random() decimal places out of range: {dp}' - * 'random() crypto unavailable: {crypto}' - */ - BigNumber.random = (function () { - var pow2_53 = 0x20000000000000; - - // Return a 53 bit integer n, where 0 <= n < 9007199254740992. - // Check if Math.random() produces more than 32 bits of randomness. - // If it does, assume at least 53 bits are produced, otherwise assume at least 30 bits. - // 0x40000000 is 2^30, 0x800000 is 2^23, 0x1fffff is 2^21 - 1. - var random53bitInt = (Math.random() * pow2_53) & 0x1fffff - ? function () { return mathfloor( Math.random() * pow2_53 ); } - : function () { return ((Math.random() * 0x40000000 | 0) * 0x800000) + - (Math.random() * 0x800000 | 0); }; - - return function (dp) { - var a, b, e, k, v, - i = 0, - c = [], - rand = new BigNumber(ONE); - - dp = dp == null || !isValidInt( dp, 0, MAX, 14 ) ? DECIMAL_PLACES : dp | 0; - k = mathceil( dp / LOG_BASE ); - - if (CRYPTO) { - - // Browsers supporting crypto.getRandomValues. - if ( crypto && crypto.getRandomValues ) { - - a = crypto.getRandomValues( new Uint32Array( k *= 2 ) ); - - for ( ; i < k; ) { - - // 53 bits: - // ((Math.pow(2, 32) - 1) * Math.pow(2, 21)).toString(2) - // 11111 11111111 11111111 11111111 11100000 00000000 00000000 - // ((Math.pow(2, 32) - 1) >>> 11).toString(2) - // 11111 11111111 11111111 - // 0x20000 is 2^21. - v = a[i] * 0x20000 + (a[i + 1] >>> 11); - - // Rejection sampling: - // 0 <= v < 9007199254740992 - // Probability that v >= 9e15, is - // 7199254740992 / 9007199254740992 ~= 0.0008, i.e. 
1 in 1251 - if ( v >= 9e15 ) { - b = crypto.getRandomValues( new Uint32Array(2) ); - a[i] = b[0]; - a[i + 1] = b[1]; - } else { - - // 0 <= v <= 8999999999999999 - // 0 <= (v % 1e14) <= 99999999999999 - c.push( v % 1e14 ); - i += 2; - } - } - i = k / 2; - - // Node.js supporting crypto.randomBytes. - } else if ( crypto && crypto.randomBytes ) { - - // buffer - a = crypto.randomBytes( k *= 7 ); - - for ( ; i < k; ) { - - // 0x1000000000000 is 2^48, 0x10000000000 is 2^40 - // 0x100000000 is 2^32, 0x1000000 is 2^24 - // 11111 11111111 11111111 11111111 11111111 11111111 11111111 - // 0 <= v < 9007199254740992 - v = ( ( a[i] & 31 ) * 0x1000000000000 ) + ( a[i + 1] * 0x10000000000 ) + - ( a[i + 2] * 0x100000000 ) + ( a[i + 3] * 0x1000000 ) + - ( a[i + 4] << 16 ) + ( a[i + 5] << 8 ) + a[i + 6]; - - if ( v >= 9e15 ) { - crypto.randomBytes(7).copy( a, i ); - } else { - - // 0 <= (v % 1e14) <= 99999999999999 - c.push( v % 1e14 ); - i += 7; - } - } - i = k / 7; - } else if (ERRORS) { - raise( 14, 'crypto unavailable', crypto ); - } - } - - // Use Math.random: CRYPTO is false or crypto is unavailable and ERRORS is false. - if (!i) { - - for ( ; i < k; ) { - v = random53bitInt(); - if ( v < 9e15 ) c[i++] = v % 1e14; - } - } - - k = c[--i]; - dp %= LOG_BASE; - - // Convert trailing digits to zeros according to dp. - if ( k && dp ) { - v = POWS_TEN[LOG_BASE - dp]; - c[i] = mathfloor( k / v ) * v; - } - - // Remove trailing elements which are zero. - for ( ; c[i] === 0; c.pop(), i-- ); - - // Zero? - if ( i < 0 ) { - c = [ e = 0 ]; - } else { - - // Remove leading elements which are zero and adjust exponent accordingly. - for ( e = -1 ; c[0] === 0; c.shift(), e -= LOG_BASE); - - // Count the digits of the first element of c to determine leading zeros, and... - for ( i = 1, v = c[0]; v >= 10; v /= 10, i++); - - // adjust the exponent accordingly. - if ( i < LOG_BASE ) e -= LOG_BASE - i; - } - - rand.e = e; - rand.c = c; - return rand; - }; - })(); - - - // PRIVATE FUNCTIONS - - - // Convert a numeric string of baseIn to a numeric string of baseOut. - function convertBase( str, baseOut, baseIn, sign ) { - var d, e, k, r, x, xc, y, - i = str.indexOf( '.' ), - dp = DECIMAL_PLACES, - rm = ROUNDING_MODE; - - if ( baseIn < 37 ) str = str.toLowerCase(); - - // Non-integer. - if ( i >= 0 ) { - k = POW_PRECISION; - - // Unlimited precision. - POW_PRECISION = 0; - str = str.replace( '.', '' ); - y = new BigNumber(baseIn); - x = y.pow( str.length - i ); - POW_PRECISION = k; - - // Convert str as if an integer, then restore the fraction part by dividing the - // result by its base raised to a power. - y.c = toBaseOut( toFixedPoint( coeffToString( x.c ), x.e ), 10, baseOut ); - y.e = y.c.length; - } - - // Convert the number as integer. - xc = toBaseOut( str, baseIn, baseOut ); - e = k = xc.length; - - // Remove trailing zeros. - for ( ; xc[--k] == 0; xc.pop() ); - if ( !xc[0] ) return '0'; - - if ( i < 0 ) { - --e; - } else { - x.c = xc; - x.e = e; - - // sign is needed for correct rounding. - x.s = sign; - x = div( x, y, dp, rm, baseOut ); - xc = x.c; - r = x.r; - e = x.e; - } - - d = e + dp + 1; - - // The rounding digit, i.e. the digit to the right of the digit that may be rounded up. - i = xc[d]; - k = baseOut / 2; - r = r || d < 0 || xc[d + 1] != null; - - r = rm < 4 ? ( i != null || r ) && ( rm == 0 || rm == ( x.s < 0 ? 3 : 2 ) ) - : i > k || i == k &&( rm == 4 || r || rm == 6 && xc[d - 1] & 1 || - rm == ( x.s < 0 ? 8 : 7 ) ); - - if ( d < 1 || !xc[0] ) { - - // 1^-dp or 0. - str = r ? 
toFixedPoint( '1', -dp ) : '0'; - } else { - xc.length = d; - - if (r) { - - // Rounding up may mean the previous digit has to be rounded up and so on. - for ( --baseOut; ++xc[--d] > baseOut; ) { - xc[d] = 0; - - if ( !d ) { - ++e; - xc.unshift(1); - } - } - } - - // Determine trailing zeros. - for ( k = xc.length; !xc[--k]; ); - - // E.g. [4, 11, 15] becomes 4bf. - for ( i = 0, str = ''; i <= k; str += ALPHABET.charAt( xc[i++] ) ); - str = toFixedPoint( str, e ); - } - - // The caller will add the sign. - return str; - } - - - // Perform division in the specified base. Called by div and convertBase. - div = (function () { - - // Assume non-zero x and k. - function multiply( x, k, base ) { - var m, temp, xlo, xhi, - carry = 0, - i = x.length, - klo = k % SQRT_BASE, - khi = k / SQRT_BASE | 0; - - for ( x = x.slice(); i--; ) { - xlo = x[i] % SQRT_BASE; - xhi = x[i] / SQRT_BASE | 0; - m = khi * xlo + xhi * klo; - temp = klo * xlo + ( ( m % SQRT_BASE ) * SQRT_BASE ) + carry; - carry = ( temp / base | 0 ) + ( m / SQRT_BASE | 0 ) + khi * xhi; - x[i] = temp % base; - } - - if (carry) x.unshift(carry); - - return x; - } - - function compare( a, b, aL, bL ) { - var i, cmp; - - if ( aL != bL ) { - cmp = aL > bL ? 1 : -1; - } else { - - for ( i = cmp = 0; i < aL; i++ ) { - - if ( a[i] != b[i] ) { - cmp = a[i] > b[i] ? 1 : -1; - break; - } - } - } - return cmp; - } - - function subtract( a, b, aL, base ) { - var i = 0; - - // Subtract b from a. - for ( ; aL--; ) { - a[aL] -= i; - i = a[aL] < b[aL] ? 1 : 0; - a[aL] = i * base + a[aL] - b[aL]; - } - - // Remove leading zeros. - for ( ; !a[0] && a.length > 1; a.shift() ); - } - - // x: dividend, y: divisor. - return function ( x, y, dp, rm, base ) { - var cmp, e, i, more, n, prod, prodL, q, qc, rem, remL, rem0, xi, xL, yc0, - yL, yz, - s = x.s == y.s ? 1 : -1, - xc = x.c, - yc = y.c; - - // Either NaN, Infinity or 0? - if ( !xc || !xc[0] || !yc || !yc[0] ) { - - return new BigNumber( - - // Return NaN if either NaN, or both Infinity or 0. - !x.s || !y.s || ( xc ? yc && xc[0] == yc[0] : !yc ) ? NaN : - - // Return ±0 if x is ±0 or y is ±Infinity, or return ±Infinity as y is ±0. - xc && xc[0] == 0 || !yc ? s * 0 : s / 0 - ); - } - - q = new BigNumber(s); - qc = q.c = []; - e = x.e - y.e; - s = dp + e + 1; - - if ( !base ) { - base = BASE; - e = bitFloor( x.e / LOG_BASE ) - bitFloor( y.e / LOG_BASE ); - s = s / LOG_BASE | 0; - } - - // Result exponent may be one less then the current value of e. - // The coefficients of the BigNumbers from convertBase may have trailing zeros. - for ( i = 0; yc[i] == ( xc[i] || 0 ); i++ ); - if ( yc[i] > ( xc[i] || 0 ) ) e--; - - if ( s < 0 ) { - qc.push(1); - more = true; - } else { - xL = xc.length; - yL = yc.length; - i = 0; - s += 2; - - // Normalise xc and yc so highest order digit of yc is >= base / 2. - - n = mathfloor( base / ( yc[0] + 1 ) ); - - // Not necessary, but to handle odd bases where yc[0] == ( base / 2 ) - 1. - // if ( n > 1 || n++ == 1 && yc[0] < base / 2 ) { - if ( n > 1 ) { - yc = multiply( yc, n, base ); - xc = multiply( xc, n, base ); - yL = yc.length; - xL = xc.length; - } - - xi = yL; - rem = xc.slice( 0, yL ); - remL = rem.length; - - // Add zeros to make remainder as long as divisor. - for ( ; remL < yL; rem[remL++] = 0 ); - yz = yc.slice(); - yz.unshift(0); - yc0 = yc[0]; - if ( yc[1] >= base / 2 ) yc0++; - // Not necessary, but to prevent trial digit n > base, when using base 3. - // else if ( base == 3 && yc0 == 1 ) yc0 = 1 + 1e-15; - - do { - n = 0; - - // Compare divisor and remainder. 
- cmp = compare( yc, rem, yL, remL ); - - // If divisor < remainder. - if ( cmp < 0 ) { - - // Calculate trial digit, n. - - rem0 = rem[0]; - if ( yL != remL ) rem0 = rem0 * base + ( rem[1] || 0 ); - - // n is how many times the divisor goes into the current remainder. - n = mathfloor( rem0 / yc0 ); - - // Algorithm: - // 1. product = divisor * trial digit (n) - // 2. if product > remainder: product -= divisor, n-- - // 3. remainder -= product - // 4. if product was < remainder at 2: - // 5. compare new remainder and divisor - // 6. If remainder > divisor: remainder -= divisor, n++ - - if ( n > 1 ) { - - // n may be > base only when base is 3. - if (n >= base) n = base - 1; - - // product = divisor * trial digit. - prod = multiply( yc, n, base ); - prodL = prod.length; - remL = rem.length; - - // Compare product and remainder. - // If product > remainder. - // Trial digit n too high. - // n is 1 too high about 5% of the time, and is not known to have - // ever been more than 1 too high. - while ( compare( prod, rem, prodL, remL ) == 1 ) { - n--; - - // Subtract divisor from product. - subtract( prod, yL < prodL ? yz : yc, prodL, base ); - prodL = prod.length; - cmp = 1; - } - } else { - - // n is 0 or 1, cmp is -1. - // If n is 0, there is no need to compare yc and rem again below, - // so change cmp to 1 to avoid it. - // If n is 1, leave cmp as -1, so yc and rem are compared again. - if ( n == 0 ) { - - // divisor < remainder, so n must be at least 1. - cmp = n = 1; - } - - // product = divisor - prod = yc.slice(); - prodL = prod.length; - } - - if ( prodL < remL ) prod.unshift(0); - - // Subtract product from remainder. - subtract( rem, prod, remL, base ); - remL = rem.length; - - // If product was < remainder. - if ( cmp == -1 ) { - - // Compare divisor and new remainder. - // If divisor < new remainder, subtract divisor from remainder. - // Trial digit n too low. - // n is 1 too low about 5% of the time, and very rarely 2 too low. - while ( compare( yc, rem, yL, remL ) < 1 ) { - n++; - - // Subtract divisor from remainder. - subtract( rem, yL < remL ? yz : yc, remL, base ); - remL = rem.length; - } - } - } else if ( cmp === 0 ) { - n++; - rem = [0]; - } // else cmp === 1 and n will be 0 - - // Add the next digit, n, to the result array. - qc[i++] = n; - - // Update the remainder. - if ( rem[0] ) { - rem[remL++] = xc[xi] || 0; - } else { - rem = [ xc[xi] ]; - remL = 1; - } - } while ( ( xi++ < xL || rem[0] != null ) && s-- ); - - more = rem[0] != null; - - // Leading zero? - if ( !qc[0] ) qc.shift(); - } - - if ( base == BASE ) { - - // To calculate q.e, first get the number of digits of qc[0]. - for ( i = 1, s = qc[0]; s >= 10; s /= 10, i++ ); - round( q, dp + ( q.e = i + e * LOG_BASE - 1 ) + 1, rm, more ); - - // Caller is convertBase. - } else { - q.e = e; - q.r = +more; - } - - return q; - }; - })(); - - - /* - * Return a string representing the value of BigNumber n in fixed-point or exponential - * notation rounded to the specified decimal places or significant digits. - * - * n is a BigNumber. - * i is the index of the last digit required (i.e. the digit that may be rounded up). - * rm is the rounding mode. - * caller is caller id: toExponential 19, toFixed 20, toFormat 21, toPrecision 24. - */ - function format( n, i, rm, caller ) { - var c0, e, ne, len, str; - - rm = rm != null && isValidInt( rm, 0, 8, caller, roundingMode ) - ? 
rm | 0 : ROUNDING_MODE; - - if ( !n.c ) return n.toString(); - c0 = n.c[0]; - ne = n.e; - - if ( i == null ) { - str = coeffToString( n.c ); - str = caller == 19 || caller == 24 && ne <= TO_EXP_NEG - ? toExponential( str, ne ) - : toFixedPoint( str, ne ); - } else { - n = round( new BigNumber(n), i, rm ); - - // n.e may have changed if the value was rounded up. - e = n.e; - - str = coeffToString( n.c ); - len = str.length; - - // toPrecision returns exponential notation if the number of significant digits - // specified is less than the number of digits necessary to represent the integer - // part of the value in fixed-point notation. - - // Exponential notation. - if ( caller == 19 || caller == 24 && ( i <= e || e <= TO_EXP_NEG ) ) { - - // Append zeros? - for ( ; len < i; str += '0', len++ ); - str = toExponential( str, e ); - - // Fixed-point notation. - } else { - i -= ne; - str = toFixedPoint( str, e ); - - // Append zeros? - if ( e + 1 > len ) { - if ( --i > 0 ) for ( str += '.'; i--; str += '0' ); - } else { - i += e - len; - if ( i > 0 ) { - if ( e + 1 == len ) str += '.'; - for ( ; i--; str += '0' ); - } - } - } - } - - return n.s < 0 && c0 ? '-' + str : str; - } - - - // Handle BigNumber.max and BigNumber.min. - function maxOrMin( args, method ) { - var m, n, - i = 0; - - if ( isArray( args[0] ) ) args = args[0]; - m = new BigNumber( args[0] ); - - for ( ; ++i < args.length; ) { - n = new BigNumber( args[i] ); - - // If any number is NaN, return NaN. - if ( !n.s ) { - m = n; - break; - } else if ( method.call( m, n ) ) { - m = n; - } - } - - return m; - } - - - /* - * Return true if n is an integer in range, otherwise throw. - * Use for argument validation when ERRORS is true. - */ - function intValidatorWithErrors( n, min, max, caller, name ) { - if ( n < min || n > max || n != truncate(n) ) { - raise( caller, ( name || 'decimal places' ) + - ( n < min || n > max ? ' out of range' : ' not an integer' ), n ); - } - - return true; - } - - - /* - * Strip trailing zeros, calculate base 10 exponent and check against MIN_EXP and MAX_EXP. - * Called by minus, plus and times. - */ - function normalise( n, c, e ) { - var i = 1, - j = c.length; - - // Remove trailing zeros. - for ( ; !c[--j]; c.pop() ); - - // Calculate the base 10 exponent. First get the number of digits of c[0]. - for ( j = c[0]; j >= 10; j /= 10, i++ ); - - // Overflow? - if ( ( e = i + e * LOG_BASE - 1 ) > MAX_EXP ) { - - // Infinity. - n.c = n.e = null; - - // Underflow? - } else if ( e < MIN_EXP ) { - - // Zero. - n.c = [ n.e = 0 ]; - } else { - n.e = e; - n.c = c; - } - - return n; - } - - - // Handle values that fail the validity test in BigNumber. - parseNumeric = (function () { - var basePrefix = /^(-?)0([xbo])/i, - dotAfter = /^([^.]+)\.$/, - dotBefore = /^\.([^.]+)$/, - isInfinityOrNaN = /^-?(Infinity|NaN)$/, - whitespaceOrPlus = /^\s*\+|^\s+|\s+$/g; - - return function ( x, str, num, b ) { - var base, - s = num ? str : str.replace( whitespaceOrPlus, '' ); - - // No exception on ±Infinity or NaN. - if ( isInfinityOrNaN.test(s) ) { - x.s = isNaN(s) ? null : s < 0 ? -1 : 1; - } else { - if ( !num ) { - - // basePrefix = /^(-?)0([xbo])(?=\w[\w.]*$)/i - s = s.replace( basePrefix, function ( m, p1, p2 ) { - base = ( p2 = p2.toLowerCase() ) == 'x' ? 16 : p2 == 'b' ? 2 : 8; - return !b || b == base ? p1 : m; - }); - - if (b) { - base = b; - - // E.g. '1.' 
to '1', '.1' to '0.1' - s = s.replace( dotAfter, '$1' ).replace( dotBefore, '0.$1' ); - } - - if ( str != s ) return new BigNumber( s, base ); - } - - // 'new BigNumber() not a number: {n}' - // 'new BigNumber() not a base {b} number: {n}' - if (ERRORS) raise( id, 'not a' + ( b ? ' base ' + b : '' ) + ' number', str ); - x.s = null; - } - - x.c = x.e = null; - id = 0; - } - })(); - - - // Throw a BigNumber Error. - function raise( caller, msg, val ) { - var error = new Error( [ - 'new BigNumber', // 0 - 'cmp', // 1 - 'config', // 2 - 'div', // 3 - 'divToInt', // 4 - 'eq', // 5 - 'gt', // 6 - 'gte', // 7 - 'lt', // 8 - 'lte', // 9 - 'minus', // 10 - 'mod', // 11 - 'plus', // 12 - 'precision', // 13 - 'random', // 14 - 'round', // 15 - 'shift', // 16 - 'times', // 17 - 'toDigits', // 18 - 'toExponential', // 19 - 'toFixed', // 20 - 'toFormat', // 21 - 'toFraction', // 22 - 'pow', // 23 - 'toPrecision', // 24 - 'toString', // 25 - 'BigNumber' // 26 - ][caller] + '() ' + msg + ': ' + val ); - - error.name = 'BigNumber Error'; - id = 0; - throw error; - } - - - /* - * Round x to sd significant digits using rounding mode rm. Check for over/under-flow. - * If r is truthy, it is known that there are more digits after the rounding digit. - */ - function round( x, sd, rm, r ) { - var d, i, j, k, n, ni, rd, - xc = x.c, - pows10 = POWS_TEN; - - // if x is not Infinity or NaN... - if (xc) { - - // rd is the rounding digit, i.e. the digit after the digit that may be rounded up. - // n is a base 1e14 number, the value of the element of array x.c containing rd. - // ni is the index of n within x.c. - // d is the number of digits of n. - // i is the index of rd within n including leading zeros. - // j is the actual index of rd within n (if < 0, rd is a leading zero). - out: { - - // Get the number of digits of the first element of xc. - for ( d = 1, k = xc[0]; k >= 10; k /= 10, d++ ); - i = sd - d; - - // If the rounding digit is in the first element of xc... - if ( i < 0 ) { - i += LOG_BASE; - j = sd; - n = xc[ ni = 0 ]; - - // Get the rounding digit at index j of n. - rd = n / pows10[ d - j - 1 ] % 10 | 0; - } else { - ni = mathceil( ( i + 1 ) / LOG_BASE ); - - if ( ni >= xc.length ) { - - if (r) { - - // Needed by sqrt. - for ( ; xc.length <= ni; xc.push(0) ); - n = rd = 0; - d = 1; - i %= LOG_BASE; - j = i - LOG_BASE + 1; - } else { - break out; - } - } else { - n = k = xc[ni]; - - // Get the number of digits of n. - for ( d = 1; k >= 10; k /= 10, d++ ); - - // Get the index of rd within n. - i %= LOG_BASE; - - // Get the index of rd within n, adjusted for leading zeros. - // The number of leading zeros of n is given by LOG_BASE - d. - j = i - LOG_BASE + d; - - // Get the rounding digit at index j of n. - rd = j < 0 ? 0 : n / pows10[ d - j - 1 ] % 10 | 0; - } - } - - r = r || sd < 0 || - - // Are there any non-zero digits after the rounding digit? - // The expression n % pows10[ d - j - 1 ] returns all digits of n to the right - // of the digit at j, e.g. if n is 908714 and j is 2, the expression gives 714. - xc[ni + 1] != null || ( j < 0 ? n : n % pows10[ d - j - 1 ] ); - - r = rm < 4 - ? ( rd || r ) && ( rm == 0 || rm == ( x.s < 0 ? 3 : 2 ) ) - : rd > 5 || rd == 5 && ( rm == 4 || r || rm == 6 && - - // Check whether the digit to the left of the rounding digit is odd. - ( ( i > 0 ? j > 0 ? n / pows10[ d - j ] : 0 : xc[ni - 1] ) % 10 ) & 1 || - rm == ( x.s < 0 ? 8 : 7 ) ); - - if ( sd < 1 || !xc[0] ) { - xc.length = 0; - - if (r) { - - // Convert sd to decimal places. 
- sd -= x.e + 1; - - // 1, 0.1, 0.01, 0.001, 0.0001 etc. - xc[0] = pows10[ sd % LOG_BASE ]; - x.e = -sd || 0; - } else { - - // Zero. - xc[0] = x.e = 0; - } - - return x; - } - - // Remove excess digits. - if ( i == 0 ) { - xc.length = ni; - k = 1; - ni--; - } else { - xc.length = ni + 1; - k = pows10[ LOG_BASE - i ]; - - // E.g. 56700 becomes 56000 if 7 is the rounding digit. - // j > 0 means i > number of leading zeros of n. - xc[ni] = j > 0 ? mathfloor( n / pows10[ d - j ] % pows10[j] ) * k : 0; - } - - // Round up? - if (r) { - - for ( ; ; ) { - - // If the digit to be rounded up is in the first element of xc... - if ( ni == 0 ) { - - // i will be the length of xc[0] before k is added. - for ( i = 1, j = xc[0]; j >= 10; j /= 10, i++ ); - j = xc[0] += k; - for ( k = 1; j >= 10; j /= 10, k++ ); - - // if i != k the length has increased. - if ( i != k ) { - x.e++; - if ( xc[0] == BASE ) xc[0] = 1; - } - - break; - } else { - xc[ni] += k; - if ( xc[ni] != BASE ) break; - xc[ni--] = 0; - k = 1; - } - } - } - - // Remove trailing zeros. - for ( i = xc.length; xc[--i] === 0; xc.pop() ); - } - - // Overflow? Infinity. - if ( x.e > MAX_EXP ) { - x.c = x.e = null; - - // Underflow? Zero. - } else if ( x.e < MIN_EXP ) { - x.c = [ x.e = 0 ]; - } - } - - return x; - } - - - // PROTOTYPE/INSTANCE METHODS - - - /* - * Return a new BigNumber whose value is the absolute value of this BigNumber. - */ - P.absoluteValue = P.abs = function () { - var x = new BigNumber(this); - if ( x.s < 0 ) x.s = 1; - return x; - }; - - - /* - * Return a new BigNumber whose value is the value of this BigNumber rounded to a whole - * number in the direction of Infinity. - */ - P.ceil = function () { - return round( new BigNumber(this), this.e + 1, 2 ); - }; - - - /* - * Return - * 1 if the value of this BigNumber is greater than the value of BigNumber(y, b), - * -1 if the value of this BigNumber is less than the value of BigNumber(y, b), - * 0 if they have the same value, - * or null if the value of either is NaN. - */ - P.comparedTo = P.cmp = function ( y, b ) { - id = 1; - return compare( this, new BigNumber( y, b ) ); - }; - - - /* - * Return the number of decimal places of the value of this BigNumber, or null if the value - * of this BigNumber is ±Infinity or NaN. - */ - P.decimalPlaces = P.dp = function () { - var n, v, - c = this.c; - - if ( !c ) return null; - n = ( ( v = c.length - 1 ) - bitFloor( this.e / LOG_BASE ) ) * LOG_BASE; - - // Subtract the number of trailing zeros of the last number. - if ( v = c[v] ) for ( ; v % 10 == 0; v /= 10, n-- ); - if ( n < 0 ) n = 0; - - return n; - }; - - - /* - * n / 0 = I - * n / N = N - * n / I = 0 - * 0 / n = 0 - * 0 / 0 = N - * 0 / N = N - * 0 / I = 0 - * N / n = N - * N / 0 = N - * N / N = N - * N / I = N - * I / n = I - * I / 0 = I - * I / N = N - * I / I = N - * - * Return a new BigNumber whose value is the value of this BigNumber divided by the value of - * BigNumber(y, b), rounded according to DECIMAL_PLACES and ROUNDING_MODE. - */ - P.dividedBy = P.div = function ( y, b ) { - id = 3; - return div( this, new BigNumber( y, b ), DECIMAL_PLACES, ROUNDING_MODE ); - }; - - - /* - * Return a new BigNumber whose value is the integer part of dividing the value of this - * BigNumber by the value of BigNumber(y, b). - */ - P.dividedToIntegerBy = P.divToInt = function ( y, b ) { - id = 4; - return div( this, new BigNumber( y, b ), 0, 1 ); - }; - - - /* - * Return true if the value of this BigNumber is equal to the value of BigNumber(y, b), - * otherwise returns false. 
- */ - P.equals = P.eq = function ( y, b ) { - id = 5; - return compare( this, new BigNumber( y, b ) ) === 0; - }; - - - /* - * Return a new BigNumber whose value is the value of this BigNumber rounded to a whole - * number in the direction of -Infinity. - */ - P.floor = function () { - return round( new BigNumber(this), this.e + 1, 3 ); - }; - - - /* - * Return true if the value of this BigNumber is greater than the value of BigNumber(y, b), - * otherwise returns false. - */ - P.greaterThan = P.gt = function ( y, b ) { - id = 6; - return compare( this, new BigNumber( y, b ) ) > 0; - }; - - - /* - * Return true if the value of this BigNumber is greater than or equal to the value of - * BigNumber(y, b), otherwise returns false. - */ - P.greaterThanOrEqualTo = P.gte = function ( y, b ) { - id = 7; - return ( b = compare( this, new BigNumber( y, b ) ) ) === 1 || b === 0; - - }; - - - /* - * Return true if the value of this BigNumber is a finite number, otherwise returns false. - */ - P.isFinite = function () { - return !!this.c; - }; - - - /* - * Return true if the value of this BigNumber is an integer, otherwise return false. - */ - P.isInteger = P.isInt = function () { - return !!this.c && bitFloor( this.e / LOG_BASE ) > this.c.length - 2; - }; - - - /* - * Return true if the value of this BigNumber is NaN, otherwise returns false. - */ - P.isNaN = function () { - return !this.s; - }; - - - /* - * Return true if the value of this BigNumber is negative, otherwise returns false. - */ - P.isNegative = P.isNeg = function () { - return this.s < 0; - }; - - - /* - * Return true if the value of this BigNumber is 0 or -0, otherwise returns false. - */ - P.isZero = function () { - return !!this.c && this.c[0] == 0; - }; - - - /* - * Return true if the value of this BigNumber is less than the value of BigNumber(y, b), - * otherwise returns false. - */ - P.lessThan = P.lt = function ( y, b ) { - id = 8; - return compare( this, new BigNumber( y, b ) ) < 0; - }; - - - /* - * Return true if the value of this BigNumber is less than or equal to the value of - * BigNumber(y, b), otherwise returns false. - */ - P.lessThanOrEqualTo = P.lte = function ( y, b ) { - id = 9; - return ( b = compare( this, new BigNumber( y, b ) ) ) === -1 || b === 0; - }; - - - /* - * n - 0 = n - * n - N = N - * n - I = -I - * 0 - n = -n - * 0 - 0 = 0 - * 0 - N = N - * 0 - I = -I - * N - n = N - * N - 0 = N - * N - N = N - * N - I = N - * I - n = I - * I - 0 = I - * I - N = N - * I - I = N - * - * Return a new BigNumber whose value is the value of this BigNumber minus the value of - * BigNumber(y, b). - */ - P.minus = P.sub = function ( y, b ) { - var i, j, t, xLTy, - x = this, - a = x.s; - - id = 10; - y = new BigNumber( y, b ); - b = y.s; - - // Either NaN? - if ( !a || !b ) return new BigNumber(NaN); - - // Signs differ? - if ( a != b ) { - y.s = -b; - return x.plus(y); - } - - var xe = x.e / LOG_BASE, - ye = y.e / LOG_BASE, - xc = x.c, - yc = y.c; - - if ( !xe || !ye ) { - - // Either Infinity? - if ( !xc || !yc ) return xc ? ( y.s = -b, y ) : new BigNumber( yc ? x : NaN ); - - // Either zero? - if ( !xc[0] || !yc[0] ) { - - // Return y if y is non-zero, x if x is non-zero, or zero if both are zero. - return yc[0] ? ( y.s = -b, y ) : new BigNumber( xc[0] ? x : - - // IEEE 754 (2008) 6.3: n - n = -0 when rounding to -Infinity - ROUNDING_MODE == 3 ? -0 : 0 ); - } - } - - xe = bitFloor(xe); - ye = bitFloor(ye); - xc = xc.slice(); - - // Determine which is the bigger number. 
- if ( a = xe - ye ) { - - if ( xLTy = a < 0 ) { - a = -a; - t = xc; - } else { - ye = xe; - t = yc; - } - - t.reverse(); - - // Prepend zeros to equalise exponents. - for ( b = a; b--; t.push(0) ); - t.reverse(); - } else { - - // Exponents equal. Check digit by digit. - j = ( xLTy = ( a = xc.length ) < ( b = yc.length ) ) ? a : b; - - for ( a = b = 0; b < j; b++ ) { - - if ( xc[b] != yc[b] ) { - xLTy = xc[b] < yc[b]; - break; - } - } - } - - // x < y? Point xc to the array of the bigger number. - if (xLTy) t = xc, xc = yc, yc = t, y.s = -y.s; - - b = ( j = yc.length ) - ( i = xc.length ); - - // Append zeros to xc if shorter. - // No need to add zeros to yc if shorter as subtract only needs to start at yc.length. - if ( b > 0 ) for ( ; b--; xc[i++] = 0 ); - b = BASE - 1; - - // Subtract yc from xc. - for ( ; j > a; ) { - - if ( xc[--j] < yc[j] ) { - for ( i = j; i && !xc[--i]; xc[i] = b ); - --xc[i]; - xc[j] += BASE; - } - - xc[j] -= yc[j]; - } - - // Remove leading zeros and adjust exponent accordingly. - for ( ; xc[0] == 0; xc.shift(), --ye ); - - // Zero? - if ( !xc[0] ) { - - // Following IEEE 754 (2008) 6.3, - // n - n = +0 but n - n = -0 when rounding towards -Infinity. - y.s = ROUNDING_MODE == 3 ? -1 : 1; - y.c = [ y.e = 0 ]; - return y; - } - - // No need to check for Infinity as +x - +y != Infinity && -x - -y != Infinity - // for finite x and y. - return normalise( y, xc, ye ); - }; - - - /* - * n % 0 = N - * n % N = N - * n % I = n - * 0 % n = 0 - * -0 % n = -0 - * 0 % 0 = N - * 0 % N = N - * 0 % I = 0 - * N % n = N - * N % 0 = N - * N % N = N - * N % I = N - * I % n = N - * I % 0 = N - * I % N = N - * I % I = N - * - * Return a new BigNumber whose value is the value of this BigNumber modulo the value of - * BigNumber(y, b). The result depends on the value of MODULO_MODE. - */ - P.modulo = P.mod = function ( y, b ) { - var q, s, - x = this; - - id = 11; - y = new BigNumber( y, b ); - - // Return NaN if x is Infinity or NaN, or y is NaN or zero. - if ( !x.c || !y.s || y.c && !y.c[0] ) { - return new BigNumber(NaN); - - // Return x if y is Infinity or x is zero. - } else if ( !y.c || x.c && !x.c[0] ) { - return new BigNumber(x); - } - - if ( MODULO_MODE == 9 ) { - - // Euclidian division: q = sign(y) * floor(x / abs(y)) - // r = x - qy where 0 <= r < abs(y) - s = y.s; - y.s = 1; - q = div( x, y, 0, 3 ); - y.s = s; - q.s *= s; - } else { - q = div( x, y, 0, MODULO_MODE ); - } - - return x.minus( q.times(y) ); - }; - - - /* - * Return a new BigNumber whose value is the value of this BigNumber negated, - * i.e. multiplied by -1. - */ - P.negated = P.neg = function () { - var x = new BigNumber(this); - x.s = -x.s || null; - return x; - }; - - - /* - * n + 0 = n - * n + N = N - * n + I = I - * 0 + n = n - * 0 + 0 = 0 - * 0 + N = N - * 0 + I = I - * N + n = N - * N + 0 = N - * N + N = N - * N + I = N - * I + n = I - * I + 0 = I - * I + N = N - * I + I = I - * - * Return a new BigNumber whose value is the value of this BigNumber plus the value of - * BigNumber(y, b). - */ - P.plus = P.add = function ( y, b ) { - var t, - x = this, - a = x.s; - - id = 12; - y = new BigNumber( y, b ); - b = y.s; - - // Either NaN? - if ( !a || !b ) return new BigNumber(NaN); - - // Signs differ? - if ( a != b ) { - y.s = -b; - return x.minus(y); - } - - var xe = x.e / LOG_BASE, - ye = y.e / LOG_BASE, - xc = x.c, - yc = y.c; - - if ( !xe || !ye ) { - - // Return ±Infinity if either ±Infinity. - if ( !xc || !yc ) return new BigNumber( a / 0 ); - - // Either zero? 
- // Return y if y is non-zero, x if x is non-zero, or zero if both are zero. - if ( !xc[0] || !yc[0] ) return yc[0] ? y : new BigNumber( xc[0] ? x : a * 0 ); - } - - xe = bitFloor(xe); - ye = bitFloor(ye); - xc = xc.slice(); - - // Prepend zeros to equalise exponents. Faster to use reverse then do unshifts. - if ( a = xe - ye ) { - if ( a > 0 ) { - ye = xe; - t = yc; - } else { - a = -a; - t = xc; - } - - t.reverse(); - for ( ; a--; t.push(0) ); - t.reverse(); - } - - a = xc.length; - b = yc.length; - - // Point xc to the longer array, and b to the shorter length. - if ( a - b < 0 ) t = yc, yc = xc, xc = t, b = a; - - // Only start adding at yc.length - 1 as the further digits of xc can be ignored. - for ( a = 0; b; ) { - a = ( xc[--b] = xc[b] + yc[b] + a ) / BASE | 0; - xc[b] %= BASE; - } - - if (a) { - xc.unshift(a); - ++ye; - } - - // No need to check for zero, as +x + +y != 0 && -x + -y != 0 - // ye = MAX_EXP + 1 possible - return normalise( y, xc, ye ); - }; - - - /* - * Return the number of significant digits of the value of this BigNumber. - * - * [z] {boolean|number} Whether to count integer-part trailing zeros: true, false, 1 or 0. - */ - P.precision = P.sd = function (z) { - var n, v, - x = this, - c = x.c; - - // 'precision() argument not a boolean or binary digit: {z}' - if ( z != null && z !== !!z && z !== 1 && z !== 0 ) { - if (ERRORS) raise( 13, 'argument' + notBool, z ); - if ( z != !!z ) z = null; - } - - if ( !c ) return null; - v = c.length - 1; - n = v * LOG_BASE + 1; - - if ( v = c[v] ) { - - // Subtract the number of trailing zeros of the last element. - for ( ; v % 10 == 0; v /= 10, n-- ); - - // Add the number of digits of the first element. - for ( v = c[0]; v >= 10; v /= 10, n++ ); - } - - if ( z && x.e + 1 > n ) n = x.e + 1; - - return n; - }; - - - /* - * Return a new BigNumber whose value is the value of this BigNumber rounded to a maximum of - * dp decimal places using rounding mode rm, or to 0 and ROUNDING_MODE respectively if - * omitted. - * - * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. - * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. - * - * 'round() decimal places out of range: {dp}' - * 'round() decimal places not an integer: {dp}' - * 'round() rounding mode not an integer: {rm}' - * 'round() rounding mode out of range: {rm}' - */ - P.round = function ( dp, rm ) { - var n = new BigNumber(this); - - if ( dp == null || isValidInt( dp, 0, MAX, 15 ) ) { - round( n, ~~dp + this.e + 1, rm == null || - !isValidInt( rm, 0, 8, 15, roundingMode ) ? ROUNDING_MODE : rm | 0 ); - } - - return n; - }; - - - /* - * Return a new BigNumber whose value is the value of this BigNumber shifted by k places - * (powers of 10). Shift to the right if n > 0, and to the left if n < 0. - * - * k {number} Integer, -MAX_SAFE_INTEGER to MAX_SAFE_INTEGER inclusive. - * - * If k is out of range and ERRORS is false, the result will be ±0 if k < 0, or ±Infinity - * otherwise. - * - * 'shift() argument not an integer: {k}' - * 'shift() argument out of range: {k}' - */ - P.shift = function (k) { - var n = this; - return isValidInt( k, -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER, 16, 'argument' ) - - // k < 1e+21, or truncate(k) will produce exponential notation. - ? n.times( '1e' + truncate(k) ) - : new BigNumber( n.c && n.c[0] && ( k < -MAX_SAFE_INTEGER || k > MAX_SAFE_INTEGER ) - ? n.s * ( k < 0 ? 
0 : 1 / 0 ) - : n ); - }; - - - /* - * sqrt(-n) = N - * sqrt( N) = N - * sqrt(-I) = N - * sqrt( I) = I - * sqrt( 0) = 0 - * sqrt(-0) = -0 - * - * Return a new BigNumber whose value is the square root of the value of this BigNumber, - * rounded according to DECIMAL_PLACES and ROUNDING_MODE. - */ - P.squareRoot = P.sqrt = function () { - var m, n, r, rep, t, - x = this, - c = x.c, - s = x.s, - e = x.e, - dp = DECIMAL_PLACES + 4, - half = new BigNumber('0.5'); - - // Negative/NaN/Infinity/zero? - if ( s !== 1 || !c || !c[0] ) { - return new BigNumber( !s || s < 0 && ( !c || c[0] ) ? NaN : c ? x : 1 / 0 ); - } - - // Initial estimate. - s = Math.sqrt( +x ); - - // Math.sqrt underflow/overflow? - // Pass x to Math.sqrt as integer, then adjust the exponent of the result. - if ( s == 0 || s == 1 / 0 ) { - n = coeffToString(c); - if ( ( n.length + e ) % 2 == 0 ) n += '0'; - s = Math.sqrt(n); - e = bitFloor( ( e + 1 ) / 2 ) - ( e < 0 || e % 2 ); - - if ( s == 1 / 0 ) { - n = '1e' + e; - } else { - n = s.toExponential(); - n = n.slice( 0, n.indexOf('e') + 1 ) + e; - } - - r = new BigNumber(n); - } else { - r = new BigNumber( s + '' ); - } - - // Check for zero. - // r could be zero if MIN_EXP is changed after the this value was created. - // This would cause a division by zero (x/t) and hence Infinity below, which would cause - // coeffToString to throw. - if ( r.c[0] ) { - e = r.e; - s = e + dp; - if ( s < 3 ) s = 0; - - // Newton-Raphson iteration. - for ( ; ; ) { - t = r; - r = half.times( t.plus( div( x, t, dp, 1 ) ) ); - - if ( coeffToString( t.c ).slice( 0, s ) === ( n = - coeffToString( r.c ) ).slice( 0, s ) ) { - - // The exponent of r may here be one less than the final result exponent, - // e.g 0.0009999 (e-4) --> 0.001 (e-3), so adjust s so the rounding digits - // are indexed correctly. - if ( r.e < e ) --s; - n = n.slice( s - 3, s + 1 ); - - // The 4th rounding digit may be in error by -1 so if the 4 rounding digits - // are 9999 or 4999 (i.e. approaching a rounding boundary) continue the - // iteration. - if ( n == '9999' || !rep && n == '4999' ) { - - // On the first iteration only, check to see if rounding up gives the - // exact result as the nines may infinitely repeat. - if ( !rep ) { - round( t, t.e + DECIMAL_PLACES + 2, 0 ); - - if ( t.times(t).eq(x) ) { - r = t; - break; - } - } - - dp += 4; - s += 4; - rep = 1; - } else { - - // If rounding digits are null, 0{0,4} or 50{0,3}, check for exact - // result. If not, then there are further digits and m will be truthy. - if ( !+n || !+n.slice(1) && n.charAt(0) == '5' ) { - - // Truncate to the first rounding digit. - round( r, r.e + DECIMAL_PLACES + 2, 1 ); - m = !r.times(r).eq(x); - } - - break; - } - } - } - } - - return round( r, r.e + DECIMAL_PLACES + 1, ROUNDING_MODE, m ); - }; - - - /* - * n * 0 = 0 - * n * N = N - * n * I = I - * 0 * n = 0 - * 0 * 0 = 0 - * 0 * N = N - * 0 * I = N - * N * n = N - * N * 0 = N - * N * N = N - * N * I = N - * I * n = I - * I * 0 = N - * I * N = N - * I * I = I - * - * Return a new BigNumber whose value is the value of this BigNumber times the value of - * BigNumber(y, b). - */ - P.times = P.mul = function ( y, b ) { - var c, e, i, j, k, m, xcL, xlo, xhi, ycL, ylo, yhi, zc, - base, sqrtBase, - x = this, - xc = x.c, - yc = ( id = 17, y = new BigNumber( y, b ) ).c; - - // Either NaN, ±Infinity or ±0? - if ( !xc || !yc || !xc[0] || !yc[0] ) { - - // Return NaN if either is NaN, or one is 0 and the other is Infinity. 
- if ( !x.s || !y.s || xc && !xc[0] && !yc || yc && !yc[0] && !xc ) { - y.c = y.e = y.s = null; - } else { - y.s *= x.s; - - // Return ±Infinity if either is ±Infinity. - if ( !xc || !yc ) { - y.c = y.e = null; - - // Return ±0 if either is ±0. - } else { - y.c = [0]; - y.e = 0; - } - } - - return y; - } - - e = bitFloor( x.e / LOG_BASE ) + bitFloor( y.e / LOG_BASE ); - y.s *= x.s; - xcL = xc.length; - ycL = yc.length; - - // Ensure xc points to longer array and xcL to its length. - if ( xcL < ycL ) zc = xc, xc = yc, yc = zc, i = xcL, xcL = ycL, ycL = i; - - // Initialise the result array with zeros. - for ( i = xcL + ycL, zc = []; i--; zc.push(0) ); - - base = BASE; - sqrtBase = SQRT_BASE; - - for ( i = ycL; --i >= 0; ) { - c = 0; - ylo = yc[i] % sqrtBase; - yhi = yc[i] / sqrtBase | 0; - - for ( k = xcL, j = i + k; j > i; ) { - xlo = xc[--k] % sqrtBase; - xhi = xc[k] / sqrtBase | 0; - m = yhi * xlo + xhi * ylo; - xlo = ylo * xlo + ( ( m % sqrtBase ) * sqrtBase ) + zc[j] + c; - c = ( xlo / base | 0 ) + ( m / sqrtBase | 0 ) + yhi * xhi; - zc[j--] = xlo % base; - } - - zc[j] = c; - } - - if (c) { - ++e; - } else { - zc.shift(); - } - - return normalise( y, zc, e ); - }; - - - /* - * Return a new BigNumber whose value is the value of this BigNumber rounded to a maximum of - * sd significant digits using rounding mode rm, or ROUNDING_MODE if rm is omitted. - * - * [sd] {number} Significant digits. Integer, 1 to MAX inclusive. - * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. - * - * 'toDigits() precision out of range: {sd}' - * 'toDigits() precision not an integer: {sd}' - * 'toDigits() rounding mode not an integer: {rm}' - * 'toDigits() rounding mode out of range: {rm}' - */ - P.toDigits = function ( sd, rm ) { - var n = new BigNumber(this); - sd = sd == null || !isValidInt( sd, 1, MAX, 18, 'precision' ) ? null : sd | 0; - rm = rm == null || !isValidInt( rm, 0, 8, 18, roundingMode ) ? ROUNDING_MODE : rm | 0; - return sd ? round( n, sd, rm ) : n; - }; - - - /* - * Return a string representing the value of this BigNumber in exponential notation and - * rounded using ROUNDING_MODE to dp fixed decimal places. - * - * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. - * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. - * - * 'toExponential() decimal places not an integer: {dp}' - * 'toExponential() decimal places out of range: {dp}' - * 'toExponential() rounding mode not an integer: {rm}' - * 'toExponential() rounding mode out of range: {rm}' - */ - P.toExponential = function ( dp, rm ) { - return format( this, - dp != null && isValidInt( dp, 0, MAX, 19 ) ? ~~dp + 1 : null, rm, 19 ); - }; - - - /* - * Return a string representing the value of this BigNumber in fixed-point notation rounding - * to dp fixed decimal places using rounding mode rm, or ROUNDING_MODE if rm is omitted. - * - * Note: as with JavaScript's number type, (-0).toFixed(0) is '0', - * but e.g. (-0.00001).toFixed(0) is '-0'. - * - * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. - * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. - * - * 'toFixed() decimal places not an integer: {dp}' - * 'toFixed() decimal places out of range: {dp}' - * 'toFixed() rounding mode not an integer: {rm}' - * 'toFixed() rounding mode out of range: {rm}' - */ - P.toFixed = function ( dp, rm ) { - return format( this, dp != null && isValidInt( dp, 0, MAX, 20 ) - ? 
~~dp + this.e + 1 : null, rm, 20 ); - }; - - - /* - * Return a string representing the value of this BigNumber in fixed-point notation rounded - * using rm or ROUNDING_MODE to dp decimal places, and formatted according to the properties - * of the FORMAT object (see BigNumber.config). - * - * FORMAT = { - * decimalSeparator : '.', - * groupSeparator : ',', - * groupSize : 3, - * secondaryGroupSize : 0, - * fractionGroupSeparator : '\xA0', // non-breaking space - * fractionGroupSize : 0 - * }; - * - * [dp] {number} Decimal places. Integer, 0 to MAX inclusive. - * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. - * - * 'toFormat() decimal places not an integer: {dp}' - * 'toFormat() decimal places out of range: {dp}' - * 'toFormat() rounding mode not an integer: {rm}' - * 'toFormat() rounding mode out of range: {rm}' - */ - P.toFormat = function ( dp, rm ) { - var str = format( this, dp != null && isValidInt( dp, 0, MAX, 21 ) - ? ~~dp + this.e + 1 : null, rm, 21 ); - - if ( this.c ) { - var i, - arr = str.split('.'), - g1 = +FORMAT.groupSize, - g2 = +FORMAT.secondaryGroupSize, - groupSeparator = FORMAT.groupSeparator, - intPart = arr[0], - fractionPart = arr[1], - isNeg = this.s < 0, - intDigits = isNeg ? intPart.slice(1) : intPart, - len = intDigits.length; - - if (g2) i = g1, g1 = g2, g2 = i, len -= i; - - if ( g1 > 0 && len > 0 ) { - i = len % g1 || g1; - intPart = intDigits.substr( 0, i ); - - for ( ; i < len; i += g1 ) { - intPart += groupSeparator + intDigits.substr( i, g1 ); - } - - if ( g2 > 0 ) intPart += groupSeparator + intDigits.slice(i); - if (isNeg) intPart = '-' + intPart; - } - - str = fractionPart - ? intPart + FORMAT.decimalSeparator + ( ( g2 = +FORMAT.fractionGroupSize ) - ? fractionPart.replace( new RegExp( '\\d{' + g2 + '}\\B', 'g' ), - '$&' + FORMAT.fractionGroupSeparator ) - : fractionPart ) - : intPart; - } - - return str; - }; - - - /* - * Return a string array representing the value of this BigNumber as a simple fraction with - * an integer numerator and an integer denominator. The denominator will be a positive - * non-zero value less than or equal to the specified maximum denominator. If a maximum - * denominator is not specified, the denominator will be the lowest value necessary to - * represent the number exactly. - * - * [md] {number|string|BigNumber} Integer >= 1 and < Infinity. The maximum denominator. - * - * 'toFraction() max denominator not an integer: {md}' - * 'toFraction() max denominator out of range: {md}' - */ - P.toFraction = function (md) { - var arr, d0, d2, e, exp, n, n0, q, s, - k = ERRORS, - x = this, - xc = x.c, - d = new BigNumber(ONE), - n1 = d0 = new BigNumber(ONE), - d1 = n0 = new BigNumber(ONE); - - if ( md != null ) { - ERRORS = false; - n = new BigNumber(md); - ERRORS = k; - - if ( !( k = n.isInt() ) || n.lt(ONE) ) { - - if (ERRORS) { - raise( 22, - 'max denominator ' + ( k ? 'out of range' : 'not an integer' ), md ); - } - - // ERRORS is false: - // If md is a finite non-integer >= 1, round it to an integer and use it. - md = !k && n.c && round( n, n.e + 1, 1 ).gte(ONE) ? n : null; - } - } - - if ( !xc ) return x.toString(); - s = coeffToString(xc); - - // Determine initial denominator. - // d is a power of 10 and the minimum max denominator that specifies the value exactly. - e = d.e = s.length - x.e - 1; - d.c[0] = POWS_TEN[ ( exp = e % LOG_BASE ) < 0 ? LOG_BASE + exp : exp ]; - md = !md || n.cmp(d) > 0 ? ( e > 0 ? 
d : n1 ) : n; - - exp = MAX_EXP; - MAX_EXP = 1 / 0; - n = new BigNumber(s); - - // n0 = d1 = 0 - n0.c[0] = 0; - - for ( ; ; ) { - q = div( n, d, 0, 1 ); - d2 = d0.plus( q.times(d1) ); - if ( d2.cmp(md) == 1 ) break; - d0 = d1; - d1 = d2; - n1 = n0.plus( q.times( d2 = n1 ) ); - n0 = d2; - d = n.minus( q.times( d2 = d ) ); - n = d2; - } - - d2 = div( md.minus(d0), d1, 0, 1 ); - n0 = n0.plus( d2.times(n1) ); - d0 = d0.plus( d2.times(d1) ); - n0.s = n1.s = x.s; - e *= 2; - - // Determine which fraction is closer to x, n0/d0 or n1/d1 - arr = div( n1, d1, e, ROUNDING_MODE ).minus(x).abs().cmp( - div( n0, d0, e, ROUNDING_MODE ).minus(x).abs() ) < 1 - ? [ n1.toString(), d1.toString() ] - : [ n0.toString(), d0.toString() ]; - - MAX_EXP = exp; - return arr; - }; - - - /* - * Return the value of this BigNumber converted to a number primitive. - */ - P.toNumber = function () { - var x = this; - - // Ensure zero has correct sign. - return +x || ( x.s ? x.s * 0 : NaN ); - }; - - - /* - * Return a BigNumber whose value is the value of this BigNumber raised to the power n. - * If n is negative round according to DECIMAL_PLACES and ROUNDING_MODE. - * If POW_PRECISION is not 0, round to POW_PRECISION using ROUNDING_MODE. - * - * n {number} Integer, -9007199254740992 to 9007199254740992 inclusive. - * (Performs 54 loop iterations for n of 9007199254740992.) - * - * 'pow() exponent not an integer: {n}' - * 'pow() exponent out of range: {n}' - */ - P.toPower = P.pow = function (n) { - var k, y, - i = mathfloor( n < 0 ? -n : +n ), - x = this; - - // Pass ±Infinity to Math.pow if exponent is out of range. - if ( !isValidInt( n, -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER, 23, 'exponent' ) && - ( !isFinite(n) || i > MAX_SAFE_INTEGER && ( n /= 0 ) || - parseFloat(n) != n && !( n = NaN ) ) ) { - return new BigNumber( Math.pow( +x, n ) ); - } - - // Truncating each coefficient array to a length of k after each multiplication equates - // to truncating significant digits to POW_PRECISION + [28, 41], i.e. there will be a - // minimum of 28 guard digits retained. (Using + 1.5 would give [9, 21] guard digits.) - k = POW_PRECISION ? mathceil( POW_PRECISION / LOG_BASE + 2 ) : 0; - y = new BigNumber(ONE); - - for ( ; ; ) { - - if ( i % 2 ) { - y = y.times(x); - if ( !y.c ) break; - if ( k && y.c.length > k ) y.c.length = k; - } - - i = mathfloor( i / 2 ); - if ( !i ) break; - - x = x.times(x); - if ( k && x.c && x.c.length > k ) x.c.length = k; - } - - if ( n < 0 ) y = ONE.div(y); - return k ? round( y, POW_PRECISION, ROUNDING_MODE ) : y; - }; - - - /* - * Return a string representing the value of this BigNumber rounded to sd significant digits - * using rounding mode rm or ROUNDING_MODE. If sd is less than the number of digits - * necessary to represent the integer part of the value in fixed-point notation, then use - * exponential notation. - * - * [sd] {number} Significant digits. Integer, 1 to MAX inclusive. - * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive. - * - * 'toPrecision() precision not an integer: {sd}' - * 'toPrecision() precision out of range: {sd}' - * 'toPrecision() rounding mode not an integer: {rm}' - * 'toPrecision() rounding mode out of range: {rm}' - */ - P.toPrecision = function ( sd, rm ) { - return format( this, sd != null && isValidInt( sd, 1, MAX, 24, 'precision' ) - ? sd | 0 : null, rm, 24 ); - }; - - - /* - * Return a string representing the value of this BigNumber in base b, or base 10 if b is - * omitted. 
If a base is specified, including base 10, round according to DECIMAL_PLACES and - * ROUNDING_MODE. If a base is not specified, and this BigNumber has a positive exponent - * that is equal to or greater than TO_EXP_POS, or a negative exponent equal to or less than - * TO_EXP_NEG, return exponential notation. - * - * [b] {number} Integer, 2 to 64 inclusive. - * - * 'toString() base not an integer: {b}' - * 'toString() base out of range: {b}' - */ - P.toString = function (b) { - var str, - n = this, - s = n.s, - e = n.e; - - // Infinity or NaN? - if ( e === null ) { - - if (s) { - str = 'Infinity'; - if ( s < 0 ) str = '-' + str; - } else { - str = 'NaN'; - } - } else { - str = coeffToString( n.c ); - - if ( b == null || !isValidInt( b, 2, 64, 25, 'base' ) ) { - str = e <= TO_EXP_NEG || e >= TO_EXP_POS - ? toExponential( str, e ) - : toFixedPoint( str, e ); - } else { - str = convertBase( toFixedPoint( str, e ), b | 0, 10, s ); - } - - if ( s < 0 && n.c[0] ) str = '-' + str; - } - - return str; - }; - - - /* - * Return a new BigNumber whose value is the value of this BigNumber truncated to a whole - * number. - */ - P.truncated = P.trunc = function () { - return round( new BigNumber(this), this.e + 1, 1 ); - }; - - - - /* - * Return as toString, but do not accept a base argument. - */ - P.valueOf = P.toJSON = function () { - return this.toString(); - }; - - - // Aliases for BigDecimal methods. - //P.add = P.plus; // P.add included above - //P.subtract = P.minus; // P.sub included above - //P.multiply = P.times; // P.mul included above - //P.divide = P.div; - //P.remainder = P.mod; - //P.compareTo = P.cmp; - //P.negate = P.neg; - - - if ( configObj != null ) BigNumber.config(configObj); - - return BigNumber; - } - - - // PRIVATE HELPER FUNCTIONS - - - function bitFloor(n) { - var i = n | 0; - return n > 0 || n === i ? i : i - 1; - } - - - // Return a coefficient array as a string of base 10 digits. - function coeffToString(a) { - var s, z, - i = 1, - j = a.length, - r = a[0] + ''; - - for ( ; i < j; ) { - s = a[i++] + ''; - z = LOG_BASE - s.length; - for ( ; z--; s = '0' + s ); - r += s; - } - - // Determine trailing zeros. - for ( j = r.length; r.charCodeAt(--j) === 48; ); - return r.slice( 0, j + 1 || 1 ); - } - - - // Compare the value of BigNumbers x and y. - function compare( x, y ) { - var a, b, - xc = x.c, - yc = y.c, - i = x.s, - j = y.s, - k = x.e, - l = y.e; - - // Either NaN? - if ( !i || !j ) return null; - - a = xc && !xc[0]; - b = yc && !yc[0]; - - // Either zero? - if ( a || b ) return a ? b ? 0 : -j : i; - - // Signs differ? - if ( i != j ) return i; - - a = i < 0; - b = k == l; - - // Either Infinity? - if ( !xc || !yc ) return b ? 0 : !xc ^ a ? 1 : -1; - - // Compare exponents. - if ( !b ) return k > l ^ a ? 1 : -1; - - j = ( k = xc.length ) < ( l = yc.length ) ? k : l; - - // Compare digit by digit. - for ( i = 0; i < j; i++ ) if ( xc[i] != yc[i] ) return xc[i] > yc[i] ^ a ? 1 : -1; - - // Compare lengths. - return k == l ? 0 : k > l ^ a ? 1 : -1; - } - - - /* - * Return true if n is a valid number in range, otherwise false. - * Use for argument validation when ERRORS is false. - * Note: parseInt('1e+1') == 1 but parseFloat('1e+1') == 10. - */ - function intValidatorNoErrors( n, min, max ) { - return ( n = truncate(n) ) >= min && n <= max; - } - - - function isArray(obj) { - return Object.prototype.toString.call(obj) == '[object Array]'; - } - - - /* - * Convert string of baseIn to an array of numbers of baseOut. - * Eg. convertBase('255', 10, 16) returns [15, 15]. - * Eg. 
convertBase('ff', 16, 10) returns [2, 5, 5]. - */ - function toBaseOut( str, baseIn, baseOut ) { - var j, - arr = [0], - arrL, - i = 0, - len = str.length; - - for ( ; i < len; ) { - for ( arrL = arr.length; arrL--; arr[arrL] *= baseIn ); - arr[ j = 0 ] += ALPHABET.indexOf( str.charAt( i++ ) ); - - for ( ; j < arr.length; j++ ) { - - if ( arr[j] > baseOut - 1 ) { - if ( arr[j + 1] == null ) arr[j + 1] = 0; - arr[j + 1] += arr[j] / baseOut | 0; - arr[j] %= baseOut; - } - } - } - - return arr.reverse(); - } - - - function toExponential( str, e ) { - return ( str.length > 1 ? str.charAt(0) + '.' + str.slice(1) : str ) + - ( e < 0 ? 'e' : 'e+' ) + e; - } - - - function toFixedPoint( str, e ) { - var len, z; - - // Negative exponent? - if ( e < 0 ) { - - // Prepend zeros. - for ( z = '0.'; ++e; z += '0' ); - str = z + str; - - // Positive exponent - } else { - len = str.length; - - // Append zeros. - if ( ++e > len ) { - for ( z = '0', e -= len; --e; z += '0' ); - str += z; - } else if ( e < len ) { - str = str.slice( 0, e ) + '.' + str.slice(e); - } - } - - return str; - } - - - function truncate(n) { - n = parseFloat(n); - return n < 0 ? mathceil(n) : mathfloor(n); - } - - - // EXPORT - - - BigNumber = another(); - - // AMD. - if ( typeof define == 'function' && define.amd ) { - define( function () { return BigNumber; } ); +'use strict'; - // Node and other environments that support module.exports. - } else if ( typeof module != 'undefined' && module.exports ) { - module.exports = BigNumber; - if ( !crypto ) try { crypto = require('crypto'); } catch (e) {} +module.exports = BigNumber; // jshint ignore:line - // Browser. - } else { - global.BigNumber = BigNumber; - } -})(this); -},{"crypto":31}],"web3":[function(require,module,exports){ +},{}],"web3":[function(require,module,exports){ var web3 = require('./lib/web3'); web3.providers.HttpProvider = require('./lib/web3/httpprovider'); web3.providers.QtSyncProvider = require('./lib/web3/qtsync'); @@ -8286,5 +5687,5 @@ module.exports = web3; },{"./lib/web3":9,"./lib/web3/contract":11,"./lib/web3/httpprovider":19,"./lib/web3/namereg":23,"./lib/web3/qtsync":26,"./lib/web3/transfer":29}]},{},["web3"]) -//# sourceMappingURL=web3.js.map +//# sourceMappingURL=web3-light.js.map ` \ No newline at end of file diff --git a/rpc/api/admin_js.go b/rpc/api/admin_js.go index 02a0e93e1..6255a6c7b 100644 --- a/rpc/api/admin_js.go +++ b/rpc/api/admin_js.go @@ -1,66 +1,66 @@ package api const Admin_JS = ` -web3.extend({ +web3._extend({ property: 'admin', methods: [ - new web3.extend.Method({ + new web3._extend.Method({ name: 'addPeer', call: 'admin_addPeer', params: 1, - inputFormatter: [web3.extend.utils.formatInputString], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.utils.formatInputString], + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'peers', call: 'admin_peers', params: 0, inputFormatter: [], outputFormatter: function(obj) { return obj; } }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'exportChain', call: 'admin_exportChain', params: 1, - inputFormatter: [web3.extend.utils.formatInputString], + inputFormatter: [web3._extend.utils.formatInputString], outputFormatter: function(obj) { return obj; } }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'importChain', call: 'admin_importChain', params: 1, - inputFormatter: [web3.extend.utils.formatInputString], + inputFormatter: 
[web3._extend.utils.formatInputString], outputFormatter: function(obj) { return obj; } }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'verbosity', call: 'admin_verbosity', params: 1, - inputFormatter: [web3.extend.utils.formatInputInt], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.utils.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'syncStatus', call: 'admin_syncStatus', params: 1, - inputFormatter: [web3.extend.utils.formatInputInt], + inputFormatter: [web3._extend.utils.formatInputInt], outputFormatter: function(obj) { return obj; } }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'setSolc', call: 'admin_setSolc', params: 1, - inputFormatter: [web3.extend.utils.formatInputString], - outputFormatter: web3.extend.formatters.formatOutputString + inputFormatter: [web3._extend.utils.formatInputString], + outputFormatter: web3._extend.formatters.formatOutputString }) ], properties: [ - new web3.extend.Property({ + new web3._extend.Property({ name: 'nodeInfo', getter: 'admin_nodeInfo', - outputFormatter: web3.extend.formatters.formatOutputString + outputFormatter: web3._extend.formatters.formatOutputString }) ] }); diff --git a/rpc/api/debug_js.go b/rpc/api/debug_js.go index 43c545b2a..fe19a077d 100644 --- a/rpc/api/debug_js.go +++ b/rpc/api/debug_js.go @@ -1,44 +1,44 @@ package api const Debug_JS = ` -web3.extend({ +web3._extend({ property: 'debug', methods: [ - new web3.extend.Method({ + new web3._extend.Method({ name: 'printBlock', call: 'debug_printBlock', params: 1, - inputFormatter: [web3.extend.formatters.formatInputInt], - outputFormatter: web3.extend.formatters.formatOutputString + inputFormatter: [web3._extend.formatters.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputString }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'getBlockRlp', call: 'debug_getBlockRlp', params: 1, - inputFormatter: [web3.extend.formatters.formatInputInt], - outputFormatter: web3.extend.formatters.formatOutputString + inputFormatter: [web3._extend.formatters.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputString }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'setHead', call: 'debug_setHead', params: 1, - inputFormatter: [web3.extend.formatters.formatInputInt], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.formatters.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'processBlock', call: 'debug_processBlock', params: 1, - inputFormatter: [web3.extend.formatters.formatInputInt], + inputFormatter: [web3._extend.formatters.formatInputInt], outputFormatter: function(obj) { return obj; } }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'seedHash', call: 'debug_seedHash', params: 1, - inputFormatter: [web3.extend.formatters.formatInputInt], - outputFormatter: web3.extend.formatters.formatOutputString + inputFormatter: [web3._extend.formatters.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputString }) ], properties: diff --git a/rpc/api/miner_js.go b/rpc/api/miner_js.go index f1c64c5e8..bcf92f6a7 100644 --- a/rpc/api/miner_js.go +++ b/rpc/api/miner_js.go @@ -1,73 +1,73 @@ package api const Miner_JS = ` -web3.extend({ +web3._extend({ property: 'miner', methods: [ - new web3.extend.Method({ + 
new web3._extend.Method({ name: 'start', call: 'miner_start', params: 1, - inputFormatter: [web3.extend.formatters.formatInputInt], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.formatters.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'stop', call: 'miner_stop', params: 1, - inputFormatter: [web3.extend.formatters.formatInputInt], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.formatters.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'getHashrate', call: 'miner_hashrate', params: 0, inputFormatter: [], - outputFormatter: web3.extend.utils.toDecimal + outputFormatter: web3._extend.utils.toDecimal }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'setExtra', call: 'miner_setExtra', params: 1, - inputFormatter: [web3.extend.utils.formatInputString], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.utils.formatInputString], + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'setGasPrice', call: 'miner_setGasPrice', params: 1, - inputFormatter: [web3.extend.utils.formatInputString], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.utils.formatInputString], + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'startAutoDAG', call: 'miner_startAutoDAG', params: 0, inputFormatter: [], - outputFormatter: web3.extend.formatters.formatOutputBool + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'stopAutoDAG', call: 'miner_stopAutoDAG', params: 0, inputFormatter: [], - outputFormatter: web3.extend.formatters.formatOutputBool + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'makeDAG', call: 'miner_makeDAG', params: 1, - inputFormatter: [web3.extend.formatters.inputDefaultBlockNumberFormatter], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.formatters.inputDefaultBlockNumberFormatter], + outputFormatter: web3._extend.formatters.formatOutputBool }) ], properties: [ - new web3.extend.Property({ + new web3._extend.Property({ name: 'hashrate', getter: 'miner_hashrate', - outputFormatter: web3.extend.utils.toDecimal + outputFormatter: web3._extend.utils.toDecimal }) ] }); diff --git a/rpc/api/net_js.go b/rpc/api/net_js.go index 2fae69c58..75f6c89f3 100644 --- a/rpc/api/net_js.go +++ b/rpc/api/net_js.go @@ -1,32 +1,32 @@ package api const Net_JS = ` -web3.extend({ +web3._extend({ property: 'network', methods: [ - new web3.extend.Method({ + new web3._extend.Method({ name: 'addPeer', call: 'net_addPeer', params: 1, - inputFormatter: [web3.extend.utils.formatInputString], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.utils.formatInputString], + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'id', call: 'net_id', params: 0, inputFormatter: [], - outputFormatter: web3.extend.formatters.formatOutputString + outputFormatter: web3._extend.formatters.formatOutputString }), - new 
web3.extend.Method({ + new web3._extend.Method({ name: 'getPeerCount', call: 'net_peerCount', params: 0, inputFormatter: [], - outputFormatter: web3.extend.formatters.formatOutputString + outputFormatter: web3._extend.formatters.formatOutputString }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'peers', call: 'net_peers', params: 0, @@ -36,15 +36,15 @@ web3.extend({ ], properties: [ - new web3.extend.Property({ + new web3._extend.Property({ name: 'listening', getter: 'net_listening', - outputFormatter: web3.extend.formatters.formatOutputBool + outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3.extend.Property({ + new web3._extend.Property({ name: 'peerCount', getter: 'net_peerCount', - outputFormatter: web3.extend.utils.toDecimal + outputFormatter: web3._extend.utils.toDecimal }) ] }); diff --git a/rpc/api/personal_js.go b/rpc/api/personal_js.go index 7fd9a2dea..f9fa60e78 100644 --- a/rpc/api/personal_js.go +++ b/rpc/api/personal_js.go @@ -1,30 +1,30 @@ package api const Personal_JS = ` -web3.extend({ +web3._extend({ property: 'personal', methods: [ - new web3.extend.Method({ + new web3._extend.Method({ name: 'listAccounts', call: 'personal_listAccounts', params: 0, inputFormatter: [], outputFormatter: function(obj) { return obj; } }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'newAccount', call: 'personal_newAccount', params: 1, - inputFormatter: [web3.extend.formatters.formatInputString], - outputFormatter: web3.extend.formatters.formatOutputString + inputFormatter: [web3._extend.formatters.formatInputString], + outputFormatter: web3._extend.formatters.formatOutputString }), - new web3.extend.Method({ + new web3._extend.Method({ name: 'unlockAccount', call: 'personal_unlockAccount', params: 3, - inputFormatter: [web3.extend.formatters.formatInputString,web3.extend.formatters.formatInputString,web3.extend.formatters.formatInputInt], - outputFormatter: web3.extend.formatters.formatOutputBool + inputFormatter: [web3._extend.formatters.formatInputString,web3._extend.formatters.formatInputString,web3._extend.formatters.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputBool }) ], properties: -- cgit v1.2.3 From c6c443385b3e9998d3090785e4287e3836c70219 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Wed, 10 Jun 2015 10:24:22 +0200 Subject: changed console welcome message --- cmd/console/js.go | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/cmd/console/js.go b/cmd/console/js.go index ea0961a39..4229a95a2 100644 --- a/cmd/console/js.go +++ b/cmd/console/js.go @@ -234,18 +234,15 @@ func (self *jsre) suportedApis(ipcpath string) ([]string, error) { // show summary of current geth instance func (self *jsre) welcome(ipcpath string) { - self.re.Eval(` - console.log(' Connected to: ' + web3.version.client); - `) - - if apis, err := self.suportedApis(ipcpath); err == nil { - apisStr := "" - for _, api := range apis { - apisStr += api + " " - } - self.re.Eval(fmt.Sprintf(`console.log("Available api's: %s");`, apisStr)) - } else { - utils.Fatalf("unable to determine supported api's - %v", err) + self.re.Eval(`console.log('instance: ' + web3.version.client);`) + self.re.Eval(`console.log("coinbase: " + eth.coinbase);`) + self.re.Eval(`var lastBlockTimestamp = 1000 * eth.getBlock(eth.blockNumber).timestamp`) + self.re.Eval(`console.log("at block: " + eth.blockNumber + " (" + new Date(lastBlockTimestamp).toLocaleDateString() + + " " + new Date(lastBlockTimestamp).toLocaleTimeString() + 
")");`) + + if modules, err := self.suportedApis(ipcpath); err == nil { + self.re.Eval(fmt.Sprintf("var modules = '%s';", strings.Join(modules, " "))) + self.re.Eval(`console.log(" modules: " + modules);`) } } -- cgit v1.2.3 From 87b62f75a7b9c15c17f3352f1b50ad88966e7070 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Wed, 10 Jun 2015 10:37:10 +0200 Subject: added txpool API --- rpc/api/api.go | 3 ++- rpc/api/txpool.go | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++ rpc/api/txpool_js.go | 18 ++++++++++++++ rpc/api/utils.go | 4 ++++ 4 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 rpc/api/txpool.go create mode 100644 rpc/api/txpool_js.go diff --git a/rpc/api/api.go b/rpc/api/api.go index 28b824658..e870ec58e 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -13,6 +13,7 @@ const ( MergedApiName = "merged" MinerApiName = "miner" NetApiName = "net" + txPoolApiName = "txpool" PersonalApiName = "personal" Web3ApiName = "web3" ) @@ -20,7 +21,7 @@ const ( var ( // List with all API's which are offered over the IPC interface by default DefaultIpcApis = strings.Join([]string{ - AdminApiName, EthApiName, DebugApiName, MinerApiName, NetApiName, PersonalApiName, Web3ApiName, + AdminApiName, EthApiName, DebugApiName, MinerApiName, NetApiName, txPoolApiName, PersonalApiName, Web3ApiName, }, ",") ) diff --git a/rpc/api/txpool.go b/rpc/api/txpool.go new file mode 100644 index 000000000..f340c501f --- /dev/null +++ b/rpc/api/txpool.go @@ -0,0 +1,67 @@ +package api + +import ( + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "github.com/ethereum/go-ethereum/xeth" +) + +var ( + // mapping between methods and handlers + txpoolMapping = map[string]txpoolhandler{ + "txpool_status": (*txPoolApi).Status, + } +) + +// net callback handler +type txpoolhandler func(*txPoolApi, *shared.Request) (interface{}, error) + +// txpool api provider +type txPoolApi struct { + xeth *xeth.XEth + ethereum *eth.Ethereum + methods map[string]txpoolhandler + codec codec.ApiCoder +} + +// create a new txpool api instance +func NewTxPoolApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *txPoolApi { + return &txPoolApi{ + xeth: xeth, + ethereum: eth, + methods: txpoolMapping, + codec: coder.New(nil), + } +} + +// collection with supported methods +func (self *txPoolApi) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +// Execute given request +func (self *txPoolApi) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, shared.NewNotImplementedError(req.Method) +} + +func (self *txPoolApi) Name() string { + return txPoolApiName +} + +func (self *txPoolApi) Status(req *shared.Request) (interface{}, error) { + return map[string]int{ + "pending": self.ethereum.TxPool().GetTransactions().Len(), + "queued": self.ethereum.TxPool().GetQueuedTransactions().Len(), + }, nil +} diff --git a/rpc/api/txpool_js.go b/rpc/api/txpool_js.go new file mode 100644 index 000000000..06528d1c4 --- /dev/null +++ b/rpc/api/txpool_js.go @@ -0,0 +1,18 @@ +package api + +const TxPool_JS = ` +web3._extend({ + property: 'txpool', + methods: + [ + ], + properties: + [ + new web3._extend.Property({ + name: 'status', + getter: 'txpool_status', + outputFormatter: function(obj) { return obj; } + }) + ] +}); +` diff --git a/rpc/api/utils.go 
b/rpc/api/utils.go index 072abf883..b44a325a8 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -31,6 +31,8 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. apis[i] = NewMinerApi(eth, codec) case NetApiName: apis[i] = NewNetApi(xeth, eth, codec) + case txPoolApiName: + apis[i] = NewTxPoolApi(xeth, eth, codec) case PersonalApiName: apis[i] = NewPersonalApi(xeth, eth, codec) case Web3ApiName: @@ -53,6 +55,8 @@ func Javascript(name string) string { return Miner_JS case NetApiName: return Net_JS + case txPoolApiName: + return TxPool_JS case PersonalApiName: return Personal_JS } -- cgit v1.2.3 From bd38428f33b127e9c60d26127695e50c55798fcd Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Wed, 10 Jun 2015 11:29:52 +0200 Subject: cleanup of javascript API --- cmd/console/js.go | 3 ++- rpc/api/admin.go | 25 +++++++++++++++---------- rpc/api/admin_args.go | 14 ++------------ rpc/api/admin_js.go | 29 +++++++++++++++-------------- rpc/api/miner_args.go | 17 ++++++++++++----- rpc/api/miner_js.go | 7 ------- rpc/api/net.go | 4 ++-- rpc/api/net_js.go | 24 ++++++++++-------------- rpc/api/personal_args.go | 45 ++++++++++++++++++++++++++++----------------- rpc/api/personal_js.go | 12 +++++------- 10 files changed, 91 insertions(+), 89 deletions(-) diff --git a/cmd/console/js.go b/cmd/console/js.go index 4229a95a2..76695cabd 100644 --- a/cmd/console/js.go +++ b/cmd/console/js.go @@ -32,12 +32,12 @@ import ( "github.com/ethereum/go-ethereum/common/docserver" re "github.com/ethereum/go-ethereum/jsre" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/rpc/api" "github.com/ethereum/go-ethereum/rpc/codec" "github.com/ethereum/go-ethereum/rpc/comms" "github.com/ethereum/go-ethereum/rpc/shared" "github.com/peterh/liner" "github.com/robertkrimen/otto" - "github.com/ethereum/go-ethereum/rpc/api" ) type prompter interface { @@ -235,6 +235,7 @@ func (self *jsre) suportedApis(ipcpath string) ([]string, error) { // show summary of current geth instance func (self *jsre) welcome(ipcpath string) { self.re.Eval(`console.log('instance: ' + web3.version.client);`) + self.re.Eval(`console.log(' datadir: ' + admin.datadir);`) self.re.Eval(`console.log("coinbase: " + eth.coinbase);`) self.re.Eval(`var lastBlockTimestamp = 1000 * eth.getBlock(eth.blockNumber).timestamp`) self.re.Eval(`console.log("at block: " + eth.blockNumber + " (" + new Date(lastBlockTimestamp).toLocaleDateString() diff --git a/rpc/api/admin.go b/rpc/api/admin.go index c37463604..6b89942b2 100644 --- a/rpc/api/admin.go +++ b/rpc/api/admin.go @@ -25,14 +25,15 @@ var ( AdminMapping = map[string]adminhandler{ // "admin_startRPC": (*adminApi).StartRPC, // "admin_stopRPC": (*adminApi).StopRPC, - "admin_addPeer": (*adminApi).AddPeer, - "admin_peers": (*adminApi).Peers, - "admin_nodeInfo": (*adminApi).NodeInfo, - "admin_exportChain": (*adminApi).ExportChain, - "admin_importChain": (*adminApi).ImportChain, - "admin_verbosity": (*adminApi).Verbosity, - "admin_syncStatus": (*adminApi).SyncStatus, - "admin_setSolc": (*adminApi).SetSolc, + "admin_addPeer": (*adminApi).AddPeer, + "admin_peers": (*adminApi).Peers, + "admin_nodeInfo": (*adminApi).NodeInfo, + "admin_exportChain": (*adminApi).ExportChain, + "admin_importChain": (*adminApi).ImportChain, + "admin_verbosity": (*adminApi).Verbosity, + "admin_chainSyncStatus": (*adminApi).ChainSyncStatus, + "admin_setSolc": (*adminApi).SetSolc, + "admin_datadir": (*adminApi).DataDir, } ) @@ -129,6 +130,10 @@ func (self *adminApi) NodeInfo(req *shared.Request) 
(interface{}, error) { return self.ethereum.NodeInfo(), nil } +func (self *adminApi) DataDir(req *shared.Request) (interface{}, error) { + return self.ethereum.DataDir, nil +} + func hasAllBlocks(chain *core.ChainManager, bs []*types.Block) bool { for _, b := range bs { if !chain.HasBlock(b.Hash()) { @@ -209,9 +214,9 @@ func (self *adminApi) Verbosity(req *shared.Request) (interface{}, error) { return true, nil } -func (self *adminApi) SyncStatus(req *shared.Request) (interface{}, error) { +func (self *adminApi) ChainSyncStatus(req *shared.Request) (interface{}, error) { pending, cached := self.ethereum.Downloader().Stats() - return map[string]interface{}{"available": pending, "waitingForImport": cached}, nil + return map[string]interface{}{"blocksAvailable": pending, "blocksWaitingForImport": cached}, nil } func (self *adminApi) SetSolc(req *shared.Request) (interface{}, error) { diff --git a/rpc/api/admin_args.go b/rpc/api/admin_args.go index 9c0cbdcb6..56bb57e20 100644 --- a/rpc/api/admin_args.go +++ b/rpc/api/admin_args.go @@ -3,8 +3,6 @@ package api import ( "encoding/json" - "math/big" - "github.com/ethereum/go-ethereum/rpc/shared" ) @@ -68,16 +66,8 @@ func (args *VerbosityArgs) UnmarshalJSON(b []byte) (err error) { return shared.NewDecodeParamError("Expected enode as argument") } - if levelint, ok := obj[0].(int); ok { - args.Level = levelint - } else if levelstr, ok := obj[0].(string); ok { - if !ok { - return shared.NewInvalidTypeError("level", "not a string") - } - level, success := new(big.Int).SetString(levelstr, 0) - if !success { - return shared.NewDecodeParamError("Unable to parse verbosity level") - } + level, err := numString(obj[0]) + if err == nil { args.Level = int(level.Int64()) } diff --git a/rpc/api/admin_js.go b/rpc/api/admin_js.go index 6255a6c7b..c3e713c67 100644 --- a/rpc/api/admin_js.go +++ b/rpc/api/admin_js.go @@ -12,13 +12,6 @@ web3._extend({ inputFormatter: [web3._extend.utils.formatInputString], outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3._extend.Method({ - name: 'peers', - call: 'admin_peers', - params: 0, - inputFormatter: [], - outputFormatter: function(obj) { return obj; } - }), new web3._extend.Method({ name: 'exportChain', call: 'admin_exportChain', @@ -40,13 +33,6 @@ web3._extend({ inputFormatter: [web3._extend.utils.formatInputInt], outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3._extend.Method({ - name: 'syncStatus', - call: 'admin_syncStatus', - params: 1, - inputFormatter: [web3._extend.utils.formatInputInt], - outputFormatter: function(obj) { return obj; } - }), new web3._extend.Method({ name: 'setSolc', call: 'admin_setSolc', @@ -61,6 +47,21 @@ web3._extend({ name: 'nodeInfo', getter: 'admin_nodeInfo', outputFormatter: web3._extend.formatters.formatOutputString + }), + new web3._extend.Property({ + name: 'peers', + getter: 'admin_peers', + outputFormatter: function(obj) { return obj; } + }), + new web3._extend.Property({ + name: 'datadir', + getter: 'admin_datadir', + outputFormatter: web3._extend.formatters.formatOutputString + }), + new web3._extend.Property({ + name: 'chainSyncStatus', + getter: 'admin_chainSyncStatus', + outputFormatter: function(obj) { return obj; } }) ] }); diff --git a/rpc/api/miner_args.go b/rpc/api/miner_args.go index 6b3d16d48..7b0560c16 100644 --- a/rpc/api/miner_args.go +++ b/rpc/api/miner_args.go @@ -41,6 +41,10 @@ func (args *SetExtraArgs) UnmarshalJSON(b []byte) (err error) { return shared.NewDecodeParamError(err.Error()) } + if len(obj) < 1 { + return 
shared.NewInsufficientParamsError(len(obj), 1) + } + extrastr, ok := obj[0].(string) if !ok { return shared.NewInvalidTypeError("Price", "not a string") @@ -60,13 +64,16 @@ func (args *GasPriceArgs) UnmarshalJSON(b []byte) (err error) { return shared.NewDecodeParamError(err.Error()) } - pricestr, ok := obj[0].(string) - if !ok { - return shared.NewInvalidTypeError("Price", "not a string") + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) } - args.Price = pricestr - return nil + if pricestr, ok := obj[0].(string); ok { + args.Price = pricestr + return nil + } + + return shared.NewInvalidTypeError("Price", "not a string") } type MakeDAGArgs struct { diff --git a/rpc/api/miner_js.go b/rpc/api/miner_js.go index bcf92f6a7..6290368da 100644 --- a/rpc/api/miner_js.go +++ b/rpc/api/miner_js.go @@ -19,13 +19,6 @@ web3._extend({ inputFormatter: [web3._extend.formatters.formatInputInt], outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3._extend.Method({ - name: 'getHashrate', - call: 'miner_hashrate', - params: 0, - inputFormatter: [], - outputFormatter: web3._extend.utils.toDecimal - }), new web3._extend.Method({ name: 'setExtra', call: 'miner_setExtra', diff --git a/rpc/api/net.go b/rpc/api/net.go index efc400785..6f5d55f12 100644 --- a/rpc/api/net.go +++ b/rpc/api/net.go @@ -10,7 +10,7 @@ import ( var ( // mapping between methods and handlers netMapping = map[string]nethandler{ - "net_id": (*netApi).NetworkVersion, + "net_version": (*netApi).Version, "net_peerCount": (*netApi).PeerCount, "net_listening": (*netApi).IsListening, "net_peers": (*netApi).Peers, @@ -63,7 +63,7 @@ func (self *netApi) Name() string { } // Network version -func (self *netApi) NetworkVersion(req *shared.Request) (interface{}, error) { +func (self *netApi) Version(req *shared.Request) (interface{}, error) { return self.xeth.NetworkVersion(), nil } diff --git a/rpc/api/net_js.go b/rpc/api/net_js.go index 75f6c89f3..1677d9fa6 100644 --- a/rpc/api/net_js.go +++ b/rpc/api/net_js.go @@ -12,26 +12,12 @@ web3._extend({ inputFormatter: [web3._extend.utils.formatInputString], outputFormatter: web3._extend.formatters.formatOutputBool }), - new web3._extend.Method({ - name: 'id', - call: 'net_id', - params: 0, - inputFormatter: [], - outputFormatter: web3._extend.formatters.formatOutputString - }), new web3._extend.Method({ name: 'getPeerCount', call: 'net_peerCount', params: 0, inputFormatter: [], outputFormatter: web3._extend.formatters.formatOutputString - }), - new web3._extend.Method({ - name: 'peers', - call: 'net_peers', - params: 0, - inputFormatter: [], - outputFormatter: function(obj) { return obj; } }) ], properties: @@ -45,6 +31,16 @@ web3._extend({ name: 'peerCount', getter: 'net_peerCount', outputFormatter: web3._extend.utils.toDecimal + }), + new web3._extend.Property({ + name: 'peers', + getter: 'net_peers', + outputFormatter: function(obj) { return obj; } + }), + new web3._extend.Property({ + name: 'version', + getter: 'net_version', + outputFormatter: web3._extend.formatters.formatOutputString }) ] }); diff --git a/rpc/api/personal_args.go b/rpc/api/personal_args.go index b41fc06e7..b3e683638 100644 --- a/rpc/api/personal_args.go +++ b/rpc/api/personal_args.go @@ -16,13 +16,16 @@ func (args *NewAccountArgs) UnmarshalJSON(b []byte) (err error) { return shared.NewDecodeParamError(err.Error()) } - passhrase, ok := obj[0].(string) - if !ok { - return shared.NewInvalidTypeError("passhrase", "not a string") + if len(obj) < 1 { + return 
shared.NewInsufficientParamsError(len(obj), 1) } - args.Passphrase = passhrase - return nil + if passhrase, ok := obj[0].(string); ok { + args.Passphrase = passhrase + return nil + } + + return shared.NewInvalidTypeError("passhrase", "not a string") } type DeleteAccountArgs struct { @@ -36,17 +39,21 @@ func (args *DeleteAccountArgs) UnmarshalJSON(b []byte) (err error) { return shared.NewDecodeParamError(err.Error()) } - addr, ok := obj[0].(string) - if !ok { + if len(obj) < 2 { + return shared.NewInsufficientParamsError(len(obj), 2) + } + + if addr, ok := obj[0].(string); ok { + args.Address = addr + } else { return shared.NewInvalidTypeError("address", "not a string") } - args.Address = addr - passhrase, ok := obj[1].(string) - if !ok { + if passhrase, ok := obj[1].(string); ok { + args.Passphrase = passhrase + } else { return shared.NewInvalidTypeError("passhrase", "not a string") } - args.Passphrase = passhrase return nil } @@ -65,17 +72,21 @@ func (args *UnlockAccountArgs) UnmarshalJSON(b []byte) (err error) { args.Duration = -1 - addrstr, ok := obj[0].(string) - if !ok { + if len(obj) < 2 { + return shared.NewInsufficientParamsError(len(obj), 2) + } + + if addrstr, ok := obj[0].(string); ok { + args.Address = addrstr + } else { return shared.NewInvalidTypeError("address", "not a string") } - args.Address = addrstr - passphrasestr, ok := obj[1].(string) - if !ok { + if passphrasestr, ok := obj[1].(string); ok { + args.Passphrase = passphrasestr + } else { return shared.NewInvalidTypeError("passphrase", "not a string") } - args.Passphrase = passphrasestr return nil } diff --git a/rpc/api/personal_js.go b/rpc/api/personal_js.go index f9fa60e78..ddd47f6a4 100644 --- a/rpc/api/personal_js.go +++ b/rpc/api/personal_js.go @@ -5,13 +5,6 @@ web3._extend({ property: 'personal', methods: [ - new web3._extend.Method({ - name: 'listAccounts', - call: 'personal_listAccounts', - params: 0, - inputFormatter: [], - outputFormatter: function(obj) { return obj; } - }), new web3._extend.Method({ name: 'newAccount', call: 'personal_newAccount', @@ -29,6 +22,11 @@ web3._extend({ ], properties: [ + new web3._extend.Property({ + name: 'accounts', + getter: 'personal_listAccounts', + outputFormatter: function(obj) { return obj; } + }) ] }); ` -- cgit v1.2.3 From 7e41d7ac51fdaba1c03ec3f9cb8cc7a7bc3830f4 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Wed, 10 Jun 2015 12:35:12 +0200 Subject: added shh API --- rpc/api/api.go | 6 +- rpc/api/shh.go | 171 ++++++++++++++++++++++++++++++++++++++++++++++++++++ rpc/api/shh_args.go | 158 ++++++++++++++++++++++++++++++++++++++++++++++++ rpc/api/ssh_js.go | 30 +++++++++ rpc/api/txpool.go | 2 +- rpc/api/utils.go | 8 ++- 6 files changed, 370 insertions(+), 5 deletions(-) create mode 100644 rpc/api/shh.go create mode 100644 rpc/api/shh_args.go create mode 100644 rpc/api/ssh_js.go diff --git a/rpc/api/api.go b/rpc/api/api.go index e870ec58e..206647946 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -13,7 +13,8 @@ const ( MergedApiName = "merged" MinerApiName = "miner" NetApiName = "net" - txPoolApiName = "txpool" + ShhApiName = "shh" + TxPoolApiName = "txpool" PersonalApiName = "personal" Web3ApiName = "web3" ) @@ -21,7 +22,8 @@ const ( var ( // List with all API's which are offered over the IPC interface by default DefaultIpcApis = strings.Join([]string{ - AdminApiName, EthApiName, DebugApiName, MinerApiName, NetApiName, txPoolApiName, PersonalApiName, Web3ApiName, + AdminApiName, EthApiName, DebugApiName, MinerApiName, NetApiName, + ShhApiName, TxPoolApiName, 
PersonalApiName, Web3ApiName, }, ",") ) diff --git a/rpc/api/shh.go b/rpc/api/shh.go new file mode 100644 index 000000000..04c53c93e --- /dev/null +++ b/rpc/api/shh.go @@ -0,0 +1,171 @@ +package api + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "github.com/ethereum/go-ethereum/xeth" +) + +var ( + // mapping between methods and handlers + shhMapping = map[string]shhhandler{ + "shh_version": (*shhApi).Version, + "shh_post": (*shhApi).Post, + "shh_hasIdentity": (*shhApi).HasIdentity, + "shh_newIdentity": (*shhApi).NewIdentity, + "shh_newFilter": (*shhApi).NewFilter, + "shh_uninstallFilter": (*shhApi).UninstallFilter, + "shh_getFilterChanges": (*shhApi).GetFilterChanges, + } +) + +func newWhisperOfflineError(method string) error { + return shared.NewNotAvailableError(method, "whisper offline") +} + +// net callback handler +type shhhandler func(*shhApi, *shared.Request) (interface{}, error) + +// shh api provider +type shhApi struct { + xeth *xeth.XEth + ethereum *eth.Ethereum + methods map[string]shhhandler + codec codec.ApiCoder +} + +// create a new whisper api instance +func NewShhApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *shhApi { + return &shhApi{ + xeth: xeth, + ethereum: eth, + methods: shhMapping, + codec: coder.New(nil), + } +} + +// collection with supported methods +func (self *shhApi) Methods() []string { + methods := make([]string, len(self.methods)) + i := 0 + for k := range self.methods { + methods[i] = k + i++ + } + return methods +} + +// Execute given request +func (self *shhApi) Execute(req *shared.Request) (interface{}, error) { + if callback, ok := self.methods[req.Method]; ok { + return callback(self, req) + } + + return nil, shared.NewNotImplementedError(req.Method) +} + +func (self *shhApi) Name() string { + return ShhApiName +} + +func (self *shhApi) Version(req *shared.Request) (interface{}, error) { + w := self.xeth.Whisper() + if w == nil { + return nil, newWhisperOfflineError(req.Method) + } + + return w.Version(), nil +} + +func (self *shhApi) Post(req *shared.Request) (interface{}, error) { + w := self.xeth.Whisper() + if w == nil { + return nil, newWhisperOfflineError(req.Method) + } + + args := new(WhisperMessageArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + + err := w.Post(args.Payload, args.To, args.From, args.Topics, args.Priority, args.Ttl) + if err != nil { + return false, err + } + + return true, nil +} + +func (self *shhApi) HasIdentity(req *shared.Request) (interface{}, error) { + w := self.xeth.Whisper() + if w == nil { + return nil, newWhisperOfflineError(req.Method) + } + + args := new(WhisperIdentityArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + + return w.HasIdentity(args.Identity), nil +} + +func (self *shhApi) NewIdentity(req *shared.Request) (interface{}, error) { + w := self.xeth.Whisper() + if w == nil { + return nil, newWhisperOfflineError(req.Method) + } + + return w.NewIdentity(), nil +} + +func (self *shhApi) NewFilter(req *shared.Request) (interface{}, error) { + args := new(WhisperFilterArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + + id := self.xeth.NewWhisperFilter(args.To, args.From, args.Topics) + return newHexNum(big.NewInt(int64(id)).Bytes()), nil +} + +func (self *shhApi) UninstallFilter(req *shared.Request) (interface{}, error) { + args := new(FilterIdArgs) + if 
err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + return self.xeth.UninstallWhisperFilter(args.Id), nil +} + +func (self *shhApi) GetFilterChanges(req *shared.Request) (interface{}, error) { + w := self.xeth.Whisper() + if w == nil { + return nil, newWhisperOfflineError(req.Method) + } + + // Retrieve all the new messages arrived since the last request + args := new(FilterIdArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + + return self.xeth.WhisperMessagesChanged(args.Id), nil +} + +func (self *shhApi) GetMessages(req *shared.Request) (interface{}, error) { + w := self.xeth.Whisper() + if w == nil { + return nil, newWhisperOfflineError(req.Method) + } + + // Retrieve all the cached messages matching a specific, existing filter + args := new(FilterIdArgs) + if err := self.codec.Decode(req.Params, &args); err != nil { + return nil, err + } + + return self.xeth.WhisperMessages(args.Id), nil +} diff --git a/rpc/api/shh_args.go b/rpc/api/shh_args.go new file mode 100644 index 000000000..00abac232 --- /dev/null +++ b/rpc/api/shh_args.go @@ -0,0 +1,158 @@ +package api + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/rpc/shared" +) + +type WhisperMessageArgs struct { + Payload string + To string + From string + Topics []string + Priority uint32 + Ttl uint32 +} + +func (args *WhisperMessageArgs) UnmarshalJSON(b []byte) (err error) { + var obj []struct { + Payload string + To string + From string + Topics []string + Priority interface{} + Ttl interface{} + } + + if err = json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + args.Payload = obj[0].Payload + args.To = obj[0].To + args.From = obj[0].From + args.Topics = obj[0].Topics + + var num *big.Int + if num, err = numString(obj[0].Priority); err != nil { + return err + } + args.Priority = uint32(num.Int64()) + + if num, err = numString(obj[0].Ttl); err != nil { + return err + } + args.Ttl = uint32(num.Int64()) + + return nil +} + +type WhisperIdentityArgs struct { + Identity string +} + +func (args *WhisperIdentityArgs) UnmarshalJSON(b []byte) (err error) { + var obj []interface{} + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + + argstr, ok := obj[0].(string) + if !ok { + return shared.NewInvalidTypeError("arg0", "not a string") + } + + args.Identity = argstr + + return nil +} + +type WhisperFilterArgs struct { + To string + From string + Topics [][]string +} + +// UnmarshalJSON implements the json.Unmarshaler interface, invoked to convert a +// JSON message blob into a WhisperFilterArgs structure. 
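[Editor's note — illustrative only, not part of the patch above or below.] The unmarshaler that follows accepts a single-element JSON array whose object carries "to", "from" and "topics", where each topic slot may be null, a plain string, or a nested string array. A minimal sketch of exercising it, assuming it sits next to the type as a _test.go file in rpc/api; the identity and topic hex values are made up:

package api

import "testing"

func TestWhisperFilterArgsExample(t *testing.T) {
	// Hypothetical request params: one filter object with three topic slots.
	params := []byte(`[{"to":"0x04a1b2c3","from":"","topics":[["0xaabbccdd"],null,"0x11223344"]}]`)

	var args WhisperFilterArgs
	if err := args.UnmarshalJSON(params); err != nil {
		t.Fatalf("decode failed: %v", err)
	}

	// Per the parsing code below: null becomes an empty slice,
	// a bare string becomes a one-element slice.
	if len(args.Topics) != 3 || len(args.Topics[1]) != 0 || args.Topics[2][0] != "0x11223344" {
		t.Fatalf("unexpected topics: %v", args.Topics)
	}
}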
+func (args *WhisperFilterArgs) UnmarshalJSON(b []byte) (err error) { + // Unmarshal the JSON message and sanity check + var obj []struct { + To interface{} `json:"to"` + From interface{} `json:"from"` + Topics interface{} `json:"topics"` + } + if err := json.Unmarshal(b, &obj); err != nil { + return shared.NewDecodeParamError(err.Error()) + } + if len(obj) < 1 { + return shared.NewInsufficientParamsError(len(obj), 1) + } + // Retrieve the simple data contents of the filter arguments + if obj[0].To == nil { + args.To = "" + } else { + argstr, ok := obj[0].To.(string) + if !ok { + return shared.NewInvalidTypeError("to", "is not a string") + } + args.To = argstr + } + if obj[0].From == nil { + args.From = "" + } else { + argstr, ok := obj[0].From.(string) + if !ok { + return shared.NewInvalidTypeError("from", "is not a string") + } + args.From = argstr + } + // Construct the nested topic array + if obj[0].Topics != nil { + // Make sure we have an actual topic array + list, ok := obj[0].Topics.([]interface{}) + if !ok { + return shared.NewInvalidTypeError("topics", "is not an array") + } + // Iterate over each topic and handle nil, string or array + topics := make([][]string, len(list)) + for idx, field := range list { + switch value := field.(type) { + case nil: + topics[idx] = []string{} + + case string: + topics[idx] = []string{value} + + case []interface{}: + topics[idx] = make([]string, len(value)) + for i, nested := range value { + switch value := nested.(type) { + case nil: + topics[idx][i] = "" + + case string: + topics[idx][i] = value + + default: + return shared.NewInvalidTypeError(fmt.Sprintf("topic[%d][%d]", idx, i), "is not a string") + } + } + default: + return shared.NewInvalidTypeError(fmt.Sprintf("topic[%d]", idx), "not a string or array") + } + } + args.Topics = topics + } + return nil +} diff --git a/rpc/api/ssh_js.go b/rpc/api/ssh_js.go new file mode 100644 index 000000000..f401f70f1 --- /dev/null +++ b/rpc/api/ssh_js.go @@ -0,0 +1,30 @@ +package api + +const Shh_JS = ` +web3._extend({ + property: 'shh', + methods: + [ + new web3._extend.Method({ + name: 'post', + call: 'shh_post', + params: 6, + inputFormatter: [web3._extend.formatters.formatInputString, + web3._extend.formatters.formatInputString, + web3._extend.formatters.formatInputString, + , + , web3._extend.formatters.formatInputInt + , web3._extend.formatters.formatInputInt], + outputFormatter: web3._extend.formatters.formatOutputBool + }), + ], + properties: + [ + new web3._extend.Property({ + name: 'version', + getter: 'shh_version', + outputFormatter: web3._extend.formatters.formatOutputInt + }) + ] +}); +` diff --git a/rpc/api/txpool.go b/rpc/api/txpool.go index f340c501f..ebbe199b1 100644 --- a/rpc/api/txpool.go +++ b/rpc/api/txpool.go @@ -56,7 +56,7 @@ func (self *txPoolApi) Execute(req *shared.Request) (interface{}, error) { } func (self *txPoolApi) Name() string { - return txPoolApiName + return TxPoolApiName } func (self *txPoolApi) Status(req *shared.Request) (interface{}, error) { diff --git a/rpc/api/utils.go b/rpc/api/utils.go index b44a325a8..ad8a97e92 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -31,7 +31,9 @@ func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth. 
apis[i] = NewMinerApi(eth, codec) case NetApiName: apis[i] = NewNetApi(xeth, eth, codec) - case txPoolApiName: + case ShhApiName: + apis[i] = NewShhApi(xeth, eth, codec) + case TxPoolApiName: apis[i] = NewTxPoolApi(xeth, eth, codec) case PersonalApiName: apis[i] = NewPersonalApi(xeth, eth, codec) @@ -55,7 +57,9 @@ func Javascript(name string) string { return Miner_JS case NetApiName: return Net_JS - case txPoolApiName: + case ShhApiName: + return Shh_JS + case TxPoolApiName: return TxPool_JS case PersonalApiName: return Personal_JS -- cgit v1.2.3 From 348f1562e29c80cac3c1d13ff255a25ed4ec81c7 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 10:23:54 +0200 Subject: restructured eth rpc API --- rpc/api/api.go | 3 ++ rpc/api/eth.go | 8 ++++ rpc/api/utils.go | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+) diff --git a/rpc/api/api.go b/rpc/api/api.go index 206647946..e431e5c1e 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -32,6 +32,9 @@ type EthereumApi interface { // API identifier Name() string + // API version + ApiVersion() string + // Execute the given request and returns the response or an error Execute(*shared.Request) (interface{}, error) diff --git a/rpc/api/eth.go b/rpc/api/eth.go index f27f17f39..a0b9dad86 100644 --- a/rpc/api/eth.go +++ b/rpc/api/eth.go @@ -11,6 +11,10 @@ import ( "github.com/ethereum/go-ethereum/xeth" ) +const ( + EthApiVersion = "1.0" +) + // eth api provider // See https://github.com/ethereum/wiki/wiki/JSON-RPC type ethApi struct { @@ -97,6 +101,10 @@ func (self *ethApi) Name() string { return EthApiName } +func (self *ethApi) ApiVersion() string { + return EthApiVersion +} + func (self *ethApi) Accounts(req *shared.Request) (interface{}, error) { return self.xeth.Accounts(), nil } diff --git a/rpc/api/utils.go b/rpc/api/utils.go index ad8a97e92..318d7c39b 100644 --- a/rpc/api/utils.go +++ b/rpc/api/utils.go @@ -10,6 +10,117 @@ import ( "github.com/ethereum/go-ethereum/xeth" ) +var ( + // Mapping between the different methods each api supports + AutoCompletion = map[string][]string{ + "admin": []string{ + "addPeer", + "peers", + "nodeInfo", + "exportChain", + "importChain", + "verbosity", + "chainSyncStatus", + "setSolc", + "datadir", + }, + "debug": []string{ + "dumpBlock", + "getBlockRlp", + "printBlock", + "processBlock", + "seedHash", + "setHead", + }, + "eth": []string{ + "accounts", + "blockNumber", + "getBalance", + "protocolVersion", + "coinbase", + "mining", + "gasPrice", + "getStorage", + "storageAt", + "getStorageAt", + "getTransactionCount", + "getBlockTransactionCountByHash", + "getBlockTransactionCountByNumber", + "getUncleCountByBlockHash", + "getUncleCountByBlockNumber", + "getData", + "getCode", + "sign", + "sendTransaction", + "transact", + "estimateGas", + "call", + "flush", + "getBlockByHash", + "getBlockByNumber", + "getTransactionByHash", + "getTransactionByBlockHashAndIndex", + "getUncleByBlockHashAndIndex", + "getUncleByBlockNumberAndIndex", + "getCompilers", + "compileSolidity", + "newFilter", + "newBlockFilter", + "newPendingTransactionFilter", + "uninstallFilter", + "getFilterChanges", + "getFilterLogs", + "getLogs", + "hashrate", + "getWork", + "submitWork", + }, + "miner": []string{ + "hashrate", + "makeDAG", + "setExtra", + "setGasPrice", + "startAutoDAG", + "start", + "stopAutoDAG", + "stop", + }, + "net": []string{ + "peerCount", + "listening", + }, + "personal": []string{ + "listAccounts", + "newAccount", + "deleteAccount", + "unlockAccount", + }, + "shh": 
[]string{ + "version", + "post", + "hasIdentity", + "newIdentity", + "newFilter", + "uninstallFilter", + "getFilterChanges", + }, + "txpool": []string{ + "status", + }, + "web3": []string{ + "sha3", + "version", + "fromWei", + "toWei", + "toHex", + "toAscii", + "fromAscii", + "toBigNumber", + "isAddress", + }, + } +) + // Parse a comma separated API string to individual api's func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth.Ethereum) ([]EthereumApi, error) { if len(strings.TrimSpace(apistr)) == 0 { -- cgit v1.2.3 From bbfa0a3dcb82e704828d1438261ff26c50c4ccc3 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 11:01:02 +0200 Subject: added API/IPC commandline flags --- rpc/api/api.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rpc/api/api.go b/rpc/api/api.go index e431e5c1e..f62e48772 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -27,6 +27,11 @@ var ( }, ",") ) +const ( + // List with all API's which are offered over the IPC interface by default + DefaultIpcApis = "eth" +) + // Ethereum RPC API interface type EthereumApi interface { // API identifier -- cgit v1.2.3 From 1b59f890955c3658516daa958d0e4732004a78b7 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 12:43:58 +0200 Subject: added console command --- cmd/geth/main.go | 2 ++ rpc/api/api.go | 5 ----- rpc/api/mergedapi.go | 22 +++++++++++++++------ rpc/api/web3.go | 7 +++---- rpc/jeth.go | 54 +++++++++++++++++++++++++++++++++++++++++----------- 5 files changed, 64 insertions(+), 26 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 8e55b310c..9e4083251 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -307,6 +307,7 @@ func console(ctx *cli.Context) { repl := newJSRE( ethereum, ctx.String(utils.JSpathFlag.Name), + ctx.GlobalString(utils.IPCPathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), ctx.GlobalString(utils.IPCPathFlag.Name), true, @@ -329,6 +330,7 @@ func execJSFiles(ctx *cli.Context) { repl := newJSRE( ethereum, ctx.String(utils.JSpathFlag.Name), + ctx.GlobalString(utils.IPCPathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), ctx.GlobalString(utils.IPCPathFlag.Name), false, diff --git a/rpc/api/api.go b/rpc/api/api.go index f62e48772..e431e5c1e 100644 --- a/rpc/api/api.go +++ b/rpc/api/api.go @@ -27,11 +27,6 @@ var ( }, ",") ) -const ( - // List with all API's which are offered over the IPC interface by default - DefaultIpcApis = "eth" -) - // Ethereum RPC API interface type EthereumApi interface { // API identifier diff --git a/rpc/api/mergedapi.go b/rpc/api/mergedapi.go index dea8d1289..b62477a14 100644 --- a/rpc/api/mergedapi.go +++ b/rpc/api/mergedapi.go @@ -1,21 +1,27 @@ package api -import "github.com/ethereum/go-ethereum/rpc/shared" +import ( + "github.com/ethereum/go-ethereum/rpc/shared" +) + +const ( + MergedApiVersion = "1.0" +) // combines multiple API's type MergedApi struct { - apis []string + apis map[string]string methods map[string]EthereumApi } // create new merged api instance func newMergedApi(apis ...EthereumApi) *MergedApi { mergedApi := new(MergedApi) - mergedApi.apis = make([]string, len(apis)) + mergedApi.apis = make(map[string]string, len(apis)) mergedApi.methods = make(map[string]EthereumApi) - for i, api := range apis { - mergedApi.apis[i] = api.Name() + for _, api := range apis { + mergedApi.apis[api.Name()] = api.ApiVersion() for _, method := range api.Methods() { mergedApi.methods[method] = api } @@ -47,8 +53,12 @@ func (self *MergedApi) Name() string { return MergedApiName 
} +func (self *MergedApi) ApiVersion() string { + return MergedApiVersion +} + func (self *MergedApi) handle(req *shared.Request) (interface{}, error) { - if req.Method == "support_apis" { // provided API's + if req.Method == "modules" { // provided API's return self.apis, nil } diff --git a/rpc/api/web3.go b/rpc/api/web3.go index 42b0b7fd9..ed5008446 100644 --- a/rpc/api/web3.go +++ b/rpc/api/web3.go @@ -9,7 +9,7 @@ import ( ) const ( - Web3Version = "1.0.0" + Web3ApiVersion = "1.0" ) var ( @@ -63,9 +63,8 @@ func (self *web3Api) Name() string { return Web3ApiName } -// Version of the API this instance provides -func (self *web3Api) Version() string { - return Web3Version +func (self *web3Api) ApiVersion() string { + return Web3ApiVersion } // Calculates the sha3 over req.Params.Data diff --git a/rpc/jeth.go b/rpc/jeth.go index e578775bb..d1b36906f 100644 --- a/rpc/jeth.go +++ b/rpc/jeth.go @@ -11,6 +11,10 @@ import ( "github.com/ethereum/go-ethereum/rpc/comms" "github.com/ethereum/go-ethereum/rpc/shared" "github.com/robertkrimen/otto" + "github.com/ethereum/go-ethereum/rpc/comms" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" + "reflect" ) type Jeth struct { @@ -40,6 +44,13 @@ func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) { return self.err(call, -32700, err.Error(), nil) } + client, err := comms.NewIpcClient(comms.IpcConfig{self.ipcpath}, codec.JSON) + if err != nil { + fmt.Println("Unable to connect to geth.") + return self.err(call, -32603, err.Error(), -1) + } + defer client.Close() + jsonreq, err := json.Marshal(reqif) var reqs []RpcRequest batch := true @@ -54,22 +65,43 @@ func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) { call.Otto.Run("var ret_response = new Array(response_len);") for i, req := range reqs { - var respif interface{} - err = self.ethApi.GetRequestReply(&req, &respif) + err := client.Send(&req) if err != nil { - fmt.Println("Error response:", err) + fmt.Println("Error send request:", err) return self.err(call, -32603, err.Error(), req.Id) } - call.Otto.Set("ret_jsonrpc", jsonrpcver) - call.Otto.Set("ret_id", req.Id) - res, _ := json.Marshal(respif) + respif, err := client.Recv() + if err != nil { + fmt.Println("Error recv response:", err) + return self.err(call, -32603, err.Error(), req.Id) + } - call.Otto.Set("ret_result", string(res)) - call.Otto.Set("response_idx", i) - response, err = call.Otto.Run(` - ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; - `) + if res, ok := respif.(shared.SuccessResponse); ok { + call.Otto.Set("ret_id", res.Id) + call.Otto.Set("ret_jsonrpc", res.Jsonrpc) + resObj, _ := json.Marshal(res.Result) + call.Otto.Set("ret_result", string(resObj)) + call.Otto.Set("response_idx", i) + + response, err = call.Otto.Run(` + ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; + `) + } else if res, ok := respif.(shared.ErrorResponse); ok { + fmt.Printf("Error: %s (%d)\n", res.Error.Message, res.Error.Code) + + call.Otto.Set("ret_id", res.Id) + call.Otto.Set("ret_jsonrpc", res.Jsonrpc) + call.Otto.Set("ret_error", res.Error) + call.Otto.Set("response_idx", i) + + response, _ = call.Otto.Run(` + ret_response = { jsonrpc: ret_jsonrpc, id: ret_id, error: ret_error }; + `) + return + } else { + fmt.Printf("unexpected response\n", reflect.TypeOf(respif)) + } } if !batch { -- cgit v1.2.3 From 594a34a88d8e66e82f5333b66f83561f0c0c5bd4 Mon Sep 17 00:00:00 2001 From: Bas van 
Kervel Date: Mon, 8 Jun 2015 13:21:24 +0200 Subject: changed send methods for backwards compatability in geth console --- cmd/geth/main.go | 2 -- rpc/jeth.go | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 9e4083251..8e55b310c 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -307,7 +307,6 @@ func console(ctx *cli.Context) { repl := newJSRE( ethereum, ctx.String(utils.JSpathFlag.Name), - ctx.GlobalString(utils.IPCPathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), ctx.GlobalString(utils.IPCPathFlag.Name), true, @@ -330,7 +329,6 @@ func execJSFiles(ctx *cli.Context) { repl := newJSRE( ethereum, ctx.String(utils.JSpathFlag.Name), - ctx.GlobalString(utils.IPCPathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), ctx.GlobalString(utils.IPCPathFlag.Name), false, diff --git a/rpc/jeth.go b/rpc/jeth.go index d1b36906f..69df4500e 100644 --- a/rpc/jeth.go +++ b/rpc/jeth.go @@ -44,6 +44,60 @@ func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) { return self.err(call, -32700, err.Error(), nil) } + jsonreq, err := json.Marshal(reqif) + var reqs []RpcRequest + batch := true + err = json.Unmarshal(jsonreq, &reqs) + if err != nil { + reqs = make([]RpcRequest, 1) + err = json.Unmarshal(jsonreq, &reqs[0]) + batch = false + } + + call.Otto.Set("response_len", len(reqs)) + call.Otto.Run("var ret_response = new Array(response_len);") + + for i, req := range reqs { + var respif interface{} + err = self.ethApi.GetRequestReply(&req, &respif) + if err != nil { + fmt.Println("Error response:", err) + return self.err(call, -32603, err.Error(), req.Id) + } + call.Otto.Set("ret_jsonrpc", jsonrpcver) + call.Otto.Set("ret_id", req.Id) + + res, _ := json.Marshal(respif) + + call.Otto.Set("ret_result", string(res)) + call.Otto.Set("response_idx", i) + response, err = call.Otto.Run(` + ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; + `) + } + + if !batch { + call.Otto.Run("ret_response = ret_response[0];") + } + + if call.Argument(1).IsObject() { + call.Otto.Set("callback", call.Argument(1)) + call.Otto.Run(` + if (Object.prototype.toString.call(callback) == '[object Function]') { + callback(null, ret_response); + } + `) + } + + return +} + +func (self *Jeth) SendIpc(call otto.FunctionCall) (response otto.Value) { + reqif, err := call.Argument(0).Export() + if err != nil { + return self.err(call, -32700, err.Error(), nil) + } + client, err := comms.NewIpcClient(comms.IpcConfig{self.ipcpath}, codec.JSON) if err != nil { fmt.Println("Unable to connect to geth.") -- cgit v1.2.3 From 3c93034a32b0413579996f9c8c11a99fa0f00fe6 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 13:50:36 +0200 Subject: fixed windows build problem --- rpc/comms/ipc_windows.go | 702 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 702 insertions(+) diff --git a/rpc/comms/ipc_windows.go b/rpc/comms/ipc_windows.go index c48dfb7fb..bee781744 100644 --- a/rpc/comms/ipc_windows.go +++ b/rpc/comms/ipc_windows.go @@ -1,3 +1,4 @@ +<<<<<<< HEAD // +build windows package comms @@ -694,3 +695,704 @@ func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { return nil } +======= +// +build windows + +package comms + +import ( + "fmt" + "io" + "net" + "os" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + 
"github.com/ethereum/go-ethereum/rpc/api" + "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/shared" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") +) + +func createNamedPipe(name *uint16, openMode uint32, pipeMode uint32, maxInstances uint32, outBufSize uint32, inBufSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(openMode), uintptr(pipeMode), uintptr(maxInstances), uintptr(outBufSize), uintptr(inBufSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func cancelIoEx(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connectNamedPipe(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func disconnectNamedPipe(handle syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) { + var _p0 uint32 + if manualReset { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if initialState { + _p1 = 1 + } else { + _p1 = 0 + } + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(sa)), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(name)), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, transferred *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transferred)), uintptr(_p0), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + + +const ( + // openMode + pipe_access_duplex = 
0x3 + pipe_access_inbound = 0x1 + pipe_access_outbound = 0x2 + + // openMode write flags + file_flag_first_pipe_instance = 0x00080000 + file_flag_write_through = 0x80000000 + file_flag_overlapped = 0x40000000 + + // openMode ACL flags + write_dac = 0x00040000 + write_owner = 0x00080000 + access_system_security = 0x01000000 + + // pipeMode + pipe_type_byte = 0x0 + pipe_type_message = 0x4 + + // pipeMode read mode flags + pipe_readmode_byte = 0x0 + pipe_readmode_message = 0x2 + + // pipeMode wait mode flags + pipe_wait = 0x0 + pipe_nowait = 0x1 + + // pipeMode remote-client mode flags + pipe_accept_remote_clients = 0x0 + pipe_reject_remote_clients = 0x8 + + pipe_unlimited_instances = 255 + + nmpwait_wait_forever = 0xFFFFFFFF + + // the two not-an-errors below occur if a client connects to the pipe between + // the server's CreateNamedPipe and ConnectNamedPipe calls. + error_no_data syscall.Errno = 0xE8 + error_pipe_connected syscall.Errno = 0x217 + error_pipe_busy syscall.Errno = 0xE7 + error_sem_timeout syscall.Errno = 0x79 + + error_bad_pathname syscall.Errno = 0xA1 + error_invalid_name syscall.Errno = 0x7B + + error_io_incomplete syscall.Errno = 0x3e4 +) + +var _ net.Conn = (*PipeConn)(nil) +var _ net.Listener = (*PipeListener)(nil) + +// ErrClosed is the error returned by PipeListener.Accept when Close is called +// on the PipeListener. +var ErrClosed = PipeError{"Pipe has been closed.", false} + +// PipeError is an error related to a call to a pipe +type PipeError struct { + msg string + timeout bool +} + +// Error implements the error interface +func (e PipeError) Error() string { + return e.msg +} + +// Timeout implements net.AddrError.Timeout() +func (e PipeError) Timeout() bool { + return e.timeout +} + +// Temporary implements net.AddrError.Temporary() +func (e PipeError) Temporary() bool { + return false +} + +// Dial connects to a named pipe with the given address. If the specified pipe is not available, +// it will wait indefinitely for the pipe to become available. +// +// The address must be of the form \\.\\pipe\ for local pipes and \\\pipe\ +// for remote pipes. +// +// Dial will return a PipeError if you pass in a badly formatted pipe name. 
+// +// Examples: +// // local pipe +// conn, err := Dial(`\\.\pipe\mypipename`) +// +// // remote pipe +// conn, err := Dial(`\\othercomp\pipe\mypipename`) +func Dial(address string) (*PipeConn, error) { + for { + conn, err := dial(address, nmpwait_wait_forever) + if err == nil { + return conn, nil + } + if isPipeNotReady(err) { + <-time.After(100 * time.Millisecond) + continue + } + return nil, err + } +} + +// DialTimeout acts like Dial, but will time out after the duration of timeout +func DialTimeout(address string, timeout time.Duration) (*PipeConn, error) { + deadline := time.Now().Add(timeout) + + now := time.Now() + for now.Before(deadline) { + millis := uint32(deadline.Sub(now) / time.Millisecond) + conn, err := dial(address, millis) + if err == nil { + return conn, nil + } + if err == error_sem_timeout { + // This is WaitNamedPipe's timeout error, so we know we're done + return nil, PipeError{fmt.Sprintf( + "Timed out waiting for pipe '%s' to come available", address), true} + } + if isPipeNotReady(err) { + left := deadline.Sub(time.Now()) + retry := 100 * time.Millisecond + if left > retry { + <-time.After(retry) + } else { + <-time.After(left - time.Millisecond) + } + now = time.Now() + continue + } + return nil, err + } + return nil, PipeError{fmt.Sprintf( + "Timed out waiting for pipe '%s' to come available", address), true} +} + +// isPipeNotReady checks the error to see if it indicates the pipe is not ready +func isPipeNotReady(err error) bool { + // Pipe Busy means another client just grabbed the open pipe end, + // and the server hasn't made a new one yet. + // File Not Found means the server hasn't created the pipe yet. + // Neither is a fatal error. + + return err == syscall.ERROR_FILE_NOT_FOUND || err == error_pipe_busy +} + +// newOverlapped creates a structure used to track asynchronous +// I/O requests that have been issued. +func newOverlapped() (*syscall.Overlapped, error) { + event, err := createEvent(nil, true, true, nil) + if err != nil { + return nil, err + } + return &syscall.Overlapped{HEvent: event}, nil +} + +// waitForCompletion waits for an asynchronous I/O request referred to by overlapped to complete. +// This function returns the number of bytes transferred by the operation and an error code if +// applicable (nil otherwise). +func waitForCompletion(handle syscall.Handle, overlapped *syscall.Overlapped) (uint32, error) { + _, err := syscall.WaitForSingleObject(overlapped.HEvent, syscall.INFINITE) + if err != nil { + return 0, err + } + var transferred uint32 + err = getOverlappedResult(handle, overlapped, &transferred, true) + return transferred, err +} + +// dial is a helper to initiate a connection to a named pipe that has been started by a server. +// The timeout is only enforced if the pipe server has already created the pipe, otherwise +// this function will return immediately. +func dial(address string, timeout uint32) (*PipeConn, error) { + name, err := syscall.UTF16PtrFromString(string(address)) + if err != nil { + return nil, err + } + // If at least one instance of the pipe has been created, this function + // will wait timeout milliseconds for it to become available. + // It will return immediately regardless of timeout, if no instances + // of the named pipe have been created yet. + // If this returns with no error, there is a pipe available. 
+ if err := waitNamedPipe(name, timeout); err != nil { + if err == error_bad_pathname { + // badly formatted pipe name + return nil, badAddr(address) + } + return nil, err + } + pathp, err := syscall.UTF16PtrFromString(address) + if err != nil { + return nil, err + } + handle, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, + uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, err + } + return &PipeConn{handle: handle, addr: PipeAddr(address)}, nil +} + +// Listen returns a new PipeListener that will listen on a pipe with the given +// address. The address must be of the form \\.\pipe\ +// +// Listen will return a PipeError for an incorrectly formatted pipe name. +func Listen(address string) (*PipeListener, error) { + handle, err := createPipe(address, true) + if err == error_invalid_name { + return nil, badAddr(address) + } + if err != nil { + return nil, err + } + return &PipeListener{ + addr: PipeAddr(address), + handle: handle, + }, nil +} + +// PipeListener is a named pipe listener. Clients should typically +// use variables of type net.Listener instead of assuming named pipe. +type PipeListener struct { + addr PipeAddr + handle syscall.Handle + closed bool + + // acceptHandle contains the current handle waiting for + // an incoming connection or nil. + acceptHandle syscall.Handle + // acceptOverlapped is set before waiting on a connection. + // If not waiting, it is nil. + acceptOverlapped *syscall.Overlapped + // acceptMutex protects the handle and overlapped structure. + acceptMutex sync.Mutex +} + +// Accept implements the Accept method in the net.Listener interface; it +// waits for the next call and returns a generic net.Conn. +func (l *PipeListener) Accept() (net.Conn, error) { + c, err := l.AcceptPipe() + for err == error_no_data { + // Ignore clients that connect and immediately disconnect. + c, err = l.AcceptPipe() + } + if err != nil { + return nil, err + } + return c, nil +} + +// AcceptPipe accepts the next incoming call and returns the new connection. +// It might return an error if a client connected and immediately cancelled +// the connection. +func (l *PipeListener) AcceptPipe() (*PipeConn, error) { + if l == nil || l.addr == "" || l.closed { + return nil, syscall.EINVAL + } + + // the first time we call accept, the handle will have been created by the Listen + // call. This is to prevent race conditions where the client thinks the server + // isn't listening because it hasn't actually called create yet. 
After the first time, we'll + // have to create a new handle each time + handle := l.handle + if handle == 0 { + var err error + handle, err = createPipe(string(l.addr), false) + if err != nil { + return nil, err + } + } else { + l.handle = 0 + } + + overlapped, err := newOverlapped() + if err != nil { + return nil, err + } + defer syscall.CloseHandle(overlapped.HEvent) + if err := connectNamedPipe(handle, overlapped); err != nil && err != error_pipe_connected { + if err == error_io_incomplete || err == syscall.ERROR_IO_PENDING { + l.acceptMutex.Lock() + l.acceptOverlapped = overlapped + l.acceptHandle = handle + l.acceptMutex.Unlock() + defer func() { + l.acceptMutex.Lock() + l.acceptOverlapped = nil + l.acceptHandle = 0 + l.acceptMutex.Unlock() + }() + + _, err = waitForCompletion(handle, overlapped) + } + if err == syscall.ERROR_OPERATION_ABORTED { + // Return error compatible to net.Listener.Accept() in case the + // listener was closed. + return nil, ErrClosed + } + if err != nil { + return nil, err + } + } + return &PipeConn{handle: handle, addr: l.addr}, nil +} + +// Close stops listening on the address. +// Already Accepted connections are not closed. +func (l *PipeListener) Close() error { + if l.closed { + return nil + } + l.closed = true + if l.handle != 0 { + err := disconnectNamedPipe(l.handle) + if err != nil { + return err + } + err = syscall.CloseHandle(l.handle) + if err != nil { + return err + } + l.handle = 0 + } + l.acceptMutex.Lock() + defer l.acceptMutex.Unlock() + if l.acceptOverlapped != nil && l.acceptHandle != 0 { + // Cancel the pending IO. This call does not block, so it is safe + // to hold onto the mutex above. + if err := cancelIoEx(l.acceptHandle, l.acceptOverlapped); err != nil { + return err + } + err := syscall.CloseHandle(l.acceptOverlapped.HEvent) + if err != nil { + return err + } + l.acceptOverlapped.HEvent = 0 + err = syscall.CloseHandle(l.acceptHandle) + if err != nil { + return err + } + l.acceptHandle = 0 + } + return nil +} + +// Addr returns the listener's network address, a PipeAddr. +func (l *PipeListener) Addr() net.Addr { return l.addr } + +// PipeConn is the implementation of the net.Conn interface for named pipe connections. +type PipeConn struct { + handle syscall.Handle + addr PipeAddr + + // these aren't actually used yet + readDeadline *time.Time + writeDeadline *time.Time +} + +type iodata struct { + n uint32 + err error +} + +// completeRequest looks at iodata to see if a request is pending. If so, it waits for it to either complete or to +// abort due to hitting the specified deadline. Deadline may be set to nil to wait forever. If no request is pending, +// the content of iodata is returned. +func (c *PipeConn) completeRequest(data iodata, deadline *time.Time, overlapped *syscall.Overlapped) (int, error) { + if data.err == error_io_incomplete || data.err == syscall.ERROR_IO_PENDING { + var timer <-chan time.Time + if deadline != nil { + if timeDiff := deadline.Sub(time.Now()); timeDiff > 0 { + timer = time.After(timeDiff) + } + } + done := make(chan iodata) + go func() { + n, err := waitForCompletion(c.handle, overlapped) + done <- iodata{n, err} + }() + select { + case data = <-done: + case <-timer: + syscall.CancelIoEx(c.handle, overlapped) + data = iodata{0, timeout(c.addr.String())} + } + } + // Windows will produce ERROR_BROKEN_PIPE upon closing + // a handle on the other end of a connection. Go RPC + // expects an io.EOF error in this case. 
+ if data.err == syscall.ERROR_BROKEN_PIPE { + data.err = io.EOF + } + return int(data.n), data.err +} + +// Read implements the net.Conn Read method. +func (c *PipeConn) Read(b []byte) (int, error) { + // Use ReadFile() rather than Read() because the latter + // contains a workaround that eats ERROR_BROKEN_PIPE. + overlapped, err := newOverlapped() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(overlapped.HEvent) + var n uint32 + err = syscall.ReadFile(c.handle, b, &n, overlapped) + return c.completeRequest(iodata{n, err}, c.readDeadline, overlapped) +} + +// Write implements the net.Conn Write method. +func (c *PipeConn) Write(b []byte) (int, error) { + overlapped, err := newOverlapped() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(overlapped.HEvent) + var n uint32 + err = syscall.WriteFile(c.handle, b, &n, overlapped) + return c.completeRequest(iodata{n, err}, c.writeDeadline, overlapped) +} + +// Close closes the connection. +func (c *PipeConn) Close() error { + return syscall.CloseHandle(c.handle) +} + +// LocalAddr returns the local network address. +func (c *PipeConn) LocalAddr() net.Addr { + return c.addr +} + +// RemoteAddr returns the remote network address. +func (c *PipeConn) RemoteAddr() net.Addr { + // not sure what to do here, we don't have remote addr.... + return c.addr +} + +// SetDeadline implements the net.Conn SetDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetDeadline(t time.Time) error { + c.SetReadDeadline(t) + c.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetReadDeadline(t time.Time) error { + c.readDeadline = &t + return nil +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +// Note that timeouts are only supported on Windows Vista/Server 2008 and above +func (c *PipeConn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = &t + return nil +} + +// PipeAddr represents the address of a named pipe. +type PipeAddr string + +// Network returns the address's network name, "pipe". +func (a PipeAddr) Network() string { return "pipe" } + +// String returns the address of the pipe +func (a PipeAddr) String() string { + return string(a) +} + +// createPipe is a helper function to make sure we always create pipes +// with the same arguments, since subsequent calls to create pipe need +// to use the same arguments as the first one. If first is set, fail +// if the pipe already exists. 
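[Editor's note — illustrative only, not part of the patch.] A sketch of how a console-style client might open this named-pipe transport with a bounded wait, assuming it lives inside rpc/comms on Windows and imports "net" and "time"; the pipe name is hypothetical:

func dialGethPipeExample() (net.Conn, error) {
	// DialTimeout (defined above) gives up with a PipeError whose Timeout()
	// reports true if no server instance appears within the deadline.
	// *PipeConn satisfies net.Conn, so the result can be handed straight to
	// the JSON codec layer used by the IPC client.
	return DialTimeout(`\\.\pipe\geth.ipc`, 2*time.Second)
}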
+func createPipe(address string, first bool) (syscall.Handle, error) { + n, err := syscall.UTF16PtrFromString(address) + if err != nil { + return 0, err + } + mode := uint32(pipe_access_duplex | syscall.FILE_FLAG_OVERLAPPED) + if first { + mode |= file_flag_first_pipe_instance + } + return createNamedPipe(n, + mode, + pipe_type_byte, + pipe_unlimited_instances, + 512, 512, 0, nil) +} + +func badAddr(addr string) PipeError { + return PipeError{fmt.Sprintf("Invalid pipe address '%s'.", addr), false} +} +func timeout(addr string) PipeError { + return PipeError{fmt.Sprintf("Pipe IO timed out waiting for '%s'", addr), true} +} + + + +func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) { + c, err := Dial(cfg.Endpoint) + if err != nil { + return nil, err + } + + return &ipcClient{codec.New(c)}, nil +} + +func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { + os.Remove(cfg.Endpoint) // in case it still exists from a previous run + + l, err := Listen(cfg.Endpoint) + if err != nil { + return err + } + os.Chmod(cfg.Endpoint, 0600) + + go func() { + for { + conn, err := l.Accept() + if err != nil { + glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err) + continue + } + + go func(conn net.Conn) { + codec := codec.New(conn) + + for { + req, err := codec.ReadRequest() + if err == io.EOF { + codec.Close() + return + } else if err != nil { + glog.V(logger.Error).Infof("IPC recv err - %v\n", err) + codec.Close() + return + } + + var rpcResponse interface{} + res, err := api.Execute(req) + + rpcResponse = shared.NewRpcResponse(req.Id, req.Jsonrpc, res, err) + err = codec.WriteResponse(rpcResponse) + if err != nil { + glog.V(logger.Error).Infof("IPC send err - %v\n", err) + codec.Close() + return + } + } + }(conn) + } + }() + + glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint) + + return nil +} +>>>>>>> fixed windows build problem -- cgit v1.2.3 From aa258dcc5f01a05c81075be999c83446862abb42 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 19:14:42 +0200 Subject: added console binary --- cmd/console/js.go | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 89 insertions(+), 5 deletions(-) diff --git a/cmd/console/js.go b/cmd/console/js.go index 76695cabd..633b79315 100644 --- a/cmd/console/js.go +++ b/cmd/console/js.go @@ -28,6 +28,8 @@ import ( "encoding/json" + "sort" + "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common/docserver" re "github.com/ethereum/go-ethereum/jsre" @@ -73,6 +75,79 @@ type jsre struct { prompter } +var ( + loadedModulesMethods map[string][]string +) + +func loadAutoCompletion(js *jsre, ipcpath string) { + modules, err := js.suportedApis(ipcpath) + if err != nil { + utils.Fatalf("Unable to determine supported modules - %v", err) + } + + fmt.Printf("load autocompletion %v", modules) + + loadedModulesMethods = make(map[string][]string) + for module, _ := range modules { + loadedModulesMethods[module] = api.AutoCompletion[module] + } +} + +func keywordCompleter(line string) []string { + results := make([]string, 0) + + if strings.Contains(line, ".") { + elements := strings.Split(line, ".") + if len(elements) == 2 { + module := elements[0] + partialMethod := elements[1] + if methods, found := loadedModulesMethods[module]; found { + for _, method := range methods { + if strings.HasPrefix(method, partialMethod) { // e.g. 
debug.se + results = append(results, module+"."+method) + } + } + } + } + } else { + for module, methods := range loadedModulesMethods { + if line == module { // user typed in full module name, show all methods + for _, method := range methods { + results = append(results, module+"."+method) + } + } else if strings.HasPrefix(module, line) { // partial method name, e.g. admi + results = append(results, module) + } + } + } + return results +} + +func apiWordCompleter(line string, pos int) (head string, completions []string, tail string) { + if len(line) == 0 { + return "", nil, "" + } + + i := 0 + for i = pos - 1; i > 0; i-- { + if line[i] == '.' || (line[i] >= 'a' && line[i] <= 'z') || (line[i] >= 'A' && line[i] <= 'Z') { + continue + } + if i >= 3 && line[i] == '3' && line[i-3] == 'w' && line[i-2] == 'e' && line[i-1] == 'b' { + continue + } + i += 1 + break + } + + begin := line[:i] + keyword := line[i:pos] + end := line[pos:] + + completionWords := keywordCompleter(keyword) + return begin, completionWords, end +} + func newJSRE(libPath, ipcpath string) *jsre { js := &jsre{ps1: "> "} js.wait = make(chan *big.Int) @@ -87,6 +162,9 @@ func newJSRE(libPath, ipcpath string) *jsre { lr := liner.NewLiner() js.withHistory(func(hist *os.File) { lr.ReadHistory(hist) }) lr.SetCtrlCAborts(true) + loadAutoCompletion(js, ipcpath) + lr.SetWordCompleter(apiWordCompleter) + lr.SetTabCompletionStyle(liner.TabPrints) js.prompter = lr js.atexit = func() { js.withHistory(func(hist *os.File) { hist.Truncate(0); lr.WriteHistory(hist) }) @@ -134,7 +212,7 @@ func (js *jsre) apiBindings(ipcpath string) { // load only supported API's in javascript runtime shortcuts := "var eth = web3.eth; " - for _, apiName := range apis { + for apiName, _ := range apis { if apiName == api.Web3ApiName || apiName == api.EthApiName { continue // manually mapped } @@ -194,7 +272,7 @@ func (self *jsre) exec(filename string) error { return nil } -func (self *jsre) suportedApis(ipcpath string) ([]string, error) { +func (self *jsre) suportedApis(ipcpath string) (map[string]string, error) { config := comms.IpcConfig{ Endpoint: ipcpath, } @@ -207,7 +285,7 @@ func (self *jsre) suportedApis(ipcpath string) ([]string, error) { req := shared.Request{ Id: 1, Jsonrpc: "2.0", - Method: "support_apis", + Method: "modules", } err = client.Send(req) @@ -222,7 +300,7 @@ func (self *jsre) suportedApis(ipcpath string) ([]string, error) { if sucRes, ok := res.(shared.SuccessResponse); ok { data, _ := json.Marshal(sucRes.Result) - apis := make([]string, 0) + apis := make(map[string]string) err = json.Unmarshal(data, &apis) if err == nil { return apis, nil @@ -242,7 +320,13 @@ func (self *jsre) welcome(ipcpath string) { + " " + new Date(lastBlockTimestamp).toLocaleTimeString() + ")");`) if modules, err := self.suportedApis(ipcpath); err == nil { - self.re.Eval(fmt.Sprintf("var modules = '%s';", strings.Join(modules, " "))) + loadedModules := make([]string, 0) + for api, version := range modules { + loadedModules = append(loadedModules, fmt.Sprintf("%s:%s", api, version)) + } + sort.Strings(loadedModules) + + self.re.Eval(fmt.Sprintf("var modules = '%s';", strings.Join(loadedModules, " "))) self.re.Eval(`console.log(" modules: " + modules);`) } } -- cgit v1.2.3 From 87dace1fa96ed4426ea963ba4f9559c05bf42e35 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 14:42:15 +0200 Subject: added miner API --- rpc/api/miner.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/rpc/api/miner.go b/rpc/api/miner.go index 
496269304..0b5e74f52 100644 --- a/rpc/api/miner.go +++ b/rpc/api/miner.go @@ -9,7 +9,7 @@ import ( ) const ( - MinerVersion = "1.0.0" + MinerApiVersion = "1.0" ) var ( @@ -69,6 +69,10 @@ func (self *minerApi) Name() string { return MinerApiName } +func (self *minerApi) ApiVersion() string { + return MinerApiVersion +} + func (self *minerApi) StartMiner(req *shared.Request) (interface{}, error) { args := new(StartMinerArgs) if err := self.codec.Decode(req.Params, &args); err != nil { -- cgit v1.2.3 From 1fe617fa5791bffb6b3cc60636c6629c7aca36d3 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Mon, 8 Jun 2015 14:50:11 +0200 Subject: added net API --- rpc/api/net.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/rpc/api/net.go b/rpc/api/net.go index 6f5d55f12..d6888ee4a 100644 --- a/rpc/api/net.go +++ b/rpc/api/net.go @@ -7,6 +7,10 @@ import ( "github.com/ethereum/go-ethereum/xeth" ) +const ( + NetApiVersion = "1.0" +) + var ( // mapping between methods and handlers netMapping = map[string]nethandler{ @@ -62,6 +66,10 @@ func (self *netApi) Name() string { return NetApiName } +func (self *netApi) ApiVersion() string { + return NetApiVersion +} + // Network version func (self *netApi) Version(req *shared.Request) (interface{}, error) { return self.xeth.NetworkVersion(), nil -- cgit v1.2.3 From 7584e68c21cfd155a9e72b29422d8d458691d4ae Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Wed, 10 Jun 2015 09:42:14 +0200 Subject: upgrade web3.js with _extend support --- rpc/api/admin.go | 16 +++++++++++++--- rpc/api/debug.go | 8 ++++++-- rpc/api/debug_js.go | 7 +++++++ rpc/api/personal.go | 8 ++++++++ rpc/api/personal_js.go | 2 +- rpc/api/shh.go | 8 ++++++++ rpc/api/txpool.go | 8 ++++++++ 7 files changed, 51 insertions(+), 6 deletions(-) diff --git a/rpc/api/admin.go b/rpc/api/admin.go index 6b89942b2..a6b9cf050 100644 --- a/rpc/api/admin.go +++ b/rpc/api/admin.go @@ -16,7 +16,7 @@ import ( ) const ( - AdminVersion = "1.0.0" + AdminApiversion = "1.0" importBatchSize = 2500 ) @@ -82,6 +82,10 @@ func (self *adminApi) Name() string { return AdminApiName } +func (self *adminApi) ApiVersion() string { + return AdminApiversion +} + func (self *adminApi) AddPeer(req *shared.Request) (interface{}, error) { args := new(AddPeerArgs) if err := self.codec.Decode(req.Params, &args); err != nil { @@ -215,8 +219,14 @@ func (self *adminApi) Verbosity(req *shared.Request) (interface{}, error) { } func (self *adminApi) ChainSyncStatus(req *shared.Request) (interface{}, error) { - pending, cached := self.ethereum.Downloader().Stats() - return map[string]interface{}{"blocksAvailable": pending, "blocksWaitingForImport": cached}, nil + pending, cached, importing, estimate := self.ethereum.Downloader().Stats() + + return map[string]interface{}{ + "blocksAvailable": pending, + "blocksWaitingForImport": cached, + "importing": importing, + "estimate": estimate.String(), + }, nil } func (self *adminApi) SetSolc(req *shared.Request) (interface{}, error) { diff --git a/rpc/api/debug.go b/rpc/api/debug.go index 2930ad870..5b6a449dc 100644 --- a/rpc/api/debug.go +++ b/rpc/api/debug.go @@ -14,7 +14,7 @@ import ( ) const ( - DebugVersion = "1.0.0" + DebugApiVersion = "1.0" ) var ( @@ -74,6 +74,10 @@ func (self *debugApi) Name() string { return DebugApiName } +func (self *debugApi) ApiVersion() string { + return DebugApiVersion +} + func (self *debugApi) PrintBlock(req *shared.Request) (interface{}, error) { args := new(BlockNumArg) if err := self.codec.Decode(req.Params, &args); err != nil { @@ -100,7 +104,7 @@ func 
(self *debugApi) DumpBlock(req *shared.Request) (interface{}, error) { return nil, nil } - return stateDb.Dump(), nil + return stateDb.RawDump(), nil } func (self *debugApi) GetBlockRlp(req *shared.Request) (interface{}, error) { diff --git a/rpc/api/debug_js.go b/rpc/api/debug_js.go index fe19a077d..35fecb75f 100644 --- a/rpc/api/debug_js.go +++ b/rpc/api/debug_js.go @@ -39,6 +39,13 @@ web3._extend({ params: 1, inputFormatter: [web3._extend.formatters.formatInputInt], outputFormatter: web3._extend.formatters.formatOutputString + }) , + new web3._extend.Method({ + name: 'dumpBlock', + call: 'debug_dumpBlock', + params: 1, + inputFormatter: [web3._extend.formatters.formatInputInt], + outputFormatter: function(obj) { return obj; } }) ], properties: diff --git a/rpc/api/personal.go b/rpc/api/personal.go index 08dc4bff5..7a6c91c82 100644 --- a/rpc/api/personal.go +++ b/rpc/api/personal.go @@ -10,6 +10,10 @@ import ( "github.com/ethereum/go-ethereum/xeth" ) +const ( + PersonalApiVersion = "1.0" +) + var ( // mapping between methods and handlers personalMapping = map[string]personalhandler{ @@ -65,6 +69,10 @@ func (self *personalApi) Name() string { return PersonalApiName } +func (self *personalApi) ApiVersion() string { + return PersonalApiVersion +} + func (self *personalApi) ListAccounts(req *shared.Request) (interface{}, error) { return self.xeth.Accounts(), nil } diff --git a/rpc/api/personal_js.go b/rpc/api/personal_js.go index ddd47f6a4..463a2c7f5 100644 --- a/rpc/api/personal_js.go +++ b/rpc/api/personal_js.go @@ -23,7 +23,7 @@ web3._extend({ properties: [ new web3._extend.Property({ - name: 'accounts', + name: 'listAccounts', getter: 'personal_listAccounts', outputFormatter: function(obj) { return obj; } }) diff --git a/rpc/api/shh.go b/rpc/api/shh.go index 04c53c93e..e83a7b22e 100644 --- a/rpc/api/shh.go +++ b/rpc/api/shh.go @@ -9,6 +9,10 @@ import ( "github.com/ethereum/go-ethereum/xeth" ) +const ( + ShhApiVersion = "1.0" +) + var ( // mapping between methods and handlers shhMapping = map[string]shhhandler{ @@ -71,6 +75,10 @@ func (self *shhApi) Name() string { return ShhApiName } +func (self *shhApi) ApiVersion() string { + return ShhApiVersion +} + func (self *shhApi) Version(req *shared.Request) (interface{}, error) { w := self.xeth.Whisper() if w == nil { diff --git a/rpc/api/txpool.go b/rpc/api/txpool.go index ebbe199b1..64550bdaf 100644 --- a/rpc/api/txpool.go +++ b/rpc/api/txpool.go @@ -7,6 +7,10 @@ import ( "github.com/ethereum/go-ethereum/xeth" ) +const ( + TxPoolApiVersion = "1.0" +) + var ( // mapping between methods and handlers txpoolMapping = map[string]txpoolhandler{ @@ -59,6 +63,10 @@ func (self *txPoolApi) Name() string { return TxPoolApiName } +func (self *txPoolApi) ApiVersion() string { + return TxPoolApiVersion +} + func (self *txPoolApi) Status(req *shared.Request) (interface{}, error) { return map[string]int{ "pending": self.ethereum.TxPool().GetTransactions().Len(), -- cgit v1.2.3 From ebaa9b9feb0325de33c69993cd9c215775618c7b Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Thu, 11 Jun 2015 14:54:03 +0200 Subject: removed binary files --- cmd/console/console | Bin 19332472 -> 0 bytes cmd/console/history | 7 ------- 2 files changed, 7 deletions(-) delete mode 100755 cmd/console/console delete mode 100755 cmd/console/history diff --git a/cmd/console/console b/cmd/console/console deleted file mode 100755 index 113fb5dac..000000000 Binary files a/cmd/console/console and /dev/null differ diff --git a/cmd/console/history b/cmd/console/history deleted file mode 
100755 index 728b6994a..000000000 --- a/cmd/console/history +++ /dev/null @@ -1,7 +0,0 @@ -eth.accounts -help -eth -eth.getBlock(21) -net -admin -eth -- cgit v1.2.3 From ec6a7b35f68d4fd0fbf8e59f70096765cff4bffc Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Thu, 11 Jun 2015 15:00:33 +0200 Subject: removed obsolete print statement --- cmd/console/js.go | 4 +-- rpc/jeth.go | 86 ------------------------------------------------------- 2 files changed, 1 insertion(+), 89 deletions(-) diff --git a/cmd/console/js.go b/cmd/console/js.go index 633b79315..5aff8acfd 100644 --- a/cmd/console/js.go +++ b/cmd/console/js.go @@ -84,9 +84,7 @@ func loadAutoCompletion(js *jsre, ipcpath string) { if err != nil { utils.Fatalf("Unable to determine supported modules - %v", err) } - - fmt.Printf("load autocompletion %v", modules) - + loadedModulesMethods = make(map[string][]string) for module, _ := range modules { loadedModulesMethods[module] = api.AutoCompletion[module] diff --git a/rpc/jeth.go b/rpc/jeth.go index 69df4500e..e578775bb 100644 --- a/rpc/jeth.go +++ b/rpc/jeth.go @@ -11,10 +11,6 @@ import ( "github.com/ethereum/go-ethereum/rpc/comms" "github.com/ethereum/go-ethereum/rpc/shared" "github.com/robertkrimen/otto" - "github.com/ethereum/go-ethereum/rpc/comms" - "github.com/ethereum/go-ethereum/rpc/codec" - "github.com/ethereum/go-ethereum/rpc/shared" - "reflect" ) type Jeth struct { @@ -173,85 +169,3 @@ func (self *Jeth) SendIpc(call otto.FunctionCall) (response otto.Value) { return } - -func (self *Jeth) SendIpc(call otto.FunctionCall) (response otto.Value) { - reqif, err := call.Argument(0).Export() - if err != nil { - return self.err(call, -32700, err.Error(), nil) - } - - client, err := comms.NewIpcClient(comms.IpcConfig{self.ipcpath}, codec.JSON) - if err != nil { - fmt.Println("Unable to connect to geth.") - return self.err(call, -32603, err.Error(), -1) - } - defer client.Close() - - jsonreq, err := json.Marshal(reqif) - var reqs []RpcRequest - batch := true - err = json.Unmarshal(jsonreq, &reqs) - if err != nil { - reqs = make([]RpcRequest, 1) - err = json.Unmarshal(jsonreq, &reqs[0]) - batch = false - } - - call.Otto.Set("response_len", len(reqs)) - call.Otto.Run("var ret_response = new Array(response_len);") - - for i, req := range reqs { - err := client.Send(&req) - if err != nil { - fmt.Println("Error send request:", err) - return self.err(call, -32603, err.Error(), req.Id) - } - - respif, err := client.Recv() - if err != nil { - fmt.Println("Error recv response:", err) - return self.err(call, -32603, err.Error(), req.Id) - } - - if res, ok := respif.(shared.SuccessResponse); ok { - call.Otto.Set("ret_id", res.Id) - call.Otto.Set("ret_jsonrpc", res.Jsonrpc) - resObj, _ := json.Marshal(res.Result) - call.Otto.Set("ret_result", string(resObj)) - call.Otto.Set("response_idx", i) - - response, err = call.Otto.Run(` - ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) }; - `) - } else if res, ok := respif.(shared.ErrorResponse); ok { - fmt.Printf("Error: %s (%d)\n", res.Error.Message, res.Error.Code) - - call.Otto.Set("ret_id", res.Id) - call.Otto.Set("ret_jsonrpc", res.Jsonrpc) - call.Otto.Set("ret_error", res.Error) - call.Otto.Set("response_idx", i) - - response, _ = call.Otto.Run(` - ret_response = { jsonrpc: ret_jsonrpc, id: ret_id, error: ret_error }; - `) - return - } else { - fmt.Printf("unexpected response\n", reflect.TypeOf(respif)) - } - } - - if !batch { - call.Otto.Run("ret_response = ret_response[0];") - } - - if 
call.Argument(1).IsObject() { - call.Otto.Set("callback", call.Argument(1)) - call.Otto.Run(` - if (Object.prototype.toString.call(callback) == '[object Function]') { - callback(null, ret_response); - } - `) - } - - return -} -- cgit v1.2.3 From b3c07f167f8b82d1079abe6e15cd1f480712b030 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Thu, 11 Jun 2015 15:06:12 +0200 Subject: fixed incomplete merge action --- cmd/console/js.go | 2 +- rpc/comms/ipc_windows.go | 702 ----------------------------------------------- 2 files changed, 1 insertion(+), 703 deletions(-) diff --git a/cmd/console/js.go b/cmd/console/js.go index 5aff8acfd..a5fdaacc2 100644 --- a/cmd/console/js.go +++ b/cmd/console/js.go @@ -84,7 +84,7 @@ func loadAutoCompletion(js *jsre, ipcpath string) { if err != nil { utils.Fatalf("Unable to determine supported modules - %v", err) } - + loadedModulesMethods = make(map[string][]string) for module, _ := range modules { loadedModulesMethods[module] = api.AutoCompletion[module] diff --git a/rpc/comms/ipc_windows.go b/rpc/comms/ipc_windows.go index bee781744..c48dfb7fb 100644 --- a/rpc/comms/ipc_windows.go +++ b/rpc/comms/ipc_windows.go @@ -1,4 +1,3 @@ -<<<<<<< HEAD // +build windows package comms @@ -695,704 +694,3 @@ func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { return nil } -======= -// +build windows - -package comms - -import ( - "fmt" - "io" - "net" - "os" - "sync" - "syscall" - "time" - "unsafe" - - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/rpc/api" - "github.com/ethereum/go-ethereum/rpc/codec" - "github.com/ethereum/go-ethereum/rpc/shared" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") - procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") - procCreateEventW = modkernel32.NewProc("CreateEventW") - procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") -) - -func createNamedPipe(name *uint16, openMode uint32, pipeMode uint32, maxInstances uint32, outBufSize uint32, inBufSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(openMode), uintptr(pipeMode), uintptr(maxInstances), uintptr(outBufSize), uintptr(inBufSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func cancelIoEx(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func connectNamedPipe(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func disconnectNamedPipe(handle syscall.Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func waitNamedPipe(name *uint16, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) { - var _p0 uint32 - if manualReset { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if initialState { - _p1 = 1 - } else { - _p1 = 0 - } - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(sa)), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(name)), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, transferred *uint32, wait bool) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transferred)), uintptr(_p0), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - - -const ( - // openMode - pipe_access_duplex = 0x3 - pipe_access_inbound = 0x1 - pipe_access_outbound = 0x2 - - // openMode write flags - file_flag_first_pipe_instance = 0x00080000 - file_flag_write_through = 0x80000000 - file_flag_overlapped = 0x40000000 - - // openMode ACL flags - write_dac = 0x00040000 - write_owner = 0x00080000 - access_system_security = 0x01000000 - - // pipeMode - pipe_type_byte = 0x0 - pipe_type_message = 0x4 - - // pipeMode read mode flags - pipe_readmode_byte = 0x0 - pipe_readmode_message = 0x2 - - // pipeMode wait mode flags - pipe_wait = 0x0 - pipe_nowait = 0x1 - - // pipeMode remote-client mode flags - pipe_accept_remote_clients = 0x0 - pipe_reject_remote_clients = 0x8 - - pipe_unlimited_instances = 255 - - nmpwait_wait_forever = 0xFFFFFFFF - - // the two not-an-errors below occur if a client connects to the pipe between - // the server's CreateNamedPipe and ConnectNamedPipe calls. - error_no_data syscall.Errno = 0xE8 - error_pipe_connected syscall.Errno = 0x217 - error_pipe_busy syscall.Errno = 0xE7 - error_sem_timeout syscall.Errno = 0x79 - - error_bad_pathname syscall.Errno = 0xA1 - error_invalid_name syscall.Errno = 0x7B - - error_io_incomplete syscall.Errno = 0x3e4 -) - -var _ net.Conn = (*PipeConn)(nil) -var _ net.Listener = (*PipeListener)(nil) - -// ErrClosed is the error returned by PipeListener.Accept when Close is called -// on the PipeListener. -var ErrClosed = PipeError{"Pipe has been closed.", false} - -// PipeError is an error related to a call to a pipe -type PipeError struct { - msg string - timeout bool -} - -// Error implements the error interface -func (e PipeError) Error() string { - return e.msg -} - -// Timeout implements net.AddrError.Timeout() -func (e PipeError) Timeout() bool { - return e.timeout -} - -// Temporary implements net.AddrError.Temporary() -func (e PipeError) Temporary() bool { - return false -} - -// Dial connects to a named pipe with the given address. 
If the specified pipe is not available, -// it will wait indefinitely for the pipe to become available. -// -// The address must be of the form \\.\\pipe\ for local pipes and \\\pipe\ -// for remote pipes. -// -// Dial will return a PipeError if you pass in a badly formatted pipe name. -// -// Examples: -// // local pipe -// conn, err := Dial(`\\.\pipe\mypipename`) -// -// // remote pipe -// conn, err := Dial(`\\othercomp\pipe\mypipename`) -func Dial(address string) (*PipeConn, error) { - for { - conn, err := dial(address, nmpwait_wait_forever) - if err == nil { - return conn, nil - } - if isPipeNotReady(err) { - <-time.After(100 * time.Millisecond) - continue - } - return nil, err - } -} - -// DialTimeout acts like Dial, but will time out after the duration of timeout -func DialTimeout(address string, timeout time.Duration) (*PipeConn, error) { - deadline := time.Now().Add(timeout) - - now := time.Now() - for now.Before(deadline) { - millis := uint32(deadline.Sub(now) / time.Millisecond) - conn, err := dial(address, millis) - if err == nil { - return conn, nil - } - if err == error_sem_timeout { - // This is WaitNamedPipe's timeout error, so we know we're done - return nil, PipeError{fmt.Sprintf( - "Timed out waiting for pipe '%s' to come available", address), true} - } - if isPipeNotReady(err) { - left := deadline.Sub(time.Now()) - retry := 100 * time.Millisecond - if left > retry { - <-time.After(retry) - } else { - <-time.After(left - time.Millisecond) - } - now = time.Now() - continue - } - return nil, err - } - return nil, PipeError{fmt.Sprintf( - "Timed out waiting for pipe '%s' to come available", address), true} -} - -// isPipeNotReady checks the error to see if it indicates the pipe is not ready -func isPipeNotReady(err error) bool { - // Pipe Busy means another client just grabbed the open pipe end, - // and the server hasn't made a new one yet. - // File Not Found means the server hasn't created the pipe yet. - // Neither is a fatal error. - - return err == syscall.ERROR_FILE_NOT_FOUND || err == error_pipe_busy -} - -// newOverlapped creates a structure used to track asynchronous -// I/O requests that have been issued. -func newOverlapped() (*syscall.Overlapped, error) { - event, err := createEvent(nil, true, true, nil) - if err != nil { - return nil, err - } - return &syscall.Overlapped{HEvent: event}, nil -} - -// waitForCompletion waits for an asynchronous I/O request referred to by overlapped to complete. -// This function returns the number of bytes transferred by the operation and an error code if -// applicable (nil otherwise). -func waitForCompletion(handle syscall.Handle, overlapped *syscall.Overlapped) (uint32, error) { - _, err := syscall.WaitForSingleObject(overlapped.HEvent, syscall.INFINITE) - if err != nil { - return 0, err - } - var transferred uint32 - err = getOverlappedResult(handle, overlapped, &transferred, true) - return transferred, err -} - -// dial is a helper to initiate a connection to a named pipe that has been started by a server. -// The timeout is only enforced if the pipe server has already created the pipe, otherwise -// this function will return immediately. -func dial(address string, timeout uint32) (*PipeConn, error) { - name, err := syscall.UTF16PtrFromString(string(address)) - if err != nil { - return nil, err - } - // If at least one instance of the pipe has been created, this function - // will wait timeout milliseconds for it to become available. 
- // It will return immediately regardless of timeout, if no instances - // of the named pipe have been created yet. - // If this returns with no error, there is a pipe available. - if err := waitNamedPipe(name, timeout); err != nil { - if err == error_bad_pathname { - // badly formatted pipe name - return nil, badAddr(address) - } - return nil, err - } - pathp, err := syscall.UTF16PtrFromString(address) - if err != nil { - return nil, err - } - handle, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, - uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_OVERLAPPED, 0) - if err != nil { - return nil, err - } - return &PipeConn{handle: handle, addr: PipeAddr(address)}, nil -} - -// Listen returns a new PipeListener that will listen on a pipe with the given -// address. The address must be of the form \\.\pipe\ -// -// Listen will return a PipeError for an incorrectly formatted pipe name. -func Listen(address string) (*PipeListener, error) { - handle, err := createPipe(address, true) - if err == error_invalid_name { - return nil, badAddr(address) - } - if err != nil { - return nil, err - } - return &PipeListener{ - addr: PipeAddr(address), - handle: handle, - }, nil -} - -// PipeListener is a named pipe listener. Clients should typically -// use variables of type net.Listener instead of assuming named pipe. -type PipeListener struct { - addr PipeAddr - handle syscall.Handle - closed bool - - // acceptHandle contains the current handle waiting for - // an incoming connection or nil. - acceptHandle syscall.Handle - // acceptOverlapped is set before waiting on a connection. - // If not waiting, it is nil. - acceptOverlapped *syscall.Overlapped - // acceptMutex protects the handle and overlapped structure. - acceptMutex sync.Mutex -} - -// Accept implements the Accept method in the net.Listener interface; it -// waits for the next call and returns a generic net.Conn. -func (l *PipeListener) Accept() (net.Conn, error) { - c, err := l.AcceptPipe() - for err == error_no_data { - // Ignore clients that connect and immediately disconnect. - c, err = l.AcceptPipe() - } - if err != nil { - return nil, err - } - return c, nil -} - -// AcceptPipe accepts the next incoming call and returns the new connection. -// It might return an error if a client connected and immediately cancelled -// the connection. -func (l *PipeListener) AcceptPipe() (*PipeConn, error) { - if l == nil || l.addr == "" || l.closed { - return nil, syscall.EINVAL - } - - // the first time we call accept, the handle will have been created by the Listen - // call. This is to prevent race conditions where the client thinks the server - // isn't listening because it hasn't actually called create yet. 
After the first time, we'll - // have to create a new handle each time - handle := l.handle - if handle == 0 { - var err error - handle, err = createPipe(string(l.addr), false) - if err != nil { - return nil, err - } - } else { - l.handle = 0 - } - - overlapped, err := newOverlapped() - if err != nil { - return nil, err - } - defer syscall.CloseHandle(overlapped.HEvent) - if err := connectNamedPipe(handle, overlapped); err != nil && err != error_pipe_connected { - if err == error_io_incomplete || err == syscall.ERROR_IO_PENDING { - l.acceptMutex.Lock() - l.acceptOverlapped = overlapped - l.acceptHandle = handle - l.acceptMutex.Unlock() - defer func() { - l.acceptMutex.Lock() - l.acceptOverlapped = nil - l.acceptHandle = 0 - l.acceptMutex.Unlock() - }() - - _, err = waitForCompletion(handle, overlapped) - } - if err == syscall.ERROR_OPERATION_ABORTED { - // Return error compatible to net.Listener.Accept() in case the - // listener was closed. - return nil, ErrClosed - } - if err != nil { - return nil, err - } - } - return &PipeConn{handle: handle, addr: l.addr}, nil -} - -// Close stops listening on the address. -// Already Accepted connections are not closed. -func (l *PipeListener) Close() error { - if l.closed { - return nil - } - l.closed = true - if l.handle != 0 { - err := disconnectNamedPipe(l.handle) - if err != nil { - return err - } - err = syscall.CloseHandle(l.handle) - if err != nil { - return err - } - l.handle = 0 - } - l.acceptMutex.Lock() - defer l.acceptMutex.Unlock() - if l.acceptOverlapped != nil && l.acceptHandle != 0 { - // Cancel the pending IO. This call does not block, so it is safe - // to hold onto the mutex above. - if err := cancelIoEx(l.acceptHandle, l.acceptOverlapped); err != nil { - return err - } - err := syscall.CloseHandle(l.acceptOverlapped.HEvent) - if err != nil { - return err - } - l.acceptOverlapped.HEvent = 0 - err = syscall.CloseHandle(l.acceptHandle) - if err != nil { - return err - } - l.acceptHandle = 0 - } - return nil -} - -// Addr returns the listener's network address, a PipeAddr. -func (l *PipeListener) Addr() net.Addr { return l.addr } - -// PipeConn is the implementation of the net.Conn interface for named pipe connections. -type PipeConn struct { - handle syscall.Handle - addr PipeAddr - - // these aren't actually used yet - readDeadline *time.Time - writeDeadline *time.Time -} - -type iodata struct { - n uint32 - err error -} - -// completeRequest looks at iodata to see if a request is pending. If so, it waits for it to either complete or to -// abort due to hitting the specified deadline. Deadline may be set to nil to wait forever. If no request is pending, -// the content of iodata is returned. -func (c *PipeConn) completeRequest(data iodata, deadline *time.Time, overlapped *syscall.Overlapped) (int, error) { - if data.err == error_io_incomplete || data.err == syscall.ERROR_IO_PENDING { - var timer <-chan time.Time - if deadline != nil { - if timeDiff := deadline.Sub(time.Now()); timeDiff > 0 { - timer = time.After(timeDiff) - } - } - done := make(chan iodata) - go func() { - n, err := waitForCompletion(c.handle, overlapped) - done <- iodata{n, err} - }() - select { - case data = <-done: - case <-timer: - syscall.CancelIoEx(c.handle, overlapped) - data = iodata{0, timeout(c.addr.String())} - } - } - // Windows will produce ERROR_BROKEN_PIPE upon closing - // a handle on the other end of a connection. Go RPC - // expects an io.EOF error in this case. 
- if data.err == syscall.ERROR_BROKEN_PIPE { - data.err = io.EOF - } - return int(data.n), data.err -} - -// Read implements the net.Conn Read method. -func (c *PipeConn) Read(b []byte) (int, error) { - // Use ReadFile() rather than Read() because the latter - // contains a workaround that eats ERROR_BROKEN_PIPE. - overlapped, err := newOverlapped() - if err != nil { - return 0, err - } - defer syscall.CloseHandle(overlapped.HEvent) - var n uint32 - err = syscall.ReadFile(c.handle, b, &n, overlapped) - return c.completeRequest(iodata{n, err}, c.readDeadline, overlapped) -} - -// Write implements the net.Conn Write method. -func (c *PipeConn) Write(b []byte) (int, error) { - overlapped, err := newOverlapped() - if err != nil { - return 0, err - } - defer syscall.CloseHandle(overlapped.HEvent) - var n uint32 - err = syscall.WriteFile(c.handle, b, &n, overlapped) - return c.completeRequest(iodata{n, err}, c.writeDeadline, overlapped) -} - -// Close closes the connection. -func (c *PipeConn) Close() error { - return syscall.CloseHandle(c.handle) -} - -// LocalAddr returns the local network address. -func (c *PipeConn) LocalAddr() net.Addr { - return c.addr -} - -// RemoteAddr returns the remote network address. -func (c *PipeConn) RemoteAddr() net.Addr { - // not sure what to do here, we don't have remote addr.... - return c.addr -} - -// SetDeadline implements the net.Conn SetDeadline method. -// Note that timeouts are only supported on Windows Vista/Server 2008 and above -func (c *PipeConn) SetDeadline(t time.Time) error { - c.SetReadDeadline(t) - c.SetWriteDeadline(t) - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -// Note that timeouts are only supported on Windows Vista/Server 2008 and above -func (c *PipeConn) SetReadDeadline(t time.Time) error { - c.readDeadline = &t - return nil -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -// Note that timeouts are only supported on Windows Vista/Server 2008 and above -func (c *PipeConn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = &t - return nil -} - -// PipeAddr represents the address of a named pipe. -type PipeAddr string - -// Network returns the address's network name, "pipe". -func (a PipeAddr) Network() string { return "pipe" } - -// String returns the address of the pipe -func (a PipeAddr) String() string { - return string(a) -} - -// createPipe is a helper function to make sure we always create pipes -// with the same arguments, since subsequent calls to create pipe need -// to use the same arguments as the first one. If first is set, fail -// if the pipe already exists. 
-func createPipe(address string, first bool) (syscall.Handle, error) { - n, err := syscall.UTF16PtrFromString(address) - if err != nil { - return 0, err - } - mode := uint32(pipe_access_duplex | syscall.FILE_FLAG_OVERLAPPED) - if first { - mode |= file_flag_first_pipe_instance - } - return createNamedPipe(n, - mode, - pipe_type_byte, - pipe_unlimited_instances, - 512, 512, 0, nil) -} - -func badAddr(addr string) PipeError { - return PipeError{fmt.Sprintf("Invalid pipe address '%s'.", addr), false} -} -func timeout(addr string) PipeError { - return PipeError{fmt.Sprintf("Pipe IO timed out waiting for '%s'", addr), true} -} - - - -func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) { - c, err := Dial(cfg.Endpoint) - if err != nil { - return nil, err - } - - return &ipcClient{codec.New(c)}, nil -} - -func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error { - os.Remove(cfg.Endpoint) // in case it still exists from a previous run - - l, err := Listen(cfg.Endpoint) - if err != nil { - return err - } - os.Chmod(cfg.Endpoint, 0600) - - go func() { - for { - conn, err := l.Accept() - if err != nil { - glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err) - continue - } - - go func(conn net.Conn) { - codec := codec.New(conn) - - for { - req, err := codec.ReadRequest() - if err == io.EOF { - codec.Close() - return - } else if err != nil { - glog.V(logger.Error).Infof("IPC recv err - %v\n", err) - codec.Close() - return - } - - var rpcResponse interface{} - res, err := api.Execute(req) - - rpcResponse = shared.NewRpcResponse(req.Id, req.Jsonrpc, res, err) - err = codec.WriteResponse(rpcResponse) - if err != nil { - glog.V(logger.Error).Infof("IPC send err - %v\n", err) - codec.Close() - return - } - } - }(conn) - } - }() - - glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint) - - return nil -} ->>>>>>> fixed windows build problem -- cgit v1.2.3 From 22080e1fdd8eafd6201bb5a06ac19859bcc6b432 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Thu, 11 Jun 2015 20:35:19 +0200 Subject: ipc socket always used default path --- cmd/geth/main.go | 2 +- cmd/utils/flags.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 8e55b310c..11ea23165 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -308,7 +308,7 @@ func console(ctx *cli.Context) { ethereum, ctx.String(utils.JSpathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), - ctx.GlobalString(utils.IPCPathFlag.Name), + filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "geth.ipc"), true, nil, ) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 4c3690d49..80f21d50a 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -387,7 +387,7 @@ func MakeAccountManager(ctx *cli.Context) *accounts.Manager { func StartIPC(eth *eth.Ethereum, ctx *cli.Context) error { config := comms.IpcConfig{ - Endpoint: ctx.GlobalString(IPCPathFlag.Name), + Endpoint: filepath.Join(ctx.GlobalString(DataDirFlag.Name), "geth.ipc"), } xeth := xeth.New(eth, nil) -- cgit v1.2.3 From 359e6414e50df415caa1d4411224c6d48b6cb798 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Fri, 12 Jun 2015 09:32:37 +0200 Subject: fixed windows ipc path issue --- cmd/console/main.go | 2 +- cmd/geth/main.go | 4 ++-- cmd/utils/flags.go | 22 +++++++++++++++++++++- common/path.go | 3 +++ 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/cmd/console/main.go b/cmd/console/main.go index 9020a12fe..365dbec56 100644 --- a/cmd/console/main.go 
+++ b/cmd/console/main.go @@ -93,7 +93,7 @@ func main() { func run(ctx *cli.Context) { jspath := ctx.GlobalString(utils.JSpathFlag.Name) - ipcpath := ctx.GlobalString(utils.IPCPathFlag.Name) + ipcpath := utils.IpcSocketPath(ctx) repl := newJSRE(jspath, ipcpath) repl.welcome(ipcpath) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 11ea23165..0f2438cfd 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -308,7 +308,7 @@ func console(ctx *cli.Context) { ethereum, ctx.String(utils.JSpathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), - filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "geth.ipc"), + utils.IpcSocketPath(ctx), true, nil, ) @@ -330,7 +330,7 @@ func execJSFiles(ctx *cli.Context) { ethereum, ctx.String(utils.JSpathFlag.Name), ctx.GlobalString(utils.RPCCORSDomainFlag.Name), - ctx.GlobalString(utils.IPCPathFlag.Name), + utils.IpcSocketPath(ctx), false, nil, ) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 80f21d50a..3626c36f2 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -385,9 +385,29 @@ func MakeAccountManager(ctx *cli.Context) *accounts.Manager { return accounts.NewManager(ks) } +func IpcSocketPath(ctx *cli.Context) (ipcpath string) { + + if common.IsWindows() { + ipcpath = common.DefaultIpcPath() + if ipcpath != ctx.GlobalString(IPCPathFlag.Name) { + ipcpath = ctx.GlobalString(IPCPathFlag.Name) + } + } else { + ipcpath = common.DefaultIpcPath() + if ctx.GlobalString(IPCPathFlag.Name) != common.DefaultIpcPath() { + ipcpath = ctx.GlobalString(IPCPathFlag.Name) + } else if ctx.GlobalString(DataDirFlag.Name) != "" && + ctx.GlobalString(DataDirFlag.Name) != common.DefaultDataDir() { + ipcpath = filepath.Join(ctx.GlobalString(DataDirFlag.Name), "geth.ipc") + } + } + + return +} + func StartIPC(eth *eth.Ethereum, ctx *cli.Context) error { config := comms.IpcConfig{ - Endpoint: filepath.Join(ctx.GlobalString(DataDirFlag.Name), "geth.ipc"), + Endpoint: IpcSocketPath(ctx), } xeth := xeth.New(eth, nil) diff --git a/common/path.go b/common/path.go index 63a23abcd..6e3259656 100644 --- a/common/path.go +++ b/common/path.go @@ -95,6 +95,9 @@ func DefaultDataDir() string { } func DefaultIpcPath() string { + if runtime.GOOS == "windows" { + return `\\.\pipe\geth.ipc` + } return filepath.Join(DefaultDataDir(), "geth.ipc") } -- cgit v1.2.3 From 55a796b7c3a9560f27b0bfc7fe064e84d1b36608 Mon Sep 17 00:00:00 2001 From: Bas van Kervel Date: Fri, 12 Jun 2015 09:42:57 +0200 Subject: removed obsolete console flag --- cmd/console/main.go | 1 - cmd/utils/flags.go | 1 - 2 files changed, 2 deletions(-) diff --git a/cmd/console/main.go b/cmd/console/main.go index 365dbec56..e8dd412ba 100644 --- a/cmd/console/main.go +++ b/cmd/console/main.go @@ -52,7 +52,6 @@ func init() { app.Action = run app.Flags = []cli.Flag{ - utils.IPCDisabledFlag, utils.IPCPathFlag, utils.VerbosityFlag, utils.JSpathFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 3626c36f2..ec29598fb 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -386,7 +386,6 @@ func MakeAccountManager(ctx *cli.Context) *accounts.Manager { } func IpcSocketPath(ctx *cli.Context) (ipcpath string) { - if common.IsWindows() { ipcpath = common.DefaultIpcPath() if ipcpath != ctx.GlobalString(IPCPathFlag.Name) { -- cgit v1.2.3 From 02d629af8f63ca310c9419beba6d915592c43b25 Mon Sep 17 00:00:00 2001 From: obscuren Date: Fri, 12 Jun 2015 11:18:17 +0200 Subject: core/vm: fixed printable characters using unicode instead --- core/block_processor.go | 4 ---- core/vm/logger.go | 4 ++-- 2 files changed, 
2 insertions(+), 6 deletions(-) diff --git a/core/block_processor.go b/core/block_processor.go index 3ec3c585f..54378b2b9 100644 --- a/core/block_processor.go +++ b/core/block_processor.go @@ -71,14 +71,10 @@ func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, statedb *state.StateDB, block *types.Block, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) { // If we are mining this block and validating we want to set the logs back to 0 - //statedb.EmptyLogs() cb := statedb.GetStateObject(coinbase.Address()) _, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, block), tx, cb) if err != nil && (IsNonceErr(err) || state.IsGasLimitErr(err) || IsInvalidTxErr(err)) { - // If the account is managed, remove the invalid nonce. - //from, _ := tx.From() - //self.bc.TxState().RemoveNonce(from, tx.Nonce()) return nil, nil, err } diff --git a/core/vm/logger.go b/core/vm/logger.go index 96d07dab5..061e3745b 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -3,7 +3,7 @@ package vm import ( "fmt" "os" - "unicode/utf8" + "unicode" "github.com/ethereum/go-ethereum/common" ) @@ -27,7 +27,7 @@ func StdErrFormat(logs []StructLog) { for _, r := range data { if r == 0 { str += "." - } else if utf8.ValidRune(rune(r)) { + } else if unicode.IsPrint(rune(r)) { str += fmt.Sprintf("%s", string(r)) } else { str += "?" -- cgit v1.2.3 From 287f99089181c1eaa6f25a6b531e476b631a201a Mon Sep 17 00:00:00 2001 From: obscuren Date: Fri, 12 Jun 2015 13:35:14 +0200 Subject: core/vm: Improved error reporting for trace logging --- core/vm/environment.go | 1 + core/vm/errors.go | 12 +++------ core/vm/logger.go | 9 +++++-- core/vm/vm.go | 69 +++++++++++++++++++++++++------------------------- 4 files changed, 45 insertions(+), 46 deletions(-) diff --git a/core/vm/environment.go b/core/vm/environment.go index 5c04e7022..c103049a2 100644 --- a/core/vm/environment.go +++ b/core/vm/environment.go @@ -45,6 +45,7 @@ type StructLog struct { Memory []byte Stack []*big.Int Storage map[common.Hash][]byte + Err error } type Account interface { diff --git a/core/vm/errors.go b/core/vm/errors.go index fc3459de0..799eb6797 100644 --- a/core/vm/errors.go +++ b/core/vm/errors.go @@ -2,20 +2,14 @@ package vm import ( "fmt" + "github.com/ethereum/go-ethereum/params" - "math/big" ) -type OutOfGasError struct { - req, has *big.Int -} - -func OOG(req, has *big.Int) OutOfGasError { - return OutOfGasError{req, has} -} +type OutOfGasError struct{} func (self OutOfGasError) Error() string { - return fmt.Sprintf("out of gas! 
require %v, have %v", self.req, self.has) + return "Out Of Gas" } func IsOOGErr(err error) bool { diff --git a/core/vm/logger.go b/core/vm/logger.go index 061e3745b..0e2a417ae 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -9,9 +9,14 @@ import ( ) func StdErrFormat(logs []StructLog) { - fmt.Fprintf(os.Stderr, "VM Stats %d ops\n", len(logs)) + fmt.Fprintf(os.Stderr, "VM STAT %d OPs\n", len(logs)) for _, log := range logs { - fmt.Fprintf(os.Stderr, "PC %08d: %s GAS: %v COST: %v\n", log.Pc, log.Op, log.Gas, log.GasCost) + fmt.Fprintf(os.Stderr, "PC %08d: %s GAS: %v COST: %v", log.Pc, log.Op, log.Gas, log.GasCost) + if log.Err != nil { + fmt.Fprintf(os.Stderr, " ERROR: %v", log.Err) + } + fmt.Fprintf(os.Stderr, "\n") + fmt.Fprintln(os.Stderr, "STACK =", len(log.Stack)) for i := len(log.Stack) - 1; i >= 0; i-- { diff --git a/core/vm/vm.go b/core/vm/vm.go index 4c0ab0f47..c5ad761f6 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -43,6 +43,31 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { code = context.Code value = context.value price = context.Price + + op OpCode // current opcode + codehash = crypto.Sha3Hash(code) // codehash is used when doing jump dest caching + mem = NewMemory() // bound memory + stack = newstack() // local stack + statedb = self.env.State() // current state + // For optimisation reason we're using uint64 as the program counter. + // It's theoretically possible to go above 2^64. The YP defines the PC to be uint256. Pratically much less so feasible. + pc = uint64(0) // program counter + + // jump evaluates and checks whether the given jump destination is a valid one + // if valid move the `pc` otherwise return an error. + jump = func(from uint64, to *big.Int) error { + if !context.jumpdests.has(codehash, code, to) { + nop := context.GetOp(to.Uint64()) + return fmt.Errorf("invalid jump destination (%v) %v", nop, to) + } + + pc = to.Uint64() + + return nil + } + + newMemSize *big.Int + cost *big.Int ) // User defer pattern to check for an error and, based on the error being nil or not, use all gas and return. @@ -52,6 +77,7 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { } if err != nil { + self.log(pc, op, context.Gas, cost, mem, stack, context, err) // In case of a VM exception (known exceptions) all gas consumed (panics NOT included). context.UseGas(context.Gas) @@ -71,30 +97,6 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { return context.Return(nil), nil } - var ( - op OpCode // current opcode - codehash = crypto.Sha3Hash(code) // codehash is used when doing jump dest caching - mem = NewMemory() // bound memory - stack = newstack() // local stack - statedb = self.env.State() // current state - // For optimisation reason we're using uint64 as the program counter. - // It's theoretically possible to go above 2^64. The YP defines the PC to be uint256. Pratically much less so feasible. - pc = uint64(0) // program counter - - // jump evaluates and checks whether the given jump destination is a valid one - // if valid move the `pc` otherwise return an error. 
- jump = func(from uint64, to *big.Int) error { - if !context.jumpdests.has(codehash, code, to) { - nop := context.GetOp(to.Uint64()) - return fmt.Errorf("invalid jump destination (%v) %v", nop, to) - } - - pc = to.Uint64() - - return nil - } - ) - for { // The base for all big integer arithmetic base := new(big.Int) @@ -103,24 +105,23 @@ func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) { op = context.GetOp(pc) // calculate the new memory size and gas price for the current executing opcode - newMemSize, gas, err := self.calculateGasAndSize(context, caller, op, statedb, mem, stack) + newMemSize, cost, err = self.calculateGasAndSize(context, caller, op, statedb, mem, stack) if err != nil { return nil, err } - self.log(pc, op, context.Gas, gas, mem, stack, context) - // Use the calculated gas. When insufficient gas is present, use all gas and return an // Out Of Gas error - if !context.UseGas(gas) { - tmp := new(big.Int).Set(context.Gas) + if !context.UseGas(cost) { context.UseGas(context.Gas) - return context.Return(nil), OOG(gas, tmp) + return context.Return(nil), OutOfGasError{} } // Resize the memory calculated previously mem.Resize(newMemSize.Uint64()) + // Add a log message + self.log(pc, op, context.Gas, cost, mem, stack, context, nil) switch op { case ADD: @@ -783,15 +784,13 @@ func (self *Vm) RunPrecompiled(p *PrecompiledAccount, input []byte, context *Con return context.Return(ret), nil } else { - tmp := new(big.Int).Set(context.Gas) - - return nil, OOG(gas, tmp) + return nil, OutOfGasError{} } } // log emits a log event to the environment for each opcode encountered. This is not to be confused with the // LOG* opcode. -func (self *Vm) log(pc uint64, op OpCode, gas, cost *big.Int, memory *Memory, stack *stack, context *Context) { +func (self *Vm) log(pc uint64, op OpCode, gas, cost *big.Int, memory *Memory, stack *stack, context *Context, err error) { if Debug { mem := make([]byte, len(memory.Data())) copy(mem, memory.Data()) @@ -804,7 +803,7 @@ func (self *Vm) log(pc uint64, op OpCode, gas, cost *big.Int, memory *Memory, st storage[common.BytesToHash(k)] = v }) - self.env.AddStructLog(StructLog{pc, op, new(big.Int).Set(gas), cost, mem, stck, storage}) + self.env.AddStructLog(StructLog{pc, op, new(big.Int).Set(gas), cost, mem, stck, storage, err}) } } -- cgit v1.2.3 From 90c4493a105ef33c1d10735489dce5a42c30b282 Mon Sep 17 00:00:00 2001 From: obscuren Date: Fri, 12 Jun 2015 13:36:38 +0200 Subject: eth, core: interupt the chain processing on stop Added an additional channel which is used to interupt the chain manager when it's processing blocks. 
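[Editor's note] This commit's pattern is a quit channel that Stop() closes once and that the block-processing loop polls with a non-blocking select, aborting early on shutdown. The following is a minimal, self-contained sketch of that pattern only; the type and method names (processor, process) are illustrative and are not the real ChainManager API, which appears in the diff below.

package main

import "fmt"

type processor struct {
	quit chan struct{} // closed exactly once to signal all loops to stop
}

func newProcessor() *processor {
	return &processor{quit: make(chan struct{})}
}

// Stop signals any in-flight processing loop to abort early.
func (p *processor) Stop() {
	close(p.quit)
}

// process walks the batch, bailing out as soon as the quit channel is closed.
func (p *processor) process(blocks []int) int {
	done := 0
loop:
	for _, b := range blocks {
		select {
		case <-p.quit:
			fmt.Println("premature abort during chain processing")
			break loop
		default:
			_ = b // expensive per-block validation/state transition would happen here
			done++
		}
	}
	return done
}

func main() {
	p := newProcessor()
	p.Stop() // simulate shutdown arriving before/while processing
	fmt.Println("processed:", p.process([]int{1, 2, 3}))
}

The actual change below wires this into ChainManager.InsertChain via a procInterupt channel closed from Stop(). [End note]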
--- core/chain_manager.go | 198 ++++++++++++++++++++++++++------------------------ eth/backend.go | 2 +- 2 files changed, 105 insertions(+), 95 deletions(-) diff --git a/core/chain_manager.go b/core/chain_manager.go index be64b54f4..8629fb4ce 100644 --- a/core/chain_manager.go +++ b/core/chain_manager.go @@ -100,8 +100,9 @@ type ChainManager struct { cache *BlockCache futureBlocks *BlockCache - quit chan struct{} - wg sync.WaitGroup + quit chan struct{} + procInterupt chan struct{} // interupt signaler for block processing + wg sync.WaitGroup pow pow.PoW } @@ -113,6 +114,7 @@ func NewChainManager(genesis *types.Block, blockDb, stateDb common.Database, pow genesisBlock: GenesisBlock(42, stateDb), eventMux: mux, quit: make(chan struct{}), + procInterupt: make(chan struct{}), cache: NewBlockCache(blockCacheLimit), pow: pow, } @@ -516,6 +518,7 @@ func (self *ChainManager) CalcTotalDiff(block *types.Block) (*big.Int, error) { func (bc *ChainManager) Stop() { close(bc.quit) + close(bc.procInterupt) bc.wg.Wait() @@ -568,119 +571,126 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { defer close(nonceQuit) txcount := 0 +done: for i, block := range chain { - bstart := time.Now() - // Wait for block i's nonce to be verified before processing - // its state transition. - for !nonceChecked[i] { - r := <-nonceDone - nonceChecked[r.i] = true - if !r.valid { - block := chain[r.i] - return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} + select { + case <-self.procInterupt: + glog.V(logger.Debug).Infoln("Premature abort during chain processing") + break done + default: + bstart := time.Now() + // Wait for block i's nonce to be verified before processing + // its state transition. + for !nonceChecked[i] { + r := <-nonceDone + nonceChecked[r.i] = true + if !r.valid { + block := chain[r.i] + return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} + } } - } - if BadHashes[block.Hash()] { - err := fmt.Errorf("Found known bad hash in chain %x", block.Hash()) - blockErr(block, err) - return i, err - } - - // Setting block.Td regardless of error (known for example) prevents errors down the line - // in the protocol handler - block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash()))) - - // Call in to the block processor and check for errors. It's likely that if one block fails - // all others will fail too (unless a known block is returned). - logs, err := self.processor.Process(block) - if err != nil { - if IsKnownBlockErr(err) { - stats.ignored++ - continue + if BadHashes[block.Hash()] { + err := fmt.Errorf("Found known bad hash in chain %x", block.Hash()) + blockErr(block, err) + return i, err } - if err == BlockFutureErr { - // Allow up to MaxFuture second in the future blocks. If this limit - // is exceeded the chain is discarded and processed at a later time - // if given. - if max := time.Now().Unix() + maxTimeFutureBlocks; block.Time() > max { - return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max) + // Setting block.Td regardless of error (known for example) prevents errors down the line + // in the protocol handler + block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash()))) + + // Call in to the block processor and check for errors. It's likely that if one block fails + // all others will fail too (unless a known block is returned). 
+ logs, err := self.processor.Process(block) + if err != nil { + if IsKnownBlockErr(err) { + stats.ignored++ + continue } - block.SetQueued(true) - self.futureBlocks.Push(block) - stats.queued++ - continue - } + if err == BlockFutureErr { + // Allow up to MaxFuture second in the future blocks. If this limit + // is exceeded the chain is discarded and processed at a later time + // if given. + if max := time.Now().Unix() + maxTimeFutureBlocks; block.Time() > max { + return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max) + } - if IsParentErr(err) && self.futureBlocks.Has(block.ParentHash()) { - block.SetQueued(true) - self.futureBlocks.Push(block) - stats.queued++ - continue - } + block.SetQueued(true) + self.futureBlocks.Push(block) + stats.queued++ + continue + } - blockErr(block, err) + if IsParentErr(err) && self.futureBlocks.Has(block.ParentHash()) { + block.SetQueued(true) + self.futureBlocks.Push(block) + stats.queued++ + continue + } - return i, err - } + blockErr(block, err) - txcount += len(block.Transactions()) - - cblock := self.currentBlock - // Compare the TD of the last known block in the canonical chain to make sure it's greater. - // At this point it's possible that a different chain (fork) becomes the new canonical chain. - if block.Td.Cmp(self.Td()) > 0 { - // chain fork - if block.ParentHash() != cblock.Hash() { - // during split we merge two different chains and create the new canonical chain - err := self.merge(cblock, block) - if err != nil { - return i, err + return i, err + } + + txcount += len(block.Transactions()) + + cblock := self.currentBlock + // Compare the TD of the last known block in the canonical chain to make sure it's greater. + // At this point it's possible that a different chain (fork) becomes the new canonical chain. + if block.Td.Cmp(self.Td()) > 0 { + // chain fork + if block.ParentHash() != cblock.Hash() { + // during split we merge two different chains and create the new canonical chain + err := self.merge(cblock, block) + if err != nil { + return i, err + } + + queue[i] = ChainSplitEvent{block, logs} + queueEvent.splitCount++ } - queue[i] = ChainSplitEvent{block, logs} - queueEvent.splitCount++ - } + self.mu.Lock() + self.setTotalDifficulty(block.Td) + self.insert(block) + self.mu.Unlock() - self.mu.Lock() - self.setTotalDifficulty(block.Td) - self.insert(block) - self.mu.Unlock() + jsonlogger.LogJson(&logger.EthChainNewHead{ + BlockHash: block.Hash().Hex(), + BlockNumber: block.Number(), + ChainHeadHash: cblock.Hash().Hex(), + BlockPrevHash: block.ParentHash().Hex(), + }) - jsonlogger.LogJson(&logger.EthChainNewHead{ - BlockHash: block.Hash().Hex(), - BlockNumber: block.Number(), - ChainHeadHash: cblock.Hash().Hex(), - BlockPrevHash: block.ParentHash().Hex(), - }) + self.setTransState(state.New(block.Root(), self.stateDb)) + self.txState.SetState(state.New(block.Root(), self.stateDb)) - self.setTransState(state.New(block.Root(), self.stateDb)) - self.txState.SetState(state.New(block.Root(), self.stateDb)) + queue[i] = ChainEvent{block, block.Hash(), logs} + queueEvent.canonicalCount++ - queue[i] = ChainEvent{block, block.Hash(), logs} - queueEvent.canonicalCount++ + if glog.V(logger.Debug) { + glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) + } + } else { + if glog.V(logger.Detail) { + glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). 
Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) + } - if glog.V(logger.Debug) { - glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) - } - } else { - if glog.V(logger.Detail) { - glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) + queue[i] = ChainSideEvent{block, logs} + queueEvent.sideCount++ } + // Write block to database. Eventually we'll have to improve on this and throw away blocks that are + // not in the canonical chain. + self.write(block) + // Delete from future blocks + self.futureBlocks.Delete(block.Hash()) - queue[i] = ChainSideEvent{block, logs} - queueEvent.sideCount++ - } - // Write block to database. Eventually we'll have to improve on this and throw away blocks that are - // not in the canonical chain. - self.write(block) - // Delete from future blocks - self.futureBlocks.Delete(block.Hash()) - - stats.processed++ + stats.processed++ + } } if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { diff --git a/eth/backend.go b/eth/backend.go index 60e9359dc..d2ec0cc62 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -527,8 +527,8 @@ func (self *Ethereum) AddPeer(nodeURL string) error { func (s *Ethereum) Stop() { s.net.Stop() - s.protocolManager.Stop() s.chainManager.Stop() + s.protocolManager.Stop() s.txPool.Stop() s.eventMux.Stop() if s.whisper != nil { -- cgit v1.2.3 From 645dfd96932c87e256c3edc9035843c6baf4a2e8 Mon Sep 17 00:00:00 2001 From: obscuren Date: Fri, 12 Jun 2015 16:45:53 +0200 Subject: core: changed interrupt strategy Removed chain manager's select/channel approach when checking for interrupts. Now using an atomic int32 instead which checked for every block processed. 
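[Editor's note] The follow-up strategy replaces the select/channel check with an atomic int32 flag that Stop() sets once and the loop reads per block. Below is a minimal sketch of that revised pattern, based on the atomic.StoreInt32/LoadInt32 calls visible in the diff; the type and field names here are illustrative, not the real ChainManager fields.

package main

import (
	"fmt"
	"sync/atomic"
)

type chain struct {
	procInterrupt int32 // must only be read/written via the sync/atomic helpers
}

// Stop flags the interrupt; safe to call from another goroutine.
func (c *chain) Stop() {
	atomic.StoreInt32(&c.procInterrupt, 1)
}

// insertChain processes blocks until done or until the interrupt flag is set.
func (c *chain) insertChain(blocks []int) int {
	processed := 0
	for range blocks {
		if atomic.LoadInt32(&c.procInterrupt) == 1 {
			fmt.Println("premature abort during chain processing")
			break
		}
		processed++ // per-block work would go here
	}
	return processed
}

func main() {
	c := &chain{}
	c.Stop()
	fmt.Println("processed:", c.insertChain([]int{1, 2, 3}))
}

Compared with the closed-channel approach of the previous commit, the flag avoids a select per block and keeps the loop body flat, at the cost of being a one-way signal. The real diff follows. [End note]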
--- core/chain_manager.go | 203 +++++++++++++++++++++++++------------------------- 1 file changed, 101 insertions(+), 102 deletions(-) diff --git a/core/chain_manager.go b/core/chain_manager.go index 8629fb4ce..e56d82cce 100644 --- a/core/chain_manager.go +++ b/core/chain_manager.go @@ -8,6 +8,7 @@ import ( "os" "runtime" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -100,9 +101,10 @@ type ChainManager struct { cache *BlockCache futureBlocks *BlockCache - quit chan struct{} - procInterupt chan struct{} // interupt signaler for block processing - wg sync.WaitGroup + quit chan struct{} + // procInterrupt must be atomically called + procInterrupt int32 // interrupt signaler for block processing + wg sync.WaitGroup pow pow.PoW } @@ -114,7 +116,6 @@ func NewChainManager(genesis *types.Block, blockDb, stateDb common.Database, pow genesisBlock: GenesisBlock(42, stateDb), eventMux: mux, quit: make(chan struct{}), - procInterupt: make(chan struct{}), cache: NewBlockCache(blockCacheLimit), pow: pow, } @@ -518,7 +519,7 @@ func (self *ChainManager) CalcTotalDiff(block *types.Block) (*big.Int, error) { func (bc *ChainManager) Stop() { close(bc.quit) - close(bc.procInterupt) + atomic.StoreInt32(&bc.procInterrupt, 1) bc.wg.Wait() @@ -571,126 +572,124 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { defer close(nonceQuit) txcount := 0 -done: for i, block := range chain { - select { - case <-self.procInterupt: + if atomic.LoadInt32(&self.procInterrupt) == 1 { glog.V(logger.Debug).Infoln("Premature abort during chain processing") - break done - default: - bstart := time.Now() - // Wait for block i's nonce to be verified before processing - // its state transition. - for !nonceChecked[i] { - r := <-nonceDone - nonceChecked[r.i] = true - if !r.valid { - block := chain[r.i] - return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} - } - } + break + } - if BadHashes[block.Hash()] { - err := fmt.Errorf("Found known bad hash in chain %x", block.Hash()) - blockErr(block, err) - return i, err + bstart := time.Now() + // Wait for block i's nonce to be verified before processing + // its state transition. + for !nonceChecked[i] { + r := <-nonceDone + nonceChecked[r.i] = true + if !r.valid { + block := chain[r.i] + return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} } + } - // Setting block.Td regardless of error (known for example) prevents errors down the line - // in the protocol handler - block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash()))) - - // Call in to the block processor and check for errors. It's likely that if one block fails - // all others will fail too (unless a known block is returned). - logs, err := self.processor.Process(block) - if err != nil { - if IsKnownBlockErr(err) { - stats.ignored++ - continue - } - - if err == BlockFutureErr { - // Allow up to MaxFuture second in the future blocks. If this limit - // is exceeded the chain is discarded and processed at a later time - // if given. 
- if max := time.Now().Unix() + maxTimeFutureBlocks; block.Time() > max { - return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max) - } + if BadHashes[block.Hash()] { + err := fmt.Errorf("Found known bad hash in chain %x", block.Hash()) + blockErr(block, err) + return i, err + } - block.SetQueued(true) - self.futureBlocks.Push(block) - stats.queued++ - continue - } + // Setting block.Td regardless of error (known for example) prevents errors down the line + // in the protocol handler + block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash()))) + + // Call in to the block processor and check for errors. It's likely that if one block fails + // all others will fail too (unless a known block is returned). + logs, err := self.processor.Process(block) + if err != nil { + if IsKnownBlockErr(err) { + stats.ignored++ + continue + } - if IsParentErr(err) && self.futureBlocks.Has(block.ParentHash()) { - block.SetQueued(true) - self.futureBlocks.Push(block) - stats.queued++ - continue + if err == BlockFutureErr { + // Allow up to MaxFuture second in the future blocks. If this limit + // is exceeded the chain is discarded and processed at a later time + // if given. + if max := time.Now().Unix() + maxTimeFutureBlocks; block.Time() > max { + return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max) } - blockErr(block, err) + block.SetQueued(true) + self.futureBlocks.Push(block) + stats.queued++ + continue + } - return i, err + if IsParentErr(err) && self.futureBlocks.Has(block.ParentHash()) { + block.SetQueued(true) + self.futureBlocks.Push(block) + stats.queued++ + continue } - txcount += len(block.Transactions()) - - cblock := self.currentBlock - // Compare the TD of the last known block in the canonical chain to make sure it's greater. - // At this point it's possible that a different chain (fork) becomes the new canonical chain. - if block.Td.Cmp(self.Td()) > 0 { - // chain fork - if block.ParentHash() != cblock.Hash() { - // during split we merge two different chains and create the new canonical chain - err := self.merge(cblock, block) - if err != nil { - return i, err - } + blockErr(block, err) + + return i, err + } - queue[i] = ChainSplitEvent{block, logs} - queueEvent.splitCount++ + txcount += len(block.Transactions()) + + cblock := self.currentBlock + // Compare the TD of the last known block in the canonical chain to make sure it's greater. + // At this point it's possible that a different chain (fork) becomes the new canonical chain. 
+ if block.Td.Cmp(self.Td()) > 0 { + // chain fork + if block.ParentHash() != cblock.Hash() { + // during split we merge two different chains and create the new canonical chain + err := self.merge(cblock, block) + if err != nil { + return i, err } - self.mu.Lock() - self.setTotalDifficulty(block.Td) - self.insert(block) - self.mu.Unlock() + queue[i] = ChainSplitEvent{block, logs} + queueEvent.splitCount++ + } - jsonlogger.LogJson(&logger.EthChainNewHead{ - BlockHash: block.Hash().Hex(), - BlockNumber: block.Number(), - ChainHeadHash: cblock.Hash().Hex(), - BlockPrevHash: block.ParentHash().Hex(), - }) + self.mu.Lock() + self.setTotalDifficulty(block.Td) + self.insert(block) + self.mu.Unlock() - self.setTransState(state.New(block.Root(), self.stateDb)) - self.txState.SetState(state.New(block.Root(), self.stateDb)) + jsonlogger.LogJson(&logger.EthChainNewHead{ + BlockHash: block.Hash().Hex(), + BlockNumber: block.Number(), + ChainHeadHash: cblock.Hash().Hex(), + BlockPrevHash: block.ParentHash().Hex(), + }) - queue[i] = ChainEvent{block, block.Hash(), logs} - queueEvent.canonicalCount++ + self.setTransState(state.New(block.Root(), self.stateDb)) + self.txState.SetState(state.New(block.Root(), self.stateDb)) - if glog.V(logger.Debug) { - glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) - } - } else { - if glog.V(logger.Detail) { - glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) - } + queue[i] = ChainEvent{block, block.Hash(), logs} + queueEvent.canonicalCount++ - queue[i] = ChainSideEvent{block, logs} - queueEvent.sideCount++ + if glog.V(logger.Debug) { + glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) + } + } else { + if glog.V(logger.Detail) { + glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) } - // Write block to database. Eventually we'll have to improve on this and throw away blocks that are - // not in the canonical chain. - self.write(block) - // Delete from future blocks - self.futureBlocks.Delete(block.Hash()) - - stats.processed++ + queue[i] = ChainSideEvent{block, logs} + queueEvent.sideCount++ } + // Write block to database. Eventually we'll have to improve on this and throw away blocks that are + // not in the canonical chain. 
+ self.write(block) + // Delete from future blocks + self.futureBlocks.Delete(block.Hash()) + + stats.processed++ + } if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { -- cgit v1.2.3 From 66d3dc8690e0aa551e7b35a17006a2135b51c9bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 11 Jun 2015 15:56:08 +0300 Subject: eth, eth/downloader: move peer removal into downloader --- eth/backend.go | 8 ++-- eth/downloader/downloader.go | 87 ++++++++++++++++++++++++++------------- eth/downloader/downloader_test.go | 32 +++++++------- eth/downloader/queue.go | 2 +- eth/handler.go | 4 +- eth/sync.go | 32 +------------- 6 files changed, 83 insertions(+), 82 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index d2ec0cc62..4ebf21811 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -193,7 +193,6 @@ type Ethereum struct { whisper *whisper.Whisper pow *ethash.Ethash protocolManager *ProtocolManager - downloader *downloader.Downloader SolcPath string solc *compiler.Solidity @@ -290,14 +289,13 @@ func New(config *Config) (*Ethereum, error) { if err != nil { return nil, err } - eth.downloader = downloader.New(eth.EventMux(), eth.chainManager.HasBlock, eth.chainManager.GetBlock) eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit) eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.chainManager, eth.EventMux()) eth.chainManager.SetProcessor(eth.blockProcessor) + eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.chainManager) + eth.miner = miner.New(eth, eth.EventMux(), eth.pow) eth.miner.SetGasPrice(config.GasPrice) - - eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.chainManager, eth.downloader) if config.Shh { eth.whisper = whisper.New() eth.shhVersionId = int(eth.whisper.Version()) @@ -447,7 +445,7 @@ func (s *Ethereum) ClientVersion() string { return s.clientVersio func (s *Ethereum) EthVersion() int { return s.ethVersionId } func (s *Ethereum) NetVersion() int { return s.netVersionId } func (s *Ethereum) ShhVersion() int { return s.shhVersionId } -func (s *Ethereum) Downloader() *downloader.Downloader { return s.downloader } +func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader } // Start the ethereum func (s *Ethereum) Start() error { diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index f0a515d12..499b3a585 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -32,28 +32,32 @@ var ( var ( errLowTd = errors.New("peers TD is too low") - ErrBusy = errors.New("busy") + errBusy = errors.New("busy") errUnknownPeer = errors.New("peer is unknown or unhealthy") - ErrBadPeer = errors.New("action from bad peer ignored") - ErrStallingPeer = errors.New("peer is stalling") + errBadPeer = errors.New("action from bad peer ignored") + errStallingPeer = errors.New("peer is stalling") errBannedHead = errors.New("peer head hash already banned") errNoPeers = errors.New("no peers to keep download active") - ErrPendingQueue = errors.New("pending items in queue") - ErrTimeout = errors.New("timeout") - ErrEmptyHashSet = errors.New("empty hash set by peer") + errPendingQueue = errors.New("pending items in queue") + errTimeout = errors.New("timeout") + errEmptyHashSet = errors.New("empty hash set by peer") errPeersUnavailable = errors.New("no peers available or all 
peers tried for block download process") errAlreadyInPool = errors.New("hash already in pool") - ErrInvalidChain = errors.New("retrieved hash chain is invalid") - ErrCrossCheckFailed = errors.New("block cross-check failed") + errInvalidChain = errors.New("retrieved hash chain is invalid") + errCrossCheckFailed = errors.New("block cross-check failed") errCancelHashFetch = errors.New("hash fetching cancelled (requested)") errCancelBlockFetch = errors.New("block downloading cancelled (requested)") errNoSyncActive = errors.New("no sync active") ) +// hashCheckFn is a callback type for verifying a hash's presence in the local chain. type hashCheckFn func(common.Hash) bool -type getBlockFn func(common.Hash) *types.Block -type chainInsertFn func(types.Blocks) (int, error) -type hashIterFn func() (common.Hash, error) + +// blockRetrievalFn is a callback type for retrieving a block from the local chain. +type blockRetrievalFn func(common.Hash) *types.Block + +// peerDropFn is a callback type for dropping a peer detected as malicious. +type peerDropFn func(id string) type blockPack struct { peerId string @@ -85,8 +89,9 @@ type Downloader struct { importLock sync.Mutex // Callbacks - hasBlock hashCheckFn - getBlock getBlockFn + hasBlock hashCheckFn // Checks if a block is present in the chain + getBlock blockRetrievalFn // Retrieves a block from the chain + dropPeer peerDropFn // Retrieved the TD of our own chain // Status synchronising int32 @@ -107,7 +112,8 @@ type Block struct { OriginPeer string } -func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloader { +// New creates a new downloader to fetch hashes and blocks from remote peers. +func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, dropPeer peerDropFn) *Downloader { // Create the base downloader downloader := &Downloader{ mux: mux, @@ -115,6 +121,7 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa peers: newPeerSet(), hasBlock: hasBlock, getBlock: getBlock, + dropPeer: dropPeer, newPeerCh: make(chan *peer, 1), hashCh: make(chan hashPack, 1), blockCh: make(chan blockPack, 1), @@ -183,19 +190,43 @@ func (d *Downloader) UnregisterPeer(id string) error { return nil } -// Synchronise will select the peer and use it for synchronising. If an empty string is given +// Synchronise tries to sync up our local block chain with a remote peer, both +// adding various sanity checks as well as wrapping it with various log entries. +func (d *Downloader) Synchronise(id string, head common.Hash) { + glog.V(logger.Detail).Infof("Attempting synchronisation: %v, 0x%x", id, head) + + switch err := d.synchronise(id, head); err { + case nil: + glog.V(logger.Detail).Infof("Synchronisation completed") + + case errBusy: + glog.V(logger.Detail).Infof("Synchronisation already in progress") + + case errTimeout, errBadPeer, errEmptyHashSet, errInvalidChain, errCrossCheckFailed: + glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err) + d.dropPeer(id) + + case errPendingQueue: + glog.V(logger.Debug).Infoln("Synchronisation aborted:", err) + + default: + glog.V(logger.Warn).Infof("Synchronisation failed: %v", err) + } +} + +// synchronise will select the peer and use it for synchronising. If an empty string is given // it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the // checks fail an error will be returned. 
This method is synchronous -func (d *Downloader) Synchronise(id string, hash common.Hash) error { +func (d *Downloader) synchronise(id string, hash common.Hash) error { // Make sure only one goroutine is ever allowed past this point at once if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { - return ErrBusy + return errBusy } defer atomic.StoreInt32(&d.synchronising, 0) // If the head hash is banned, terminate immediately if d.banned.Has(hash) { - return ErrInvalidChain + return errInvalidChain } // Post a user notification of the sync (only once per session) if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { @@ -209,7 +240,7 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error { // Abort if the queue still contains some leftover data if _, cached := d.queue.Size(); cached > 0 && d.queue.GetHeadBlock() != nil { - return ErrPendingQueue + return errPendingQueue } // Reset the queue and peer set to clean any internal leftover state d.queue.Reset() @@ -342,7 +373,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { // Make sure the peer actually gave something valid if len(hashPack.hashes) == 0 { glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set", active.id) - return ErrEmptyHashSet + return errEmptyHashSet } for index, hash := range hashPack.hashes { if d.banned.Has(hash) { @@ -352,7 +383,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { if err := d.banBlocks(active.id, hash); err != nil { glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err) } - return ErrInvalidChain + return errInvalidChain } } // Determine if we're done fetching hashes (queue up all pending), and continue if not done @@ -369,12 +400,12 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { inserts := d.queue.Insert(hashPack.hashes) if len(inserts) == 0 && !done { glog.V(logger.Debug).Infof("Peer (%s) responded with stale hashes", active.id) - return ErrBadPeer + return errBadPeer } if !done { // Check that the peer is not stalling the sync if len(inserts) < MinHashFetch { - return ErrStallingPeer + return errStallingPeer } // Try and fetch a random block to verify the hash batch // Skip the last hash as the cross check races with the next hash fetch @@ -408,7 +439,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { block := blockPack.blocks[0] if check, ok := d.checks[block.Hash()]; ok { if block.ParentHash() != check.parent { - return ErrCrossCheckFailed + return errCrossCheckFailed } delete(d.checks, block.Hash()) } @@ -418,7 +449,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { for hash, check := range d.checks { if time.Now().After(check.expire) { glog.V(logger.Debug).Infof("Cross check timeout for %x", hash) - return ErrCrossCheckFailed + return errCrossCheckFailed } } @@ -438,7 +469,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { // if all peers have been tried, abort the process entirely or if the hash is // the zero hash. if p == nil || (head == common.Hash{}) { - return ErrTimeout + return errTimeout } // set p to the active peer. this will invalidate any hashes that may be returned // by our previous (delayed) peer. 
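The exported Synchronise wrapper above is what lets peer removal live inside the downloader: it classifies the result of an internal sync attempt and drops the peer only for failures that indicate misbehaviour (timeouts, bad or empty hash sets, invalid chains, failed cross-checks), while soft conditions such as a busy downloader are merely logged. A hedged sketch of that classification; the error values and the dropPeer callback here are illustrative, not the package's real set.

package main

import (
	"errors"
	"fmt"
)

var (
	errBusy         = errors.New("busy")
	errTimeout      = errors.New("timeout")
	errInvalidChain = errors.New("retrieved hash chain is invalid")
)

// synchronise runs one sync attempt and decides whether the peer must go.
func synchronise(id string, attempt func() error, dropPeer func(string)) {
	switch err := attempt(); err {
	case nil:
		fmt.Printf("synchronisation with %s completed\n", id)
	case errBusy:
		fmt.Println("synchronisation already in progress")
	case errTimeout, errInvalidChain:
		fmt.Printf("removing peer %s: %v\n", id, err)
		dropPeer(id)
	default:
		fmt.Printf("synchronisation with %s failed: %v\n", id, err)
	}
}

func main() {
	drop := func(id string) { fmt.Println("dropped", id) }
	synchronise("peer-1", func() error { return errTimeout }, drop) // dropped
	synchronise("peer-2", func() error { return errBusy }, drop)    // kept
}

Keeping this decision next to the downloader's own error values is what allows the later eth/sync.go change to collapse the protocol manager's error handling into a single Synchronise call.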
@@ -500,7 +531,7 @@ out: peer.SetIdle() glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks)) - case ErrInvalidChain: + case errInvalidChain: // The hash chain is invalid (blocks are not ordered properly), abort return err @@ -617,7 +648,7 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { return errCancelBlockFetch case <-timeout: - return ErrTimeout + return errTimeout case <-d.hashCh: // Out of bounds hashes received, ignore them diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 5f10fb41f..5e79c10c9 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -73,7 +73,7 @@ func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types done: make(chan bool), } var mux event.TypeMux - downloader := New(&mux, tester.hasBlock, tester.getBlock) + downloader := New(&mux, tester.hasBlock, tester.getBlock, nil) tester.downloader = downloader return tester @@ -83,7 +83,7 @@ func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types // block until it returns func (dl *downloadTester) sync(peerId string, head common.Hash) error { dl.activePeerId = peerId - return dl.downloader.Synchronise(peerId, head) + return dl.downloader.synchronise(peerId, head) } // syncTake is starts synchronising with a remote peer, but concurrently it also @@ -415,8 +415,8 @@ func TestInvalidHashOrderAttack(t *testing.T) { // Try and sync with the malicious node and check that it fails tester := newTester(t, reverse, blocks) tester.newPeer("attack", big.NewInt(10000), reverse[0]) - if _, err := tester.syncTake("attack", reverse[0]); err != ErrInvalidChain { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain) + if _, err := tester.syncTake("attack", reverse[0]); err != errInvalidChain { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Ensure that a valid chain can still pass sync tester.hashes = hashes @@ -438,8 +438,8 @@ func TestMadeupHashChainAttack(t *testing.T) { // Try and sync with the malicious node and check that it fails tester := newTester(t, hashes, nil) tester.newPeer("attack", big.NewInt(10000), hashes[0]) - if _, err := tester.syncTake("attack", hashes[0]); err != ErrCrossCheckFailed { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed) + if _, err := tester.syncTake("attack", hashes[0]); err != errCrossCheckFailed { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } } @@ -455,8 +455,8 @@ func TestMadeupHashChainDrippingAttack(t *testing.T) { // Try and sync with the attacker, one hash at a time tester.maxHashFetch = 1 tester.newPeer("attack", big.NewInt(10000), hashes[0]) - if _, err := tester.syncTake("attack", hashes[0]); err != ErrStallingPeer { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrStallingPeer) + if _, err := tester.syncTake("attack", hashes[0]); err != errStallingPeer { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } } @@ -480,8 +480,8 @@ func TestMadeupBlockChainAttack(t *testing.T) { // Try and sync with the malicious node and check that it fails tester := newTester(t, gapped, blocks) tester.newPeer("attack", big.NewInt(10000), gapped[0]) - if _, err := tester.syncTake("attack", gapped[0]); err != ErrCrossCheckFailed { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed) + if _, err := 
tester.syncTake("attack", gapped[0]); err != errCrossCheckFailed { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } // Ensure that a valid chain can still pass sync blockSoftTTL = defaultBlockTTL @@ -514,8 +514,8 @@ func TestMadeupParentBlockChainAttack(t *testing.T) { // Try and sync with the malicious node and check that it fails tester := newTester(t, hashes, forges) tester.newPeer("attack", big.NewInt(10000), hashes[0]) - if _, err := tester.syncTake("attack", hashes[0]); err != ErrCrossCheckFailed { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed) + if _, err := tester.syncTake("attack", hashes[0]); err != errCrossCheckFailed { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } // Ensure that a valid chain can still pass sync blockSoftTTL = defaultBlockTTL @@ -547,8 +547,8 @@ func TestBannedChainStarvationAttack(t *testing.T) { tester.newPeer("attack", big.NewInt(10000), hashes[0]) for banned := tester.downloader.banned.Size(); ; { // Try to sync with the attacker, check hash chain failure - if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain) + if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Check that the ban list grew with at least 1 new item, or all banned bans := tester.downloader.banned.Size() @@ -592,8 +592,8 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) { tester.newPeer("attack", big.NewInt(10000), hashes[0]) for { // Try to sync with the attacker, check hash chain failure - if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain) + if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Short circuit if the entire chain was banned if tester.downloader.banned.Has(hashes[0]) { diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 7abbd42fd..903f043eb 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -320,7 +320,7 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) { // If a requested block falls out of the range, the hash chain is invalid index := int(block.NumberU64()) - q.blockOffset if index >= len(q.blockCache) || index < 0 { - return ErrInvalidChain + return errInvalidChain } // Otherwise merge the block and mark the hash block q.blockCache[index] = &Block{ diff --git a/eth/handler.go b/eth/handler.go index f002727f3..ac7fb8fcf 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -68,12 +68,11 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the ethereum network. 
-func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpool txPool, chainman *core.ChainManager, downloader *downloader.Downloader) *ProtocolManager { +func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpool txPool, chainman *core.ChainManager) *ProtocolManager { manager := &ProtocolManager{ eventMux: mux, txpool: txpool, chainman: chainman, - downloader: downloader, peers: newPeerSet(), newPeerCh: make(chan *peer, 1), newHashCh: make(chan []*blockAnnounce, 1), @@ -81,6 +80,7 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo txsyncCh: make(chan *txsync), quitSync: make(chan struct{}), } + manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.removePeer) manager.SubProtocol = p2p.Protocol{ Name: "eth", Version: uint(protocolVersion), diff --git a/eth/sync.go b/eth/sync.go index 8fee21d7b..b127ca979 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/p2p/discover" @@ -332,33 +331,6 @@ func (pm *ProtocolManager) synchronise(peer *peer) { if peer.Td().Cmp(pm.chainman.Td()) <= 0 { return } - // FIXME if we have the hash in our chain and the TD of the peer is - // much higher than ours, something is wrong with us or the peer. - // Check if the hash is on our own chain - head := peer.Head() - if pm.chainman.HasBlock(head) { - glog.V(logger.Debug).Infoln("Synchronisation canceled: head already known") - return - } - // Get the hashes from the peer (synchronously) - glog.V(logger.Detail).Infof("Attempting synchronisation: %v, 0x%x", peer.id, head) - - err := pm.downloader.Synchronise(peer.id, head) - switch err { - case nil: - glog.V(logger.Detail).Infof("Synchronisation completed") - - case downloader.ErrBusy: - glog.V(logger.Detail).Infof("Synchronisation already in progress") - - case downloader.ErrTimeout, downloader.ErrBadPeer, downloader.ErrEmptyHashSet, downloader.ErrInvalidChain, downloader.ErrCrossCheckFailed: - glog.V(logger.Debug).Infof("Removing peer %v: %v", peer.id, err) - pm.removePeer(peer.id) - - case downloader.ErrPendingQueue: - glog.V(logger.Debug).Infoln("Synchronisation aborted:", err) - - default: - glog.V(logger.Warn).Infof("Synchronisation failed: %v", err) - } + // Otherwise try to sync with the downloader + pm.downloader.Synchronise(peer.id, peer.Head()) } -- cgit v1.2.3 From 2937903299b9b965eb54a9d5031714d6a7af972e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 11 Jun 2015 16:11:56 +0300 Subject: eth/downloader: remove uneeded testing functions --- eth/downloader/downloader_test.go | 9 ++------- eth/downloader/queue_test.go | 30 ------------------------------ 2 files changed, 2 insertions(+), 37 deletions(-) delete mode 100644 eth/downloader/queue_test.go diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 5e79c10c9..e7dfbc70b 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -214,18 +214,13 @@ func TestBlockTaking(t *testing.T) { // Tests that an inactive downloader will not accept incoming hashes and blocks. 
func TestInactiveDownloader(t *testing.T) { - // Create a small enough block chain to download and the tester - targetBlocks := blockCacheLimit - 15 - hashes := createHashes(0, targetBlocks) - blocks := createBlocksFromHashSet(createHashSet(hashes)) - tester := newTester(t, nil, nil) // Check that neither hashes nor blocks are accepted - if err := tester.downloader.DeliverHashes("bad peer", hashes); err != errNoSyncActive { + if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive { t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) } - if err := tester.downloader.DeliverBlocks("bad peer", blocks); err != errNoSyncActive { + if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive { t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) } } diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go deleted file mode 100644 index ee6141f71..000000000 --- a/eth/downloader/queue_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package downloader - -import ( - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "gopkg.in/fatih/set.v0" -) - -func createHashSet(hashes []common.Hash) *set.Set { - hset := set.New() - - for _, hash := range hashes { - hset.Add(hash) - } - - return hset -} - -func createBlocksFromHashSet(hashes *set.Set) []*types.Block { - blocks := make([]*types.Block, hashes.Size()) - - var i int - hashes.Each(func(v interface{}) bool { - blocks[i] = createBlock(i, common.Hash{}, v.(common.Hash)) - i++ - return true - }) - - return blocks -} -- cgit v1.2.3 From 2dd6a62f67a217cc2186f04d7d99565f7782c79b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 11 Jun 2015 17:14:45 +0300 Subject: eth/downloader: support individual peers in the test suite --- eth/downloader/downloader_test.go | 225 +++++++++++++++++++------------------- 1 file changed, 113 insertions(+), 112 deletions(-) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index e7dfbc70b..6269ed87c 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -16,6 +16,8 @@ var ( knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} unknownHash = common.Hash{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9} bannedHash = common.Hash{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5} + + genesis = createBlock(1, common.Hash{}, knownHash) ) func createHashes(start, amount int) (hashes []common.Hash) { @@ -51,26 +53,20 @@ func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block { type downloadTester struct { downloader *Downloader - hashes []common.Hash // Chain of hashes simulating - blocks map[common.Hash]*types.Block // Blocks associated with the hashes - chain []common.Hash // Block-chain being constructed + ownHashes []common.Hash // Hash chain belonging to the tester + ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester + peerHashes map[string][]common.Hash // Hash chain belonging to different test peers + peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers maxHashFetch int // Overrides the maximum number of retrieved hashes - - t *testing.T - done chan bool - activePeerId string } -func newTester(t *testing.T, hashes []common.Hash, blocks 
map[common.Hash]*types.Block) *downloadTester { +func newTester() *downloadTester { tester := &downloadTester{ - t: t, - - hashes: hashes, - blocks: blocks, - chain: []common.Hash{knownHash}, - - done: make(chan bool), + ownHashes: []common.Hash{knownHash}, + ownBlocks: map[common.Hash]*types.Block{knownHash: genesis}, + peerHashes: make(map[string][]common.Hash), + peerBlocks: make(map[string]map[common.Hash]*types.Block), } var mux event.TypeMux downloader := New(&mux, tester.hasBlock, tester.getBlock, nil) @@ -79,13 +75,6 @@ func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types return tester } -// sync is a simple wrapper around the downloader to start synchronisation and -// block until it returns -func (dl *downloadTester) sync(peerId string, head common.Hash) error { - dl.activePeerId = peerId - return dl.downloader.synchronise(peerId, head) -} - // syncTake is starts synchronising with a remote peer, but concurrently it also // starts fetching blocks that the downloader retrieved. IT blocks until both go // routines terminate. @@ -102,12 +91,17 @@ func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, e time.Sleep(time.Millisecond) } // Take a batch of blocks and accumulate - took = append(took, dl.downloader.TakeBlocks()...) + blocks := dl.downloader.TakeBlocks() + for _, block := range blocks { + dl.ownHashes = append(dl.ownHashes, block.RawBlock.Hash()) + dl.ownBlocks[block.RawBlock.Hash()] = block.RawBlock + } + took = append(took, blocks...) } done <- struct{}{} }() // Start the downloading, sync the taker and return - err := dl.sync(peerId, head) + err := dl.downloader.synchronise(peerId, head) done <- struct{}{} <-done @@ -115,65 +109,76 @@ func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, e return took, err } +// hasBlock checks if a block is present in the testers canonical chain. func (dl *downloadTester) hasBlock(hash common.Hash) bool { - for _, h := range dl.chain { - if h == hash { - return true - } - } - return false + return dl.getBlock(hash) != nil } +// getBlock retrieves a block from the testers canonical chain. func (dl *downloadTester) getBlock(hash common.Hash) *types.Block { - return dl.blocks[knownHash] + return dl.ownBlocks[hash] +} + +// newPeer registers a new block download source into the downloader. +func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block) error { + err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id), dl.peerGetBlocksFn(id)) + if err == nil { + // Assign the owned hashes and blocks to the peer + dl.peerHashes[id] = hashes + dl.peerBlocks[id] = blocks + } + return err } -// getHashes retrieves a batch of hashes for reconstructing the chain. -func (dl *downloadTester) getHashes(head common.Hash) error { - limit := MaxHashFetch - if dl.maxHashFetch > 0 { - limit = dl.maxHashFetch - } - // Gather the next batch of hashes - hashes := make([]common.Hash, 0, limit) - for i, hash := range dl.hashes { - if hash == head { - i++ - for len(hashes) < cap(hashes) && i < len(dl.hashes) { - hashes = append(hashes, dl.hashes[i]) +// peerGetBlocksFn constructs a getHashes function associated with a particular +// peer in the download tester. The returned function can be used to retrieve +// batches of hashes from the particularly requested peer. 
+func (dl *downloadTester) peerGetHashesFn(id string) func(head common.Hash) error { + return func(head common.Hash) error { + limit := MaxHashFetch + if dl.maxHashFetch > 0 { + limit = dl.maxHashFetch + } + // Gather the next batch of hashes + hashes := dl.peerHashes[id] + result := make([]common.Hash, 0, limit) + for i, hash := range hashes { + if hash == head { i++ + for len(result) < cap(result) && i < len(hashes) { + result = append(result, hashes[i]) + i++ + } + break } - break } + // Delay delivery a bit to allow attacks to unfold + go func() { + time.Sleep(time.Millisecond) + dl.downloader.DeliverHashes(id, result) + }() + return nil } - // Delay delivery a bit to allow attacks to unfold - id := dl.activePeerId - go func() { - time.Sleep(time.Millisecond) - dl.downloader.DeliverHashes(id, hashes) - }() - return nil } -func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error { +// peerGetBlocksFn constructs a getBlocks function associated with a particular +// peer in the download tester. The returned function can be used to retrieve +// batches of blocks from the particularly requested peer. +func (dl *downloadTester) peerGetBlocksFn(id string) func([]common.Hash) error { return func(hashes []common.Hash) error { - blocks := make([]*types.Block, 0, len(hashes)) + blocks := dl.peerBlocks[id] + result := make([]*types.Block, 0, len(hashes)) for _, hash := range hashes { - if block, ok := dl.blocks[hash]; ok { - blocks = append(blocks, block) + if block, ok := blocks[hash]; ok { + result = append(result, block) } } - go dl.downloader.DeliverBlocks(id, blocks) + go dl.downloader.DeliverBlocks(id, result) return nil } } -// newPeer registers a new block download source into the syncer. -func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) error { - return dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id)) -} - // Tests that simple synchronization, without throttling from a good peer works. func TestSynchronisation(t *testing.T) { // Create a small enough block chain to download and the tester @@ -181,11 +186,11 @@ func TestSynchronisation(t *testing.T) { hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashes(hashes) - tester := newTester(t, hashes, blocks) - tester.newPeer("peer", big.NewInt(10000), hashes[0]) + tester := newTester() + tester.newPeer("peer", hashes, blocks) // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("peer", hashes[0]); err != nil { + if err := tester.downloader.synchronise("peer", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks { @@ -200,11 +205,11 @@ func TestBlockTaking(t *testing.T) { hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashes(hashes) - tester := newTester(t, hashes, blocks) - tester.newPeer("peer", big.NewInt(10000), hashes[0]) + tester := newTester() + tester.newPeer("peer", hashes, blocks) // Synchronise with the peer and test block retrieval - if err := tester.sync("peer", hashes[0]); err != nil { + if err := tester.downloader.synchronise("peer", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks { @@ -214,7 +219,7 @@ func TestBlockTaking(t *testing.T) { // Tests that an inactive downloader will not accept incoming hashes and blocks. 
func TestInactiveDownloader(t *testing.T) { - tester := newTester(t, nil, nil) + tester := newTester() // Check that neither hashes nor blocks are accepted if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive { @@ -232,11 +237,11 @@ func TestCancel(t *testing.T) { hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashes(hashes) - tester := newTester(t, hashes, blocks) - tester.newPeer("peer", big.NewInt(10000), hashes[0]) + tester := newTester() + tester.newPeer("peer", hashes, blocks) // Synchronise with the peer, but cancel afterwards - if err := tester.sync("peer", hashes[0]); err != nil { + if err := tester.downloader.synchronise("peer", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } if !tester.downloader.Cancel() { @@ -260,13 +265,13 @@ func TestThrottling(t *testing.T) { hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashes(hashes) - tester := newTester(t, hashes, blocks) - tester.newPeer("peer", big.NewInt(10000), hashes[0]) + tester := newTester() + tester.newPeer("peer", hashes, blocks) // Start a synchronisation concurrently errc := make(chan error) go func() { - errc <- tester.sync("peer", hashes[0]) + errc <- tester.downloader.synchronise("peer", hashes[0]) }() // Iteratively take some blocks, always checking the retrieval count for total := 0; total < targetBlocks; { @@ -303,9 +308,9 @@ func TestNonExistingParentAttack(t *testing.T) { forged.ParentHeaderHash = unknownHash // Try and sync with the malicious node and check that it fails - tester := newTester(t, hashes, blocks) - tester.newPeer("attack", big.NewInt(10000), hashes[0]) - if err := tester.sync("attack", hashes[0]); err != nil { + tester := newTester() + tester.newPeer("attack", hashes, blocks) + if err := tester.downloader.synchronise("attack", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } bs := tester.downloader.TakeBlocks() @@ -319,8 +324,8 @@ func TestNonExistingParentAttack(t *testing.T) { // Reconstruct a valid chain, and try to synchronize with it forged.ParentHeaderHash = knownHash - tester.newPeer("valid", big.NewInt(20000), hashes[0]) - if err := tester.sync("valid", hashes[0]); err != nil { + tester.newPeer("valid", hashes, blocks) + if err := tester.downloader.synchronise("valid", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } bs = tester.downloader.TakeBlocks() @@ -341,12 +346,12 @@ func TestRepeatingHashAttack(t *testing.T) { forged := hashes[:len(hashes)-1] // Try and sync with the malicious node - tester := newTester(t, forged, blocks) - tester.newPeer("attack", big.NewInt(10000), forged[0]) + tester := newTester() + tester.newPeer("attack", forged, blocks) errc := make(chan error) go func() { - errc <- tester.sync("attack", hashes[0]) + errc <- tester.downloader.synchronise("attack", hashes[0]) }() // Make sure that syncing returns and does so with a failure @@ -359,9 +364,8 @@ func TestRepeatingHashAttack(t *testing.T) { } } // Ensure that a valid chain can still pass sync - tester.hashes = hashes - tester.newPeer("valid", big.NewInt(20000), hashes[0]) - if err := tester.sync("valid", hashes[0]); err != nil { + tester.newPeer("valid", hashes, blocks) + if err := tester.downloader.synchronise("valid", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -377,15 +381,15 @@ func TestNonExistingBlockAttack(t *testing.T) { hashes[len(hashes)/2] = unknownHash // Try and sync with the malicious node and check 
that it fails - tester := newTester(t, hashes, blocks) - tester.newPeer("attack", big.NewInt(10000), hashes[0]) - if err := tester.sync("attack", hashes[0]); err != errPeersUnavailable { + tester := newTester() + tester.newPeer("attack", hashes, blocks) + if err := tester.downloader.synchronise("attack", hashes[0]); err != errPeersUnavailable { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errPeersUnavailable) } // Ensure that a valid chain can still pass sync hashes[len(hashes)/2] = origin - tester.newPeer("valid", big.NewInt(20000), hashes[0]) - if err := tester.sync("valid", hashes[0]); err != nil { + tester.newPeer("valid", hashes, blocks) + if err := tester.downloader.synchronise("valid", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -408,14 +412,13 @@ func TestInvalidHashOrderAttack(t *testing.T) { copy(reverse[blockCacheLimit:], chunk2) // Try and sync with the malicious node and check that it fails - tester := newTester(t, reverse, blocks) - tester.newPeer("attack", big.NewInt(10000), reverse[0]) + tester := newTester() + tester.newPeer("attack", reverse, blocks) if _, err := tester.syncTake("attack", reverse[0]); err != errInvalidChain { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Ensure that a valid chain can still pass sync - tester.hashes = hashes - tester.newPeer("valid", big.NewInt(20000), hashes[0]) + tester.newPeer("valid", hashes, blocks) if _, err := tester.syncTake("valid", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } @@ -431,8 +434,8 @@ func TestMadeupHashChainAttack(t *testing.T) { hashes := createHashes(0, 1024*blockCacheLimit) // Try and sync with the malicious node and check that it fails - tester := newTester(t, hashes, nil) - tester.newPeer("attack", big.NewInt(10000), hashes[0]) + tester := newTester() + tester.newPeer("attack", hashes, nil) if _, err := tester.syncTake("attack", hashes[0]); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } @@ -445,11 +448,11 @@ func TestMadeupHashChainAttack(t *testing.T) { func TestMadeupHashChainDrippingAttack(t *testing.T) { // Create a random chain of hashes to drip hashes := createHashes(0, 16*blockCacheLimit) - tester := newTester(t, hashes, nil) + tester := newTester() // Try and sync with the attacker, one hash at a time tester.maxHashFetch = 1 - tester.newPeer("attack", big.NewInt(10000), hashes[0]) + tester.newPeer("attack", hashes, nil) if _, err := tester.syncTake("attack", hashes[0]); err != errStallingPeer { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } @@ -473,8 +476,8 @@ func TestMadeupBlockChainAttack(t *testing.T) { gapped[i] = hashes[2*i] } // Try and sync with the malicious node and check that it fails - tester := newTester(t, gapped, blocks) - tester.newPeer("attack", big.NewInt(10000), gapped[0]) + tester := newTester() + tester.newPeer("attack", gapped, blocks) if _, err := tester.syncTake("attack", gapped[0]); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } @@ -482,8 +485,7 @@ func TestMadeupBlockChainAttack(t *testing.T) { blockSoftTTL = defaultBlockTTL crossCheckCycle = defaultCrossCheckCycle - tester.hashes = hashes - tester.newPeer("valid", big.NewInt(20000), hashes[0]) + tester.newPeer("valid", hashes, blocks) if _, err := tester.syncTake("valid", hashes[0]); err != nil { t.Fatalf("failed to 
synchronise blocks: %v", err) } @@ -507,8 +509,8 @@ func TestMadeupParentBlockChainAttack(t *testing.T) { block.ParentHeaderHash = hash // Simulate pointing to already known hash } // Try and sync with the malicious node and check that it fails - tester := newTester(t, hashes, forges) - tester.newPeer("attack", big.NewInt(10000), hashes[0]) + tester := newTester() + tester.newPeer("attack", hashes, forges) if _, err := tester.syncTake("attack", hashes[0]); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } @@ -516,8 +518,7 @@ func TestMadeupParentBlockChainAttack(t *testing.T) { blockSoftTTL = defaultBlockTTL crossCheckCycle = defaultCrossCheckCycle - tester.blocks = blocks - tester.newPeer("valid", big.NewInt(20000), hashes[0]) + tester.newPeer("valid", hashes, blocks) if _, err := tester.syncTake("valid", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } @@ -534,12 +535,12 @@ func TestBannedChainStarvationAttack(t *testing.T) { blocks := createBlocksFromHashes(hashes) // Create the tester and ban the selected hash - tester := newTester(t, hashes, blocks) + tester := newTester() tester.downloader.banned.Add(bannedHash) // Iteratively try to sync, and verify that the banned hash list grows until // the head of the invalid chain is blocked too. - tester.newPeer("attack", big.NewInt(10000), hashes[0]) + tester.newPeer("attack", hashes, blocks) for banned := tester.downloader.banned.Size(); ; { // Try to sync with the attacker, check hash chain failure if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain { @@ -556,7 +557,7 @@ func TestBannedChainStarvationAttack(t *testing.T) { banned = bans } // Check that after banning an entire chain, bad peers get dropped - if err := tester.newPeer("new attacker", big.NewInt(10000), hashes[0]); err != errBannedHead { + if err := tester.newPeer("new attacker", hashes, blocks); err != errBannedHead { t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead) } if peer := tester.downloader.peers.Peer("net attacker"); peer != nil { @@ -579,12 +580,12 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) { blocks := createBlocksFromHashes(hashes) // Create the tester and ban the selected hash - tester := newTester(t, hashes, blocks) + tester := newTester() tester.downloader.banned.Add(bannedHash) // Iteratively try to sync, and verify that the banned hash list grows until // the head of the invalid chain is blocked too. 
- tester.newPeer("attack", big.NewInt(10000), hashes[0]) + tester.newPeer("attack", hashes, blocks) for { // Try to sync with the attacker, check hash chain failure if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain { -- cgit v1.2.3 From faae8b7dd844f4faa2b9e2c8ce76246341ed7357 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 11 Jun 2015 18:12:51 +0300 Subject: eth: fix an accidental test compile error --- eth/protocol_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/eth/protocol_test.go b/eth/protocol_test.go index bbea9fc11..69d487c71 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/p2p" @@ -168,8 +167,7 @@ func newProtocolManagerForTesting(txAdded chan<- []*types.Transaction) *Protocol db, _ = ethdb.NewMemDatabase() chain, _ = core.NewChainManager(core.GenesisBlock(0, db), db, db, core.FakePow{}, em) txpool = &fakeTxPool{added: txAdded} - dl = downloader.New(em, chain.HasBlock, chain.GetBlock) - pm = NewProtocolManager(ProtocolVersion, 0, em, txpool, chain, dl) + pm = NewProtocolManager(ProtocolVersion, 0, em, txpool, chain) ) pm.Start() return pm -- cgit v1.2.3 From 80833f813715cb151bbf165f462e38930fc4fccd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 11 Jun 2015 18:13:13 +0300 Subject: eth/downloader: instreument and test the sync peer drop --- eth/downloader/downloader.go | 14 +++++---- eth/downloader/downloader_test.go | 61 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 66 insertions(+), 9 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 499b3a585..88ceeb5ac 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -31,7 +31,6 @@ var ( ) var ( - errLowTd = errors.New("peers TD is too low") errBusy = errors.New("busy") errUnknownPeer = errors.New("peer is unknown or unhealthy") errBadPeer = errors.New("action from bad peer ignored") @@ -94,8 +93,9 @@ type Downloader struct { dropPeer peerDropFn // Retrieved the TD of our own chain // Status - synchronising int32 - notified int32 + synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing + synchronising int32 + notified int32 // Channels newPeerCh chan *peer @@ -202,7 +202,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash) { case errBusy: glog.V(logger.Detail).Infof("Synchronisation already in progress") - case errTimeout, errBadPeer, errEmptyHashSet, errInvalidChain, errCrossCheckFailed: + case errTimeout, errBadPeer, errStallingPeer, errBannedHead, errEmptyHashSet, errPeersUnavailable, errInvalidChain, errCrossCheckFailed: glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err) d.dropPeer(id) @@ -218,6 +218,10 @@ func (d *Downloader) Synchronise(id string, head common.Hash) { // it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the // checks fail an error will be returned. 
This method is synchronous func (d *Downloader) synchronise(id string, hash common.Hash) error { + // Mock out the synchonisation if testing + if d.synchroniseMock != nil { + return d.synchroniseMock(id, hash) + } // Make sure only one goroutine is ever allowed past this point at once if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { return errBusy @@ -226,7 +230,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash) error { // If the head hash is banned, terminate immediately if d.banned.Has(hash) { - return errInvalidChain + return errBannedHead } // Post a user notification of the sync (only once per session) if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 6269ed87c..c328e7d9a 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -2,6 +2,7 @@ package downloader import ( "encoding/binary" + "fmt" "math/big" "testing" "time" @@ -69,7 +70,7 @@ func newTester() *downloadTester { peerBlocks: make(map[string]map[common.Hash]*types.Block), } var mux event.TypeMux - downloader := New(&mux, tester.hasBlock, tester.getBlock, nil) + downloader := New(&mux, tester.hasBlock, tester.getBlock, tester.dropPeer) tester.downloader = downloader return tester @@ -130,6 +131,14 @@ func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[co return err } +// dropPeer simulates a hard peer removal from the connection pool. +func (dl *downloadTester) dropPeer(id string) { + delete(dl.peerHashes, id) + delete(dl.peerBlocks, id) + + dl.downloader.UnregisterPeer(id) +} + // peerGetBlocksFn constructs a getHashes function associated with a particular // peer in the download tester. The returned function can be used to retrieve // batches of hashes from the particularly requested peer. @@ -544,14 +553,14 @@ func TestBannedChainStarvationAttack(t *testing.T) { for banned := tester.downloader.banned.Size(); ; { // Try to sync with the attacker, check hash chain failure if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain { + if tester.downloader.banned.Has(hashes[0]) && err == errBannedHead { + break + } t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Check that the ban list grew with at least 1 new item, or all banned bans := tester.downloader.banned.Size() if bans < banned+1 { - if tester.downloader.banned.Has(hashes[0]) { - break - } t.Fatalf("ban count mismatch: have %v, want %v+", bans, banned+1) } banned = bans @@ -606,3 +615,47 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) { } } } + +// Tests that misbehaving peers are disconnected, whilst behaving ones are not. 
+func TestAttackerDropping(t *testing.T) { + // Define the disconnection requirement for individual errors + tests := []struct { + result error + drop bool + }{ + {nil, false}, // Sync succeeded, all is well + {errBusy, false}, // Sync is already in progress, no problem + {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop + {errBadPeer, true}, // Peer was deemed bad for some reason, drop it + {errStallingPeer, true}, // Peer was detected to be stalling, drop it + {errBannedHead, true}, // Peer's head hash is a known bad hash, drop it + {errNoPeers, false}, // No peers to download from, soft race, no issue + {errPendingQueue, false}, // There are blocks still cached, wait to exhaust, no issue + {errTimeout, true}, // No hashes received in due time, drop the peer + {errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end + {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser + {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop + {errCrossCheckFailed, true}, // Hash-origin failed to pass a block cross check, drop + {errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + } + // Run the tests and check disconnection status + tester := newTester() + for i, tt := range tests { + // Register a new peer and ensure it's presence + id := fmt.Sprintf("test %d", i) + if err := tester.newPeer(id, []common.Hash{knownHash}, nil); err != nil { + t.Fatalf("test %d: failed to register new peer: %v", i, err) + } + if _, ok := tester.peerHashes[id]; !ok { + t.Fatalf("test %d: registered peer not found", i) + } + // Simulate a synchronisation and check the required result + tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } + + tester.downloader.Synchronise(id, knownHash) + if _, ok := tester.peerHashes[id]; !ok != tt.drop { + t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) + } + } +} -- cgit v1.2.3 From 0fc71877a7d7a46f35147f753cba0de7b937c77a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 11 Jun 2015 20:22:40 +0300 Subject: eth/downloader: add valid peer during attacks (check interference) --- eth/downloader/downloader_test.go | 233 +++++++++++++++++++++++--------------- 1 file changed, 140 insertions(+), 93 deletions(-) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index c328e7d9a..5b85f01fb 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -14,23 +14,29 @@ import ( ) var ( - knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - unknownHash = common.Hash{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9} - bannedHash = common.Hash{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5} + knownHash = common.Hash{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + unknownHash = common.Hash{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2} + bannedHash = common.Hash{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3} genesis = createBlock(1, common.Hash{}, knownHash) ) 
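From here the test harness is rebuilt around per-peer fixtures: every simulated peer gets its own hash chain and block map, deep-copied on registration (see the newPeer rewrite further below), so an attacking peer can no longer interfere with the data a later valid peer serves. A small sketch of that ownership idea, with illustrative types rather than the real downloadTester:

package main

import "fmt"

type tester struct {
	peerHashes map[string][]string // one independent chain per peer id
}

func (t *tester) newPeer(id string, hashes []string) {
	own := make([]string, len(hashes))
	copy(own, hashes) // deep copy: later edits by the caller cannot leak in
	t.peerHashes[id] = own
}

func (t *tester) dropPeer(id string) {
	delete(t.peerHashes, id) // simulate a hard disconnect
}

func main() {
	t := &tester{peerHashes: make(map[string][]string)}
	chain := []string{"h1", "h2"}
	t.newPeer("attack", chain)
	chain[0] = "tampered"                  // the registered copy is unaffected
	fmt.Println(t.peerHashes["attack"][0]) // h1
	t.dropPeer("attack")
	fmt.Println(len(t.peerHashes)) // 0
}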
-func createHashes(start, amount int) (hashes []common.Hash) { +// idCounter is used by the createHashes method the generate deterministic but unique hashes +var idCounter = int64(2) // #1 is the genesis block + +// createHashes generates a batch of hashes rooted at a specific point in the chain. +func createHashes(amount int, root common.Hash) (hashes []common.Hash) { hashes = make([]common.Hash, amount+1) - hashes[len(hashes)-1] = knownHash + hashes[len(hashes)-1] = root - for i := range hashes[:len(hashes)-1] { - binary.BigEndian.PutUint64(hashes[i][:8], uint64(start+i+2)) + for i := 0; i < len(hashes)-1; i++ { + binary.BigEndian.PutUint64(hashes[i][:8], uint64(idCounter)) + idCounter++ } return } +// createBlock assembles a new block at the given chain height. func createBlock(i int, parent, hash common.Hash) *types.Block { header := &types.Header{Number: big.NewInt(int64(i))} block := types.NewBlockWithHeader(header) @@ -39,6 +45,11 @@ func createBlock(i int, parent, hash common.Hash) *types.Block { return block } +// copyBlock makes a deep copy of a block suitable for local modifications. +func copyBlock(block *types.Block) *types.Block { + return createBlock(int(block.Number().Int64()), block.ParentHeaderHash, block.HeaderHash) +} + func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block { blocks := make(map[common.Hash]*types.Block) for i := 0; i < len(hashes); i++ { @@ -76,10 +87,15 @@ func newTester() *downloadTester { return tester } +// sync starts synchronizing with a remote peer, blocking until it completes. +func (dl *downloadTester) sync(id string) error { + return dl.downloader.synchronise(id, dl.peerHashes[id][0]) +} + // syncTake is starts synchronising with a remote peer, but concurrently it also // starts fetching blocks that the downloader retrieved. IT blocks until both go // routines terminate. 
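With its new signature, createHashes derives fake block hashes from a package-level counter, so every generated chain is unique within a test run yet fully deterministic. A self-contained sketch of the same idea, using a simplified hash type rather than the real common.Hash:

package main

import (
	"encoding/binary"
	"fmt"
)

type hash [32]byte

var idCounter uint64 = 2 // #1 is reserved for the genesis block, as above

// makeHashes generates amount fake hashes rooted at root (newest first), each
// one simply encoding the next counter value in its first eight bytes.
func makeHashes(amount int, root hash) []hash {
	hashes := make([]hash, amount+1)
	hashes[len(hashes)-1] = root
	for i := 0; i < len(hashes)-1; i++ {
		binary.BigEndian.PutUint64(hashes[i][:8], idCounter)
		idCounter++
	}
	return hashes
}

func main() {
	for _, h := range makeHashes(3, hash{1}) {
		fmt.Printf("%x\n", h[:8])
	}
}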
-func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, error) { +func (dl *downloadTester) syncTake(id string) ([]*Block, error) { // Start a block collector to take blocks as they become available done := make(chan struct{}) took := []*Block{} @@ -102,7 +118,7 @@ func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, e done <- struct{}{} }() // Start the downloading, sync the taker and return - err := dl.downloader.synchronise(peerId, head) + err := dl.sync(id) done <- struct{}{} <-done @@ -124,9 +140,14 @@ func (dl *downloadTester) getBlock(hash common.Hash) *types.Block { func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block) error { err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id), dl.peerGetBlocksFn(id)) if err == nil { - // Assign the owned hashes and blocks to the peer - dl.peerHashes[id] = hashes - dl.peerBlocks[id] = blocks + // Assign the owned hashes and blocks to the peer (deep copy) + dl.peerHashes[id] = make([]common.Hash, len(hashes)) + copy(dl.peerHashes[id], hashes) + + dl.peerBlocks[id] = make(map[common.Hash]*types.Block) + for hash, block := range blocks { + dl.peerBlocks[id][hash] = copyBlock(block) + } } return err } @@ -192,14 +213,14 @@ func (dl *downloadTester) peerGetBlocksFn(id string) func([]common.Hash) error { func TestSynchronisation(t *testing.T) { // Create a small enough block chain to download and the tester targetBlocks := blockCacheLimit - 15 - hashes := createHashes(0, targetBlocks) + hashes := createHashes(targetBlocks, knownHash) blocks := createBlocksFromHashes(hashes) tester := newTester() tester.newPeer("peer", hashes, blocks) // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.downloader.synchronise("peer", hashes[0]); err != nil { + if err := tester.sync("peer"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks { @@ -211,14 +232,14 @@ func TestSynchronisation(t *testing.T) { func TestBlockTaking(t *testing.T) { // Create a small enough block chain to download and the tester targetBlocks := blockCacheLimit - 15 - hashes := createHashes(0, targetBlocks) + hashes := createHashes(targetBlocks, knownHash) blocks := createBlocksFromHashes(hashes) tester := newTester() tester.newPeer("peer", hashes, blocks) // Synchronise with the peer and test block retrieval - if err := tester.downloader.synchronise("peer", hashes[0]); err != nil { + if err := tester.sync("peer"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks { @@ -243,14 +264,14 @@ func TestInactiveDownloader(t *testing.T) { func TestCancel(t *testing.T) { // Create a small enough block chain to download and the tester targetBlocks := blockCacheLimit - 15 - hashes := createHashes(0, targetBlocks) + hashes := createHashes(targetBlocks, knownHash) blocks := createBlocksFromHashes(hashes) tester := newTester() tester.newPeer("peer", hashes, blocks) // Synchronise with the peer, but cancel afterwards - if err := tester.downloader.synchronise("peer", hashes[0]); err != nil { + if err := tester.sync("peer"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } if !tester.downloader.Cancel() { @@ -271,7 +292,7 @@ func TestCancel(t *testing.T) { func TestThrottling(t *testing.T) { // Create a long block chain to download and the tester targetBlocks := 8 * 
blockCacheLimit - hashes := createHashes(0, targetBlocks) + hashes := createHashes(targetBlocks, knownHash) blocks := createBlocksFromHashes(hashes) tester := newTester() @@ -280,7 +301,7 @@ func TestThrottling(t *testing.T) { // Start a synchronisation concurrently errc := make(chan error) go func() { - errc <- tester.downloader.synchronise("peer", hashes[0]) + errc <- tester.sync("peer") }() // Iteratively take some blocks, always checking the retrieval count for total := 0; total < targetBlocks; { @@ -309,17 +330,20 @@ func TestThrottling(t *testing.T) { // Tests that if a peer returns an invalid chain with a block pointing to a non- // existing parent, it is correctly detected and handled. func TestNonExistingParentAttack(t *testing.T) { + tester := newTester() + // Forge a single-link chain with a forged header - hashes := createHashes(0, 1) + hashes := createHashes(1, knownHash) blocks := createBlocksFromHashes(hashes) + tester.newPeer("valid", hashes, blocks) - forged := blocks[hashes[0]] - forged.ParentHeaderHash = unknownHash + hashes = createHashes(1, knownHash) + blocks = createBlocksFromHashes(hashes) + blocks[hashes[0]].ParentHeaderHash = unknownHash + tester.newPeer("attack", hashes, blocks) // Try and sync with the malicious node and check that it fails - tester := newTester() - tester.newPeer("attack", hashes, blocks) - if err := tester.downloader.synchronise("attack", hashes[0]); err != nil { + if err := tester.sync("attack"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } bs := tester.downloader.TakeBlocks() @@ -331,10 +355,8 @@ func TestNonExistingParentAttack(t *testing.T) { } tester.downloader.Cancel() - // Reconstruct a valid chain, and try to synchronize with it - forged.ParentHeaderHash = knownHash - tester.newPeer("valid", hashes, blocks) - if err := tester.downloader.synchronise("valid", hashes[0]); err != nil { + // Try to synchronize with the valid chain and make sure it succeeds + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } bs = tester.downloader.TakeBlocks() @@ -348,21 +370,20 @@ func TestNonExistingParentAttack(t *testing.T) { // Tests that if a malicious peers keeps sending us repeating hashes, we don't // loop indefinitely. -func TestRepeatingHashAttack(t *testing.T) { +func TestRepeatingHashAttack(t *testing.T) { // TODO: Is this thing valid?? 
+ tester := newTester() + // Create a valid chain, but drop the last link - hashes := createHashes(0, blockCacheLimit) + hashes := createHashes(blockCacheLimit, knownHash) blocks := createBlocksFromHashes(hashes) - forged := hashes[:len(hashes)-1] + tester.newPeer("valid", hashes, blocks) + tester.newPeer("attack", hashes[:len(hashes)-1], blocks) // Try and sync with the malicious node - tester := newTester() - tester.newPeer("attack", forged, blocks) - errc := make(chan error) go func() { - errc <- tester.downloader.synchronise("attack", hashes[0]) + errc <- tester.sync("attack") }() - // Make sure that syncing returns and does so with a failure select { case <-time.After(time.Second): @@ -373,8 +394,7 @@ func TestRepeatingHashAttack(t *testing.T) { } } // Ensure that a valid chain can still pass sync - tester.newPeer("valid", hashes, blocks) - if err := tester.downloader.synchronise("valid", hashes[0]); err != nil { + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -382,23 +402,22 @@ func TestRepeatingHashAttack(t *testing.T) { // Tests that if a malicious peers returns a non-existent block hash, it should // eventually time out and the sync reattempted. func TestNonExistingBlockAttack(t *testing.T) { + tester := newTester() + // Create a valid chain, but forge the last link - hashes := createHashes(0, blockCacheLimit) + hashes := createHashes(blockCacheLimit, knownHash) blocks := createBlocksFromHashes(hashes) - origin := hashes[len(hashes)/2] + tester.newPeer("valid", hashes, blocks) hashes[len(hashes)/2] = unknownHash + tester.newPeer("attack", hashes, blocks) // Try and sync with the malicious node and check that it fails - tester := newTester() - tester.newPeer("attack", hashes, blocks) - if err := tester.downloader.synchronise("attack", hashes[0]); err != errPeersUnavailable { + if err := tester.sync("attack"); err != errPeersUnavailable { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errPeersUnavailable) } // Ensure that a valid chain can still pass sync - hashes[len(hashes)/2] = origin - tester.newPeer("valid", hashes, blocks) - if err := tester.downloader.synchronise("valid", hashes[0]); err != nil { + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -406,29 +425,28 @@ func TestNonExistingBlockAttack(t *testing.T) { // Tests that if a malicious peer is returning hashes in a weird order, that the // sync throttler doesn't choke on them waiting for the valid blocks. 
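The repeating-hash test above guards against the downloader looping forever by racing the sync goroutine against a timer. A small sketch of that timeout pattern; the buffered channel here is a deliberate variation that lets the worker goroutine exit even if the deadline fires first:

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout runs op in a goroutine and fails if it does not return
// within limit, mirroring the select/time.After structure of the test above.
func runWithTimeout(op func() error, limit time.Duration) error {
	errc := make(chan error, 1)
	go func() { errc <- op() }()

	select {
	case err := <-errc:
		return err
	case <-time.After(limit):
		return errors.New("operation timed out")
	}
}

func main() {
	err := runWithTimeout(func() error {
		time.Sleep(10 * time.Millisecond)
		return nil
	}, time.Second)
	fmt.Println(err) // <nil>
}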
func TestInvalidHashOrderAttack(t *testing.T) { + tester := newTester() + // Create a valid long chain, but reverse some hashes within - hashes := createHashes(0, 4*blockCacheLimit) + hashes := createHashes(4*blockCacheLimit, knownHash) blocks := createBlocksFromHashes(hashes) + tester.newPeer("valid", hashes, blocks) chunk1 := make([]common.Hash, blockCacheLimit) chunk2 := make([]common.Hash, blockCacheLimit) copy(chunk1, hashes[blockCacheLimit:2*blockCacheLimit]) copy(chunk2, hashes[2*blockCacheLimit:3*blockCacheLimit]) - reverse := make([]common.Hash, len(hashes)) - copy(reverse, hashes) - copy(reverse[2*blockCacheLimit:], chunk1) - copy(reverse[blockCacheLimit:], chunk2) + copy(hashes[2*blockCacheLimit:], chunk1) + copy(hashes[blockCacheLimit:], chunk2) + tester.newPeer("attack", hashes, blocks) // Try and sync with the malicious node and check that it fails - tester := newTester() - tester.newPeer("attack", reverse, blocks) - if _, err := tester.syncTake("attack", reverse[0]); err != errInvalidChain { + if _, err := tester.syncTake("attack"); err != errInvalidChain { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Ensure that a valid chain can still pass sync - tester.newPeer("valid", hashes, blocks) - if _, err := tester.syncTake("valid", hashes[0]); err != nil { + if _, err := tester.syncTake("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -436,18 +454,25 @@ func TestInvalidHashOrderAttack(t *testing.T) { // Tests that if a malicious peer makes up a random hash chain and tries to push // indefinitely, it actually gets caught with it. func TestMadeupHashChainAttack(t *testing.T) { + tester := newTester() blockSoftTTL = 100 * time.Millisecond crossCheckCycle = 25 * time.Millisecond // Create a long chain of hashes without backing blocks - hashes := createHashes(0, 1024*blockCacheLimit) + hashes := createHashes(4*blockCacheLimit, knownHash) + blocks := createBlocksFromHashes(hashes) + + tester.newPeer("valid", hashes, blocks) + tester.newPeer("attack", createHashes(1024*blockCacheLimit, knownHash), nil) // Try and sync with the malicious node and check that it fails - tester := newTester() - tester.newPeer("attack", hashes, nil) - if _, err := tester.syncTake("attack", hashes[0]); err != errCrossCheckFailed { + if _, err := tester.syncTake("attack"); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } + // Ensure that a valid chain can still pass sync + if _, err := tester.syncTake("valid"); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) + } } // Tests that if a malicious peer makes up a random hash chain, and tries to push @@ -456,13 +481,13 @@ func TestMadeupHashChainAttack(t *testing.T) { // one by one prevents reliable block/parent verification. 
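The hash-order attack above is staged by swapping two cache-sized windows of the hash list in place with copy. A tiny sketch of such a window swap on a plain int slice:

package main

import "fmt"

// swapWindows exchanges two equal-sized, non-overlapping windows of s in
// place, using a temporary copy of the first window.
func swapWindows(s []int, a, b, size int) {
	tmp := make([]int, size)
	copy(tmp, s[a:a+size])
	copy(s[a:a+size], s[b:b+size])
	copy(s[b:b+size], tmp)
}

func main() {
	s := []int{0, 1, 2, 3, 4, 5, 6, 7}
	swapWindows(s, 2, 4, 2)
	fmt.Println(s) // [0 1 4 5 2 3 6 7]
}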
func TestMadeupHashChainDrippingAttack(t *testing.T) { // Create a random chain of hashes to drip - hashes := createHashes(0, 16*blockCacheLimit) + hashes := createHashes(16*blockCacheLimit, knownHash) tester := newTester() // Try and sync with the attacker, one hash at a time tester.maxHashFetch = 1 tester.newPeer("attack", hashes, nil) - if _, err := tester.syncTake("attack", hashes[0]); err != errStallingPeer { + if _, err := tester.syncTake("attack"); err != errStallingPeer { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } } @@ -477,7 +502,7 @@ func TestMadeupBlockChainAttack(t *testing.T) { crossCheckCycle = 25 * time.Millisecond // Create a long chain of blocks and simulate an invalid chain by dropping every second - hashes := createHashes(0, 16*blockCacheLimit) + hashes := createHashes(16*blockCacheLimit, knownHash) blocks := createBlocksFromHashes(hashes) gapped := make([]common.Hash, len(hashes)/2) @@ -487,7 +512,7 @@ func TestMadeupBlockChainAttack(t *testing.T) { // Try and sync with the malicious node and check that it fails tester := newTester() tester.newPeer("attack", gapped, blocks) - if _, err := tester.syncTake("attack", gapped[0]); err != errCrossCheckFailed { + if _, err := tester.syncTake("attack"); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } // Ensure that a valid chain can still pass sync @@ -495,7 +520,7 @@ func TestMadeupBlockChainAttack(t *testing.T) { crossCheckCycle = defaultCrossCheckCycle tester.newPeer("valid", hashes, blocks) - if _, err := tester.syncTake("valid", hashes[0]); err != nil { + if _, err := tester.syncTake("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -504,6 +529,8 @@ func TestMadeupBlockChainAttack(t *testing.T) { // attacker make up a valid hashes for random blocks, but also forges the block // parents to point to existing hashes. 
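Several of these attack tests shrink package-level timeouts (blockSoftTTL, crossCheckCycle) so failures surface quickly, then restore them before re-syncing the valid chain. A sketch of that save-and-restore pattern; using defer is an assumption of this sketch (the tests above restore inline), but it guarantees the knob is reset even if the test bails out early:

package demo

import (
	"testing"
	"time"
)

// softTTL mimics a package-level timing knob such as blockSoftTTL.
var softTTL = 3 * time.Second

func TestWithShortTTL(t *testing.T) {
	// Shrink the timeout for the duration of this test only.
	defaultTTL := softTTL
	softTTL = 100 * time.Millisecond
	defer func() { softTTL = defaultTTL }()

	// ... exercise code that depends on softTTL here ...
}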
func TestMadeupParentBlockChainAttack(t *testing.T) { + tester := newTester() + defaultBlockTTL := blockSoftTTL defaultCrossCheckCycle := crossCheckCycle @@ -511,24 +538,24 @@ func TestMadeupParentBlockChainAttack(t *testing.T) { crossCheckCycle = 25 * time.Millisecond // Create a long chain of blocks and simulate an invalid chain by dropping every second - hashes := createHashes(0, 16*blockCacheLimit) + hashes := createHashes(16*blockCacheLimit, knownHash) blocks := createBlocksFromHashes(hashes) - forges := createBlocksFromHashes(hashes) - for hash, block := range forges { - block.ParentHeaderHash = hash // Simulate pointing to already known hash + tester.newPeer("valid", hashes, blocks) + + for _, block := range blocks { + block.ParentHeaderHash = knownHash // Simulate pointing to already known hash } + tester.newPeer("attack", hashes, blocks) + // Try and sync with the malicious node and check that it fails - tester := newTester() - tester.newPeer("attack", hashes, forges) - if _, err := tester.syncTake("attack", hashes[0]); err != errCrossCheckFailed { + if _, err := tester.syncTake("attack"); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } // Ensure that a valid chain can still pass sync blockSoftTTL = defaultBlockTTL crossCheckCycle = defaultCrossCheckCycle - tester.newPeer("valid", hashes, blocks) - if _, err := tester.syncTake("valid", hashes[0]); err != nil { + if _, err := tester.syncTake("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -537,22 +564,25 @@ func TestMadeupParentBlockChainAttack(t *testing.T) { // the downloader, it will not keep refetching the same chain indefinitely, but // gradually block pieces of it, until it's head is also blocked. func TestBannedChainStarvationAttack(t *testing.T) { - // Construct a valid chain, but ban one of the hashes in it - hashes := createHashes(0, 8*blockCacheLimit) - hashes[len(hashes)/2+23] = bannedHash // weird index to have non multiple of ban chunk size - - blocks := createBlocksFromHashes(hashes) - // Create the tester and ban the selected hash tester := newTester() tester.downloader.banned.Add(bannedHash) + // Construct a valid chain, for it and ban the fork + hashes := createHashes(8*blockCacheLimit, knownHash) + blocks := createBlocksFromHashes(hashes) + tester.newPeer("valid", hashes, blocks) + + fork := len(hashes)/2 - 23 + hashes = append(createHashes(4*blockCacheLimit, bannedHash), hashes[fork:]...) + blocks = createBlocksFromHashes(hashes) + tester.newPeer("attack", hashes, blocks) + // Iteratively try to sync, and verify that the banned hash list grows until // the head of the invalid chain is blocked too. 
- tester.newPeer("attack", hashes, blocks) for banned := tester.downloader.banned.Size(); ; { // Try to sync with the attacker, check hash chain failure - if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain { + if _, err := tester.syncTake("attack"); err != errInvalidChain { if tester.downloader.banned.Has(hashes[0]) && err == errBannedHead { break } @@ -569,35 +599,45 @@ func TestBannedChainStarvationAttack(t *testing.T) { if err := tester.newPeer("new attacker", hashes, blocks); err != errBannedHead { t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead) } - if peer := tester.downloader.peers.Peer("net attacker"); peer != nil { + if peer := tester.downloader.peers.Peer("new attacker"); peer != nil { t.Fatalf("banned attacker registered: %v", peer) } + // Ensure that a valid chain can still pass sync + if _, err := tester.syncTake("valid"); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) + } } // Tests that if a peer sends excessively many/large invalid chains that are // gradually banned, it will have an upper limit on the consumed memory and also // the origin bad hashes will not be evacuated. func TestBannedChainMemoryExhaustionAttack(t *testing.T) { + // Create the tester and ban the selected hash + tester := newTester() + tester.downloader.banned.Add(bannedHash) + // Reduce the test size a bit + defaultMaxBlockFetch := MaxBlockFetch + defaultMaxBannedHashes := maxBannedHashes + MaxBlockFetch = 4 maxBannedHashes = 256 // Construct a banned chain with more chunks than the ban limit - hashes := createHashes(0, maxBannedHashes*MaxBlockFetch) - hashes[len(hashes)-1] = bannedHash // weird index to have non multiple of ban chunk size - + hashes := createHashes(8*blockCacheLimit, knownHash) blocks := createBlocksFromHashes(hashes) + tester.newPeer("valid", hashes, blocks) - // Create the tester and ban the selected hash - tester := newTester() - tester.downloader.banned.Add(bannedHash) + fork := len(hashes)/2 - 23 + hashes = append(createHashes(maxBannedHashes*MaxBlockFetch, bannedHash), hashes[fork:]...) + blocks = createBlocksFromHashes(hashes) + tester.newPeer("attack", hashes, blocks) // Iteratively try to sync, and verify that the banned hash list grows until // the head of the invalid chain is blocked too. - tester.newPeer("attack", hashes, blocks) for { // Try to sync with the attacker, check hash chain failure - if _, err := tester.syncTake("attack", hashes[0]); err != errInvalidChain { + if _, err := tester.syncTake("attack"); err != errInvalidChain { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Short circuit if the entire chain was banned @@ -614,6 +654,13 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) { } } } + // Ensure that a valid chain can still pass sync + MaxBlockFetch = defaultMaxBlockFetch + maxBannedHashes = defaultMaxBannedHashes + + if _, err := tester.syncTake("valid"); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) + } } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. 
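Both banned-chain tests now build the attacker's chain by generating a fresh branch rooted at bannedHash and splicing it onto the tail of the valid chain from the fork point onwards (hashes are ordered newest first in these tests). A small sketch of that splice with plain strings:

package main

import "fmt"

// makeFork grafts a freshly generated bad branch onto the valid chain from
// the fork point onwards, newest hashes first.
func makeFork(valid []string, fork int, branch []string) []string {
	return append(branch, valid[fork:]...)
}

func main() {
	valid := []string{"h5", "h4", "h3", "h2", "h1", "genesis"}
	attack := makeFork(valid, 3, []string{"bad2", "bad1", "banned"})
	fmt.Println(attack) // [bad2 bad1 banned h2 h1 genesis]
}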
-- cgit v1.2.3 From fc7abd98865f3bdc6cc36258026db98a649cd577 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 12 Jun 2015 13:35:29 +0300 Subject: eth, eth/downloader: move block processing into the downlaoder --- eth/downloader/downloader.go | 177 +++++++++++++++++--------- eth/downloader/downloader_test.go | 252 ++++++++++++++++++++------------------ eth/handler.go | 2 +- eth/sync.go | 53 +------- 4 files changed, 253 insertions(+), 231 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 88ceeb5ac..1bbba11ed 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -3,6 +3,7 @@ package downloader import ( "bytes" "errors" + "math" "math/rand" "sync" "sync/atomic" @@ -28,25 +29,27 @@ var ( crossCheckCycle = time.Second // Period after which to check for expired cross checks maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out + maxBlockProcess = 256 // Number of blocks to import at once into the chain ) var ( - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer is unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errStallingPeer = errors.New("peer is stalling") - errBannedHead = errors.New("peer head hash already banned") - errNoPeers = errors.New("no peers to keep download active") - errPendingQueue = errors.New("pending items in queue") - errTimeout = errors.New("timeout") - errEmptyHashSet = errors.New("empty hash set by peer") - errPeersUnavailable = errors.New("no peers available or all peers tried for block download process") - errAlreadyInPool = errors.New("hash already in pool") - errInvalidChain = errors.New("retrieved hash chain is invalid") - errCrossCheckFailed = errors.New("block cross-check failed") - errCancelHashFetch = errors.New("hash fetching cancelled (requested)") - errCancelBlockFetch = errors.New("block downloading cancelled (requested)") - errNoSyncActive = errors.New("no sync active") + errBusy = errors.New("busy") + errUnknownPeer = errors.New("peer is unknown or unhealthy") + errBadPeer = errors.New("action from bad peer ignored") + errStallingPeer = errors.New("peer is stalling") + errBannedHead = errors.New("peer head hash already banned") + errNoPeers = errors.New("no peers to keep download active") + errPendingQueue = errors.New("pending items in queue") + errTimeout = errors.New("timeout") + errEmptyHashSet = errors.New("empty hash set by peer") + errPeersUnavailable = errors.New("no peers available or all peers tried for block download process") + errAlreadyInPool = errors.New("hash already in pool") + errInvalidChain = errors.New("retrieved hash chain is invalid") + errCrossCheckFailed = errors.New("block cross-check failed") + errCancelHashFetch = errors.New("hash fetching canceled (requested)") + errCancelBlockFetch = errors.New("block downloading canceled (requested)") + errCancelChainImport = errors.New("chain importing canceled (requested)") + errNoSyncActive = errors.New("no sync active") ) // hashCheckFn is a callback type for verifying a hash's presence in the local chain. @@ -55,6 +58,9 @@ type hashCheckFn func(common.Hash) bool // blockRetrievalFn is a callback type for retrieving a block from the local chain. type blockRetrievalFn func(common.Hash) *types.Block +// chainInsertFn is a callback type to insert a batch of blocks into the local chain. +type chainInsertFn func(types.Blocks) (int, error) + // peerDropFn is a callback type for dropping a peer detected as malicious. 
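The new chainInsertFn callback keeps the downloader decoupled from the chain manager: rather than importing it, the downloader is handed a plain function that can insert a batch of blocks and report how far it got. A minimal sketch of that style of dependency injection, with simplified types:

package main

import (
	"errors"
	"fmt"
)

// chainInsertFn inserts a batch of (simplified) blocks, returning the index
// of the first failure and an error, much like the callback added above.
type chainInsertFn func(blocks []string) (int, error)

type downloader struct {
	insertChain chainInsertFn
}

func newDownloader(insert chainInsertFn) *downloader {
	return &downloader{insertChain: insert}
}

func main() {
	chain := []string{"genesis"}
	d := newDownloader(func(blocks []string) (int, error) {
		for i, b := range blocks {
			if b == "" {
				return i, errors.New("invalid block")
			}
			chain = append(chain, b)
		}
		return len(blocks), nil
	})

	n, err := d.insertChain([]string{"b1", "b2"})
	fmt.Println(n, err, chain) // 2 <nil> [genesis b1 b2]
}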
type peerDropFn func(id string) @@ -88,13 +94,15 @@ type Downloader struct { importLock sync.Mutex // Callbacks - hasBlock hashCheckFn // Checks if a block is present in the chain - getBlock blockRetrievalFn // Retrieves a block from the chain - dropPeer peerDropFn // Retrieved the TD of our own chain + hasBlock hashCheckFn // Checks if a block is present in the chain + getBlock blockRetrievalFn // Retrieves a block from the chain + insertChain chainInsertFn // Injects a batch of blocks into the chain + dropPeer peerDropFn // Retrieved the TD of our own chain // Status synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing synchronising int32 + processing int32 notified int32 // Channels @@ -113,18 +121,19 @@ type Block struct { } // New creates a new downloader to fetch hashes and blocks from remote peers. -func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, dropPeer peerDropFn) *Downloader { +func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader { // Create the base downloader downloader := &Downloader{ - mux: mux, - queue: newQueue(), - peers: newPeerSet(), - hasBlock: hasBlock, - getBlock: getBlock, - dropPeer: dropPeer, - newPeerCh: make(chan *peer, 1), - hashCh: make(chan hashPack, 1), - blockCh: make(chan blockPack, 1), + mux: mux, + queue: newQueue(), + peers: newPeerSet(), + hasBlock: hasBlock, + getBlock: getBlock, + insertChain: insertChain, + dropPeer: dropPeer, + newPeerCh: make(chan *peer, 1), + hashCh: make(chan hashPack, 1), + blockCh: make(chan blockPack, 1), } // Inject all the known bad hashes downloader.banned = set.New() @@ -157,7 +166,7 @@ func (d *Downloader) Stats() (pending int, cached int, importing int, estimate t return } -// Synchronising returns the state of the downloader +// Synchronising returns whether the downloader is currently retrieving blocks. func (d *Downloader) Synchronising() bool { return atomic.LoadInt32(&d.synchronising) > 0 } @@ -260,19 +269,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash) error { return d.syncWithPeer(p, hash) } -// TakeBlocks takes blocks from the queue and yields them to the caller. -func (d *Downloader) TakeBlocks() []*Block { - blocks := d.queue.TakeBlocks() - if len(blocks) > 0 { - d.importLock.Lock() - d.importStart = time.Now() - d.importQueue = blocks - d.importDone = 0 - d.importLock.Unlock() - } - return blocks -} - // Has checks if the downloader knows about a particular hash, meaning that its // either already downloaded of pending retrieval. func (d *Downloader) Has(hash common.Hash) bool { @@ -307,19 +303,16 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash) (err error) { // Cancel cancels all of the operations and resets the queue. It returns true // if the cancel operation was completed. -func (d *Downloader) Cancel() bool { - // If we're not syncing just return. 
- hs, bs := d.queue.Size() - if atomic.LoadInt32(&d.synchronising) == 0 && hs == 0 && bs == 0 { - return false - } +func (d *Downloader) Cancel() { // Close the current cancel channel d.cancelLock.Lock() - select { - case <-d.cancelCh: - // Channel was already closed - default: - close(d.cancelCh) + if d.cancelCh != nil { + select { + case <-d.cancelCh: + // Channel was already closed + default: + close(d.cancelCh) + } } d.cancelLock.Unlock() @@ -330,11 +323,11 @@ func (d *Downloader) Cancel() bool { d.importQueue = nil d.importDone = 0 d.importLock.Unlock() - - return true } -// XXX Make synchronous +// fetchHahes starts retrieving hashes backwards from a specific peer and hash, +// up until it finds a common ancestor. If the source peer times out, alternative +// ones are tried for continuation. func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { var ( start = time.Now() @@ -530,10 +523,13 @@ out: glog.V(logger.Detail).Infof("%s: no blocks delivered", peer) break } - // All was successful, promote the peer + // All was successful, promote the peer and potentially start processing peer.Promote() peer.SetIdle() glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks)) + if atomic.LoadInt32(&d.processing) == 0 { + go d.process() + } case errInvalidChain: // The hash chain is invalid (blocks are not ordered properly), abort @@ -709,6 +705,71 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { } } +// process takes blocks from the queue and tries to import them into the chain. +func (d *Downloader) process() (err error) { + // Make sure only one goroutine is ever allowed to process blocks at once + if !atomic.CompareAndSwapInt32(&d.processing, 0, 1) { + return + } + // If the processor just exited, but there are freshly pending items, try to + // reenter. This is needed because the goroutine spinned up for processing + // the fresh blocks might have been rejected entry to to this present thread + // not yet releasing the `processing` state. + defer func() { + if err == nil && d.queue.GetHeadBlock() != nil { + err = d.process() + } + }() + // Release the lock upon exit (note, before checking for reentry!) 
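process() combines two patterns worth noting: an atomic compare-and-swap gate so only one goroutine ever imports blocks, and a deferred re-check that reenters if fresh work arrived while the gate was still held. A compact sketch of both (defers run last-in-first-out, so the busy flag is released before the reentry check executes):

package main

import (
	"fmt"
	"sync/atomic"
)

type processor struct {
	busy  int32
	queue []int
}

func (p *processor) process() (err error) {
	// Only one goroutine may process at a time.
	if !atomic.CompareAndSwapInt32(&p.busy, 0, 1) {
		return nil
	}
	// Registered first, runs last: reenter if new work slipped in after the
	// queue was drained but before the busy flag was cleared.
	defer func() {
		if err == nil && len(p.queue) != 0 {
			err = p.process()
		}
	}()
	// Registered second, runs first: release the flag before the reentry check.
	defer atomic.StoreInt32(&p.busy, 0)

	for len(p.queue) != 0 {
		item := p.queue[0]
		p.queue = p.queue[1:]
		fmt.Println("processed", item)
	}
	return nil
}

func main() {
	p := &processor{queue: []int{1, 2, 3}}
	p.process()
}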
+ defer atomic.StoreInt32(&d.processing, 0) + + // Fetch the current cancel channel to allow termination + d.cancelLock.RLock() + cancel := d.cancelCh + d.cancelLock.RUnlock() + + // Repeat the processing as long as there are blocks to import + for { + // Fetch the next batch of blocks + blocks := d.queue.TakeBlocks() + if len(blocks) == 0 { + return nil + } + // Reset the import statistics + d.importLock.Lock() + d.importStart = time.Now() + d.importQueue = blocks + d.importDone = 0 + d.importLock.Unlock() + + // Actually import the blocks + glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number()) + for len(blocks) != 0 { // TODO: quit + // Check for any termination requests + select { + case <-cancel: + return errCancelChainImport + default: + } + // Retrieve the first batch of blocks to insert + max := int(math.Min(float64(len(blocks)), float64(maxBlockProcess))) + raw := make(types.Blocks, 0, max) + for _, block := range blocks[:max] { + raw = append(raw, block.RawBlock) + } + // Try to inset the blocks, drop the originating peer if there's an error + index, err := d.insertChain(raw) + if err != nil { + glog.V(logger.Debug).Infoln("Block #%d import failed:", raw[index].NumberU64(), err) + d.dropPeer(blocks[index].OriginPeer) + d.Cancel() + return errCancelChainImport + } + blocks = blocks[max:] + } + } +} + // DeliverBlocks injects a new batch of blocks received from a remote node. // This is usually invoked through the BlocksMsg by the protocol handler. func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error { diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 5b85f01fb..6cd141ef7 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -2,8 +2,10 @@ package downloader import ( "encoding/binary" + "errors" "fmt" "math/big" + "sync/atomic" "testing" "time" @@ -81,7 +83,7 @@ func newTester() *downloadTester { peerBlocks: make(map[string]map[common.Hash]*types.Block), } var mux event.TypeMux - downloader := New(&mux, tester.hasBlock, tester.getBlock, tester.dropPeer) + downloader := New(&mux, tester.hasBlock, tester.getBlock, tester.insertChain, tester.dropPeer) tester.downloader = downloader return tester @@ -89,44 +91,14 @@ func newTester() *downloadTester { // sync starts synchronizing with a remote peer, blocking until it completes. func (dl *downloadTester) sync(id string) error { - return dl.downloader.synchronise(id, dl.peerHashes[id][0]) -} - -// syncTake is starts synchronising with a remote peer, but concurrently it also -// starts fetching blocks that the downloader retrieved. IT blocks until both go -// routines terminate. -func (dl *downloadTester) syncTake(id string) ([]*Block, error) { - // Start a block collector to take blocks as they become available - done := make(chan struct{}) - took := []*Block{} - go func() { - for running := true; running; { - select { - case <-done: - running = false - default: - time.Sleep(time.Millisecond) - } - // Take a batch of blocks and accumulate - blocks := dl.downloader.TakeBlocks() - for _, block := range blocks { - dl.ownHashes = append(dl.ownHashes, block.RawBlock.Hash()) - dl.ownBlocks[block.RawBlock.Hash()] = block.RawBlock - } - took = append(took, blocks...) 
- } - done <- struct{}{} - }() - // Start the downloading, sync the taker and return - err := dl.sync(id) - - done <- struct{}{} - <-done - - return took, err + err := dl.downloader.synchronise(id, dl.peerHashes[id][0]) + for atomic.LoadInt32(&dl.downloader.processing) == 1 { + time.Sleep(time.Millisecond) + } + return err } -// hasBlock checks if a block is present in the testers canonical chain. +// hasBlock checks if a block is pres ent in the testers canonical chain. func (dl *downloadTester) hasBlock(hash common.Hash) bool { return dl.getBlock(hash) != nil } @@ -136,6 +108,18 @@ func (dl *downloadTester) getBlock(hash common.Hash) *types.Block { return dl.ownBlocks[hash] } +// insertChain injects a new batch of blocks into the simulated chain. +func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) { + for i, block := range blocks { + if _, ok := dl.ownBlocks[block.ParentHash()]; !ok { + return i, errors.New("unknown parent") + } + dl.ownHashes = append(dl.ownHashes, block.Hash()) + dl.ownBlocks[block.Hash()] = block + } + return len(blocks), nil +} + // newPeer registers a new block download source into the downloader. func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block) error { err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id), dl.peerGetBlocksFn(id)) @@ -223,27 +207,8 @@ func TestSynchronisation(t *testing.T) { if err := tester.sync("peer"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks { - t.Fatalf("synchronised block mismatch: have %v, want %v", queued, targetBlocks) - } -} - -// Tests that the synchronized blocks can be correctly retrieved. -func TestBlockTaking(t *testing.T) { - // Create a small enough block chain to download and the tester - targetBlocks := blockCacheLimit - 15 - hashes := createHashes(targetBlocks, knownHash) - blocks := createBlocksFromHashes(hashes) - - tester := newTester() - tester.newPeer("peer", hashes, blocks) - - // Synchronise with the peer and test block retrieval - if err := tester.sync("peer"); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks { - t.Fatalf("took block mismatch: have %v, want %v", len(took), targetBlocks) + if imported := len(tester.ownBlocks); imported != targetBlocks+1 { + t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1) } } @@ -270,21 +235,21 @@ func TestCancel(t *testing.T) { tester := newTester() tester.newPeer("peer", hashes, blocks) + // Make sure canceling works with a pristine downloader + tester.downloader.Cancel() + hashCount, blockCount := tester.downloader.queue.Size() + if hashCount > 0 || blockCount > 0 { + t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount) + } // Synchronise with the peer, but cancel afterwards if err := tester.sync("peer"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if !tester.downloader.Cancel() { - t.Fatalf("cancel operation failed") - } - // Make sure the queue reports empty and no blocks can be taken - hashCount, blockCount := tester.downloader.queue.Size() + tester.downloader.Cancel() + hashCount, blockCount = tester.downloader.queue.Size() if hashCount > 0 || blockCount > 0 { t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount) } - if took := tester.downloader.TakeBlocks(); 
len(took) != 0 { - t.Errorf("taken blocks mismatch: have %d, want %d", len(took), 0) - } } // Tests that if a large batch of blocks are being downloaded, it is throttled @@ -298,29 +263,46 @@ func TestThrottling(t *testing.T) { tester := newTester() tester.newPeer("peer", hashes, blocks) + // Wrap the importer to allow stepping + done := make(chan int) + tester.downloader.insertChain = func(blocks types.Blocks) (int, error) { + n, err := tester.insertChain(blocks) + done <- n + return n, err + } // Start a synchronisation concurrently errc := make(chan error) go func() { errc <- tester.sync("peer") }() // Iteratively take some blocks, always checking the retrieval count - for total := 0; total < targetBlocks; { - // Wait a bit for sync to complete + for len(tester.ownBlocks) < targetBlocks+1 { + // Wait a bit for sync to throttle itself + var cached int for start := time.Now(); time.Since(start) < 3*time.Second; { time.Sleep(25 * time.Millisecond) - if len(tester.downloader.queue.blockPool) == blockCacheLimit { + + cached = len(tester.downloader.queue.blockPool) + if cached == blockCacheLimit || len(tester.ownBlocks)+cached == targetBlocks+1 { break } } - // Fetch the next batch of blocks - took := tester.downloader.TakeBlocks() - if len(took) != blockCacheLimit { - t.Fatalf("block count mismatch: have %v, want %v", len(took), blockCacheLimit) + // Make sure we filled up the cache, then exhaust it + time.Sleep(25 * time.Millisecond) // give it a chance to screw up + if cached != blockCacheLimit && len(tester.ownBlocks)+cached < targetBlocks+1 { + t.Fatalf("block count mismatch: have %v, want %v", cached, blockCacheLimit) } - total += len(took) - if total > targetBlocks { - t.Fatalf("target block count mismatch: have %v, want %v", total, targetBlocks) + <-done // finish previous blocking import + for cached > maxBlockProcess { + cached -= <-done } + time.Sleep(25 * time.Millisecond) // yield to the insertion + } + <-done // finish the last blocking import + + // Check that we haven't pulled more blocks than available + if len(tester.ownBlocks) > targetBlocks+1 { + t.Fatalf("target block count mismatch: have %v, want %v", len(tester.ownBlocks), targetBlocks+1) } if err := <-errc; err != nil { t.Fatalf("block synchronization failed: %v", err) @@ -343,28 +325,18 @@ func TestNonExistingParentAttack(t *testing.T) { tester.newPeer("attack", hashes, blocks) // Try and sync with the malicious node and check that it fails - if err := tester.sync("attack"); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) + if err := tester.sync("attack"); err == nil { + t.Fatalf("block synchronization succeeded") } - bs := tester.downloader.TakeBlocks() - if len(bs) != 1 { - t.Fatalf("retrieved block mismatch: have %v, want %v", len(bs), 1) + if tester.hasBlock(hashes[0]) { + t.Fatalf("tester accepted unknown-parent block: %v", blocks[hashes[0]]) } - if tester.hasBlock(bs[0].RawBlock.ParentHash()) { - t.Fatalf("tester knows about the unknown hash") - } - tester.downloader.Cancel() - // Try to synchronize with the valid chain and make sure it succeeds if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - bs = tester.downloader.TakeBlocks() - if len(bs) != 1 { - t.Fatalf("retrieved block mismatch: have %v, want %v", len(bs), 1) - } - if !tester.hasBlock(bs[0].RawBlock.ParentHash()) { - t.Fatalf("tester doesn't know about the origin hash") + if !tester.hasBlock(tester.peerHashes["valid"][0]) { + t.Fatalf("tester didn't accept known-parent block: %v", 
tester.peerBlocks["valid"][hashes[0]]) } } @@ -442,11 +414,11 @@ func TestInvalidHashOrderAttack(t *testing.T) { tester.newPeer("attack", hashes, blocks) // Try and sync with the malicious node and check that it fails - if _, err := tester.syncTake("attack"); err != errInvalidChain { + if err := tester.sync("attack"); err != errInvalidChain { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Ensure that a valid chain can still pass sync - if _, err := tester.syncTake("valid"); err != nil { + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -466,11 +438,11 @@ func TestMadeupHashChainAttack(t *testing.T) { tester.newPeer("attack", createHashes(1024*blockCacheLimit, knownHash), nil) // Try and sync with the malicious node and check that it fails - if _, err := tester.syncTake("attack"); err != errCrossCheckFailed { + if err := tester.sync("attack"); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } // Ensure that a valid chain can still pass sync - if _, err := tester.syncTake("valid"); err != nil { + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -487,7 +459,7 @@ func TestMadeupHashChainDrippingAttack(t *testing.T) { // Try and sync with the attacker, one hash at a time tester.maxHashFetch = 1 tester.newPeer("attack", hashes, nil) - if _, err := tester.syncTake("attack"); err != errStallingPeer { + if err := tester.sync("attack"); err != errStallingPeer { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } } @@ -512,7 +484,7 @@ func TestMadeupBlockChainAttack(t *testing.T) { // Try and sync with the malicious node and check that it fails tester := newTester() tester.newPeer("attack", gapped, blocks) - if _, err := tester.syncTake("attack"); err != errCrossCheckFailed { + if err := tester.sync("attack"); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } // Ensure that a valid chain can still pass sync @@ -520,7 +492,7 @@ func TestMadeupBlockChainAttack(t *testing.T) { crossCheckCycle = defaultCrossCheckCycle tester.newPeer("valid", hashes, blocks) - if _, err := tester.syncTake("valid"); err != nil { + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -548,14 +520,14 @@ func TestMadeupParentBlockChainAttack(t *testing.T) { tester.newPeer("attack", hashes, blocks) // Try and sync with the malicious node and check that it fails - if _, err := tester.syncTake("attack"); err != errCrossCheckFailed { + if err := tester.sync("attack"); err != errCrossCheckFailed { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed) } // Ensure that a valid chain can still pass sync blockSoftTTL = defaultBlockTTL crossCheckCycle = defaultCrossCheckCycle - if _, err := tester.syncTake("valid"); err != nil { + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -582,7 +554,7 @@ func TestBannedChainStarvationAttack(t *testing.T) { // the head of the invalid chain is blocked too. 
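Because blocks are now imported inside the downloader itself, TestThrottling can no longer pull them out manually; instead it wraps the injected insertChain callback so every import batch is also reported on a channel, letting the test pace the importer and watch the cache fill up. A sketch of that wrapper, with simplified types:

package main

import "fmt"

// wrapInsert decorates an insert callback so each invocation also reports the
// number of blocks it handled on done, allowing a test to observe the importer.
func wrapInsert(insert func([]int) (int, error), done chan<- int) func([]int) (int, error) {
	return func(blocks []int) (int, error) {
		n, err := insert(blocks)
		done <- n
		return n, err
	}
}

func main() {
	done := make(chan int, 1)
	insert := wrapInsert(func(blocks []int) (int, error) { return len(blocks), nil }, done)

	go insert([]int{1, 2, 3})
	fmt.Println("imported", <-done) // imported 3
}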
for banned := tester.downloader.banned.Size(); ; { // Try to sync with the attacker, check hash chain failure - if _, err := tester.syncTake("attack"); err != errInvalidChain { + if err := tester.sync("attack"); err != errInvalidChain { if tester.downloader.banned.Has(hashes[0]) && err == errBannedHead { break } @@ -603,7 +575,7 @@ func TestBannedChainStarvationAttack(t *testing.T) { t.Fatalf("banned attacker registered: %v", peer) } // Ensure that a valid chain can still pass sync - if _, err := tester.syncTake("valid"); err != nil { + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } @@ -637,7 +609,7 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) { // the head of the invalid chain is blocked too. for { // Try to sync with the attacker, check hash chain failure - if _, err := tester.syncTake("attack"); err != errInvalidChain { + if err := tester.sync("attack"); err != errInvalidChain { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain) } // Short circuit if the entire chain was banned @@ -658,33 +630,34 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) { MaxBlockFetch = defaultMaxBlockFetch maxBannedHashes = defaultMaxBannedHashes - if _, err := tester.syncTake("valid"); err != nil { + if err := tester.sync("valid"); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestAttackerDropping(t *testing.T) { - // Define the disconnection requirement for individual errors +func TestHashAttackerDropping(t *testing.T) { + // Define the disconnection requirement for individual hash fetch errors tests := []struct { result error drop bool }{ - {nil, false}, // Sync succeeded, all is well - {errBusy, false}, // Sync is already in progress, no problem - {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop - {errBadPeer, true}, // Peer was deemed bad for some reason, drop it - {errStallingPeer, true}, // Peer was detected to be stalling, drop it - {errBannedHead, true}, // Peer's head hash is a known bad hash, drop it - {errNoPeers, false}, // No peers to download from, soft race, no issue - {errPendingQueue, false}, // There are blocks still cached, wait to exhaust, no issue - {errTimeout, true}, // No hashes received in due time, drop the peer - {errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end - {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser - {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop - {errCrossCheckFailed, true}, // Hash-origin failed to pass a block cross check, drop - {errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop - {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {nil, false}, // Sync succeeded, all is well + {errBusy, false}, // Sync is already in progress, no problem + {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop + {errBadPeer, true}, // Peer was deemed bad for some reason, drop it + {errStallingPeer, true}, // Peer was detected to be stalling, drop it + {errBannedHead, true}, // Peer's head hash is a known bad hash, drop it + {errNoPeers, false}, // No peers to download from, soft race, no issue + {errPendingQueue, false}, // There are blocks still cached, wait to exhaust, no issue + {errTimeout, true}, // 
No hashes received in due time, drop the peer + {errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end + {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser + {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop + {errCrossCheckFailed, true}, // Hash-origin failed to pass a block cross check, drop + {errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop + {errCancelChainImport, false}, // Synchronisation was canceled, origin may be innocent, don't drop } // Run the tests and check disconnection status tester := newTester() @@ -706,3 +679,38 @@ func TestAttackerDropping(t *testing.T) { } } } + +// Tests that feeding bad blocks will result in a peer drop. +func TestBlockAttackerDropping(t *testing.T) { + // Define the disconnection requirement for individual block import errors + tests := []struct { + failure bool + drop bool + }{{true, true}, {false, false}} + + // Run the tests and check disconnection status + tester := newTester() + for i, tt := range tests { + // Register a new peer and ensure it's presence + id := fmt.Sprintf("test %d", i) + if err := tester.newPeer(id, []common.Hash{common.Hash{}}, nil); err != nil { + t.Fatalf("test %d: failed to register new peer: %v", i, err) + } + if _, ok := tester.peerHashes[id]; !ok { + t.Fatalf("test %d: registered peer not found", i) + } + // Assemble a good or bad block, depending of the test + raw := createBlock(1, knownHash, common.Hash{}) + if tt.failure { + raw = createBlock(1, unknownHash, common.Hash{}) + } + block := &Block{OriginPeer: id, RawBlock: raw} + + // Simulate block processing and check the result + tester.downloader.queue.blockCache[0] = block + tester.downloader.process() + if _, ok := tester.peerHashes[id]; !ok != tt.drop { + t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.failure, !ok, tt.drop) + } + } +} diff --git a/eth/handler.go b/eth/handler.go index ac7fb8fcf..ec4f2d53a 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -80,7 +80,7 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo txsyncCh: make(chan *txsync), quitSync: make(chan struct{}), } - manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.removePeer) + manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.InsertChain, manager.removePeer) manager.SubProtocol = p2p.Protocol{ Name: "eth", Version: uint(protocolVersion), diff --git a/eth/sync.go b/eth/sync.go index b127ca979..88a76805c 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -1,9 +1,7 @@ package eth import ( - "math" "math/rand" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -15,12 +13,10 @@ import ( const ( forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available - blockProcCycle = 500 * time.Millisecond // Time interval to check for new blocks to process notifyCheckCycle = 100 * time.Millisecond // Time interval to allow hash notifies to fulfill before hard fetching notifyArriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested notifyFetchTimeout = 5 * time.Second // Maximum alloted time to return an explicitly requested block minDesiredPeerCount = 5 // Amount of peers 
desired to start syncing - blockProcAmount = 256 // This is the target size for the packs of transactions sent by txsyncLoop. // A pack can get larger than this if a single transactions exceeds this size. @@ -254,10 +250,10 @@ func (pm *ProtocolManager) fetcher() { // syncer is responsible for periodically synchronising with the network, both // downloading hashes and blocks as well as retrieving cached ones. func (pm *ProtocolManager) syncer() { - forceSync := time.Tick(forceSyncCycle) - blockProc := time.Tick(blockProcCycle) - blockProcPend := int32(0) + // Abort any pending syncs if we terminate + defer pm.downloader.Cancel() + forceSync := time.Tick(forceSyncCycle) for { select { case <-pm.newPeerCh: @@ -271,55 +267,12 @@ func (pm *ProtocolManager) syncer() { // Force a sync even if not enough peers are present go pm.synchronise(pm.peers.BestPeer()) - case <-blockProc: - // Try to pull some blocks from the downloaded - if atomic.CompareAndSwapInt32(&blockProcPend, 0, 1) { - go func() { - pm.processBlocks() - atomic.StoreInt32(&blockProcPend, 0) - }() - } - case <-pm.quitSync: return } } } -// processBlocks retrieves downloaded blocks from the download cache and tries -// to construct the local block chain with it. Note, since the block retrieval -// order matters, access to this function *must* be synchronized/serialized. -func (pm *ProtocolManager) processBlocks() error { - pm.wg.Add(1) - defer pm.wg.Done() - - // Short circuit if no blocks are available for insertion - blocks := pm.downloader.TakeBlocks() - if len(blocks) == 0 { - return nil - } - glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number()) - - for len(blocks) != 0 && !pm.quit { - // Retrieve the first batch of blocks to insert - max := int(math.Min(float64(len(blocks)), float64(blockProcAmount))) - raw := make(types.Blocks, 0, max) - for _, block := range blocks[:max] { - raw = append(raw, block.RawBlock) - } - // Try to inset the blocks, drop the originating peer if there's an error - index, err := pm.chainman.InsertChain(raw) - if err != nil { - glog.V(logger.Debug).Infoln("Downloaded block import failed:", err) - pm.removePeer(blocks[index].OriginPeer) - pm.downloader.Cancel() - return err - } - blocks = blocks[max:] - } - return nil -} - // synchronise tries to sync up our local block chain with a remote peer, both // adding various sanity checks as well as wrapping it with various log entries. func (pm *ProtocolManager) synchronise(peer *peer) { -- cgit v1.2.3 From 30a9939388ac738aba39eb64c287bbf9bbda91c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 12 Jun 2015 14:36:44 +0300 Subject: eth/downloader: sanity test for multi peer syncs --- eth/downloader/downloader_test.go | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 6cd141ef7..9803ae534 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -309,6 +309,37 @@ func TestThrottling(t *testing.T) { } } +// Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
+func TestMultiSynchronisation(t *testing.T) { + // Create various peers with various parts of the chain + targetPeers := 16 + targetBlocks := targetPeers*blockCacheLimit - 15 + + hashes := createHashes(targetBlocks, knownHash) + blocks := createBlocksFromHashes(hashes) + + tester := newTester() + for i := 0; i < targetPeers; i++ { + id := fmt.Sprintf("peer #%d", i) + tester.newPeer(id, hashes[i*blockCacheLimit:], blocks) + } + // Synchronise with the middle peer and make sure half of the blocks were retrieved + id := fmt.Sprintf("peer #%d", targetPeers/2) + if err := tester.sync(id); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) + } + if imported := len(tester.ownBlocks); imported != len(tester.peerHashes[id]) { + t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(tester.peerHashes[id])) + } + // Synchronise with the best peer and make sure everything is retrieved + if err := tester.sync("peer #0"); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) + } + if imported := len(tester.ownBlocks); imported != targetBlocks+1 { + t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1) + } +} + // Tests that if a peer returns an invalid chain with a block pointing to a non- // existing parent, it is correctly detected and handled. func TestNonExistingParentAttack(t *testing.T) { -- cgit v1.2.3 From b240983e2bafcde1c5902ce3a196b22475412f16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 15 Jun 2015 12:26:05 +0300 Subject: eth, eth/downloader: do async block fetches, add dl tests --- eth/downloader/downloader_test.go | 48 ++++++++++++++++++++++++++++++++++++--- eth/downloader/peer.go | 2 +- eth/sync.go | 2 +- 3 files changed, 47 insertions(+), 5 deletions(-) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 9803ae534..f71c16237 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -122,7 +122,14 @@ func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) { // newPeer registers a new block download source into the downloader. func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block) error { - err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id), dl.peerGetBlocksFn(id)) + return dl.newSlowPeer(id, hashes, blocks, 0) +} + +// newSlowPeer registers a new block download source into the downloader, with a +// specific delay time on processing the network packets sent to it, simulating +// potentially slow network IO. +func (dl *downloadTester) newSlowPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block, delay time.Duration) error { + err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id, delay), dl.peerGetBlocksFn(id, delay)) if err == nil { // Assign the owned hashes and blocks to the peer (deep copy) dl.peerHashes[id] = make([]common.Hash, len(hashes)) @@ -147,8 +154,10 @@ func (dl *downloadTester) dropPeer(id string) { // peerGetBlocksFn constructs a getHashes function associated with a particular // peer in the download tester. The returned function can be used to retrieve // batches of hashes from the particularly requested peer. 
-func (dl *downloadTester) peerGetHashesFn(id string) func(head common.Hash) error { +func (dl *downloadTester) peerGetHashesFn(id string, delay time.Duration) func(head common.Hash) error { return func(head common.Hash) error { + time.Sleep(delay) + limit := MaxHashFetch if dl.maxHashFetch > 0 { limit = dl.maxHashFetch @@ -178,8 +187,10 @@ func (dl *downloadTester) peerGetHashesFn(id string) func(head common.Hash) erro // peerGetBlocksFn constructs a getBlocks function associated with a particular // peer in the download tester. The returned function can be used to retrieve // batches of blocks from the particularly requested peer. -func (dl *downloadTester) peerGetBlocksFn(id string) func([]common.Hash) error { +func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error { return func(hashes []common.Hash) error { + time.Sleep(delay) + blocks := dl.peerBlocks[id] result := make([]*types.Block, 0, len(hashes)) for _, hash := range hashes { @@ -340,6 +351,37 @@ func TestMultiSynchronisation(t *testing.T) { } } +// Tests that synchronising with a peer who's very slow at network IO does not +// stall the other peers in the system. +func TestSlowSynchronisation(t *testing.T) { + tester := newTester() + + // Create a batch of blocks, with a slow and a full speed peer + targetCycles := 2 + targetBlocks := targetCycles*blockCacheLimit - 15 + targetIODelay := 500 * time.Millisecond + + hashes := createHashes(targetBlocks, knownHash) + blocks := createBlocksFromHashes(hashes) + + tester.newSlowPeer("fast", hashes, blocks, 0) + tester.newSlowPeer("slow", hashes, blocks, targetIODelay) + + // Try to sync with the peers (pull hashes from fast) + start := time.Now() + if err := tester.sync("fast"); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) + } + if imported := len(tester.ownBlocks); imported != targetBlocks+1 { + t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1) + } + // Check that the slow peer got hit at most once per block-cache-size import + limit := time.Duration(targetCycles+1) * targetIODelay + if delay := time.Since(start); delay >= limit { + t.Fatalf("synchronisation exceeded delay limit: have %v, want %v", delay, limit) + } +} + // Tests that if a peer returns an invalid chain with a block pointing to a non- // existing parent, it is correctly detected and handled. 
func TestNonExistingParentAttack(t *testing.T) { diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 9614a6951..f36e133e4 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -74,7 +74,7 @@ func (p *peer) Fetch(request *fetchRequest) error { for hash, _ := range request.Hashes { hashes = append(hashes, hash) } - p.getBlocks(hashes) + go p.getBlocks(hashes) return nil } diff --git a/eth/sync.go b/eth/sync.go index 88a76805c..917fc0fce 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -171,7 +171,7 @@ func (pm *ProtocolManager) fetcher() { // Send out all block requests for peer, hashes := range request { glog.V(logger.Debug).Infof("Explicitly fetching %d blocks from %s", len(hashes), peer.id) - peer.requestBlocks(hashes) + go peer.requestBlocks(hashes) } request = make(map[*peer][]common.Hash) -- cgit v1.2.3 From 6d817e16c1c17f7cad4a34fa91457e21f63f2de4 Mon Sep 17 00:00:00 2001 From: obscuren Date: Mon, 15 Jun 2015 11:33:08 +0200 Subject: core, miner: tx pool drops txs below ask price --- core/transaction_pool.go | 19 ++++++++++++++++--- eth/backend.go | 1 + miner/miner.go | 2 +- miner/worker.go | 7 +++++-- 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/core/transaction_pool.go b/core/transaction_pool.go index 4a0594228..8f917e96a 100644 --- a/core/transaction_pool.go +++ b/core/transaction_pool.go @@ -19,6 +19,7 @@ var ( // Transaction Pool Errors ErrInvalidSender = errors.New("Invalid sender") ErrNonce = errors.New("Nonce too low") + ErrCheap = errors.New("Gas price too low for acceptance") ErrBalance = errors.New("Insufficient balance") ErrNonExistentAccount = errors.New("Account does not exist or account balance too low") ErrInsufficientFunds = errors.New("Insufficient funds for gas * price + value") @@ -41,6 +42,7 @@ type TxPool struct { currentState stateFn // The state function which will allow us to do some pre checkes pendingState *state.ManagedState gasLimit func() *big.Int // The current gas limit function callback + minGasPrice *big.Int eventMux *event.TypeMux events event.Subscription @@ -57,8 +59,9 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func( eventMux: eventMux, currentState: currentStateFn, gasLimit: gasLimitFn, + minGasPrice: new(big.Int), pendingState: state.ManageState(currentStateFn()), - events: eventMux.Subscribe(ChainEvent{}), + events: eventMux.Subscribe(ChainEvent{}, GasPriceChanged{}), } go pool.eventLoop() @@ -69,10 +72,15 @@ func (pool *TxPool) eventLoop() { // Track chain events. When a chain events occurs (new chain canon block) // we need to know the new state. The new state will help us determine // the nonces in the managed state - for _ = range pool.events.Chan() { + for ev := range pool.events.Chan() { pool.mu.Lock() - pool.resetState() + switch ev := ev.(type) { + case ChainEvent: + pool.resetState() + case GasPriceChanged: + pool.minGasPrice = ev.Price + } pool.mu.Unlock() } @@ -124,6 +132,11 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error { err error ) + // Drop transactions under our own minimal accepted gas price + if pool.minGasPrice.Cmp(tx.GasPrice()) > 0 { + return ErrCheap + } + // Validate the transaction sender and it's sig. Throw // if the from fields is invalid. 
if from, err = tx.From(); err != nil { diff --git a/eth/backend.go b/eth/backend.go index d2ec0cc62..6b7eb736f 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -292,6 +292,7 @@ func New(config *Config) (*Ethereum, error) { } eth.downloader = downloader.New(eth.EventMux(), eth.chainManager.HasBlock, eth.chainManager.GetBlock) eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit) + eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.chainManager, eth.EventMux()) eth.chainManager.SetProcessor(eth.blockProcessor) eth.miner = miner.New(eth, eth.EventMux(), eth.pow) diff --git a/miner/miner.go b/miner/miner.go index 20ca81648..7f73f3ee8 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -77,7 +77,7 @@ func (m *Miner) SetGasPrice(price *big.Int) { return } - m.worker.gasPrice = price + m.worker.setGasPrice(price) } func (self *Miner) Start(coinbase common.Address, threads int) { diff --git a/miner/worker.go b/miner/worker.go index bd4bc0e3c..d339507ca 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -6,6 +6,7 @@ import ( "sort" "sync" "sync/atomic" + "time" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" @@ -374,6 +375,8 @@ func (self *worker) commitNewWork() { self.currentMu.Lock() defer self.currentMu.Unlock() + tstart := time.Now() + previous := self.current self.makeCurrent() current := self.current @@ -409,7 +412,7 @@ func (self *worker) commitNewWork() { // We only care about logging if we're actually mining if atomic.LoadInt32(&self.mining) == 1 { - glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles\n", current.block.Number(), current.tcount, len(uncles)) + glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", current.block.Number(), current.tcount, len(uncles), time.Since(tstart)) self.logLocalMinedBlocks(previous) } @@ -437,7 +440,6 @@ func (self *worker) commitUncle(uncle *types.Header) error { // Error not unique return core.UncleError("Uncle not unique") } - self.current.uncles.Add(uncle.Hash()) if !self.current.ancestors.Has(uncle.ParentHash) { return core.UncleError(fmt.Sprintf("Uncle's parent unknown (%x)", uncle.ParentHash[0:4])) @@ -446,6 +448,7 @@ func (self *worker) commitUncle(uncle *types.Header) error { if self.current.family.Has(uncle.Hash()) { return core.UncleError(fmt.Sprintf("Uncle already in family (%x)", uncle.Hash())) } + self.current.uncles.Add(uncle.Hash()) return nil } -- cgit v1.2.3 From 9c03c374e32541c905adb9a3b8783cd721117030 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 15 Jun 2015 13:05:01 +0300 Subject: eth/downloader: fix import statistic reset, fetch hashes async --- eth/downloader/downloader.go | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 1bbba11ed..7f8ef12ee 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -316,13 +316,8 @@ func (d *Downloader) Cancel() { } d.cancelLock.Unlock() - // Reset the queue and import statistics + // Reset the queue d.queue.Reset() - - d.importLock.Lock() - d.importQueue = nil - d.importDone = 0 - d.importLock.Unlock() } // fetchHahes starts retrieving hashes backwards from a specific peer and hash, @@ -345,7 +340,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { <-timeout.C // timeout channel should be initially empty. 
getHashes := func(from common.Hash) { - active.getHashes(from) + go active.getHashes(from) timeout.Reset(hashTTL) } @@ -414,9 +409,9 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { expire: time.Now().Add(blockSoftTTL), parent: parent, } - active.getBlocks([]common.Hash{origin}) + go active.getBlocks([]common.Hash{origin}) - // Also fetch a fresh + // Also fetch a fresh batch of hashes getHashes(head) continue } @@ -720,8 +715,16 @@ func (d *Downloader) process() (err error) { err = d.process() } }() - // Release the lock upon exit (note, before checking for reentry!) - defer atomic.StoreInt32(&d.processing, 0) + // Release the lock upon exit (note, before checking for reentry!), and set + // the import statistics to zero. + defer func() { + d.importLock.Lock() + d.importQueue = nil + d.importDone = 0 + d.importLock.Unlock() + + atomic.StoreInt32(&d.processing, 0) + }() // Fetch the current cancel channel to allow termination d.cancelLock.RLock() -- cgit v1.2.3 From 21fa29111b3cd12e3748fcb6310e6a18c5562f17 Mon Sep 17 00:00:00 2001 From: obscuren Date: Mon, 15 Jun 2015 12:16:29 +0200 Subject: core: reduce max allowed queued txs per address Transactions in the queue are now capped to a maximum of 200 transactions. This number is completely arbitrary. --- common/types.go | 11 +++++++++++ core/transaction_pool.go | 19 +++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/common/types.go b/common/types.go index 183d48fb3..d05c21eec 100644 --- a/common/types.go +++ b/common/types.go @@ -1,6 +1,7 @@ package common import ( + "fmt" "math/big" "math/rand" "reflect" @@ -95,3 +96,13 @@ func (a *Address) Set(other Address) { a[i] = v } } + +// PP Pretty Prints a byte slice in the following format: +// hex(value[:4])...(hex[len(value)-4:]) +func PP(value []byte) string { + if len(value) <= 8 { + return Bytes2Hex(value) + } + + return fmt.Sprintf("%x...%x", value[:4], value[len(value)-4]) +} diff --git a/core/transaction_pool.go b/core/transaction_pool.go index 8f917e96a..ce6fed1a9 100644 --- a/core/transaction_pool.go +++ b/core/transaction_pool.go @@ -28,6 +28,10 @@ var ( ErrNegativeValue = errors.New("Negative value") ) +const ( + maxQueued = 200 // max limit of queued txs per address +) + type stateFn func() *state.StateDB // TxPool contains all currently known transactions. Transactions @@ -224,6 +228,21 @@ func (self *TxPool) queueTx(hash common.Hash, tx *types.Transaction) { self.queue[from] = make(map[common.Hash]*types.Transaction) } self.queue[from][hash] = tx + + if len(self.queue[from]) > maxQueued { + var ( + worstHash common.Hash + worstNonce uint64 + ) + for hash, tx := range self.queue[from] { + if tx.Nonce() > worstNonce { + worstNonce = tx.Nonce() + worstHash = hash + } + } + glog.V(logger.Debug).Infof("Queued tx limit exceeded for %x. 
Removed worst nonce tx: %x\n", common.PP(from[:]), common.PP(worstHash[:])) + delete(self.queue[from], worstHash) + } } // addTx will add a transaction to the pending (processable queue) list of transactions -- cgit v1.2.3 From cf7c44a7f6e5422148e0be98186d5570ce3e0ac5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 15 Jun 2015 15:18:04 +0300 Subject: eth/downloader: detailed comment for the race corner case --- eth/downloader/downloader.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 7f8ef12ee..306c4fd2d 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -522,9 +522,7 @@ out: peer.Promote() peer.SetIdle() glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks)) - if atomic.LoadInt32(&d.processing) == 0 { - go d.process() - } + go d.process() case errInvalidChain: // The hash chain is invalid (blocks are not ordered properly), abort @@ -701,6 +699,19 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { } // process takes blocks from the queue and tries to import them into the chain. +// +// The algorithmic flow is as follows: +// - The `processing` flag is swapped to 1 to ensure singleton access +// - The current `cancel` channel is retrieved to detect sync abortions +// - Blocks are iteratively taken from the cache and inserted into the chain +// - When the cache becomes empty, insertion stops +// - The `processing` flag is swapped back to 0 +// - A post-exit check is made whether new blocks became available +// - This step is important: it handles a potential race condition between +// checking for no more work, and releasing the processing "mutex". In +// between these state changes, a block may have arrived, but a processing +// attempt denied, so we need to re-enter to ensure the block isn't left +// to idle in the cache. func (d *Downloader) process() (err error) { // Make sure only one goroutine is ever allowed to process blocks at once if !atomic.CompareAndSwapInt32(&d.processing, 0, 1) { @@ -763,7 +774,7 @@ func (d *Downloader) process() (err error) { // Try to inset the blocks, drop the originating peer if there's an error index, err := d.insertChain(raw) if err != nil { - glog.V(logger.Debug).Infoln("Block #%d import failed:", raw[index].NumberU64(), err) + glog.V(logger.Debug).Infof("Block #%d import failed: %v", raw[index].NumberU64(), err) d.dropPeer(blocks[index].OriginPeer) d.Cancel() return errCancelChainImport -- cgit v1.2.3 From aa250e228a7f2eec5d512d05eb042b75e2755d30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 15 Jun 2015 15:18:31 +0300 Subject: eth: don't refetch non fitting blocks to avoid duplicates --- eth/sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/sync.go b/eth/sync.go index 917fc0fce..a3b177a4d 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -214,7 +214,7 @@ func (pm *ProtocolManager) fetcher() { if announce := pending[hash]; announce != nil { // Drop the block if it surely cannot fit if pm.chainman.HasBlock(hash) || !pm.chainman.HasBlock(block.ParentHash()) { - delete(pending, hash) + // delete(pending, hash) // if we drop, it will re-fetch it, wait for timeout? 
continue } // Otherwise accumulate for import -- cgit v1.2.3 From 8dcbdcad0a92b053c7a5da2dc00b679c0044d050 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 15 Jun 2015 13:42:44 +0200 Subject: p2p: track write errors and prevent writes during shutdown As of this commit, we no longer rely on the protocol handler to report write errors in a timely fashion. When a write fails, shutdown is initiated immediately and no new writes can start. This will also prevent new writes from starting after Server.Stop has been called. --- p2p/peer.go | 82 ++++++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 57 insertions(+), 25 deletions(-) diff --git a/p2p/peer.go b/p2p/peer.go index cbe5ccc84..5489273bd 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -115,37 +115,54 @@ func newPeer(conn *conn, protocols []Protocol) *Peer { } func (p *Peer) run() DiscReason { - readErr := make(chan error, 1) + var ( + writeStart = make(chan struct{}, 1) + writeErr = make(chan error, 1) + readErr = make(chan error, 1) + reason DiscReason + requested bool + ) p.wg.Add(2) go p.readLoop(readErr) go p.pingLoop() - p.startProtocols() + // Start all protocol handlers. + writeStart <- struct{}{} + p.startProtocols(writeStart, writeErr) // Wait for an error or disconnect. - var ( - reason DiscReason - requested bool - ) - select { - case err := <-readErr: - if r, ok := err.(DiscReason); ok { - reason = r - } else { - // Note: We rely on protocols to abort if there is a write - // error. It might be more robust to handle them here as well. - glog.V(logger.Detail).Infof("%v: Read error: %v\n", p, err) - reason = DiscNetworkError +loop: + for { + select { + case err := <-writeErr: + // A write finished. Allow the next write to start if + // there was no error. + if err != nil { + glog.V(logger.Detail).Infof("%v: Write error: %v\n", p, err) + reason = DiscNetworkError + break loop + } + writeStart <- struct{}{} + case err := <-readErr: + if r, ok := err.(DiscReason); ok { + reason = r + } else { + glog.V(logger.Detail).Infof("%v: Read error: %v\n", p, err) + reason = DiscNetworkError + } + break loop + case err := <-p.protoErr: + reason = discReasonForError(err) + break loop + case reason = <-p.disc: + requested = true + break loop } - case err := <-p.protoErr: - reason = discReasonForError(err) - case reason = <-p.disc: - requested = true } + close(p.closed) p.rw.close(reason) p.wg.Wait() - if requested { reason = DiscRequested } @@ -247,11 +264,13 @@ outer: return result } -func (p *Peer) startProtocols() { +func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error) { p.wg.Add(len(p.running)) for _, proto := range p.running { proto := proto proto.closed = p.closed + proto.wstart = writeStart + proto.werr = writeErr glog.V(logger.Detail).Infof("%v: Starting protocol %s/%d\n", p, proto.Name, proto.Version) go func() { err := proto.Run(p, proto) @@ -280,18 +299,31 @@ func (p *Peer) getProto(code uint64) (*protoRW, error) { type protoRW struct { Protocol - in chan Msg - closed <-chan struct{} + in chan Msg // receices read messages + closed <-chan struct{} // receives when peer is shutting down + wstart <-chan struct{} // receives when write may start + werr chan<- error // for write results offset uint64 w MsgWriter } -func (rw *protoRW) WriteMsg(msg Msg) error { +func (rw *protoRW) WriteMsg(msg Msg) (err error) { if msg.Code >= rw.Length { return newPeerError(errInvalidMsgCode, "not handled") } msg.Code += rw.offset - return rw.w.WriteMsg(msg) + select { + case <-rw.wstart: + err = 
rw.w.WriteMsg(msg) + // Report write status back to Peer.run. It will initiate + // shutdown if the error is non-nil and unblock the next write + // otherwise. The calling protocol code should exit for errors + // as well but we don't want to rely on that. + rw.werr <- err + case <-rw.closed: + err = fmt.Errorf("shutting down") + } + return err } func (rw *protoRW) ReadMsg() (Msg, error) { -- cgit v1.2.3 From 70da79f04c14e562c024e85c6b081b6b4b8e45ec Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 15 Jun 2015 14:00:50 +0200 Subject: p2p: improve disconnect logging --- p2p/peer.go | 11 ++++++----- p2p/peer_test.go | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/p2p/peer.go b/p2p/peer.go index 5489273bd..40466cf84 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -138,24 +138,27 @@ loop: // A write finished. Allow the next write to start if // there was no error. if err != nil { - glog.V(logger.Detail).Infof("%v: Write error: %v\n", p, err) + glog.V(logger.Detail).Infof("%v: write error: %v\n", p, err) reason = DiscNetworkError break loop } writeStart <- struct{}{} case err := <-readErr: if r, ok := err.(DiscReason); ok { + glog.V(logger.Debug).Infof("%v: remote requested disconnect: %v\n", p, r) + requested = true reason = r } else { - glog.V(logger.Detail).Infof("%v: Read error: %v\n", p, err) + glog.V(logger.Detail).Infof("%v: read error: %v\n", p, err) reason = DiscNetworkError } break loop case err := <-p.protoErr: reason = discReasonForError(err) + glog.V(logger.Debug).Infof("%v: protocol error: %v (%v)\n", p, err, reason) break loop case reason = <-p.disc: - requested = true + glog.V(logger.Debug).Infof("%v: locally requested disconnect: %v\n", p, reason) break loop } } @@ -166,7 +169,6 @@ loop: if requested { reason = DiscRequested } - glog.V(logger.Debug).Infof("%v: Disconnected: %v\n", p, reason) return reason } @@ -213,7 +215,6 @@ func (p *Peer) handle(msg Msg) error { // This is the last message. We don't need to discard or // check errors because, the connection will be closed after it. rlp.Decode(msg.Payload, &reason) - glog.V(logger.Debug).Infof("%v: Disconnect Requested: %v\n", p, reason[0]) return reason[0] case msg.Code < baseProtocolLength: // ignore other base protocol messages diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 7b772e198..575d0ff79 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -121,7 +121,7 @@ func TestPeerDisconnect(t *testing.T) { } select { case reason := <-disc: - if reason != DiscQuitting { + if reason != DiscRequested { t.Errorf("run returned wrong reason: got %v, want %v", reason, DiscRequested) } case <-time.After(500 * time.Millisecond): -- cgit v1.2.3 From 3f94d09c1f07538c3fc72c72609037c47c04c4b5 Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Tue, 26 May 2015 14:17:43 +0200 Subject: fixed saving receipts --- cmd/geth/main.go | 6 ++++ cmd/utils/flags.go | 90 ++++++++++++++++++++++++++++++++++--------------- core/block_processor.go | 22 ++++++++++++ eth/backend.go | 52 +++++++++++++++++++--------- rpc/api.go | 2 +- xeth/xeth.go | 26 ++++++++------ 6 files changed, 144 insertions(+), 54 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 0f2438cfd..1739fbc6b 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -256,6 +256,12 @@ JavaScript API. 
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso utils.PProfEanbledFlag, utils.PProfPortFlag, utils.SolcPathFlag, + utils.GpoMinGasPriceFlag, + utils.GpoMaxGasPriceFlag, + utils.GpoFullBlockRatioFlag, + utils.GpobaseStepDownFlag, + utils.GpobaseStepUpFlag, + utils.GpobaseCorrectionFactorFlag, } app.Before = func(ctx *cli.Context) error { utils.SetupLogger(ctx) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ec29598fb..0f5e443e4 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -276,6 +276,36 @@ var ( Usage: "solidity compiler to be used", Value: "solc", } + GpoMinGasPriceFlag = cli.StringFlag{ + Name: "gpomin", + Usage: "Minimum suggested gas price", + Value: new(big.Int).Mul(big.NewInt(10), common.Szabo).String(), + } + GpoMaxGasPriceFlag = cli.StringFlag{ + Name: "gpomax", + Usage: "Maximum suggested gas price", + Value: new(big.Int).Mul(big.NewInt(1000), common.Szabo).String(), + } + GpoFullBlockRatioFlag = cli.IntFlag{ + Name: "gpofull", + Usage: "Full block threshold for gas price calculation (%)", + Value: 80, + } + GpobaseStepDownFlag = cli.IntFlag{ + Name: "gpobasedown", + Usage: "Suggested gas price base step down ratio (1/1000)", + Value: 10, + } + GpobaseStepUpFlag = cli.IntFlag{ + Name: "gpobaseup", + Usage: "Suggested gas price base step up ratio (1/1000)", + Value: 100, + } + GpobaseCorrectionFactorFlag = cli.IntFlag{ + Name: "gpobasecf", + Usage: "Suggested gas price base correction factor (%)", + Value: 110, + } ) // MakeNAT creates a port mapper from set command line flags. @@ -313,33 +343,39 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config { clientID += "/" + customName } return ð.Config{ - Name: common.MakeName(clientID, version), - DataDir: ctx.GlobalString(DataDirFlag.Name), - ProtocolVersion: ctx.GlobalInt(ProtocolVersionFlag.Name), - GenesisNonce: ctx.GlobalInt(GenesisNonceFlag.Name), - BlockChainVersion: ctx.GlobalInt(BlockchainVersionFlag.Name), - SkipBcVersionCheck: false, - NetworkId: ctx.GlobalInt(NetworkIdFlag.Name), - LogFile: ctx.GlobalString(LogFileFlag.Name), - Verbosity: ctx.GlobalInt(VerbosityFlag.Name), - LogJSON: ctx.GlobalString(LogJSONFlag.Name), - Etherbase: ctx.GlobalString(EtherbaseFlag.Name), - MinerThreads: ctx.GlobalInt(MinerThreadsFlag.Name), - AccountManager: MakeAccountManager(ctx), - VmDebug: ctx.GlobalBool(VMDebugFlag.Name), - MaxPeers: ctx.GlobalInt(MaxPeersFlag.Name), - MaxPendingPeers: ctx.GlobalInt(MaxPendingPeersFlag.Name), - Port: ctx.GlobalString(ListenPortFlag.Name), - NAT: MakeNAT(ctx), - NatSpec: ctx.GlobalBool(NatspecEnabledFlag.Name), - Discovery: !ctx.GlobalBool(NoDiscoverFlag.Name), - NodeKey: MakeNodeKey(ctx), - Shh: ctx.GlobalBool(WhisperEnabledFlag.Name), - Dial: true, - BootNodes: ctx.GlobalString(BootnodesFlag.Name), - GasPrice: common.String2Big(ctx.GlobalString(GasPriceFlag.Name)), - SolcPath: ctx.GlobalString(SolcPathFlag.Name), - AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name), + Name: common.MakeName(clientID, version), + DataDir: ctx.GlobalString(DataDirFlag.Name), + ProtocolVersion: ctx.GlobalInt(ProtocolVersionFlag.Name), + GenesisNonce: ctx.GlobalInt(GenesisNonceFlag.Name), + BlockChainVersion: ctx.GlobalInt(BlockchainVersionFlag.Name), + SkipBcVersionCheck: false, + NetworkId: ctx.GlobalInt(NetworkIdFlag.Name), + LogFile: ctx.GlobalString(LogFileFlag.Name), + Verbosity: ctx.GlobalInt(VerbosityFlag.Name), + LogJSON: ctx.GlobalString(LogJSONFlag.Name), + Etherbase: ctx.GlobalString(EtherbaseFlag.Name), + 
MinerThreads: ctx.GlobalInt(MinerThreadsFlag.Name), + AccountManager: MakeAccountManager(ctx), + VmDebug: ctx.GlobalBool(VMDebugFlag.Name), + MaxPeers: ctx.GlobalInt(MaxPeersFlag.Name), + MaxPendingPeers: ctx.GlobalInt(MaxPendingPeersFlag.Name), + Port: ctx.GlobalString(ListenPortFlag.Name), + NAT: MakeNAT(ctx), + NatSpec: ctx.GlobalBool(NatspecEnabledFlag.Name), + Discovery: !ctx.GlobalBool(NoDiscoverFlag.Name), + NodeKey: MakeNodeKey(ctx), + Shh: ctx.GlobalBool(WhisperEnabledFlag.Name), + Dial: true, + BootNodes: ctx.GlobalString(BootnodesFlag.Name), + GasPrice: common.String2Big(ctx.GlobalString(GasPriceFlag.Name)), + GpoMinGasPrice: common.String2Big(ctx.GlobalString(GpoMinGasPriceFlag.Name)), + GpoMaxGasPrice: common.String2Big(ctx.GlobalString(GpoMaxGasPriceFlag.Name)), + GpoFullBlockRatio: ctx.GlobalInt(GpoFullBlockRatioFlag.Name), + GpobaseStepDown: ctx.GlobalInt(GpobaseStepDownFlag.Name), + GpobaseStepUp: ctx.GlobalInt(GpobaseStepUpFlag.Name), + GpobaseCorrectionFactor: ctx.GlobalInt(GpobaseCorrectionFactorFlag.Name), + SolcPath: ctx.GlobalString(SolcPathFlag.Name), + AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name), } } diff --git a/core/block_processor.go b/core/block_processor.go index 54378b2b9..0ed30ca21 100644 --- a/core/block_processor.go +++ b/core/block_processor.go @@ -260,9 +260,31 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st putTx(sm.extraDb, tx, block, uint64(i)) } + receiptsRlp := receipts.RlpEncode() + /*if len(receipts) > 0 { + glog.V(logger.Info).Infof("Saving %v receipts, rlp len is %v\n", len(receipts), len(receiptsRlp)) + }*/ + sm.extraDb.Put(append(receiptsPre, block.Hash().Bytes()...), receiptsRlp) + return state.Logs(), nil } +func (self *BlockProcessor) GetBlockReceipts(bhash common.Hash) (receipts types.Receipts, err error) { + var rdata []byte + rdata, err = self.extraDb.Get(append(receiptsPre, bhash[:]...)) + + if err == nil { + err = rlp.DecodeBytes(rdata, &receipts) + } else { + glog.V(logger.Detail).Infof("GetBlockReceipts error %v\n", err) + } + /*if len(receipts) > 0 { + glog.V(logger.Info).Infof("GBR len %v\n", len(receipts)) + }*/ + return + +} + // See YP section 4.3.4. "Block Header Validity" // Validates a block. Returns an error if the block is invalid. func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header, checkPow bool) error { diff --git a/eth/backend.go b/eth/backend.go index d2ec0cc62..c24b4e877 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -93,6 +93,13 @@ type Config struct { AccountManager *accounts.Manager SolcPath string + GpoMinGasPrice *big.Int + GpoMaxGasPrice *big.Int + GpoFullBlockRatio int + GpobaseStepDown int + GpobaseStepUp int + GpobaseCorrectionFactor int + // NewDB is used to create databases. // If nil, the default is to create leveldb databases on disk. 
NewDB func(path string) (common.Database, error) @@ -197,6 +204,13 @@ type Ethereum struct { SolcPath string solc *compiler.Solidity + GpoMinGasPrice *big.Int + GpoMaxGasPrice *big.Int + GpoFullBlockRatio int + GpobaseStepDown int + GpobaseStepUp int + GpobaseCorrectionFactor int + net *p2p.Server eventMux *event.TypeMux miner *miner.Miner @@ -266,22 +280,28 @@ func New(config *Config) (*Ethereum, error) { glog.V(logger.Info).Infof("Blockchain DB Version: %d", config.BlockChainVersion) eth := &Ethereum{ - shutdownChan: make(chan bool), - databasesClosed: make(chan bool), - blockDb: blockDb, - stateDb: stateDb, - extraDb: extraDb, - eventMux: &event.TypeMux{}, - accountManager: config.AccountManager, - DataDir: config.DataDir, - etherbase: common.HexToAddress(config.Etherbase), - clientVersion: config.Name, // TODO should separate from Name - ethVersionId: config.ProtocolVersion, - netVersionId: config.NetworkId, - NatSpec: config.NatSpec, - MinerThreads: config.MinerThreads, - SolcPath: config.SolcPath, - AutoDAG: config.AutoDAG, + shutdownChan: make(chan bool), + databasesClosed: make(chan bool), + blockDb: blockDb, + stateDb: stateDb, + extraDb: extraDb, + eventMux: &event.TypeMux{}, + accountManager: config.AccountManager, + DataDir: config.DataDir, + etherbase: common.HexToAddress(config.Etherbase), + clientVersion: config.Name, // TODO should separate from Name + ethVersionId: config.ProtocolVersion, + netVersionId: config.NetworkId, + NatSpec: config.NatSpec, + MinerThreads: config.MinerThreads, + SolcPath: config.SolcPath, + AutoDAG: config.AutoDAG, + GpoMinGasPrice: config.GpoMinGasPrice, + GpoMaxGasPrice: config.GpoMaxGasPrice, + GpoFullBlockRatio: config.GpoFullBlockRatio, + GpobaseStepDown: config.GpobaseStepDown, + GpobaseStepUp: config.GpobaseStepUp, + GpobaseCorrectionFactor: config.GpobaseCorrectionFactor, } eth.pow = ethash.New() diff --git a/rpc/api.go b/rpc/api.go index e35395734..8b9a080f8 100644 --- a/rpc/api.go +++ b/rpc/api.go @@ -59,7 +59,7 @@ func (api *EthereumApi) GetRequestReply(req *RpcRequest, reply *interface{}) err case "eth_mining": *reply = api.xeth().IsMining() case "eth_gasPrice": - v := xeth.DefaultGasPrice() + v := api.xeth().DefaultGasPrice() *reply = newHexNum(v.Bytes()) case "eth_accounts": *reply = api.xeth().Accounts() diff --git a/xeth/xeth.go b/xeth/xeth.go index d2f992084..1044b02f6 100644 --- a/xeth/xeth.go +++ b/xeth/xeth.go @@ -39,8 +39,11 @@ const ( LogFilterTy ) -func DefaultGas() *big.Int { return new(big.Int).Set(defaultGas) } -func DefaultGasPrice() *big.Int { return new(big.Int).Set(defaultGasPrice) } +func DefaultGas() *big.Int { return new(big.Int).Set(defaultGas) } + +func (self *XEth) DefaultGasPrice() *big.Int { + return self.gpo.SuggestPrice() +} type XEth struct { backend *eth.Ethereum @@ -68,6 +71,8 @@ type XEth struct { // register map[string][]*interface{} // TODO improve return type agent *miner.RemoteAgent + + gpo *eth.GasPriceOracle } func NewTest(eth *eth.Ethereum, frontend Frontend) *XEth { @@ -80,22 +85,23 @@ func NewTest(eth *eth.Ethereum, frontend Frontend) *XEth { // New creates an XEth that uses the given frontend. // If a nil Frontend is provided, a default frontend which // confirms all transactions will be used. 
-func New(eth *eth.Ethereum, frontend Frontend) *XEth { +func New(ethereum *eth.Ethereum, frontend Frontend) *XEth { xeth := &XEth{ - backend: eth, + backend: ethereum, frontend: frontend, quit: make(chan struct{}), - filterManager: filter.NewFilterManager(eth.EventMux()), + filterManager: filter.NewFilterManager(ethereum.EventMux()), logQueue: make(map[int]*logQueue), blockQueue: make(map[int]*hashQueue), transactionQueue: make(map[int]*hashQueue), messages: make(map[int]*whisperFilter), agent: miner.NewRemoteAgent(), + gpo: eth.NewGasPriceOracle(ethereum), } - if eth.Whisper() != nil { - xeth.whisper = NewWhisper(eth.Whisper()) + if ethereum.Whisper() != nil { + xeth.whisper = NewWhisper(ethereum.Whisper()) } - eth.Miner().Register(xeth.agent) + ethereum.Miner().Register(xeth.agent) if frontend == nil { xeth.frontend = dummyFrontend{} } @@ -829,7 +835,7 @@ func (self *XEth) Call(fromStr, toStr, valueStr, gasStr, gasPriceStr, dataStr st } if msg.gasPrice.Cmp(big.NewInt(0)) == 0 { - msg.gasPrice = DefaultGasPrice() + msg.gasPrice = self.DefaultGasPrice() } block := self.CurrentBlock() @@ -898,7 +904,7 @@ func (self *XEth) Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceS } if len(gasPriceStr) == 0 { - price = DefaultGasPrice() + price = self.DefaultGasPrice() } else { price = common.Big(gasPriceStr) } -- cgit v1.2.3 From 0930e190a7eec8f956e22ada638e5b97f7ba9cda Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Tue, 26 May 2015 14:28:32 +0200 Subject: added missing source --- eth/gasprice.go | 174 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 eth/gasprice.go diff --git a/eth/gasprice.go b/eth/gasprice.go new file mode 100644 index 000000000..f5b241e2c --- /dev/null +++ b/eth/gasprice.go @@ -0,0 +1,174 @@ +package eth + +import ( + "math/big" + "math/rand" + "sync" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" +) + +const gpoProcessPastBlocks = 100 + +type blockPriceInfo struct { + baseGasPrice *big.Int +} + +type GasPriceOracle struct { + eth *Ethereum + chain *core.ChainManager + pool *core.TxPool + events event.Subscription + blocks map[uint64]*blockPriceInfo + firstProcessed, lastProcessed uint64 + lastBaseMutex sync.Mutex + lastBase *big.Int +} + +func NewGasPriceOracle(eth *Ethereum) (self *GasPriceOracle) { + self = &GasPriceOracle{} + self.blocks = make(map[uint64]*blockPriceInfo) + self.eth = eth + self.chain = eth.chainManager + self.pool = eth.txPool + self.events = eth.EventMux().Subscribe( + core.ChainEvent{}, + core.ChainSplitEvent{}, + core.TxPreEvent{}, + core.TxPostEvent{}, + ) + self.processPastBlocks() + go self.listenLoop() + return +} + +func (self *GasPriceOracle) processPastBlocks() { + last := self.chain.CurrentBlock().NumberU64() + first := uint64(0) + if last > gpoProcessPastBlocks { + first = last - gpoProcessPastBlocks + } + self.firstProcessed = first + for i := first; i <= last; i++ { + self.processBlock(self.chain.GetBlockByNumber(i)) + } + +} + +func (self *GasPriceOracle) listenLoop() { + for { + ev, isopen := <-self.events.Chan() + if !isopen { + break + } + switch ev := ev.(type) { + case core.ChainEvent: + self.processBlock(ev.Block) + case core.ChainSplitEvent: + self.processBlock(ev.Block) + case core.TxPreEvent: + case core.TxPostEvent: + } + } + self.events.Unsubscribe() +} + +func (self *GasPriceOracle) 
processBlock(block *types.Block) { + i := block.NumberU64() + if i > self.lastProcessed { + self.lastProcessed = i + } + + lastBase := self.eth.GpoMinGasPrice + bpl := self.blocks[i-1] + if bpl != nil { + lastBase = bpl.baseGasPrice + } + if lastBase == nil { + return + } + + var corr int + lp := self.lowestPrice(block) + if lp == nil { + return + } + + if lastBase.Cmp(lp) < 0 { + corr = self.eth.GpobaseStepUp + } else { + corr = -self.eth.GpobaseStepDown + } + + crand := int64(corr * (900 + rand.Intn(201))) + newBase := new(big.Int).Mul(lastBase, big.NewInt(1000000+crand)) + newBase.Div(newBase, big.NewInt(1000000)) + + bpi := self.blocks[i] + if bpi == nil { + bpi = &blockPriceInfo{} + self.blocks[i] = bpi + } + bpi.baseGasPrice = newBase + self.lastBaseMutex.Lock() + self.lastBase = newBase + self.lastBaseMutex.Unlock() + + glog.V(logger.Detail).Infof("Processed block #%v, base price is %v\n", block.NumberU64(), newBase.Int64()) +} + +// returns the lowers possible price with which a tx was or could have been included +func (self *GasPriceOracle) lowestPrice(block *types.Block) *big.Int { + gasUsed := new(big.Int) + recepits, err := self.eth.BlockProcessor().GetBlockReceipts(block.Hash()) + if err != nil { + return self.eth.GpoMinGasPrice + } + + if len(recepits) > 0 { + gasUsed = recepits[len(recepits)-1].CumulativeGasUsed + } + + if new(big.Int).Mul(gasUsed, big.NewInt(100)).Cmp(new(big.Int).Mul(block.Header().GasLimit, + big.NewInt(int64(self.eth.GpoFullBlockRatio)))) < 0 { + // block is not full, could have posted a tx with MinGasPrice + return self.eth.GpoMinGasPrice + } + + if len(block.Transactions()) < 1 { + return self.eth.GpoMinGasPrice + } + + // block is full, find smallest gasPrice + minPrice := block.Transactions()[0].GasPrice() + for i := 1; i < len(block.Transactions()); i++ { + price := block.Transactions()[i].GasPrice() + if price.Cmp(minPrice) < 0 { + minPrice = price + } + } + return minPrice +} + +func (self *GasPriceOracle) SuggestPrice() *big.Int { + self.lastBaseMutex.Lock() + base := self.lastBase + self.lastBaseMutex.Unlock() + + baseCorr := new(big.Int).Mul(base, big.NewInt(int64(100+self.eth.GpobaseCorrectionFactor))) + baseCorr.Div(baseCorr, big.NewInt(100)) + + if baseCorr.Cmp(self.eth.GpoMinGasPrice) < 0 { + return self.eth.GpoMinGasPrice + } + + if baseCorr.Cmp(self.eth.GpoMaxGasPrice) > 0 { + return self.eth.GpoMaxGasPrice + } + + return baseCorr +} -- cgit v1.2.3 From 2e8016c80d450a7d1126b481f1262b7cd9dec24d Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Tue, 26 May 2015 14:39:13 +0200 Subject: fixed initial base price bug --- eth/gasprice.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/eth/gasprice.go b/eth/gasprice.go index f5b241e2c..6f46559a2 100644 --- a/eth/gasprice.go +++ b/eth/gasprice.go @@ -159,6 +159,10 @@ func (self *GasPriceOracle) SuggestPrice() *big.Int { base := self.lastBase self.lastBaseMutex.Unlock() + if base == nil { + base = self.eth.GpoMinGasPrice + } + baseCorr := new(big.Int).Mul(base, big.NewInt(int64(100+self.eth.GpobaseCorrectionFactor))) baseCorr.Div(baseCorr, big.NewInt(100)) -- cgit v1.2.3 From 6e212bdc6d1319c84c305c446bbf1ba9ddfdc66d Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Tue, 26 May 2015 15:15:54 +0200 Subject: fallback for uninitialized GPO config values --- eth/gasprice.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/eth/gasprice.go b/eth/gasprice.go index 6f46559a2..12e76fc2c 100644 --- a/eth/gasprice.go +++ b/eth/gasprice.go @@ -162,6 +162,9 @@ func (self *GasPriceOracle) SuggestPrice() 
*big.Int { if base == nil { base = self.eth.GpoMinGasPrice } + if base == nil { + return big.NewInt(10000000000000) // apparently MinGasPrice is not initialized during some tests + } baseCorr := new(big.Int).Mul(base, big.NewInt(int64(100+self.eth.GpobaseCorrectionFactor))) baseCorr.Div(baseCorr, big.NewInt(100)) -- cgit v1.2.3 From a977cecbe49e9cf049785a437581a767b079570c Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Wed, 27 May 2015 12:39:59 +0200 Subject: fixed gas price corr. factor --- eth/gasprice.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/gasprice.go b/eth/gasprice.go index 12e76fc2c..cd5293691 100644 --- a/eth/gasprice.go +++ b/eth/gasprice.go @@ -166,7 +166,7 @@ func (self *GasPriceOracle) SuggestPrice() *big.Int { return big.NewInt(10000000000000) // apparently MinGasPrice is not initialized during some tests } - baseCorr := new(big.Int).Mul(base, big.NewInt(int64(100+self.eth.GpobaseCorrectionFactor))) + baseCorr := new(big.Int).Mul(base, big.NewInt(int64(self.eth.GpobaseCorrectionFactor))) baseCorr.Div(baseCorr, big.NewInt(100)) if baseCorr.Cmp(self.eth.GpoMinGasPrice) < 0 { -- cgit v1.2.3 From 3c7b64ce20aefeae6728d09d96b34ef31bc405ac Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Wed, 10 Jun 2015 15:57:38 +0200 Subject: removed duplicate function after rebase --- core/block_processor.go | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/core/block_processor.go b/core/block_processor.go index 0ed30ca21..c01b110be 100644 --- a/core/block_processor.go +++ b/core/block_processor.go @@ -269,22 +269,6 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st return state.Logs(), nil } -func (self *BlockProcessor) GetBlockReceipts(bhash common.Hash) (receipts types.Receipts, err error) { - var rdata []byte - rdata, err = self.extraDb.Get(append(receiptsPre, bhash[:]...)) - - if err == nil { - err = rlp.DecodeBytes(rdata, &receipts) - } else { - glog.V(logger.Detail).Infof("GetBlockReceipts error %v\n", err) - } - /*if len(receipts) > 0 { - glog.V(logger.Info).Infof("GBR len %v\n", len(receipts)) - }*/ - return - -} - // See YP section 4.3.4. "Block Header Validity" // Validates a block. Returns an error if the block is invalid. 
func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header, checkPow bool) error { @@ -426,6 +410,8 @@ func getBlockReceipts(db common.Database, bhash common.Hash) (receipts types.Rec if err == nil { err = rlp.DecodeBytes(rdata, &receipts) + } else { + glog.V(logger.Detail).Infof("getBlockReceipts error %v\n", err) } return } -- cgit v1.2.3 From 610adfd83f14652ba08d99fb098d1d62123abd38 Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Fri, 12 Jun 2015 13:57:30 +0200 Subject: fixed xeth.gpo = nil bug --- xeth/xeth.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/xeth/xeth.go b/xeth/xeth.go index 1044b02f6..71753d6bd 100644 --- a/xeth/xeth.go +++ b/xeth/xeth.go @@ -42,6 +42,9 @@ const ( func DefaultGas() *big.Int { return new(big.Int).Set(defaultGas) } func (self *XEth) DefaultGasPrice() *big.Int { + if self.gpo == nil { + self.gpo = eth.NewGasPriceOracle(self.backend) + } return self.gpo.SuggestPrice() } @@ -96,7 +99,6 @@ func New(ethereum *eth.Ethereum, frontend Frontend) *XEth { transactionQueue: make(map[int]*hashQueue), messages: make(map[int]*whisperFilter), agent: miner.NewRemoteAgent(), - gpo: eth.NewGasPriceOracle(ethereum), } if ethereum.Whisper() != nil { xeth.whisper = NewWhisper(ethereum.Whisper()) @@ -233,6 +235,7 @@ func (self *XEth) WithState(statedb *state.StateDB) *XEth { xeth := &XEth{ backend: self.backend, frontend: self.frontend, + gpo: self.gpo, } xeth.state = NewState(xeth, statedb) -- cgit v1.2.3 From b42b70eb5fd55741175dd5503686843a9d7d043a Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Fri, 12 Jun 2015 14:11:10 +0200 Subject: fixed rpc/api.GasPrice --- rpc/api/eth.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/api/eth.go b/rpc/api/eth.go index a0b9dad86..943f19b90 100644 --- a/rpc/api/eth.go +++ b/rpc/api/eth.go @@ -139,7 +139,7 @@ func (self *ethApi) IsMining(req *shared.Request) (interface{}, error) { } func (self *ethApi) GasPrice(req *shared.Request) (interface{}, error) { - return newHexNum(xeth.DefaultGasPrice().Bytes()), nil + return newHexNum(self.xeth.DefaultGasPrice().Bytes()), nil } func (self *ethApi) GetStorage(req *shared.Request) (interface{}, error) { -- cgit v1.2.3 From e79cc42dfe36f6db61cebb37607f5bfe89e4cdcc Mon Sep 17 00:00:00 2001 From: obscuren Date: Mon, 15 Jun 2015 16:46:45 +0200 Subject: core: moved check for max queue to checkQueue Moved the queue to check to the checkQueue method so no undeeded loops need to be initiated or sorting needs to happen twice. --- core/chain_manager.go | 12 ++---------- core/transaction_pool.go | 26 ++++++++++---------------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/core/chain_manager.go b/core/chain_manager.go index e56d82cce..c3b7273c2 100644 --- a/core/chain_manager.go +++ b/core/chain_manager.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "math/big" - "os" "runtime" "sync" "sync/atomic" @@ -235,15 +234,8 @@ func (bc *ChainManager) setLastState() { if block != nil { bc.currentBlock = block bc.lastBlockHash = block.Hash() - } else { // TODO CLEAN THIS UP TMP CODE - block = bc.GetBlockByNumber(400000) - if block == nil { - fmt.Println("Fatal. LastBlock not found. Report this issue") - os.Exit(1) - } - bc.currentBlock = block - bc.lastBlockHash = block.Hash() - bc.insert(block) + } else { + glog.Fatalf("Fatal. LastBlock not found. 
Please run removedb and resync") } } else { bc.Reset() diff --git a/core/transaction_pool.go b/core/transaction_pool.go index ce6fed1a9..e31f5c6b3 100644 --- a/core/transaction_pool.go +++ b/core/transaction_pool.go @@ -228,21 +228,6 @@ func (self *TxPool) queueTx(hash common.Hash, tx *types.Transaction) { self.queue[from] = make(map[common.Hash]*types.Transaction) } self.queue[from][hash] = tx - - if len(self.queue[from]) > maxQueued { - var ( - worstHash common.Hash - worstNonce uint64 - ) - for hash, tx := range self.queue[from] { - if tx.Nonce() > worstNonce { - worstNonce = tx.Nonce() - worstHash = hash - } - } - glog.V(logger.Debug).Infof("Queued tx limit exceeded for %x. Removed worst nonce tx: %x\n", common.PP(from[:]), common.PP(worstHash[:])) - delete(self.queue[from], worstHash) - } } // addTx will add a transaction to the pending (processable queue) list of transactions @@ -367,7 +352,16 @@ func (pool *TxPool) checkQueue() { // Find the next consecutive nonce range starting at the // current account nonce. sort.Sort(addq) - for _, e := range addq { + for i, e := range addq { + // start deleting the transactions from the queue if they exceed the limit + if i > maxQueued { + if glog.V(logger.Debug) { + glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(e.hash[:])) + } + delete(pool.queue[address], e.hash) + continue + } + if e.AccountNonce > guessedNonce { break } -- cgit v1.2.3 From 1e3f4877c0e9ebf58ffa06b0b119fdf3bab21658 Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Mon, 15 Jun 2015 16:48:59 +0200 Subject: Changed miner and gpo min gas price to 1 szabo --- cmd/utils/flags.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0f5e443e4..696dbd142 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -23,10 +23,10 @@ import ( "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/xeth" "github.com/ethereum/go-ethereum/rpc/api" - "github.com/ethereum/go-ethereum/rpc/comms" "github.com/ethereum/go-ethereum/rpc/codec" + "github.com/ethereum/go-ethereum/rpc/comms" + "github.com/ethereum/go-ethereum/xeth" ) func init() { @@ -132,7 +132,7 @@ var ( GasPriceFlag = cli.StringFlag{ Name: "gasprice", Usage: "Sets the minimal gasprice when mining transactions", - Value: new(big.Int).Mul(big.NewInt(10), common.Szabo).String(), + Value: new(big.Int).Mul(big.NewInt(1), common.Szabo).String(), } UnlockedAccountFlag = cli.StringFlag{ @@ -279,12 +279,12 @@ var ( GpoMinGasPriceFlag = cli.StringFlag{ Name: "gpomin", Usage: "Minimum suggested gas price", - Value: new(big.Int).Mul(big.NewInt(10), common.Szabo).String(), + Value: new(big.Int).Mul(big.NewInt(1), common.Szabo).String(), } GpoMaxGasPriceFlag = cli.StringFlag{ Name: "gpomax", Usage: "Maximum suggested gas price", - Value: new(big.Int).Mul(big.NewInt(1000), common.Szabo).String(), + Value: new(big.Int).Mul(big.NewInt(100), common.Szabo).String(), } GpoFullBlockRatioFlag = cli.IntFlag{ Name: "gpofull", @@ -432,7 +432,7 @@ func IpcSocketPath(ctx *cli.Context) (ipcpath string) { if ctx.GlobalString(IPCPathFlag.Name) != common.DefaultIpcPath() { ipcpath = ctx.GlobalString(IPCPathFlag.Name) } else if ctx.GlobalString(DataDirFlag.Name) != "" && - ctx.GlobalString(DataDirFlag.Name) != common.DefaultDataDir() { + ctx.GlobalString(DataDirFlag.Name) != common.DefaultDataDir() { ipcpath = 
filepath.Join(ctx.GlobalString(DataDirFlag.Name), "geth.ipc") } } -- cgit v1.2.3 From 2628103f1df35ad6a130f2f41e73c7703bf61886 Mon Sep 17 00:00:00 2001 From: obscuren Date: Mon, 15 Jun 2015 17:21:08 +0200 Subject: rpc/api: fixed default gas-(price) issue. --- rpc/api/eth.go | 9 ++++++++- rpc/api/eth_args.go | 8 ++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/rpc/api/eth.go b/rpc/api/eth.go index a0b9dad86..d329dbf10 100644 --- a/rpc/api/eth.go +++ b/rpc/api/eth.go @@ -259,7 +259,14 @@ func (self *ethApi) SendTransaction(req *shared.Request) (interface{}, error) { nonce = args.Nonce.String() } - v, err := self.xeth.Transact(args.From, args.To, nonce, args.Value.String(), args.Gas.String(), args.GasPrice.String(), args.Data) + var gas, price string + if args.Gas != nil { + gas = args.Gas.String() + } + if args.GasPrice != nil { + price = args.GasPrice.String() + } + v, err := self.xeth.Transact(args.From, args.To, nonce, args.Value.String(), gas, price, args.Data) if err != nil { return nil, err } diff --git a/rpc/api/eth_args.go b/rpc/api/eth_args.go index ad9a35fa2..1c86bee51 100644 --- a/rpc/api/eth_args.go +++ b/rpc/api/eth_args.go @@ -333,9 +333,7 @@ func (args *NewTxArgs) UnmarshalJSON(b []byte) (err error) { args.Value = num num = nil - if ext.Gas == nil { - num = big.NewInt(0) - } else { + if ext.Gas != nil { if num, err = numString(ext.Gas); err != nil { return err } @@ -343,9 +341,7 @@ func (args *NewTxArgs) UnmarshalJSON(b []byte) (err error) { args.Gas = num num = nil - if ext.GasPrice == nil { - num = big.NewInt(0) - } else { + if ext.GasPrice != nil { if num, err = numString(ext.GasPrice); err != nil { return err } -- cgit v1.2.3 From 4673b04503742de9b1622557b44135d6a4934ad6 Mon Sep 17 00:00:00 2001 From: obscuren Date: Mon, 15 Jun 2015 19:14:14 +0200 Subject: cmd/geth: bump version number 0.9.30 --- cmd/geth/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 1739fbc6b..5ea670f20 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -44,7 +44,7 @@ import ( const ( ClientIdentifier = "Geth" - Version = "0.9.29" + Version = "0.9.30" ) var ( -- cgit v1.2.3
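The write-gating mechanism introduced in the p2p/peer.go patch above ("track write errors and prevent writes during shutdown") is easiest to see in isolation. Below is a minimal, self-contained sketch of the same pattern under stated assumptions: the channel names writeStart, writeErr and closed mirror the roles they play in the patch, but the write helper, its arguments and the simulated error are invented purely for illustration and are not the actual go-ethereum API.

package main

import (
	"errors"
	"fmt"
)

func main() {
	writeStart := make(chan struct{}, 1) // holds at most one "write may start" token
	writeErr := make(chan error, 1)      // carries each write's outcome back to the owner
	closed := make(chan struct{})        // closed once the peer is shutting down

	// write blocks until it either receives the start token or the peer shuts
	// down; afterwards it reports the result so the owner can decide whether
	// to hand the token out again.
	write := func(msg string, fail bool) {
		select {
		case <-writeStart:
			var err error
			if fail {
				err = errors.New("connection reset")
			} else {
				fmt.Println("wrote:", msg)
			}
			writeErr <- err
		case <-closed:
			fmt.Println("dropped, shutting down:", msg)
		}
	}

	writeStart <- struct{}{} // arm the first write

	go write("hello", false)
	if err := <-writeErr; err != nil {
		close(closed) // a failed write triggers shutdown; no new write can start
	} else {
		writeStart <- struct{}{} // success: unblock the next write
	}

	go write("world", false)
	fmt.Println("second write result:", <-writeErr)
}

As in the patch, the owner of the connection (Peer.run there, main here) is the only party that re-arms the token, so a single failed write stops all further writes without relying on every protocol handler to notice the error itself.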