aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com
diff options
context:
space:
mode:
authorPéter Szilágyi <peterke@gmail.com>2016-10-29 01:05:01 +0800
committerFelix Lange <fjl@twurst.com>2016-10-29 01:05:01 +0800
commit289b30715d097edafd5562f66cb3567a70b2d330 (patch)
tree7eaaa6da97c84727469303b986e364606ece57ce /vendor/github.com
parent77703045765343c489ded2f43e3ed0f332c5f148 (diff)
downloadgo-tangerine-289b30715d097edafd5562f66cb3567a70b2d330.tar
go-tangerine-289b30715d097edafd5562f66cb3567a70b2d330.tar.gz
go-tangerine-289b30715d097edafd5562f66cb3567a70b2d330.tar.bz2
go-tangerine-289b30715d097edafd5562f66cb3567a70b2d330.tar.lz
go-tangerine-289b30715d097edafd5562f66cb3567a70b2d330.tar.xz
go-tangerine-289b30715d097edafd5562f66cb3567a70b2d330.tar.zst
go-tangerine-289b30715d097edafd5562f66cb3567a70b2d330.zip
Godeps, vendor: convert dependency management to trash (#3198)
This commit converts the dependency management from Godeps to the vendor folder, also switching the tool from godep to trash. Since the upstream tool lacks a few features proposed via a few PRs, until those PRs are merged upstream (if ever), use github.com/karalabe/trash. You can update dependencies via trash --update. All dependencies have been updated to their latest version. Parts of the build system are reworked to drop old notions of Godeps and invocation of the go vet command so that it doesn't run against the vendor folder, as that will just blow up during vetting. The conversion drops OpenCL (and hence GPU mining support) from ethash and our codebase. The short reasoning is that there's no one to maintain it, and having opencl libs in our deps messes up builds as go install ./... tries to build them, failing with unsatisfied link errors for the C OpenCL deps. golang.org/x/net/context is not vendored in. We expect it to be fetched by the user (i.e. using go get). To keep ci.go builds reproducible the package is "vendored" in build/_vendor.
Diffstat (limited to 'vendor/github.com')
-rw-r--r--vendor/github.com/cespare/cp/LICENSE.txt19
-rw-r--r--vendor/github.com/cespare/cp/README.md9
-rw-r--r--vendor/github.com/cespare/cp/cp.go58
-rw-r--r--vendor/github.com/davecgh/go-spew/.gitignore22
-rw-r--r--vendor/github.com/davecgh/go-spew/.travis.yml14
-rw-r--r--vendor/github.com/davecgh/go-spew/LICENSE15
-rw-r--r--vendor/github.com/davecgh/go-spew/README.md194
-rw-r--r--vendor/github.com/davecgh/go-spew/cov_report.sh22
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/bypass.go152
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/bypasssafe.go38
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/common.go341
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/config.go297
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/doc.go202
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/dump.go509
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/format.go419
-rw-r--r--vendor/github.com/davecgh/go-spew/spew/spew.go148
-rw-r--r--vendor/github.com/davecgh/go-spew/test_coverage.txt61
-rw-r--r--vendor/github.com/ethereum/ethash/.gitignore12
-rw-r--r--vendor/github.com/ethereum/ethash/.travis.yml23
-rw-r--r--vendor/github.com/ethereum/ethash/CMakeLists.txt14
-rw-r--r--vendor/github.com/ethereum/ethash/MANIFEST.in17
-rw-r--r--vendor/github.com/ethereum/ethash/Makefile6
-rw-r--r--vendor/github.com/ethereum/ethash/README.md22
-rw-r--r--vendor/github.com/ethereum/ethash/Vagrantfile7
-rw-r--r--vendor/github.com/ethereum/ethash/appveyor.yml43
-rw-r--r--vendor/github.com/ethereum/ethash/ethash.go441
-rw-r--r--vendor/github.com/ethereum/ethash/ethashc.go51
-rwxr-xr-xvendor/github.com/ethereum/ethash/setup.py47
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/CMakeLists.txt44
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/compiler.h33
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/data_sizes.h812
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/endian.h78
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/ethash.h147
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/fnv.h43
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/internal.c507
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/internal.h179
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/io.c119
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/io.h202
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/io_posix.c111
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/io_win32.c100
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/mmap.h47
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/mmap_win32.c84
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/sha3.c151
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/sha3.h31
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.cpp37
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.h18
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/util.h47
-rw-r--r--vendor/github.com/ethereum/ethash/src/libethash/util_win32.c38
-rw-r--r--vendor/github.com/fatih/color/.travis.yml5
-rw-r--r--vendor/github.com/fatih/color/LICENSE.md20
-rw-r--r--vendor/github.com/fatih/color/README.md154
-rw-r--r--vendor/github.com/fatih/color/color.go423
-rw-r--r--vendor/github.com/fatih/color/doc.go114
-rw-r--r--vendor/github.com/gizak/termui/.gitignore26
-rw-r--r--vendor/github.com/gizak/termui/.travis.yml6
-rw-r--r--vendor/github.com/gizak/termui/LICENSE22
-rw-r--r--vendor/github.com/gizak/termui/README.md150
-rw-r--r--vendor/github.com/gizak/termui/barchart.go149
-rw-r--r--vendor/github.com/gizak/termui/block.go240
-rw-r--r--vendor/github.com/gizak/termui/block_common.go20
-rw-r--r--vendor/github.com/gizak/termui/block_windows.go14
-rw-r--r--vendor/github.com/gizak/termui/buffer.go106
-rw-r--r--vendor/github.com/gizak/termui/canvas.go72
-rwxr-xr-xvendor/github.com/gizak/termui/config26
-rw-r--r--vendor/github.com/gizak/termui/doc.go29
-rw-r--r--vendor/github.com/gizak/termui/events.go323
-rw-r--r--vendor/github.com/gizak/termui/gauge.go109
-rw-r--r--vendor/github.com/gizak/termui/glide.lock14
-rw-r--r--vendor/github.com/gizak/termui/glide.yaml8
-rw-r--r--vendor/github.com/gizak/termui/grid.go279
-rw-r--r--vendor/github.com/gizak/termui/helper.go222
-rw-r--r--vendor/github.com/gizak/termui/linechart.go331
-rw-r--r--vendor/github.com/gizak/termui/linechart_others.go11
-rw-r--r--vendor/github.com/gizak/termui/linechart_windows.go11
-rw-r--r--vendor/github.com/gizak/termui/list.go89
-rw-r--r--vendor/github.com/gizak/termui/mbarchart.go242
-rw-r--r--vendor/github.com/gizak/termui/mkdocs.yml28
-rw-r--r--vendor/github.com/gizak/termui/par.go73
-rw-r--r--vendor/github.com/gizak/termui/pos.go78
-rw-r--r--vendor/github.com/gizak/termui/render.go135
-rw-r--r--vendor/github.com/gizak/termui/sparkline.go167
-rw-r--r--vendor/github.com/gizak/termui/textbuilder.go278
-rw-r--r--vendor/github.com/gizak/termui/theme.go140
-rw-r--r--vendor/github.com/gizak/termui/widget.go94
-rw-r--r--vendor/github.com/golang/snappy/.gitignore16
-rw-r--r--vendor/github.com/golang/snappy/AUTHORS15
-rw-r--r--vendor/github.com/golang/snappy/CONTRIBUTORS37
-rw-r--r--vendor/github.com/golang/snappy/LICENSE27
-rw-r--r--vendor/github.com/golang/snappy/README107
-rw-r--r--vendor/github.com/golang/snappy/decode.go237
-rw-r--r--vendor/github.com/golang/snappy/decode_amd64.go14
-rw-r--r--vendor/github.com/golang/snappy/decode_amd64.s490
-rw-r--r--vendor/github.com/golang/snappy/decode_other.go101
-rw-r--r--vendor/github.com/golang/snappy/encode.go285
-rw-r--r--vendor/github.com/golang/snappy/encode_amd64.go29
-rw-r--r--vendor/github.com/golang/snappy/encode_amd64.s730
-rw-r--r--vendor/github.com/golang/snappy/encode_other.go238
-rw-r--r--vendor/github.com/golang/snappy/snappy.go87
-rw-r--r--vendor/github.com/hashicorp/golang-lru/.gitignore23
-rw-r--r--vendor/github.com/hashicorp/golang-lru/2q.go212
-rw-r--r--vendor/github.com/hashicorp/golang-lru/LICENSE362
-rw-r--r--vendor/github.com/hashicorp/golang-lru/README.md25
-rw-r--r--vendor/github.com/hashicorp/golang-lru/arc.go257
-rw-r--r--vendor/github.com/hashicorp/golang-lru/lru.go114
-rw-r--r--vendor/github.com/hashicorp/golang-lru/simplelru/lru.go160
-rw-r--r--vendor/github.com/huin/goupnp/.gitignore1
-rw-r--r--vendor/github.com/huin/goupnp/LICENSE23
-rw-r--r--vendor/github.com/huin/goupnp/README.md44
-rw-r--r--vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go3717
-rw-r--r--vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go5378
-rw-r--r--vendor/github.com/huin/goupnp/device.go184
-rw-r--r--vendor/github.com/huin/goupnp/goupnp.go131
-rw-r--r--vendor/github.com/huin/goupnp/httpu/httpu.go132
-rw-r--r--vendor/github.com/huin/goupnp/httpu/serve.go108
-rw-r--r--vendor/github.com/huin/goupnp/scpd/scpd.go167
-rw-r--r--vendor/github.com/huin/goupnp/service_client.go88
-rw-r--r--vendor/github.com/huin/goupnp/soap/soap.go157
-rw-r--r--vendor/github.com/huin/goupnp/soap/types.go519
-rw-r--r--vendor/github.com/huin/goupnp/ssdp/registry.go312
-rw-r--r--vendor/github.com/huin/goupnp/ssdp/ssdp.go83
-rw-r--r--vendor/github.com/jackpal/go-nat-pmp/.travis.yml13
-rw-r--r--vendor/github.com/jackpal/go-nat-pmp/LICENSE13
-rw-r--r--vendor/github.com/jackpal/go-nat-pmp/README.md52
-rw-r--r--vendor/github.com/jackpal/go-nat-pmp/natpmp.go153
-rw-r--r--vendor/github.com/jackpal/go-nat-pmp/network.go89
-rw-r--r--vendor/github.com/jackpal/go-nat-pmp/recorder.go19
-rw-r--r--vendor/github.com/mattn/go-colorable/.travis.yml8
-rw-r--r--vendor/github.com/mattn/go-colorable/LICENSE21
-rw-r--r--vendor/github.com/mattn/go-colorable/README.md43
-rw-r--r--vendor/github.com/mattn/go-colorable/colorable_others.go24
-rw-r--r--vendor/github.com/mattn/go-colorable/colorable_windows.go810
-rw-r--r--vendor/github.com/mattn/go-colorable/noncolorable.go58
-rw-r--r--vendor/github.com/mattn/go-isatty/LICENSE9
-rw-r--r--vendor/github.com/mattn/go-isatty/README.md37
-rw-r--r--vendor/github.com/mattn/go-isatty/doc.go2
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_appengine.go9
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_bsd.go18
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_linux.go18
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_solaris.go16
-rw-r--r--vendor/github.com/mattn/go-isatty/isatty_windows.go19
-rw-r--r--vendor/github.com/mattn/go-runewidth/.travis.yml8
-rw-r--r--vendor/github.com/mattn/go-runewidth/LICENSE21
-rw-r--r--vendor/github.com/mattn/go-runewidth/README.mkd27
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth.go481
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_js.go8
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_posix.go77
-rw-r--r--vendor/github.com/mattn/go-runewidth/runewidth_windows.go25
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/LICENSE.md21
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/README.md39
-rw-r--r--vendor/github.com/mitchellh/go-wordwrap/wordwrap.go73
-rw-r--r--vendor/github.com/nsf/termbox-go/AUTHORS4
-rw-r--r--vendor/github.com/nsf/termbox-go/LICENSE19
-rw-r--r--vendor/github.com/nsf/termbox-go/README.md29
-rw-r--r--vendor/github.com/nsf/termbox-go/api.go458
-rw-r--r--vendor/github.com/nsf/termbox-go/api_common.go187
-rw-r--r--vendor/github.com/nsf/termbox-go/api_windows.go239
-rwxr-xr-xvendor/github.com/nsf/termbox-go/collect_terminfo.py110
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls.go39
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls_darwin.go41
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go40
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go39
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls_freebsd.go39
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls_linux.go33
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls_netbsd.go39
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls_openbsd.go39
-rw-r--r--vendor/github.com/nsf/termbox-go/syscalls_windows.go61
-rw-r--r--vendor/github.com/nsf/termbox-go/termbox.go514
-rw-r--r--vendor/github.com/nsf/termbox-go/termbox_common.go59
-rw-r--r--vendor/github.com/nsf/termbox-go/termbox_windows.go856
-rw-r--r--vendor/github.com/nsf/termbox-go/terminfo.go221
-rw-r--r--vendor/github.com/nsf/termbox-go/terminfo_builtin.go64
-rw-r--r--vendor/github.com/pborman/uuid/.travis.yml9
-rw-r--r--vendor/github.com/pborman/uuid/CONTRIBUTING.md10
-rw-r--r--vendor/github.com/pborman/uuid/CONTRIBUTORS1
-rw-r--r--vendor/github.com/pborman/uuid/LICENSE27
-rw-r--r--vendor/github.com/pborman/uuid/README.md13
-rw-r--r--vendor/github.com/pborman/uuid/dce.go84
-rw-r--r--vendor/github.com/pborman/uuid/doc.go8
-rw-r--r--vendor/github.com/pborman/uuid/hash.go53
-rw-r--r--vendor/github.com/pborman/uuid/json.go34
-rw-r--r--vendor/github.com/pborman/uuid/node.go117
-rw-r--r--vendor/github.com/pborman/uuid/sql.go66
-rw-r--r--vendor/github.com/pborman/uuid/time.go132
-rw-r--r--vendor/github.com/pborman/uuid/util.go43
-rw-r--r--vendor/github.com/pborman/uuid/uuid.go201
-rw-r--r--vendor/github.com/pborman/uuid/version1.go41
-rw-r--r--vendor/github.com/pborman/uuid/version4.go25
-rw-r--r--vendor/github.com/peterh/liner/COPYING21
-rw-r--r--vendor/github.com/peterh/liner/README.md100
-rw-r--r--vendor/github.com/peterh/liner/bsdinput.go41
-rw-r--r--vendor/github.com/peterh/liner/common.go237
-rw-r--r--vendor/github.com/peterh/liner/fallbackinput.go57
-rw-r--r--vendor/github.com/peterh/liner/input.go367
-rw-r--r--vendor/github.com/peterh/liner/input_darwin.go43
-rw-r--r--vendor/github.com/peterh/liner/input_linux.go28
-rw-r--r--vendor/github.com/peterh/liner/input_windows.go324
-rw-r--r--vendor/github.com/peterh/liner/line.go1033
-rw-r--r--vendor/github.com/peterh/liner/output.go79
-rw-r--r--vendor/github.com/peterh/liner/output_windows.go76
-rw-r--r--vendor/github.com/peterh/liner/signal.go12
-rw-r--r--vendor/github.com/peterh/liner/signal_legacy.go11
-rw-r--r--vendor/github.com/peterh/liner/unixmode.go37
-rw-r--r--vendor/github.com/peterh/liner/width.go79
-rw-r--r--vendor/github.com/rcrowley/go-metrics/.gitignore9
-rw-r--r--vendor/github.com/rcrowley/go-metrics/.travis.yml14
-rw-r--r--vendor/github.com/rcrowley/go-metrics/LICENSE29
-rw-r--r--vendor/github.com/rcrowley/go-metrics/README.md153
-rw-r--r--vendor/github.com/rcrowley/go-metrics/counter.go112
-rw-r--r--vendor/github.com/rcrowley/go-metrics/debug.go76
-rw-r--r--vendor/github.com/rcrowley/go-metrics/ewma.go118
-rw-r--r--vendor/github.com/rcrowley/go-metrics/exp/exp.go156
-rw-r--r--vendor/github.com/rcrowley/go-metrics/gauge.go120
-rw-r--r--vendor/github.com/rcrowley/go-metrics/gauge_float64.go127
-rw-r--r--vendor/github.com/rcrowley/go-metrics/graphite.go113
-rw-r--r--vendor/github.com/rcrowley/go-metrics/healthcheck.go61
-rw-r--r--vendor/github.com/rcrowley/go-metrics/histogram.go202
-rw-r--r--vendor/github.com/rcrowley/go-metrics/json.go87
-rw-r--r--vendor/github.com/rcrowley/go-metrics/log.go80
-rw-r--r--vendor/github.com/rcrowley/go-metrics/memory.md285
-rw-r--r--vendor/github.com/rcrowley/go-metrics/meter.go233
-rw-r--r--vendor/github.com/rcrowley/go-metrics/metrics.go13
-rw-r--r--vendor/github.com/rcrowley/go-metrics/opentsdb.go119
-rw-r--r--vendor/github.com/rcrowley/go-metrics/registry.go270
-rw-r--r--vendor/github.com/rcrowley/go-metrics/runtime.go212
-rw-r--r--vendor/github.com/rcrowley/go-metrics/runtime_cgo.go10
-rw-r--r--vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go9
-rw-r--r--vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go7
-rw-r--r--vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go9
-rw-r--r--vendor/github.com/rcrowley/go-metrics/sample.go609
-rw-r--r--vendor/github.com/rcrowley/go-metrics/syslog.go78
-rw-r--r--vendor/github.com/rcrowley/go-metrics/timer.go311
-rwxr-xr-xvendor/github.com/rcrowley/go-metrics/validate.sh10
-rw-r--r--vendor/github.com/rcrowley/go-metrics/writer.go100
-rw-r--r--vendor/github.com/rjeczalik/notify/.gitignore88
-rw-r--r--vendor/github.com/rjeczalik/notify/.travis.yml29
-rw-r--r--vendor/github.com/rjeczalik/notify/AUTHORS10
-rw-r--r--vendor/github.com/rjeczalik/notify/LICENSE21
-rw-r--r--vendor/github.com/rjeczalik/notify/README.md21
-rw-r--r--vendor/github.com/rjeczalik/notify/appveyor.yml23
-rw-r--r--vendor/github.com/rjeczalik/notify/debug.go11
-rw-r--r--vendor/github.com/rjeczalik/notify/debug_debug.go43
-rw-r--r--vendor/github.com/rjeczalik/notify/doc.go40
-rw-r--r--vendor/github.com/rjeczalik/notify/event.go143
-rw-r--r--vendor/github.com/rjeczalik/notify/event_fen.go46
-rw-r--r--vendor/github.com/rjeczalik/notify/event_fsevents.go71
-rw-r--r--vendor/github.com/rjeczalik/notify/event_inotify.go75
-rw-r--r--vendor/github.com/rjeczalik/notify/event_kqueue.go59
-rw-r--r--vendor/github.com/rjeczalik/notify/event_readdcw.go108
-rw-r--r--vendor/github.com/rjeczalik/notify/event_stub.go31
-rw-r--r--vendor/github.com/rjeczalik/notify/event_trigger.go22
-rw-r--r--vendor/github.com/rjeczalik/notify/node.go272
-rw-r--r--vendor/github.com/rjeczalik/notify/notify.go74
-rw-r--r--vendor/github.com/rjeczalik/notify/tree.go22
-rw-r--r--vendor/github.com/rjeczalik/notify/tree_nonrecursive.go292
-rw-r--r--vendor/github.com/rjeczalik/notify/tree_recursive.go354
-rw-r--r--vendor/github.com/rjeczalik/notify/util.go150
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher.go85
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_fen.go170
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_fen_cgo.go141
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_fsevents.go319
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go190
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_inotify.go396
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_kqueue.go192
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_readdcw.go574
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_stub.go23
-rw-r--r--vendor/github.com/rjeczalik/notify/watcher_trigger.go438
-rw-r--r--vendor/github.com/rjeczalik/notify/watchpoint.go103
-rw-r--r--vendor/github.com/rjeczalik/notify/watchpoint_other.go23
-rw-r--r--vendor/github.com/rjeczalik/notify/watchpoint_readdcw.go38
-rw-r--r--vendor/github.com/robertkrimen/otto/.gitignore5
-rw-r--r--vendor/github.com/robertkrimen/otto/DESIGN.markdown1
-rw-r--r--vendor/github.com/robertkrimen/otto/LICENSE7
-rw-r--r--vendor/github.com/robertkrimen/otto/Makefile63
-rw-r--r--vendor/github.com/robertkrimen/otto/README.markdown871
-rw-r--r--vendor/github.com/robertkrimen/otto/ast/README.markdown1068
-rw-r--r--vendor/github.com/robertkrimen/otto/ast/comments.go278
-rw-r--r--vendor/github.com/robertkrimen/otto/ast/node.go515
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin.go353
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_array.go681
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_boolean.go28
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_date.go615
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_error.go126
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_function.go129
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_json.go299
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_math.go151
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_number.go93
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_object.go289
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_regexp.go65
-rw-r--r--vendor/github.com/robertkrimen/otto/builtin_string.go495
-rw-r--r--vendor/github.com/robertkrimen/otto/clone.go173
-rw-r--r--vendor/github.com/robertkrimen/otto/cmpl.go24
-rw-r--r--vendor/github.com/robertkrimen/otto/cmpl_evaluate.go96
-rw-r--r--vendor/github.com/robertkrimen/otto/cmpl_evaluate_expression.go460
-rw-r--r--vendor/github.com/robertkrimen/otto/cmpl_evaluate_statement.go424
-rw-r--r--vendor/github.com/robertkrimen/otto/cmpl_parse.go656
-rw-r--r--vendor/github.com/robertkrimen/otto/console.go51
-rw-r--r--vendor/github.com/robertkrimen/otto/dbg.go9
-rw-r--r--vendor/github.com/robertkrimen/otto/dbg/dbg.go387
-rw-r--r--vendor/github.com/robertkrimen/otto/error.go252
-rw-r--r--vendor/github.com/robertkrimen/otto/evaluate.go318
-rw-r--r--vendor/github.com/robertkrimen/otto/file/README.markdown110
-rw-r--r--vendor/github.com/robertkrimen/otto/file/file.go164
-rw-r--r--vendor/github.com/robertkrimen/otto/global.go221
-rw-r--r--vendor/github.com/robertkrimen/otto/inline.go6649
-rwxr-xr-xvendor/github.com/robertkrimen/otto/inline.pl1086
-rw-r--r--vendor/github.com/robertkrimen/otto/object.go156
-rw-r--r--vendor/github.com/robertkrimen/otto/object_class.go493
-rw-r--r--vendor/github.com/robertkrimen/otto/otto.go770
-rw-r--r--vendor/github.com/robertkrimen/otto/otto_.go178
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/Makefile4
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/README.markdown190
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/dbg.go9
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/error.go175
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/expression.go1005
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/lexer.go866
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/parser.go344
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/regexp.go358
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/scope.go44
-rw-r--r--vendor/github.com/robertkrimen/otto/parser/statement.go938
-rw-r--r--vendor/github.com/robertkrimen/otto/property.go220
-rw-r--r--vendor/github.com/robertkrimen/otto/registry/README.markdown51
-rw-r--r--vendor/github.com/robertkrimen/otto/registry/registry.go47
-rw-r--r--vendor/github.com/robertkrimen/otto/result.go30
-rw-r--r--vendor/github.com/robertkrimen/otto/runtime.go711
-rw-r--r--vendor/github.com/robertkrimen/otto/scope.go35
-rw-r--r--vendor/github.com/robertkrimen/otto/script.go119
-rw-r--r--vendor/github.com/robertkrimen/otto/stash.go296
-rw-r--r--vendor/github.com/robertkrimen/otto/token/Makefile2
-rw-r--r--vendor/github.com/robertkrimen/otto/token/README.markdown171
-rw-r--r--vendor/github.com/robertkrimen/otto/token/token.go116
-rw-r--r--vendor/github.com/robertkrimen/otto/token/token_const.go349
-rwxr-xr-xvendor/github.com/robertkrimen/otto/token/tokenfmt222
-rw-r--r--vendor/github.com/robertkrimen/otto/type_arguments.go106
-rw-r--r--vendor/github.com/robertkrimen/otto/type_array.go109
-rw-r--r--vendor/github.com/robertkrimen/otto/type_boolean.go13
-rw-r--r--vendor/github.com/robertkrimen/otto/type_date.go299
-rw-r--r--vendor/github.com/robertkrimen/otto/type_error.go24
-rw-r--r--vendor/github.com/robertkrimen/otto/type_function.go292
-rw-r--r--vendor/github.com/robertkrimen/otto/type_go_array.go134
-rw-r--r--vendor/github.com/robertkrimen/otto/type_go_map.go87
-rw-r--r--vendor/github.com/robertkrimen/otto/type_go_slice.go126
-rw-r--r--vendor/github.com/robertkrimen/otto/type_go_struct.go146
-rw-r--r--vendor/github.com/robertkrimen/otto/type_number.go5
-rw-r--r--vendor/github.com/robertkrimen/otto/type_reference.go103
-rw-r--r--vendor/github.com/robertkrimen/otto/type_regexp.go146
-rw-r--r--vendor/github.com/robertkrimen/otto/type_string.go112
-rw-r--r--vendor/github.com/robertkrimen/otto/value.go1033
-rw-r--r--vendor/github.com/robertkrimen/otto/value_boolean.go40
-rw-r--r--vendor/github.com/robertkrimen/otto/value_number.go324
-rw-r--r--vendor/github.com/robertkrimen/otto/value_primitive.go23
-rw-r--r--vendor/github.com/robertkrimen/otto/value_string.go103
-rw-r--r--vendor/github.com/rs/cors/.travis.yml4
-rw-r--r--vendor/github.com/rs/cors/LICENSE19
-rw-r--r--vendor/github.com/rs/cors/README.md99
-rw-r--r--vendor/github.com/rs/cors/cors.go412
-rw-r--r--vendor/github.com/rs/cors/utils.go70
-rw-r--r--vendor/github.com/rs/xhandler/.travis.yml7
-rw-r--r--vendor/github.com/rs/xhandler/LICENSE19
-rw-r--r--vendor/github.com/rs/xhandler/README.md134
-rw-r--r--vendor/github.com/rs/xhandler/chain.go121
-rw-r--r--vendor/github.com/rs/xhandler/middleware.go59
-rw-r--r--vendor/github.com/rs/xhandler/xhandler.go42
-rw-r--r--vendor/github.com/syndtr/goleveldb/.travis.yml12
-rw-r--r--vendor/github.com/syndtr/goleveldb/LICENSE24
-rw-r--r--vendor/github.com/syndtr/goleveldb/README.md105
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/batch.go349
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go705
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go195
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/comparer.go67
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go51
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go57
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db.go1091
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go826
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go360
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go183
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_state.go234
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go325
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_util.go102
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_write.go443
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/doc.go90
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/errors.go20
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go78
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/filter.go31
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go116
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go60
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go184
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go242
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go132
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go304
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go524
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/key.go143
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go475
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go684
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/options.go107
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/session.go208
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go302
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/session_record.go323
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/session_util.go258
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go583
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go34
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go65
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go81
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go86
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go78
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go218
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go179
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/table.go529
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go1134
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/table/table.go177
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go375
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/util.go98
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go293
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go239
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go30
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go48
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/util/range.go32
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/util/util.go73
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/version.go524
418 files changed, 85636 insertions, 0 deletions
diff --git a/vendor/github.com/cespare/cp/LICENSE.txt b/vendor/github.com/cespare/cp/LICENSE.txt
new file mode 100644
index 000000000..70da676c9
--- /dev/null
+++ b/vendor/github.com/cespare/cp/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Caleb Spare
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/cespare/cp/README.md b/vendor/github.com/cespare/cp/README.md
new file mode 100644
index 000000000..577701fbe
--- /dev/null
+++ b/vendor/github.com/cespare/cp/README.md
@@ -0,0 +1,9 @@
+# cp
+
+[![GoDoc](https://godoc.org/github.com/cespare/cp?status.svg)](https://godoc.org/github.com/cespare/cp)
+
+cp is a small Go package for copying files and directories.
+
+The API may change because I want to add some options in the future (for merging with existing dirs).
+
+It does not currently handle Windows specifically (I think it may require some special treatment).
diff --git a/vendor/github.com/cespare/cp/cp.go b/vendor/github.com/cespare/cp/cp.go
new file mode 100644
index 000000000..d71dbb4ba
--- /dev/null
+++ b/vendor/github.com/cespare/cp/cp.go
@@ -0,0 +1,58 @@
+// Package cp offers simple file and directory copying for Go.
+package cp
+
+import (
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+var errCopyFileWithDir = errors.New("dir argument to CopyFile")
+
+// CopyFile copies the file with path src to dst. The new file must not exist.
+// It is created with the same permissions as src.
+func CopyFile(dst, src string) error {
+ rf, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer rf.Close()
+ rstat, err := rf.Stat()
+ if err != nil {
+ return err
+ }
+ if rstat.IsDir() {
+ return errCopyFileWithDir
+ }
+
+ wf, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, rstat.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(wf, rf); err != nil {
+ wf.Close()
+ return err
+ }
+ return wf.Close()
+}
+
+// CopyAll copies the file or (recursively) the directory at src to dst.
+// Permissions are preserved. dst must not already exist.
+func CopyAll(dst, src string) error {
+ return filepath.Walk(src, makeWalkFn(dst, src))
+}
+
+func makeWalkFn(dst, src string) filepath.WalkFunc {
+ return func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ dstPath := filepath.Join(dst, strings.TrimPrefix(path, src))
+ if info.IsDir() {
+ return os.Mkdir(dstPath, info.Mode())
+ }
+ return CopyFile(dstPath, path)
+ }
+}
diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore
new file mode 100644
index 000000000..00268614f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml
new file mode 100644
index 000000000..984e0736e
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+go:
+ - 1.5.4
+ - 1.6.3
+ - 1.7
+install:
+ - go get -v golang.org/x/tools/cmd/cover
+script:
+ - go test -v -tags=safe ./spew
+ - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
+after_success:
+ - go get -v github.com/mattn/goveralls
+ - export PATH=$PATH:$HOME/gopath/bin
+ - goveralls -coverprofile=profile.cov -service=travis-ci
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 000000000..bb6733231
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md
new file mode 100644
index 000000000..556170ae6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/README.md
@@ -0,0 +1,194 @@
+go-spew
+=======
+
+[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)](https://travis-ci.org/davecgh/go-spew) [![Coverage Status](https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)](https://coveralls.io/r/davecgh/go-spew?branch=master)
+
+Go-spew implements a deep pretty printer for Go data structures to aid in
+debugging. A comprehensive suite of tests with 100% test coverage is provided
+to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
+report. Go-spew is licensed under the liberal ISC license, so it may be used in
+open source or commercial projects.
+
+If you're interested in reading about how this package came to life and some
+of the challenges involved in providing a deep pretty printer, there is a blog
+post about it
+[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
+
+## Documentation
+
+[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)](http://godoc.org/github.com/davecgh/go-spew/spew)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the excellent GoDoc site here:
+http://godoc.org/github.com/davecgh/go-spew/spew
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
+
+## Installation
+
+```bash
+$ go get -u github.com/davecgh/go-spew/spew
+```
+
+## Quick Start
+
+Add this import line to the file you're working in:
+
+```Go
+import "github.com/davecgh/go-spew/spew"
+```
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+
+```Go
+spew.Dump(myVar1, myVar2, ...)
+spew.Fdump(someWriter, myVar1, myVar2, ...)
+str := spew.Sdump(myVar1, myVar2, ...)
+```
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
+compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
+and pointer addresses):
+
+```Go
+spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+```
+
+## Debugging a Web Application Example
+
+Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
+
+```Go
+package main
+
+import (
+ "fmt"
+ "html"
+ "net/http"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/html")
+ fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
+ fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
+}
+
+func main() {
+ http.HandleFunc("/", handler)
+ http.ListenAndServe(":8080", nil)
+}
+```
+
+## Sample Dump Output
+
+```
+(main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) {
+ (string) "one": (bool) true
+ }
+}
+([]uint8) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+}
+```
+
+## Sample Formatter Output
+
+Double pointer to a uint8:
+```
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+```
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+```
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+```
+
+## Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available via the
+spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+```
+* Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+* MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+* DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+* DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables. This option
+ relies on access to the unsafe package, so it will not have any effect when
+ running in environments without access to the unsafe package such as Google
+ App Engine or with the "safe" build tag specified.
+ Pointer method invocation is enabled by default.
+
+* ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+* SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are supported,
+ with other types sorted according to the reflect.Value.String() output
+ which guarantees display stability. Natural map order is used by
+ default.
+
+* SpewKeys
+ SpewKeys specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only considered
+ if SortKeys is true.
+
+```
+
+## Unsafe Package Dependency
+
+This package relies on the unsafe package to perform some of the more advanced
+features, however it also supports a "limited" mode which allows it to work in
+environments where the unsafe package is not available. By default, it will
+operate in this mode on Google App Engine and when compiled with GopherJS. The
+"safe" build tag may also be specified to force the package to build without
+using the unsafe package.
+
+## License
+
+Go-spew is licensed under the liberal ISC License.
diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh
new file mode 100644
index 000000000..9579497e4
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/cov_report.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# This script uses gocov to generate a test coverage report.
+# The gocov tool may be obtained with the following command:
+# go get github.com/axw/gocov/gocov
+#
+# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
+
+# Check for gocov.
+if ! type gocov >/dev/null 2>&1; then
+	echo >&2 "This script requires the gocov tool."
+	echo >&2 "You may obtain it with the following command:"
+	echo >&2 "go get github.com/axw/gocov/gocov"
+	exit 1
+fi
+
+# Only run the cgo tests if gcc is installed.
+if type gcc >/dev/null 2>&1; then
+	(cd spew && gocov test -tags testcgo | gocov report)
+else
+	(cd spew && gocov test | gocov report)
+fi
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 000000000..d42a0bc4a
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+	// internal reflect.Value fields. These values are valid before golang
+	// commit ecccf07e7f9d which changed the format. They are also valid
+	// after commit 82f48826c6c7 which changed the format again to mirror
+	// the original format. Code in the init function updates these offsets
+	// as necessary.
+	offsetPtr    = uintptr(ptrSize)
+	offsetScalar = uintptr(0)
+	offsetFlag   = uintptr(ptrSize * 2)
+
+	// flagKindWidth and flagKindShift indicate various bits that the
+	// reflect package uses internally to track kind information.
+	//
+	// flagRO indicates whether or not the value field of a reflect.Value is
+	// read-only.
+	//
+	// flagIndir indicates whether the value field of a reflect.Value is
+	// the actual data or a pointer to the data.
+	//
+	// These values are valid before golang commit 90a7c3c86944 which
+	// changed their positions. Code in the init function updates these
+	// flags as necessary.
+	flagKindWidth = uintptr(5)
+	flagKindShift = uintptr(flagKindWidth - 1)
+	flagRO        = uintptr(1 << 0)
+	flagIndir     = uintptr(1 << 1)
+)
+
+// init probes the running Go version's reflect.Value memory layout and
+// adjusts the offset and flag package variables above so that
+// unsafeReflectValue works across the reflect layouts this package supports.
+func init() {
+	// Older versions of reflect.Value stored small integers directly in the
+	// ptr field (which is named val in the older versions). Versions
+	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+	// scalar for this purpose which unfortunately came before the flag
+	// field, so the offset of the flag field is different for those
+	// versions.
+	//
+	// This code constructs a new reflect.Value from a known small integer
+	// and checks if the size of the reflect.Value struct indicates it has
+	// the scalar field. When it does, the offsets are updated accordingly.
+	vv := reflect.ValueOf(0xf00)
+	if unsafe.Sizeof(vv) == (ptrSize * 4) {
+		offsetScalar = ptrSize * 2
+		offsetFlag = ptrSize * 3
+	}
+
+	// Commit 90a7c3c86944 changed the flag positions such that the low
+	// order bits are the kind. This code extracts the kind from the flags
+	// field and ensures it's the correct type. When it's not, the flag
+	// order has been changed to the newer format, so the flags are updated
+	// accordingly.
+	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+	upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+		flagKindShift = 0
+		flagRO = 1 << 5
+		flagIndir = 1 << 6
+
+		// Commit adf9b30e5594 modified the flags to separate the
+		// flagRO flag into two bits which specifies whether or not the
+		// field is embedded. This causes flagIndir to move over a bit
+		// and means that flagRO is the combination of either of the
+		// original flagRO bit and the new bit.
+		//
+		// This code detects the change by extracting what used to be
+		// the indirect bit to ensure it's set. When it's not, the flag
+		// order has been changed to the newer format, so the flags are
+		// updated accordingly.
+		if upfv&flagIndir == 0 {
+			flagRO = 3 << 5
+			flagIndir = 1 << 7
+		}
+	}
+}
+
+// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+//
+// NOTE(review): relies on the reflect.Value layout probed in init; a future
+// Go release that changes the layout again would require new offset logic.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+	indirects := 1
+	vt := v.Type()
+	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+	if rvf&flagIndir != 0 {
+		// The ptr field holds a pointer to the data, so dereference one
+		// extra time below and type the new value as a pointer.
+		vt = reflect.PtrTo(v.Type())
+		indirects++
+	} else if offsetScalar != 0 {
+		// The value is in the scalar field when it's not one of the
+		// reference types.
+		switch vt.Kind() {
+		case reflect.Uintptr:
+		case reflect.Chan:
+		case reflect.Func:
+		case reflect.Map:
+		case reflect.Ptr:
+		case reflect.UnsafePointer:
+		default:
+			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+				offsetScalar)
+		}
+	}
+
+	pv := reflect.NewAt(vt, upv)
+	rv = pv
+	for i := 0; i < indirects; i++ {
+		rv = rv.Elem()
+	}
+	return rv
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 000000000..e47a4e795
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into a one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 000000000..14f02dc15
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits maps a value in [0, 15] to its lower-case hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+//
+// It must be invoked via defer so recover can intercept a panic raised by a
+// user-supplied Error or String method; the panic value is written to w as
+// "(PANIC=<value>)" in place of the method's output.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+//
+// The handled return reports whether the method output fully represents the
+// value; with ContinueOnMethod set, the method result is written in parens
+// and false is returned so the caller also formats the underlying value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value in the given base to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value in the given base to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w, in the form "(r+ii)".
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	// A negative imaginary part already carries its '-' sign; only a
+	// non-negative one needs an explicit '+'.
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w. A nil (zero) pointer is printed as "<nil>".
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+//
+// values must be non-empty: the caller (sortValues) filters out the empty
+// case before calling, and the kind probe below indexes values[0].
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+	vs := &valuesSorter{values: values, cs: cs}
+	if canSortSimply(vs.values[0].Kind()) {
+		return vs
+	}
+	if !cs.DisableMethods {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			b := bytes.Buffer{}
+			// If any value lacks an error/Stringer method, abandon
+			// surrogate strings entirely so all keys sort the same way.
+			if !handleMethods(cs, &b, vs.values[i]) {
+				vs.strings = nil
+				break
+			}
+			vs.strings[i] = b.String()
+		}
+	}
+	if vs.strings == nil && cs.SpewKeys {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+		}
+	}
+	return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+	return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation. The surrogate strings, when present, are
+// swapped in lockstep to keep them paired with their values.
+func (s *valuesSorter) Swap(i, j int) {
+	s.values[i], s.values[j] = s.values[j], s.values[i]
+	if s.strings != nil {
+		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+	}
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+//
+// Kinds not handled by the switch fall through to comparing the values'
+// reflect String() output, which yields a stable (if arbitrary) order.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays element-wise; the first
+		// differing pair decides the order.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+// Surrogate strings, when populated, take precedence over direct comparison.
+func (s *valuesSorter) Less(i, j int) bool {
+	if s.strings == nil {
+		return valueSortLess(s.values[i], s.values[j])
+	}
+	return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability. Sorting is done in
+// place; an empty slice is a no-op.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 000000000..555282723
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Print.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 000000000..5be0c4060
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Print. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 000000000..a0ff95e27
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound == true:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound == true:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 000000000..ecf3b80e2
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound == true:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound == true:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 000000000..d8233f542
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt
new file mode 100644
index 000000000..2cd087a2a
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt
@@ -0,0 +1,61 @@
+
+github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88)
+github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82)
+github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52)
+github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44)
+github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39)
+github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30)
+github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18)
+github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13)
+github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12)
+github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11)
+github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11)
+github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10)
+github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9)
+github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8)
+github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7)
+github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5)
+github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4)
+github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4)
+github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4)
+github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4)
+github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3)
+github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3)
+github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3)
+github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3)
+github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3)
+github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1)
+github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1)
+github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1)
+github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1)
+github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1)
+github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1)
+github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1)
+github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505)
+
diff --git a/vendor/github.com/ethereum/ethash/.gitignore b/vendor/github.com/ethereum/ethash/.gitignore
new file mode 100644
index 000000000..da074e6b3
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/.gitignore
@@ -0,0 +1,12 @@
+.idea/
+.DS_Store
+*/**/*un~
+.vagrant/
+*.pyc
+build/
+pyethash.egg-info/
+*.so
+*~
+*.swp
+MANIFEST
+dist/
diff --git a/vendor/github.com/ethereum/ethash/.travis.yml b/vendor/github.com/ethereum/ethash/.travis.yml
new file mode 100644
index 000000000..b83b02bf3
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/.travis.yml
@@ -0,0 +1,23 @@
+language: go
+go:
+ - 1.4.2
+
+before_install:
+ # for g++4.8 and C++11
+ - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
+
+ # Set up go-ethereum
+ - sudo apt-get update -y -qq
+ - sudo apt-get install -yqq libgmp3-dev
+ - git clone --depth=10 https://github.com/ethereum/go-ethereum ${GOPATH}/src/github.com/ethereum/go-ethereum
+ # use canned dependencies from the go-ethereum repository
+ - export GOPATH=$GOPATH:$GOPATH/src/github.com/ethereum/go-ethereum/Godeps/_workspace/
+ - echo $GOPATH
+
+install:
+ # need to explicitly request version 1.48 since by default we get 1.46 which does not work with C++11
+ - sudo apt-get install -qq --yes --force-yes g++-4.8
+ - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50
+ - sudo apt-get install -qq wget cmake bash libboost-test1.48-dev libboost-system1.48-dev libboost-filesystem1.48-dev nodejs python-pip python-dev valgrind
+ - sudo pip install virtualenv -q
+script: "./test/test.sh"
diff --git a/vendor/github.com/ethereum/ethash/CMakeLists.txt b/vendor/github.com/ethereum/ethash/CMakeLists.txt
new file mode 100644
index 000000000..807c43e96
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 2.8.7)
+project(ethash)
+
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
+set(ETHHASH_LIBS ethash)
+
+if (WIN32 AND WANT_CRYPTOPP)
+ add_subdirectory(cryptopp)
+endif()
+
+add_subdirectory(src/libethash)
+
+add_subdirectory(src/benchmark EXCLUDE_FROM_ALL)
+add_subdirectory(test/c)
diff --git a/vendor/github.com/ethereum/ethash/MANIFEST.in b/vendor/github.com/ethereum/ethash/MANIFEST.in
new file mode 100644
index 000000000..74e73c8be
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/MANIFEST.in
@@ -0,0 +1,17 @@
+include setup.py
+
+# C sources
+include src/libethash/internal.c
+include src/libethash/sha3.c
+include src/libethash/util.c
+include src/python/core.c
+
+# Headers
+include src/libethash/compiler.h
+include src/libethash/data_sizes.h
+include src/libethash/endian.h
+include src/libethash/ethash.h
+include src/libethash/fnv.h
+include src/libethash/internal.h
+include src/libethash/sha3.h
+include src/libethash/util.h
diff --git a/vendor/github.com/ethereum/ethash/Makefile b/vendor/github.com/ethereum/ethash/Makefile
new file mode 100644
index 000000000..741d3b56d
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/Makefile
@@ -0,0 +1,6 @@
+.PHONY: clean test
+test:
+ ./test/test.sh
+
+clean:
+ rm -rf *.so pyethash.egg-info/ build/ test/python/python-virtual-env/ test/c/build/ pyethash.so test/python/*.pyc dist/ MANIFEST
diff --git a/vendor/github.com/ethereum/ethash/README.md b/vendor/github.com/ethereum/ethash/README.md
new file mode 100644
index 000000000..2b2c3b544
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/README.md
@@ -0,0 +1,22 @@
+[![Build Status](https://travis-ci.org/ethereum/ethash.svg?branch=master)](https://travis-ci.org/ethereum/ethash)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/debris/ethash?branch=master&svg=true)](https://ci.appveyor.com/project/debris/ethash-nr37r/branch/master)
+
+# Ethash
+
+For details on this project, please see the Ethereum wiki:
+https://github.com/ethereum/wiki/wiki/Ethash
+
+### Coding Style for C++ code:
+
+Follow the same exact style as in [cpp-ethereum](https://github.com/ethereum/cpp-ethereum/blob/develop/CodingStandards.txt)
+
+### Coding Style for C code:
+
+The main thing above all is code consistency.
+
+- Tabs for indentation. A tab is 4 spaces
+- Try to stick to the [K&R](http://en.wikipedia.org/wiki/Indent_style#K.26R_style),
+ especially for the C code.
+- Keep the line lengths reasonable. No hard limit on 80 characters but don't go further
+ than 110. Some people work with multiple buffers next to each other.
+ Make them like you :)
diff --git a/vendor/github.com/ethereum/ethash/Vagrantfile b/vendor/github.com/ethereum/ethash/Vagrantfile
new file mode 100644
index 000000000..03891653f
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/Vagrantfile
@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+ config.vm.box = "Ubuntu 12.04"
+ config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box"
+end
diff --git a/vendor/github.com/ethereum/ethash/appveyor.yml b/vendor/github.com/ethereum/ethash/appveyor.yml
new file mode 100644
index 000000000..ac36a0626
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/appveyor.yml
@@ -0,0 +1,43 @@
+version: 1.0.0.{build}
+
+environment:
+ BOOST_ROOT: "c:/projects/ethash/deps/boost"
+
+branches:
+ only:
+ - master
+ - develop
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\projects\ethash
+
+#platform: Any CPU
+#configuration: Debug
+
+install:
+ # by default, all script lines are interpreted as batch
+
+# scripts to run before build
+before_build:
+ - echo "Downloading boost..."
+ - mkdir c:\projects\ethash\deps
+ - cd c:\projects\ethash\deps
+ - curl -O https://build.ethdev.com/builds/windows-precompiled/boost.tar.gz
+ - echo "Unzipping boost..."
+ - 7z x boost.tar.gz > nul
+ - 7z x boost.tar > nul
+ - ls
+ - echo "Running cmake..."
+ - cd c:\projects\ethash
+ - cmake .
+
+build:
+ project: ALL_BUILD.vcxproj # path to Visual Studio solution or project
+
+after_build:
+ - echo "Running tests..."
+ - cd c:\projects\ethash\test\c\Debug
+ - Test.exe
+ - echo "Finished!"
+
diff --git a/vendor/github.com/ethereum/ethash/ethash.go b/vendor/github.com/ethereum/ethash/ethash.go
new file mode 100644
index 000000000..2a31aaf2d
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/ethash.go
@@ -0,0 +1,441 @@
+// Copyright 2015 The go-ethereum Authors
+// Copyright 2015 Lefteris Karapetsas <lefteris@refu.co>
+// Copyright 2015 Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+/*
+#include "src/libethash/internal.h"
+
+int ethashGoCallback_cgo(unsigned);
+*/
+import "C"
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "math/rand"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/logger"
+ "github.com/ethereum/go-ethereum/logger/glog"
+ "github.com/ethereum/go-ethereum/pow"
+)
+
var (
	// maxUint256 is 2^256; the proof-of-work target is maxUint256/difficulty.
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
	// sharedLight is the process-wide verification cache handed out by NewShared.
	sharedLight = new(Light)
)

const (
	// epochLength is the number of blocks per ethash epoch (cache/DAG rotation).
	epochLength uint64 = 30000
	// cacheSizeForTesting is a tiny cache size used when Light.test is set.
	cacheSizeForTesting C.uint64_t = 1024
	// dagSizeForTesting is a tiny DAG size used when Full.test is set.
	dagSizeForTesting C.uint64_t = 1024 * 32
)
+
// DefaultDir is the default directory where DAG files are stored.
var DefaultDir = defaultDir()
+
// defaultDir resolves the platform-specific default ethash data directory:
// "AppData/Ethash" under the home folder on Windows, a hidden ".ethash"
// folder elsewhere. The home folder comes from the current OS user, with
// the HOME environment variable as a fallback.
func defaultDir() string {
	home := os.Getenv("HOME")
	if u, err := user.Current(); err == nil {
		home = u.HomeDir
	}
	switch runtime.GOOS {
	case "windows":
		return filepath.Join(home, "AppData", "Ethash")
	default:
		return filepath.Join(home, ".ethash")
	}
}
+
// cache wraps an ethash_light_t with some metadata
// and automatic memory management.
type cache struct {
	epoch uint64    // epoch this verification cache belongs to
	used  time.Time // last access time, drives LRU eviction in getCache
	test  bool      // use cacheSizeForTesting instead of the real size

	gen sync.Once // ensures cache is only generated once.
	ptr *C.struct_ethash_light // C allocation, released by the freeCache finalizer
}
+
// generate creates the actual cache. it can be called from multiple
// goroutines. the first call will generate the cache, subsequent
// calls wait until it is generated.
func (cache *cache) generate() {
	cache.gen.Do(func() {
		started := time.Now()
		seedHash := makeSeedHash(cache.epoch)
		glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
		size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
		if cache.test {
			size = cacheSizeForTesting
		}
		// The C allocation is released by the freeCache finalizer installed below.
		cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
		runtime.SetFinalizer(cache, freeCache)
		glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
	})
}
+
// freeCache releases the C-side light cache. Installed as a finalizer by
// cache.generate, so it runs when the Go wrapper becomes unreachable.
func freeCache(cache *cache) {
	C.ethash_light_delete(cache.ptr)
	cache.ptr = nil
}
+
+func (cache *cache) compute(dagSize uint64, hash common.Hash, nonce uint64) (ok bool, mixDigest, result common.Hash) {
+ ret := C.ethash_light_compute_internal(cache.ptr, C.uint64_t(dagSize), hashToH256(hash), C.uint64_t(nonce))
+ // Make sure cache is live until after the C call.
+ // This is important because a GC might happen and execute
+ // the finalizer before the call completes.
+ _ = cache
+ return bool(ret.success), h256ToHash(ret.mix_hash), h256ToHash(ret.result)
+}
+
// Light implements the Verify half of the proof of work. It uses a few small
// in-memory caches to verify the nonces found by Full.
type Light struct {
	test bool // If set, use a smaller cache size

	mu     sync.Mutex        // Protects the per-epoch map of verification caches
	caches map[uint64]*cache // Currently maintained verification caches
	future *cache            // Pre-generated cache for the estimated future DAG

	NumCaches int // Maximum number of caches to keep before eviction (only init, don't modify)
}
+
+// Verify checks whether the block's nonce is valid.
+func (l *Light) Verify(block pow.Block) bool {
+ // TODO: do ethash_quick_verify before getCache in order
+ // to prevent DOS attacks.
+ blockNum := block.NumberU64()
+ if blockNum >= epochLength*2048 {
+ glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
+ return false
+ }
+
+ difficulty := block.Difficulty()
+ /* Cannot happen if block header diff is validated prior to PoW, but can
+ happen if PoW is checked first due to parallel PoW checking.
+ We could check the minimum valid difficulty but for SoC we avoid (duplicating)
+ Ethereum protocol consensus rules here which are not in scope of Ethash
+ */
+ if difficulty.Cmp(common.Big0) == 0 {
+ glog.V(logger.Debug).Infof("invalid block difficulty")
+ return false
+ }
+
+ cache := l.getCache(blockNum)
+ dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
+ if l.test {
+ dagSize = dagSizeForTesting
+ }
+ // Recompute the hash using the cache.
+ ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
+ if !ok {
+ return false
+ }
+
+ // avoid mixdigest malleability as it's not included in a block's "hashNononce"
+ if block.MixDigest() != mixDigest {
+ return false
+ }
+
+ // The actual check.
+ target := new(big.Int).Div(maxUint256, difficulty)
+ return result.Big().Cmp(target) <= 0
+}
+
// h256ToHash reinterprets a C ethash_h256_t (32-byte array) as a
// common.Hash without copying field-by-field.
func h256ToHash(in C.ethash_h256_t) common.Hash {
	return *(*common.Hash)(unsafe.Pointer(&in.b))
}
+
// hashToH256 is the inverse of h256ToHash: it reinterprets a common.Hash
// as the C-side 32-byte ethash_h256_t.
func hashToH256(in common.Hash) C.ethash_h256_t {
	return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
}
+
// getCache returns the verification cache for the epoch containing blockNum,
// lazily creating it (or promoting the pre-generated future cache) and
// evicting the least-recently-used entry once NumCaches is exceeded.
func (l *Light) getCache(blockNum uint64) *cache {
	var c *cache
	epoch := blockNum / epochLength

	// If we have a PoW for that epoch, use that
	l.mu.Lock()
	if l.caches == nil {
		l.caches = make(map[uint64]*cache)
	}
	if l.NumCaches == 0 {
		// Default cap of 3 caches when the caller left NumCaches unset.
		l.NumCaches = 3
	}
	c = l.caches[epoch]
	if c == nil {
		// No cached DAG, evict the oldest if the cache limit was reached
		if len(l.caches) >= l.NumCaches {
			var evict *cache
			for _, cache := range l.caches {
				if evict == nil || evict.used.After(cache.used) {
					evict = cache
				}
			}
			glog.V(logger.Debug).Infof("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch)
			delete(l.caches, evict.epoch)
		}
		// If we have the new DAG pre-generated, use that, otherwise create a new one
		if l.future != nil && l.future.epoch == epoch {
			glog.V(logger.Debug).Infof("Using pre-generated DAG for epoch %d", epoch)
			c, l.future = l.future, nil
		} else {
			glog.V(logger.Debug).Infof("No pre-generated DAG available, creating new for epoch %d", epoch)
			c = &cache{epoch: epoch, test: l.test}
		}
		l.caches[epoch] = c

		// If we just used up the future cache, or need a refresh, regenerate
		if l.future == nil || l.future.epoch <= epoch {
			glog.V(logger.Debug).Infof("Pre-generating DAG for epoch %d", epoch+1)
			l.future = &cache{epoch: epoch + 1, test: l.test}
			go l.future.generate()
		}
	}
	c.used = time.Now()
	l.mu.Unlock()

	// Wait for generation finish and return the cache.
	// generate uses sync.Once, so this blocks until the first caller's
	// generation completes and is a no-op afterwards.
	c.generate()
	return c
}
+
// dag wraps an ethash_full_t with some metadata
// and automatic memory management.
type dag struct {
	epoch uint64 // epoch this full DAG belongs to
	test  bool   // use dagSizeForTesting instead of the real size
	dir   string // directory for the on-disk DAG file; "" means DefaultDir

	gen sync.Once // ensures DAG is only generated once.
	ptr *C.struct_ethash_full // C allocation, released by the freeDAG finalizer
}
+
// generate creates the actual DAG. it can be called from multiple
// goroutines. the first call will generate the DAG, subsequent
// calls wait until it is generated.
//
// Panics if the C-side DAG creation fails (IO or memory error).
func (d *dag) generate() {
	d.gen.Do(func() {
		var (
			started = time.Now()
			seedHash = makeSeedHash(d.epoch)
			blockNum = C.uint64_t(d.epoch * epochLength)
			cacheSize = C.ethash_get_cachesize(blockNum)
			dagSize = C.ethash_get_datasize(blockNum)
		)
		if d.test {
			cacheSize = cacheSizeForTesting
			dagSize = dagSizeForTesting
		}
		if d.dir == "" {
			d.dir = DefaultDir
		}
		glog.V(logger.Info).Infof("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash)
		// Generate a temporary cache.
		// TODO: this could share the cache with Light
		cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
		defer C.ethash_light_delete(cache)
		// Generate the actual DAG.
		// NOTE(review): C.CString mallocs and is never freed here — a small
		// one-off leak per generated DAG. Confirm whether the C side takes
		// ownership of the string before adding a C.free (needs <stdlib.h>).
		d.ptr = C.ethash_full_new_internal(
			C.CString(d.dir),
			hashToH256(seedHash),
			dagSize,
			cache,
			(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
		)
		if d.ptr == nil {
			panic("ethash_full_new IO or memory error")
		}
		// Release the C allocation when the Go wrapper is collected.
		runtime.SetFinalizer(d, freeDAG)
		glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
	})
}
+
// freeDAG releases the C-side full DAG. Installed as a finalizer by
// dag.generate, so it runs when the Go wrapper becomes unreachable.
func freeDAG(d *dag) {
	C.ethash_full_delete(d.ptr)
	d.ptr = nil
}
+
// Ptr returns a raw pointer to the DAG's dataset memory, for callers that
// hand the data to other C code.
func (d *dag) Ptr() unsafe.Pointer {
	return unsafe.Pointer(d.ptr.data)
}
+
// ethashGoCallback logs DAG generation progress; it is invoked from C via
// the ethashGoCallback_cgo gateway. Returning 0 lets generation continue.
//
//export ethashGoCallback
func ethashGoCallback(percent C.unsigned) C.int {
	glog.V(logger.Info).Infof("Generating DAG: %d%%", percent)
	return 0
}
+
+// MakeDAG pre-generates a DAG file for the given block number in the
+// given directory. If dir is the empty string, the default directory
+// is used.
+func MakeDAG(blockNum uint64, dir string) error {
+ d := &dag{epoch: blockNum / epochLength, dir: dir}
+ if blockNum >= epochLength*2048 {
+ return fmt.Errorf("block number too high, limit is %d", epochLength*2048)
+ }
+ d.generate()
+ if d.ptr == nil {
+ return errors.New("failed")
+ }
+ return nil
+}
+
// Full implements the Search half of the proof of work.
type Full struct {
	Dir string // use this to specify a non-default DAG directory

	test     bool  // if set use a smaller DAG size
	turbo    bool  // if unset, Search sleeps briefly between nonce attempts
	hashRate int32 // accessed atomically; updated by Search, read by GetHashrate

	mu      sync.Mutex // protects dag
	current *dag       // current full DAG
}
+
+func (pow *Full) getDAG(blockNum uint64) (d *dag) {
+ epoch := blockNum / epochLength
+ pow.mu.Lock()
+ if pow.current != nil && pow.current.epoch == epoch {
+ d = pow.current
+ } else {
+ d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir}
+ pow.current = d
+ }
+ pow.mu.Unlock()
+ // wait for it to finish generating.
+ d.generate()
+ return d
+}
+
// Search scans nonces, starting from a random value, until one produces an
// ethash result below the difficulty target or the stop channel fires.
// Returns (0, nil) when stopped. The measured hashrate is folded into
// pow.hashRate atomically and withdrawn again on exit.
func (pow *Full) Search(block pow.Block, stop <-chan struct{}, index int) (nonce uint64, mixDigest []byte) {
	dag := pow.getDAG(block.NumberU64())

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	diff := block.Difficulty()

	i := int64(0)
	starti := i
	start := time.Now().UnixNano()
	previousHashrate := int32(0)

	nonce = uint64(r.Int63())
	hash := hashToH256(block.HashNoNonce())
	target := new(big.Int).Div(maxUint256, diff)
	for {
		select {
		case <-stop:
			// Withdraw this searcher's contribution from the shared hashrate.
			atomic.AddInt32(&pow.hashRate, -previousHashrate)
			return 0, nil
		default:
			i++

			// we don't have to update hash rate on every nonce, so update after
			// first nonce check and then after 2^X nonces
			if i == 2 || ((i % (1 << 16)) == 0) {
				elapsed := time.Now().UnixNano() - start
				hashes := (float64(1e9) / float64(elapsed)) * float64(i-starti)
				// Publish only the delta so concurrent searchers can share
				// the single pow.hashRate counter.
				hashrateDiff := int32(hashes) - previousHashrate
				previousHashrate = int32(hashes)
				atomic.AddInt32(&pow.hashRate, hashrateDiff)
			}

			ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
			result := h256ToHash(ret.result).Big()

			// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
			if ret.success && result.Cmp(target) <= 0 {
				mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
				atomic.AddInt32(&pow.hashRate, -previousHashrate)
				return nonce, mixDigest
			}
			nonce += 1
		}

		// Throttled mode: yield the CPU between attempts.
		if !pow.turbo {
			time.Sleep(20 * time.Microsecond)
		}
	}
}
+
// GetHashrate returns the hashrate currently measured by Search, in
// hashes per second.
func (pow *Full) GetHashrate() int64 {
	return int64(atomic.LoadInt32(&pow.hashRate))
}
+
// Turbo toggles between full-speed mining and a throttled mode where
// Search sleeps briefly between nonce attempts.
func (pow *Full) Turbo(on bool) {
	// TODO: this needs to use an atomic operation.
	// NOTE(review): this write races with the read in Search; confirm it is
	// only called before mining starts, or make turbo atomic.
	pow.turbo = on
}
+
// Ethash combines block verification with Light and
// nonce searching with Full into a single proof of work.
type Ethash struct {
	*Light // nonce verification via small in-memory caches
	*Full  // nonce searching via the full DAG
}
+
// New creates an instance of the proof of work with its own private
// verification cache and turbo (full-speed) mining enabled.
func New() *Ethash {
	return &Ethash{new(Light), &Full{turbo: true}}
}
+
// NewShared creates an instance of the proof of work, where a single instance
// of the Light cache is shared across all instances created with NewShared.
func NewShared() *Ethash {
	return &Ethash{sharedLight, &Full{turbo: true}}
}
+
// NewForTesting creates a proof of work for use in unit tests.
// It uses a smaller DAG and cache size to keep test times low.
// DAG files are stored in a temporary directory.
//
// Nonces found by a testing instance are not verifiable with a
// regular-size cache.
//
// NOTE(review): the temporary directory is not removed by this package;
// callers appear responsible for cleanup — confirm.
func NewForTesting() (*Ethash, error) {
	dir, err := ioutil.TempDir("", "ethash-test")
	if err != nil {
		return nil, err
	}
	return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil
}
+
+func GetSeedHash(blockNum uint64) ([]byte, error) {
+ if blockNum >= epochLength*2048 {
+ return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048)
+ }
+ sh := makeSeedHash(blockNum / epochLength)
+ return sh[:], nil
+}
+
+func makeSeedHash(epoch uint64) (sh common.Hash) {
+ for ; epoch > 0; epoch-- {
+ sh = crypto.Sha3Hash(sh[:])
+ }
+ return sh
+}
diff --git a/vendor/github.com/ethereum/ethash/ethashc.go b/vendor/github.com/ethereum/ethash/ethashc.go
new file mode 100644
index 000000000..1d2ba1613
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/ethashc.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+/*
+ -mno-stack-arg-probe disables stack probing which avoids the function
+ __chkstk_ms being linked. this avoids a clash of this symbol as we also
+ separately link the secp256k1 lib which ends up defining this symbol
+
+ 1. https://gcc.gnu.org/onlinedocs/gccint/Stack-Checking.html
+ 2. https://groups.google.com/forum/#!msg/golang-dev/v1bziURSQ4k/88fXuJ24e-gJ
+ 3. https://groups.google.com/forum/#!topic/golang-nuts/VNP6Mwz_B6o
+
+*/
+
+/*
+#cgo CFLAGS: -std=gnu99 -Wall
+#cgo windows CFLAGS: -mno-stack-arg-probe
+#cgo LDFLAGS: -lm
+
+#include "src/libethash/internal.c"
+#include "src/libethash/sha3.c"
+#include "src/libethash/io.c"
+
+#ifdef _WIN32
+# include "src/libethash/io_win32.c"
+# include "src/libethash/mmap_win32.c"
+#else
+# include "src/libethash/io_posix.c"
+#endif
+
+// 'gateway function' for calling back into go.
+extern int ethashGoCallback(unsigned);
+int ethashGoCallback_cgo(unsigned percent) { return ethashGoCallback(percent); }
+
+*/
+import "C"
diff --git a/vendor/github.com/ethereum/ethash/setup.py b/vendor/github.com/ethereum/ethash/setup.py
new file mode 100755
index 000000000..18aa20f6d
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/setup.py
@@ -0,0 +1,47 @@
#!/usr/bin/env python
"""distutils build script for pyethash, the Python wrapper around the
ethash proof-of-work reference implementation."""
import os
from distutils.core import setup, Extension

# Platform-independent C sources.
sources = [
    'src/python/core.c',
    'src/libethash/io.c',
    'src/libethash/internal.c',
    'src/libethash/sha3.c']
if os.name == 'nt':
    # Windows builds need the win32 shims for IO, utilities and mmap.
    sources += [
        'src/libethash/util_win32.c',
        'src/libethash/io_win32.c',
        'src/libethash/mmap_win32.c',
    ]
else:
    sources += [
        'src/libethash/io_posix.c'
    ]
# Headers the extension depends on (changes trigger a rebuild).
# Fix: the original listed 'src/libethash/ethash.h' twice; deduplicated.
depends = [
    'src/libethash/ethash.h',
    'src/libethash/compiler.h',
    'src/libethash/data_sizes.h',
    'src/libethash/endian.h',
    'src/libethash/io.h',
    'src/libethash/fnv.h',
    'src/libethash/internal.h',
    'src/libethash/sha3.h',
    'src/libethash/util.h',
]
pyethash = Extension('pyethash',
                     sources=sources,
                     depends=depends,
                     extra_compile_args=["-Isrc/", "-std=gnu99", "-Wall"])

setup(
    name='pyethash',
    author="Matthew Wampler-Doty",
    author_email="matthew.wampler.doty@gmail.com",
    license='GPL',
    version='0.1.23',
    url='https://github.com/ethereum/ethash',
    download_url='https://github.com/ethereum/ethash/tarball/v23',
    # Fix: the original implicit string concatenation produced
    # "proof of workhashing function" (missing space).
    description=('Python wrappers for ethash, the ethereum proof of work '
                 'hashing function'),
    ext_modules=[pyethash],
)
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/CMakeLists.txt b/vendor/github.com/ethereum/ethash/src/libethash/CMakeLists.txt
new file mode 100644
index 000000000..a65621c3e
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/CMakeLists.txt
@@ -0,0 +1,44 @@
# Builds the core "ethash" library from the portable C sources, swapping in
# the win32 shims on MSVC and the CryptoPP SHA3 implementation when available.
set(LIBRARY ethash)

# Position-independent code when embedded into the cpp-ethereum build.
if (CPPETHEREUM)
	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
endif ()

set(CMAKE_BUILD_TYPE Release)

# MSVC has no -std flag; everything else compiles as gnu99.
if (NOT MSVC)
	set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
endif()

set(FILES util.h
    io.c
    internal.c
    ethash.h
    endian.h
    compiler.h
    fnv.h
    data_sizes.h)

# Platform-specific IO/mmap backends.
if (MSVC)
	list(APPEND FILES util_win32.c io_win32.c mmap_win32.c)
else()
	list(APPEND FILES io_posix.c)
endif()

if (NOT CRYPTOPP_FOUND)
	find_package(CryptoPP 5.6.2)
endif()

# Prefer CryptoPP's SHA3 when present, otherwise the bundled C sha3.
if (CRYPTOPP_FOUND)
	add_definitions(-DWITH_CRYPTOPP)
	include_directories( ${CRYPTOPP_INCLUDE_DIRS} )
	list(APPEND FILES sha3_cryptopp.cpp sha3_cryptopp.h)
else()
	list(APPEND FILES sha3.c sha3.h)
endif()

add_library(${LIBRARY} ${FILES})

if (CRYPTOPP_FOUND)
	TARGET_LINK_LIBRARIES(${LIBRARY} ${CRYPTOPP_LIBRARIES})
endif()
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/compiler.h b/vendor/github.com/ethereum/ethash/src/libethash/compiler.h
new file mode 100644
index 000000000..9695871cd
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/compiler.h
@@ -0,0 +1,33 @@
+/*
+ This file is part of cpp-ethereum.
+
+ cpp-ethereum is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ cpp-ethereum is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
+*/
/** @file compiler.h
 * @date 2014
 *
 * Portability shims so the library compiles both under MSVC's C compiler
 * and under GCC/Clang.
 */
#pragma once

// Visual Studio doesn't support the inline keyword in C mode
#if defined(_MSC_VER) && !defined(__cplusplus)
#define inline __inline
#endif

// pretend restrict is a standard keyword
// (each compiler family only accepts its own spelling)
#if defined(_MSC_VER)
#define restrict __restrict
#else
#define restrict __restrict__
#endif

diff --git a/vendor/github.com/ethereum/ethash/src/libethash/data_sizes.h b/vendor/github.com/ethereum/ethash/src/libethash/data_sizes.h
new file mode 100644
index 000000000..83cc30bcb
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/data_sizes.h
@@ -0,0 +1,812 @@
+/*
+ This file is part of cpp-ethereum.
+
+ cpp-ethereum is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ cpp-ethereum is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/** @file data_sizes.h
+* @author Matthew Wampler-Doty <negacthulhu@gmail.com>
+* @date 2015
+*/
+
+#pragma once
+
+#include <stdint.h>
+#include "compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+// 2048 Epochs (~20 years) worth of tabulated DAG sizes
+
+// Generated with the following Mathematica Code:
+
+// GetCacheSizes[n_] := Module[{
+// CacheSizeBytesInit = 2^24,
+// CacheGrowth = 2^17,
+// HashBytes = 64,
+// j = 0},
+// Reap[
+// While[j < n,
+// Module[{i =
+// Floor[(CacheSizeBytesInit + CacheGrowth * j) / HashBytes]},
+// While[! PrimeQ[i], i--];
+// Sow[i*HashBytes]; j++]]]][[2]][[1]]
+
+
+static const uint64_t dag_sizes[2048] = {
+ 1073739904U, 1082130304U, 1090514816U, 1098906752U, 1107293056U,
+ 1115684224U, 1124070016U, 1132461952U, 1140849536U, 1149232768U,
+ 1157627776U, 1166013824U, 1174404736U, 1182786944U, 1191180416U,
+ 1199568512U, 1207958912U, 1216345216U, 1224732032U, 1233124736U,
+ 1241513344U, 1249902464U, 1258290304U, 1266673792U, 1275067264U,
+ 1283453312U, 1291844992U, 1300234112U, 1308619904U, 1317010048U,
+ 1325397376U, 1333787776U, 1342176128U, 1350561664U, 1358954368U,
+ 1367339392U, 1375731584U, 1384118144U, 1392507008U, 1400897408U,
+ 1409284736U, 1417673344U, 1426062464U, 1434451072U, 1442839168U,
+ 1451229056U, 1459615616U, 1468006016U, 1476394112U, 1484782976U,
+ 1493171584U, 1501559168U, 1509948032U, 1518337664U, 1526726528U,
+ 1535114624U, 1543503488U, 1551892096U, 1560278656U, 1568669056U,
+ 1577056384U, 1585446272U, 1593831296U, 1602219392U, 1610610304U,
+ 1619000192U, 1627386752U, 1635773824U, 1644164224U, 1652555648U,
+ 1660943488U, 1669332608U, 1677721216U, 1686109312U, 1694497664U,
+ 1702886272U, 1711274624U, 1719661184U, 1728047744U, 1736434816U,
+ 1744829056U, 1753218944U, 1761606272U, 1769995904U, 1778382464U,
+ 1786772864U, 1795157888U, 1803550592U, 1811937664U, 1820327552U,
+ 1828711552U, 1837102976U, 1845488768U, 1853879936U, 1862269312U,
+ 1870656896U, 1879048064U, 1887431552U, 1895825024U, 1904212096U,
+ 1912601216U, 1920988544U, 1929379456U, 1937765504U, 1946156672U,
+ 1954543232U, 1962932096U, 1971321728U, 1979707264U, 1988093056U,
+ 1996487552U, 2004874624U, 2013262208U, 2021653888U, 2030039936U,
+ 2038430848U, 2046819968U, 2055208576U, 2063596672U, 2071981952U,
+ 2080373632U, 2088762752U, 2097149056U, 2105539712U, 2113928576U,
+ 2122315136U, 2130700672U, 2139092608U, 2147483264U, 2155872128U,
+ 2164257664U, 2172642176U, 2181035392U, 2189426048U, 2197814912U,
+ 2206203008U, 2214587264U, 2222979712U, 2231367808U, 2239758208U,
+ 2248145024U, 2256527744U, 2264922752U, 2273312128U, 2281701248U,
+ 2290086272U, 2298476672U, 2306867072U, 2315251072U, 2323639168U,
+ 2332032128U, 2340420224U, 2348808064U, 2357196416U, 2365580416U,
+ 2373966976U, 2382363008U, 2390748544U, 2399139968U, 2407530368U,
+ 2415918976U, 2424307328U, 2432695424U, 2441084288U, 2449472384U,
+ 2457861248U, 2466247808U, 2474637184U, 2483026816U, 2491414144U,
+ 2499803776U, 2508191872U, 2516582272U, 2524970368U, 2533359232U,
+ 2541743488U, 2550134144U, 2558525056U, 2566913408U, 2575301504U,
+ 2583686528U, 2592073856U, 2600467328U, 2608856192U, 2617240448U,
+ 2625631616U, 2634022016U, 2642407552U, 2650796416U, 2659188352U,
+ 2667574912U, 2675965312U, 2684352896U, 2692738688U, 2701130624U,
+ 2709518464U, 2717907328U, 2726293376U, 2734685056U, 2743073152U,
+ 2751462016U, 2759851648U, 2768232832U, 2776625536U, 2785017728U,
+ 2793401984U, 2801794432U, 2810182016U, 2818571648U, 2826959488U,
+ 2835349376U, 2843734144U, 2852121472U, 2860514432U, 2868900992U,
+ 2877286784U, 2885676928U, 2894069632U, 2902451584U, 2910843008U,
+ 2919234688U, 2927622784U, 2936011648U, 2944400768U, 2952789376U,
+ 2961177728U, 2969565568U, 2977951616U, 2986338944U, 2994731392U,
+ 3003120256U, 3011508352U, 3019895936U, 3028287104U, 3036675968U,
+ 3045063808U, 3053452928U, 3061837696U, 3070228352U, 3078615424U,
+ 3087003776U, 3095394944U, 3103782272U, 3112173184U, 3120562048U,
+ 3128944768U, 3137339264U, 3145725056U, 3154109312U, 3162505088U,
+ 3170893184U, 3179280256U, 3187669376U, 3196056704U, 3204445568U,
+ 3212836736U, 3221224064U, 3229612928U, 3238002304U, 3246391168U,
+ 3254778496U, 3263165824U, 3271556224U, 3279944576U, 3288332416U,
+ 3296719232U, 3305110912U, 3313500032U, 3321887104U, 3330273152U,
+ 3338658944U, 3347053184U, 3355440512U, 3363827072U, 3372220288U,
+ 3380608384U, 3388997504U, 3397384576U, 3405774208U, 3414163072U,
+ 3422551936U, 3430937984U, 3439328384U, 3447714176U, 3456104576U,
+ 3464493952U, 3472883584U, 3481268864U, 3489655168U, 3498048896U,
+ 3506434432U, 3514826368U, 3523213952U, 3531603584U, 3539987072U,
+ 3548380288U, 3556763264U, 3565157248U, 3573545344U, 3581934464U,
+ 3590324096U, 3598712704U, 3607098752U, 3615488384U, 3623877248U,
+ 3632265856U, 3640646528U, 3649043584U, 3657430144U, 3665821568U,
+ 3674207872U, 3682597504U, 3690984832U, 3699367808U, 3707764352U,
+ 3716152448U, 3724541056U, 3732925568U, 3741318016U, 3749706368U,
+ 3758091136U, 3766481536U, 3774872704U, 3783260032U, 3791650432U,
+ 3800036224U, 3808427648U, 3816815488U, 3825204608U, 3833592704U,
+ 3841981568U, 3850370432U, 3858755968U, 3867147904U, 3875536256U,
+ 3883920512U, 3892313728U, 3900702592U, 3909087872U, 3917478784U,
+ 3925868416U, 3934256512U, 3942645376U, 3951032192U, 3959422336U,
+ 3967809152U, 3976200064U, 3984588416U, 3992974976U, 4001363584U,
+ 4009751168U, 4018141312U, 4026530432U, 4034911616U, 4043308928U,
+ 4051695488U, 4060084352U, 4068472448U, 4076862848U, 4085249408U,
+ 4093640576U, 4102028416U, 4110413696U, 4118805632U, 4127194496U,
+ 4135583104U, 4143971968U, 4152360832U, 4160746112U, 4169135744U,
+ 4177525888U, 4185912704U, 4194303616U, 4202691968U, 4211076736U,
+ 4219463552U, 4227855488U, 4236246656U, 4244633728U, 4253022848U,
+ 4261412224U, 4269799808U, 4278184832U, 4286578048U, 4294962304U,
+ 4303349632U, 4311743104U, 4320130432U, 4328521088U, 4336909184U,
+ 4345295488U, 4353687424U, 4362073472U, 4370458496U, 4378852736U,
+ 4387238528U, 4395630208U, 4404019072U, 4412407424U, 4420790656U,
+ 4429182848U, 4437571456U, 4445962112U, 4454344064U, 4462738048U,
+ 4471119232U, 4479516544U, 4487904128U, 4496289664U, 4504682368U,
+ 4513068416U, 4521459584U, 4529846144U, 4538232704U, 4546619776U,
+ 4555010176U, 4563402112U, 4571790208U, 4580174464U, 4588567936U,
+ 4596957056U, 4605344896U, 4613734016U, 4622119808U, 4630511488U,
+ 4638898816U, 4647287936U, 4655675264U, 4664065664U, 4672451968U,
+ 4680842624U, 4689231488U, 4697620352U, 4706007424U, 4714397056U,
+ 4722786176U, 4731173248U, 4739562368U, 4747951744U, 4756340608U,
+ 4764727936U, 4773114496U, 4781504384U, 4789894784U, 4798283648U,
+ 4806667648U, 4815059584U, 4823449472U, 4831835776U, 4840226176U,
+ 4848612224U, 4857003392U, 4865391488U, 4873780096U, 4882169728U,
+ 4890557312U, 4898946944U, 4907333248U, 4915722368U, 4924110976U,
+ 4932499328U, 4940889728U, 4949276032U, 4957666432U, 4966054784U,
+ 4974438016U, 4982831488U, 4991221376U, 4999607168U, 5007998848U,
+ 5016386432U, 5024763776U, 5033164672U, 5041544576U, 5049941888U,
+ 5058329728U, 5066717056U, 5075107456U, 5083494272U, 5091883904U,
+ 5100273536U, 5108662144U, 5117048192U, 5125436032U, 5133827456U,
+ 5142215296U, 5150605184U, 5158993024U, 5167382144U, 5175769472U,
+ 5184157568U, 5192543872U, 5200936064U, 5209324928U, 5217711232U,
+ 5226102656U, 5234490496U, 5242877312U, 5251263872U, 5259654016U,
+ 5268040832U, 5276434304U, 5284819328U, 5293209728U, 5301598592U,
+ 5309986688U, 5318374784U, 5326764416U, 5335151488U, 5343542144U,
+ 5351929472U, 5360319872U, 5368706944U, 5377096576U, 5385484928U,
+ 5393871232U, 5402263424U, 5410650496U, 5419040384U, 5427426944U,
+ 5435816576U, 5444205952U, 5452594816U, 5460981376U, 5469367936U,
+ 5477760896U, 5486148736U, 5494536832U, 5502925952U, 5511315328U,
+ 5519703424U, 5528089984U, 5536481152U, 5544869504U, 5553256064U,
+ 5561645696U, 5570032768U, 5578423936U, 5586811264U, 5595193216U,
+ 5603585408U, 5611972736U, 5620366208U, 5628750464U, 5637143936U,
+ 5645528192U, 5653921408U, 5662310272U, 5670694784U, 5679082624U,
+ 5687474048U, 5695864448U, 5704251008U, 5712641408U, 5721030272U,
+ 5729416832U, 5737806208U, 5746194304U, 5754583936U, 5762969984U,
+ 5771358592U, 5779748224U, 5788137856U, 5796527488U, 5804911232U,
+ 5813300608U, 5821692544U, 5830082176U, 5838468992U, 5846855552U,
+ 5855247488U, 5863636096U, 5872024448U, 5880411008U, 5888799872U,
+ 5897186432U, 5905576832U, 5913966976U, 5922352768U, 5930744704U,
+ 5939132288U, 5947522432U, 5955911296U, 5964299392U, 5972688256U,
+ 5981074304U, 5989465472U, 5997851008U, 6006241408U, 6014627968U,
+ 6023015552U, 6031408256U, 6039796096U, 6048185216U, 6056574848U,
+ 6064963456U, 6073351808U, 6081736064U, 6090128768U, 6098517632U,
+ 6106906496U, 6115289216U, 6123680896U, 6132070016U, 6140459648U,
+ 6148849024U, 6157237376U, 6165624704U, 6174009728U, 6182403712U,
+ 6190792064U, 6199176064U, 6207569792U, 6215952256U, 6224345216U,
+ 6232732544U, 6241124224U, 6249510272U, 6257899136U, 6266287744U,
+ 6274676864U, 6283065728U, 6291454336U, 6299843456U, 6308232064U,
+ 6316620928U, 6325006208U, 6333395584U, 6341784704U, 6350174848U,
+ 6358562176U, 6366951296U, 6375337856U, 6383729536U, 6392119168U,
+ 6400504192U, 6408895616U, 6417283456U, 6425673344U, 6434059136U,
+ 6442444672U, 6450837376U, 6459223424U, 6467613056U, 6476004224U,
+ 6484393088U, 6492781952U, 6501170048U, 6509555072U, 6517947008U,
+ 6526336384U, 6534725504U, 6543112832U, 6551500672U, 6559888768U,
+ 6568278656U, 6576662912U, 6585055616U, 6593443456U, 6601834112U,
+ 6610219648U, 6618610304U, 6626999168U, 6635385472U, 6643777408U,
+ 6652164224U, 6660552832U, 6668941952U, 6677330048U, 6685719424U,
+ 6694107776U, 6702493568U, 6710882176U, 6719274112U, 6727662976U,
+ 6736052096U, 6744437632U, 6752825984U, 6761213824U, 6769604224U,
+ 6777993856U, 6786383488U, 6794770816U, 6803158144U, 6811549312U,
+ 6819937664U, 6828326528U, 6836706176U, 6845101696U, 6853491328U,
+ 6861880448U, 6870269312U, 6878655104U, 6887046272U, 6895433344U,
+ 6903822208U, 6912212864U, 6920596864U, 6928988288U, 6937377152U,
+ 6945764992U, 6954149248U, 6962544256U, 6970928768U, 6979317376U,
+ 6987709312U, 6996093824U, 7004487296U, 7012875392U, 7021258624U,
+ 7029652352U, 7038038912U, 7046427776U, 7054818944U, 7063207808U,
+ 7071595136U, 7079980928U, 7088372608U, 7096759424U, 7105149824U,
+ 7113536896U, 7121928064U, 7130315392U, 7138699648U, 7147092352U,
+ 7155479168U, 7163865728U, 7172249984U, 7180648064U, 7189036672U,
+ 7197424768U, 7205810816U, 7214196608U, 7222589824U, 7230975104U,
+ 7239367552U, 7247755904U, 7256145536U, 7264533376U, 7272921472U,
+ 7281308032U, 7289694848U, 7298088832U, 7306471808U, 7314864512U,
+ 7323253888U, 7331643008U, 7340029568U, 7348419712U, 7356808832U,
+ 7365196672U, 7373585792U, 7381973888U, 7390362752U, 7398750592U,
+ 7407138944U, 7415528576U, 7423915648U, 7432302208U, 7440690304U,
+ 7449080192U, 7457472128U, 7465860992U, 7474249088U, 7482635648U,
+ 7491023744U, 7499412608U, 7507803008U, 7516192384U, 7524579968U,
+ 7532967296U, 7541358464U, 7549745792U, 7558134656U, 7566524032U,
+ 7574912896U, 7583300992U, 7591690112U, 7600075136U, 7608466816U,
+ 7616854912U, 7625244544U, 7633629824U, 7642020992U, 7650410368U,
+ 7658794112U, 7667187328U, 7675574912U, 7683961984U, 7692349568U,
+ 7700739712U, 7709130368U, 7717519232U, 7725905536U, 7734295424U,
+ 7742683264U, 7751069056U, 7759457408U, 7767849088U, 7776238208U,
+ 7784626816U, 7793014912U, 7801405312U, 7809792128U, 7818179968U,
+ 7826571136U, 7834957184U, 7843347328U, 7851732352U, 7860124544U,
+ 7868512384U, 7876902016U, 7885287808U, 7893679744U, 7902067072U,
+ 7910455936U, 7918844288U, 7927230848U, 7935622784U, 7944009344U,
+ 7952400256U, 7960786048U, 7969176704U, 7977565312U, 7985953408U,
+ 7994339968U, 8002730368U, 8011119488U, 8019508096U, 8027896192U,
+ 8036285056U, 8044674688U, 8053062272U, 8061448832U, 8069838464U,
+ 8078227328U, 8086616704U, 8095006592U, 8103393664U, 8111783552U,
+ 8120171392U, 8128560256U, 8136949376U, 8145336704U, 8153726848U,
+ 8162114944U, 8170503296U, 8178891904U, 8187280768U, 8195669632U,
+ 8204058496U, 8212444544U, 8220834176U, 8229222272U, 8237612672U,
+ 8246000768U, 8254389376U, 8262775168U, 8271167104U, 8279553664U,
+ 8287944064U, 8296333184U, 8304715136U, 8313108352U, 8321497984U,
+ 8329885568U, 8338274432U, 8346663296U, 8355052928U, 8363441536U,
+ 8371828352U, 8380217984U, 8388606592U, 8396996224U, 8405384576U,
+ 8413772672U, 8422161536U, 8430549376U, 8438939008U, 8447326592U,
+ 8455715456U, 8464104832U, 8472492928U, 8480882048U, 8489270656U,
+ 8497659776U, 8506045312U, 8514434944U, 8522823808U, 8531208832U,
+ 8539602304U, 8547990656U, 8556378752U, 8564768384U, 8573154176U,
+ 8581542784U, 8589933952U, 8598322816U, 8606705024U, 8615099264U,
+ 8623487872U, 8631876992U, 8640264064U, 8648653952U, 8657040256U,
+ 8665430656U, 8673820544U, 8682209152U, 8690592128U, 8698977152U,
+ 8707374464U, 8715763328U, 8724151424U, 8732540032U, 8740928384U,
+ 8749315712U, 8757704576U, 8766089344U, 8774480768U, 8782871936U,
+ 8791260032U, 8799645824U, 8808034432U, 8816426368U, 8824812928U,
+ 8833199488U, 8841591424U, 8849976448U, 8858366336U, 8866757248U,
+ 8875147136U, 8883532928U, 8891923328U, 8900306816U, 8908700288U,
+ 8917088384U, 8925478784U, 8933867392U, 8942250368U, 8950644608U,
+ 8959032704U, 8967420544U, 8975809664U, 8984197504U, 8992584064U,
+ 9000976256U, 9009362048U, 9017752448U, 9026141312U, 9034530688U,
+ 9042917504U, 9051307904U, 9059694208U, 9068084864U, 9076471424U,
+ 9084861824U, 9093250688U, 9101638528U, 9110027648U, 9118416512U,
+ 9126803584U, 9135188096U, 9143581312U, 9151969664U, 9160356224U,
+ 9168747136U, 9177134464U, 9185525632U, 9193910144U, 9202302848U,
+ 9210690688U, 9219079552U, 9227465344U, 9235854464U, 9244244864U,
+ 9252633472U, 9261021824U, 9269411456U, 9277799296U, 9286188928U,
+ 9294574208U, 9302965888U, 9311351936U, 9319740032U, 9328131968U,
+ 9336516736U, 9344907392U, 9353296768U, 9361685888U, 9370074752U,
+ 9378463616U, 9386849408U, 9395239808U, 9403629184U, 9412016512U,
+ 9420405376U, 9428795008U, 9437181568U, 9445570688U, 9453960832U,
+ 9462346624U, 9470738048U, 9479121536U, 9487515008U, 9495903616U,
+ 9504289664U, 9512678528U, 9521067904U, 9529456256U, 9537843584U,
+ 9546233728U, 9554621312U, 9563011456U, 9571398784U, 9579788672U,
+ 9588178304U, 9596567168U, 9604954496U, 9613343104U, 9621732992U,
+ 9630121856U, 9638508416U, 9646898816U, 9655283584U, 9663675776U,
+ 9672061312U, 9680449664U, 9688840064U, 9697230464U, 9705617536U,
+ 9714003584U, 9722393984U, 9730772608U, 9739172224U, 9747561088U,
+ 9755945344U, 9764338816U, 9772726144U, 9781116544U, 9789503872U,
+ 9797892992U, 9806282624U, 9814670464U, 9823056512U, 9831439232U,
+ 9839833984U, 9848224384U, 9856613504U, 9865000576U, 9873391232U,
+ 9881772416U, 9890162816U, 9898556288U, 9906940544U, 9915333248U,
+ 9923721088U, 9932108672U, 9940496512U, 9948888448U, 9957276544U,
+ 9965666176U, 9974048384U, 9982441088U, 9990830464U, 9999219584U,
+ 10007602816U, 10015996544U, 10024385152U, 10032774016U, 10041163648U,
+ 10049548928U, 10057940096U, 10066329472U, 10074717824U, 10083105152U,
+ 10091495296U, 10099878784U, 10108272256U, 10116660608U, 10125049216U,
+ 10133437312U, 10141825664U, 10150213504U, 10158601088U, 10166991232U,
+ 10175378816U, 10183766144U, 10192157312U, 10200545408U, 10208935552U,
+ 10217322112U, 10225712768U, 10234099328U, 10242489472U, 10250876032U,
+ 10259264896U, 10267656064U, 10276042624U, 10284429184U, 10292820352U,
+ 10301209472U, 10309598848U, 10317987712U, 10326375296U, 10334763392U,
+ 10343153536U, 10351541632U, 10359930752U, 10368318592U, 10376707456U,
+ 10385096576U, 10393484672U, 10401867136U, 10410262144U, 10418647424U,
+ 10427039104U, 10435425664U, 10443810176U, 10452203648U, 10460589952U,
+ 10468982144U, 10477369472U, 10485759104U, 10494147712U, 10502533504U,
+ 10510923392U, 10519313536U, 10527702656U, 10536091264U, 10544478592U,
+ 10552867712U, 10561255808U, 10569642368U, 10578032768U, 10586423168U,
+ 10594805632U, 10603200128U, 10611588992U, 10619976064U, 10628361344U,
+ 10636754048U, 10645143424U, 10653531776U, 10661920384U, 10670307968U,
+ 10678696832U, 10687086464U, 10695475072U, 10703863168U, 10712246144U,
+ 10720639616U, 10729026688U, 10737414784U, 10745806208U, 10754190976U,
+ 10762581376U, 10770971264U, 10779356288U, 10787747456U, 10796135552U,
+ 10804525184U, 10812915584U, 10821301888U, 10829692288U, 10838078336U,
+ 10846469248U, 10854858368U, 10863247232U, 10871631488U, 10880023424U,
+ 10888412032U, 10896799616U, 10905188992U, 10913574016U, 10921964672U,
+ 10930352768U, 10938742912U, 10947132544U, 10955518592U, 10963909504U,
+ 10972298368U, 10980687488U, 10989074816U, 10997462912U, 11005851776U,
+ 11014241152U, 11022627712U, 11031017344U, 11039403904U, 11047793024U,
+ 11056184704U, 11064570752U, 11072960896U, 11081343872U, 11089737856U,
+ 11098128256U, 11106514816U, 11114904448U, 11123293568U, 11131680128U,
+ 11140065152U, 11148458368U, 11156845696U, 11165236864U, 11173624192U,
+ 11182013824U, 11190402688U, 11198790784U, 11207179136U, 11215568768U,
+ 11223957376U, 11232345728U, 11240734592U, 11249122688U, 11257511296U,
+ 11265899648U, 11274285952U, 11282675584U, 11291065472U, 11299452544U,
+ 11307842432U, 11316231296U, 11324616832U, 11333009024U, 11341395584U,
+ 11349782656U, 11358172288U, 11366560384U, 11374950016U, 11383339648U,
+ 11391721856U, 11400117376U, 11408504192U, 11416893568U, 11425283456U,
+ 11433671552U, 11442061184U, 11450444672U, 11458837888U, 11467226752U,
+ 11475611776U, 11484003968U, 11492392064U, 11500780672U, 11509169024U,
+ 11517550976U, 11525944448U, 11534335616U, 11542724224U, 11551111808U,
+ 11559500672U, 11567890304U, 11576277376U, 11584667008U, 11593056128U,
+ 11601443456U, 11609830016U, 11618221952U, 11626607488U, 11634995072U,
+ 11643387776U, 11651775104U, 11660161664U, 11668552576U, 11676940928U,
+ 11685330304U, 11693718656U, 11702106496U, 11710496128U, 11718882688U,
+ 11727273088U, 11735660416U, 11744050048U, 11752437376U, 11760824704U,
+ 11769216128U, 11777604736U, 11785991296U, 11794381952U, 11802770048U,
+ 11811157888U, 11819548544U, 11827932544U, 11836324736U, 11844713344U,
+ 11853100928U, 11861486464U, 11869879936U, 11878268032U, 11886656896U,
+ 11895044992U, 11903433088U, 11911822976U, 11920210816U, 11928600448U,
+ 11936987264U, 11945375872U, 11953761152U, 11962151296U, 11970543488U,
+ 11978928512U, 11987320448U, 11995708288U, 12004095104U, 12012486272U,
+ 12020875136U, 12029255552U, 12037652096U, 12046039168U, 12054429568U,
+ 12062813824U, 12071206528U, 12079594624U, 12087983744U, 12096371072U,
+ 12104759936U, 12113147264U, 12121534592U, 12129924992U, 12138314624U,
+ 12146703232U, 12155091584U, 12163481216U, 12171864704U, 12180255872U,
+ 12188643968U, 12197034112U, 12205424512U, 12213811328U, 12222199424U,
+ 12230590336U, 12238977664U, 12247365248U, 12255755392U, 12264143488U,
+ 12272531584U, 12280920448U, 12289309568U, 12297694592U, 12306086528U,
+ 12314475392U, 12322865024U, 12331253632U, 12339640448U, 12348029312U,
+ 12356418944U, 12364805248U, 12373196672U, 12381580928U, 12389969024U,
+ 12398357632U, 12406750592U, 12415138432U, 12423527552U, 12431916416U,
+ 12440304512U, 12448692352U, 12457081216U, 12465467776U, 12473859968U,
+ 12482245504U, 12490636672U, 12499025536U, 12507411584U, 12515801728U,
+ 12524190592U, 12532577152U, 12540966272U, 12549354368U, 12557743232U,
+ 12566129536U, 12574523264U, 12582911872U, 12591299456U, 12599688064U,
+ 12608074624U, 12616463488U, 12624845696U, 12633239936U, 12641631616U,
+ 12650019968U, 12658407296U, 12666795136U, 12675183232U, 12683574656U,
+ 12691960192U, 12700350592U, 12708740224U, 12717128576U, 12725515904U,
+ 12733906816U, 12742295168U, 12750680192U, 12759071872U, 12767460736U,
+ 12775848832U, 12784236928U, 12792626816U, 12801014656U, 12809404288U,
+ 12817789312U, 12826181504U, 12834568832U, 12842954624U, 12851345792U,
+ 12859732352U, 12868122496U, 12876512128U, 12884901248U, 12893289088U,
+ 12901672832U, 12910067584U, 12918455168U, 12926842496U, 12935232896U,
+ 12943620736U, 12952009856U, 12960396928U, 12968786816U, 12977176192U,
+ 12985563776U, 12993951104U, 13002341504U, 13010730368U, 13019115392U,
+ 13027506304U, 13035895168U, 13044272512U, 13052673152U, 13061062528U,
+ 13069446272U, 13077838976U, 13086227072U, 13094613632U, 13103000192U,
+ 13111393664U, 13119782528U, 13128157568U, 13136559232U, 13144945024U,
+ 13153329536U, 13161724288U, 13170111872U, 13178502784U, 13186884736U,
+ 13195279744U, 13203667072U, 13212057472U, 13220445824U, 13228832128U,
+ 13237221248U, 13245610624U, 13254000512U, 13262388352U, 13270777472U,
+ 13279166336U, 13287553408U, 13295943296U, 13304331904U, 13312719488U,
+ 13321108096U, 13329494656U, 13337885824U, 13346274944U, 13354663808U,
+ 13363051136U, 13371439232U, 13379825024U, 13388210816U, 13396605056U,
+ 13404995456U, 13413380224U, 13421771392U, 13430159744U, 13438546048U,
+ 13446937216U, 13455326848U, 13463708288U, 13472103808U, 13480492672U,
+ 13488875648U, 13497269888U, 13505657728U, 13514045312U, 13522435712U,
+ 13530824576U, 13539210112U, 13547599232U, 13555989376U, 13564379008U,
+ 13572766336U, 13581154432U, 13589544832U, 13597932928U, 13606320512U,
+ 13614710656U, 13623097472U, 13631477632U, 13639874944U, 13648264064U,
+ 13656652928U, 13665041792U, 13673430656U, 13681818496U, 13690207616U,
+ 13698595712U, 13706982272U, 13715373184U, 13723762048U, 13732150144U,
+ 13740536704U, 13748926592U, 13757316224U, 13765700992U, 13774090112U,
+ 13782477952U, 13790869376U, 13799259008U, 13807647872U, 13816036736U,
+ 13824425344U, 13832814208U, 13841202304U, 13849591424U, 13857978752U,
+ 13866368896U, 13874754688U, 13883145344U, 13891533184U, 13899919232U,
+ 13908311168U, 13916692096U, 13925085056U, 13933473152U, 13941866368U,
+ 13950253696U, 13958643584U, 13967032192U, 13975417216U, 13983807616U,
+ 13992197504U, 14000582272U, 14008973696U, 14017363072U, 14025752192U,
+ 14034137984U, 14042528384U, 14050918016U, 14059301504U, 14067691648U,
+ 14076083584U, 14084470144U, 14092852352U, 14101249664U, 14109635968U,
+ 14118024832U, 14126407552U, 14134804352U, 14143188608U, 14151577984U,
+ 14159968384U, 14168357248U, 14176741504U, 14185127296U, 14193521024U,
+ 14201911424U, 14210301824U, 14218685056U, 14227067264U, 14235467392U,
+ 14243855488U, 14252243072U, 14260630144U, 14269021568U, 14277409408U,
+ 14285799296U, 14294187904U, 14302571392U, 14310961792U, 14319353728U,
+ 14327738752U, 14336130944U, 14344518784U, 14352906368U, 14361296512U,
+ 14369685376U, 14378071424U, 14386462592U, 14394848128U, 14403230848U,
+ 14411627392U, 14420013952U, 14428402304U, 14436793472U, 14445181568U,
+ 14453569664U, 14461959808U, 14470347904U, 14478737024U, 14487122816U,
+ 14495511424U, 14503901824U, 14512291712U, 14520677504U, 14529064832U,
+ 14537456768U, 14545845632U, 14554234496U, 14562618496U, 14571011456U,
+ 14579398784U, 14587789184U, 14596172672U, 14604564608U, 14612953984U,
+ 14621341312U, 14629724288U, 14638120832U, 14646503296U, 14654897536U,
+ 14663284864U, 14671675264U, 14680061056U, 14688447616U, 14696835968U,
+ 14705228416U, 14713616768U, 14722003328U, 14730392192U, 14738784128U,
+ 14747172736U, 14755561088U, 14763947648U, 14772336512U, 14780725376U,
+ 14789110144U, 14797499776U, 14805892736U, 14814276992U, 14822670208U,
+ 14831056256U, 14839444352U, 14847836032U, 14856222848U, 14864612992U,
+ 14872997504U, 14881388672U, 14889775744U, 14898165376U, 14906553472U,
+ 14914944896U, 14923329664U, 14931721856U, 14940109696U, 14948497024U,
+ 14956887424U, 14965276544U, 14973663616U, 14982053248U, 14990439808U,
+ 14998830976U, 15007216768U, 15015605888U, 15023995264U, 15032385152U,
+ 15040768384U, 15049154944U, 15057549184U, 15065939072U, 15074328448U,
+ 15082715008U, 15091104128U, 15099493504U, 15107879296U, 15116269184U,
+ 15124659584U, 15133042304U, 15141431936U, 15149824384U, 15158214272U,
+ 15166602368U, 15174991232U, 15183378304U, 15191760512U, 15200154496U,
+ 15208542592U, 15216931712U, 15225323392U, 15233708416U, 15242098048U,
+ 15250489216U, 15258875264U, 15267265408U, 15275654528U, 15284043136U,
+ 15292431488U, 15300819584U, 15309208192U, 15317596544U, 15325986176U,
+ 15334374784U, 15342763648U, 15351151744U, 15359540608U, 15367929728U,
+ 15376318336U, 15384706432U, 15393092992U, 15401481856U, 15409869952U,
+ 15418258816U, 15426649984U, 15435037568U, 15443425664U, 15451815296U,
+ 15460203392U, 15468589184U, 15476979328U, 15485369216U, 15493755776U,
+ 15502146944U, 15510534272U, 15518924416U, 15527311232U, 15535699072U,
+ 15544089472U, 15552478336U, 15560866688U, 15569254528U, 15577642624U,
+ 15586031488U, 15594419072U, 15602809472U, 15611199104U, 15619586432U,
+ 15627975296U, 15636364928U, 15644753792U, 15653141888U, 15661529216U,
+ 15669918848U, 15678305152U, 15686696576U, 15695083136U, 15703474048U,
+ 15711861632U, 15720251264U, 15728636288U, 15737027456U, 15745417088U,
+ 15753804928U, 15762194048U, 15770582656U, 15778971008U, 15787358336U,
+ 15795747712U, 15804132224U, 15812523392U, 15820909696U, 15829300096U,
+ 15837691264U, 15846071936U, 15854466944U, 15862855808U, 15871244672U,
+ 15879634816U, 15888020608U, 15896409728U, 15904799104U, 15913185152U,
+ 15921577088U, 15929966464U, 15938354816U, 15946743424U, 15955129472U,
+ 15963519872U, 15971907968U, 15980296064U, 15988684928U, 15997073024U,
+ 16005460864U, 16013851264U, 16022241152U, 16030629248U, 16039012736U,
+ 16047406976U, 16055794816U, 16064181376U, 16072571264U, 16080957824U,
+ 16089346688U, 16097737856U, 16106125184U, 16114514816U, 16122904192U,
+ 16131292544U, 16139678848U, 16148066944U, 16156453504U, 16164839552U,
+ 16173236096U, 16181623424U, 16190012032U, 16198401152U, 16206790528U,
+ 16215177344U, 16223567744U, 16231956352U, 16240344704U, 16248731008U,
+ 16257117824U, 16265504384U, 16273898624U, 16282281856U, 16290668672U,
+ 16299064192U, 16307449216U, 16315842176U, 16324230016U, 16332613504U,
+ 16341006464U, 16349394304U, 16357783168U, 16366172288U, 16374561664U,
+ 16382951296U, 16391337856U, 16399726208U, 16408116352U, 16416505472U,
+ 16424892032U, 16433282176U, 16441668224U, 16450058624U, 16458448768U,
+ 16466836864U, 16475224448U, 16483613056U, 16492001408U, 16500391808U,
+ 16508779648U, 16517166976U, 16525555328U, 16533944192U, 16542330752U,
+ 16550719616U, 16559110528U, 16567497088U, 16575888512U, 16584274816U,
+ 16592665472U, 16601051008U, 16609442944U, 16617832064U, 16626218624U,
+ 16634607488U, 16642996096U, 16651385728U, 16659773824U, 16668163712U,
+ 16676552576U, 16684938112U, 16693328768U, 16701718144U, 16710095488U,
+ 16718492288U, 16726883968U, 16735272832U, 16743661184U, 16752049792U,
+ 16760436608U, 16768827008U, 16777214336U, 16785599104U, 16793992832U,
+ 16802381696U, 16810768768U, 16819151744U, 16827542656U, 16835934848U,
+ 16844323712U, 16852711552U, 16861101952U, 16869489536U, 16877876864U,
+ 16886265728U, 16894653056U, 16903044736U, 16911431296U, 16919821696U,
+ 16928207488U, 16936592768U, 16944987776U, 16953375616U, 16961763968U,
+ 16970152832U, 16978540928U, 16986929536U, 16995319168U, 17003704448U,
+ 17012096896U, 17020481152U, 17028870784U, 17037262208U, 17045649536U,
+ 17054039936U, 17062426496U, 17070814336U, 17079205504U, 17087592064U,
+ 17095978112U, 17104369024U, 17112759424U, 17121147776U, 17129536384U,
+ 17137926016U, 17146314368U, 17154700928U, 17163089792U, 17171480192U,
+ 17179864192U, 17188256896U, 17196644992U, 17205033856U, 17213423488U,
+ 17221811072U, 17230198912U, 17238588032U, 17246976896U, 17255360384U,
+ 17263754624U, 17272143232U, 17280530048U, 17288918912U, 17297309312U,
+ 17305696384U, 17314085504U, 17322475136U, 17330863744U, 17339252096U,
+ 17347640192U, 17356026496U, 17364413824U, 17372796544U, 17381190016U,
+ 17389583488U, 17397972608U, 17406360704U, 17414748544U, 17423135872U,
+ 17431527296U, 17439915904U, 17448303232U, 17456691584U, 17465081728U,
+ 17473468288U, 17481857408U, 17490247552U, 17498635904U, 17507022464U,
+ 17515409024U, 17523801728U, 17532189824U, 17540577664U, 17548966016U,
+ 17557353344U, 17565741184U, 17574131584U, 17582519168U, 17590907008U,
+ 17599296128U, 17607687808U, 17616076672U, 17624455808U, 17632852352U,
+ 17641238656U, 17649630848U, 17658018944U, 17666403968U, 17674794112U,
+ 17683178368U, 17691573376U, 17699962496U, 17708350592U, 17716739968U,
+ 17725126528U, 17733517184U, 17741898112U, 17750293888U, 17758673024U,
+ 17767070336U, 17775458432U, 17783848832U, 17792236928U, 17800625536U,
+ 17809012352U, 17817402752U, 17825785984U, 17834178944U, 17842563968U,
+ 17850955648U, 17859344512U, 17867732864U, 17876119424U, 17884511872U,
+ 17892900224U, 17901287296U, 17909677696U, 17918058112U, 17926451072U,
+ 17934843776U, 17943230848U, 17951609216U, 17960008576U, 17968397696U,
+ 17976784256U, 17985175424U, 17993564032U, 18001952128U, 18010339712U,
+ 18018728576U, 18027116672U, 18035503232U, 18043894144U, 18052283264U,
+ 18060672128U, 18069056384U, 18077449856U, 18085837184U, 18094225792U,
+ 18102613376U, 18111004544U, 18119388544U, 18127781248U, 18136170368U,
+ 18144558976U, 18152947328U, 18161336192U, 18169724288U, 18178108544U,
+ 18186498944U, 18194886784U, 18203275648U, 18211666048U, 18220048768U,
+ 18228444544U, 18236833408U, 18245220736U
+};
+
+
+// Generated with the following Mathematica Code:
+
+// GetCacheSizes[n_] := Module[{
+// DataSetSizeBytesInit = 2^30,
+// MixBytes = 128,
+// DataSetGrowth = 2^23,
+// HashBytes = 64,
+// CacheMultiplier = 1024,
+// j = 0},
+// Reap[
+// While[j < n,
+// Module[{i = Floor[(DataSetSizeBytesInit + DataSetGrowth * j) / (CacheMultiplier * HashBytes)]},
+// While[! PrimeQ[i], i--];
+// Sow[i*HashBytes]; j++]]]][[2]][[1]]
+
+const uint64_t cache_sizes[2048] = { // bytes per epoch (index = block_number / ETHASH_EPOCH_LENGTH); generated by the Mathematica snippet above: largest prime multiple of HashBytes below the cache-growth bound
+ 16776896U, 16907456U, 17039296U, 17170112U, 17301056U, 17432512U, 17563072U,
+ 17693888U, 17824192U, 17955904U, 18087488U, 18218176U, 18349504U, 18481088U,
+ 18611392U, 18742336U, 18874304U, 19004224U, 19135936U, 19267264U, 19398208U,
+ 19529408U, 19660096U, 19791424U, 19922752U, 20053952U, 20184896U, 20315968U,
+ 20446912U, 20576576U, 20709184U, 20840384U, 20971072U, 21102272U, 21233216U,
+ 21364544U, 21494848U, 21626816U, 21757376U, 21887552U, 22019392U, 22151104U,
+ 22281536U, 22412224U, 22543936U, 22675264U, 22806464U, 22935872U, 23068096U,
+ 23198272U, 23330752U, 23459008U, 23592512U, 23723968U, 23854912U, 23986112U,
+ 24116672U, 24247616U, 24378688U, 24509504U, 24640832U, 24772544U, 24903488U,
+ 25034432U, 25165376U, 25296704U, 25427392U, 25558592U, 25690048U, 25820096U,
+ 25951936U, 26081728U, 26214208U, 26345024U, 26476096U, 26606656U, 26737472U,
+ 26869184U, 26998208U, 27131584U, 27262528U, 27393728U, 27523904U, 27655744U,
+ 27786688U, 27917888U, 28049344U, 28179904U, 28311488U, 28441792U, 28573504U,
+ 28700864U, 28835648U, 28966208U, 29096768U, 29228608U, 29359808U, 29490752U,
+ 29621824U, 29752256U, 29882816U, 30014912U, 30144448U, 30273728U, 30406976U,
+ 30538432U, 30670784U, 30799936U, 30932672U, 31063744U, 31195072U, 31325248U,
+ 31456192U, 31588288U, 31719232U, 31850432U, 31981504U, 32110784U, 32243392U,
+ 32372672U, 32505664U, 32636608U, 32767808U, 32897344U, 33029824U, 33160768U,
+ 33289664U, 33423296U, 33554368U, 33683648U, 33816512U, 33947456U, 34076992U,
+ 34208704U, 34340032U, 34471744U, 34600256U, 34734016U, 34864576U, 34993984U,
+ 35127104U, 35258176U, 35386688U, 35518528U, 35650624U, 35782336U, 35910976U,
+ 36044608U, 36175808U, 36305728U, 36436672U, 36568384U, 36699968U, 36830656U,
+ 36961984U, 37093312U, 37223488U, 37355072U, 37486528U, 37617472U, 37747904U,
+ 37879232U, 38009792U, 38141888U, 38272448U, 38403392U, 38535104U, 38660672U,
+ 38795584U, 38925632U, 39059264U, 39190336U, 39320768U, 39452096U, 39581632U,
+ 39713984U, 39844928U, 39974848U, 40107968U, 40238144U, 40367168U, 40500032U,
+ 40631744U, 40762816U, 40894144U, 41023552U, 41155904U, 41286208U, 41418304U,
+ 41547712U, 41680448U, 41811904U, 41942848U, 42073792U, 42204992U, 42334912U,
+ 42467008U, 42597824U, 42729152U, 42860096U, 42991552U, 43122368U, 43253696U,
+ 43382848U, 43515712U, 43646912U, 43777088U, 43907648U, 44039104U, 44170432U,
+ 44302144U, 44433344U, 44564288U, 44694976U, 44825152U, 44956864U, 45088448U,
+ 45219008U, 45350464U, 45481024U, 45612608U, 45744064U, 45874496U, 46006208U,
+ 46136768U, 46267712U, 46399424U, 46529344U, 46660672U, 46791488U, 46923328U,
+ 47053504U, 47185856U, 47316928U, 47447872U, 47579072U, 47710144U, 47839936U,
+ 47971648U, 48103232U, 48234176U, 48365248U, 48496192U, 48627136U, 48757312U,
+ 48889664U, 49020736U, 49149248U, 49283008U, 49413824U, 49545152U, 49675712U,
+ 49807168U, 49938368U, 50069056U, 50200256U, 50331584U, 50462656U, 50593472U,
+ 50724032U, 50853952U, 50986048U, 51117632U, 51248576U, 51379904U, 51510848U,
+ 51641792U, 51773248U, 51903296U, 52035136U, 52164032U, 52297664U, 52427968U,
+ 52557376U, 52690112U, 52821952U, 52952896U, 53081536U, 53213504U, 53344576U,
+ 53475776U, 53608384U, 53738816U, 53870528U, 54000832U, 54131776U, 54263744U,
+ 54394688U, 54525248U, 54655936U, 54787904U, 54918592U, 55049152U, 55181248U,
+ 55312064U, 55442752U, 55574336U, 55705024U, 55836224U, 55967168U, 56097856U,
+ 56228672U, 56358592U, 56490176U, 56621888U, 56753728U, 56884928U, 57015488U,
+ 57146816U, 57278272U, 57409216U, 57540416U, 57671104U, 57802432U, 57933632U,
+ 58064576U, 58195264U, 58326976U, 58457408U, 58588864U, 58720192U, 58849984U,
+ 58981696U, 59113024U, 59243456U, 59375552U, 59506624U, 59637568U, 59768512U,
+ 59897792U, 60030016U, 60161984U, 60293056U, 60423872U, 60554432U, 60683968U,
+ 60817216U, 60948032U, 61079488U, 61209664U, 61341376U, 61471936U, 61602752U,
+ 61733696U, 61865792U, 61996736U, 62127808U, 62259136U, 62389568U, 62520512U,
+ 62651584U, 62781632U, 62910784U, 63045056U, 63176128U, 63307072U, 63438656U,
+ 63569216U, 63700928U, 63831616U, 63960896U, 64093888U, 64225088U, 64355392U,
+ 64486976U, 64617664U, 64748608U, 64879424U, 65009216U, 65142464U, 65273792U,
+ 65402816U, 65535424U, 65666752U, 65797696U, 65927744U, 66060224U, 66191296U,
+ 66321344U, 66453056U, 66584384U, 66715328U, 66846656U, 66977728U, 67108672U,
+ 67239104U, 67370432U, 67501888U, 67631296U, 67763776U, 67895104U, 68026304U,
+ 68157248U, 68287936U, 68419264U, 68548288U, 68681408U, 68811968U, 68942912U,
+ 69074624U, 69205568U, 69337024U, 69467584U, 69599168U, 69729472U, 69861184U,
+ 69989824U, 70122944U, 70253888U, 70385344U, 70515904U, 70647232U, 70778816U,
+ 70907968U, 71040832U, 71171648U, 71303104U, 71432512U, 71564992U, 71695168U,
+ 71826368U, 71958464U, 72089536U, 72219712U, 72350144U, 72482624U, 72613568U,
+ 72744512U, 72875584U, 73006144U, 73138112U, 73268672U, 73400128U, 73530944U,
+ 73662272U, 73793344U, 73924544U, 74055104U, 74185792U, 74316992U, 74448832U,
+ 74579392U, 74710976U, 74841664U, 74972864U, 75102784U, 75233344U, 75364544U,
+ 75497024U, 75627584U, 75759296U, 75890624U, 76021696U, 76152256U, 76283072U,
+ 76414144U, 76545856U, 76676672U, 76806976U, 76937792U, 77070016U, 77200832U,
+ 77331392U, 77462464U, 77593664U, 77725376U, 77856448U, 77987776U, 78118336U,
+ 78249664U, 78380992U, 78511424U, 78642496U, 78773056U, 78905152U, 79033664U,
+ 79166656U, 79297472U, 79429568U, 79560512U, 79690816U, 79822784U, 79953472U,
+ 80084672U, 80214208U, 80346944U, 80477632U, 80608576U, 80740288U, 80870848U,
+ 81002048U, 81133504U, 81264448U, 81395648U, 81525952U, 81657536U, 81786304U,
+ 81919808U, 82050112U, 82181312U, 82311616U, 82443968U, 82573376U, 82705984U,
+ 82835776U, 82967744U, 83096768U, 83230528U, 83359552U, 83491264U, 83622464U,
+ 83753536U, 83886016U, 84015296U, 84147776U, 84277184U, 84409792U, 84540608U,
+ 84672064U, 84803008U, 84934336U, 85065152U, 85193792U, 85326784U, 85458496U,
+ 85589312U, 85721024U, 85851968U, 85982656U, 86112448U, 86244416U, 86370112U,
+ 86506688U, 86637632U, 86769344U, 86900672U, 87031744U, 87162304U, 87293632U,
+ 87424576U, 87555392U, 87687104U, 87816896U, 87947968U, 88079168U, 88211264U,
+ 88341824U, 88473152U, 88603712U, 88735424U, 88862912U, 88996672U, 89128384U,
+ 89259712U, 89390272U, 89521984U, 89652544U, 89783872U, 89914816U, 90045376U,
+ 90177088U, 90307904U, 90438848U, 90569152U, 90700096U, 90832832U, 90963776U,
+ 91093696U, 91223744U, 91356992U, 91486784U, 91618496U, 91749824U, 91880384U,
+ 92012224U, 92143552U, 92273344U, 92405696U, 92536768U, 92666432U, 92798912U,
+ 92926016U, 93060544U, 93192128U, 93322816U, 93453632U, 93583936U, 93715136U,
+ 93845056U, 93977792U, 94109504U, 94240448U, 94371776U, 94501184U, 94632896U,
+ 94764224U, 94895552U, 95023424U, 95158208U, 95287744U, 95420224U, 95550016U,
+ 95681216U, 95811904U, 95943872U, 96075328U, 96203584U, 96337856U, 96468544U,
+ 96599744U, 96731072U, 96860992U, 96992576U, 97124288U, 97254848U, 97385536U,
+ 97517248U, 97647808U, 97779392U, 97910464U, 98041408U, 98172608U, 98303168U,
+ 98434496U, 98565568U, 98696768U, 98827328U, 98958784U, 99089728U, 99220928U,
+ 99352384U, 99482816U, 99614272U, 99745472U, 99876416U, 100007104U,
+ 100138048U, 100267072U, 100401088U, 100529984U, 100662592U, 100791872U,
+ 100925248U, 101056064U, 101187392U, 101317952U, 101449408U, 101580608U,
+ 101711296U, 101841728U, 101973824U, 102104896U, 102235712U, 102366016U,
+ 102498112U, 102628672U, 102760384U, 102890432U, 103021888U, 103153472U,
+ 103284032U, 103415744U, 103545152U, 103677248U, 103808576U, 103939648U,
+ 104070976U, 104201792U, 104332736U, 104462528U, 104594752U, 104725952U,
+ 104854592U, 104988608U, 105118912U, 105247808U, 105381184U, 105511232U,
+ 105643072U, 105774784U, 105903296U, 106037056U, 106167872U, 106298944U,
+ 106429504U, 106561472U, 106691392U, 106822592U, 106954304U, 107085376U,
+ 107216576U, 107346368U, 107478464U, 107609792U, 107739712U, 107872192U,
+ 108003136U, 108131392U, 108265408U, 108396224U, 108527168U, 108657344U,
+ 108789568U, 108920384U, 109049792U, 109182272U, 109312576U, 109444928U,
+ 109572928U, 109706944U, 109837888U, 109969088U, 110099648U, 110230976U,
+ 110362432U, 110492992U, 110624704U, 110755264U, 110886208U, 111017408U,
+ 111148864U, 111279296U, 111410752U, 111541952U, 111673024U, 111803456U,
+ 111933632U, 112066496U, 112196416U, 112328512U, 112457792U, 112590784U,
+ 112715968U, 112852672U, 112983616U, 113114944U, 113244224U, 113376448U,
+ 113505472U, 113639104U, 113770304U, 113901376U, 114031552U, 114163264U,
+ 114294592U, 114425536U, 114556864U, 114687424U, 114818624U, 114948544U,
+ 115080512U, 115212224U, 115343296U, 115473472U, 115605184U, 115736128U,
+ 115867072U, 115997248U, 116128576U, 116260288U, 116391488U, 116522944U,
+ 116652992U, 116784704U, 116915648U, 117046208U, 117178304U, 117308608U,
+ 117440192U, 117569728U, 117701824U, 117833024U, 117964096U, 118094656U,
+ 118225984U, 118357312U, 118489024U, 118617536U, 118749632U, 118882112U,
+ 119012416U, 119144384U, 119275328U, 119406016U, 119537344U, 119668672U,
+ 119798464U, 119928896U, 120061376U, 120192832U, 120321728U, 120454336U,
+ 120584512U, 120716608U, 120848192U, 120979136U, 121109056U, 121241408U,
+ 121372352U, 121502912U, 121634752U, 121764416U, 121895744U, 122027072U,
+ 122157632U, 122289088U, 122421184U, 122550592U, 122682944U, 122813888U,
+ 122945344U, 123075776U, 123207488U, 123338048U, 123468736U, 123600704U,
+ 123731264U, 123861952U, 123993664U, 124124608U, 124256192U, 124386368U,
+ 124518208U, 124649024U, 124778048U, 124911296U, 125041088U, 125173696U,
+ 125303744U, 125432896U, 125566912U, 125696576U, 125829056U, 125958592U,
+ 126090304U, 126221248U, 126352832U, 126483776U, 126615232U, 126746432U,
+ 126876608U, 127008704U, 127139392U, 127270336U, 127401152U, 127532224U,
+ 127663552U, 127794752U, 127925696U, 128055232U, 128188096U, 128319424U,
+ 128449856U, 128581312U, 128712256U, 128843584U, 128973632U, 129103808U,
+ 129236288U, 129365696U, 129498944U, 129629888U, 129760832U, 129892288U,
+ 130023104U, 130154048U, 130283968U, 130416448U, 130547008U, 130678336U,
+ 130807616U, 130939456U, 131071552U, 131202112U, 131331776U, 131464384U,
+ 131594048U, 131727296U, 131858368U, 131987392U, 132120256U, 132250816U,
+ 132382528U, 132513728U, 132644672U, 132774976U, 132905792U, 133038016U,
+ 133168832U, 133299392U, 133429312U, 133562048U, 133692992U, 133823296U,
+ 133954624U, 134086336U, 134217152U, 134348608U, 134479808U, 134607296U,
+ 134741056U, 134872384U, 135002944U, 135134144U, 135265472U, 135396544U,
+ 135527872U, 135659072U, 135787712U, 135921472U, 136052416U, 136182848U,
+ 136313792U, 136444864U, 136576448U, 136707904U, 136837952U, 136970048U,
+ 137099584U, 137232064U, 137363392U, 137494208U, 137625536U, 137755712U,
+ 137887424U, 138018368U, 138149824U, 138280256U, 138411584U, 138539584U,
+ 138672832U, 138804928U, 138936128U, 139066688U, 139196864U, 139328704U,
+ 139460032U, 139590208U, 139721024U, 139852864U, 139984576U, 140115776U,
+ 140245696U, 140376512U, 140508352U, 140640064U, 140769856U, 140902336U,
+ 141032768U, 141162688U, 141294016U, 141426496U, 141556544U, 141687488U,
+ 141819584U, 141949888U, 142080448U, 142212544U, 142342336U, 142474432U,
+ 142606144U, 142736192U, 142868288U, 142997824U, 143129408U, 143258944U,
+ 143392448U, 143523136U, 143653696U, 143785024U, 143916992U, 144045632U,
+ 144177856U, 144309184U, 144440768U, 144570688U, 144701888U, 144832448U,
+ 144965056U, 145096384U, 145227584U, 145358656U, 145489856U, 145620928U,
+ 145751488U, 145883072U, 146011456U, 146144704U, 146275264U, 146407232U,
+ 146538176U, 146668736U, 146800448U, 146931392U, 147062336U, 147193664U,
+ 147324224U, 147455936U, 147586624U, 147717056U, 147848768U, 147979456U,
+ 148110784U, 148242368U, 148373312U, 148503232U, 148635584U, 148766144U,
+ 148897088U, 149028416U, 149159488U, 149290688U, 149420224U, 149551552U,
+ 149683136U, 149814976U, 149943616U, 150076352U, 150208064U, 150338624U,
+ 150470464U, 150600256U, 150732224U, 150862784U, 150993088U, 151125952U,
+ 151254976U, 151388096U, 151519168U, 151649728U, 151778752U, 151911104U,
+ 152042944U, 152174144U, 152304704U, 152435648U, 152567488U, 152698816U,
+ 152828992U, 152960576U, 153091648U, 153222976U, 153353792U, 153484096U,
+ 153616192U, 153747008U, 153878336U, 154008256U, 154139968U, 154270912U,
+ 154402624U, 154533824U, 154663616U, 154795712U, 154926272U, 155057984U,
+ 155188928U, 155319872U, 155450816U, 155580608U, 155712064U, 155843392U,
+ 155971136U, 156106688U, 156237376U, 156367424U, 156499264U, 156630976U,
+ 156761536U, 156892352U, 157024064U, 157155008U, 157284416U, 157415872U,
+ 157545536U, 157677248U, 157810496U, 157938112U, 158071744U, 158203328U,
+ 158334656U, 158464832U, 158596288U, 158727616U, 158858048U, 158988992U,
+ 159121216U, 159252416U, 159381568U, 159513152U, 159645632U, 159776192U,
+ 159906496U, 160038464U, 160169536U, 160300352U, 160430656U, 160563008U,
+ 160693952U, 160822208U, 160956352U, 161086784U, 161217344U, 161349184U,
+ 161480512U, 161611456U, 161742272U, 161873216U, 162002752U, 162135872U,
+ 162266432U, 162397888U, 162529216U, 162660032U, 162790976U, 162922048U,
+ 163052096U, 163184576U, 163314752U, 163446592U, 163577408U, 163707968U,
+ 163839296U, 163969984U, 164100928U, 164233024U, 164364224U, 164494912U,
+ 164625856U, 164756672U, 164887616U, 165019072U, 165150016U, 165280064U,
+ 165412672U, 165543104U, 165674944U, 165805888U, 165936832U, 166067648U,
+ 166198336U, 166330048U, 166461248U, 166591552U, 166722496U, 166854208U,
+ 166985408U, 167116736U, 167246656U, 167378368U, 167508416U, 167641024U,
+ 167771584U, 167903168U, 168034112U, 168164032U, 168295744U, 168427456U,
+ 168557632U, 168688448U, 168819136U, 168951616U, 169082176U, 169213504U,
+ 169344832U, 169475648U, 169605952U, 169738048U, 169866304U, 169999552U,
+ 170131264U, 170262464U, 170393536U, 170524352U, 170655424U, 170782016U,
+ 170917696U, 171048896U, 171179072U, 171310784U, 171439936U, 171573184U,
+ 171702976U, 171835072U, 171966272U, 172097216U, 172228288U, 172359232U,
+ 172489664U, 172621376U, 172747712U, 172883264U, 173014208U, 173144512U,
+ 173275072U, 173407424U, 173539136U, 173669696U, 173800768U, 173931712U,
+ 174063424U, 174193472U, 174325696U, 174455744U, 174586816U, 174718912U,
+ 174849728U, 174977728U, 175109696U, 175242688U, 175374272U, 175504832U,
+ 175636288U, 175765696U, 175898432U, 176028992U, 176159936U, 176291264U,
+ 176422592U, 176552512U, 176684864U, 176815424U, 176946496U, 177076544U,
+ 177209152U, 177340096U, 177470528U, 177600704U, 177731648U, 177864256U,
+ 177994816U, 178126528U, 178257472U, 178387648U, 178518464U, 178650176U,
+ 178781888U, 178912064U, 179044288U, 179174848U, 179305024U, 179436736U,
+ 179568448U, 179698496U, 179830208U, 179960512U, 180092608U, 180223808U,
+ 180354752U, 180485696U, 180617152U, 180748096U, 180877504U, 181009984U,
+ 181139264U, 181272512U, 181402688U, 181532608U, 181663168U, 181795136U,
+ 181926592U, 182057536U, 182190016U, 182320192U, 182451904U, 182582336U,
+ 182713792U, 182843072U, 182976064U, 183107264U, 183237056U, 183368384U,
+ 183494848U, 183631424U, 183762752U, 183893824U, 184024768U, 184154816U,
+ 184286656U, 184417984U, 184548928U, 184680128U, 184810816U, 184941248U,
+ 185072704U, 185203904U, 185335616U, 185465408U, 185596352U, 185727296U,
+ 185859904U, 185989696U, 186121664U, 186252992U, 186383552U, 186514112U,
+ 186645952U, 186777152U, 186907328U, 187037504U, 187170112U, 187301824U,
+ 187429184U, 187562048U, 187693504U, 187825472U, 187957184U, 188087104U,
+ 188218304U, 188349376U, 188481344U, 188609728U, 188743616U, 188874304U,
+ 189005248U, 189136448U, 189265088U, 189396544U, 189528128U, 189660992U,
+ 189791936U, 189923264U, 190054208U, 190182848U, 190315072U, 190447424U,
+ 190577984U, 190709312U, 190840768U, 190971328U, 191102656U, 191233472U,
+ 191364032U, 191495872U, 191626816U, 191758016U, 191888192U, 192020288U,
+ 192148928U, 192282176U, 192413504U, 192542528U, 192674752U, 192805952U,
+ 192937792U, 193068608U, 193198912U, 193330496U, 193462208U, 193592384U,
+ 193723456U, 193854272U, 193985984U, 194116672U, 194247232U, 194379712U,
+ 194508352U, 194641856U, 194772544U, 194900672U, 195035072U, 195166016U,
+ 195296704U, 195428032U, 195558592U, 195690304U, 195818176U, 195952576U,
+ 196083392U, 196214336U, 196345792U, 196476736U, 196607552U, 196739008U,
+ 196869952U, 197000768U, 197130688U, 197262784U, 197394368U, 197523904U,
+ 197656384U, 197787584U, 197916608U, 198049472U, 198180544U, 198310208U,
+ 198442432U, 198573632U, 198705088U, 198834368U, 198967232U, 199097792U,
+ 199228352U, 199360192U, 199491392U, 199621696U, 199751744U, 199883968U,
+ 200014016U, 200146624U, 200276672U, 200408128U, 200540096U, 200671168U,
+ 200801984U, 200933312U, 201062464U, 201194944U, 201326144U, 201457472U,
+ 201588544U, 201719744U, 201850816U, 201981632U, 202111552U, 202244032U,
+ 202374464U, 202505152U, 202636352U, 202767808U, 202898368U, 203030336U,
+ 203159872U, 203292608U, 203423296U, 203553472U, 203685824U, 203816896U,
+ 203947712U, 204078272U, 204208192U, 204341056U, 204472256U, 204603328U,
+ 204733888U, 204864448U, 204996544U, 205125568U, 205258304U, 205388864U,
+ 205517632U, 205650112U, 205782208U, 205913536U, 206044736U, 206176192U,
+ 206307008U, 206434496U, 206569024U, 206700224U, 206831168U, 206961856U,
+ 207093056U, 207223616U, 207355328U, 207486784U, 207616832U, 207749056U,
+ 207879104U, 208010048U, 208141888U, 208273216U, 208404032U, 208534336U,
+ 208666048U, 208796864U, 208927424U, 209059264U, 209189824U, 209321792U,
+ 209451584U, 209582656U, 209715136U, 209845568U, 209976896U, 210106432U,
+ 210239296U, 210370112U, 210501568U, 210630976U, 210763712U, 210894272U,
+ 211024832U, 211156672U, 211287616U, 211418176U, 211549376U, 211679296U,
+ 211812032U, 211942592U, 212074432U, 212204864U, 212334016U, 212467648U,
+ 212597824U, 212727616U, 212860352U, 212991424U, 213120832U, 213253952U,
+ 213385024U, 213515584U, 213645632U, 213777728U, 213909184U, 214040128U,
+ 214170688U, 214302656U, 214433728U, 214564544U, 214695232U, 214826048U,
+ 214956992U, 215089088U, 215219776U, 215350592U, 215482304U, 215613248U,
+ 215743552U, 215874752U, 216005312U, 216137024U, 216267328U, 216399296U,
+ 216530752U, 216661696U, 216790592U, 216923968U, 217054528U, 217183168U,
+ 217316672U, 217448128U, 217579072U, 217709504U, 217838912U, 217972672U,
+ 218102848U, 218233024U, 218364736U, 218496832U, 218627776U, 218759104U,
+ 218888896U, 219021248U, 219151936U, 219281728U, 219413056U, 219545024U,
+ 219675968U, 219807296U, 219938624U, 220069312U, 220200128U, 220331456U,
+ 220461632U, 220592704U, 220725184U, 220855744U, 220987072U, 221117888U,
+ 221249216U, 221378368U, 221510336U, 221642048U, 221772736U, 221904832U,
+ 222031808U, 222166976U, 222297536U, 222428992U, 222559936U, 222690368U,
+ 222820672U, 222953152U, 223083968U, 223213376U, 223345984U, 223476928U,
+ 223608512U, 223738688U, 223869376U, 224001472U, 224132672U, 224262848U,
+ 224394944U, 224524864U, 224657344U, 224788288U, 224919488U, 225050432U,
+ 225181504U, 225312704U, 225443776U, 225574592U, 225704768U, 225834176U,
+ 225966784U, 226097216U, 226229824U, 226360384U, 226491712U, 226623424U,
+ 226754368U, 226885312U, 227015104U, 227147456U, 227278528U, 227409472U,
+ 227539904U, 227669696U, 227802944U, 227932352U, 228065216U, 228196288U,
+ 228326464U, 228457792U, 228588736U, 228720064U, 228850112U, 228981056U,
+ 229113152U, 229243328U, 229375936U, 229505344U, 229636928U, 229769152U,
+ 229894976U, 230030272U, 230162368U, 230292416U, 230424512U, 230553152U,
+ 230684864U, 230816704U, 230948416U, 231079616U, 231210944U, 231342016U,
+ 231472448U, 231603776U, 231733952U, 231866176U, 231996736U, 232127296U,
+ 232259392U, 232388672U, 232521664U, 232652608U, 232782272U, 232914496U,
+ 233043904U, 233175616U, 233306816U, 233438528U, 233569984U, 233699776U,
+ 233830592U, 233962688U, 234092224U, 234221888U, 234353984U, 234485312U,
+ 234618304U, 234749888U, 234880832U, 235011776U, 235142464U, 235274048U,
+ 235403456U, 235535936U, 235667392U, 235797568U, 235928768U, 236057152U,
+ 236190272U, 236322752U, 236453312U, 236583616U, 236715712U, 236846528U,
+ 236976448U, 237108544U, 237239104U, 237371072U, 237501632U, 237630784U,
+ 237764416U, 237895232U, 238026688U, 238157632U, 238286912U, 238419392U,
+ 238548032U, 238681024U, 238812608U, 238941632U, 239075008U, 239206336U,
+ 239335232U, 239466944U, 239599168U, 239730496U, 239861312U, 239992384U,
+ 240122816U, 240254656U, 240385856U, 240516928U, 240647872U, 240779072U,
+ 240909632U, 241040704U, 241171904U, 241302848U, 241433408U, 241565248U,
+ 241696192U, 241825984U, 241958848U, 242088256U, 242220224U, 242352064U,
+ 242481856U, 242611648U, 242744896U, 242876224U, 243005632U, 243138496U,
+ 243268672U, 243400384U, 243531712U, 243662656U, 243793856U, 243924544U,
+ 244054592U, 244187072U, 244316608U, 244448704U, 244580032U, 244710976U,
+ 244841536U, 244972864U, 245104448U, 245233984U, 245365312U, 245497792U,
+ 245628736U, 245759936U, 245889856U, 246021056U, 246152512U, 246284224U,
+ 246415168U, 246545344U, 246675904U, 246808384U, 246939584U, 247070144U,
+ 247199552U, 247331648U, 247463872U, 247593536U, 247726016U, 247857088U,
+ 247987648U, 248116928U, 248249536U, 248380736U, 248512064U, 248643008U,
+ 248773312U, 248901056U, 249036608U, 249167552U, 249298624U, 249429184U,
+ 249560512U, 249692096U, 249822784U, 249954112U, 250085312U, 250215488U,
+ 250345792U, 250478528U, 250608704U, 250739264U, 250870976U, 251002816U,
+ 251133632U, 251263552U, 251395136U, 251523904U, 251657792U, 251789248U,
+ 251919424U, 252051392U, 252182464U, 252313408U, 252444224U, 252575552U,
+ 252706624U, 252836032U, 252968512U, 253099712U, 253227584U, 253361728U,
+ 253493056U, 253623488U, 253754432U, 253885504U, 254017216U, 254148032U,
+ 254279488U, 254410432U, 254541376U, 254672576U, 254803264U, 254933824U,
+ 255065792U, 255196736U, 255326528U, 255458752U, 255589952U, 255721408U,
+ 255851072U, 255983296U, 256114624U, 256244416U, 256374208U, 256507712U,
+ 256636096U, 256768832U, 256900544U, 257031616U, 257162176U, 257294272U,
+ 257424448U, 257555776U, 257686976U, 257818432U, 257949632U, 258079552U,
+ 258211136U, 258342464U, 258473408U, 258603712U, 258734656U, 258867008U,
+ 258996544U, 259127744U, 259260224U, 259391296U, 259522112U, 259651904U,
+ 259784384U, 259915328U, 260045888U, 260175424U, 260308544U, 260438336U,
+ 260570944U, 260700992U, 260832448U, 260963776U, 261092672U, 261226304U,
+ 261356864U, 261487936U, 261619648U, 261750592U, 261879872U, 262011968U,
+ 262143424U, 262274752U, 262404416U, 262537024U, 262667968U, 262799296U,
+ 262928704U, 263061184U, 263191744U, 263322944U, 263454656U, 263585216U,
+ 263716672U, 263847872U, 263978944U, 264108608U, 264241088U, 264371648U,
+ 264501184U, 264632768U, 264764096U, 264895936U, 265024576U, 265158464U,
+ 265287488U, 265418432U, 265550528U, 265681216U, 265813312U, 265943488U,
+ 266075968U, 266206144U, 266337728U, 266468032U, 266600384U, 266731072U,
+ 266862272U, 266993344U, 267124288U, 267255616U, 267386432U, 267516992U,
+ 267648704U, 267777728U, 267910592U, 268040512U, 268172096U, 268302784U,
+ 268435264U, 268566208U, 268696256U, 268828096U, 268959296U, 269090368U,
+ 269221312U, 269352256U, 269482688U, 269614784U, 269745856U, 269876416U,
+ 270007616U, 270139328U, 270270272U, 270401216U, 270531904U, 270663616U,
+ 270791744U, 270924736U, 271056832U, 271186112U, 271317184U, 271449536U,
+ 271580992U, 271711936U, 271843136U, 271973056U, 272105408U, 272236352U,
+ 272367296U, 272498368U, 272629568U, 272759488U, 272891456U, 273022784U,
+ 273153856U, 273284672U, 273415616U, 273547072U, 273677632U, 273808448U,
+ 273937088U, 274071488U, 274200896U, 274332992U, 274463296U, 274595392U,
+ 274726208U, 274857536U, 274988992U, 275118656U, 275250496U, 275382208U,
+ 275513024U, 275643968U, 275775296U, 275906368U, 276037184U, 276167872U,
+ 276297664U, 276429376U, 276560576U, 276692672U, 276822976U, 276955072U,
+ 277085632U, 277216832U, 277347008U, 277478848U, 277609664U, 277740992U,
+ 277868608U, 278002624U, 278134336U, 278265536U, 278395328U, 278526784U,
+ 278657728U, 278789824U, 278921152U, 279052096U, 279182912U, 279313088U,
+ 279443776U, 279576256U, 279706048U, 279838528U, 279969728U, 280099648U,
+ 280230976U, 280361408U, 280493632U, 280622528U, 280755392U, 280887104U,
+ 281018176U, 281147968U, 281278912U, 281411392U, 281542592U, 281673152U,
+ 281803712U, 281935552U, 282066496U, 282197312U, 282329024U, 282458816U,
+ 282590272U, 282720832U, 282853184U, 282983744U, 283115072U, 283246144U,
+ 283377344U, 283508416U, 283639744U, 283770304U, 283901504U, 284032576U,
+ 284163136U, 284294848U, 284426176U, 284556992U, 284687296U, 284819264U,
+ 284950208U, 285081536U
+}; // 2048 entries: one per ETHASH_EPOCH_LENGTH-block epoch; indexing past epoch 2047 is unsupported (see ethash_get_cachesize's assert)
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/endian.h b/vendor/github.com/ethereum/ethash/src/libethash/endian.h
new file mode 100644
index 000000000..5b8abf03d
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/endian.h
@@ -0,0 +1,78 @@
+#pragma once // Byte-order detection and endian-fixup helpers for ethash.
+
+#include <stdint.h>
+#include "compiler.h"
+
+#if defined(__MINGW32__) || defined(_WIN32)
+  # define LITTLE_ENDIAN 1234
+  # define BYTE_ORDER LITTLE_ENDIAN
+#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
+  # include <sys/endian.h>
+#elif defined(__OpenBSD__) || defined(__SVR4)
+  # include <sys/types.h>
+#elif defined(__APPLE__)
+# include <machine/endian.h>
+#elif defined( BSD ) && (BSD >= 199103)
+  # include <machine/endian.h>
+#elif defined( __QNXNTO__ ) && defined( __LITTLEENDIAN__ )
+  # define LITTLE_ENDIAN 1234
+  # define BYTE_ORDER LITTLE_ENDIAN
+#elif defined( __QNXNTO__ ) && defined( __BIGENDIAN__ )
+  # define BIG_ENDIAN 4321 /* fixed: was 1234, the conventional LITTLE_ENDIAN value; if the platform also defined LITTLE_ENDIAN as 1234, the byte-order test below would mis-detect big-endian as little-endian */
+  # define BYTE_ORDER BIG_ENDIAN
+#else
+# include <endian.h>
+#endif
+
+#if defined(_WIN32)
+#include <stdlib.h>
+#define ethash_swap_u32(input_) _byteswap_ulong(input_)
+#define ethash_swap_u64(input_) _byteswap_uint64(input_)
+#elif defined(__APPLE__)
+#include <libkern/OSByteOrder.h>
+#define ethash_swap_u32(input_) OSSwapInt32(input_)
+#define ethash_swap_u64(input_) OSSwapInt64(input_)
+#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
+#define ethash_swap_u32(input_) bswap32(input_)
+#define ethash_swap_u64(input_) bswap64(input_)
+#elif defined(__OpenBSD__)
+#include <endian.h>
+#define ethash_swap_u32(input_) swap32(input_)
+#define ethash_swap_u64(input_) swap64(input_)
+#else // posix
+#include <byteswap.h>
+#define ethash_swap_u32(input_) bswap_32(input_)
+#define ethash_swap_u64(input_) bswap_64(input_)
+#endif
+
+
+#if LITTLE_ENDIAN == BYTE_ORDER // note: an undefined macro evaluates to 0 in #if, so exactly one branch is taken
+
+#define fix_endian32(dst_ ,src_) dst_ = src_
+#define fix_endian32_same(val_)
+#define fix_endian64(dst_, src_) dst_ = src_
+#define fix_endian64_same(val_)
+#define fix_endian_arr32(arr_, size_)
+#define fix_endian_arr64(arr_, size_)
+
+#elif BIG_ENDIAN == BYTE_ORDER
+
+#define fix_endian32(dst_, src_) dst_ = ethash_swap_u32(src_)
+#define fix_endian32_same(val_) val_ = ethash_swap_u32(val_)
+#define fix_endian64(dst_, src_) dst_ = ethash_swap_u64(src_)
+#define fix_endian64_same(val_) val_ = ethash_swap_u64(val_)
+#define fix_endian_arr32(arr_, size_) \
+  do { \
+    for (unsigned i_ = 0; i_ < (size_); ++i_) { \
+      arr_[i_] = ethash_swap_u32(arr_[i_]); \
+    } \
+  } while (0)
+#define fix_endian_arr64(arr_, size_) \
+  do { \
+    for (unsigned i_ = 0; i_ < (size_); ++i_) { \
+      arr_[i_] = ethash_swap_u64(arr_[i_]); \
+    }	\
+  } while (0)
+#else
+# error "endian not supported"
+#endif // BYTE_ORDER
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/ethash.h b/vendor/github.com/ethereum/ethash/src/libethash/ethash.h
new file mode 100644
index 000000000..0c6a1f9e9
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/ethash.h
@@ -0,0 +1,147 @@
+/*
+  This file is part of ethash.
+
+  ethash is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  ethash is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with ethash.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/** @file ethash.h
+* @date 2015
+*/
+#pragma once
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stddef.h>
+#include "compiler.h"
+
+#define ETHASH_REVISION 23
+#define ETHASH_DATASET_BYTES_INIT	1073741824U // 2**30
+#define ETHASH_DATASET_BYTES_GROWTH 8388608U  // 2**23
+#define ETHASH_CACHE_BYTES_INIT	1073741824U // NOTE(review): value is 2**30, but the spec's CACHE_BYTES_INIT is 2**24 (16777216U); actual sizes come from the precomputed cache_sizes table, so this constant appears unused here -- confirm against upstream before relying on it
+#define ETHASH_CACHE_BYTES_GROWTH 131072U  // 2**17
+#define ETHASH_EPOCH_LENGTH 30000U
+#define ETHASH_MIX_BYTES 128
+#define ETHASH_HASH_BYTES 64
+#define ETHASH_DATASET_PARENTS 256
+#define ETHASH_CACHE_ROUNDS 3
+#define ETHASH_ACCESSES 64
+#define ETHASH_DAG_MAGIC_NUM_SIZE 8
+#define ETHASH_DAG_MAGIC_NUM 0xFEE1DEADBADDCAFE
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// Type of a seedhash/blockhash etc. (32 raw bytes)
+typedef struct ethash_h256 { uint8_t b[32]; } ethash_h256_t;
+
+// convenience macro to statically initialize an h256_t
+// usage:
+// ethash_h256_t a = ethash_h256_static_init(1, 2, 3, ... )
+// have to provide all 32 values. If you don't provide all the rest
+// will simply be uninitialized (not guaranteed to be 0)
+#define ethash_h256_static_init(...)			\
+	{ {__VA_ARGS__} }
+
+struct ethash_light;
+typedef struct ethash_light* ethash_light_t;
+struct ethash_full;
+typedef struct ethash_full* ethash_full_t;
+typedef int(*ethash_callback_t)(unsigned);
+
+typedef struct ethash_return_value {
+	ethash_h256_t result;
+	ethash_h256_t mix_hash;
+	bool success;
+} ethash_return_value_t;
+
+/**
+ * Allocate and initialize a new ethash_light handler
+ *
+ * @param block_number   The block number for which to create the handler
+ * @return               Newly allocated ethash_light handler or NULL in case of
+ *                       ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes()
+ */
+ethash_light_t ethash_light_new(uint64_t block_number);
+/**
+ * Frees a previously allocated ethash_light handler
+ * @param light        The light handler to free
+ */
+void ethash_light_delete(ethash_light_t light);
+/**
+ * Calculate the light client data
+ *
+ * @param light          The light client handler
+ * @param header_hash    The header hash to pack into the mix
+ * @param nonce          The nonce to pack into the mix
+ * @return               an object of ethash_return_value_t holding the return values
+ */
+ethash_return_value_t ethash_light_compute(
+	ethash_light_t light,
+	ethash_h256_t const header_hash,
+	uint64_t nonce
+);
+
+/**
+ * Allocate and initialize a new ethash_full handler
+ *
+ * @param light         The light handler containing the cache.
+ * @param callback      A callback function with signature of @ref ethash_callback_t
+ *                      It accepts an unsigned with which a progress of DAG calculation
+ *                      can be displayed. If all goes well the callback should return 0.
+ *                      If a non-zero value is returned then DAG generation will stop.
+ *                      Be advised. A progress value of 100 means that DAG creation is
+ *                      almost complete and that this function will soon return successfully.
+ *                      It does not mean that the function has already had a successful return.
+ * @return              Newly allocated ethash_full handler or NULL in case of
+ *                      ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data()
+ */
+ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback);
+
+/**
+ * Frees a previously allocated ethash_full handler
+ * @param full    The light handler to free
+ */
+void ethash_full_delete(ethash_full_t full);
+/**
+ * Calculate the full client data
+ *
+ * @param full           The full client handler
+ * @param header_hash    The header hash to pack into the mix
+ * @param nonce          The nonce to pack into the mix
+ * @return               An object of ethash_return_value to hold the return value
+ */
+ethash_return_value_t ethash_full_compute(
+	ethash_full_t full,
+	ethash_h256_t const header_hash,
+	uint64_t nonce
+);
+/**
+ * Get a pointer to the full DAG data
+ */
+void const* ethash_full_dag(ethash_full_t full);
+/**
+ * Get the size of the DAG data
+ */
+uint64_t ethash_full_dag_size(ethash_full_t full);
+
+/**
+ * Calculate the seedhash for a given block number
+ */
+ethash_h256_t ethash_get_seedhash(uint64_t block_number);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/fnv.h b/vendor/github.com/ethereum/ethash/src/libethash/fnv.h
new file mode 100644
index 000000000..82cd655c4
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/fnv.h
@@ -0,0 +1,43 @@
+/*
+  This file is part of cpp-ethereum.
+
+  cpp-ethereum is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  cpp-ethereum is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with cpp-ethereum.  If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file fnv.h
+* @author Matthew Wampler-Doty <negacthulhu@gmail.com>
+* @date 2015
+*/
+
+#pragma once
+#include <stdint.h>
+#include "compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FNV_PRIME 0x01000193 // standard 32-bit FNV prime (2^24 + 2^8 + 0x93)
+
+/* The FNV-1 spec multiplies the prime with the input one byte (octet) in turn.
+   We instead multiply it with the full 32-bit input.
+   This gives a different result compared to a canonical FNV-1 implementation.
+*/
+static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
+{
+	return x * FNV_PRIME ^ y; // note: * binds tighter than ^, so this is (x * FNV_PRIME) ^ y
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/internal.c b/vendor/github.com/ethereum/ethash/src/libethash/internal.c
new file mode 100644
index 000000000..0a830fc82
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/internal.c
@@ -0,0 +1,507 @@
+/*
+ This file is part of ethash.
+
+ ethash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ ethash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file internal.c
+* @author Tim Hughes <tim@twistedfury.com>
+* @author Matthew Wampler-Doty
+* @date 2015
+*/
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stddef.h>
+#include <errno.h>
+#include <math.h>
+#include "mmap.h"
+#include "ethash.h"
+#include "fnv.h"
+#include "endian.h"
+#include "internal.h"
+#include "data_sizes.h"
+#include "io.h"
+
+#ifdef WITH_CRYPTOPP
+
+#include "sha3_cryptopp.h"
+
+#else
+#include "sha3.h"
+#endif // WITH_CRYPTOPP
+
+uint64_t ethash_get_datasize(uint64_t const block_number)
+{
+ assert(block_number / ETHASH_EPOCH_LENGTH < 2048);
+ return dag_sizes[block_number / ETHASH_EPOCH_LENGTH];
+}
+
+uint64_t ethash_get_cachesize(uint64_t const block_number)
+{
+ assert(block_number / ETHASH_EPOCH_LENGTH < 2048);
+ return cache_sizes[block_number / ETHASH_EPOCH_LENGTH];
+}
+
+// Follows Sergio's "STRICT MEMORY HARD HASHING FUNCTIONS" (2014)
+// https://bitslog.files.wordpress.com/2013/12/memohash-v0-3.pdf
+// SeqMemoHash(s, R, N)
// Follows Sergio's "STRICT MEMORY HARD HASHING FUNCTIONS" (2014)
// https://bitslog.files.wordpress.com/2013/12/memohash-v0-3.pdf
// SeqMemoHash(s, R, N)
//
// Fill `nodes` with the light-client cache derived from `seed`: first a
// sequential Keccak-512 chain, then ETHASH_CACHE_ROUNDS mixing passes.
// Returns false if cache_size is not a whole number of nodes.
bool static ethash_compute_cache_nodes(
	node* const nodes,
	uint64_t cache_size,
	ethash_h256_t const* seed
)
{
	if (cache_size % sizeof(node) != 0) {
		return false;
	}
	uint32_t const num_nodes = (uint32_t) (cache_size / sizeof(node));

	// Sequential chain: node[0] = keccak512(seed); node[i] = keccak512(node[i-1]).
	SHA3_512(nodes[0].bytes, (uint8_t*)seed, 32);

	for (uint32_t i = 1; i != num_nodes; ++i) {
		SHA3_512(nodes[i].bytes, nodes[i - 1].bytes, 64);
	}

	for (uint32_t j = 0; j != ETHASH_CACHE_ROUNDS; j++) {
		for (uint32_t i = 0; i != num_nodes; i++) {
			// XOR the (wrapping) predecessor node with a pseudo-randomly
			// selected node, then rehash in place. Order matters: later
			// nodes in the same pass see already-updated earlier nodes.
			uint32_t const idx = nodes[i].words[0] % num_nodes;
			node data;
			data = nodes[(num_nodes - 1 + i) % num_nodes];
			for (uint32_t w = 0; w != NODE_WORDS; ++w) {
				data.words[w] ^= nodes[idx].words[w];
			}
			SHA3_512(nodes[i].bytes, data.bytes, sizeof(data));
		}
	}

	// now perform endian conversion
	fix_endian_arr32(nodes->words, num_nodes * NODE_WORDS);
	return true;
}
+
// Compute full-DAG node `node_index` into `ret` using only the light cache:
// seed the node from a cache entry, XOR in the index, hash, then fold in
// ETHASH_DATASET_PARENTS pseudo-randomly selected cache parents via FNV
// and hash once more.
void ethash_calculate_dag_item(
	node* const ret,
	uint32_t node_index,
	ethash_light_t const light
)
{
	uint32_t num_parent_nodes = (uint32_t) (light->cache_size / sizeof(node));
	node const* cache_nodes = (node const *) light->cache;
	node const* init = &cache_nodes[node_index % num_parent_nodes];
	memcpy(ret, init, sizeof(node));
	ret->words[0] ^= node_index;
	SHA3_512(ret->bytes, ret->bytes, sizeof(node));
#if defined(_M_X64) && ENABLE_SSE
	// SSE path: keep the node's four 128-bit lanes in registers across parents.
	__m128i const fnv_prime = _mm_set1_epi32(FNV_PRIME);
	__m128i xmm0 = ret->xmm[0];
	__m128i xmm1 = ret->xmm[1];
	__m128i xmm2 = ret->xmm[2];
	__m128i xmm3 = ret->xmm[3];
#endif

	for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) {
		// Parent selection depends on the node's current contents, so each
		// iteration must observe the previous iteration's writes to ret.
		uint32_t parent_index = fnv_hash(node_index ^ i, ret->words[i % NODE_WORDS]) % num_parent_nodes;
		node const *parent = &cache_nodes[parent_index];

#if defined(_M_X64) && ENABLE_SSE
		{
			// fnv_hash on four words at a time: (lane * prime) ^ parent_lane.
			xmm0 = _mm_mullo_epi32(xmm0, fnv_prime);
			xmm1 = _mm_mullo_epi32(xmm1, fnv_prime);
			xmm2 = _mm_mullo_epi32(xmm2, fnv_prime);
			xmm3 = _mm_mullo_epi32(xmm3, fnv_prime);
			xmm0 = _mm_xor_si128(xmm0, parent->xmm[0]);
			xmm1 = _mm_xor_si128(xmm1, parent->xmm[1]);
			xmm2 = _mm_xor_si128(xmm2, parent->xmm[2]);
			xmm3 = _mm_xor_si128(xmm3, parent->xmm[3]);

			// have to write to ret as values are used to compute index
			ret->xmm[0] = xmm0;
			ret->xmm[1] = xmm1;
			ret->xmm[2] = xmm2;
			ret->xmm[3] = xmm3;
		}
	#else
		{
			// Scalar path: FNV-fold each 32-bit word with the parent's word.
			for (unsigned w = 0; w != NODE_WORDS; ++w) {
				ret->words[w] = fnv_hash(ret->words[w], parent->words[w]);
			}
		}
#endif
	}
	SHA3_512(ret->bytes, ret->bytes, sizeof(node));
}
+
+bool ethash_compute_full_data(
+ void* mem,
+ uint64_t full_size,
+ ethash_light_t const light,
+ ethash_callback_t callback
+)
+{
+ if (full_size % (sizeof(uint32_t) * MIX_WORDS) != 0 ||
+ (full_size % sizeof(node)) != 0) {
+ return false;
+ }
+ uint32_t const max_n = (uint32_t)(full_size / sizeof(node));
+ node* full_nodes = mem;
+ double const progress_change = 1.0f / max_n;
+ double progress = 0.0f;
+ // now compute full nodes
+ for (uint32_t n = 0; n != max_n; ++n) {
+ if (callback &&
+ n % (max_n / 100) == 0 &&
+ callback((unsigned int)(ceil(progress * 100.0f))) != 0) {
+
+ return false;
+ }
+ progress += progress_change;
+ ethash_calculate_dag_item(&(full_nodes[n]), n, light);
+ }
+ return true;
+}
+
+static bool ethash_hash(
+ ethash_return_value_t* ret,
+ node const* full_nodes,
+ ethash_light_t const light,
+ uint64_t full_size,
+ ethash_h256_t const header_hash,
+ uint64_t const nonce
+)
+{
+ if (full_size % MIX_WORDS != 0) {
+ return false;
+ }
+
+ // pack hash and nonce together into first 40 bytes of s_mix
+ assert(sizeof(node) * 8 == 512);
+ node s_mix[MIX_NODES + 1];
+ memcpy(s_mix[0].bytes, &header_hash, 32);
+ fix_endian64(s_mix[0].double_words[4], nonce);
+
+ // compute sha3-512 hash and replicate across mix
+ SHA3_512(s_mix->bytes, s_mix->bytes, 40);
+ fix_endian_arr32(s_mix[0].words, 16);
+
+ node* const mix = s_mix + 1;
+ for (uint32_t w = 0; w != MIX_WORDS; ++w) {
+ mix->words[w] = s_mix[0].words[w % NODE_WORDS];
+ }
+
+ unsigned const page_size = sizeof(uint32_t) * MIX_WORDS;
+ unsigned const num_full_pages = (unsigned) (full_size / page_size);
+
+ for (unsigned i = 0; i != ETHASH_ACCESSES; ++i) {
+ uint32_t const index = fnv_hash(s_mix->words[0] ^ i, mix->words[i % MIX_WORDS]) % num_full_pages;
+
+ for (unsigned n = 0; n != MIX_NODES; ++n) {
+ node const* dag_node;
+ if (full_nodes) {
+ dag_node = &full_nodes[MIX_NODES * index + n];
+ } else {
+ node tmp_node;
+ ethash_calculate_dag_item(&tmp_node, index * MIX_NODES + n, light);
+ dag_node = &tmp_node;
+ }
+
+#if defined(_M_X64) && ENABLE_SSE
+ {
+ __m128i fnv_prime = _mm_set1_epi32(FNV_PRIME);
+ __m128i xmm0 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[0]);
+ __m128i xmm1 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[1]);
+ __m128i xmm2 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[2]);
+ __m128i xmm3 = _mm_mullo_epi32(fnv_prime, mix[n].xmm[3]);
+ mix[n].xmm[0] = _mm_xor_si128(xmm0, dag_node->xmm[0]);
+ mix[n].xmm[1] = _mm_xor_si128(xmm1, dag_node->xmm[1]);
+ mix[n].xmm[2] = _mm_xor_si128(xmm2, dag_node->xmm[2]);
+ mix[n].xmm[3] = _mm_xor_si128(xmm3, dag_node->xmm[3]);
+ }
+ #else
+ {
+ for (unsigned w = 0; w != NODE_WORDS; ++w) {
+ mix[n].words[w] = fnv_hash(mix[n].words[w], dag_node->words[w]);
+ }
+ }
+#endif
+ }
+
+ }
+
+ // compress mix
+ for (uint32_t w = 0; w != MIX_WORDS; w += 4) {
+ uint32_t reduction = mix->words[w + 0];
+ reduction = reduction * FNV_PRIME ^ mix->words[w + 1];
+ reduction = reduction * FNV_PRIME ^ mix->words[w + 2];
+ reduction = reduction * FNV_PRIME ^ mix->words[w + 3];
+ mix->words[w / 4] = reduction;
+ }
+
+ fix_endian_arr32(mix->words, MIX_WORDS / 4);
+ memcpy(&ret->mix_hash, mix->bytes, 32);
+ // final Keccak hash
+ SHA3_256(&ret->result, s_mix->bytes, 64 + 32); // Keccak-256(s + compressed_mix)
+ return true;
+}
+
// Recompute the final Keccak-256 from header hash + nonce + claimed mix
// digest, without touching the DAG. Used for cheap PoW pre-verification.
void ethash_quick_hash(
	ethash_h256_t* return_hash,
	ethash_h256_t const* header_hash,
	uint64_t nonce,
	ethash_h256_t const* mix_hash
)
{
	// Layout: bytes 0-63 = Keccak-512(header||nonce), bytes 64-95 = mix digest.
	uint8_t buf[64 + 32];
	memcpy(buf, header_hash, 32);
	// Nonce is hashed in little-endian byte order regardless of host endianness.
	fix_endian64_same(nonce);
	memcpy(&(buf[32]), &nonce, 8);
	SHA3_512(buf, buf, 40);
	memcpy(&(buf[64]), mix_hash, 32);
	SHA3_256(return_hash, buf, 64 + 32);
}
+
+ethash_h256_t ethash_get_seedhash(uint64_t block_number)
+{
+ ethash_h256_t ret;
+ ethash_h256_reset(&ret);
+ uint64_t const epochs = block_number / ETHASH_EPOCH_LENGTH;
+ for (uint32_t i = 0; i < epochs; ++i)
+ SHA3_256(&ret, (uint8_t*)&ret, 32);
+ return ret;
+}
+
+bool ethash_quick_check_difficulty(
+ ethash_h256_t const* header_hash,
+ uint64_t const nonce,
+ ethash_h256_t const* mix_hash,
+ ethash_h256_t const* boundary
+)
+{
+
+ ethash_h256_t return_hash;
+ ethash_quick_hash(&return_hash, header_hash, nonce, mix_hash);
+ return ethash_check_difficulty(&return_hash, boundary);
+}
+
// Allocate an ethash_light handle and generate its cache from `seed`.
// Returns NULL on allocation failure or if cache_size is not a whole number
// of nodes; on success the handle owns the cache memory (released by
// ethash_light_delete). Note: block_number is left zeroed by calloc; the
// caller (ethash_light_new) fills it in.
ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed)
{
	struct ethash_light *ret;
	ret = calloc(sizeof(*ret), 1);
	if (!ret) {
		return NULL;
	}
	ret->cache = malloc((size_t)cache_size);
	if (!ret->cache) {
		goto fail_free_light;
	}
	node* nodes = (node*)ret->cache;
	if (!ethash_compute_cache_nodes(nodes, cache_size, seed)) {
		goto fail_free_cache_mem;
	}
	ret->cache_size = cache_size;
	return ret;

	// Cleanup ladder: free in reverse order of acquisition.
fail_free_cache_mem:
	free(ret->cache);
fail_free_light:
	free(ret);
	return NULL;
}
+
+ethash_light_t ethash_light_new(uint64_t block_number)
+{
+ ethash_h256_t seedhash = ethash_get_seedhash(block_number);
+ ethash_light_t ret;
+ ret = ethash_light_new_internal(ethash_get_cachesize(block_number), &seedhash);
+ ret->block_number = block_number;
+ return ret;
+}
+
+void ethash_light_delete(ethash_light_t light)
+{
+ if (light->cache) {
+ free(light->cache);
+ }
+ free(light);
+}
+
+ethash_return_value_t ethash_light_compute_internal(
+ ethash_light_t light,
+ uint64_t full_size,
+ ethash_h256_t const header_hash,
+ uint64_t nonce
+)
+{
+ ethash_return_value_t ret;
+ ret.success = true;
+ if (!ethash_hash(&ret, NULL, light, full_size, header_hash, nonce)) {
+ ret.success = false;
+ }
+ return ret;
+}
+
+ethash_return_value_t ethash_light_compute(
+ ethash_light_t light,
+ ethash_h256_t const header_hash,
+ uint64_t nonce
+)
+{
+ uint64_t full_size = ethash_get_datasize(light->block_number);
+ return ethash_light_compute_internal(light, full_size, header_hash, nonce);
+}
+
// Memory-map an existing DAG file. The file starts with an 8-byte magic
// number; ret->data is offset past it so it points straight at the node
// array. Caller must have set ret->file_size (DAG bytes, excluding magic).
static bool ethash_mmap(struct ethash_full* ret, FILE* f)
{
	int fd;
	char* mmapped_data;
	errno = 0;
	ret->file = f;
	if ((fd = ethash_fileno(ret->file)) == -1) {
		return false;
	}
	// Map read/write and MAP_SHARED so DAG generation writes through to disk.
	mmapped_data= mmap(
		NULL,
		(size_t)ret->file_size + ETHASH_DAG_MAGIC_NUM_SIZE,
		PROT_READ | PROT_WRITE,
		MAP_SHARED,
		fd,
		0
	);
	if (mmapped_data == MAP_FAILED) {
		return false;
	}
	// Skip the magic-number prefix; data[0] is the first DAG node.
	ret->data = (node*)(mmapped_data + ETHASH_DAG_MAGIC_NUM_SIZE);
	return true;
}
+
// Create a full-node handle: open (or create) the on-disk DAG file for
// seed_hash in dirname, mmap it, and generate the DAG if the file was not
// already complete. A trailing magic number write marks the file finished.
// Returns NULL on any failure (details via ETHASH_CRITICAL logging).
ethash_full_t ethash_full_new_internal(
	char const* dirname,
	ethash_h256_t const seed_hash,
	uint64_t full_size,
	ethash_light_t const light,
	ethash_callback_t callback
)
{
	struct ethash_full* ret;
	FILE *f = NULL;
	ret = calloc(sizeof(*ret), 1);
	if (!ret) {
		return NULL;
	}
	// ethash_mmap reads file_size, so set it before the switch below.
	ret->file_size = (size_t)full_size;
	switch (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, false)) {
	case ETHASH_IO_FAIL:
		// ethash_io_prepare will do all ETHASH_CRITICAL() logging in fail case
		goto fail_free_full;
	case ETHASH_IO_MEMO_MATCH:
		// Finished DAG already on disk: just map it and return.
		if (!ethash_mmap(ret, f)) {
			ETHASH_CRITICAL("mmap failure()");
			goto fail_close_file;
		}
		return ret;
	case ETHASH_IO_MEMO_SIZE_MISMATCH:
		// if a DAG of same filename but unexpected size is found, silently force new file creation
		if (ethash_io_prepare(dirname, seed_hash, &f, (size_t)full_size, true) != ETHASH_IO_MEMO_MISMATCH) {
			ETHASH_CRITICAL("Could not recreate DAG file after finding existing DAG with unexpected size.");
			goto fail_free_full;
		}
		// fallthrough to the mismatch case here, DO NOT go through match
	case ETHASH_IO_MEMO_MISMATCH:
		if (!ethash_mmap(ret, f)) {
			ETHASH_CRITICAL("mmap failure()");
			goto fail_close_file;
		}
		break;
	}

	// Freshly-created (or recreated) file: fill it with DAG data via the mapping.
	if (!ethash_compute_full_data(ret->data, full_size, light, callback)) {
		ETHASH_CRITICAL("Failure at computing DAG data.");
		goto fail_free_full_data;
	}

	// after the DAG has been filled then we finalize it by writting the magic number at the beginning
	if (fseek(f, 0, SEEK_SET) != 0) {
		ETHASH_CRITICAL("Could not seek to DAG file start to write magic number.");
		goto fail_free_full_data;
	}
	uint64_t const magic_num = ETHASH_DAG_MAGIC_NUM;
	if (fwrite(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
		ETHASH_CRITICAL("Could not write magic number to DAG's beginning.");
		goto fail_free_full_data;
	}
	if (fflush(f) != 0) {// make sure the magic number IS there
		ETHASH_CRITICAL("Could not flush memory mapped data to DAG file. Insufficient space?");
		goto fail_free_full_data;
	}
	return ret;

fail_free_full_data:
	// could check that munmap(..) == 0 but even if it did not can't really do anything here
	// NOTE(review): ret->data points ETHASH_DAG_MAGIC_NUM_SIZE bytes past the
	// mapping base (see ethash_mmap), so this munmap address is not the base
	// and is likely rejected -- verify and consider unmapping from base.
	munmap(ret->data, (size_t)full_size);
fail_close_file:
	fclose(ret->file);
fail_free_full:
	free(ret);
	return NULL;
}
+
+ethash_full_t ethash_full_new(ethash_light_t light, ethash_callback_t callback)
+{
+ char strbuf[256];
+ if (!ethash_get_default_dirname(strbuf, 256)) {
+ return NULL;
+ }
+ uint64_t full_size = ethash_get_datasize(light->block_number);
+ ethash_h256_t seedhash = ethash_get_seedhash(light->block_number);
+ return ethash_full_new_internal(strbuf, seedhash, full_size, light, callback);
+}
+
+void ethash_full_delete(ethash_full_t full)
+{
+ // could check that munmap(..) == 0 but even if it did not can't really do anything here
+ munmap(full->data, (size_t)full->file_size);
+ if (full->file) {
+ fclose(full->file);
+ }
+ free(full);
+}
+
+ethash_return_value_t ethash_full_compute(
+ ethash_full_t full,
+ ethash_h256_t const header_hash,
+ uint64_t nonce
+)
+{
+ ethash_return_value_t ret;
+ ret.success = true;
+ if (!ethash_hash(
+ &ret,
+ (node const*)full->data,
+ NULL,
+ full->file_size,
+ header_hash,
+ nonce)) {
+ ret.success = false;
+ }
+ return ret;
+}
+
+void const* ethash_full_dag(ethash_full_t full)
+{
+ return full->data;
+}
+
+uint64_t ethash_full_dag_size(ethash_full_t full)
+{
+ return full->file_size;
+}
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/internal.h b/vendor/github.com/ethereum/ethash/src/libethash/internal.h
new file mode 100644
index 000000000..26c395ad6
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/internal.h
@@ -0,0 +1,179 @@
+#pragma once
+#include "compiler.h"
+#include "endian.h"
+#include "ethash.h"
+#include <stdio.h>
+
+#define ENABLE_SSE 0
+
+#if defined(_M_X64) && ENABLE_SSE
+#include <smmintrin.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// compile time settings
+#define NODE_WORDS (64/4)
+#define MIX_WORDS (ETHASH_MIX_BYTES/4)
+#define MIX_NODES (MIX_WORDS / NODE_WORDS)
+#include <stdint.h>
+
// A 64-byte (512-bit) cache/DAG node. The union lets the same storage be
// addressed as raw bytes (hashing), 32-bit words (FNV mixing), or 64-bit
// words (nonce packing); with SSE enabled it is also viewable as four
// 128-bit lanes.
typedef union node {
	uint8_t bytes[NODE_WORDS * 4];
	uint32_t words[NODE_WORDS];
	uint64_t double_words[NODE_WORDS / 2];

#if defined(_M_X64) && ENABLE_SSE
	__m128i xmm[NODE_WORDS/4];
#endif

} node;
+
+static inline uint8_t ethash_h256_get(ethash_h256_t const* hash, unsigned int i)
+{
+ return hash->b[i];
+}
+
+static inline void ethash_h256_set(ethash_h256_t* hash, unsigned int i, uint8_t v)
+{
+ hash->b[i] = v;
+}
+
+static inline void ethash_h256_reset(ethash_h256_t* hash)
+{
+ memset(hash, 0, 32);
+}
+
+// Returns if hash is less than or equal to boundary (2^256/difficulty)
+static inline bool ethash_check_difficulty(
+ ethash_h256_t const* hash,
+ ethash_h256_t const* boundary
+)
+{
+ // Boundary is big endian
+ for (int i = 0; i < 32; i++) {
+ if (ethash_h256_get(hash, i) == ethash_h256_get(boundary, i)) {
+ continue;
+ }
+ return ethash_h256_get(hash, i) < ethash_h256_get(boundary, i);
+ }
+ return true;
+}
+
+/**
+ * Difficulty quick check for POW preverification
+ *
+ * @param header_hash The hash of the header
+ * @param nonce The block's nonce
+ * @param mix_hash The mix digest hash
+ * @param boundary The boundary is defined as (2^256 / difficulty)
+ * @return true for succesful pre-verification and false otherwise
+ */
+bool ethash_quick_check_difficulty(
+ ethash_h256_t const* header_hash,
+ uint64_t const nonce,
+ ethash_h256_t const* mix_hash,
+ ethash_h256_t const* boundary
+);
+
// Light-client verification context: the per-epoch cache plus the block
// number it was generated for (used to derive the epoch's DAG size).
struct ethash_light {
	void* cache;          // malloc'd cache nodes, owned by this struct
	uint64_t cache_size;  // cache size in bytes (whole number of nodes)
	uint64_t block_number;
};
+
+/**
+ * Allocate and initialize a new ethash_light handler. Internal version
+ *
+ * @param cache_size The size of the cache in bytes
+ * @param seed Block seedhash to be used during the computation of the
+ * cache nodes
+ * @return Newly allocated ethash_light handler or NULL in case of
+ * ERRNOMEM or invalid parameters used for @ref ethash_compute_cache_nodes()
+ */
+ethash_light_t ethash_light_new_internal(uint64_t cache_size, ethash_h256_t const* seed);
+
+/**
+ * Calculate the light client data. Internal version.
+ *
+ * @param light The light client handler
+ * @param full_size The size of the full data in bytes.
+ * @param header_hash The header hash to pack into the mix
+ * @param nonce The nonce to pack into the mix
+ * @return The resulting hash.
+ */
+ethash_return_value_t ethash_light_compute_internal(
+ ethash_light_t light,
+ uint64_t full_size,
+ ethash_h256_t const header_hash,
+ uint64_t nonce
+);
+
// Full-node context. `data` points into the mmapped DAG file just past the
// magic-number prefix (see ethash_mmap); `file_size` is the DAG size in
// bytes excluding that prefix.
struct ethash_full {
	FILE* file;
	uint64_t file_size;
	node* data;
};
+
+/**
+ * Allocate and initialize a new ethash_full handler. Internal version.
+ *
+ * @param dirname The directory in which to put the DAG file.
+ * @param seedhash The seed hash of the block. Used in the DAG file naming.
+ * @param full_size The size of the full data in bytes.
+ * @param cache A cache object to use that was allocated with @ref ethash_cache_new().
+ * Iff this function succeeds the ethash_full_t will take memory
+ * memory ownership of the cache and free it at deletion. If
+ * not then the user still has to handle freeing of the cache himself.
+ * @param callback A callback function with signature of @ref ethash_callback_t
+ * It accepts an unsigned with which a progress of DAG calculation
+ * can be displayed. If all goes well the callback should return 0.
+ * If a non-zero value is returned then DAG generation will stop.
+ * @return Newly allocated ethash_full handler or NULL in case of
+ * ERRNOMEM or invalid parameters used for @ref ethash_compute_full_data()
+ */
+ethash_full_t ethash_full_new_internal(
+ char const* dirname,
+ ethash_h256_t const seed_hash,
+ uint64_t full_size,
+ ethash_light_t const light,
+ ethash_callback_t callback
+);
+
+void ethash_calculate_dag_item(
+ node* const ret,
+ uint32_t node_index,
+ ethash_light_t const cache
+);
+
+void ethash_quick_hash(
+ ethash_h256_t* return_hash,
+ ethash_h256_t const* header_hash,
+ const uint64_t nonce,
+ ethash_h256_t const* mix_hash
+);
+
+uint64_t ethash_get_datasize(uint64_t const block_number);
+uint64_t ethash_get_cachesize(uint64_t const block_number);
+
+/**
+ * Compute the memory data for a full node's memory
+ *
+ * @param mem A pointer to an ethash full's memory
+ * @param full_size The size of the full data in bytes
+ * @param cache A cache object to use in the calculation
+ * @param callback The callback function. Check @ref ethash_full_new() for details.
+ * @return true if all went fine and false for invalid parameters
+ */
+bool ethash_compute_full_data(
+ void* mem,
+ uint64_t full_size,
+ ethash_light_t const light,
+ ethash_callback_t callback
+);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/io.c b/vendor/github.com/ethereum/ethash/src/libethash/io.c
new file mode 100644
index 000000000..f4db477c2
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/io.c
@@ -0,0 +1,119 @@
+/*
+ This file is part of ethash.
+
+ ethash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ ethash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with ethash. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file io.c
+ * @author Lefteris Karapetsas <lefteris@ethdev.com>
+ * @date 2015
+ */
+#include "io.h"
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+
+enum ethash_io_rc ethash_io_prepare(
+ char const* dirname,
+ ethash_h256_t const seedhash,
+ FILE** output_file,
+ uint64_t file_size,
+ bool force_create
+)
+{
+ char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
+ enum ethash_io_rc ret = ETHASH_IO_FAIL;
+ // reset errno before io calls
+ errno = 0;
+
+ // assert directory exists
+ if (!ethash_mkdir(dirname)) {
+ ETHASH_CRITICAL("Could not create the ethash directory");
+ goto end;
+ }
+
+ ethash_io_mutable_name(ETHASH_REVISION, &seedhash, mutable_name);
+ char* tmpfile = ethash_io_create_filename(dirname, mutable_name, strlen(mutable_name));
+ if (!tmpfile) {
+ ETHASH_CRITICAL("Could not create the full DAG pathname");
+ goto end;
+ }
+
+ FILE *f;
+ if (!force_create) {
+ // try to open the file
+ f = ethash_fopen(tmpfile, "rb+");
+ if (f) {
+ size_t found_size;
+ if (!ethash_file_size(f, &found_size)) {
+ fclose(f);
+ ETHASH_CRITICAL("Could not query size of DAG file: \"%s\"", tmpfile);
+ goto free_memo;
+ }
+ if (file_size != found_size - ETHASH_DAG_MAGIC_NUM_SIZE) {
+ fclose(f);
+ ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
+ goto free_memo;
+ }
+ // compare the magic number, no need to care about endianess since it's local
+ uint64_t magic_num;
+ if (fread(&magic_num, ETHASH_DAG_MAGIC_NUM_SIZE, 1, f) != 1) {
+ // I/O error
+ fclose(f);
+ ETHASH_CRITICAL("Could not read from DAG file: \"%s\"", tmpfile);
+ ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
+ goto free_memo;
+ }
+ if (magic_num != ETHASH_DAG_MAGIC_NUM) {
+ fclose(f);
+ ret = ETHASH_IO_MEMO_SIZE_MISMATCH;
+ goto free_memo;
+ }
+ ret = ETHASH_IO_MEMO_MATCH;
+ goto set_file;
+ }
+ }
+
+ // file does not exist, will need to be created
+ f = ethash_fopen(tmpfile, "wb+");
+ if (!f) {
+ ETHASH_CRITICAL("Could not create DAG file: \"%s\"", tmpfile);
+ goto free_memo;
+ }
+ // make sure it's of the proper size
+ if (fseek(f, (long int)(file_size + ETHASH_DAG_MAGIC_NUM_SIZE - 1), SEEK_SET) != 0) {
+ fclose(f);
+ ETHASH_CRITICAL("Could not seek to the end of DAG file: \"%s\". Insufficient space?", tmpfile);
+ goto free_memo;
+ }
+ if (fputc('\n', f) == EOF) {
+ fclose(f);
+ ETHASH_CRITICAL("Could not write in the end of DAG file: \"%s\". Insufficient space?", tmpfile);
+ goto free_memo;
+ }
+ if (fflush(f) != 0) {
+ fclose(f);
+ ETHASH_CRITICAL("Could not flush at end of DAG file: \"%s\". Insufficient space?", tmpfile);
+ goto free_memo;
+ }
+ ret = ETHASH_IO_MEMO_MISMATCH;
+ goto set_file;
+
+ ret = ETHASH_IO_MEMO_MATCH;
+set_file:
+ *output_file = f;
+free_memo:
+ free(tmpfile);
+end:
+ return ret;
+}
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/io.h b/vendor/github.com/ethereum/ethash/src/libethash/io.h
new file mode 100644
index 000000000..7a27089c7
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/io.h
@@ -0,0 +1,202 @@
+/*
+ This file is part of ethash.
+
+ ethash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ ethash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with ethash. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file io.h
+ * @author Lefteris Karapetsas <lefteris@ethdev.com>
+ * @date 2015
+ */
+#pragma once
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#ifdef __cplusplus
+#define __STDC_FORMAT_MACROS 1
+#endif
+#include <inttypes.h>
+#include "endian.h"
+#include "ethash.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+// Maximum size for mutable part of DAG file name
+// 6 is for "full-R", the suffix of the filename
+// 10 is for maximum number of digits of a uint32_t (for REVISION)
+// 1 is for - and 16 is for the first 16 hex digits for first 8 bytes of
+// the seedhash and last 1 is for the null terminating character
+// Reference: https://github.com/ethereum/wiki/wiki/Ethash-DAG
+#define DAG_MUTABLE_NAME_MAX_SIZE (6 + 10 + 1 + 16 + 1)
+/// Possible return values of @see ethash_io_prepare
+enum ethash_io_rc {
+ ETHASH_IO_FAIL = 0, ///< There has been an IO failure
+ ETHASH_IO_MEMO_SIZE_MISMATCH, ///< DAG with revision/hash match, but file size was wrong.
+ ETHASH_IO_MEMO_MISMATCH, ///< The DAG file did not exist or there was revision/hash mismatch
+ ETHASH_IO_MEMO_MATCH, ///< DAG file existed and revision/hash matched. No need to do anything
+};
+
+// small hack for windows. I don't feel I should use va_args and forward just
+// to have this one function properly cross-platform abstracted
+#if defined(_WIN32) && !defined(__GNUC__)
+#define snprintf(...) sprintf_s(__VA_ARGS__)
+#endif
+
+/**
+ * Logs a critical error in important parts of ethash. Should mostly help
+ * figure out what kind of problem (I/O, memory e.t.c.) causes a NULL
+ * ethash_full_t
+ */
+#ifdef ETHASH_PRINT_CRITICAL_OUTPUT
+#define ETHASH_CRITICAL(...) \
+ do \
+ { \
+ printf("ETHASH CRITICAL ERROR: "__VA_ARGS__); \
+ printf("\n"); \
+ fflush(stdout); \
+ } while (0)
+#else
+#define ETHASH_CRITICAL(...)
+#endif
+
+/**
+ * Prepares io for ethash
+ *
+ * Create the DAG directory and the DAG file if they don't exist.
+ *
+ * @param[in] dirname A null terminated c-string of the path of the ethash
+ * data directory. If it does not exist it's created.
+ * @param[in] seedhash The seedhash of the current block number, used in the
+ * naming of the file as can be seen from the spec at:
+ * https://github.com/ethereum/wiki/wiki/Ethash-DAG
+ * @param[out] output_file If there was no failure then this will point to an open
+ * file descriptor. User is responsible for closing it.
+ * In the case of memo match then the file is open on read
+ * mode, while on the case of mismatch a new file is created
+ * on write mode
+ * @param[in] file_size The size that the DAG file should have on disk
+ * @param[out] force_create If true then there is no check to see if the file
+ * already exists
+ * @return For possible return values @see enum ethash_io_rc
+ */
+enum ethash_io_rc ethash_io_prepare(
+ char const* dirname,
+ ethash_h256_t const seedhash,
+ FILE** output_file,
+ uint64_t file_size,
+ bool force_create
+);
+
+/**
+ * An fopen wrapper for no-warnings crossplatform fopen.
+ *
+ * Msvc compiler considers fopen to be insecure and suggests to use their
+ * alternative. This is a wrapper for this alternative. Another way is to
+ * #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does
+ * not sound like a good idea.
+ *
+ * @param file_name The path to the file to open
+ * @param mode Opening mode. Check fopen()
+ * @return The FILE* or NULL in failure
+ */
+FILE* ethash_fopen(char const* file_name, char const* mode);
+
+/**
+ * An strncat wrapper for no-warnings crossplatform strncat.
+ *
+ * Msvc compiler considers strncat to be insecure and suggests to use their
+ * alternative. This is a wrapper for this alternative. Another way is to
+ * #define _CRT_SECURE_NO_WARNINGS, but disabling all security warnings does
+ * not sound like a good idea.
+ *
+ * @param des Destination buffer
+ * @param dest_size Maximum size of the destination buffer. This is the
+ * extra argument for the MSVC secure strncat
+ * @param src Souce buffer
+ * @param count Number of bytes to copy from source
+ * @return If all is well returns the dest buffer. If there is an
+ * error returns NULL
+ */
+char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count);
+
+/**
+ * A cross-platform mkdir wrapper to create a directory or assert it's there
+ *
+ * @param dirname The full path of the directory to create
+ * @return true if the directory was created or if it already
+ * existed
+ */
+bool ethash_mkdir(char const* dirname);
+
+/**
+ * Get a file's size
+ *
+ * @param[in] f The open file stream whose size to get
+ * @param[out] size Pass a size_t by reference to contain the file size
+ * @return true in success and false if there was a failure
+ */
+bool ethash_file_size(FILE* f, size_t* ret_size);
+
+/**
+ * Get a file descriptor number from a FILE stream
+ *
+ * @param f The file stream whose fd to get
+ * @return Platform specific fd handler
+ */
+int ethash_fileno(FILE* f);
+
+/**
+ * Create the filename for the DAG.
+ *
+ * @param dirname The directory name in which the DAG file should reside
+ * If it does not end with a directory separator it is appended.
+ * @param filename The actual name of the file
+ * @param filename_length The length of the filename in bytes
+ * @return A char* containing the full name. User must deallocate.
+ */
+char* ethash_io_create_filename(
+ char const* dirname,
+ char const* filename,
+ size_t filename_length
+);
+
+/**
+ * Gets the default directory name for the DAG depending on the system
+ *
+ * The spec defining this directory is here: https://github.com/ethereum/wiki/wiki/Ethash-DAG
+ *
+ * @param[out] strbuf A string buffer of sufficient size to keep the
+ * null termninated string of the directory name
+ * @param[in] buffsize Size of @a strbuf in bytes
+ * @return true for success and false otherwise
+ */
+bool ethash_get_default_dirname(char* strbuf, size_t buffsize);
+
// Build the mutable part of the DAG file name: "full-R<revision>-<hex>",
// where <hex> is the first 8 bytes of the seed hash printed big-endian.
// `output` must hold at least DAG_MUTABLE_NAME_MAX_SIZE bytes.
static inline bool ethash_io_mutable_name(
	uint32_t revision,
	ethash_h256_t const* seed_hash,
	char* output
)
{
	// NOTE(review): reads the hash through a uint64_t* -- assumes
	// ethash_h256_t is suitably aligned and aliasing is benign here;
	// verify on strict-alignment targets.
	uint64_t hash = *((uint64_t*)seed_hash);
#if LITTLE_ENDIAN == BYTE_ORDER
	// Byte-swap so the hex digits come out in big-endian (display) order.
	hash = ethash_swap_u64(hash);
#endif
	return snprintf(output, DAG_MUTABLE_NAME_MAX_SIZE, "full-R%u-%016" PRIx64, revision, hash) >= 0;
}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/io_posix.c b/vendor/github.com/ethereum/ethash/src/libethash/io_posix.c
new file mode 100644
index 000000000..c9a17d845
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/io_posix.c
@@ -0,0 +1,111 @@
+/*
+ This file is part of ethash.
+
+ ethash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ ethash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with ethash. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file io_posix.c
+ * @author Lefteris Karapetsas <lefteris@ethdev.com>
+ * @date 2015
+ */
+
+#include "io.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <libgen.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <pwd.h>
+
// Plain fopen on POSIX; this wrapper exists only to mirror the MSVC-safe
// fopen_s variant used in io_win32.c.
FILE* ethash_fopen(char const* file_name, char const* mode)
{
	return fopen(file_name, mode);
}
+
// Bounds-checked strncat: refuses the append unless dest can hold its
// current contents, `count` more bytes, and the terminating NUL within
// dest_size. Returns dest on success, NULL on overflow.
char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count)
{
	size_t const needed = strlen(dest) + count + 1;
	if (needed > dest_size) {
		return NULL;
	}
	return strncat(dest, src, count);
}
+
// Create a directory with mode rwxrwxr-x; an already-existing directory
// counts as success.
bool ethash_mkdir(char const* dirname)
{
	if (mkdir(dirname, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) == 0) {
		return true;
	}
	return errno == EEXIST;
}
+
// POSIX exposes fileno directly; the Windows build maps this to _fileno.
int ethash_fileno(FILE *f)
{
	int const fd = fileno(f);
	return fd;
}
+
// Join dirname and filename into a freshly malloc'd path, inserting a '/'
// separator only when dirname does not already end with one. filename need
// not be NUL-terminated; exactly filename_length bytes are taken.
// Returns NULL on allocation failure; caller frees the result.
char* ethash_io_create_filename(
	char const* dirname,
	char const* filename,
	size_t filename_length
)
{
	size_t const dirlen = strlen(dirname);
	// BUGFIX: the old code tested dirname[dirlen] (the NUL terminator)
	// against '/', which is always true, so a separator was appended even
	// after a trailing slash. Test the actual last character instead.
	bool const need_sep = dirlen == 0 || dirname[dirlen - 1] != '/';
	size_t const dest_size = dirlen + (need_sep ? 1 : 0) + filename_length + 1;
	char* name = malloc(dest_size);
	if (!name) {
		return NULL;
	}

	memcpy(name, dirname, dirlen);
	size_t pos = dirlen;
	if (need_sep) {
		name[pos++] = '/';
	}
	memcpy(name + pos, filename, filename_length);
	name[pos + filename_length] = '\0';
	return name;
}
+
// Query the size of an open stream's underlying file via fstat.
// On success stores the byte count in *ret_size and returns true.
bool ethash_file_size(FILE* f, size_t* ret_size)
{
	int const fd = fileno(f);
	if (fd == -1) {
		return false;
	}
	struct stat st;
	if (fstat(fd, &st) != 0) {
		return false;
	}
	*ret_size = st.st_size;
	return true;
}
+
// Write the default DAG directory ("<home>/.ethash/") into strbuf.
// Home is taken from $HOME, falling back to the passwd database.
// Returns false if no home directory can be determined or strbuf is too small.
bool ethash_get_default_dirname(char* strbuf, size_t buffsize)
{
	static const char dir_suffix[] = ".ethash/";
	strbuf[0] = '\0';
	char* home_dir = getenv("HOME");
	if (!home_dir || strlen(home_dir) == 0)
	{
		struct passwd* pwd = getpwuid(getuid());
		if (pwd)
			home_dir = pwd->pw_dir;
	}
	// BUGFIX: if $HOME is unset/empty and the passwd lookup also fails (or
	// yields no directory), home_dir is still NULL/empty; the old code went
	// on to call strlen(NULL) -- undefined behavior. Fail cleanly instead.
	if (!home_dir || home_dir[0] == '\0') {
		return false;
	}

	size_t len = strlen(home_dir);
	if (!ethash_strncat(strbuf, buffsize, home_dir, len)) {
		return false;
	}
	// BUGFIX: the old test read home_dir[len] (the NUL terminator), which is
	// never '/', so a separator was appended even after a trailing slash.
	if (home_dir[len - 1] != '/') {
		if (!ethash_strncat(strbuf, buffsize, "/", 1)) {
			return false;
		}
	}
	return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix)) != NULL;
}
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/io_win32.c b/vendor/github.com/ethereum/ethash/src/libethash/io_win32.c
new file mode 100644
index 000000000..34f1aaa77
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/io_win32.c
@@ -0,0 +1,100 @@
+/*
+ This file is part of ethash.
+
+ ethash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ ethash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with ethash. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file io_win32.c
+ * @author Lefteris Karapetsas <lefteris@ethdev.com>
+ * @date 2015
+ */
+
+#include "io.h"
+#include <direct.h>
+#include <errno.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <shlobj.h>
+
+FILE* ethash_fopen(char const* file_name, char const* mode)
+{
+ FILE* f;
+ return fopen_s(&f, file_name, mode) == 0 ? f : NULL;
+}
+
+char* ethash_strncat(char* dest, size_t dest_size, char const* src, size_t count)
+{
+ return strncat_s(dest, dest_size, src, count) == 0 ? dest : NULL;
+}
+
+bool ethash_mkdir(char const* dirname)
+{
+ int rc = _mkdir(dirname);
+ return rc != -1 || errno == EEXIST;
+}
+
+int ethash_fileno(FILE* f)
+{
+ return _fileno(f);
+}
+
+char* ethash_io_create_filename(
+ char const* dirname,
+ char const* filename,
+ size_t filename_length
+)
+{
+ size_t dirlen = strlen(dirname);
+ size_t dest_size = dirlen + filename_length + 1;
+ if (dirname[dirlen] != '\\' || dirname[dirlen] != '/') {
+ dest_size += 1;
+ }
+ char* name = malloc(dest_size);
+ if (!name) {
+ return NULL;
+ }
+
+ name[0] = '\0';
+ ethash_strncat(name, dest_size, dirname, dirlen);
+ if (dirname[dirlen] != '\\' || dirname[dirlen] != '/') {
+ ethash_strncat(name, dest_size, "\\", 1);
+ }
+ ethash_strncat(name, dest_size, filename, filename_length);
+ return name;
+}
+
+bool ethash_file_size(FILE* f, size_t* ret_size)
+{
+ struct _stat st;
+ int fd;
+ if ((fd = _fileno(f)) == -1 || _fstat(fd, &st) != 0) {
+ return false;
+ }
+ *ret_size = st.st_size;
+ return true;
+}
+
+bool ethash_get_default_dirname(char* strbuf, size_t buffsize)
+{
+ static const char dir_suffix[] = "Ethash\\";
+ strbuf[0] = '\0';
+ if (!SUCCEEDED(SHGetFolderPathA(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, (CHAR*)strbuf))) {
+ return false;
+ }
+ if (!ethash_strncat(strbuf, buffsize, "\\", 1)) {
+ return false;
+ }
+
+ return ethash_strncat(strbuf, buffsize, dir_suffix, sizeof(dir_suffix));
+}
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/mmap.h b/vendor/github.com/ethereum/ethash/src/libethash/mmap.h
new file mode 100644
index 000000000..1e226e83f
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/mmap.h
@@ -0,0 +1,47 @@
+/*
+ This file is part of ethash.
+
+ ethash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ ethash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with ethash. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file mmap.h
+ * @author Lefteris Karapetsas <lefteris@ethdev.com>
+ * @date 2015
+ */
+#pragma once
+#if defined(__MINGW32__) || defined(_WIN32)
+#include <sys/types.h>
+
+#define PROT_READ 0x1
+#define PROT_WRITE 0x2
+/* This flag is only available in WinXP+ */
+#ifdef FILE_MAP_EXECUTE
+#define PROT_EXEC 0x4
+#else
+#define PROT_EXEC 0x0
+#define FILE_MAP_EXECUTE 0
+#endif
+
+#define MAP_SHARED 0x01
+#define MAP_PRIVATE 0x02
+#define MAP_ANONYMOUS 0x20
+#define MAP_ANON MAP_ANONYMOUS
+#define MAP_FAILED ((void *) -1)
+
+void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset);
+void munmap(void* addr, size_t length);
+#else // posix, yay! ^_^
+#include <sys/mman.h>
+#endif
+
+
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/mmap_win32.c b/vendor/github.com/ethereum/ethash/src/libethash/mmap_win32.c
new file mode 100644
index 000000000..42968b98a
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/mmap_win32.c
@@ -0,0 +1,84 @@
+/* mmap() replacement for Windows
+ *
+ * Author: Mike Frysinger <vapier@gentoo.org>
+ * Placed into the public domain
+ */
+
+/* References:
+ * CreateFileMapping: http://msdn.microsoft.com/en-us/library/aa366537(VS.85).aspx
+ * CloseHandle: http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx
+ * MapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366761(VS.85).aspx
+ * UnmapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366882(VS.85).aspx
+ */
+
+#include <io.h>
+#include <windows.h>
+#include "mmap.h"
+
+#ifdef __USE_FILE_OFFSET64
+# define DWORD_HI(x) (x >> 32)
+# define DWORD_LO(x) ((x) & 0xffffffff)
+#else
+# define DWORD_HI(x) (0)
+# define DWORD_LO(x) (x)
+#endif
+
+void* mmap(void* start, size_t length, int prot, int flags, int fd, off_t offset)
+{
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
+ return MAP_FAILED;
+ if (fd == -1) {
+ if (!(flags & MAP_ANON) || offset)
+ return MAP_FAILED;
+ } else if (flags & MAP_ANON)
+ return MAP_FAILED;
+
+ DWORD flProtect;
+ if (prot & PROT_WRITE) {
+ if (prot & PROT_EXEC)
+ flProtect = PAGE_EXECUTE_READWRITE;
+ else
+ flProtect = PAGE_READWRITE;
+ } else if (prot & PROT_EXEC) {
+ if (prot & PROT_READ)
+ flProtect = PAGE_EXECUTE_READ;
+ else if (prot & PROT_EXEC)
+ flProtect = PAGE_EXECUTE;
+ } else
+ flProtect = PAGE_READONLY;
+
+ off_t end = length + offset;
+ HANDLE mmap_fd, h;
+ if (fd == -1)
+ mmap_fd = INVALID_HANDLE_VALUE;
+ else
+ mmap_fd = (HANDLE)_get_osfhandle(fd);
+ h = CreateFileMapping(mmap_fd, NULL, flProtect, DWORD_HI(end), DWORD_LO(end), NULL);
+ if (h == NULL)
+ return MAP_FAILED;
+
+ DWORD dwDesiredAccess;
+ if (prot & PROT_WRITE)
+ dwDesiredAccess = FILE_MAP_WRITE;
+ else
+ dwDesiredAccess = FILE_MAP_READ;
+ if (prot & PROT_EXEC)
+ dwDesiredAccess |= FILE_MAP_EXECUTE;
+ if (flags & MAP_PRIVATE)
+ dwDesiredAccess |= FILE_MAP_COPY;
+ void *ret = MapViewOfFile(h, dwDesiredAccess, DWORD_HI(offset), DWORD_LO(offset), length);
+ if (ret == NULL) {
+ ret = MAP_FAILED;
+ }
+ // since we are handling the file ourselves with fd, close the Windows Handle here
+ CloseHandle(h);
+ return ret;
+}
+
+void munmap(void* addr, size_t length)
+{
+ UnmapViewOfFile(addr);
+}
+
+#undef DWORD_HI
+#undef DWORD_LO
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/sha3.c b/vendor/github.com/ethereum/ethash/src/libethash/sha3.c
new file mode 100644
index 000000000..e72fe1018
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/sha3.c
@@ -0,0 +1,151 @@
+/** libkeccak-tiny
+*
+* A single-file implementation of SHA-3 and SHAKE.
+*
+* Implementor: David Leon Gil
+* License: CC0, attribution kindly requested. Blame taken too,
+* but not liability.
+*/
+#include "sha3.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/******** The Keccak-f[1600] permutation ********/
+
+/*** Constants. ***/
+static const uint8_t rho[24] = \
+ { 1, 3, 6, 10, 15, 21,
+ 28, 36, 45, 55, 2, 14,
+ 27, 41, 56, 8, 25, 43,
+ 62, 18, 39, 61, 20, 44};
+static const uint8_t pi[24] = \
+ {10, 7, 11, 17, 18, 3,
+ 5, 16, 8, 21, 24, 4,
+ 15, 23, 19, 13, 12, 2,
+ 20, 14, 22, 9, 6, 1};
+static const uint64_t RC[24] = \
+ {1ULL, 0x8082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
+ 0x808bULL, 0x80000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
+ 0x8aULL, 0x88ULL, 0x80008009ULL, 0x8000000aULL,
+ 0x8000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
+ 0x8000000000008002ULL, 0x8000000000000080ULL, 0x800aULL, 0x800000008000000aULL,
+ 0x8000000080008081ULL, 0x8000000000008080ULL, 0x80000001ULL, 0x8000000080008008ULL};
+
+/*** Helper macros to unroll the permutation. ***/
+#define rol(x, s) (((x) << s) | ((x) >> (64 - s)))
+#define REPEAT6(e) e e e e e e
+#define REPEAT24(e) REPEAT6(e e e e)
+#define REPEAT5(e) e e e e e
+#define FOR5(v, s, e) \
+ v = 0; \
+ REPEAT5(e; v += s;)
+
+/*** Keccak-f[1600] ***/
+static inline void keccakf(void* state) {
+ uint64_t* a = (uint64_t*)state;
+ uint64_t b[5] = {0};
+ uint64_t t = 0;
+ uint8_t x, y;
+
+ for (int i = 0; i < 24; i++) {
+ // Theta
+ FOR5(x, 1,
+ b[x] = 0;
+ FOR5(y, 5,
+ b[x] ^= a[x + y]; ))
+ FOR5(x, 1,
+ FOR5(y, 5,
+ a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
+ // Rho and pi
+ t = a[1];
+ x = 0;
+ REPEAT24(b[0] = a[pi[x]];
+ a[pi[x]] = rol(t, rho[x]);
+ t = b[0];
+ x++; )
+ // Chi
+ FOR5(y,
+ 5,
+ FOR5(x, 1,
+ b[x] = a[y + x];)
+ FOR5(x, 1,
+ a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
+ // Iota
+ a[0] ^= RC[i];
+ }
+}
+
+/******** The FIPS202-defined functions. ********/
+
+/*** Some helper macros. ***/
+
+#define _(S) do { S } while (0)
+#define FOR(i, ST, L, S) \
+ _(for (size_t i = 0; i < L; i += ST) { S; })
+#define mkapply_ds(NAME, S) \
+ static inline void NAME(uint8_t* dst, \
+ const uint8_t* src, \
+ size_t len) { \
+ FOR(i, 1, len, S); \
+ }
+#define mkapply_sd(NAME, S) \
+ static inline void NAME(const uint8_t* src, \
+ uint8_t* dst, \
+ size_t len) { \
+ FOR(i, 1, len, S); \
+ }
+
+mkapply_ds(xorin, dst[i] ^= src[i]) // xorin
+mkapply_sd(setout, dst[i] = src[i]) // setout
+
+#define P keccakf
+#define Plen 200
+
+// Fold P*F over the full blocks of an input.
+#define foldP(I, L, F) \
+ while (L >= rate) { \
+ F(a, I, rate); \
+ P(a); \
+ I += rate; \
+ L -= rate; \
+ }
+
+/** The sponge-based hash construction. **/
+static inline int hash(uint8_t* out, size_t outlen,
+ const uint8_t* in, size_t inlen,
+ size_t rate, uint8_t delim) {
+ if ((out == NULL) || ((in == NULL) && inlen != 0) || (rate >= Plen)) {
+ return -1;
+ }
+ uint8_t a[Plen] = {0};
+ // Absorb input.
+ foldP(in, inlen, xorin);
+ // Xor in the DS and pad frame.
+ a[inlen] ^= delim;
+ a[rate - 1] ^= 0x80;
+ // Xor in the last block.
+ xorin(a, in, inlen);
+ // Apply P
+ P(a);
+ // Squeeze output.
+ foldP(out, outlen, setout);
+ setout(a, out, outlen);
+ memset(a, 0, 200);
+ return 0;
+}
+
+#define defsha3(bits) \
+ int sha3_##bits(uint8_t* out, size_t outlen, \
+ const uint8_t* in, size_t inlen) { \
+ if (outlen > (bits/8)) { \
+ return -1; \
+ } \
+ return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x01); \
+ }
+
+/*** FIPS202 SHA3 FOFs ***/
+defsha3(256)
+defsha3(512)
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/sha3.h b/vendor/github.com/ethereum/ethash/src/libethash/sha3.h
new file mode 100644
index 000000000..a38006292
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/sha3.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "compiler.h"
+#include <stdint.h>
+#include <stdlib.h>
+
+struct ethash_h256;
+
+#define decsha3(bits) \
+ int sha3_##bits(uint8_t*, size_t, uint8_t const*, size_t);
+
+decsha3(256)
+decsha3(512)
+
+static inline void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t const size)
+{
+ sha3_256((uint8_t*)ret, 32, data, size);
+}
+
+static inline void SHA3_512(uint8_t* ret, uint8_t const* data, size_t const size)
+{
+ sha3_512(ret, 64, data, size);
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.cpp b/vendor/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.cpp
new file mode 100644
index 000000000..2a7c02664
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.cpp
@@ -0,0 +1,37 @@
+/*
+ This file is part of ethash.
+
+ ethash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ ethash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with ethash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/** @file sha3.cpp
+* @author Tim Hughes <tim@twistedfury.com>
+* @date 2015
+*/
+#include <stdint.h>
+#include <cryptopp/sha3.h>
+
+extern "C" {
+struct ethash_h256;
+typedef struct ethash_h256 ethash_h256_t;
+void SHA3_256(ethash_h256_t const* ret, uint8_t const* data, size_t size)
+{
+ CryptoPP::SHA3_256().CalculateDigest((uint8_t*)ret, data, size);
+}
+
+void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size)
+{
+ CryptoPP::SHA3_512().CalculateDigest(ret, data, size);
+}
+}
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.h b/vendor/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.h
new file mode 100644
index 000000000..9edc407d5
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/sha3_cryptopp.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include "compiler.h"
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ethash_h256;
+
+void SHA3_256(struct ethash_h256 const* ret, uint8_t const* data, size_t size);
+void SHA3_512(uint8_t* const ret, uint8_t const* data, size_t size);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/util.h b/vendor/github.com/ethereum/ethash/src/libethash/util.h
new file mode 100644
index 000000000..c5fc6e55b
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/util.h
@@ -0,0 +1,47 @@
+/*
+ This file is part of ethash.
+
+ ethash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ ethash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with ethash. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file util.h
+ * @author Tim Hughes <tim@twistedfury.com>
+ * @date 2015
+ */
+#pragma once
+#include <stdint.h>
+#include "compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _MSC_VER
+void debugf(char const* str, ...);
+#else
+#define debugf printf
+#endif
+
+static inline uint32_t min_u32(uint32_t a, uint32_t b)
+{
+ return a < b ? a : b;
+}
+
+static inline uint32_t clamp_u32(uint32_t x, uint32_t min_, uint32_t max_)
+{
+ return x < min_ ? min_ : (x > max_ ? max_ : x);
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/ethereum/ethash/src/libethash/util_win32.c b/vendor/github.com/ethereum/ethash/src/libethash/util_win32.c
new file mode 100644
index 000000000..268e6db05
--- /dev/null
+++ b/vendor/github.com/ethereum/ethash/src/libethash/util_win32.c
@@ -0,0 +1,38 @@
+/*
+ This file is part of cpp-ethereum.
+
+ cpp-ethereum is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ cpp-ethereum is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
+*/
+/** @file util.c
+ * @author Tim Hughes <tim@twistedfury.com>
+ * @date 2015
+ */
+#include <stdarg.h>
+#include <stdio.h>
+#include "util.h"
+
+
+// foward declare without all of Windows.h
+__declspec(dllimport) void __stdcall OutputDebugStringA(char const* lpOutputString);
+
+void debugf(char const* str, ...)
+{
+ va_list args;
+ va_start(args, str);
+
+ char buf[1<<16];
+ _vsnprintf_s(buf, sizeof(buf), sizeof(buf), str, args);
+ buf[sizeof(buf)-1] = '\0';
+ OutputDebugStringA(buf);
+}
diff --git a/vendor/github.com/fatih/color/.travis.yml b/vendor/github.com/fatih/color/.travis.yml
new file mode 100644
index 000000000..57b4b57c8
--- /dev/null
+++ b/vendor/github.com/fatih/color/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+go:
+ - 1.6
+ - tip
+
diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md
new file mode 100644
index 000000000..25fdaf639
--- /dev/null
+++ b/vendor/github.com/fatih/color/LICENSE.md
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md
new file mode 100644
index 000000000..6e39e919f
--- /dev/null
+++ b/vendor/github.com/fatih/color/README.md
@@ -0,0 +1,154 @@
+# Color [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/color) [![Build Status](http://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color)
+
+
+
+Color lets you use colorized outputs in terms of [ANSI Escape
+Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
+has support for Windows too! The API can be used in several ways, pick one that
+suits you.
+
+
+
+![Color](http://i.imgur.com/c1JI0lA.png)
+
+
+## Install
+
+```bash
+go get github.com/fatih/color
+```
+
+## Examples
+
+### Standard colors
+
+```go
+// Print with default helper functions
+color.Cyan("Prints text in cyan.")
+
+// A newline will be appended automatically
+color.Blue("Prints %s in blue.", "text")
+
+// These are using the default foreground colors
+color.Red("We have red")
+color.Magenta("And many others ..")
+
+```
+
+### Mix and reuse colors
+
+```go
+// Create a new color object
+c := color.New(color.FgCyan).Add(color.Underline)
+c.Println("Prints cyan text with an underline.")
+
+// Or just add them to New()
+d := color.New(color.FgCyan, color.Bold)
+d.Printf("This prints bold cyan %s\n", "too!.")
+
+// Mix up foreground and background colors, create new mixes!
+red := color.New(color.FgRed)
+
+boldRed := red.Add(color.Bold)
+boldRed.Println("This will print text in bold red.")
+
+whiteBackground := red.Add(color.BgWhite)
+whiteBackground.Println("Red text with white background.")
+```
+
+### Custom print functions (PrintFunc)
+
+```go
+// Create a custom print function for convenience
+red := color.New(color.FgRed).PrintfFunc()
+red("Warning")
+red("Error: %s", err)
+
+// Mix up multiple attributes
+notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+notice("Don't forget this...")
+```
+
+### Insert into noncolor strings (SprintFunc)
+
+```go
+// Create SprintXxx functions to mix strings with other non-colorized strings:
+yellow := color.New(color.FgYellow).SprintFunc()
+red := color.New(color.FgRed).SprintFunc()
+fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+fmt.Printf("This %s rocks!\n", info("package"))
+
+// Use helper functions
+fmt.Println("This", color.RedString("warning"), "should be not neglected.")
+fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
+
+// Windows supported too! Just don't forget to change the output to color.Output
+fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+```
+
+### Plug into existing code
+
+```go
+// Use handy standard colors
+color.Set(color.FgYellow)
+
+fmt.Println("Existing text will now be in yellow")
+fmt.Printf("This one %s\n", "too")
+
+color.Unset() // Don't forget to unset
+
+// You can mix up parameters
+color.Set(color.FgMagenta, color.Bold)
+defer color.Unset() // Use it in your function
+
+fmt.Println("All text will now be bold magenta.")
+```
+
+### Disable color
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definition. For example
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+```go
+
+var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+if *flagNoColor {
+ color.NoColor = true // disables colorized output
+}
+```
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+```go
+c := color.New(color.FgCyan)
+c.Println("Prints cyan text")
+
+c.DisableColor()
+c.Println("This is printed without any color")
+
+c.EnableColor()
+c.Println("This prints again cyan...")
+```
+
+## Todo
+
+* Save/Return previous values
+* Evaluate fmt.Formatter interface
+
+
+## Credits
+
+ * [Fatih Arslan](https://github.com/fatih)
+ * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
+
+## License
+
+The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
+
diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go
new file mode 100644
index 000000000..06f4867f7
--- /dev/null
+++ b/vendor/github.com/fatih/color/color.go
@@ -0,0 +1,423 @@
+package color
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+)
+
+// NoColor defines if the output is colorized or not. It's dynamically set to
+// false or true based on the stdout's file descriptor referring to a terminal
+// or not. This is a global option and affects all colors. For more control
+// over each color block use the methods DisableColor() individually.
+var NoColor = !isatty.IsTerminal(os.Stdout.Fd())
+
+// Color defines a custom color object which is defined by SGR parameters.
+type Color struct {
+ params []Attribute
+ noColor *bool
+}
+
+// Attribute defines a single SGR Code
+type Attribute int
+
+const escape = "\x1b"
+
+// Base attributes
+const (
+ Reset Attribute = iota
+ Bold
+ Faint
+ Italic
+ Underline
+ BlinkSlow
+ BlinkRapid
+ ReverseVideo
+ Concealed
+ CrossedOut
+)
+
+// Foreground text colors
+const (
+ FgBlack Attribute = iota + 30
+ FgRed
+ FgGreen
+ FgYellow
+ FgBlue
+ FgMagenta
+ FgCyan
+ FgWhite
+)
+
+// Foreground Hi-Intensity text colors
+const (
+ FgHiBlack Attribute = iota + 90
+ FgHiRed
+ FgHiGreen
+ FgHiYellow
+ FgHiBlue
+ FgHiMagenta
+ FgHiCyan
+ FgHiWhite
+)
+
+// Background text colors
+const (
+ BgBlack Attribute = iota + 40
+ BgRed
+ BgGreen
+ BgYellow
+ BgBlue
+ BgMagenta
+ BgCyan
+ BgWhite
+)
+
+// Background Hi-Intensity text colors
+const (
+ BgHiBlack Attribute = iota + 100
+ BgHiRed
+ BgHiGreen
+ BgHiYellow
+ BgHiBlue
+ BgHiMagenta
+ BgHiCyan
+ BgHiWhite
+)
+
+// New returns a newly created color object.
+func New(value ...Attribute) *Color {
+ c := &Color{params: make([]Attribute, 0)}
+ c.Add(value...)
+ return c
+}
+
+// Set sets the given parameters immediately. It will change the color of
+// output with the given SGR parameters until color.Unset() is called.
+func Set(p ...Attribute) *Color {
+ c := New(p...)
+ c.Set()
+ return c
+}
+
+// Unset resets all escape attributes and clears the output. Usually should
+// be called after Set().
+func Unset() {
+ if NoColor {
+ return
+ }
+
+ fmt.Fprintf(Output, "%s[%dm", escape, Reset)
+}
+
+// Set sets the SGR sequence.
+func (c *Color) Set() *Color {
+ if c.isNoColorSet() {
+ return c
+ }
+
+ fmt.Fprintf(Output, c.format())
+ return c
+}
+
+func (c *Color) unset() {
+ if c.isNoColorSet() {
+ return
+ }
+
+ Unset()
+}
+
+// Add is used to chain SGR parameters. Use as many as parameters to combine
+// and create custom color objects. Example: Add(color.FgRed, color.Underline).
+func (c *Color) Add(value ...Attribute) *Color {
+ c.params = append(c.params, value...)
+ return c
+}
+
+func (c *Color) prepend(value Attribute) {
+ c.params = append(c.params, 0)
+ copy(c.params[1:], c.params[0:])
+ c.params[0] = value
+}
+
+// Output defines the standard output of the print functions. By default
+// os.Stdout is used.
+var Output = colorable.NewColorableStdout()
+
+// Print formats using the default formats for its operands and writes to
+// standard output. Spaces are added between operands when neither is a
+// string. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Print(a ...interface{}) (n int, err error) {
+ c.Set()
+ defer c.unset()
+
+ return fmt.Fprint(Output, a...)
+}
+
+// Printf formats according to a format specifier and writes to standard output.
+// It returns the number of bytes written and any write error encountered.
+// This is the standard fmt.Printf() method wrapped with the given color.
+func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
+ c.Set()
+ defer c.unset()
+
+ return fmt.Fprintf(Output, format, a...)
+}
+
+// Println formats using the default formats for its operands and writes to
+// standard output. Spaces are always added between operands and a newline is
+// appended. It returns the number of bytes written and any write error
+// encountered. This is the standard fmt.Print() method wrapped with the given
+// color.
+func (c *Color) Println(a ...interface{}) (n int, err error) {
+ c.Set()
+ defer c.unset()
+
+ return fmt.Fprintln(Output, a...)
+}
+
+// PrintFunc returns a new function that prints the passed arguments as
+// colorized with color.Print().
+func (c *Color) PrintFunc() func(a ...interface{}) {
+ return func(a ...interface{}) { c.Print(a...) }
+}
+
+// PrintfFunc returns a new function that prints the passed arguments as
+// colorized with color.Printf().
+func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
+ return func(format string, a ...interface{}) { c.Printf(format, a...) }
+}
+
+// PrintlnFunc returns a new function that prints the passed arguments as
+// colorized with color.Println().
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+ return func(a ...interface{}) { c.Println(a...) }
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful to put into or mix into other
+// string. Windows users should use this in conjuction with color.Output, example:
+//
+// put := New(FgYellow).SprintFunc()
+// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+ return func(a ...interface{}) string {
+ return c.wrap(fmt.Sprint(a...))
+ }
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful to put into or mix into other
+// string. Windows users should use this in conjuction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+ return func(format string, a ...interface{}) string {
+ return c.wrap(fmt.Sprintf(format, a...))
+ }
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful to put into or mix into other
+// string. Windows users should use this in conjuction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+ return func(a ...interface{}) string {
+ return c.wrap(fmt.Sprintln(a...))
+ }
+}
+
+// sequence returns a formated SGR sequence to be plugged into a "\x1b[...m"
+// an example output might be: "1;36" -> bold cyan
+func (c *Color) sequence() string {
+ format := make([]string, len(c.params))
+ for i, v := range c.params {
+ format[i] = strconv.Itoa(int(v))
+ }
+
+ return strings.Join(format, ";")
+}
+
+// wrap wraps the s string with the colors attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+ if c.isNoColorSet() {
+ return s
+ }
+
+ return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+ return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+ return fmt.Sprintf("%s[%dm", escape, Reset)
+}
+
+// DisableColor disables the color output. Useful to not change any existing
+// code and still being able to output. Can be used for flags like
+// "--no-color". To enable back use EnableColor() method.
+func (c *Color) DisableColor() {
+ c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjuction with
+// DisableColor(). Otherwise this method has no side effects.
+func (c *Color) EnableColor() {
+ c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+ // check first if we have user setted action
+ if c.noColor != nil {
+ return *c.noColor
+ }
+
+ // if not return the global option, which is disabled by default
+ return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
+func (c *Color) Equals(c2 *Color) bool {
+ if len(c.params) != len(c2.params) {
+ return false
+ }
+
+ for _, attr := range c.params {
+ if !c2.attrExists(attr) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (c *Color) attrExists(a Attribute) bool {
+ for _, attr := range c.params {
+ if attr == a {
+ return true
+ }
+ }
+
+ return false
+}
+
+func boolPtr(v bool) *bool {
+ return &v
+}
+
+// colorsCache is used to reduce the count of created Color objects and
+// allows to reuse already created objects with required Attribute.
+var colorsCache = make(map[Attribute]*Color)
+
+var colorsCacheMu = new(sync.Mutex) // protects colorsCache
+
+func getCachedColor(p Attribute) *Color {
+ colorsCacheMu.Lock()
+ defer colorsCacheMu.Unlock()
+
+ c, ok := colorsCache[p]
+ if !ok {
+ c = New(p)
+ colorsCache[p] = c
+ }
+
+ return c
+}
+
+func colorPrint(format string, p Attribute, a ...interface{}) {
+ c := getCachedColor(p)
+
+ if !strings.HasSuffix(format, "\n") {
+ format += "\n"
+ }
+
+ if len(a) == 0 {
+ c.Print(format)
+ } else {
+ c.Printf(format, a...)
+ }
+}
+
+func colorString(format string, p Attribute, a ...interface{}) string {
+ c := getCachedColor(p)
+
+ if len(a) == 0 {
+ return c.SprintFunc()(format)
+ }
+
+ return c.SprintfFunc()(format, a...)
+}
+
+// Black is an convenient helper function to print with black foreground. A
+// newline is appended to format by default.
+func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
+
+// Red is an convenient helper function to print with red foreground. A
+// newline is appended to format by default.
+func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
+
+// Green is an convenient helper function to print with green foreground. A
+// newline is appended to format by default.
+func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
+
+// Yellow is an convenient helper function to print with yellow foreground.
+// A newline is appended to format by default.
+func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
+
+// Blue is an convenient helper function to print with blue foreground. A
+// newline is appended to format by default.
+func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
+
+// Magenta is an convenient helper function to print with magenta foreground.
+// A newline is appended to format by default.
+func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
+
+// Cyan is an convenient helper function to print with cyan foreground. A
+// newline is appended to format by default.
+func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
+
+// White is an convenient helper function to print with white foreground. A
+// newline is appended to format by default.
+func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
+
+// BlackString is an convenient helper function to return a string with black
+// foreground.
+func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
+
+// RedString is an convenient helper function to return a string with red
+// foreground.
+func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
+
+// GreenString is an convenient helper function to return a string with green
+// foreground.
+func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
+
+// YellowString is an convenient helper function to return a string with yellow
+// foreground.
+func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
+
+// BlueString is an convenient helper function to return a string with blue
+// foreground.
+func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
+
+// MagentaString is an convenient helper function to return a string with magenta
+// foreground.
+func MagentaString(format string, a ...interface{}) string {
+ return colorString(format, FgMagenta, a...)
+}
+
+// CyanString is an convenient helper function to return a string with cyan
+// foreground.
+func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
+
+// WhiteString is an convenient helper function to return a string with white
+// foreground.
+func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 000000000..17908787c
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,114 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several way, pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+ color.Cyan("Prints text in cyan.")
+
+ // a newline will be appended automatically
+ color.Blue("Prints %s in blue.", "text")
+
+ // More default foreground colors..
+ color.Red("We have red")
+ color.Yellow("Yellow color too!")
+ color.Magenta("And many others ..")
+
+However there are times where custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+ // Create a new color object
+ c := color.New(color.FgCyan).Add(color.Underline)
+ c.Println("Prints cyan text with an underline.")
+
+ // Or just add them to New()
+ d := color.New(color.FgCyan, color.Bold)
+ d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+ // Mix up foreground and background colors, create new mixes!
+ red := color.New(color.FgRed)
+
+ boldRed := red.Add(color.Bold)
+ boldRed.Println("This will print text in bold red.")
+
+ whiteBackground := red.Add(color.BgWhite)
+ whiteBackground.Println("Red text with White background.")
+
+
+You can create PrintXxx functions to simplify even more:
+
+    // Create a custom print function for convenience
+ red := color.New(color.FgRed).PrintfFunc()
+ red("warning")
+ red("error: %s", err)
+
+ // Mix up multiple attributes
+ notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+ notice("don't forget this...")
+
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+ yellow := New(FgYellow).SprintFunc()
+ red := New(FgRed).SprintFunc()
+
+ fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+ info := New(FgWhite, BgGreen).SprintFunc()
+ fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions only, the user should use fmt.FprintXXX
+and set the output to color.Output:
+
+ fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+ info := New(FgWhite, BgGreen).SprintFunc()
+ fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of an existing
+code is not required.
+
+ // Use handy standard colors.
+ color.Set(color.FgYellow)
+
+ fmt.Println("Existing text will be now in Yellow")
+ fmt.Printf("This one %s\n", "too")
+
+ color.Unset() // don't forget to unset
+
+ // You can mix up parameters
+ color.Set(color.FgMagenta, color.Bold)
+ defer color.Unset() // use it in your function
+
+ fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definition. For example
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+ var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+ if *flagNoColor {
+ color.NoColor = true // disables colorized output
+ }
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+ c := color.New(color.FgCyan)
+ c.Println("Prints cyan text")
+
+ c.DisableColor()
+ c.Println("This is printed without any color")
+
+ c.EnableColor()
+ c.Println("This prints again cyan...")
+*/
+package color
diff --git a/vendor/github.com/gizak/termui/.gitignore b/vendor/github.com/gizak/termui/.gitignore
new file mode 100644
index 000000000..8b156b020
--- /dev/null
+++ b/vendor/github.com/gizak/termui/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+.DS_Store
+/vendor
diff --git a/vendor/github.com/gizak/termui/.travis.yml b/vendor/github.com/gizak/termui/.travis.yml
new file mode 100644
index 000000000..206e88740
--- /dev/null
+++ b/vendor/github.com/gizak/termui/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+ - tip
+
+script: go test -v ./ \ No newline at end of file
diff --git a/vendor/github.com/gizak/termui/LICENSE b/vendor/github.com/gizak/termui/LICENSE
new file mode 100644
index 000000000..311ccc74f
--- /dev/null
+++ b/vendor/github.com/gizak/termui/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Zack Guo
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/gizak/termui/README.md b/vendor/github.com/gizak/termui/README.md
new file mode 100644
index 000000000..4f3d4a419
--- /dev/null
+++ b/vendor/github.com/gizak/termui/README.md
@@ -0,0 +1,150 @@
+# termui [![Build Status](https://travis-ci.org/gizak/termui.svg?branch=master)](https://travis-ci.org/gizak/termui) [![Doc Status](https://godoc.org/github.com/gizak/termui?status.png)](https://godoc.org/github.com/gizak/termui)
+
+<img src="./_example/dashboard.gif" alt="demo cast under osx 10.10; Terminal.app; Menlo Regular 12pt.)" width="80%">
+
+`termui` is a cross-platform, easy-to-compile, and fully-customizable terminal dashboard. It is inspired by [blessed-contrib](https://github.com/yaronn/blessed-contrib), but purely in Go.
+
+Now version v2 has arrived! It brings a new event system, a new theme system, a new `Buffer` interface and specific colour text rendering. (some docs are missing, but they will be completed soon!)
+
+## Installation
+
+`master` mirrors v2 branch, to install:
+
+ go get -u github.com/gizak/termui
+
+It is recommended to use locked deps by using [glide](https://glide.sh): move to `termui` src directory then run `glide up`.
+
+For compatibility reasons, you can choose to install the legacy version of `termui`:
+
+ go get gopkg.in/gizak/termui.v1
+
+## Usage
+
+### Layout
+
+To use `termui`, the very first thing you may want to know is how to manage layout. `termui` offers two ways of doing this, known as absolute layout and grid layout.
+
+__Absolute layout__
+
+Each widget has an underlying block structure which basically is a box model. It has border, label and padding properties. A border of a widget can be chosen to hide or display (with its border label), you can pick a different front/back colour for the border as well. To display such a widget at a specific location in terminal window, you need to assign `.X`, `.Y`, `.Height`, `.Width` values for each widget before sending it to `.Render`. Let's demonstrate these by a code snippet:
+
+`````go
+ import ui "github.com/gizak/termui" // <- ui shortcut, optional
+
+ func main() {
+ err := ui.Init()
+ if err != nil {
+ panic(err)
+ }
+ defer ui.Close()
+
+ p := ui.NewPar(":PRESS q TO QUIT DEMO")
+ p.Height = 3
+ p.Width = 50
+ p.TextFgColor = ui.ColorWhite
+ p.BorderLabel = "Text Box"
+ p.BorderFg = ui.ColorCyan
+
+ g := ui.NewGauge()
+ g.Percent = 50
+ g.Width = 50
+ g.Height = 3
+ g.Y = 11
+ g.BorderLabel = "Gauge"
+ g.BarColor = ui.ColorRed
+ g.BorderFg = ui.ColorWhite
+ g.BorderLabelFg = ui.ColorCyan
+
+ ui.Render(p, g) // feel free to call Render, it's async and non-block
+
+ // event handler...
+ }
+`````
+
+Note that components can be overlapped (I'd rather call this a feature...), `Render(rs ...Renderer)` renders its args from left to right (i.e. each component's weight increases from left to right).
+
+__Grid layout:__
+
+<img src="./_example/grid.gif" alt="grid" width="60%">
+
+Grid layout uses [12 columns grid system](http://www.w3schools.com/bootstrap/bootstrap_grid_system.asp) with expressive syntax. To use `Grid`, all we need to do is build a widget tree consisting of `Row`s and `Col`s (Actually a `Col` is also a `Row` but with a widget endpoint attached).
+
+```go
+ import ui "github.com/gizak/termui"
+ // init and create widgets...
+
+ // build
+ ui.Body.AddRows(
+ ui.NewRow(
+ ui.NewCol(6, 0, widget0),
+ ui.NewCol(6, 0, widget1)),
+ ui.NewRow(
+ ui.NewCol(3, 0, widget2),
+ ui.NewCol(3, 0, widget30, widget31, widget32),
+ ui.NewCol(6, 0, widget4)))
+
+ // calculate layout
+ ui.Body.Align()
+
+ ui.Render(ui.Body)
+```
+
+### Events
+
+`termui` ships with an http-like event mux handling system. All events are channeled up from different sources (typing, click, window resize, custom event) and then encoded as a universal `Event` object. `Event.Path` indicates the event type and `Event.Data` stores the event data struct. Adding a handler for a certain event is as easy as below:
+
+```go
+ // handle key q pressing
+ ui.Handle("/sys/kbd/q", func(ui.Event) {
+ // press q to quit
+ ui.StopLoop()
+ })
+
+ ui.Handle("/sys/kbd/C-x", func(ui.Event) {
+ // handle Ctrl + x combination
+ })
+
+ ui.Handle("/sys/kbd", func(ui.Event) {
+ // handle all other key pressing
+ })
+
+ // handle a 1s timer
+ ui.Handle("/timer/1s", func(e ui.Event) {
+ t := e.Data.(ui.EvtTimer)
+ // t is a EvtTimer
+ if t.Count%2 ==0 {
+ // do something
+ }
+ })
+
+ ui.Loop() // block until StopLoop is called
+```
+
+### Widgets
+
+Click image to see the corresponding demo codes.
+
+[<img src="./_example/par.png" alt="par" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/par.go)
+[<img src="./_example/list.png" alt="list" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/list.go)
+[<img src="./_example/gauge.png" alt="gauge" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/gauge.go)
+[<img src="./_example/linechart.png" alt="linechart" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/linechart.go)
+[<img src="./_example/barchart.png" alt="barchart" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/barchart.go)
+[<img src="./_example/mbarchart.png" alt="barchart" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/mbarchart.go)
+[<img src="./_example/sparklines.png" alt="sparklines" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/sparklines.go)
+
+## GoDoc
+
+[godoc](https://godoc.org/github.com/gizak/termui)
+
+## TODO
+
+- [x] Grid layout
+- [x] Event system
+- [x] Canvas widget
+- [x] Refine APIs
+- [ ] Focusable widgets
+
+## Changelog
+
+## License
+This library is under the [MIT License](http://opensource.org/licenses/MIT)
diff --git a/vendor/github.com/gizak/termui/barchart.go b/vendor/github.com/gizak/termui/barchart.go
new file mode 100644
index 000000000..9e2a10689
--- /dev/null
+++ b/vendor/github.com/gizak/termui/barchart.go
@@ -0,0 +1,149 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "fmt"
+
+// BarChart creates multiple bars in a widget:
+/*
+ bc := termui.NewBarChart()
+ data := []int{3, 2, 5, 3, 9, 5}
+ bclabels := []string{"S0", "S1", "S2", "S3", "S4", "S5"}
+ bc.BorderLabel = "Bar Chart"
+ bc.Data = data
+ bc.Width = 26
+ bc.Height = 10
+ bc.DataLabels = bclabels
+ bc.TextColor = termui.ColorGreen
+ bc.BarColor = termui.ColorRed
+ bc.NumColor = termui.ColorYellow
+*/
+type BarChart struct {
+ Block
+ BarColor Attribute
+ TextColor Attribute
+ NumColor Attribute
+ Data []int
+ DataLabels []string
+ BarWidth int
+ BarGap int
+ CellChar rune
+ labels [][]rune
+ dataNum [][]rune
+ numBar int
+ scale float64
+ max int
+}
+
+// NewBarChart returns a new *BarChart with current theme.
+func NewBarChart() *BarChart {
+ bc := &BarChart{Block: *NewBlock()}
+ bc.BarColor = ThemeAttr("barchart.bar.bg")
+ bc.NumColor = ThemeAttr("barchart.num.fg")
+ bc.TextColor = ThemeAttr("barchart.text.fg")
+ bc.BarGap = 1
+ bc.BarWidth = 3
+ bc.CellChar = ' '
+ return bc
+}
+
+func (bc *BarChart) layout() {
+ bc.numBar = bc.innerArea.Dx() / (bc.BarGap + bc.BarWidth)
+ bc.labels = make([][]rune, bc.numBar)
+ bc.dataNum = make([][]rune, len(bc.Data))
+
+ for i := 0; i < bc.numBar && i < len(bc.DataLabels) && i < len(bc.Data); i++ {
+ bc.labels[i] = trimStr2Runes(bc.DataLabels[i], bc.BarWidth)
+ n := bc.Data[i]
+ s := fmt.Sprint(n)
+ bc.dataNum[i] = trimStr2Runes(s, bc.BarWidth)
+ }
+
+	//bc.max = bc.Data[0] // what if Data is nil? Sometimes when bar graph is nil it produces panic with panic: runtime error: index out of range
+	// Assign a negative value so that the max value auto-populates
+ if bc.max == 0 {
+ bc.max = -1
+ }
+ for i := 0; i < len(bc.Data); i++ {
+ if bc.max < bc.Data[i] {
+ bc.max = bc.Data[i]
+ }
+ }
+ bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-1)
+}
+
+func (bc *BarChart) SetMax(max int) {
+
+ if max > 0 {
+ bc.max = max
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (bc *BarChart) Buffer() Buffer {
+ buf := bc.Block.Buffer()
+ bc.layout()
+
+ for i := 0; i < bc.numBar && i < len(bc.Data) && i < len(bc.DataLabels); i++ {
+ h := int(float64(bc.Data[i]) / bc.scale)
+ oftX := i * (bc.BarWidth + bc.BarGap)
+
+ barBg := bc.Bg
+ barFg := bc.BarColor
+
+ if bc.CellChar == ' ' {
+ barBg = bc.BarColor
+ barFg = ColorDefault
+ if bc.BarColor == ColorDefault { // the same as above
+ barBg |= AttrReverse
+ }
+ }
+
+ // plot bar
+ for j := 0; j < bc.BarWidth; j++ {
+ for k := 0; k < h; k++ {
+ c := Cell{
+ Ch: bc.CellChar,
+ Bg: barBg,
+ Fg: barFg,
+ }
+
+ x := bc.innerArea.Min.X + i*(bc.BarWidth+bc.BarGap) + j
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - k
+ buf.Set(x, y, c)
+ }
+ }
+ // plot text
+ for j, k := 0, 0; j < len(bc.labels[i]); j++ {
+ w := charWidth(bc.labels[i][j])
+ c := Cell{
+ Ch: bc.labels[i][j],
+ Bg: bc.Bg,
+ Fg: bc.TextColor,
+ }
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 1
+ x := bc.innerArea.Min.X + oftX + k
+ buf.Set(x, y, c)
+ k += w
+ }
+ // plot num
+ for j := 0; j < len(bc.dataNum[i]); j++ {
+ c := Cell{
+ Ch: bc.dataNum[i][j],
+ Fg: bc.NumColor,
+ Bg: barBg,
+ }
+
+ if h == 0 {
+ c.Bg = bc.Bg
+ }
+ x := bc.innerArea.Min.X + oftX + (bc.BarWidth-len(bc.dataNum[i]))/2 + j
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2
+ buf.Set(x, y, c)
+ }
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/block.go b/vendor/github.com/gizak/termui/block.go
new file mode 100644
index 000000000..418738c8d
--- /dev/null
+++ b/vendor/github.com/gizak/termui/block.go
@@ -0,0 +1,240 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "image"
+
+// Hline is a horizontal line.
+type Hline struct {
+ X int
+ Y int
+ Len int
+ Fg Attribute
+ Bg Attribute
+}
+
+// Vline is a vertical line.
+type Vline struct {
+ X int
+ Y int
+ Len int
+ Fg Attribute
+ Bg Attribute
+}
+
+// Buffer draws a horizontal line.
+func (l Hline) Buffer() Buffer {
+ if l.Len <= 0 {
+ return NewBuffer()
+ }
+ return NewFilledBuffer(l.X, l.Y, l.X+l.Len, l.Y+1, HORIZONTAL_LINE, l.Fg, l.Bg)
+}
+
+// Buffer draws a vertical line.
+func (l Vline) Buffer() Buffer {
+ if l.Len <= 0 {
+ return NewBuffer()
+ }
+ return NewFilledBuffer(l.X, l.Y, l.X+1, l.Y+l.Len, VERTICAL_LINE, l.Fg, l.Bg)
+}
+
+// Buffer draws a box border.
+func (b Block) drawBorder(buf Buffer) {
+ if !b.Border {
+ return
+ }
+
+ min := b.area.Min
+ max := b.area.Max
+
+ x0 := min.X
+ y0 := min.Y
+ x1 := max.X - 1
+ y1 := max.Y - 1
+
+ // draw lines
+ if b.BorderTop {
+ buf.Merge(Hline{x0, y0, x1 - x0, b.BorderFg, b.BorderBg}.Buffer())
+ }
+ if b.BorderBottom {
+ buf.Merge(Hline{x0, y1, x1 - x0, b.BorderFg, b.BorderBg}.Buffer())
+ }
+ if b.BorderLeft {
+ buf.Merge(Vline{x0, y0, y1 - y0, b.BorderFg, b.BorderBg}.Buffer())
+ }
+ if b.BorderRight {
+ buf.Merge(Vline{x1, y0, y1 - y0, b.BorderFg, b.BorderBg}.Buffer())
+ }
+
+ // draw corners
+ if b.BorderTop && b.BorderLeft && b.area.Dx() > 0 && b.area.Dy() > 0 {
+ buf.Set(x0, y0, Cell{TOP_LEFT, b.BorderFg, b.BorderBg})
+ }
+ if b.BorderTop && b.BorderRight && b.area.Dx() > 1 && b.area.Dy() > 0 {
+ buf.Set(x1, y0, Cell{TOP_RIGHT, b.BorderFg, b.BorderBg})
+ }
+ if b.BorderBottom && b.BorderLeft && b.area.Dx() > 0 && b.area.Dy() > 1 {
+ buf.Set(x0, y1, Cell{BOTTOM_LEFT, b.BorderFg, b.BorderBg})
+ }
+ if b.BorderBottom && b.BorderRight && b.area.Dx() > 1 && b.area.Dy() > 1 {
+ buf.Set(x1, y1, Cell{BOTTOM_RIGHT, b.BorderFg, b.BorderBg})
+ }
+}
+
+func (b Block) drawBorderLabel(buf Buffer) {
+ maxTxtW := b.area.Dx() - 2
+ tx := DTrimTxCls(DefaultTxBuilder.Build(b.BorderLabel, b.BorderLabelFg, b.BorderLabelBg), maxTxtW)
+
+ for i, w := 0, 0; i < len(tx); i++ {
+ buf.Set(b.area.Min.X+1+w, b.area.Min.Y, tx[i])
+ w += tx[i].Width()
+ }
+}
+
+// Block is a base struct for all other upper level widgets,
+// consider it as css: display:block.
+// Normally you do not need to create it manually.
+type Block struct {
+ area image.Rectangle
+ innerArea image.Rectangle
+ X int
+ Y int
+ Border bool
+ BorderFg Attribute
+ BorderBg Attribute
+ BorderLeft bool
+ BorderRight bool
+ BorderTop bool
+ BorderBottom bool
+ BorderLabel string
+ BorderLabelFg Attribute
+ BorderLabelBg Attribute
+ Display bool
+ Bg Attribute
+ Width int
+ Height int
+ PaddingTop int
+ PaddingBottom int
+ PaddingLeft int
+ PaddingRight int
+ id string
+ Float Align
+}
+
+// NewBlock returns a *Block which inherits styles from current theme.
+func NewBlock() *Block {
+ b := Block{}
+ b.Display = true
+ b.Border = true
+ b.BorderLeft = true
+ b.BorderRight = true
+ b.BorderTop = true
+ b.BorderBottom = true
+ b.BorderBg = ThemeAttr("border.bg")
+ b.BorderFg = ThemeAttr("border.fg")
+ b.BorderLabelBg = ThemeAttr("label.bg")
+ b.BorderLabelFg = ThemeAttr("label.fg")
+ b.Bg = ThemeAttr("block.bg")
+ b.Width = 2
+ b.Height = 2
+ b.id = GenId()
+ b.Float = AlignNone
+ return &b
+}
+
+func (b Block) Id() string {
+ return b.id
+}
+
+// Align computes box model
+func (b *Block) Align() {
+ // outer
+ b.area.Min.X = 0
+ b.area.Min.Y = 0
+ b.area.Max.X = b.Width
+ b.area.Max.Y = b.Height
+
+ // float
+ b.area = AlignArea(TermRect(), b.area, b.Float)
+ b.area = MoveArea(b.area, b.X, b.Y)
+
+ // inner
+ b.innerArea.Min.X = b.area.Min.X + b.PaddingLeft
+ b.innerArea.Min.Y = b.area.Min.Y + b.PaddingTop
+ b.innerArea.Max.X = b.area.Max.X - b.PaddingRight
+ b.innerArea.Max.Y = b.area.Max.Y - b.PaddingBottom
+
+ if b.Border {
+ if b.BorderLeft {
+ b.innerArea.Min.X++
+ }
+ if b.BorderRight {
+ b.innerArea.Max.X--
+ }
+ if b.BorderTop {
+ b.innerArea.Min.Y++
+ }
+ if b.BorderBottom {
+ b.innerArea.Max.Y--
+ }
+ }
+}
+
+// InnerBounds returns the internal bounds of the block after aligning and
+// calculating the padding and border, if any.
+func (b *Block) InnerBounds() image.Rectangle {
+ b.Align()
+ return b.innerArea
+}
+
+// Buffer implements Bufferer interface.
+// Draw background and border (if any).
+func (b *Block) Buffer() Buffer {
+ b.Align()
+
+ buf := NewBuffer()
+ buf.SetArea(b.area)
+ buf.Fill(' ', ColorDefault, b.Bg)
+
+ b.drawBorder(buf)
+ b.drawBorderLabel(buf)
+
+ return buf
+}
+
+// GetHeight implements GridBufferer.
+// It returns current height of the block.
+func (b Block) GetHeight() int {
+ return b.Height
+}
+
+// SetX implements GridBufferer interface, which sets block's x position.
+func (b *Block) SetX(x int) {
+ b.X = x
+}
+
+// SetY implements GridBufferer interface, it sets y position for block.
+func (b *Block) SetY(y int) {
+ b.Y = y
+}
+
+// SetWidth implements GridBuffer interface, it sets block's width.
+func (b *Block) SetWidth(w int) {
+ b.Width = w
+}
+
+func (b Block) InnerWidth() int {
+ return b.innerArea.Dx()
+}
+
+func (b Block) InnerHeight() int {
+ return b.innerArea.Dy()
+}
+
+func (b Block) InnerX() int {
+ return b.innerArea.Min.X
+}
+
+func (b Block) InnerY() int { return b.innerArea.Min.Y }
diff --git a/vendor/github.com/gizak/termui/block_common.go b/vendor/github.com/gizak/termui/block_common.go
new file mode 100644
index 000000000..0f972cbbd
--- /dev/null
+++ b/vendor/github.com/gizak/termui/block_common.go
@@ -0,0 +1,20 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build !windows
+
+package termui
+
+const TOP_RIGHT = '┐'
+const VERTICAL_LINE = '│'
+const HORIZONTAL_LINE = '─'
+const TOP_LEFT = '┌'
+const BOTTOM_RIGHT = '┘'
+const BOTTOM_LEFT = '└'
+const VERTICAL_LEFT = '┤'
+const VERTICAL_RIGHT = '├'
+const HORIZONTAL_DOWN = '┬'
+const HORIZONTAL_UP = '┴'
+const QUOTA_LEFT = '«'
+const QUOTA_RIGHT = '»'
diff --git a/vendor/github.com/gizak/termui/block_windows.go b/vendor/github.com/gizak/termui/block_windows.go
new file mode 100644
index 000000000..4dbc017de
--- /dev/null
+++ b/vendor/github.com/gizak/termui/block_windows.go
@@ -0,0 +1,14 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build windows
+
+package termui
+
+const TOP_RIGHT = '+'
+const VERTICAL_LINE = '|'
+const HORIZONTAL_LINE = '-'
+const TOP_LEFT = '+'
+const BOTTOM_RIGHT = '+'
+const BOTTOM_LEFT = '+'
diff --git a/vendor/github.com/gizak/termui/buffer.go b/vendor/github.com/gizak/termui/buffer.go
new file mode 100644
index 000000000..60e77863a
--- /dev/null
+++ b/vendor/github.com/gizak/termui/buffer.go
@@ -0,0 +1,106 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "image"
+
+// Cell is a rune with assigned Fg and Bg
+type Cell struct {
+ Ch rune
+ Fg Attribute
+ Bg Attribute
+}
+
+// Buffer is a renderable rectangle cell data container.
+type Buffer struct {
+ Area image.Rectangle // selected drawing area
+ CellMap map[image.Point]Cell
+}
+
+// At returns the cell at (x,y).
+func (b Buffer) At(x, y int) Cell {
+ return b.CellMap[image.Pt(x, y)]
+}
+
+// Set assigns a char to (x,y)
+func (b Buffer) Set(x, y int, c Cell) {
+ b.CellMap[image.Pt(x, y)] = c
+}
+
+// Bounds returns the domain for which At can return non-zero color.
+func (b Buffer) Bounds() image.Rectangle {
+ x0, y0, x1, y1 := 0, 0, 0, 0
+ for p := range b.CellMap {
+ if p.X > x1 {
+ x1 = p.X
+ }
+ if p.X < x0 {
+ x0 = p.X
+ }
+ if p.Y > y1 {
+ y1 = p.Y
+ }
+ if p.Y < y0 {
+ y0 = p.Y
+ }
+ }
+ return image.Rect(x0, y0, x1, y1)
+}
+
+// SetArea assigns a new rect area to Buffer b.
+func (b *Buffer) SetArea(r image.Rectangle) {
+ b.Area.Max = r.Max
+ b.Area.Min = r.Min
+}
+
+// Sync sets drawing area to the buffer's bound
+func (b Buffer) Sync() {
+ b.SetArea(b.Bounds())
+}
+
+// NewCell returns a new cell
+func NewCell(ch rune, fg, bg Attribute) Cell {
+ return Cell{ch, fg, bg}
+}
+
+// Merge merges bs Buffers onto b
+func (b *Buffer) Merge(bs ...Buffer) {
+ for _, buf := range bs {
+ for p, v := range buf.CellMap {
+ b.Set(p.X, p.Y, v)
+ }
+ b.SetArea(b.Area.Union(buf.Area))
+ }
+}
+
+// NewBuffer returns a new Buffer
+func NewBuffer() Buffer {
+ return Buffer{
+ CellMap: make(map[image.Point]Cell),
+ Area: image.Rectangle{}}
+}
+
+// Fill fills the Buffer b with ch,fg and bg.
+func (b Buffer) Fill(ch rune, fg, bg Attribute) {
+ for x := b.Area.Min.X; x < b.Area.Max.X; x++ {
+ for y := b.Area.Min.Y; y < b.Area.Max.Y; y++ {
+ b.Set(x, y, Cell{ch, fg, bg})
+ }
+ }
+}
+
+// NewFilledBuffer returns a new Buffer filled with ch, fb and bg.
+func NewFilledBuffer(x0, y0, x1, y1 int, ch rune, fg, bg Attribute) Buffer {
+ buf := NewBuffer()
+ buf.Area.Min = image.Pt(x0, y0)
+ buf.Area.Max = image.Pt(x1, y1)
+
+ for x := buf.Area.Min.X; x < buf.Area.Max.X; x++ {
+ for y := buf.Area.Min.Y; y < buf.Area.Max.Y; y++ {
+ buf.Set(x, y, Cell{ch, fg, bg})
+ }
+ }
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/canvas.go b/vendor/github.com/gizak/termui/canvas.go
new file mode 100644
index 000000000..4173780f3
--- /dev/null
+++ b/vendor/github.com/gizak/termui/canvas.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+/*
+dots:
+ ,___,
+ |1 4|
+ |2 5|
+ |3 6|
+ |7 8|
+ `````
+*/
+
+var brailleBase = '\u2800'
+
+var brailleOftMap = [4][2]rune{
+ {'\u0001', '\u0008'},
+ {'\u0002', '\u0010'},
+ {'\u0004', '\u0020'},
+ {'\u0040', '\u0080'}}
+
+// Canvas contains drawing map: i,j -> rune
+type Canvas map[[2]int]rune
+
+// NewCanvas returns an empty Canvas
+func NewCanvas() Canvas {
+ return make(map[[2]int]rune)
+}
+
+func chOft(x, y int) rune {
+ return brailleOftMap[y%4][x%2]
+}
+
+func (c Canvas) rawCh(x, y int) rune {
+ if ch, ok := c[[2]int{x, y}]; ok {
+ return ch
+ }
+ return '\u0000' //brailleOffset
+}
+
+// return coordinate in terminal
+func chPos(x, y int) (int, int) {
+ return y / 4, x / 2
+}
+
+// Set sets a point (x,y) in the virtual coordinate
+func (c Canvas) Set(x, y int) {
+ i, j := chPos(x, y)
+ ch := c.rawCh(i, j)
+ ch |= chOft(x, y)
+ c[[2]int{i, j}] = ch
+}
+
+// Unset removes point (x,y)
+func (c Canvas) Unset(x, y int) {
+ i, j := chPos(x, y)
+ ch := c.rawCh(i, j)
+ ch &= ^chOft(x, y)
+ c[[2]int{i, j}] = ch
+}
+
+// Buffer returns un-styled points
+func (c Canvas) Buffer() Buffer {
+ buf := NewBuffer()
+ for k, v := range c {
+ buf.Set(k[0], k[1], Cell{Ch: v + brailleBase})
+ }
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/config b/vendor/github.com/gizak/termui/config
new file mode 100755
index 000000000..18fd6a401
--- /dev/null
+++ b/vendor/github.com/gizak/termui/config
@@ -0,0 +1,26 @@
+#!/usr/bin/env perl6
+
+use v6;
+
+my $copyright = '// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+';
+
+sub MAIN('update-docstr', Str $srcp) {
+ if $srcp.IO.f {
+ $_ = $srcp.IO.slurp;
+ if m/^ \/\/\s Copyright .+? \n\n/ {
+ unless ~$/ eq $copyright {
+ s/^ \/\/\s Copyright .+? \n\n /$copyright/;
+ spurt $srcp, $_;
+ say "[updated] doc string for:"~$srcp;
+ }
+ } else {
+ say "[added] doc string for "~$srcp~" (no match found)";
+ $_ = $copyright ~ $_;
+ spurt $srcp, $_;
+ }
+ }
+}
diff --git a/vendor/github.com/gizak/termui/doc.go b/vendor/github.com/gizak/termui/doc.go
new file mode 100644
index 000000000..0d7108d30
--- /dev/null
+++ b/vendor/github.com/gizak/termui/doc.go
@@ -0,0 +1,29 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+/*
+Package termui is a library designed for creating command line UI. For more info, goto http://github.com/gizak/termui
+
+A simplest example:
+ package main
+
+ import ui "github.com/gizak/termui"
+
+ func main() {
+ if err:=ui.Init(); err != nil {
+ panic(err)
+ }
+ defer ui.Close()
+
+ g := ui.NewGauge()
+ g.Percent = 50
+ g.Width = 50
+ g.BorderLabel = "Gauge"
+
+ ui.Render(g)
+
+ ui.Loop()
+ }
+*/
+package termui
diff --git a/vendor/github.com/gizak/termui/events.go b/vendor/github.com/gizak/termui/events.go
new file mode 100644
index 000000000..6627f8942
--- /dev/null
+++ b/vendor/github.com/gizak/termui/events.go
@@ -0,0 +1,323 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "path"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/nsf/termbox-go"
+)
+
+type Event struct {
+ Type string
+ Path string
+ From string
+ To string
+ Data interface{}
+ Time int64
+}
+
+var sysEvtChs []chan Event
+
+type EvtKbd struct {
+ KeyStr string
+}
+
+func evtKbd(e termbox.Event) EvtKbd {
+ ek := EvtKbd{}
+
+ k := string(e.Ch)
+ pre := ""
+ mod := ""
+
+ if e.Mod == termbox.ModAlt {
+ mod = "M-"
+ }
+ if e.Ch == 0 {
+ if e.Key > 0xFFFF-12 {
+ k = "<f" + strconv.Itoa(0xFFFF-int(e.Key)+1) + ">"
+ } else if e.Key > 0xFFFF-25 {
+ ks := []string{"<insert>", "<delete>", "<home>", "<end>", "<previous>", "<next>", "<up>", "<down>", "<left>", "<right>"}
+ k = ks[0xFFFF-int(e.Key)-12]
+ }
+
+ if e.Key <= 0x7F {
+ pre = "C-"
+ k = string('a' - 1 + int(e.Key))
+ kmap := map[termbox.Key][2]string{
+ termbox.KeyCtrlSpace: {"C-", "<space>"},
+ termbox.KeyBackspace: {"", "<backspace>"},
+ termbox.KeyTab: {"", "<tab>"},
+ termbox.KeyEnter: {"", "<enter>"},
+ termbox.KeyEsc: {"", "<escape>"},
+ termbox.KeyCtrlBackslash: {"C-", "\\"},
+ termbox.KeyCtrlSlash: {"C-", "/"},
+ termbox.KeySpace: {"", "<space>"},
+ termbox.KeyCtrl8: {"C-", "8"},
+ }
+ if sk, ok := kmap[e.Key]; ok {
+ pre = sk[0]
+ k = sk[1]
+ }
+ }
+ }
+
+ ek.KeyStr = pre + mod + k
+ return ek
+}
+
+func crtTermboxEvt(e termbox.Event) Event {
+ systypemap := map[termbox.EventType]string{
+ termbox.EventKey: "keyboard",
+ termbox.EventResize: "window",
+ termbox.EventMouse: "mouse",
+ termbox.EventError: "error",
+ termbox.EventInterrupt: "interrupt",
+ }
+ ne := Event{From: "/sys", Time: time.Now().Unix()}
+ typ := e.Type
+ ne.Type = systypemap[typ]
+
+ switch typ {
+ case termbox.EventKey:
+ kbd := evtKbd(e)
+ ne.Path = "/sys/kbd/" + kbd.KeyStr
+ ne.Data = kbd
+ case termbox.EventResize:
+ wnd := EvtWnd{}
+ wnd.Width = e.Width
+ wnd.Height = e.Height
+ ne.Path = "/sys/wnd/resize"
+ ne.Data = wnd
+ case termbox.EventError:
+ err := EvtErr(e.Err)
+ ne.Path = "/sys/err"
+ ne.Data = err
+ case termbox.EventMouse:
+ m := EvtMouse{}
+ m.X = e.MouseX
+ m.Y = e.MouseY
+ ne.Path = "/sys/mouse"
+ ne.Data = m
+ }
+ return ne
+}
+
+type EvtWnd struct {
+ Width int
+ Height int
+}
+
+type EvtMouse struct {
+ X int
+ Y int
+ Press string
+}
+
+type EvtErr error
+
+func hookTermboxEvt() {
+ for {
+ e := termbox.PollEvent()
+
+ for _, c := range sysEvtChs {
+ go func(ch chan Event) {
+ ch <- crtTermboxEvt(e)
+ }(c)
+ }
+ }
+}
+
+func NewSysEvtCh() chan Event {
+ ec := make(chan Event)
+ sysEvtChs = append(sysEvtChs, ec)
+ return ec
+}
+
+var DefaultEvtStream = NewEvtStream()
+
+type EvtStream struct {
+ sync.RWMutex
+ srcMap map[string]chan Event
+ stream chan Event
+ wg sync.WaitGroup
+ sigStopLoop chan Event
+ Handlers map[string]func(Event)
+ hook func(Event)
+}
+
+func NewEvtStream() *EvtStream {
+ return &EvtStream{
+ srcMap: make(map[string]chan Event),
+ stream: make(chan Event),
+ Handlers: make(map[string]func(Event)),
+ sigStopLoop: make(chan Event),
+ }
+}
+
+func (es *EvtStream) Init() {
+ es.Merge("internal", es.sigStopLoop)
+ go func() {
+ es.wg.Wait()
+ close(es.stream)
+ }()
+}
+
+func cleanPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ return path.Clean(p)
+}
+
+func isPathMatch(pattern, path string) bool {
+ if len(pattern) == 0 {
+ return false
+ }
+ n := len(pattern)
+ return len(path) >= n && path[0:n] == pattern
+}
+
+func (es *EvtStream) Merge(name string, ec chan Event) {
+ es.Lock()
+ defer es.Unlock()
+
+ es.wg.Add(1)
+ es.srcMap[name] = ec
+
+ go func(a chan Event) {
+ for n := range a {
+ n.From = name
+ es.stream <- n
+ }
+ es.wg.Done()
+ }(ec)
+}
+
+func (es *EvtStream) Handle(path string, handler func(Event)) {
+ es.Handlers[cleanPath(path)] = handler
+}
+
+func findMatch(mux map[string]func(Event), path string) string {
+ n := -1
+ pattern := ""
+ for m := range mux {
+ if !isPathMatch(m, path) {
+ continue
+ }
+ if len(m) > n {
+ pattern = m
+ n = len(m)
+ }
+ }
+ return pattern
+
+}
+// ResetHandlers removes all existing defined Handlers from the map
+func (es *EvtStream) ResetHandlers() {
+ for Path, _ := range es.Handlers {
+ delete(es.Handlers, Path)
+ }
+ return
+}
+
+func (es *EvtStream) match(path string) string {
+ return findMatch(es.Handlers, path)
+}
+
+func (es *EvtStream) Hook(f func(Event)) {
+ es.hook = f
+}
+
+func (es *EvtStream) Loop() {
+ for e := range es.stream {
+ switch e.Path {
+ case "/sig/stoploop":
+ return
+ }
+ go func(a Event) {
+ es.RLock()
+ defer es.RUnlock()
+ if pattern := es.match(a.Path); pattern != "" {
+ es.Handlers[pattern](a)
+ }
+ }(e)
+ if es.hook != nil {
+ es.hook(e)
+ }
+ }
+}
+
+func (es *EvtStream) StopLoop() {
+ go func() {
+ e := Event{
+ Path: "/sig/stoploop",
+ }
+ es.sigStopLoop <- e
+ }()
+}
+
+func Merge(name string, ec chan Event) {
+ DefaultEvtStream.Merge(name, ec)
+}
+
+func Handle(path string, handler func(Event)) {
+ DefaultEvtStream.Handle(path, handler)
+}
+
+func Loop() {
+ DefaultEvtStream.Loop()
+}
+
+func StopLoop() {
+ DefaultEvtStream.StopLoop()
+}
+
+type EvtTimer struct {
+ Duration time.Duration
+ Count uint64
+}
+
+func NewTimerCh(du time.Duration) chan Event {
+ t := make(chan Event)
+
+ go func(a chan Event) {
+ n := uint64(0)
+ for {
+ n++
+ time.Sleep(du)
+ e := Event{}
+ e.Type = "timer"
+ e.Path = "/timer/" + du.String()
+ e.Time = time.Now().Unix()
+ e.Data = EvtTimer{
+ Duration: du,
+ Count: n,
+ }
+ t <- e
+
+ }
+ }(t)
+ return t
+}
+
+var DefualtHandler = func(e Event) {
+}
+
+var usrEvtCh = make(chan Event)
+
+func SendCustomEvt(path string, data interface{}) {
+ e := Event{}
+ e.Path = path
+ e.Data = data
+ e.Time = time.Now().Unix()
+ usrEvtCh <- e
+}
diff --git a/vendor/github.com/gizak/termui/gauge.go b/vendor/github.com/gizak/termui/gauge.go
new file mode 100644
index 000000000..01af0ea7c
--- /dev/null
+++ b/vendor/github.com/gizak/termui/gauge.go
@@ -0,0 +1,109 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "strconv"
+ "strings"
+)
+
+// Gauge is a progress bar like widget.
+// A simple example:
+/*
+ g := termui.NewGauge()
+ g.Percent = 40
+ g.Width = 50
+ g.Height = 3
+ g.BorderLabel = "Slim Gauge"
+ g.BarColor = termui.ColorRed
+ g.PercentColor = termui.ColorBlue
+*/
+
+const ColorUndef Attribute = Attribute(^uint16(0))
+
+type Gauge struct {
+ Block
+ Percent int
+ BarColor Attribute
+ PercentColor Attribute
+ PercentColorHighlighted Attribute
+ Label string
+ LabelAlign Align
+}
+
+// NewGauge returns a new gauge with the current theme.
+func NewGauge() *Gauge {
+ g := &Gauge{
+ Block: *NewBlock(),
+ PercentColor: ThemeAttr("gauge.percent.fg"),
+ BarColor: ThemeAttr("gauge.bar.bg"),
+ Label: "{{percent}}%",
+ LabelAlign: AlignCenter,
+ PercentColorHighlighted: ColorUndef,
+ }
+
+ g.Width = 12
+ g.Height = 5
+ return g
+}
+
+// Buffer implements Bufferer interface.
+func (g *Gauge) Buffer() Buffer {
+ buf := g.Block.Buffer()
+
+ // plot bar
+ w := g.Percent * g.innerArea.Dx() / 100
+ for i := 0; i < g.innerArea.Dy(); i++ {
+ for j := 0; j < w; j++ {
+ c := Cell{}
+ c.Ch = ' '
+ c.Bg = g.BarColor
+ if c.Bg == ColorDefault {
+ c.Bg |= AttrReverse
+ }
+ buf.Set(g.innerArea.Min.X+j, g.innerArea.Min.Y+i, c)
+ }
+ }
+
+ // plot percentage
+ s := strings.Replace(g.Label, "{{percent}}", strconv.Itoa(g.Percent), -1)
+ pry := g.innerArea.Min.Y + g.innerArea.Dy()/2
+ rs := str2runes(s)
+ var pos int
+ switch g.LabelAlign {
+ case AlignLeft:
+ pos = 0
+
+ case AlignCenter:
+ pos = (g.innerArea.Dx() - strWidth(s)) / 2
+
+ case AlignRight:
+ pos = g.innerArea.Dx() - strWidth(s) - 1
+ }
+ pos += g.innerArea.Min.X
+
+ for i, v := range rs {
+ c := Cell{
+ Ch: v,
+ Fg: g.PercentColor,
+ }
+
+ if w+g.innerArea.Min.X > pos+i {
+ c.Bg = g.BarColor
+ if c.Bg == ColorDefault {
+ c.Bg |= AttrReverse
+ }
+
+ if g.PercentColorHighlighted != ColorUndef {
+ c.Fg = g.PercentColorHighlighted
+ }
+ } else {
+ c.Bg = g.Block.Bg
+ }
+
+ buf.Set(1+pos+i, pry, c)
+ }
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/glide.lock b/vendor/github.com/gizak/termui/glide.lock
new file mode 100644
index 000000000..798e944d3
--- /dev/null
+++ b/vendor/github.com/gizak/termui/glide.lock
@@ -0,0 +1,14 @@
+hash: 67a478802ee1d122cf1df063c52458d074864900c96a5cc25dc6be4b7638eb1c
+updated: 2016-04-06T21:16:00.849048757-04:00
+imports:
+- name: github.com/mattn/go-runewidth
+ version: d6bea18f789704b5f83375793155289da36a3c7f
+- name: github.com/mitchellh/go-wordwrap
+ version: ad45545899c7b13c020ea92b2072220eefad42b8
+- name: github.com/nsf/termbox-go
+ version: 362329b0aa6447eadd52edd8d660ec1dff470295
+- name: golang.org/x/net
+ version: af4fee9d05b66edc24197d189e6118f8ebce8c2b
+ subpackages:
+ - websocket
+devImports: []
diff --git a/vendor/github.com/gizak/termui/glide.yaml b/vendor/github.com/gizak/termui/glide.yaml
new file mode 100644
index 000000000..d8e445254
--- /dev/null
+++ b/vendor/github.com/gizak/termui/glide.yaml
@@ -0,0 +1,8 @@
+package: github.com/gizak/termui
+import:
+- package: github.com/mattn/go-runewidth
+- package: github.com/mitchellh/go-wordwrap
+- package: github.com/nsf/termbox-go
+- package: golang.org/x/net
+ subpackages:
+ - websocket
diff --git a/vendor/github.com/gizak/termui/grid.go b/vendor/github.com/gizak/termui/grid.go
new file mode 100644
index 000000000..364442e02
--- /dev/null
+++ b/vendor/github.com/gizak/termui/grid.go
@@ -0,0 +1,279 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+// GridBufferer introduces a Bufferer that can be manipulated by Grid.
+type GridBufferer interface {
+ Bufferer
+ GetHeight() int
+ SetWidth(int)
+ SetX(int)
+ SetY(int)
+}
+
+// Row builds a layout tree
+type Row struct {
+ Cols []*Row //children
+ Widget GridBufferer // root
+ X int
+ Y int
+ Width int
+ Height int
+ Span int
+ Offset int
+}
+
+// calculate and set the underlying layout tree's x, y, height and width.
+func (r *Row) calcLayout() {
+ r.assignWidth(r.Width)
+ r.Height = r.solveHeight()
+ r.assignX(r.X)
+ r.assignY(r.Y)
+}
+
+// tell if the node is leaf in the tree.
+func (r *Row) isLeaf() bool {
+ return r.Cols == nil || len(r.Cols) == 0
+}
+
+func (r *Row) isRenderableLeaf() bool {
+ return r.isLeaf() && r.Widget != nil
+}
+
+// assign widgets' (and their parent rows') width recursively.
+func (r *Row) assignWidth(w int) {
+ r.SetWidth(w)
+
+ accW := 0 // acc span and offset
+ calcW := make([]int, len(r.Cols)) // calculated width
+	calcOftX := make([]int, len(r.Cols)) // computed start position of x
+
+ for i, c := range r.Cols {
+ accW += c.Span + c.Offset
+ cw := int(float64(c.Span*r.Width) / 12.0)
+
+ if i >= 1 {
+ calcOftX[i] = calcOftX[i-1] +
+ calcW[i-1] +
+ int(float64(r.Cols[i-1].Offset*r.Width)/12.0)
+ }
+
+ // use up the space if it is the last col
+ if i == len(r.Cols)-1 && accW == 12 {
+ cw = r.Width - calcOftX[i]
+ }
+ calcW[i] = cw
+ r.Cols[i].assignWidth(cw)
+ }
+}
+
+// bottom up calc and set rows' (and their widgets') height,
+// return r's total height.
+func (r *Row) solveHeight() int {
+ if r.isRenderableLeaf() {
+ r.Height = r.Widget.GetHeight()
+ return r.Widget.GetHeight()
+ }
+
+ maxh := 0
+ if !r.isLeaf() {
+ for _, c := range r.Cols {
+ nh := c.solveHeight()
+ // when embed rows in Cols, row widgets stack up
+ if r.Widget != nil {
+ nh += r.Widget.GetHeight()
+ }
+ if nh > maxh {
+ maxh = nh
+ }
+ }
+ }
+
+ r.Height = maxh
+ return maxh
+}
+
+// recursively assign x position for r tree.
+func (r *Row) assignX(x int) {
+ r.SetX(x)
+
+ if !r.isLeaf() {
+ acc := 0
+ for i, c := range r.Cols {
+ if c.Offset != 0 {
+ acc += int(float64(c.Offset*r.Width) / 12.0)
+ }
+ r.Cols[i].assignX(x + acc)
+ acc += c.Width
+ }
+ }
+}
+
+// recursively assign y position to r.
+func (r *Row) assignY(y int) {
+ r.SetY(y)
+
+ if r.isLeaf() {
+ return
+ }
+
+ for i := range r.Cols {
+ acc := 0
+ if r.Widget != nil {
+ acc = r.Widget.GetHeight()
+ }
+ r.Cols[i].assignY(y + acc)
+ }
+
+}
+
+// GetHeight implements GridBufferer interface.
+func (r Row) GetHeight() int {
+ return r.Height
+}
+
+// SetX implements GridBufferer interface.
+func (r *Row) SetX(x int) {
+ r.X = x
+ if r.Widget != nil {
+ r.Widget.SetX(x)
+ }
+}
+
+// SetY implements GridBufferer interface.
+func (r *Row) SetY(y int) {
+ r.Y = y
+ if r.Widget != nil {
+ r.Widget.SetY(y)
+ }
+}
+
+// SetWidth implements GridBufferer interface.
+func (r *Row) SetWidth(w int) {
+ r.Width = w
+ if r.Widget != nil {
+ r.Widget.SetWidth(w)
+ }
+}
+
+// Buffer implements Bufferer interface,
+// recursively merge all widgets buffer
+func (r *Row) Buffer() Buffer {
+ merged := NewBuffer()
+
+ if r.isRenderableLeaf() {
+ return r.Widget.Buffer()
+ }
+
+ // for those are not leaves but have a renderable widget
+ if r.Widget != nil {
+ merged.Merge(r.Widget.Buffer())
+ }
+
+ // collect buffer from children
+ if !r.isLeaf() {
+ for _, c := range r.Cols {
+ merged.Merge(c.Buffer())
+ }
+ }
+
+ return merged
+}
+
+// Grid implements 12 columns system.
+// A simple example:
+/*
+ import ui "github.com/gizak/termui"
+ // init and create widgets...
+
+ // build
+ ui.Body.AddRows(
+ ui.NewRow(
+ ui.NewCol(6, 0, widget0),
+ ui.NewCol(6, 0, widget1)),
+ ui.NewRow(
+ ui.NewCol(3, 0, widget2),
+ ui.NewCol(3, 0, widget30, widget31, widget32),
+ ui.NewCol(6, 0, widget4)))
+
+ // calculate layout
+ ui.Body.Align()
+
+ ui.Render(ui.Body)
+*/
+type Grid struct {
+ Rows []*Row
+ Width int
+ X int
+ Y int
+ BgColor Attribute
+}
+
+// NewGrid returns *Grid with given rows.
+func NewGrid(rows ...*Row) *Grid {
+ return &Grid{Rows: rows}
+}
+
+// AddRows appends given rows to Grid.
+func (g *Grid) AddRows(rs ...*Row) {
+ g.Rows = append(g.Rows, rs...)
+}
+
+// NewRow creates a new row out of given columns.
+func NewRow(cols ...*Row) *Row {
+ rs := &Row{Span: 12, Cols: cols}
+ return rs
+}
+
+// NewCol accepts: widgets are LayoutBufferer or widgets is A NewRow.
+// Note that if multiple widgets are provided, they will stack up in the col.
+func NewCol(span, offset int, widgets ...GridBufferer) *Row {
+ r := &Row{Span: span, Offset: offset}
+
+ if widgets != nil && len(widgets) == 1 {
+ wgt := widgets[0]
+ nw, isRow := wgt.(*Row)
+ if isRow {
+ r.Cols = nw.Cols
+ } else {
+ r.Widget = wgt
+ }
+ return r
+ }
+
+ r.Cols = []*Row{}
+ ir := r
+ for _, w := range widgets {
+ nr := &Row{Span: 12, Widget: w}
+ ir.Cols = []*Row{nr}
+ ir = nr
+ }
+
+ return r
+}
+
+// Align calculate each rows' layout.
+func (g *Grid) Align() {
+ h := 0
+ for _, r := range g.Rows {
+ r.SetWidth(g.Width)
+ r.SetX(g.X)
+ r.SetY(g.Y + h)
+ r.calcLayout()
+ h += r.GetHeight()
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (g Grid) Buffer() Buffer {
+ buf := NewBuffer()
+
+ for _, r := range g.Rows {
+ buf.Merge(r.Buffer())
+ }
+ return buf
+}
+
+var Body *Grid
diff --git a/vendor/github.com/gizak/termui/helper.go b/vendor/github.com/gizak/termui/helper.go
new file mode 100644
index 000000000..308f6c1db
--- /dev/null
+++ b/vendor/github.com/gizak/termui/helper.go
@@ -0,0 +1,222 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "regexp"
+ "strings"
+
+ tm "github.com/nsf/termbox-go"
+)
+import rw "github.com/mattn/go-runewidth"
+
+/* ---------------Port from termbox-go --------------------- */
+
+// Attribute is printable cell's color and style.
+type Attribute uint16
+
+// 8 basic colors
+const (
+ ColorDefault Attribute = iota
+ ColorBlack
+ ColorRed
+ ColorGreen
+ ColorYellow
+ ColorBlue
+ ColorMagenta
+ ColorCyan
+ ColorWhite
+)
+
+//Have a constant that defines number of colors
+const NumberofColors = 8
+
+// Text style
+const (
+ AttrBold Attribute = 1 << (iota + 9)
+ AttrUnderline
+ AttrReverse
+)
+
+var (
+ dot = "…"
+ dotw = rw.StringWidth(dot)
+)
+
+/* ----------------------- End ----------------------------- */
+
+func toTmAttr(x Attribute) tm.Attribute {
+ return tm.Attribute(x)
+}
+
+func str2runes(s string) []rune {
+ return []rune(s)
+}
+
+// Here for backwards-compatibility.
+func trimStr2Runes(s string, w int) []rune {
+ return TrimStr2Runes(s, w)
+}
+
+// TrimStr2Runes trims string to w[-1 rune], appends …, and returns the runes
+// of that string if the string is wider than w. If the string is narrower than w,
+// it returns the runes unchanged.
+func TrimStr2Runes(s string, w int) []rune {
+ if w <= 0 {
+ return []rune{}
+ }
+
+ sw := rw.StringWidth(s)
+ if sw > w {
+ return []rune(rw.Truncate(s, w, dot))
+ }
+ return str2runes(s)
+}
+
+// TrimStrIfAppropriate trim string to "s[:-1] + …"
+// if string > width otherwise return string
+func TrimStrIfAppropriate(s string, w int) string {
+ if w <= 0 {
+ return ""
+ }
+
+ sw := rw.StringWidth(s)
+ if sw > w {
+ return rw.Truncate(s, w, dot)
+ }
+
+ return s
+}
+
+func strWidth(s string) int {
+ return rw.StringWidth(s)
+}
+
+func charWidth(ch rune) int {
+ return rw.RuneWidth(ch)
+}
+
+var whiteSpaceRegex = regexp.MustCompile(`\s`)
+
+// StringToAttribute converts text to a termui attribute. You may specify more
+// than one attribute like this: "BLACK, BOLD, ...". All whitespaces
+// are ignored.
+func StringToAttribute(text string) Attribute {
+ text = whiteSpaceRegex.ReplaceAllString(strings.ToLower(text), "")
+ attributes := strings.Split(text, ",")
+ result := Attribute(0)
+
+ for _, theAttribute := range attributes {
+ var match Attribute
+ switch theAttribute {
+ case "reset", "default":
+ match = ColorDefault
+
+ case "black":
+ match = ColorBlack
+
+ case "red":
+ match = ColorRed
+
+ case "green":
+ match = ColorGreen
+
+ case "yellow":
+ match = ColorYellow
+
+ case "blue":
+ match = ColorBlue
+
+ case "magenta":
+ match = ColorMagenta
+
+ case "cyan":
+ match = ColorCyan
+
+ case "white":
+ match = ColorWhite
+
+ case "bold":
+ match = AttrBold
+
+ case "underline":
+ match = AttrUnderline
+
+ case "reverse":
+ match = AttrReverse
+ }
+
+ result |= match
+ }
+
+ return result
+}
+
+// TextCells returns a coloured text cells []Cell
+func TextCells(s string, fg, bg Attribute) []Cell {
+ cs := make([]Cell, 0, len(s))
+
+ // sequence := MarkdownTextRendererFactory{}.TextRenderer(s).Render(fg, bg)
+ // runes := []rune(sequence.NormalizedText)
+ runes := str2runes(s)
+
+ for n := range runes {
+ // point, _ := sequence.PointAt(n, 0, 0)
+ // cs = append(cs, Cell{point.Ch, point.Fg, point.Bg})
+ cs = append(cs, Cell{runes[n], fg, bg})
+ }
+ return cs
+}
+
+// Width returns the actual screen space the cell takes (usually 1 or 2).
+func (c Cell) Width() int {
+ return charWidth(c.Ch)
+}
+
+// Copy return a copy of c
+func (c Cell) Copy() Cell {
+ return c
+}
+
+// TrimTxCells trims the overflowed text cells sequence.
+func TrimTxCells(cs []Cell, w int) []Cell {
+ if len(cs) <= w {
+ return cs
+ }
+ return cs[:w]
+}
+
+// DTrimTxCls trims the overflowed text cells sequence and append dots at the end.
+func DTrimTxCls(cs []Cell, w int) []Cell {
+ l := len(cs)
+ if l <= 0 {
+ return []Cell{}
+ }
+
+ rt := make([]Cell, 0, w)
+ csw := 0
+ for i := 0; i < l && csw <= w; i++ {
+ c := cs[i]
+ cw := c.Width()
+
+ if cw+csw < w {
+ rt = append(rt, c)
+ csw += cw
+ } else {
+ rt = append(rt, Cell{'…', c.Fg, c.Bg})
+ break
+ }
+ }
+
+ return rt
+}
+
+func CellsToStr(cs []Cell) string {
+ str := ""
+ for _, c := range cs {
+ str += string(c.Ch)
+ }
+ return str
+}
diff --git a/vendor/github.com/gizak/termui/linechart.go b/vendor/github.com/gizak/termui/linechart.go
new file mode 100644
index 000000000..81834efdb
--- /dev/null
+++ b/vendor/github.com/gizak/termui/linechart.go
@@ -0,0 +1,331 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "fmt"
+ "math"
+)
+
+// only 16 possible combinations, why bother
+var braillePatterns = map[[2]int]rune{
+ [2]int{0, 0}: '⣀',
+ [2]int{0, 1}: '⡠',
+ [2]int{0, 2}: '⡐',
+ [2]int{0, 3}: '⡈',
+
+ [2]int{1, 0}: '⢄',
+ [2]int{1, 1}: '⠤',
+ [2]int{1, 2}: '⠔',
+ [2]int{1, 3}: '⠌',
+
+ [2]int{2, 0}: '⢂',
+ [2]int{2, 1}: '⠢',
+ [2]int{2, 2}: '⠒',
+ [2]int{2, 3}: '⠊',
+
+ [2]int{3, 0}: '⢁',
+ [2]int{3, 1}: '⠡',
+ [2]int{3, 2}: '⠑',
+ [2]int{3, 3}: '⠉',
+}
+
+var lSingleBraille = [4]rune{'\u2840', '⠄', '⠂', '⠁'}
+var rSingleBraille = [4]rune{'\u2880', '⠠', '⠐', '⠈'}
+
+// LineChart has two modes: braille(default) and dot. Braille mode gives 2x the capacity of dot mode,
+// because one braille char can represent two data points.
+/*
+ lc := termui.NewLineChart()
+ lc.BorderLabel = "braille-mode Line Chart"
+ lc.Data = [1.2, 1.3, 1.5, 1.7, 1.5, 1.6, 1.8, 2.0]
+ lc.Width = 50
+ lc.Height = 12
+ lc.AxesColor = termui.ColorWhite
+ lc.LineColor = termui.ColorGreen | termui.AttrBold
+ // termui.Render(lc)...
+*/
+type LineChart struct {
+ Block
+ Data []float64
+ DataLabels []string // if unset, the data indices will be used
+ Mode string // braille | dot
+ DotStyle rune
+ LineColor Attribute
+ scale float64 // data span per cell on y-axis
+ AxesColor Attribute
+ drawingX int
+ drawingY int
+ axisYHeight int
+ axisXWidth int
+ axisYLabelGap int
+ axisXLabelGap int
+ topValue float64
+ bottomValue float64
+ labelX [][]rune
+ labelY [][]rune
+ labelYSpace int
+ maxY float64
+ minY float64
+ autoLabels bool
+}
+
+// NewLineChart returns a new LineChart with current theme.
+func NewLineChart() *LineChart {
+ lc := &LineChart{Block: *NewBlock()}
+ lc.AxesColor = ThemeAttr("linechart.axes.fg")
+ lc.LineColor = ThemeAttr("linechart.line.fg")
+ lc.Mode = "braille"
+ lc.DotStyle = '•'
+ lc.axisXLabelGap = 2
+ lc.axisYLabelGap = 1
+ lc.bottomValue = math.Inf(1)
+ lc.topValue = math.Inf(-1)
+ return lc
+}
+
+// one cell contains two data points
+// so the capacity is 2x that of dot-mode
+func (lc *LineChart) renderBraille() Buffer {
+ buf := NewBuffer()
+
+ // return: b -> which cell should the point be in
+ // m -> in the cell, divided into 4 equal height levels, which subcell?
+ getPos := func(d float64) (b, m int) {
+ cnt4 := int((d-lc.bottomValue)/(lc.scale/4) + 0.5)
+ b = cnt4 / 4
+ m = cnt4 % 4
+ return
+ }
+ // plot points
+ for i := 0; 2*i+1 < len(lc.Data) && i < lc.axisXWidth; i++ {
+ b0, m0 := getPos(lc.Data[2*i])
+ b1, m1 := getPos(lc.Data[2*i+1])
+
+ if b0 == b1 {
+ c := Cell{
+ Ch: braillePatterns[[2]int{m0, m1}],
+ Bg: lc.Bg,
+ Fg: lc.LineColor,
+ }
+ y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0
+ x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
+ buf.Set(x, y, c)
+ } else {
+ c0 := Cell{Ch: lSingleBraille[m0],
+ Fg: lc.LineColor,
+ Bg: lc.Bg}
+ x0 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
+ y0 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0
+ buf.Set(x0, y0, c0)
+
+ c1 := Cell{Ch: rSingleBraille[m1],
+ Fg: lc.LineColor,
+ Bg: lc.Bg}
+ x1 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
+ y1 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b1
+ buf.Set(x1, y1, c1)
+ }
+
+ }
+ return buf
+}
+
+func (lc *LineChart) renderDot() Buffer {
+ buf := NewBuffer()
+ for i := 0; i < len(lc.Data) && i < lc.axisXWidth; i++ {
+ c := Cell{
+ Ch: lc.DotStyle,
+ Fg: lc.LineColor,
+ Bg: lc.Bg,
+ }
+ x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
+ y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - int((lc.Data[i]-lc.bottomValue)/lc.scale+0.5)
+ buf.Set(x, y, c)
+ }
+
+ return buf
+}
+
+func (lc *LineChart) calcLabelX() {
+ lc.labelX = [][]rune{}
+
+ for i, l := 0, 0; i < len(lc.DataLabels) && l < lc.axisXWidth; i++ {
+ if lc.Mode == "dot" {
+ if l >= len(lc.DataLabels) {
+ break
+ }
+
+ s := str2runes(lc.DataLabels[l])
+ w := strWidth(lc.DataLabels[l])
+ if l+w <= lc.axisXWidth {
+ lc.labelX = append(lc.labelX, s)
+ }
+ l += w + lc.axisXLabelGap
+ } else { // braille
+ if 2*l >= len(lc.DataLabels) {
+ break
+ }
+
+ s := str2runes(lc.DataLabels[2*l])
+ w := strWidth(lc.DataLabels[2*l])
+ if l+w <= lc.axisXWidth {
+ lc.labelX = append(lc.labelX, s)
+ }
+ l += w + lc.axisXLabelGap
+
+ }
+ }
+}
+
+func shortenFloatVal(x float64) string {
+ s := fmt.Sprintf("%.2f", x)
+ if len(s)-3 > 3 {
+ s = fmt.Sprintf("%.2e", x)
+ }
+
+ if x < 0 {
+ s = fmt.Sprintf("%.2f", x)
+ }
+ return s
+}
+
+func (lc *LineChart) calcLabelY() {
+ span := lc.topValue - lc.bottomValue
+ lc.scale = span / float64(lc.axisYHeight)
+
+ n := (1 + lc.axisYHeight) / (lc.axisYLabelGap + 1)
+ lc.labelY = make([][]rune, n)
+ maxLen := 0
+ for i := 0; i < n; i++ {
+ s := str2runes(shortenFloatVal(lc.bottomValue + float64(i)*span/float64(n)))
+ if len(s) > maxLen {
+ maxLen = len(s)
+ }
+ lc.labelY[i] = s
+ }
+
+ lc.labelYSpace = maxLen
+}
+
+func (lc *LineChart) calcLayout() {
+ // set datalabels if it is not provided
+ if (lc.DataLabels == nil || len(lc.DataLabels) == 0) || lc.autoLabels {
+ lc.autoLabels = true
+ lc.DataLabels = make([]string, len(lc.Data))
+ for i := range lc.Data {
+ lc.DataLabels[i] = fmt.Sprint(i)
+ }
+ }
+
+ // lazy increase, to avoid y shaking frequently
+ // update bound Y when drawing is gonna overflow
+ lc.minY = lc.Data[0]
+ lc.maxY = lc.Data[0]
+
+ // valid visible range
+ vrange := lc.innerArea.Dx()
+ if lc.Mode == "braille" {
+ vrange = 2 * lc.innerArea.Dx()
+ }
+ if vrange > len(lc.Data) {
+ vrange = len(lc.Data)
+ }
+
+ for _, v := range lc.Data[:vrange] {
+ if v > lc.maxY {
+ lc.maxY = v
+ }
+ if v < lc.minY {
+ lc.minY = v
+ }
+ }
+
+ span := lc.maxY - lc.minY
+
+ if lc.minY < lc.bottomValue {
+ lc.bottomValue = lc.minY - 0.2*span
+ }
+
+ if lc.maxY > lc.topValue {
+ lc.topValue = lc.maxY + 0.2*span
+ }
+
+ lc.axisYHeight = lc.innerArea.Dy() - 2
+ lc.calcLabelY()
+
+ lc.axisXWidth = lc.innerArea.Dx() - 1 - lc.labelYSpace
+ lc.calcLabelX()
+
+ lc.drawingX = lc.innerArea.Min.X + 1 + lc.labelYSpace
+ lc.drawingY = lc.innerArea.Min.Y
+}
+
+func (lc *LineChart) plotAxes() Buffer {
+ buf := NewBuffer()
+
+ origY := lc.innerArea.Min.Y + lc.innerArea.Dy() - 2
+ origX := lc.innerArea.Min.X + lc.labelYSpace
+
+ buf.Set(origX, origY, Cell{Ch: ORIGIN, Fg: lc.AxesColor, Bg: lc.Bg})
+
+ for x := origX + 1; x < origX+lc.axisXWidth; x++ {
+ buf.Set(x, origY, Cell{Ch: HDASH, Fg: lc.AxesColor, Bg: lc.Bg})
+ }
+
+ for dy := 1; dy <= lc.axisYHeight; dy++ {
+ buf.Set(origX, origY-dy, Cell{Ch: VDASH, Fg: lc.AxesColor, Bg: lc.Bg})
+ }
+
+ // x label
+ oft := 0
+ for _, rs := range lc.labelX {
+ if oft+len(rs) > lc.axisXWidth {
+ break
+ }
+ for j, r := range rs {
+ c := Cell{
+ Ch: r,
+ Fg: lc.AxesColor,
+ Bg: lc.Bg,
+ }
+ x := origX + oft + j
+ y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 1
+ buf.Set(x, y, c)
+ }
+ oft += len(rs) + lc.axisXLabelGap
+ }
+
+ // y labels
+ for i, rs := range lc.labelY {
+ for j, r := range rs {
+ buf.Set(
+ lc.innerArea.Min.X+j,
+ origY-i*(lc.axisYLabelGap+1),
+ Cell{Ch: r, Fg: lc.AxesColor, Bg: lc.Bg})
+ }
+ }
+
+ return buf
+}
+
+// Buffer implements Bufferer interface.
+func (lc *LineChart) Buffer() Buffer {
+ buf := lc.Block.Buffer()
+
+ if lc.Data == nil || len(lc.Data) == 0 {
+ return buf
+ }
+ lc.calcLayout()
+ buf.Merge(lc.plotAxes())
+
+ if lc.Mode == "dot" {
+ buf.Merge(lc.renderDot())
+ } else {
+ buf.Merge(lc.renderBraille())
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/linechart_others.go b/vendor/github.com/gizak/termui/linechart_others.go
new file mode 100644
index 000000000..7e2e66b3e
--- /dev/null
+++ b/vendor/github.com/gizak/termui/linechart_others.go
@@ -0,0 +1,11 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build !windows
+
+package termui
+
+const VDASH = '┊'
+const HDASH = '┈'
+const ORIGIN = '└'
diff --git a/vendor/github.com/gizak/termui/linechart_windows.go b/vendor/github.com/gizak/termui/linechart_windows.go
new file mode 100644
index 000000000..1478b5ce3
--- /dev/null
+++ b/vendor/github.com/gizak/termui/linechart_windows.go
@@ -0,0 +1,11 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+// +build windows
+
+package termui
+
+const VDASH = '|'
+const HDASH = '-'
+const ORIGIN = '+'
diff --git a/vendor/github.com/gizak/termui/list.go b/vendor/github.com/gizak/termui/list.go
new file mode 100644
index 000000000..0029d2622
--- /dev/null
+++ b/vendor/github.com/gizak/termui/list.go
@@ -0,0 +1,89 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "strings"
+
+// List displays []string as its items,
+// it has a Overflow option (default is "hidden"), when set to "hidden",
+// the item exceeding List's width is truncated, but when set to "wrap",
+// the overflowed text breaks into next line.
+/*
+ strs := []string{
+ "[0] github.com/gizak/termui",
+ "[1] editbox.go",
+ "[2] iterrupt.go",
+ "[3] keyboard.go",
+ "[4] output.go",
+ "[5] random_out.go",
+ "[6] dashboard.go",
+ "[7] nsf/termbox-go"}
+
+ ls := termui.NewList()
+ ls.Items = strs
+ ls.ItemFgColor = termui.ColorYellow
+ ls.BorderLabel = "List"
+ ls.Height = 7
+ ls.Width = 25
+ ls.Y = 0
+*/
+type List struct {
+ Block
+ Items []string
+ Overflow string
+ ItemFgColor Attribute
+ ItemBgColor Attribute
+}
+
+// NewList returns a new *List with current theme.
+func NewList() *List {
+ l := &List{Block: *NewBlock()}
+ l.Overflow = "hidden"
+ l.ItemFgColor = ThemeAttr("list.item.fg")
+ l.ItemBgColor = ThemeAttr("list.item.bg")
+ return l
+}
+
+// Buffer implements Bufferer interface.
+func (l *List) Buffer() Buffer {
+ buf := l.Block.Buffer()
+
+ switch l.Overflow {
+ case "wrap":
+ cs := DefaultTxBuilder.Build(strings.Join(l.Items, "\n"), l.ItemFgColor, l.ItemBgColor)
+ i, j, k := 0, 0, 0
+ for i < l.innerArea.Dy() && k < len(cs) {
+ w := cs[k].Width()
+ if cs[k].Ch == '\n' || j+w > l.innerArea.Dx() {
+ i++
+ j = 0
+ if cs[k].Ch == '\n' {
+ k++
+ }
+ continue
+ }
+ buf.Set(l.innerArea.Min.X+j, l.innerArea.Min.Y+i, cs[k])
+
+ k++
+ j++
+ }
+
+ case "hidden":
+ trimItems := l.Items
+ if len(trimItems) > l.innerArea.Dy() {
+ trimItems = trimItems[:l.innerArea.Dy()]
+ }
+ for i, v := range trimItems {
+ cs := DTrimTxCls(DefaultTxBuilder.Build(v, l.ItemFgColor, l.ItemBgColor), l.innerArea.Dx())
+ j := 0
+ for _, vv := range cs {
+ w := vv.Width()
+ buf.Set(l.innerArea.Min.X+j, l.innerArea.Min.Y+i, vv)
+ j += w
+ }
+ }
+ }
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/mbarchart.go b/vendor/github.com/gizak/termui/mbarchart.go
new file mode 100644
index 000000000..6828e6533
--- /dev/null
+++ b/vendor/github.com/gizak/termui/mbarchart.go
@@ -0,0 +1,242 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "fmt"
+)
+
+// This is the implementation of a multi-colored or stacked bar graph. This is different from the default barGraph which is implemented in bar.go
+// Multi-Colored-BarChart creates multiple bars in a widget:
+/*
+ bc := termui.NewMBarChart()
+ data := make([][]int, 2)
+ data[0] := []int{3, 2, 5, 7, 9, 4}
+ data[1] := []int{7, 8, 5, 3, 1, 6}
+ bclabels := []string{"S0", "S1", "S2", "S3", "S4", "S5"}
+ bc.BorderLabel = "Bar Chart"
+ bc.Data = data
+ bc.Width = 26
+ bc.Height = 10
+ bc.DataLabels = bclabels
+ bc.TextColor = termui.ColorGreen
+ bc.BarColor = termui.ColorRed
+ bc.NumColor = termui.ColorYellow
+*/
+type MBarChart struct {
+ Block
+ BarColor [NumberofColors]Attribute
+ TextColor Attribute
+ NumColor [NumberofColors]Attribute
+ Data [NumberofColors][]int
+ DataLabels []string
+ BarWidth int
+ BarGap int
+ labels [][]rune
+ dataNum [NumberofColors][][]rune
+ numBar int
+ scale float64
+ max int
+ minDataLen int
+ numStack int
+ ShowScale bool
+ maxScale []rune
+}
+
+// NewMBarChart returns a new *MBarChart with the current theme.
+func NewMBarChart() *MBarChart {
+ bc := &MBarChart{Block: *NewBlock()}
+ bc.BarColor[0] = ThemeAttr("mbarchart.bar.bg")
+ bc.NumColor[0] = ThemeAttr("mbarchart.num.fg")
+ bc.TextColor = ThemeAttr("mbarchart.text.fg")
+ bc.BarGap = 1
+ bc.BarWidth = 3
+ return bc
+}
+
+func (bc *MBarChart) layout() {
+ bc.numBar = bc.innerArea.Dx() / (bc.BarGap + bc.BarWidth)
+ bc.labels = make([][]rune, bc.numBar)
+ DataLen := 0
+ LabelLen := len(bc.DataLabels)
+	bc.minDataLen = 9999 //Set this to some very high value so that we find the minimum one. We want to know which array among data[][] has the least length
+
+ // We need to know how many stack/data array data[0] , data[1] are there
+ for i := 0; i < len(bc.Data); i++ {
+ if bc.Data[i] == nil {
+ break
+ }
+ DataLen++
+ }
+ bc.numStack = DataLen
+
+	//We need to know the minimum size of the data arrays: data[0] could have 10 elements, data[1] only 5, so we plot only 5 bar graphs
+
+ for i := 0; i < DataLen; i++ {
+ if bc.minDataLen > len(bc.Data[i]) {
+ bc.minDataLen = len(bc.Data[i])
+ }
+ }
+
+ if LabelLen > bc.minDataLen {
+ LabelLen = bc.minDataLen
+ }
+
+ for i := 0; i < LabelLen && i < bc.numBar; i++ {
+ bc.labels[i] = trimStr2Runes(bc.DataLabels[i], bc.BarWidth)
+ }
+
+ for i := 0; i < bc.numStack; i++ {
+ bc.dataNum[i] = make([][]rune, len(bc.Data[i]))
+		//For each stack of the bar calculate the rune
+ for j := 0; j < LabelLen && i < bc.numBar; j++ {
+ n := bc.Data[i][j]
+ s := fmt.Sprint(n)
+ bc.dataNum[i][j] = trimStr2Runes(s, bc.BarWidth)
+ }
+		//If color is not defined by default then populate a color that is different from the previous bar
+ if bc.BarColor[i] == ColorDefault && bc.NumColor[i] == ColorDefault {
+ if i == 0 {
+ bc.BarColor[i] = ColorBlack
+ } else {
+ bc.BarColor[i] = bc.BarColor[i-1] + 1
+ if bc.BarColor[i] > NumberofColors {
+ bc.BarColor[i] = ColorBlack
+ }
+ }
+ bc.NumColor[i] = (NumberofColors + 1) - bc.BarColor[i] //Make NumColor opposite of barColor for visibility
+ }
+ }
+
+ //If Max value is not set then we have to populate, this time the max value will be max(sum(d1[0],d2[0],d3[0]) .... sum(d1[n], d2[n], d3[n]))
+
+ if bc.max == 0 {
+ bc.max = -1
+ }
+ for i := 0; i < bc.minDataLen && i < LabelLen; i++ {
+ var dsum int
+ for j := 0; j < bc.numStack; j++ {
+ dsum += bc.Data[j][i]
+ }
+ if dsum > bc.max {
+ bc.max = dsum
+ }
+ }
+
+	//Finally calculate the max scale
+ if bc.ShowScale {
+ s := fmt.Sprintf("%d", bc.max)
+ bc.maxScale = trimStr2Runes(s, len(s))
+ bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-2)
+ } else {
+ bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-1)
+ }
+
+}
+
+func (bc *MBarChart) SetMax(max int) {
+
+ if max > 0 {
+ bc.max = max
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (bc *MBarChart) Buffer() Buffer {
+ buf := bc.Block.Buffer()
+ bc.layout()
+ var oftX int
+
+ for i := 0; i < bc.numBar && i < bc.minDataLen && i < len(bc.DataLabels); i++ {
+ ph := 0 //Previous Height to stack up
+ oftX = i * (bc.BarWidth + bc.BarGap)
+ for i1 := 0; i1 < bc.numStack; i1++ {
+ h := int(float64(bc.Data[i1][i]) / bc.scale)
+ // plot bars
+ for j := 0; j < bc.BarWidth; j++ {
+ for k := 0; k < h; k++ {
+ c := Cell{
+ Ch: ' ',
+ Bg: bc.BarColor[i1],
+ }
+ if bc.BarColor[i1] == ColorDefault { // when color is default, space char treated as transparent!
+ c.Bg |= AttrReverse
+ }
+ x := bc.innerArea.Min.X + i*(bc.BarWidth+bc.BarGap) + j
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - k - ph
+ buf.Set(x, y, c)
+
+ }
+ }
+ ph += h
+ }
+ // plot text
+ for j, k := 0, 0; j < len(bc.labels[i]); j++ {
+ w := charWidth(bc.labels[i][j])
+ c := Cell{
+ Ch: bc.labels[i][j],
+ Bg: bc.Bg,
+ Fg: bc.TextColor,
+ }
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 1
+ x := bc.innerArea.Max.X + oftX + ((bc.BarWidth - len(bc.labels[i])) / 2) + k
+ buf.Set(x, y, c)
+ k += w
+ }
+ // plot num
+ ph = 0 //re-initialize previous height
+ for i1 := 0; i1 < bc.numStack; i1++ {
+ h := int(float64(bc.Data[i1][i]) / bc.scale)
+ for j := 0; j < len(bc.dataNum[i1][i]) && h > 0; j++ {
+ c := Cell{
+ Ch: bc.dataNum[i1][i][j],
+ Fg: bc.NumColor[i1],
+ Bg: bc.BarColor[i1],
+ }
+ if bc.BarColor[i1] == ColorDefault { // the same as above
+ c.Bg |= AttrReverse
+ }
+ if h == 0 {
+ c.Bg = bc.Bg
+ }
+ x := bc.innerArea.Min.X + oftX + (bc.BarWidth-len(bc.dataNum[i1][i]))/2 + j
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - ph
+ buf.Set(x, y, c)
+ }
+ ph += h
+ }
+ }
+
+ if bc.ShowScale {
+		//Currently the bar graph only supports data ranges from 0 to MAX
+ //Plot 0
+ c := Cell{
+ Ch: '0',
+ Bg: bc.Bg,
+ Fg: bc.TextColor,
+ }
+
+ y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2
+ x := bc.X
+ buf.Set(x, y, c)
+
+		//Plot the maximum scale value
+ for i := 0; i < len(bc.maxScale); i++ {
+ c := Cell{
+ Ch: bc.maxScale[i],
+ Bg: bc.Bg,
+ Fg: bc.TextColor,
+ }
+
+ y := bc.innerArea.Min.Y
+ x := bc.X + i
+
+ buf.Set(x, y, c)
+ }
+
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/mkdocs.yml b/vendor/github.com/gizak/termui/mkdocs.yml
new file mode 100644
index 000000000..2ab45f064
--- /dev/null
+++ b/vendor/github.com/gizak/termui/mkdocs.yml
@@ -0,0 +1,28 @@
+pages:
+- Home: 'index.md'
+- Quickstart: 'quickstart.md'
+- Recipes: 'recipes.md'
+- References:
+ - Layouts: 'layouts.md'
+ - Components: 'components.md'
+ - Events: 'events.md'
+ - Themes: 'themes.md'
+- Versions: 'versions.md'
+- About: 'about.md'
+
+site_name: termui
+repo_url: https://github.com/gizak/termui/
+site_description: 'termui user guide'
+site_author: gizak
+
+docs_dir: '_docs'
+
+theme: readthedocs
+
+markdown_extensions:
+ - smarty
+ - admonition
+ - toc
+
+extra:
+ version: 1.0
diff --git a/vendor/github.com/gizak/termui/par.go b/vendor/github.com/gizak/termui/par.go
new file mode 100644
index 000000000..78bffd091
--- /dev/null
+++ b/vendor/github.com/gizak/termui/par.go
@@ -0,0 +1,73 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+// Par displays a paragraph.
+/*
+ par := termui.NewPar("Simple Text")
+ par.Height = 3
+ par.Width = 17
+ par.BorderLabel = "Label"
+*/
+type Par struct {
+ Block
+ Text string
+ TextFgColor Attribute
+ TextBgColor Attribute
+ WrapLength int // words wrap limit. Note it may not work properly with multi-width char
+}
+
+// NewPar returns a new *Par with given text as its content.
+func NewPar(s string) *Par {
+ return &Par{
+ Block: *NewBlock(),
+ Text: s,
+ TextFgColor: ThemeAttr("par.text.fg"),
+ TextBgColor: ThemeAttr("par.text.bg"),
+ WrapLength: 0,
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (p *Par) Buffer() Buffer {
+ buf := p.Block.Buffer()
+
+ fg, bg := p.TextFgColor, p.TextBgColor
+ cs := DefaultTxBuilder.Build(p.Text, fg, bg)
+
+ // wrap if WrapLength set
+ if p.WrapLength < 0 {
+ cs = wrapTx(cs, p.Width-2)
+ } else if p.WrapLength > 0 {
+ cs = wrapTx(cs, p.WrapLength)
+ }
+
+ y, x, n := 0, 0, 0
+ for y < p.innerArea.Dy() && n < len(cs) {
+ w := cs[n].Width()
+ if cs[n].Ch == '\n' || x+w > p.innerArea.Dx() {
+ y++
+ x = 0 // set x = 0
+ if cs[n].Ch == '\n' {
+ n++
+ }
+
+ if y >= p.innerArea.Dy() {
+ buf.Set(p.innerArea.Min.X+p.innerArea.Dx()-1,
+ p.innerArea.Min.Y+p.innerArea.Dy()-1,
+ Cell{Ch: '…', Fg: p.TextFgColor, Bg: p.TextBgColor})
+ break
+ }
+ continue
+ }
+
+ buf.Set(p.innerArea.Min.X+x, p.innerArea.Min.Y+y, cs[n])
+
+ n++
+ x += w
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/pos.go b/vendor/github.com/gizak/termui/pos.go
new file mode 100644
index 000000000..2046dce5a
--- /dev/null
+++ b/vendor/github.com/gizak/termui/pos.go
@@ -0,0 +1,78 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "image"
+
+// Align is the position of the gauge's label.
+type Align uint
+
+// All supported positions.
+const (
+ AlignNone Align = 0
+ AlignLeft Align = 1 << iota
+ AlignRight
+ AlignBottom
+ AlignTop
+ AlignCenterVertical
+ AlignCenterHorizontal
+ AlignCenter = AlignCenterVertical | AlignCenterHorizontal
+)
+
+func AlignArea(parent, child image.Rectangle, a Align) image.Rectangle {
+ w, h := child.Dx(), child.Dy()
+
+ // parent center
+ pcx, pcy := parent.Min.X+parent.Dx()/2, parent.Min.Y+parent.Dy()/2
+ // child center
+ ccx, ccy := child.Min.X+child.Dx()/2, child.Min.Y+child.Dy()/2
+
+ if a&AlignLeft == AlignLeft {
+ child.Min.X = parent.Min.X
+ child.Max.X = child.Min.X + w
+ }
+
+ if a&AlignRight == AlignRight {
+ child.Max.X = parent.Max.X
+ child.Min.X = child.Max.X - w
+ }
+
+ if a&AlignBottom == AlignBottom {
+ child.Max.Y = parent.Max.Y
+ child.Min.Y = child.Max.Y - h
+ }
+
+ if a&AlignTop == AlignTop {
+ child.Min.Y = parent.Min.Y
+ child.Max.Y = child.Min.Y + h
+ }
+
+ if a&AlignCenterHorizontal == AlignCenterHorizontal {
+ child.Min.X += pcx - ccx
+ child.Max.X = child.Min.X + w
+ }
+
+ if a&AlignCenterVertical == AlignCenterVertical {
+ child.Min.Y += pcy - ccy
+ child.Max.Y = child.Min.Y + h
+ }
+
+ return child
+}
+
+func MoveArea(a image.Rectangle, dx, dy int) image.Rectangle {
+ a.Min.X += dx
+ a.Max.X += dx
+ a.Min.Y += dy
+ a.Max.Y += dy
+ return a
+}
+
+var termWidth int
+var termHeight int
+
+func TermRect() image.Rectangle {
+ return image.Rect(0, 0, termWidth, termHeight)
+}
diff --git a/vendor/github.com/gizak/termui/render.go b/vendor/github.com/gizak/termui/render.go
new file mode 100644
index 000000000..36544f063
--- /dev/null
+++ b/vendor/github.com/gizak/termui/render.go
@@ -0,0 +1,135 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "image"
+ "sync"
+ "time"
+
+ tm "github.com/nsf/termbox-go"
+)
+
+// Bufferer should be implemented by all renderable components.
+type Bufferer interface {
+ Buffer() Buffer
+}
+
+// Init initializes termui library. This function should be called before any others.
+// After initialization, the library must be finalized by 'Close' function.
+func Init() error {
+ if err := tm.Init(); err != nil {
+ return err
+ }
+
+ sysEvtChs = make([]chan Event, 0)
+ go hookTermboxEvt()
+
+ renderJobs = make(chan []Bufferer)
+ //renderLock = new(sync.RWMutex)
+
+ Body = NewGrid()
+ Body.X = 0
+ Body.Y = 0
+ Body.BgColor = ThemeAttr("bg")
+ Body.Width = TermWidth()
+
+ DefaultEvtStream.Init()
+ DefaultEvtStream.Merge("termbox", NewSysEvtCh())
+ DefaultEvtStream.Merge("timer", NewTimerCh(time.Second))
+ DefaultEvtStream.Merge("custom", usrEvtCh)
+
+ DefaultEvtStream.Handle("/", DefualtHandler)
+ DefaultEvtStream.Handle("/sys/wnd/resize", func(e Event) {
+ w := e.Data.(EvtWnd)
+ Body.Width = w.Width
+ })
+
+ DefaultWgtMgr = NewWgtMgr()
+ DefaultEvtStream.Hook(DefaultWgtMgr.WgtHandlersHook())
+
+ go func() {
+ for bs := range renderJobs {
+ render(bs...)
+ }
+ }()
+
+ return nil
+}
+
+// Close finalizes termui library,
+// should be called after successful initialization when termui's functionality isn't required anymore.
+func Close() {
+ tm.Close()
+}
+
+var renderLock sync.Mutex
+
+func termSync() {
+ renderLock.Lock()
+ tm.Sync()
+ termWidth, termHeight = tm.Size()
+ renderLock.Unlock()
+}
+
+// TermWidth returns the current terminal's width.
+func TermWidth() int {
+ termSync()
+ return termWidth
+}
+
+// TermHeight returns the current terminal's height.
+func TermHeight() int {
+ termSync()
+ return termHeight
+}
+
+ // render renders all Bufferers in the given order from left to right;
+ // later (right) ones may overlap earlier (left) ones.
+func render(bs ...Bufferer) {
+
+ for _, b := range bs {
+
+ buf := b.Buffer()
+ // set cells in buf
+ for p, c := range buf.CellMap {
+ if p.In(buf.Area) {
+
+ tm.SetCell(p.X, p.Y, c.Ch, toTmAttr(c.Fg), toTmAttr(c.Bg))
+
+ }
+ }
+
+ }
+
+ renderLock.Lock()
+ // render
+ tm.Flush()
+ renderLock.Unlock()
+}
+
+func Clear() {
+ tm.Clear(tm.ColorDefault, toTmAttr(ThemeAttr("bg")))
+}
+
+func clearArea(r image.Rectangle, bg Attribute) {
+ for i := r.Min.X; i < r.Max.X; i++ {
+ for j := r.Min.Y; j < r.Max.Y; j++ {
+ tm.SetCell(i, j, ' ', tm.ColorDefault, toTmAttr(bg))
+ }
+ }
+}
+
+func ClearArea(r image.Rectangle, bg Attribute) {
+ clearArea(r, bg)
+ tm.Flush()
+}
+
+var renderJobs chan []Bufferer
+
+func Render(bs ...Bufferer) {
+ //go func() { renderJobs <- bs }()
+ renderJobs <- bs
+}
diff --git a/vendor/github.com/gizak/termui/sparkline.go b/vendor/github.com/gizak/termui/sparkline.go
new file mode 100644
index 000000000..312ad9563
--- /dev/null
+++ b/vendor/github.com/gizak/termui/sparkline.go
@@ -0,0 +1,167 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+// Sparkline is like: ▅▆▂▂▅▇▂▂▃▆▆▆▅▃. The data points should be non-negative integers.
+/*
+ data := []int{4, 2, 1, 6, 3, 9, 1, 4, 2, 15, 14, 9, 8, 6, 10, 13, 15, 12, 10, 5, 3, 6, 1}
+ spl := termui.NewSparkline()
+ spl.Data = data
+ spl.Title = "Sparkline 0"
+ spl.LineColor = termui.ColorGreen
+*/
+type Sparkline struct {
+ Data []int
+ Height int
+ Title string
+ TitleColor Attribute
+ LineColor Attribute
+ displayHeight int
+ scale float32
+ max int
+}
+
+// Sparklines is a renderable widget which groups together the given sparklines.
+/*
+ spls := termui.NewSparklines(spl0,spl1,spl2) //...
+ spls.Height = 2
+ spls.Width = 20
+*/
+type Sparklines struct {
+ Block
+ Lines []Sparkline
+ displayLines int
+ displayWidth int
+}
+
+var sparks = []rune{'▁', '▂', '▃', '▄', '▅', '▆', '▇', '█'}
+
+// Add appends a given Sparkline to s *Sparklines.
+func (s *Sparklines) Add(sl Sparkline) {
+ s.Lines = append(s.Lines, sl)
+}
+
+ // NewSparkline returns an unrenderable single sparkline that is intended to be added into Sparklines.
+func NewSparkline() Sparkline {
+ return Sparkline{
+ Height: 1,
+ TitleColor: ThemeAttr("sparkline.title.fg"),
+ LineColor: ThemeAttr("sparkline.line.fg")}
+}
+
+ // NewSparklines returns a new *Sparklines with the given Sparkline(s); you can always add a new Sparkline later.
+func NewSparklines(ss ...Sparkline) *Sparklines {
+ s := &Sparklines{Block: *NewBlock(), Lines: ss}
+ return s
+}
+
+func (sl *Sparklines) update() {
+ for i, v := range sl.Lines {
+ if v.Title == "" {
+ sl.Lines[i].displayHeight = v.Height
+ } else {
+ sl.Lines[i].displayHeight = v.Height + 1
+ }
+ }
+ sl.displayWidth = sl.innerArea.Dx()
+
+ // get how many lines gotta display
+ h := 0
+ sl.displayLines = 0
+ for _, v := range sl.Lines {
+ if h+v.displayHeight <= sl.innerArea.Dy() {
+ sl.displayLines++
+ } else {
+ break
+ }
+ h += v.displayHeight
+ }
+
+ for i := 0; i < sl.displayLines; i++ {
+ data := sl.Lines[i].Data
+
+ max := 0
+ for _, v := range data {
+ if max < v {
+ max = v
+ }
+ }
+ sl.Lines[i].max = max
+ if max != 0 {
+ sl.Lines[i].scale = float32(8*sl.Lines[i].Height) / float32(max)
+ } else { // when all negative
+ sl.Lines[i].scale = 0
+ }
+ }
+}
+
+// Buffer implements Bufferer interface.
+func (sl *Sparklines) Buffer() Buffer {
+ buf := sl.Block.Buffer()
+ sl.update()
+
+ oftY := 0
+ for i := 0; i < sl.displayLines; i++ {
+ l := sl.Lines[i]
+ data := l.Data
+
+ if len(data) > sl.innerArea.Dx() {
+ data = data[len(data)-sl.innerArea.Dx():]
+ }
+
+ if l.Title != "" {
+ rs := trimStr2Runes(l.Title, sl.innerArea.Dx())
+ oftX := 0
+ for _, v := range rs {
+ w := charWidth(v)
+ c := Cell{
+ Ch: v,
+ Fg: l.TitleColor,
+ Bg: sl.Bg,
+ }
+ x := sl.innerArea.Min.X + oftX
+ y := sl.innerArea.Min.Y + oftY
+ buf.Set(x, y, c)
+ oftX += w
+ }
+ }
+
+ for j, v := range data {
+ // display height of the data point, zero when data is negative
+ h := int(float32(v)*l.scale + 0.5)
+ if v < 0 {
+ h = 0
+ }
+
+ barCnt := h / 8
+ barMod := h % 8
+ for jj := 0; jj < barCnt; jj++ {
+ c := Cell{
+ Ch: ' ', // => sparks[7]
+ Bg: l.LineColor,
+ }
+ x := sl.innerArea.Min.X + j
+ y := sl.innerArea.Min.Y + oftY + l.Height - jj
+
+ //p.Bg = sl.BgColor
+ buf.Set(x, y, c)
+ }
+ if barMod != 0 {
+ c := Cell{
+ Ch: sparks[barMod-1],
+ Fg: l.LineColor,
+ Bg: sl.Bg,
+ }
+ x := sl.innerArea.Min.X + j
+ y := sl.innerArea.Min.Y + oftY + l.Height - barCnt
+ buf.Set(x, y, c)
+ }
+ }
+
+ oftY += l.displayHeight
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/gizak/termui/textbuilder.go b/vendor/github.com/gizak/termui/textbuilder.go
new file mode 100644
index 000000000..06a019bed
--- /dev/null
+++ b/vendor/github.com/gizak/termui/textbuilder.go
@@ -0,0 +1,278 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/mitchellh/go-wordwrap"
+)
+
+// TextBuilder is a minimal interface to produce text []Cell using specific syntax (markdown).
+type TextBuilder interface {
+ Build(s string, fg, bg Attribute) []Cell
+}
+
+// DefaultTxBuilder is set to be MarkdownTxBuilder.
+var DefaultTxBuilder = NewMarkdownTxBuilder()
+
+// MarkdownTxBuilder implements TextBuilder interface, using markdown syntax.
+type MarkdownTxBuilder struct {
+ baseFg Attribute
+ baseBg Attribute
+ plainTx []rune
+ markers []marker
+}
+
+type marker struct {
+ st int
+ ed int
+ fg Attribute
+ bg Attribute
+}
+
+var colorMap = map[string]Attribute{
+ "red": ColorRed,
+ "blue": ColorBlue,
+ "black": ColorBlack,
+ "cyan": ColorCyan,
+ "yellow": ColorYellow,
+ "white": ColorWhite,
+ "default": ColorDefault,
+ "green": ColorGreen,
+ "magenta": ColorMagenta,
+}
+
+var attrMap = map[string]Attribute{
+ "bold": AttrBold,
+ "underline": AttrUnderline,
+ "reverse": AttrReverse,
+}
+
+func rmSpc(s string) string {
+ reg := regexp.MustCompile(`\s+`)
+ return reg.ReplaceAllString(s, "")
+}
+
+// readAttr translates strings like `fg-red,fg-bold,bg-white` to fg and bg Attribute
+func (mtb MarkdownTxBuilder) readAttr(s string) (Attribute, Attribute) {
+ fg := mtb.baseFg
+ bg := mtb.baseBg
+
+ updateAttr := func(a Attribute, attrs []string) Attribute {
+ for _, s := range attrs {
+ // replace the color
+ if c, ok := colorMap[s]; ok {
+ a &= 0xFF00 // erase clr 0 ~ 8 bits
+ a |= c // set clr
+ }
+ // add attrs
+ if c, ok := attrMap[s]; ok {
+ a |= c
+ }
+ }
+ return a
+ }
+
+ ss := strings.Split(s, ",")
+ fgs := []string{}
+ bgs := []string{}
+ for _, v := range ss {
+ subs := strings.Split(v, "-")
+ if len(subs) > 1 {
+ if subs[0] == "fg" {
+ fgs = append(fgs, subs[1])
+ }
+ if subs[0] == "bg" {
+ bgs = append(bgs, subs[1])
+ }
+ }
+ }
+
+ fg = updateAttr(fg, fgs)
+ bg = updateAttr(bg, bgs)
+ return fg, bg
+}
+
+func (mtb *MarkdownTxBuilder) reset() {
+ mtb.plainTx = []rune{}
+ mtb.markers = []marker{}
+}
+
+ // parse parses the input string into normalized plain text plus a sequence of color/attribute markers.
+func (mtb *MarkdownTxBuilder) parse(str string) {
+ rs := str2runes(str)
+ normTx := []rune{}
+ square := []rune{}
+ brackt := []rune{}
+ accSquare := false
+ accBrackt := false
+ cntSquare := 0
+
+ reset := func() {
+ square = []rune{}
+ brackt = []rune{}
+ accSquare = false
+ accBrackt = false
+ cntSquare = 0
+ }
+ // pipe stacks into normTx and clear
+ rollback := func() {
+ normTx = append(normTx, square...)
+ normTx = append(normTx, brackt...)
+ reset()
+ }
+ // chop first and last
+ chop := func(s []rune) []rune {
+ return s[1 : len(s)-1]
+ }
+
+ for i, r := range rs {
+ switch {
+ // stacking brackt
+ case accBrackt:
+ brackt = append(brackt, r)
+ if ')' == r {
+ fg, bg := mtb.readAttr(string(chop(brackt)))
+ st := len(normTx)
+ ed := len(normTx) + len(square) - 2
+ mtb.markers = append(mtb.markers, marker{st, ed, fg, bg})
+ normTx = append(normTx, chop(square)...)
+ reset()
+ } else if i+1 == len(rs) {
+ rollback()
+ }
+ // stacking square
+ case accSquare:
+ switch {
+ // squares closed and followed by a '('
+ case cntSquare == 0 && '(' == r:
+ accBrackt = true
+ brackt = append(brackt, '(')
+ // squares closed but not followed by a '('
+ case cntSquare == 0:
+ rollback()
+ if '[' == r {
+ accSquare = true
+ cntSquare = 1
+ brackt = append(brackt, '[')
+ } else {
+ normTx = append(normTx, r)
+ }
+ // hit the end
+ case i+1 == len(rs):
+ square = append(square, r)
+ rollback()
+ case '[' == r:
+ cntSquare++
+ square = append(square, '[')
+ case ']' == r:
+ cntSquare--
+ square = append(square, ']')
+ // normal char
+ default:
+ square = append(square, r)
+ }
+ // stacking normTx
+ default:
+ if '[' == r {
+ accSquare = true
+ cntSquare = 1
+ square = append(square, '[')
+ } else {
+ normTx = append(normTx, r)
+ }
+ }
+ }
+
+ mtb.plainTx = normTx
+}
+
+func wrapTx(cs []Cell, wl int) []Cell {
+ tmpCell := make([]Cell, len(cs))
+ copy(tmpCell, cs)
+
+ // get the plaintext
+ plain := CellsToStr(cs)
+
+ // wrap
+ plainWrapped := wordwrap.WrapString(plain, uint(wl))
+
+ // find differences and insert
+ finalCell := tmpCell // finalcell will get the inserts and is what is returned
+
+ plainRune := []rune(plain)
+ plainWrappedRune := []rune(plainWrapped)
+ trigger := "go"
+ plainRuneNew := plainRune
+
+ for trigger != "stop" {
+ plainRune = plainRuneNew
+ for i := range plainRune {
+ if plainRune[i] == plainWrappedRune[i] {
+ trigger = "stop"
+ } else if plainRune[i] != plainWrappedRune[i] && plainWrappedRune[i] == 10 {
+ trigger = "go"
+ cell := Cell{10, 0, 0}
+ j := i - 0
+
+ // insert a cell into the []Cell in correct position
+ tmpCell[i] = cell
+
+ // insert the newline into plain so we avoid indexing errors
+ plainRuneNew = append(plainRune, 10)
+ copy(plainRuneNew[j+1:], plainRuneNew[j:])
+ plainRuneNew[j] = plainWrappedRune[j]
+
+ // restart the inner for loop until plain and plain wrapped are
+ // the same; yeah, it's inefficient, but the text amounts
+ // should be small
+ break
+
+ } else if plainRune[i] != plainWrappedRune[i] &&
+ plainWrappedRune[i-1] == 10 && // if the prior rune is a newline
+ plainRune[i] == 32 { // and this rune is a space
+ trigger = "go"
+ // need to delete plainRune[i] because it gets rid of an extra
+ // space
+ plainRuneNew = append(plainRune[:i], plainRune[i+1:]...)
+ break
+
+ } else {
+ trigger = "stop" // stops the outer for loop
+ }
+ }
+ }
+
+ finalCell = tmpCell
+
+ return finalCell
+}
+
+// Build implements TextBuilder interface.
+func (mtb MarkdownTxBuilder) Build(s string, fg, bg Attribute) []Cell {
+ mtb.baseFg = fg
+ mtb.baseBg = bg
+ mtb.reset()
+ mtb.parse(s)
+ cs := make([]Cell, len(mtb.plainTx))
+ for i := range cs {
+ cs[i] = Cell{Ch: mtb.plainTx[i], Fg: fg, Bg: bg}
+ }
+ for _, mrk := range mtb.markers {
+ for i := mrk.st; i < mrk.ed; i++ {
+ cs[i].Fg = mrk.fg
+ cs[i].Bg = mrk.bg
+ }
+ }
+
+ return cs
+}
+
+// NewMarkdownTxBuilder returns a TextBuilder employing markdown syntax.
+func NewMarkdownTxBuilder() TextBuilder {
+ return MarkdownTxBuilder{}
+}
diff --git a/vendor/github.com/gizak/termui/theme.go b/vendor/github.com/gizak/termui/theme.go
new file mode 100644
index 000000000..c3ccda559
--- /dev/null
+++ b/vendor/github.com/gizak/termui/theme.go
@@ -0,0 +1,140 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import "strings"
+
+/*
+// A ColorScheme represents the current look-and-feel of the dashboard.
+type ColorScheme struct {
+ BodyBg Attribute
+ BlockBg Attribute
+ HasBorder bool
+ BorderFg Attribute
+ BorderBg Attribute
+ BorderLabelTextFg Attribute
+ BorderLabelTextBg Attribute
+ ParTextFg Attribute
+ ParTextBg Attribute
+ SparklineLine Attribute
+ SparklineTitle Attribute
+ GaugeBar Attribute
+ GaugePercent Attribute
+ LineChartLine Attribute
+ LineChartAxes Attribute
+ ListItemFg Attribute
+ ListItemBg Attribute
+ BarChartBar Attribute
+ BarChartText Attribute
+ BarChartNum Attribute
+ MBarChartBar Attribute
+ MBarChartText Attribute
+ MBarChartNum Attribute
+ TabActiveBg Attribute
+}
+
+// default color scheme depends on the user's terminal setting.
+var themeDefault = ColorScheme{HasBorder: true}
+
+var themeHelloWorld = ColorScheme{
+ BodyBg: ColorBlack,
+ BlockBg: ColorBlack,
+ HasBorder: true,
+ BorderFg: ColorWhite,
+ BorderBg: ColorBlack,
+ BorderLabelTextBg: ColorBlack,
+ BorderLabelTextFg: ColorGreen,
+ ParTextBg: ColorBlack,
+ ParTextFg: ColorWhite,
+ SparklineLine: ColorMagenta,
+ SparklineTitle: ColorWhite,
+ GaugeBar: ColorRed,
+ GaugePercent: ColorWhite,
+ LineChartLine: ColorYellow | AttrBold,
+ LineChartAxes: ColorWhite,
+ ListItemBg: ColorBlack,
+ ListItemFg: ColorYellow,
+ BarChartBar: ColorRed,
+ BarChartNum: ColorWhite,
+ BarChartText: ColorCyan,
+ MBarChartBar: ColorRed,
+ MBarChartNum: ColorWhite,
+ MBarChartText: ColorCyan,
+ TabActiveBg: ColorMagenta,
+}
+
+var theme = themeDefault // global dep
+
+// Theme returns the currently used theme.
+func Theme() ColorScheme {
+ return theme
+}
+
+// SetTheme sets a new, custom theme.
+func SetTheme(newTheme ColorScheme) {
+ theme = newTheme
+}
+
+// UseTheme sets a predefined scheme. Currently available: "hello-world" and
+// "black-and-white".
+func UseTheme(th string) {
+ switch th {
+ case "helloworld":
+ theme = themeHelloWorld
+ default:
+ theme = themeDefault
+ }
+}
+*/
+
+var ColorMap = map[string]Attribute{
+ "fg": ColorWhite,
+ "bg": ColorDefault,
+ "border.fg": ColorWhite,
+ "label.fg": ColorGreen,
+ "par.fg": ColorYellow,
+ "par.label.bg": ColorWhite,
+}
+
+func ThemeAttr(name string) Attribute {
+ return lookUpAttr(ColorMap, name)
+}
+
+func lookUpAttr(clrmap map[string]Attribute, name string) Attribute {
+
+ a, ok := clrmap[name]
+ if ok {
+ return a
+ }
+
+ ns := strings.Split(name, ".")
+ for i := range ns {
+ nn := strings.Join(ns[i:len(ns)], ".")
+ a, ok = clrmap[nn]
+ if ok {
+ break
+ }
+ }
+
+ return a
+}
+
+ // ColorRGB builds an Attribute from RGB components, each clamped to the range 0..5.
+func ColorRGB(r, g, b int) Attribute {
+ within := func(n int) int {
+ if n < 0 {
+ return 0
+ }
+
+ if n > 5 {
+ return 5
+ }
+
+ return n
+ }
+
+ r, b, g = within(r), within(b), within(g)
+ return Attribute(0x0f + 36*r + 6*g + b)
+}
diff --git a/vendor/github.com/gizak/termui/widget.go b/vendor/github.com/gizak/termui/widget.go
new file mode 100644
index 000000000..35cf143a3
--- /dev/null
+++ b/vendor/github.com/gizak/termui/widget.go
@@ -0,0 +1,94 @@
+// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
+// Use of this source code is governed by a MIT license that can
+// be found in the LICENSE file.
+
+package termui
+
+import (
+ "fmt"
+ "sync"
+)
+
+// event mixins
+type WgtMgr map[string]WgtInfo
+
+type WgtInfo struct {
+ Handlers map[string]func(Event)
+ WgtRef Widget
+ Id string
+}
+
+type Widget interface {
+ Id() string
+}
+
+func NewWgtInfo(wgt Widget) WgtInfo {
+ return WgtInfo{
+ Handlers: make(map[string]func(Event)),
+ WgtRef: wgt,
+ Id: wgt.Id(),
+ }
+}
+
+func NewWgtMgr() WgtMgr {
+ wm := WgtMgr(make(map[string]WgtInfo))
+ return wm
+
+}
+
+func (wm WgtMgr) AddWgt(wgt Widget) {
+ wm[wgt.Id()] = NewWgtInfo(wgt)
+}
+
+func (wm WgtMgr) RmWgt(wgt Widget) {
+ wm.RmWgtById(wgt.Id())
+}
+
+func (wm WgtMgr) RmWgtById(id string) {
+ delete(wm, id)
+}
+
+func (wm WgtMgr) AddWgtHandler(id, path string, h func(Event)) {
+ if w, ok := wm[id]; ok {
+ w.Handlers[path] = h
+ }
+}
+
+func (wm WgtMgr) RmWgtHandler(id, path string) {
+ if w, ok := wm[id]; ok {
+ delete(w.Handlers, path)
+ }
+}
+
+var counter struct {
+ sync.RWMutex
+ count int
+}
+
+func GenId() string {
+ counter.Lock()
+ defer counter.Unlock()
+
+ counter.count += 1
+ return fmt.Sprintf("%d", counter.count)
+}
+
+func (wm WgtMgr) WgtHandlersHook() func(Event) {
+ return func(e Event) {
+ for _, v := range wm {
+ if k := findMatch(v.Handlers, e.Path); k != "" {
+ v.Handlers[k](e)
+ }
+ }
+ }
+}
+
+var DefaultWgtMgr WgtMgr
+
+func (b *Block) Handle(path string, handler func(Event)) {
+ if _, ok := DefaultWgtMgr[b.Id()]; !ok {
+ DefaultWgtMgr.AddWgt(b)
+ }
+
+ DefaultWgtMgr.AddWgtHandler(b.Id(), path, handler)
+}
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 000000000..042091d9b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 000000000..bcfa19520
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 000000000..931ae3160
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 000000000..6050c10f4
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 000000000..cea12879a
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 000000000..72efb0353
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 000000000..fcd192b84
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 000000000..e6179f65e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+	//	// The two previous lines together mean that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 000000000..8c9f2049b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 000000000..874968906
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 000000000..2a56fb504
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int) \ No newline at end of file
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 000000000..adfd979fe
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 000000000..dbcae905e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+ // length emitted down below is is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 000000000..0cf5e379c
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore
new file mode 100644
index 000000000..836562412
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go
new file mode 100644
index 000000000..337d96329
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/2q.go
@@ -0,0 +1,212 @@
+package lru
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+ // Default2QRecentRatio is the ratio of the 2Q cache dedicated
+ // to recently added entries that have only been accessed once.
+ Default2QRecentRatio = 0.25
+
+ // Default2QGhostEntries is the default ratio of ghost
+ // entries kept to track entries recently evicted
+ Default2QGhostEntries = 0.50
+)
+
+// TwoQueueCache is a thread-safe fixed size 2Q cache.
+// 2Q is an enhancement over the standard LRU cache
+// in that it tracks both frequently and recently used
+// entries separately. This avoids a burst in access to new
+// entries from evicting frequently used entries. It adds some
+// additional tracking overhead to the standard LRU cache, and is
+// computationally about 2x the cost, and adds some metadata over
+// head. The ARCCache is similar, but does not require setting any
+// parameters.
+type TwoQueueCache struct {
+ size int
+ recentSize int
+
+ recent *simplelru.LRU
+ frequent *simplelru.LRU
+ recentEvict *simplelru.LRU
+ lock sync.RWMutex
+}
+
+// New2Q creates a new TwoQueueCache using the default
+// values for the parameters.
+func New2Q(size int) (*TwoQueueCache, error) {
+ return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
+}
+
+// New2QParams creates a new TwoQueueCache using the provided
+// parameter values.
+func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
+ if size <= 0 {
+ return nil, fmt.Errorf("invalid size")
+ }
+ if recentRatio < 0.0 || recentRatio > 1.0 {
+ return nil, fmt.Errorf("invalid recent ratio")
+ }
+ if ghostRatio < 0.0 || ghostRatio > 1.0 {
+ return nil, fmt.Errorf("invalid ghost ratio")
+ }
+
+ // Determine the sub-sizes
+ recentSize := int(float64(size) * recentRatio)
+ evictSize := int(float64(size) * ghostRatio)
+
+ // Allocate the LRUs
+ recent, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ frequent, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ recentEvict, err := simplelru.NewLRU(evictSize, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the cache
+ c := &TwoQueueCache{
+ size: size,
+ recentSize: recentSize,
+ recent: recent,
+ frequent: frequent,
+ recentEvict: recentEvict,
+ }
+ return c, nil
+}
+
+func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if this is a frequent value
+ if val, ok := c.frequent.Get(key); ok {
+ return val, ok
+ }
+
+ // If the value is contained in recent, then we
+ // promote it to frequent
+ if val, ok := c.recent.Peek(key); ok {
+ c.recent.Remove(key)
+ c.frequent.Add(key, val)
+ return val, ok
+ }
+
+ // No hit
+ return nil, false
+}
+
+func (c *TwoQueueCache) Add(key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is frequently used already,
+ // and just update the value
+ if c.frequent.Contains(key) {
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Check if the value is recently used, and promote
+ // the value into the frequent list
+ if c.recent.Contains(key) {
+ c.recent.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // If the value was recently evicted, add it to the
+ // frequently used list
+ if c.recentEvict.Contains(key) {
+ c.ensureSpace(true)
+ c.recentEvict.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Add to the recently seen list
+ c.ensureSpace(false)
+ c.recent.Add(key, value)
+ return
+}
+
+// ensureSpace is used to ensure we have space in the cache
+func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
+ // If we have space, nothing to do
+ recentLen := c.recent.Len()
+ freqLen := c.frequent.Len()
+ if recentLen+freqLen < c.size {
+ return
+ }
+
+ // If the recent buffer is larger than
+ // the target, evict from there
+ if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
+ k, _, _ := c.recent.RemoveOldest()
+ c.recentEvict.Add(k, nil)
+ return
+ }
+
+ // Remove from the frequent list otherwise
+ c.frequent.RemoveOldest()
+}
+
+func (c *TwoQueueCache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.recent.Len() + c.frequent.Len()
+}
+
+func (c *TwoQueueCache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.frequent.Keys()
+ k2 := c.recent.Keys()
+ return append(k1, k2...)
+}
+
+func (c *TwoQueueCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.frequent.Remove(key) {
+ return
+ }
+ if c.recent.Remove(key) {
+ return
+ }
+ if c.recentEvict.Remove(key) {
+ return
+ }
+}
+
+func (c *TwoQueueCache) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.recent.Purge()
+ c.frequent.Purge()
+ c.recentEvict.Purge()
+}
+
+func (c *TwoQueueCache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.frequent.Contains(key) || c.recent.Contains(key)
+}
+
+func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.frequent.Peek(key); ok {
+ return val, ok
+ }
+ return c.recent.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 000000000..be2cc4dfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md
new file mode 100644
index 000000000..33e58cfaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/README.md
@@ -0,0 +1,25 @@
+golang-lru
+==========
+
+This provides the `lru` package which implements a fixed-size
+thread safe LRU cache. It is based on the cache in Groupcache.
+
+Documentation
+=============
+
+Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
+
+Example
+=======
+
+Using the LRU is very simple:
+
+```go
+l, _ := New(128)
+for i := 0; i < 256; i++ {
+ l.Add(i, nil)
+}
+if l.Len() != 128 {
+ panic(fmt.Sprintf("bad len: %v", l.Len()))
+}
+```
diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go
new file mode 100644
index 000000000..a2a252817
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/arc.go
@@ -0,0 +1,257 @@
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
+// ARC is an enhancement over the standard LRU cache in that tracks both
+// frequency and recency of use. This avoids a burst in access to new
+// entries from evicting the frequently used older entries. It adds some
+// additional tracking overhead to a standard LRU cache, computationally
+// it is roughly 2x the cost, and the extra memory overhead is linear
+// with the size of the cache. ARC has been patented by IBM, but is
+// similar to the TwoQueueCache (2Q) which requires setting parameters.
+type ARCCache struct {
+ size int // Size is the total capacity of the cache
+ p int // P is the dynamic preference towards T1 or T2
+
+ t1 *simplelru.LRU // T1 is the LRU for recently accessed items
+ b1 *simplelru.LRU // B1 is the LRU for evictions from t1
+
+ t2 *simplelru.LRU // T2 is the LRU for frequently accessed items
+ b2 *simplelru.LRU // B2 is the LRU for evictions from t2
+
+ lock sync.RWMutex
+}
+
+// NewARC creates an ARC of the given size
+func NewARC(size int) (*ARCCache, error) {
+ // Create the sub LRUs
+ b1, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ t1, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ t2, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the ARC
+ c := &ARCCache{
+ size: size,
+ p: 0,
+ t1: t1,
+ b1: b1,
+ t2: t2,
+ b2: b2,
+ }
+ return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *ARCCache) Get(key interface{}) (interface{}, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Ff the value is contained in T1 (recent), then
+ // promote it to T2 (frequent)
+ if val, ok := c.t1.Peek(key); ok {
+ c.t1.Remove(key)
+ c.t2.Add(key, val)
+ return val, ok
+ }
+
+ // Check if the value is contained in T2 (frequent)
+ if val, ok := c.t2.Get(key); ok {
+ return val, ok
+ }
+
+ // No hit
+ return nil, false
+}
+
+// Add adds a value to the cache.
+func (c *ARCCache) Add(key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is contained in T1 (recent), and potentially
+ // promote it to frequent T2
+ if c.t1.Contains(key) {
+ c.t1.Remove(key)
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if the value is already in T2 (frequent) and update it
+ if c.t2.Contains(key) {
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if this value was recently evicted as part of the
+ // recently used list
+ if c.b1.Contains(key) {
+ // T1 set is too small, increase P appropriately
+ delta := 1
+ b1Len := c.b1.Len()
+ b2Len := c.b2.Len()
+ if b2Len > b1Len {
+ delta = b2Len / b1Len
+ }
+ if c.p+delta >= c.size {
+ c.p = c.size
+ } else {
+ c.p += delta
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(false)
+ }
+
+ // Remove from B1
+ c.b1.Remove(key)
+
+ // Add the key to the frequently used list
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if this value was recently evicted as part of the
+ // frequently used list
+ if c.b2.Contains(key) {
+ // T2 set is too small, decrease P appropriately
+ delta := 1
+ b1Len := c.b1.Len()
+ b2Len := c.b2.Len()
+ if b1Len > b2Len {
+ delta = b1Len / b2Len
+ }
+ if delta >= c.p {
+ c.p = 0
+ } else {
+ c.p -= delta
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(true)
+ }
+
+ // Remove from B2
+ c.b2.Remove(key)
+
+ // Add the key to the frequntly used list
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(false)
+ }
+
+ // Keep the size of the ghost buffers trim
+ if c.b1.Len() > c.size-c.p {
+ c.b1.RemoveOldest()
+ }
+ if c.b2.Len() > c.p {
+ c.b2.RemoveOldest()
+ }
+
+ // Add to the recently seen list
+ c.t1.Add(key, value)
+ return
+}
+
+// replace is used to adaptively evict from either T1 or T2
+// based on the current learned value of P
+func (c *ARCCache) replace(b2ContainsKey bool) {
+ t1Len := c.t1.Len()
+ if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
+ k, _, ok := c.t1.RemoveOldest()
+ if ok {
+ c.b1.Add(k, nil)
+ }
+ } else {
+ k, _, ok := c.t2.RemoveOldest()
+ if ok {
+ c.b2.Add(k, nil)
+ }
+ }
+}
+
+// Len returns the number of cached entries
+func (c *ARCCache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.t1.Len() + c.t2.Len()
+}
+
+// Keys returns all the cached keys
+func (c *ARCCache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.t1.Keys()
+ k2 := c.t2.Keys()
+ return append(k1, k2...)
+}
+
+// Remove is used to purge a key from the cache
+func (c *ARCCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.t1.Remove(key) {
+ return
+ }
+ if c.t2.Remove(key) {
+ return
+ }
+ if c.b1.Remove(key) {
+ return
+ }
+ if c.b2.Remove(key) {
+ return
+ }
+}
+
+// Purge is used to clear the cache
+func (c *ARCCache) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.t1.Purge()
+ c.t2.Purge()
+ c.b1.Purge()
+ c.b2.Purge()
+}
+
+// Contains is used to check if the cache contains a key
+// without updating recency or frequency.
+func (c *ARCCache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.t1.Contains(key) || c.t2.Contains(key)
+}
+
+// Peek is used to inspect the cache value of a key
+// without updating recency or frequency.
+func (c *ARCCache) Peek(key interface{}) (interface{}, bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.t1.Peek(key); ok {
+ return val, ok
+ }
+ return c.t2.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go
new file mode 100644
index 000000000..a6285f989
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/lru.go
@@ -0,0 +1,114 @@
+// This package provides a simple LRU cache. It is based on the
+// LRU implementation in groupcache:
+// https://github.com/golang/groupcache/tree/master/lru
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+// Cache is a thread-safe fixed size LRU cache.
+type Cache struct {
+ lru *simplelru.LRU
+ lock sync.RWMutex
+}
+
+// New creates an LRU of the given size
+func New(size int) (*Cache, error) {
+ return NewWithEvict(size, nil)
+}
+
+// NewWithEvict constructs a fixed size cache with the given eviction
+// callback.
+func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
+ lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
+ if err != nil {
+ return nil, err
+ }
+ c := &Cache{
+ lru: lru,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache
+func (c *Cache) Purge() {
+ c.lock.Lock()
+ c.lru.Purge()
+ c.lock.Unlock()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *Cache) Add(key, value interface{}) bool {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.lru.Add(key, value)
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key interface{}) (interface{}, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.lru.Get(key)
+}
+
+// Check if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *Cache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Contains(key)
+}
+
+// Returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *Cache) Peek(key interface{}) (interface{}, bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Peek(key)
+}
+
+// ContainsOrAdd checks if a key is in the cache without updating the
+// recent-ness or deleting it for being stale, and if not, adds the value.
+// Returns whether found and whether an eviction occurred.
+func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if c.lru.Contains(key) {
+ return true, false
+ } else {
+ evict := c.lru.Add(key, value)
+ return false, evict
+ }
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key interface{}) {
+ c.lock.Lock()
+ c.lru.Remove(key)
+ c.lock.Unlock()
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+ c.lock.Lock()
+ c.lru.RemoveOldest()
+ c.lock.Unlock()
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *Cache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Keys()
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Len()
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 000000000..cb416b394
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,160 @@
+package simplelru
+
+import (
+ "container/list"
+ "errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted.
+// It receives the evicted entry's key and value.
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU struct {
+ size int // maximum entry count before Add starts evicting
+ evictList *list.List // recency order: front = most recent, back = oldest
+ items map[interface{}]*list.Element // key -> element in evictList
+ onEvict EvictCallback // optional; invoked on eviction, Remove and Purge
+}
+
+// entry is used to hold a value in the evictList
+// The key is kept alongside the value so removeElement can delete the
+// matching map entry given only the list element.
+type entry struct {
+ key interface{}
+ value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+// An error is returned when size is not strictly positive; onEvict may be nil.
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+ if size <= 0 {
+ // NOTE(review): Go convention wants lower-case, unpunctuated error
+ // strings ("must provide a positive size"); kept as-is since callers
+ // may match on the message.
+ return nil, errors.New("Must provide a positive size")
+ }
+ c := &LRU{
+ size: size,
+ evictList: list.New(),
+ items: make(map[interface{}]*list.Element),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache
+// The eviction callback (if any) fires once per entry, then the recency
+// list is reset in one step via Init.
+func (c *LRU) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value.(*entry).value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+// Re-adding an existing key updates its value and promotes it to most
+// recently used; that path never evicts.
+func (c *LRU) Add(key, value interface{}) bool {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value.(*entry).value = value
+ return false
+ }
+
+ // Add new item
+ ent := &entry{key, value}
+ // NOTE(review): this local shadows the package-level `entry` type for the
+ // rest of the function; harmless here, but a rename would be clearer.
+ entry := c.evictList.PushFront(ent)
+ c.items[key] = entry
+
+ evict := c.evictList.Len() > c.size
+ // Verify size not exceeded
+ if evict {
+ c.removeOldest()
+ }
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+// A hit also promotes the entry to most recently used.
+// The if-scoped ok shadows the named result; the bare return on the miss
+// path yields the zero values (nil, false).
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ return ent.Value.(*entry).value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key's value (or nil if not found) without updating
+// the "recently used"-ness of the key.
+// On the miss path `ok` is the named result, still its zero value, so
+// `return nil, ok` is equivalent to `return nil, false`.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ return ent.Value.(*entry).value, true
+ }
+ return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+// Removal goes through removeElement, so the eviction callback fires.
+func (c *LRU) Remove(key interface{}) bool {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+// It returns the removed key, value, and whether anything was removed.
+// Reading ent.Value after removeElement is safe: container/list.Remove
+// detaches the element but does not clear its Value field.
+func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+// Unlike Get, this does not promote the entry's recency.
+func (c *LRU) GetOldest() (interface{}, interface{}, bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+// Walking the eviction list back-to-front yields oldest-first order; the
+// slice is pre-sized from the map to avoid growth.
+func (c *LRU) Keys() []interface{} {
+ keys := make([]interface{}, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+ keys[i] = ent.Value.(*entry).key
+ i++
+ }
+ return keys
+}
+
+// Len returns the number of items in the cache.
+// The list length is authoritative; items and evictList are kept in sync.
+func (c *LRU) Len() int {
+ return c.evictList.Len()
+}
+
+// removeOldest removes the oldest item from the cache.
+// No-op on an empty cache.
+func (c *LRU) removeOldest() {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+// Order matters: list and map bookkeeping complete before the user
+// callback runs, so onEvict observes a consistent cache.
+func (c *LRU) removeElement(e *list.Element) {
+ c.evictList.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.items, kv.key)
+ if c.onEvict != nil {
+ c.onEvict(kv.key, kv.value)
+ }
+}
diff --git a/vendor/github.com/huin/goupnp/.gitignore b/vendor/github.com/huin/goupnp/.gitignore
new file mode 100644
index 000000000..09ef375e8
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/.gitignore
@@ -0,0 +1 @@
+/gotasks/specs
diff --git a/vendor/github.com/huin/goupnp/LICENSE b/vendor/github.com/huin/goupnp/LICENSE
new file mode 100644
index 000000000..252e3d639
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2013, John Beisley <greatred@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/huin/goupnp/README.md b/vendor/github.com/huin/goupnp/README.md
new file mode 100644
index 000000000..a228ccea6
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/README.md
@@ -0,0 +1,44 @@
+goupnp is a UPnP client library for Go
+
+Installation
+------------
+
+Run `go get -u github.com/huin/goupnp`.
+
+Documentation
+-------------
+
+All doc links below are on [GoDoc](https://godoc.org/github.com/huin/goupnp).
+
+Supported DCPs (you probably want to start with one of these):
+* [av1](https://godoc.org/github.com/huin/goupnp/dcps/av1) - Client for UPnP Device Control Protocol MediaServer v1 and MediaRenderer v1.
+* [internetgateway1](https://godoc.org/github.com/huin/goupnp/dcps/internetgateway1) - Client for UPnP Device Control Protocol Internet Gateway Device v1.
+* [internetgateway2](https://godoc.org/github.com/huin/goupnp/dcps/internetgateway2) - Client for UPnP Device Control Protocol Internet Gateway Device v2.
+
+Core components:
+* [(goupnp)](https://godoc.org/github.com/huin/goupnp) core library - contains datastructures and utilities typically used by the implemented DCPs.
+* [httpu](https://godoc.org/github.com/huin/goupnp/httpu) HTTPU implementation, underlies SSDP.
+* [ssdp](https://godoc.org/github.com/huin/goupnp/ssdp) SSDP client implementation (simple service discovery protocol) - used to discover UPnP services on a network.
+* [soap](https://godoc.org/github.com/huin/goupnp/soap) SOAP client implementation (simple object access protocol) - used to communicate with discovered services.
+
+
+Regenerating dcps generated source code:
+----------------------------------------
+
+1. Install gotask: `go get -u github.com/jingweno/gotask`
+2. Change to the gotasks directory: `cd gotasks`
+3. Run specgen task: `gotask specgen`
+
+Supporting additional UPnP devices and services:
+------------------------------------------------
+
+Supporting additional services is, in the trivial case, simply a matter of
+adding the service to the `dcpMetadata` whitelist in `gotasks/specgen_task.go`,
+regenerating the source code (see above), and committing that source code.
+
+However, it would be helpful if anyone needing such a service could test it
+against the service they have, and then report any trouble
+encountered as an [issue on this
+project](https://github.com/huin/goupnp/issues/new). If it just works, then
+please report at least minimal working functionality as an issue, and
+optionally contribute the metadata upstream.
diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go b/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go
new file mode 100644
index 000000000..1e0802cd4
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go
@@ -0,0 +1,3717 @@
+// Client for UPnP Device Control Protocol Internet Gateway Device v1.
+//
+// This DCP is documented in detail at: http://upnp.org/specs/gw/UPnP-gw-InternetGatewayDevice-v1-Device.pdf
+//
+// Typically, use one of the New* functions to create clients for services.
+package internetgateway1
+
+// Generated file - do not edit by hand. See README.md
+
+import (
+ "net/url"
+ "time"
+
+ "github.com/huin/goupnp"
+ "github.com/huin/goupnp/soap"
+)
+
+// Hack to avoid Go complaining if time isn't used.
+var _ time.Time
+
+// Device URNs:
+// UPnP device type identifiers defined by the IGD v1 DCP (see the spec
+// linked in the package comment).
+const (
+ URN_LANDevice_1 = "urn:schemas-upnp-org:device:LANDevice:1"
+ URN_WANConnectionDevice_1 = "urn:schemas-upnp-org:device:WANConnectionDevice:1"
+ URN_WANDevice_1 = "urn:schemas-upnp-org:device:WANDevice:1"
+)
+
+// Service URNs:
+// UPnP service type identifiers; each is passed to goupnp's discovery
+// helpers by the corresponding New*Clients functions below.
+const (
+ URN_LANHostConfigManagement_1 = "urn:schemas-upnp-org:service:LANHostConfigManagement:1"
+ URN_Layer3Forwarding_1 = "urn:schemas-upnp-org:service:Layer3Forwarding:1"
+ URN_WANCableLinkConfig_1 = "urn:schemas-upnp-org:service:WANCableLinkConfig:1"
+ URN_WANCommonInterfaceConfig_1 = "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1"
+ URN_WANDSLLinkConfig_1 = "urn:schemas-upnp-org:service:WANDSLLinkConfig:1"
+ URN_WANEthernetLinkConfig_1 = "urn:schemas-upnp-org:service:WANEthernetLinkConfig:1"
+ URN_WANIPConnection_1 = "urn:schemas-upnp-org:service:WANIPConnection:1"
+ URN_WANPOTSLinkConfig_1 = "urn:schemas-upnp-org:service:WANPOTSLinkConfig:1"
+ URN_WANPPPConnection_1 = "urn:schemas-upnp-org:service:WANPPPConnection:1"
+)
+
+// LANHostConfigManagement1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:LANHostConfigManagement:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+// Instances are created via the New*Clients constructors below; this is
+// generated code (see README.md for regeneration).
+type LANHostConfigManagement1 struct {
+ goupnp.ServiceClient
+}
+
+// NewLANHostConfigManagement1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_LANHostConfigManagement_1); err != nil {
+ return
+ }
+ clients = newLANHostConfigManagement1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewLANHostConfigManagement1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_LANHostConfigManagement_1)
+ if err != nil {
+ return nil, err
+ }
+ return newLANHostConfigManagement1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewLANHostConfigManagement1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewLANHostConfigManagement1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*LANHostConfigManagement1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_LANHostConfigManagement_1)
+ if err != nil {
+ return nil, err
+ }
+ return newLANHostConfigManagement1ClientsFromGenericClients(genericClients), nil
+}
+
+func newLANHostConfigManagement1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*LANHostConfigManagement1 {
+ clients := make([]*LANHostConfigManagement1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &LANHostConfigManagement1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *LANHostConfigManagement1) SetDHCPServerConfigurable(NewDHCPServerConfigurable bool) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDHCPServerConfigurable string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDHCPServerConfigurable, err = soap.MarshalBoolean(NewDHCPServerConfigurable); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetDHCPServerConfigurable", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetDHCPServerConfigurable() (NewDHCPServerConfigurable bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDHCPServerConfigurable string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetDHCPServerConfigurable", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDHCPServerConfigurable, err = soap.UnmarshalBoolean(response.NewDHCPServerConfigurable); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetDHCPRelay(NewDHCPRelay bool) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDHCPRelay string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDHCPRelay, err = soap.MarshalBoolean(NewDHCPRelay); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetDHCPRelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetDHCPRelay() (NewDHCPRelay bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDHCPRelay string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetDHCPRelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDHCPRelay, err = soap.UnmarshalBoolean(response.NewDHCPRelay); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetSubnetMask(NewSubnetMask string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewSubnetMask string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewSubnetMask, err = soap.MarshalString(NewSubnetMask); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetSubnetMask", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetSubnetMask() (NewSubnetMask string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewSubnetMask string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetSubnetMask", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewSubnetMask, err = soap.UnmarshalString(response.NewSubnetMask); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetIPRouter(NewIPRouters string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewIPRouters string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewIPRouters, err = soap.MarshalString(NewIPRouters); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetIPRouter", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) DeleteIPRouter(NewIPRouters string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewIPRouters string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewIPRouters, err = soap.MarshalString(NewIPRouters); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "DeleteIPRouter", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetIPRoutersList() (NewIPRouters string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewIPRouters string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetIPRoutersList", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewIPRouters, err = soap.UnmarshalString(response.NewIPRouters); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetDomainName(NewDomainName string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDomainName string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDomainName, err = soap.MarshalString(NewDomainName); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetDomainName", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetDomainName() (NewDomainName string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDomainName string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetDomainName", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDomainName, err = soap.UnmarshalString(response.NewDomainName); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetAddressRange(NewMinAddress string, NewMaxAddress string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewMinAddress string
+
+ NewMaxAddress string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewMinAddress, err = soap.MarshalString(NewMinAddress); err != nil {
+ return
+ }
+ if request.NewMaxAddress, err = soap.MarshalString(NewMaxAddress); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetAddressRange", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetAddressRange() (NewMinAddress string, NewMaxAddress string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewMinAddress string
+
+ NewMaxAddress string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetAddressRange", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewMinAddress, err = soap.UnmarshalString(response.NewMinAddress); err != nil {
+ return
+ }
+ if NewMaxAddress, err = soap.UnmarshalString(response.NewMaxAddress); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetReservedAddress(NewReservedAddresses string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewReservedAddresses string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewReservedAddresses, err = soap.MarshalString(NewReservedAddresses); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetReservedAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) DeleteReservedAddress(NewReservedAddresses string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewReservedAddresses string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewReservedAddresses, err = soap.MarshalString(NewReservedAddresses); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "DeleteReservedAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetReservedAddresses() (NewReservedAddresses string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewReservedAddresses string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetReservedAddresses", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewReservedAddresses, err = soap.UnmarshalString(response.NewReservedAddresses); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetDNSServer(NewDNSServers string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDNSServers string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDNSServers, err = soap.MarshalString(NewDNSServers); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetDNSServer", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) DeleteDNSServer(NewDNSServers string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDNSServers string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDNSServers, err = soap.MarshalString(NewDNSServers); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "DeleteDNSServer", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetDNSServers() (NewDNSServers string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDNSServers string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetDNSServers", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDNSServers, err = soap.UnmarshalString(response.NewDNSServers); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+// Layer3Forwarding1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:Layer3Forwarding:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+// Instances are created via the New*Clients constructors below; this is
+// generated code (see README.md for regeneration).
+type Layer3Forwarding1 struct {
+ goupnp.ServiceClient
+}
+
+// NewLayer3Forwarding1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_Layer3Forwarding_1); err != nil {
+ return
+ }
+ clients = newLayer3Forwarding1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewLayer3Forwarding1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_Layer3Forwarding_1)
+ if err != nil {
+ return nil, err
+ }
+ return newLayer3Forwarding1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewLayer3Forwarding1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewLayer3Forwarding1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*Layer3Forwarding1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_Layer3Forwarding_1)
+ if err != nil {
+ return nil, err
+ }
+ return newLayer3Forwarding1ClientsFromGenericClients(genericClients), nil
+}
+
+func newLayer3Forwarding1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*Layer3Forwarding1 {
+ clients := make([]*Layer3Forwarding1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &Layer3Forwarding1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *Layer3Forwarding1) SetDefaultConnectionService(NewDefaultConnectionService string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDefaultConnectionService string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDefaultConnectionService, err = soap.MarshalString(NewDefaultConnectionService); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_Layer3Forwarding_1, "SetDefaultConnectionService", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *Layer3Forwarding1) GetDefaultConnectionService() (NewDefaultConnectionService string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDefaultConnectionService string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_Layer3Forwarding_1, "GetDefaultConnectionService", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDefaultConnectionService, err = soap.UnmarshalString(response.NewDefaultConnectionService); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+// WANCableLinkConfig1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANCableLinkConfig:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+// Instances are created via the New*Clients constructors below; this is
+// generated code (see README.md for regeneration).
+type WANCableLinkConfig1 struct {
+ goupnp.ServiceClient
+}
+
+// NewWANCableLinkConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCableLinkConfig_1); err != nil {
+ return
+ }
+ clients = newWANCableLinkConfig1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewWANCableLinkConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCableLinkConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANCableLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANCableLinkConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANCableLinkConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANCableLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANCableLinkConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANCableLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANCableLinkConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANCableLinkConfig1 {
+ clients := make([]*WANCableLinkConfig1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANCableLinkConfig1{genericClients[i]}
+ }
+ return clients
+}
+
+//
+// Return values:
+//
+// * NewCableLinkConfigState: allowed values: notReady, dsSyncComplete, usParamAcquired, rangingComplete, ipComplete, todEstablished, paramTransferComplete, registrationComplete, operational, accessDenied
+//
+// * NewLinkType: allowed values: Ethernet
+func (client *WANCableLinkConfig1) GetCableLinkConfigInfo() (NewCableLinkConfigState string, NewLinkType string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewCableLinkConfigState string
+
+ NewLinkType string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetCableLinkConfigInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewCableLinkConfigState, err = soap.UnmarshalString(response.NewCableLinkConfigState); err != nil {
+ return
+ }
+ if NewLinkType, err = soap.UnmarshalString(response.NewLinkType); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCableLinkConfig1) GetDownstreamFrequency() (NewDownstreamFrequency uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDownstreamFrequency string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetDownstreamFrequency", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDownstreamFrequency, err = soap.UnmarshalUi4(response.NewDownstreamFrequency); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewDownstreamModulation: allowed values: 64QAM, 256QAM
+func (client *WANCableLinkConfig1) GetDownstreamModulation() (NewDownstreamModulation string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDownstreamModulation string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetDownstreamModulation", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDownstreamModulation, err = soap.UnmarshalString(response.NewDownstreamModulation); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCableLinkConfig1) GetUpstreamFrequency() (NewUpstreamFrequency uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewUpstreamFrequency string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetUpstreamFrequency", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewUpstreamFrequency, err = soap.UnmarshalUi4(response.NewUpstreamFrequency); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewUpstreamModulation: allowed values: QPSK, 16QAM
+func (client *WANCableLinkConfig1) GetUpstreamModulation() (NewUpstreamModulation string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewUpstreamModulation string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetUpstreamModulation", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewUpstreamModulation, err = soap.UnmarshalString(response.NewUpstreamModulation); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCableLinkConfig1) GetUpstreamChannelID() (NewUpstreamChannelID uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewUpstreamChannelID string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetUpstreamChannelID", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewUpstreamChannelID, err = soap.UnmarshalUi4(response.NewUpstreamChannelID); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCableLinkConfig1) GetUpstreamPowerLevel() (NewUpstreamPowerLevel uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewUpstreamPowerLevel string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetUpstreamPowerLevel", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewUpstreamPowerLevel, err = soap.UnmarshalUi4(response.NewUpstreamPowerLevel); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCableLinkConfig1) GetBPIEncryptionEnabled() (NewBPIEncryptionEnabled bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewBPIEncryptionEnabled string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetBPIEncryptionEnabled", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewBPIEncryptionEnabled, err = soap.UnmarshalBoolean(response.NewBPIEncryptionEnabled); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCableLinkConfig1) GetConfigFile() (NewConfigFile string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewConfigFile string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetConfigFile", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewConfigFile, err = soap.UnmarshalString(response.NewConfigFile); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCableLinkConfig1) GetTFTPServer() (NewTFTPServer string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewTFTPServer string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetTFTPServer", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewTFTPServer, err = soap.UnmarshalString(response.NewTFTPServer); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
// WANCommonInterfaceConfig1 is a client for the UPnP SOAP service with URN
// "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1". See
// goupnp.ServiceClient, which contains the RootDevice and Service attributes
// that are provided for informational value.
type WANCommonInterfaceConfig1 struct {
	goupnp.ServiceClient
}
+
+// NewWANCommonInterfaceConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCommonInterfaceConfig_1); err != nil {
+ return
+ }
+ clients = newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewWANCommonInterfaceConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCommonInterfaceConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANCommonInterfaceConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANCommonInterfaceConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANCommonInterfaceConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANCommonInterfaceConfig1 {
+ clients := make([]*WANCommonInterfaceConfig1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANCommonInterfaceConfig1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *WANCommonInterfaceConfig1) SetEnabledForInternet(NewEnabledForInternet bool) (err error) {
+ // Request structure.
+ request := &struct {
+ NewEnabledForInternet string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewEnabledForInternet, err = soap.MarshalBoolean(NewEnabledForInternet); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "SetEnabledForInternet", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCommonInterfaceConfig1) GetEnabledForInternet() (NewEnabledForInternet bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewEnabledForInternet string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetEnabledForInternet", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewEnabledForInternet, err = soap.UnmarshalBoolean(response.NewEnabledForInternet); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewWANAccessType: allowed values: DSL, POTS, Cable, Ethernet
+//
+// * NewPhysicalLinkStatus: allowed values: Up, Down
+func (client *WANCommonInterfaceConfig1) GetCommonLinkProperties() (NewWANAccessType string, NewLayer1UpstreamMaxBitRate uint32, NewLayer1DownstreamMaxBitRate uint32, NewPhysicalLinkStatus string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewWANAccessType string
+
+ NewLayer1UpstreamMaxBitRate string
+
+ NewLayer1DownstreamMaxBitRate string
+
+ NewPhysicalLinkStatus string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetCommonLinkProperties", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewWANAccessType, err = soap.UnmarshalString(response.NewWANAccessType); err != nil {
+ return
+ }
+ if NewLayer1UpstreamMaxBitRate, err = soap.UnmarshalUi4(response.NewLayer1UpstreamMaxBitRate); err != nil {
+ return
+ }
+ if NewLayer1DownstreamMaxBitRate, err = soap.UnmarshalUi4(response.NewLayer1DownstreamMaxBitRate); err != nil {
+ return
+ }
+ if NewPhysicalLinkStatus, err = soap.UnmarshalString(response.NewPhysicalLinkStatus); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCommonInterfaceConfig1) GetWANAccessProvider() (NewWANAccessProvider string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewWANAccessProvider string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetWANAccessProvider", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewWANAccessProvider, err = soap.UnmarshalString(response.NewWANAccessProvider); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewMaximumActiveConnections: allowed value range: minimum=1, step=1
+func (client *WANCommonInterfaceConfig1) GetMaximumActiveConnections() (NewMaximumActiveConnections uint16, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewMaximumActiveConnections string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetMaximumActiveConnections", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewMaximumActiveConnections, err = soap.UnmarshalUi2(response.NewMaximumActiveConnections); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCommonInterfaceConfig1) GetTotalBytesSent() (NewTotalBytesSent uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewTotalBytesSent string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetTotalBytesSent", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewTotalBytesSent, err = soap.UnmarshalUi4(response.NewTotalBytesSent); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCommonInterfaceConfig1) GetTotalBytesReceived() (NewTotalBytesReceived uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewTotalBytesReceived string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetTotalBytesReceived", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewTotalBytesReceived, err = soap.UnmarshalUi4(response.NewTotalBytesReceived); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCommonInterfaceConfig1) GetTotalPacketsSent() (NewTotalPacketsSent uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewTotalPacketsSent string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetTotalPacketsSent", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewTotalPacketsSent, err = soap.UnmarshalUi4(response.NewTotalPacketsSent); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCommonInterfaceConfig1) GetTotalPacketsReceived() (NewTotalPacketsReceived uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewTotalPacketsReceived string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetTotalPacketsReceived", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewTotalPacketsReceived, err = soap.UnmarshalUi4(response.NewTotalPacketsReceived); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANCommonInterfaceConfig1) GetActiveConnection(NewActiveConnectionIndex uint16) (NewActiveConnDeviceContainer string, NewActiveConnectionServiceID string, err error) {
+ // Request structure.
+ request := &struct {
+ NewActiveConnectionIndex string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewActiveConnectionIndex, err = soap.MarshalUi2(NewActiveConnectionIndex); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewActiveConnDeviceContainer string
+
+ NewActiveConnectionServiceID string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetActiveConnection", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewActiveConnDeviceContainer, err = soap.UnmarshalString(response.NewActiveConnDeviceContainer); err != nil {
+ return
+ }
+ if NewActiveConnectionServiceID, err = soap.UnmarshalString(response.NewActiveConnectionServiceID); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
// WANDSLLinkConfig1 is a client for the UPnP SOAP service with URN
// "urn:schemas-upnp-org:service:WANDSLLinkConfig:1". See goupnp.ServiceClient,
// which contains the RootDevice and Service attributes that are provided for
// informational value.
type WANDSLLinkConfig1 struct {
	goupnp.ServiceClient
}
+
+// NewWANDSLLinkConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_WANDSLLinkConfig_1); err != nil {
+ return
+ }
+ clients = newWANDSLLinkConfig1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewWANDSLLinkConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANDSLLinkConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANDSLLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANDSLLinkConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANDSLLinkConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANDSLLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANDSLLinkConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANDSLLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANDSLLinkConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANDSLLinkConfig1 {
+ clients := make([]*WANDSLLinkConfig1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANDSLLinkConfig1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *WANDSLLinkConfig1) SetDSLLinkType(NewLinkType string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewLinkType string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewLinkType, err = soap.MarshalString(NewLinkType); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "SetDSLLinkType", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewLinkStatus: allowed values: Up, Down
+func (client *WANDSLLinkConfig1) GetDSLLinkInfo() (NewLinkType string, NewLinkStatus string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewLinkType string
+
+ NewLinkStatus string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetDSLLinkInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewLinkType, err = soap.UnmarshalString(response.NewLinkType); err != nil {
+ return
+ }
+ if NewLinkStatus, err = soap.UnmarshalString(response.NewLinkStatus); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) GetAutoConfig() (NewAutoConfig bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewAutoConfig string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetAutoConfig", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewAutoConfig, err = soap.UnmarshalBoolean(response.NewAutoConfig); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) GetModulationType() (NewModulationType string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewModulationType string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetModulationType", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewModulationType, err = soap.UnmarshalString(response.NewModulationType); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) SetDestinationAddress(NewDestinationAddress string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDestinationAddress string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDestinationAddress, err = soap.MarshalString(NewDestinationAddress); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "SetDestinationAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) GetDestinationAddress() (NewDestinationAddress string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDestinationAddress string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetDestinationAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDestinationAddress, err = soap.UnmarshalString(response.NewDestinationAddress); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) SetATMEncapsulation(NewATMEncapsulation string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewATMEncapsulation string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewATMEncapsulation, err = soap.MarshalString(NewATMEncapsulation); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "SetATMEncapsulation", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) GetATMEncapsulation() (NewATMEncapsulation string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewATMEncapsulation string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetATMEncapsulation", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewATMEncapsulation, err = soap.UnmarshalString(response.NewATMEncapsulation); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) SetFCSPreserved(NewFCSPreserved bool) (err error) {
+ // Request structure.
+ request := &struct {
+ NewFCSPreserved string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewFCSPreserved, err = soap.MarshalBoolean(NewFCSPreserved); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "SetFCSPreserved", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) GetFCSPreserved() (NewFCSPreserved bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewFCSPreserved string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetFCSPreserved", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewFCSPreserved, err = soap.UnmarshalBoolean(response.NewFCSPreserved); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
// WANEthernetLinkConfig1 is a client for the UPnP SOAP service with URN
// "urn:schemas-upnp-org:service:WANEthernetLinkConfig:1". See
// goupnp.ServiceClient, which contains the RootDevice and Service attributes
// that are provided for informational value.
type WANEthernetLinkConfig1 struct {
	goupnp.ServiceClient
}
+
+// NewWANEthernetLinkConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_WANEthernetLinkConfig_1); err != nil {
+ return
+ }
+ clients = newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewWANEthernetLinkConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANEthernetLinkConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANEthernetLinkConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANEthernetLinkConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANEthernetLinkConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANEthernetLinkConfig1 {
+ clients := make([]*WANEthernetLinkConfig1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANEthernetLinkConfig1{genericClients[i]}
+ }
+ return clients
+}
+
+//
+// Return values:
+//
+// * NewEthernetLinkStatus: allowed values: Up, Down
+func (client *WANEthernetLinkConfig1) GetEthernetLinkStatus() (NewEthernetLinkStatus string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewEthernetLinkStatus string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANEthernetLinkConfig_1, "GetEthernetLinkStatus", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewEthernetLinkStatus, err = soap.UnmarshalString(response.NewEthernetLinkStatus); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+// WANIPConnection1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANIPConnection:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANIPConnection1 struct {
+	goupnp.ServiceClient
+}
+
+// NewWANIPConnection1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_1); err != nil {
+		return
+	}
+	clients = newWANIPConnection1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewWANIPConnection1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing a
+// previously discovered service URL.
+func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) {
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANIPConnection1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANIPConnection1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing a
+// previously discovered root device.
+func NewWANIPConnection1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANIPConnection1, error) {
+	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANIPConnection_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANIPConnection1ClientsFromGenericClients(genericClients), nil
+}
+
+// newWANIPConnection1ClientsFromGenericClients wraps each generic
+// goupnp.ServiceClient in a WANIPConnection1 client.
+func newWANIPConnection1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANIPConnection1 {
+	clients := make([]*WANIPConnection1, len(genericClients))
+	for i := range genericClients {
+		clients[i] = &WANIPConnection1{genericClients[i]}
+	}
+	return clients
+}
+
+// SetConnectionType performs the SetConnectionType SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) SetConnectionType(NewConnectionType string) (err error) {
+	// Request structure.
+	request := &struct {
+		NewConnectionType string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewConnectionType, err = soap.MarshalString(NewConnectionType); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "SetConnectionType", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetConnectionTypeInfo performs the GetConnectionTypeInfo SOAP action on the
+// WANIPConnection:1 service.
+//
+// Return values:
+//
+// * NewPossibleConnectionTypes: allowed values: Unconfigured, IP_Routed, IP_Bridged
+func (client *WANIPConnection1) GetConnectionTypeInfo() (NewConnectionType string, NewPossibleConnectionTypes string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewConnectionType string
+
+		NewPossibleConnectionTypes string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetConnectionTypeInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewConnectionType, err = soap.UnmarshalString(response.NewConnectionType); err != nil {
+		return
+	}
+	if NewPossibleConnectionTypes, err = soap.UnmarshalString(response.NewPossibleConnectionTypes); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// RequestConnection performs the RequestConnection SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) RequestConnection() (err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "RequestConnection", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// RequestTermination performs the RequestTermination SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) RequestTermination() (err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "RequestTermination", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// ForceTermination performs the ForceTermination SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) ForceTermination() (err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "ForceTermination", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// SetAutoDisconnectTime performs the SetAutoDisconnectTime SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) SetAutoDisconnectTime(NewAutoDisconnectTime uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewAutoDisconnectTime string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewAutoDisconnectTime, err = soap.MarshalUi4(NewAutoDisconnectTime); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "SetAutoDisconnectTime", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// SetIdleDisconnectTime performs the SetIdleDisconnectTime SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) SetIdleDisconnectTime(NewIdleDisconnectTime uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewIdleDisconnectTime string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewIdleDisconnectTime, err = soap.MarshalUi4(NewIdleDisconnectTime); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "SetIdleDisconnectTime", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// SetWarnDisconnectDelay performs the SetWarnDisconnectDelay SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) SetWarnDisconnectDelay(NewWarnDisconnectDelay uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewWarnDisconnectDelay string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewWarnDisconnectDelay, err = soap.MarshalUi4(NewWarnDisconnectDelay); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "SetWarnDisconnectDelay", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetStatusInfo performs the GetStatusInfo SOAP action on the
+// WANIPConnection:1 service.
+//
+// Return values:
+//
+// * NewConnectionStatus: allowed values: Unconfigured, Connected, Disconnected
+//
+// * NewLastConnectionError: allowed values: ERROR_NONE
+func (client *WANIPConnection1) GetStatusInfo() (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewConnectionStatus string
+
+		NewLastConnectionError string
+
+		NewUptime string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetStatusInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewConnectionStatus, err = soap.UnmarshalString(response.NewConnectionStatus); err != nil {
+		return
+	}
+	if NewLastConnectionError, err = soap.UnmarshalString(response.NewLastConnectionError); err != nil {
+		return
+	}
+	if NewUptime, err = soap.UnmarshalUi4(response.NewUptime); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetAutoDisconnectTime performs the GetAutoDisconnectTime SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) GetAutoDisconnectTime() (NewAutoDisconnectTime uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewAutoDisconnectTime string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetAutoDisconnectTime", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewAutoDisconnectTime, err = soap.UnmarshalUi4(response.NewAutoDisconnectTime); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetIdleDisconnectTime performs the GetIdleDisconnectTime SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) GetIdleDisconnectTime() (NewIdleDisconnectTime uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewIdleDisconnectTime string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetIdleDisconnectTime", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewIdleDisconnectTime, err = soap.UnmarshalUi4(response.NewIdleDisconnectTime); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetWarnDisconnectDelay performs the GetWarnDisconnectDelay SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) GetWarnDisconnectDelay() (NewWarnDisconnectDelay uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewWarnDisconnectDelay string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetWarnDisconnectDelay", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewWarnDisconnectDelay, err = soap.UnmarshalUi4(response.NewWarnDisconnectDelay); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetNATRSIPStatus performs the GetNATRSIPStatus SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) GetNATRSIPStatus() (NewRSIPAvailable bool, NewNATEnabled bool, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewRSIPAvailable string
+
+		NewNATEnabled string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetNATRSIPStatus", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewRSIPAvailable, err = soap.UnmarshalBoolean(response.NewRSIPAvailable); err != nil {
+		return
+	}
+	if NewNATEnabled, err = soap.UnmarshalBoolean(response.NewNATEnabled); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetGenericPortMappingEntry performs the GetGenericPortMappingEntry SOAP
+// action on the WANIPConnection:1 service.
+//
+// Return values:
+//
+// * NewProtocol: allowed values: TCP, UDP
+func (client *WANIPConnection1) GetGenericPortMappingEntry(NewPortMappingIndex uint16) (NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+	// Request structure.
+	request := &struct {
+		NewPortMappingIndex string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewPortMappingIndex, err = soap.MarshalUi2(NewPortMappingIndex); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+
+		NewInternalPort string
+
+		NewInternalClient string
+
+		NewEnabled string
+
+		NewPortMappingDescription string
+
+		NewLeaseDuration string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetGenericPortMappingEntry", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewRemoteHost, err = soap.UnmarshalString(response.NewRemoteHost); err != nil {
+		return
+	}
+	if NewExternalPort, err = soap.UnmarshalUi2(response.NewExternalPort); err != nil {
+		return
+	}
+	if NewProtocol, err = soap.UnmarshalString(response.NewProtocol); err != nil {
+		return
+	}
+	if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+		return
+	}
+	if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+		return
+	}
+	if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+		return
+	}
+	if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+		return
+	}
+	if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// GetSpecificPortMappingEntry performs the GetSpecificPortMappingEntry SOAP
+// action on the WANIPConnection:1 service.
+func (client *WANIPConnection1) GetSpecificPortMappingEntry(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+	// Request structure.
+	request := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+		return
+	}
+	if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewInternalPort string
+
+		NewInternalClient string
+
+		NewEnabled string
+
+		NewPortMappingDescription string
+
+		NewLeaseDuration string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetSpecificPortMappingEntry", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+		return
+	}
+	if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+		return
+	}
+	if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+		return
+	}
+	if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+		return
+	}
+	if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// AddPortMapping performs the AddPortMapping SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) AddPortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+
+		NewInternalPort string
+
+		NewInternalClient string
+
+		NewEnabled string
+
+		NewPortMappingDescription string
+
+		NewLeaseDuration string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+		return
+	}
+	if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	if request.NewInternalPort, err = soap.MarshalUi2(NewInternalPort); err != nil {
+		return
+	}
+	if request.NewInternalClient, err = soap.MarshalString(NewInternalClient); err != nil {
+		return
+	}
+	if request.NewEnabled, err = soap.MarshalBoolean(NewEnabled); err != nil {
+		return
+	}
+	if request.NewPortMappingDescription, err = soap.MarshalString(NewPortMappingDescription); err != nil {
+		return
+	}
+	if request.NewLeaseDuration, err = soap.MarshalUi4(NewLeaseDuration); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "AddPortMapping", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// DeletePortMapping performs the DeletePortMapping SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) DeletePortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) {
+	// Request structure.
+	request := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+		return
+	}
+	if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "DeletePortMapping", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetExternalIPAddress performs the GetExternalIPAddress SOAP action on the
+// WANIPConnection:1 service.
+func (client *WANIPConnection1) GetExternalIPAddress() (NewExternalIPAddress string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewExternalIPAddress string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetExternalIPAddress", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewExternalIPAddress, err = soap.UnmarshalString(response.NewExternalIPAddress); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// WANPOTSLinkConfig1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANPOTSLinkConfig:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANPOTSLinkConfig1 struct {
+	goupnp.ServiceClient
+}
+
+// NewWANPOTSLinkConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPOTSLinkConfig_1); err != nil {
+		return
+	}
+	clients = newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewWANPOTSLinkConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing a
+// previously discovered service URL.
+func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPOTSLinkConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANPOTSLinkConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing a
+// previously discovered root device.
+func NewWANPOTSLinkConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
+	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANPOTSLinkConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// newWANPOTSLinkConfig1ClientsFromGenericClients wraps each generic
+// goupnp.ServiceClient in a WANPOTSLinkConfig1 client.
+func newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANPOTSLinkConfig1 {
+	clients := make([]*WANPOTSLinkConfig1, len(genericClients))
+	for i := range genericClients {
+		clients[i] = &WANPOTSLinkConfig1{genericClients[i]}
+	}
+	return clients
+}
+
+//
+// Arguments:
+//
+// * NewLinkType: allowed values: PPP_Dialup
+
+// SetISPInfo performs the SetISPInfo SOAP action on the
+// WANPOTSLinkConfig:1 service.
+func (client *WANPOTSLinkConfig1) SetISPInfo(NewISPPhoneNumber string, NewISPInfo string, NewLinkType string) (err error) {
+	// Request structure.
+	request := &struct {
+		NewISPPhoneNumber string
+
+		NewISPInfo string
+
+		NewLinkType string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewISPPhoneNumber, err = soap.MarshalString(NewISPPhoneNumber); err != nil {
+		return
+	}
+	if request.NewISPInfo, err = soap.MarshalString(NewISPInfo); err != nil {
+		return
+	}
+	if request.NewLinkType, err = soap.MarshalString(NewLinkType); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "SetISPInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// SetCallRetryInfo performs the SetCallRetryInfo SOAP action on the
+// WANPOTSLinkConfig:1 service.
+func (client *WANPOTSLinkConfig1) SetCallRetryInfo(NewNumberOfRetries uint32, NewDelayBetweenRetries uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewNumberOfRetries string
+
+		NewDelayBetweenRetries string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewNumberOfRetries, err = soap.MarshalUi4(NewNumberOfRetries); err != nil {
+		return
+	}
+	if request.NewDelayBetweenRetries, err = soap.MarshalUi4(NewDelayBetweenRetries); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "SetCallRetryInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetISPInfo performs the GetISPInfo SOAP action on the
+// WANPOTSLinkConfig:1 service.
+//
+// Return values:
+//
+// * NewLinkType: allowed values: PPP_Dialup
+func (client *WANPOTSLinkConfig1) GetISPInfo() (NewISPPhoneNumber string, NewISPInfo string, NewLinkType string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewISPPhoneNumber string
+
+		NewISPInfo string
+
+		NewLinkType string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetISPInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewISPPhoneNumber, err = soap.UnmarshalString(response.NewISPPhoneNumber); err != nil {
+		return
+	}
+	if NewISPInfo, err = soap.UnmarshalString(response.NewISPInfo); err != nil {
+		return
+	}
+	if NewLinkType, err = soap.UnmarshalString(response.NewLinkType); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetCallRetryInfo performs the GetCallRetryInfo SOAP action on the
+// WANPOTSLinkConfig:1 service.
+func (client *WANPOTSLinkConfig1) GetCallRetryInfo() (NewNumberOfRetries uint32, NewDelayBetweenRetries uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewNumberOfRetries string
+
+		NewDelayBetweenRetries string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetCallRetryInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewNumberOfRetries, err = soap.UnmarshalUi4(response.NewNumberOfRetries); err != nil {
+		return
+	}
+	if NewDelayBetweenRetries, err = soap.UnmarshalUi4(response.NewDelayBetweenRetries); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetFclass performs the GetFclass SOAP action on the
+// WANPOTSLinkConfig:1 service.
+func (client *WANPOTSLinkConfig1) GetFclass() (NewFclass string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewFclass string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetFclass", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewFclass, err = soap.UnmarshalString(response.NewFclass); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetDataModulationSupported performs the GetDataModulationSupported SOAP
+// action on the WANPOTSLinkConfig:1 service.
+func (client *WANPOTSLinkConfig1) GetDataModulationSupported() (NewDataModulationSupported string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewDataModulationSupported string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetDataModulationSupported", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewDataModulationSupported, err = soap.UnmarshalString(response.NewDataModulationSupported); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetDataProtocol performs the GetDataProtocol SOAP action on the
+// WANPOTSLinkConfig:1 service.
+func (client *WANPOTSLinkConfig1) GetDataProtocol() (NewDataProtocol string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewDataProtocol string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetDataProtocol", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewDataProtocol, err = soap.UnmarshalString(response.NewDataProtocol); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetDataCompression performs the GetDataCompression SOAP action on the
+// WANPOTSLinkConfig:1 service.
+func (client *WANPOTSLinkConfig1) GetDataCompression() (NewDataCompression string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewDataCompression string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetDataCompression", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewDataCompression, err = soap.UnmarshalString(response.NewDataCompression); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetPlusVTRCommandSupported performs the GetPlusVTRCommandSupported SOAP
+// action on the WANPOTSLinkConfig:1 service.
+func (client *WANPOTSLinkConfig1) GetPlusVTRCommandSupported() (NewPlusVTRCommandSupported bool, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewPlusVTRCommandSupported string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetPlusVTRCommandSupported", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewPlusVTRCommandSupported, err = soap.UnmarshalBoolean(response.NewPlusVTRCommandSupported); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// WANPPPConnection1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANPPPConnection:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANPPPConnection1 struct {
+	goupnp.ServiceClient
+}
+
+// NewWANPPPConnection1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPPPConnection_1); err != nil {
+		return
+	}
+	clients = newWANPPPConnection1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewWANPPPConnection1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing a
+// previously discovered service URL.
+func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) {
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPPPConnection_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANPPPConnection1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANPPPConnection1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANPPPConnection1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANPPPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANPPPConnection_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANPPPConnection1ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANPPPConnection1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANPPPConnection1 {
+ clients := make([]*WANPPPConnection1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANPPPConnection1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *WANPPPConnection1) SetConnectionType(NewConnectionType string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewConnectionType string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewConnectionType, err = soap.MarshalString(NewConnectionType); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "SetConnectionType", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewPossibleConnectionTypes: allowed values: Unconfigured, IP_Routed, DHCP_Spoofed, PPPoE_Bridged, PPTP_Relay, L2TP_Relay, PPPoE_Relay
+func (client *WANPPPConnection1) GetConnectionTypeInfo() (NewConnectionType string, NewPossibleConnectionTypes string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewConnectionType string
+
+ NewPossibleConnectionTypes string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetConnectionTypeInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewConnectionType, err = soap.UnmarshalString(response.NewConnectionType); err != nil {
+ return
+ }
+ if NewPossibleConnectionTypes, err = soap.UnmarshalString(response.NewPossibleConnectionTypes); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) ConfigureConnection(NewUserName string, NewPassword string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewUserName string
+
+ NewPassword string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewUserName, err = soap.MarshalString(NewUserName); err != nil {
+ return
+ }
+ if request.NewPassword, err = soap.MarshalString(NewPassword); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "ConfigureConnection", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) RequestConnection() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "RequestConnection", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) RequestTermination() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "RequestTermination", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) ForceTermination() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "ForceTermination", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) SetAutoDisconnectTime(NewAutoDisconnectTime uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewAutoDisconnectTime string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewAutoDisconnectTime, err = soap.MarshalUi4(NewAutoDisconnectTime); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "SetAutoDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) SetIdleDisconnectTime(NewIdleDisconnectTime uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewIdleDisconnectTime string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewIdleDisconnectTime, err = soap.MarshalUi4(NewIdleDisconnectTime); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "SetIdleDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) SetWarnDisconnectDelay(NewWarnDisconnectDelay uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewWarnDisconnectDelay string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewWarnDisconnectDelay, err = soap.MarshalUi4(NewWarnDisconnectDelay); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "SetWarnDisconnectDelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewConnectionStatus: allowed values: Unconfigured, Connected, Disconnected
+//
+// * NewLastConnectionError: allowed values: ERROR_NONE
+func (client *WANPPPConnection1) GetStatusInfo() (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewConnectionStatus string
+
+ NewLastConnectionError string
+
+ NewUptime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetStatusInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewConnectionStatus, err = soap.UnmarshalString(response.NewConnectionStatus); err != nil {
+ return
+ }
+ if NewLastConnectionError, err = soap.UnmarshalString(response.NewLastConnectionError); err != nil {
+ return
+ }
+ if NewUptime, err = soap.UnmarshalUi4(response.NewUptime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetLinkLayerMaxBitRates() (NewUpstreamMaxBitRate uint32, NewDownstreamMaxBitRate uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewUpstreamMaxBitRate string
+
+ NewDownstreamMaxBitRate string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetLinkLayerMaxBitRates", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewUpstreamMaxBitRate, err = soap.UnmarshalUi4(response.NewUpstreamMaxBitRate); err != nil {
+ return
+ }
+ if NewDownstreamMaxBitRate, err = soap.UnmarshalUi4(response.NewDownstreamMaxBitRate); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetPPPEncryptionProtocol() (NewPPPEncryptionProtocol string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPPPEncryptionProtocol string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetPPPEncryptionProtocol", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPPPEncryptionProtocol, err = soap.UnmarshalString(response.NewPPPEncryptionProtocol); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetPPPCompressionProtocol() (NewPPPCompressionProtocol string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPPPCompressionProtocol string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetPPPCompressionProtocol", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPPPCompressionProtocol, err = soap.UnmarshalString(response.NewPPPCompressionProtocol); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetPPPAuthenticationProtocol() (NewPPPAuthenticationProtocol string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPPPAuthenticationProtocol string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetPPPAuthenticationProtocol", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPPPAuthenticationProtocol, err = soap.UnmarshalString(response.NewPPPAuthenticationProtocol); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetUserName() (NewUserName string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewUserName string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetUserName", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewUserName, err = soap.UnmarshalString(response.NewUserName); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetPassword() (NewPassword string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPassword string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetPassword", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPassword, err = soap.UnmarshalString(response.NewPassword); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetAutoDisconnectTime() (NewAutoDisconnectTime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewAutoDisconnectTime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetAutoDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewAutoDisconnectTime, err = soap.UnmarshalUi4(response.NewAutoDisconnectTime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetIdleDisconnectTime() (NewIdleDisconnectTime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewIdleDisconnectTime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetIdleDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewIdleDisconnectTime, err = soap.UnmarshalUi4(response.NewIdleDisconnectTime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetWarnDisconnectDelay() (NewWarnDisconnectDelay uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewWarnDisconnectDelay string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetWarnDisconnectDelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewWarnDisconnectDelay, err = soap.UnmarshalUi4(response.NewWarnDisconnectDelay); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetNATRSIPStatus() (NewRSIPAvailable bool, NewNATEnabled bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewRSIPAvailable string
+
+ NewNATEnabled string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetNATRSIPStatus", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewRSIPAvailable, err = soap.UnmarshalBoolean(response.NewRSIPAvailable); err != nil {
+ return
+ }
+ if NewNATEnabled, err = soap.UnmarshalBoolean(response.NewNATEnabled); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewProtocol: allowed values: TCP, UDP
+func (client *WANPPPConnection1) GetGenericPortMappingEntry(NewPortMappingIndex uint16) (NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+ // Request structure.
+ request := &struct {
+ NewPortMappingIndex string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewPortMappingIndex, err = soap.MarshalUi2(NewPortMappingIndex); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetGenericPortMappingEntry", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewRemoteHost, err = soap.UnmarshalString(response.NewRemoteHost); err != nil {
+ return
+ }
+ if NewExternalPort, err = soap.UnmarshalUi2(response.NewExternalPort); err != nil {
+ return
+ }
+ if NewProtocol, err = soap.UnmarshalString(response.NewProtocol); err != nil {
+ return
+ }
+ if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+ return
+ }
+ if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+ return
+ }
+ if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+ return
+ }
+ if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+ return
+ }
+ if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANPPPConnection1) GetSpecificPortMappingEntry(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetSpecificPortMappingEntry", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+ return
+ }
+ if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+ return
+ }
+ if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+ return
+ }
+ if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+ return
+ }
+ if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANPPPConnection1) AddPortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ if request.NewInternalPort, err = soap.MarshalUi2(NewInternalPort); err != nil {
+ return
+ }
+ if request.NewInternalClient, err = soap.MarshalString(NewInternalClient); err != nil {
+ return
+ }
+ if request.NewEnabled, err = soap.MarshalBoolean(NewEnabled); err != nil {
+ return
+ }
+ if request.NewPortMappingDescription, err = soap.MarshalString(NewPortMappingDescription); err != nil {
+ return
+ }
+ if request.NewLeaseDuration, err = soap.MarshalUi4(NewLeaseDuration); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "AddPortMapping", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANPPPConnection1) DeletePortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "DeletePortMapping", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetExternalIPAddress() (NewExternalIPAddress string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewExternalIPAddress string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetExternalIPAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewExternalIPAddress, err = soap.UnmarshalString(response.NewExternalIPAddress); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go b/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go
new file mode 100644
index 000000000..2d67a4a2e
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go
@@ -0,0 +1,5378 @@
+// Client for UPnP Device Control Protocol Internet Gateway Device v2.
+//
+// This DCP is documented in detail at: http://upnp.org/specs/gw/UPnP-gw-InternetGatewayDevice-v2-Device.pdf
+//
+// Typically, use one of the New* functions to create clients for services.
+package internetgateway2
+
+// Generated file - do not edit by hand. See README.md
+
+import (
+ "net/url"
+ "time"
+
+ "github.com/huin/goupnp"
+ "github.com/huin/goupnp/soap"
+)
+
+// Hack to avoid Go complaining if time isn't used.
+var _ time.Time
+
+// Device URNs:
+const (
+ URN_LANDevice_1 = "urn:schemas-upnp-org:device:LANDevice:1"
+ URN_WANConnectionDevice_1 = "urn:schemas-upnp-org:device:WANConnectionDevice:1"
+ URN_WANConnectionDevice_2 = "urn:schemas-upnp-org:device:WANConnectionDevice:2"
+ URN_WANDevice_1 = "urn:schemas-upnp-org:device:WANDevice:1"
+ URN_WANDevice_2 = "urn:schemas-upnp-org:device:WANDevice:2"
+)
+
+// Service URNs:
+const (
+ URN_DeviceProtection_1 = "urn:schemas-upnp-org:service:DeviceProtection:1"
+ URN_LANHostConfigManagement_1 = "urn:schemas-upnp-org:service:LANHostConfigManagement:1"
+ URN_Layer3Forwarding_1 = "urn:schemas-upnp-org:service:Layer3Forwarding:1"
+ URN_WANCableLinkConfig_1 = "urn:schemas-upnp-org:service:WANCableLinkConfig:1"
+ URN_WANCommonInterfaceConfig_1 = "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1"
+ URN_WANDSLLinkConfig_1 = "urn:schemas-upnp-org:service:WANDSLLinkConfig:1"
+ URN_WANEthernetLinkConfig_1 = "urn:schemas-upnp-org:service:WANEthernetLinkConfig:1"
+ URN_WANIPConnection_1 = "urn:schemas-upnp-org:service:WANIPConnection:1"
+ URN_WANIPConnection_2 = "urn:schemas-upnp-org:service:WANIPConnection:2"
+ URN_WANIPv6FirewallControl_1 = "urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"
+ URN_WANPOTSLinkConfig_1 = "urn:schemas-upnp-org:service:WANPOTSLinkConfig:1"
+ URN_WANPPPConnection_1 = "urn:schemas-upnp-org:service:WANPPPConnection:1"
+)
+
+// DeviceProtection1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:DeviceProtection:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type DeviceProtection1 struct {
+ goupnp.ServiceClient
+}
+
+// NewDeviceProtection1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewDeviceProtection1Clients() (clients []*DeviceProtection1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_DeviceProtection_1); err != nil {
+ return
+ }
+ clients = newDeviceProtection1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewDeviceProtection1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewDeviceProtection1ClientsByURL(loc *url.URL) ([]*DeviceProtection1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_DeviceProtection_1)
+ if err != nil {
+ return nil, err
+ }
+ return newDeviceProtection1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewDeviceProtection1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewDeviceProtection1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*DeviceProtection1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_DeviceProtection_1)
+ if err != nil {
+ return nil, err
+ }
+ return newDeviceProtection1ClientsFromGenericClients(genericClients), nil
+}
+
+func newDeviceProtection1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*DeviceProtection1 {
+ clients := make([]*DeviceProtection1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &DeviceProtection1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *DeviceProtection1) SendSetupMessage(ProtocolType string, InMessage []byte) (OutMessage []byte, err error) {
+ // Request structure.
+ request := &struct {
+ ProtocolType string
+
+ InMessage string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.ProtocolType, err = soap.MarshalString(ProtocolType); err != nil {
+ return
+ }
+ if request.InMessage, err = soap.MarshalBinBase64(InMessage); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ OutMessage string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "SendSetupMessage", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if OutMessage, err = soap.UnmarshalBinBase64(response.OutMessage); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) GetSupportedProtocols() (ProtocolList string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ ProtocolList string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "GetSupportedProtocols", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if ProtocolList, err = soap.UnmarshalString(response.ProtocolList); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) GetAssignedRoles() (RoleList string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ RoleList string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "GetAssignedRoles", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if RoleList, err = soap.UnmarshalString(response.RoleList); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) GetRolesForAction(DeviceUDN string, ServiceId string, ActionName string) (RoleList string, RestrictedRoleList string, err error) {
+ // Request structure.
+ request := &struct {
+ DeviceUDN string
+
+ ServiceId string
+
+ ActionName string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.DeviceUDN, err = soap.MarshalString(DeviceUDN); err != nil {
+ return
+ }
+ if request.ServiceId, err = soap.MarshalString(ServiceId); err != nil {
+ return
+ }
+ if request.ActionName, err = soap.MarshalString(ActionName); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ RoleList string
+
+ RestrictedRoleList string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "GetRolesForAction", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if RoleList, err = soap.UnmarshalString(response.RoleList); err != nil {
+ return
+ }
+ if RestrictedRoleList, err = soap.UnmarshalString(response.RestrictedRoleList); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) GetUserLoginChallenge(ProtocolType string, Name string) (Salt []byte, Challenge []byte, err error) {
+ // Request structure.
+ request := &struct {
+ ProtocolType string
+
+ Name string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.ProtocolType, err = soap.MarshalString(ProtocolType); err != nil {
+ return
+ }
+ if request.Name, err = soap.MarshalString(Name); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ Salt string
+
+ Challenge string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "GetUserLoginChallenge", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if Salt, err = soap.UnmarshalBinBase64(response.Salt); err != nil {
+ return
+ }
+ if Challenge, err = soap.UnmarshalBinBase64(response.Challenge); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) UserLogin(ProtocolType string, Challenge []byte, Authenticator []byte) (err error) {
+ // Request structure.
+ request := &struct {
+ ProtocolType string
+
+ Challenge string
+
+ Authenticator string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.ProtocolType, err = soap.MarshalString(ProtocolType); err != nil {
+ return
+ }
+ if request.Challenge, err = soap.MarshalBinBase64(Challenge); err != nil {
+ return
+ }
+ if request.Authenticator, err = soap.MarshalBinBase64(Authenticator); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "UserLogin", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) UserLogout() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "UserLogout", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) GetACLData() (ACL string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ ACL string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "GetACLData", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if ACL, err = soap.UnmarshalString(response.ACL); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) AddIdentityList(IdentityList string) (IdentityListResult string, err error) {
+ // Request structure.
+ request := &struct {
+ IdentityList string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.IdentityList, err = soap.MarshalString(IdentityList); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ IdentityListResult string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "AddIdentityList", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if IdentityListResult, err = soap.UnmarshalString(response.IdentityListResult); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) RemoveIdentity(Identity string) (err error) {
+ // Request structure.
+ request := &struct {
+ Identity string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.Identity, err = soap.MarshalString(Identity); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "RemoveIdentity", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) SetUserLoginPassword(ProtocolType string, Name string, Stored []byte, Salt []byte) (err error) {
+ // Request structure.
+ request := &struct {
+ ProtocolType string
+
+ Name string
+
+ Stored string
+
+ Salt string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.ProtocolType, err = soap.MarshalString(ProtocolType); err != nil {
+ return
+ }
+ if request.Name, err = soap.MarshalString(Name); err != nil {
+ return
+ }
+ if request.Stored, err = soap.MarshalBinBase64(Stored); err != nil {
+ return
+ }
+ if request.Salt, err = soap.MarshalBinBase64(Salt); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "SetUserLoginPassword", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) AddRolesForIdentity(Identity string, RoleList string) (err error) {
+ // Request structure.
+ request := &struct {
+ Identity string
+
+ RoleList string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.Identity, err = soap.MarshalString(Identity); err != nil {
+ return
+ }
+ if request.RoleList, err = soap.MarshalString(RoleList); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "AddRolesForIdentity", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *DeviceProtection1) RemoveRolesForIdentity(Identity string, RoleList string) (err error) {
+ // Request structure.
+ request := &struct {
+ Identity string
+
+ RoleList string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.Identity, err = soap.MarshalString(Identity); err != nil {
+ return
+ }
+ if request.RoleList, err = soap.MarshalString(RoleList); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_DeviceProtection_1, "RemoveRolesForIdentity", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+// LANHostConfigManagement1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:LANHostConfigManagement:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type LANHostConfigManagement1 struct {
+ goupnp.ServiceClient
+}
+
+// NewLANHostConfigManagement1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_LANHostConfigManagement_1); err != nil {
+ return
+ }
+ clients = newLANHostConfigManagement1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewLANHostConfigManagement1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_LANHostConfigManagement_1)
+ if err != nil {
+ return nil, err
+ }
+ return newLANHostConfigManagement1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewLANHostConfigManagement1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewLANHostConfigManagement1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*LANHostConfigManagement1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_LANHostConfigManagement_1)
+ if err != nil {
+ return nil, err
+ }
+ return newLANHostConfigManagement1ClientsFromGenericClients(genericClients), nil
+}
+
+func newLANHostConfigManagement1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*LANHostConfigManagement1 {
+ clients := make([]*LANHostConfigManagement1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &LANHostConfigManagement1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *LANHostConfigManagement1) SetDHCPServerConfigurable(NewDHCPServerConfigurable bool) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDHCPServerConfigurable string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDHCPServerConfigurable, err = soap.MarshalBoolean(NewDHCPServerConfigurable); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetDHCPServerConfigurable", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetDHCPServerConfigurable() (NewDHCPServerConfigurable bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDHCPServerConfigurable string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetDHCPServerConfigurable", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDHCPServerConfigurable, err = soap.UnmarshalBoolean(response.NewDHCPServerConfigurable); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetDHCPRelay(NewDHCPRelay bool) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDHCPRelay string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDHCPRelay, err = soap.MarshalBoolean(NewDHCPRelay); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetDHCPRelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetDHCPRelay() (NewDHCPRelay bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDHCPRelay string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetDHCPRelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDHCPRelay, err = soap.UnmarshalBoolean(response.NewDHCPRelay); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetSubnetMask(NewSubnetMask string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewSubnetMask string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewSubnetMask, err = soap.MarshalString(NewSubnetMask); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetSubnetMask", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetSubnetMask() (NewSubnetMask string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewSubnetMask string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetSubnetMask", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewSubnetMask, err = soap.UnmarshalString(response.NewSubnetMask); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetIPRouter(NewIPRouters string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewIPRouters string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewIPRouters, err = soap.MarshalString(NewIPRouters); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetIPRouter", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) DeleteIPRouter(NewIPRouters string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewIPRouters string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewIPRouters, err = soap.MarshalString(NewIPRouters); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "DeleteIPRouter", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetIPRoutersList() (NewIPRouters string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewIPRouters string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetIPRoutersList", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewIPRouters, err = soap.UnmarshalString(response.NewIPRouters); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetDomainName(NewDomainName string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDomainName string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDomainName, err = soap.MarshalString(NewDomainName); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetDomainName", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetDomainName() (NewDomainName string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDomainName string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetDomainName", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDomainName, err = soap.UnmarshalString(response.NewDomainName); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetAddressRange(NewMinAddress string, NewMaxAddress string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewMinAddress string
+
+ NewMaxAddress string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewMinAddress, err = soap.MarshalString(NewMinAddress); err != nil {
+ return
+ }
+ if request.NewMaxAddress, err = soap.MarshalString(NewMaxAddress); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetAddressRange", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetAddressRange() (NewMinAddress string, NewMaxAddress string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewMinAddress string
+
+ NewMaxAddress string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetAddressRange", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewMinAddress, err = soap.UnmarshalString(response.NewMinAddress); err != nil {
+ return
+ }
+ if NewMaxAddress, err = soap.UnmarshalString(response.NewMaxAddress); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetReservedAddress(NewReservedAddresses string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewReservedAddresses string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewReservedAddresses, err = soap.MarshalString(NewReservedAddresses); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetReservedAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) DeleteReservedAddress(NewReservedAddresses string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewReservedAddresses string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewReservedAddresses, err = soap.MarshalString(NewReservedAddresses); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "DeleteReservedAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetReservedAddresses() (NewReservedAddresses string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewReservedAddresses string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetReservedAddresses", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewReservedAddresses, err = soap.UnmarshalString(response.NewReservedAddresses); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) SetDNSServer(NewDNSServers string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDNSServers string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDNSServers, err = soap.MarshalString(NewDNSServers); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "SetDNSServer", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) DeleteDNSServer(NewDNSServers string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewDNSServers string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewDNSServers, err = soap.MarshalString(NewDNSServers); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "DeleteDNSServer", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *LANHostConfigManagement1) GetDNSServers() (NewDNSServers string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDNSServers string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_LANHostConfigManagement_1, "GetDNSServers", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDNSServers, err = soap.UnmarshalString(response.NewDNSServers); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+// Layer3Forwarding1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:Layer3Forwarding:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type Layer3Forwarding1 struct {
+	goupnp.ServiceClient
+}
+
+// NewLayer3Forwarding1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_Layer3Forwarding_1); err != nil {
+		return
+	}
+	clients = newLayer3Forwarding1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewLayer3Forwarding1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) {
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_Layer3Forwarding_1)
+	if err != nil {
+		return nil, err
+	}
+	return newLayer3Forwarding1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewLayer3Forwarding1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewLayer3Forwarding1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*Layer3Forwarding1, error) {
+	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_Layer3Forwarding_1)
+	if err != nil {
+		return nil, err
+	}
+	return newLayer3Forwarding1ClientsFromGenericClients(genericClients), nil
+}
+
+// newLayer3Forwarding1ClientsFromGenericClients wraps each generic service
+// client in a Layer3Forwarding1, preserving order.
+func newLayer3Forwarding1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*Layer3Forwarding1 {
+	clients := make([]*Layer3Forwarding1, len(genericClients))
+	for i := range genericClients {
+		clients[i] = &Layer3Forwarding1{genericClients[i]}
+	}
+	return clients
+}
+
+// SetDefaultConnectionService performs the UPnP "SetDefaultConnectionService"
+// SOAP action, sending NewDefaultConnectionService as the argument. The action
+// returns no values; err reports marshalling or SOAP failures.
+func (client *Layer3Forwarding1) SetDefaultConnectionService(NewDefaultConnectionService string) (err error) {
+	// Request structure.
+	request := &struct {
+		NewDefaultConnectionService string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewDefaultConnectionService, err = soap.MarshalString(NewDefaultConnectionService); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_Layer3Forwarding_1, "SetDefaultConnectionService", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetDefaultConnectionService performs the UPnP "GetDefaultConnectionService"
+// SOAP action and returns the NewDefaultConnectionService value as a string.
+func (client *Layer3Forwarding1) GetDefaultConnectionService() (NewDefaultConnectionService string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewDefaultConnectionService string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_Layer3Forwarding_1, "GetDefaultConnectionService", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewDefaultConnectionService, err = soap.UnmarshalString(response.NewDefaultConnectionService); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// WANCableLinkConfig1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANCableLinkConfig:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANCableLinkConfig1 struct {
+	goupnp.ServiceClient
+}
+
+// NewWANCableLinkConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCableLinkConfig_1); err != nil {
+		return
+	}
+	clients = newWANCableLinkConfig1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewWANCableLinkConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) {
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCableLinkConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANCableLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANCableLinkConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANCableLinkConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANCableLinkConfig1, error) {
+	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANCableLinkConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANCableLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// newWANCableLinkConfig1ClientsFromGenericClients wraps each generic service
+// client in a WANCableLinkConfig1, preserving order.
+func newWANCableLinkConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANCableLinkConfig1 {
+	clients := make([]*WANCableLinkConfig1, len(genericClients))
+	for i := range genericClients {
+		clients[i] = &WANCableLinkConfig1{genericClients[i]}
+	}
+	return clients
+}
+
+// GetCableLinkConfigInfo performs the UPnP "GetCableLinkConfigInfo" SOAP action.
+//
+// Return values:
+//
+// * NewCableLinkConfigState: allowed values: notReady, dsSyncComplete, usParamAcquired, rangingComplete, ipComplete, todEstablished, paramTransferComplete, registrationComplete, operational, accessDenied
+//
+// * NewLinkType: allowed values: Ethernet
+func (client *WANCableLinkConfig1) GetCableLinkConfigInfo() (NewCableLinkConfigState string, NewLinkType string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewCableLinkConfigState string
+
+		NewLinkType string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetCableLinkConfigInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewCableLinkConfigState, err = soap.UnmarshalString(response.NewCableLinkConfigState); err != nil {
+		return
+	}
+	if NewLinkType, err = soap.UnmarshalString(response.NewLinkType); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetDownstreamFrequency performs the UPnP "GetDownstreamFrequency" SOAP
+// action and returns the NewDownstreamFrequency value as a uint32.
+func (client *WANCableLinkConfig1) GetDownstreamFrequency() (NewDownstreamFrequency uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewDownstreamFrequency string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetDownstreamFrequency", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewDownstreamFrequency, err = soap.UnmarshalUi4(response.NewDownstreamFrequency); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetDownstreamModulation performs the UPnP "GetDownstreamModulation" SOAP action.
+//
+// Return values:
+//
+// * NewDownstreamModulation: allowed values: 64QAM, 256QAM
+func (client *WANCableLinkConfig1) GetDownstreamModulation() (NewDownstreamModulation string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewDownstreamModulation string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetDownstreamModulation", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewDownstreamModulation, err = soap.UnmarshalString(response.NewDownstreamModulation); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetUpstreamFrequency performs the UPnP "GetUpstreamFrequency" SOAP action
+// and returns the NewUpstreamFrequency value as a uint32.
+func (client *WANCableLinkConfig1) GetUpstreamFrequency() (NewUpstreamFrequency uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewUpstreamFrequency string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetUpstreamFrequency", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewUpstreamFrequency, err = soap.UnmarshalUi4(response.NewUpstreamFrequency); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetUpstreamModulation performs the UPnP "GetUpstreamModulation" SOAP action.
+//
+// Return values:
+//
+// * NewUpstreamModulation: allowed values: QPSK, 16QAM
+func (client *WANCableLinkConfig1) GetUpstreamModulation() (NewUpstreamModulation string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewUpstreamModulation string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetUpstreamModulation", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewUpstreamModulation, err = soap.UnmarshalString(response.NewUpstreamModulation); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetUpstreamChannelID performs the UPnP "GetUpstreamChannelID" SOAP action
+// and returns the NewUpstreamChannelID value as a uint32.
+func (client *WANCableLinkConfig1) GetUpstreamChannelID() (NewUpstreamChannelID uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewUpstreamChannelID string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetUpstreamChannelID", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewUpstreamChannelID, err = soap.UnmarshalUi4(response.NewUpstreamChannelID); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetUpstreamPowerLevel performs the UPnP "GetUpstreamPowerLevel" SOAP action
+// and returns the NewUpstreamPowerLevel value as a uint32.
+func (client *WANCableLinkConfig1) GetUpstreamPowerLevel() (NewUpstreamPowerLevel uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewUpstreamPowerLevel string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetUpstreamPowerLevel", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewUpstreamPowerLevel, err = soap.UnmarshalUi4(response.NewUpstreamPowerLevel); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetBPIEncryptionEnabled performs the UPnP "GetBPIEncryptionEnabled" SOAP
+// action and returns the NewBPIEncryptionEnabled value as a bool.
+func (client *WANCableLinkConfig1) GetBPIEncryptionEnabled() (NewBPIEncryptionEnabled bool, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewBPIEncryptionEnabled string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetBPIEncryptionEnabled", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewBPIEncryptionEnabled, err = soap.UnmarshalBoolean(response.NewBPIEncryptionEnabled); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetConfigFile performs the UPnP "GetConfigFile" SOAP action and returns the
+// NewConfigFile value as a string.
+func (client *WANCableLinkConfig1) GetConfigFile() (NewConfigFile string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewConfigFile string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetConfigFile", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewConfigFile, err = soap.UnmarshalString(response.NewConfigFile); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetTFTPServer performs the UPnP "GetTFTPServer" SOAP action and returns the
+// NewTFTPServer value as a string.
+func (client *WANCableLinkConfig1) GetTFTPServer() (NewTFTPServer string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewTFTPServer string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCableLinkConfig_1, "GetTFTPServer", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewTFTPServer, err = soap.UnmarshalString(response.NewTFTPServer); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// WANCommonInterfaceConfig1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANCommonInterfaceConfig1 struct {
+	goupnp.ServiceClient
+}
+
+// NewWANCommonInterfaceConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCommonInterfaceConfig_1); err != nil {
+		return
+	}
+	clients = newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewWANCommonInterfaceConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCommonInterfaceConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANCommonInterfaceConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANCommonInterfaceConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
+	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANCommonInterfaceConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// newWANCommonInterfaceConfig1ClientsFromGenericClients wraps each generic
+// service client in a WANCommonInterfaceConfig1, preserving order.
+func newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANCommonInterfaceConfig1 {
+	clients := make([]*WANCommonInterfaceConfig1, len(genericClients))
+	for i := range genericClients {
+		clients[i] = &WANCommonInterfaceConfig1{genericClients[i]}
+	}
+	return clients
+}
+
+// SetEnabledForInternet performs the UPnP "SetEnabledForInternet" SOAP action,
+// sending NewEnabledForInternet as the argument. The action returns no values;
+// err reports marshalling or SOAP failures.
+func (client *WANCommonInterfaceConfig1) SetEnabledForInternet(NewEnabledForInternet bool) (err error) {
+	// Request structure.
+	request := &struct {
+		NewEnabledForInternet string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewEnabledForInternet, err = soap.MarshalBoolean(NewEnabledForInternet); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "SetEnabledForInternet", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetEnabledForInternet performs the UPnP "GetEnabledForInternet" SOAP action
+// and returns the NewEnabledForInternet value as a bool.
+func (client *WANCommonInterfaceConfig1) GetEnabledForInternet() (NewEnabledForInternet bool, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewEnabledForInternet string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetEnabledForInternet", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewEnabledForInternet, err = soap.UnmarshalBoolean(response.NewEnabledForInternet); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetCommonLinkProperties performs the UPnP "GetCommonLinkProperties" SOAP action.
+//
+// Return values:
+//
+// * NewWANAccessType: allowed values: DSL, POTS, Cable, Ethernet
+//
+// * NewPhysicalLinkStatus: allowed values: Up, Down
+func (client *WANCommonInterfaceConfig1) GetCommonLinkProperties() (NewWANAccessType string, NewLayer1UpstreamMaxBitRate uint32, NewLayer1DownstreamMaxBitRate uint32, NewPhysicalLinkStatus string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewWANAccessType string
+
+		NewLayer1UpstreamMaxBitRate string
+
+		NewLayer1DownstreamMaxBitRate string
+
+		NewPhysicalLinkStatus string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetCommonLinkProperties", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewWANAccessType, err = soap.UnmarshalString(response.NewWANAccessType); err != nil {
+		return
+	}
+	if NewLayer1UpstreamMaxBitRate, err = soap.UnmarshalUi4(response.NewLayer1UpstreamMaxBitRate); err != nil {
+		return
+	}
+	if NewLayer1DownstreamMaxBitRate, err = soap.UnmarshalUi4(response.NewLayer1DownstreamMaxBitRate); err != nil {
+		return
+	}
+	if NewPhysicalLinkStatus, err = soap.UnmarshalString(response.NewPhysicalLinkStatus); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetWANAccessProvider performs the UPnP "GetWANAccessProvider" SOAP action
+// and returns the NewWANAccessProvider value as a string.
+func (client *WANCommonInterfaceConfig1) GetWANAccessProvider() (NewWANAccessProvider string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewWANAccessProvider string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetWANAccessProvider", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewWANAccessProvider, err = soap.UnmarshalString(response.NewWANAccessProvider); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetMaximumActiveConnections performs the UPnP "GetMaximumActiveConnections" SOAP action.
+//
+// Return values:
+//
+// * NewMaximumActiveConnections: allowed value range: minimum=1, step=1
+func (client *WANCommonInterfaceConfig1) GetMaximumActiveConnections() (NewMaximumActiveConnections uint16, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewMaximumActiveConnections string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetMaximumActiveConnections", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewMaximumActiveConnections, err = soap.UnmarshalUi2(response.NewMaximumActiveConnections); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetTotalBytesSent performs the UPnP "GetTotalBytesSent" SOAP action and
+// returns the NewTotalBytesSent value as a uint32.
+func (client *WANCommonInterfaceConfig1) GetTotalBytesSent() (NewTotalBytesSent uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewTotalBytesSent string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetTotalBytesSent", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewTotalBytesSent, err = soap.UnmarshalUi4(response.NewTotalBytesSent); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetTotalBytesReceived performs the UPnP "GetTotalBytesReceived" SOAP action
+// and returns the NewTotalBytesReceived value as a uint32.
+func (client *WANCommonInterfaceConfig1) GetTotalBytesReceived() (NewTotalBytesReceived uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewTotalBytesReceived string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetTotalBytesReceived", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewTotalBytesReceived, err = soap.UnmarshalUi4(response.NewTotalBytesReceived); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetTotalPacketsSent performs the UPnP "GetTotalPacketsSent" SOAP action and
+// returns the NewTotalPacketsSent value as a uint32.
+func (client *WANCommonInterfaceConfig1) GetTotalPacketsSent() (NewTotalPacketsSent uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewTotalPacketsSent string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetTotalPacketsSent", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewTotalPacketsSent, err = soap.UnmarshalUi4(response.NewTotalPacketsSent); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetTotalPacketsReceived performs the UPnP "GetTotalPacketsReceived" SOAP
+// action and returns the NewTotalPacketsReceived value as a uint32.
+func (client *WANCommonInterfaceConfig1) GetTotalPacketsReceived() (NewTotalPacketsReceived uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewTotalPacketsReceived string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetTotalPacketsReceived", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewTotalPacketsReceived, err = soap.UnmarshalUi4(response.NewTotalPacketsReceived); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetActiveConnection performs the UPnP "GetActiveConnection" SOAP action,
+// sending NewActiveConnectionIndex as the argument, and returns the
+// NewActiveConnDeviceContainer and NewActiveConnectionServiceID values as
+// strings.
+func (client *WANCommonInterfaceConfig1) GetActiveConnection(NewActiveConnectionIndex uint16) (NewActiveConnDeviceContainer string, NewActiveConnectionServiceID string, err error) {
+	// Request structure.
+	request := &struct {
+		NewActiveConnectionIndex string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewActiveConnectionIndex, err = soap.MarshalUi2(NewActiveConnectionIndex); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewActiveConnDeviceContainer string
+
+		NewActiveConnectionServiceID string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANCommonInterfaceConfig_1, "GetActiveConnection", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewActiveConnDeviceContainer, err = soap.UnmarshalString(response.NewActiveConnDeviceContainer); err != nil {
+		return
+	}
+	if NewActiveConnectionServiceID, err = soap.UnmarshalString(response.NewActiveConnectionServiceID); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// WANDSLLinkConfig1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANDSLLinkConfig:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANDSLLinkConfig1 struct {
+	goupnp.ServiceClient
+}
+
+// NewWANDSLLinkConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_WANDSLLinkConfig_1); err != nil {
+		return
+	}
+	clients = newWANDSLLinkConfig1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewWANDSLLinkConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) {
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANDSLLinkConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANDSLLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANDSLLinkConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANDSLLinkConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANDSLLinkConfig1, error) {
+	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANDSLLinkConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANDSLLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// newWANDSLLinkConfig1ClientsFromGenericClients wraps each generic service
+// client in a WANDSLLinkConfig1, preserving order.
+func newWANDSLLinkConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANDSLLinkConfig1 {
+	clients := make([]*WANDSLLinkConfig1, len(genericClients))
+	for i := range genericClients {
+		clients[i] = &WANDSLLinkConfig1{genericClients[i]}
+	}
+	return clients
+}
+
+// SetDSLLinkType performs the UPnP "SetDSLLinkType" SOAP action, sending
+// NewLinkType as the argument. The action returns no values; err reports
+// marshalling or SOAP failures.
+func (client *WANDSLLinkConfig1) SetDSLLinkType(NewLinkType string) (err error) {
+	// Request structure.
+	request := &struct {
+		NewLinkType string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewLinkType, err = soap.MarshalString(NewLinkType); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "SetDSLLinkType", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetDSLLinkInfo performs the UPnP "GetDSLLinkInfo" SOAP action.
+//
+// Return values:
+//
+// * NewLinkStatus: allowed values: Up, Down
+func (client *WANDSLLinkConfig1) GetDSLLinkInfo() (NewLinkType string, NewLinkStatus string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewLinkType string
+
+		NewLinkStatus string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetDSLLinkInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewLinkType, err = soap.UnmarshalString(response.NewLinkType); err != nil {
+		return
+	}
+	if NewLinkStatus, err = soap.UnmarshalString(response.NewLinkStatus); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetAutoConfig performs the UPnP "GetAutoConfig" SOAP action and returns the
+// NewAutoConfig value as a bool.
+func (client *WANDSLLinkConfig1) GetAutoConfig() (NewAutoConfig bool, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewAutoConfig string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetAutoConfig", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewAutoConfig, err = soap.UnmarshalBoolean(response.NewAutoConfig); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetModulationType performs the UPnP "GetModulationType" SOAP action and
+// returns the NewModulationType value as a string.
+func (client *WANDSLLinkConfig1) GetModulationType() (NewModulationType string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewModulationType string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetModulationType", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewModulationType, err = soap.UnmarshalString(response.NewModulationType); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// SetDestinationAddress performs the UPnP "SetDestinationAddress" SOAP action,
+// sending NewDestinationAddress as the argument. The action returns no values;
+// err reports marshalling or SOAP failures.
+func (client *WANDSLLinkConfig1) SetDestinationAddress(NewDestinationAddress string) (err error) {
+	// Request structure.
+	request := &struct {
+		NewDestinationAddress string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewDestinationAddress, err = soap.MarshalString(NewDestinationAddress); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "SetDestinationAddress", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+func (client *WANDSLLinkConfig1) GetDestinationAddress() (NewDestinationAddress string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDestinationAddress string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetDestinationAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDestinationAddress, err = soap.UnmarshalString(response.NewDestinationAddress); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) SetATMEncapsulation(NewATMEncapsulation string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewATMEncapsulation string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewATMEncapsulation, err = soap.MarshalString(NewATMEncapsulation); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "SetATMEncapsulation", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) GetATMEncapsulation() (NewATMEncapsulation string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewATMEncapsulation string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetATMEncapsulation", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewATMEncapsulation, err = soap.UnmarshalString(response.NewATMEncapsulation); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) SetFCSPreserved(NewFCSPreserved bool) (err error) {
+ // Request structure.
+ request := &struct {
+ NewFCSPreserved string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewFCSPreserved, err = soap.MarshalBoolean(NewFCSPreserved); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "SetFCSPreserved", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANDSLLinkConfig1) GetFCSPreserved() (NewFCSPreserved bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewFCSPreserved string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANDSLLinkConfig_1, "GetFCSPreserved", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewFCSPreserved, err = soap.UnmarshalBoolean(response.NewFCSPreserved); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+// WANEthernetLinkConfig1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANEthernetLinkConfig:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANEthernetLinkConfig1 struct {
+ goupnp.ServiceClient
+}
+
+// NewWANEthernetLinkConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_WANEthernetLinkConfig_1); err != nil {
+ return
+ }
+ clients = newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewWANEthernetLinkConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANEthernetLinkConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANEthernetLinkConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANEthernetLinkConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANEthernetLinkConfig_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANEthernetLinkConfig1 {
+ clients := make([]*WANEthernetLinkConfig1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANEthernetLinkConfig1{genericClients[i]}
+ }
+ return clients
+}
+
+//
+// Return values:
+//
+// * NewEthernetLinkStatus: allowed values: Up, Down
+func (client *WANEthernetLinkConfig1) GetEthernetLinkStatus() (NewEthernetLinkStatus string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewEthernetLinkStatus string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANEthernetLinkConfig_1, "GetEthernetLinkStatus", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewEthernetLinkStatus, err = soap.UnmarshalString(response.NewEthernetLinkStatus); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+// WANIPConnection1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANIPConnection:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANIPConnection1 struct {
+ goupnp.ServiceClient
+}
+
+// NewWANIPConnection1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_1); err != nil {
+ return
+ }
+ clients = newWANIPConnection1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewWANIPConnection1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANIPConnection1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANIPConnection1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANIPConnection1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANIPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANIPConnection_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANIPConnection1ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANIPConnection1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANIPConnection1 {
+ clients := make([]*WANIPConnection1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANIPConnection1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *WANIPConnection1) SetConnectionType(NewConnectionType string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewConnectionType string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewConnectionType, err = soap.MarshalString(NewConnectionType); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "SetConnectionType", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewPossibleConnectionTypes: allowed values: Unconfigured, IP_Routed, IP_Bridged
+func (client *WANIPConnection1) GetConnectionTypeInfo() (NewConnectionType string, NewPossibleConnectionTypes string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewConnectionType string
+
+ NewPossibleConnectionTypes string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetConnectionTypeInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewConnectionType, err = soap.UnmarshalString(response.NewConnectionType); err != nil {
+ return
+ }
+ if NewPossibleConnectionTypes, err = soap.UnmarshalString(response.NewPossibleConnectionTypes); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) RequestConnection() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "RequestConnection", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) RequestTermination() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "RequestTermination", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) ForceTermination() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "ForceTermination", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) SetAutoDisconnectTime(NewAutoDisconnectTime uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewAutoDisconnectTime string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewAutoDisconnectTime, err = soap.MarshalUi4(NewAutoDisconnectTime); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "SetAutoDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) SetIdleDisconnectTime(NewIdleDisconnectTime uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewIdleDisconnectTime string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewIdleDisconnectTime, err = soap.MarshalUi4(NewIdleDisconnectTime); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "SetIdleDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) SetWarnDisconnectDelay(NewWarnDisconnectDelay uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewWarnDisconnectDelay string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewWarnDisconnectDelay, err = soap.MarshalUi4(NewWarnDisconnectDelay); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "SetWarnDisconnectDelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewConnectionStatus: allowed values: Unconfigured, Connected, Disconnected
+//
+// * NewLastConnectionError: allowed values: ERROR_NONE
+func (client *WANIPConnection1) GetStatusInfo() (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewConnectionStatus string
+
+ NewLastConnectionError string
+
+ NewUptime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetStatusInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewConnectionStatus, err = soap.UnmarshalString(response.NewConnectionStatus); err != nil {
+ return
+ }
+ if NewLastConnectionError, err = soap.UnmarshalString(response.NewLastConnectionError); err != nil {
+ return
+ }
+ if NewUptime, err = soap.UnmarshalUi4(response.NewUptime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) GetAutoDisconnectTime() (NewAutoDisconnectTime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewAutoDisconnectTime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetAutoDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewAutoDisconnectTime, err = soap.UnmarshalUi4(response.NewAutoDisconnectTime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) GetIdleDisconnectTime() (NewIdleDisconnectTime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewIdleDisconnectTime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetIdleDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewIdleDisconnectTime, err = soap.UnmarshalUi4(response.NewIdleDisconnectTime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) GetWarnDisconnectDelay() (NewWarnDisconnectDelay uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewWarnDisconnectDelay string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetWarnDisconnectDelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewWarnDisconnectDelay, err = soap.UnmarshalUi4(response.NewWarnDisconnectDelay); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) GetNATRSIPStatus() (NewRSIPAvailable bool, NewNATEnabled bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewRSIPAvailable string
+
+ NewNATEnabled string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetNATRSIPStatus", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewRSIPAvailable, err = soap.UnmarshalBoolean(response.NewRSIPAvailable); err != nil {
+ return
+ }
+ if NewNATEnabled, err = soap.UnmarshalBoolean(response.NewNATEnabled); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewProtocol: allowed values: TCP, UDP
+func (client *WANIPConnection1) GetGenericPortMappingEntry(NewPortMappingIndex uint16) (NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+ // Request structure.
+ request := &struct {
+ NewPortMappingIndex string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewPortMappingIndex, err = soap.MarshalUi2(NewPortMappingIndex); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetGenericPortMappingEntry", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewRemoteHost, err = soap.UnmarshalString(response.NewRemoteHost); err != nil {
+ return
+ }
+ if NewExternalPort, err = soap.UnmarshalUi2(response.NewExternalPort); err != nil {
+ return
+ }
+ if NewProtocol, err = soap.UnmarshalString(response.NewProtocol); err != nil {
+ return
+ }
+ if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+ return
+ }
+ if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+ return
+ }
+ if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+ return
+ }
+ if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+ return
+ }
+ if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANIPConnection1) GetSpecificPortMappingEntry(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetSpecificPortMappingEntry", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+ return
+ }
+ if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+ return
+ }
+ if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+ return
+ }
+ if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+ return
+ }
+ if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANIPConnection1) AddPortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ if request.NewInternalPort, err = soap.MarshalUi2(NewInternalPort); err != nil {
+ return
+ }
+ if request.NewInternalClient, err = soap.MarshalString(NewInternalClient); err != nil {
+ return
+ }
+ if request.NewEnabled, err = soap.MarshalBoolean(NewEnabled); err != nil {
+ return
+ }
+ if request.NewPortMappingDescription, err = soap.MarshalString(NewPortMappingDescription); err != nil {
+ return
+ }
+ if request.NewLeaseDuration, err = soap.MarshalUi4(NewLeaseDuration); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "AddPortMapping", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANIPConnection1) DeletePortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "DeletePortMapping", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection1) GetExternalIPAddress() (NewExternalIPAddress string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewExternalIPAddress string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_1, "GetExternalIPAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewExternalIPAddress, err = soap.UnmarshalString(response.NewExternalIPAddress); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+// WANIPConnection2 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANIPConnection:2". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANIPConnection2 struct {
+ goupnp.ServiceClient
+}
+
+// NewWANIPConnection2Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANIPConnection2Clients() (clients []*WANIPConnection2, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_2); err != nil {
+ return
+ }
+ clients = newWANIPConnection2ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewWANIPConnection2ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANIPConnection2ClientsByURL(loc *url.URL) ([]*WANIPConnection2, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_2)
+ if err != nil {
+ return nil, err
+ }
+ return newWANIPConnection2ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANIPConnection2ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANIPConnection2ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANIPConnection2, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANIPConnection_2)
+ if err != nil {
+ return nil, err
+ }
+ return newWANIPConnection2ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANIPConnection2ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANIPConnection2 {
+ clients := make([]*WANIPConnection2, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANIPConnection2{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *WANIPConnection2) SetConnectionType(NewConnectionType string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewConnectionType string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewConnectionType, err = soap.MarshalString(NewConnectionType); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "SetConnectionType", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection2) GetConnectionTypeInfo() (NewConnectionType string, NewPossibleConnectionTypes string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewConnectionType string
+
+ NewPossibleConnectionTypes string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetConnectionTypeInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewConnectionType, err = soap.UnmarshalString(response.NewConnectionType); err != nil {
+ return
+ }
+ if NewPossibleConnectionTypes, err = soap.UnmarshalString(response.NewPossibleConnectionTypes); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANIPConnection2) RequestConnection() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "RequestConnection", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+// RequestTermination performs the "RequestTermination" SOAP action (no
+// input or output arguments).
+func (client *WANIPConnection2) RequestTermination() (err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "RequestTermination", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// ForceTermination performs the "ForceTermination" SOAP action (no input or
+// output arguments).
+func (client *WANIPConnection2) ForceTermination() (err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "ForceTermination", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// SetAutoDisconnectTime performs the "SetAutoDisconnectTime" SOAP action,
+// marshalling the uint32 argument to its SOAP ui4 string form.
+func (client *WANIPConnection2) SetAutoDisconnectTime(NewAutoDisconnectTime uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewAutoDisconnectTime string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewAutoDisconnectTime, err = soap.MarshalUi4(NewAutoDisconnectTime); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "SetAutoDisconnectTime", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// SetIdleDisconnectTime performs the "SetIdleDisconnectTime" SOAP action.
+func (client *WANIPConnection2) SetIdleDisconnectTime(NewIdleDisconnectTime uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewIdleDisconnectTime string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewIdleDisconnectTime, err = soap.MarshalUi4(NewIdleDisconnectTime); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "SetIdleDisconnectTime", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// SetWarnDisconnectDelay performs the "SetWarnDisconnectDelay" SOAP action.
+func (client *WANIPConnection2) SetWarnDisconnectDelay(NewWarnDisconnectDelay uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewWarnDisconnectDelay string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewWarnDisconnectDelay, err = soap.MarshalUi4(NewWarnDisconnectDelay); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "SetWarnDisconnectDelay", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Return values:
+//
+// * NewConnectionStatus: allowed values: Unconfigured, Connecting, Connected, PendingDisconnect, Disconnecting, Disconnected
+//
+// * NewLastConnectionError: allowed values: ERROR_NONE, ERROR_COMMAND_ABORTED, ERROR_NOT_ENABLED_FOR_INTERNET, ERROR_USER_DISCONNECT, ERROR_ISP_DISCONNECT, ERROR_IDLE_DISCONNECT, ERROR_FORCED_DISCONNECT, ERROR_NO_CARRIER, ERROR_IP_CONFIGURATION, ERROR_UNKNOWN
+// GetStatusInfo performs the "GetStatusInfo" SOAP action; see the comment
+// block above for the allowed values of the string return arguments.
+func (client *WANIPConnection2) GetStatusInfo() (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewConnectionStatus string
+
+		NewLastConnectionError string
+
+		NewUptime string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetStatusInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewConnectionStatus, err = soap.UnmarshalString(response.NewConnectionStatus); err != nil {
+		return
+	}
+	if NewLastConnectionError, err = soap.UnmarshalString(response.NewLastConnectionError); err != nil {
+		return
+	}
+	if NewUptime, err = soap.UnmarshalUi4(response.NewUptime); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetAutoDisconnectTime performs the "GetAutoDisconnectTime" SOAP action.
+func (client *WANIPConnection2) GetAutoDisconnectTime() (NewAutoDisconnectTime uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewAutoDisconnectTime string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetAutoDisconnectTime", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewAutoDisconnectTime, err = soap.UnmarshalUi4(response.NewAutoDisconnectTime); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetIdleDisconnectTime performs the "GetIdleDisconnectTime" SOAP action.
+func (client *WANIPConnection2) GetIdleDisconnectTime() (NewIdleDisconnectTime uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewIdleDisconnectTime string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetIdleDisconnectTime", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewIdleDisconnectTime, err = soap.UnmarshalUi4(response.NewIdleDisconnectTime); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetWarnDisconnectDelay performs the "GetWarnDisconnectDelay" SOAP action.
+func (client *WANIPConnection2) GetWarnDisconnectDelay() (NewWarnDisconnectDelay uint32, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewWarnDisconnectDelay string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetWarnDisconnectDelay", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewWarnDisconnectDelay, err = soap.UnmarshalUi4(response.NewWarnDisconnectDelay); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetNATRSIPStatus performs the "GetNATRSIPStatus" SOAP action, reporting
+// whether RSIP is available and NAT is enabled on the gateway.
+func (client *WANIPConnection2) GetNATRSIPStatus() (NewRSIPAvailable bool, NewNATEnabled bool, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewRSIPAvailable string
+
+		NewNATEnabled string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetNATRSIPStatus", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewRSIPAvailable, err = soap.UnmarshalBoolean(response.NewRSIPAvailable); err != nil {
+		return
+	}
+	if NewNATEnabled, err = soap.UnmarshalBoolean(response.NewNATEnabled); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Return values:
+//
+// * NewProtocol: allowed values: TCP, UDP
+// GetGenericPortMappingEntry performs the "GetGenericPortMappingEntry" SOAP
+// action, fetching the port-mapping table entry at the given index.
+func (client *WANIPConnection2) GetGenericPortMappingEntry(NewPortMappingIndex uint16) (NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+	// Request structure.
+	request := &struct {
+		NewPortMappingIndex string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewPortMappingIndex, err = soap.MarshalUi2(NewPortMappingIndex); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+
+		NewInternalPort string
+
+		NewInternalClient string
+
+		NewEnabled string
+
+		NewPortMappingDescription string
+
+		NewLeaseDuration string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetGenericPortMappingEntry", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewRemoteHost, err = soap.UnmarshalString(response.NewRemoteHost); err != nil {
+		return
+	}
+	if NewExternalPort, err = soap.UnmarshalUi2(response.NewExternalPort); err != nil {
+		return
+	}
+	if NewProtocol, err = soap.UnmarshalString(response.NewProtocol); err != nil {
+		return
+	}
+	if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+		return
+	}
+	if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+		return
+	}
+	if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+		return
+	}
+	if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+		return
+	}
+	if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// GetSpecificPortMappingEntry performs the "GetSpecificPortMappingEntry"
+// SOAP action, looking up a mapping by (remote host, external port, protocol).
+func (client *WANIPConnection2) GetSpecificPortMappingEntry(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+	// Request structure.
+	request := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+		return
+	}
+	if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewInternalPort string
+
+		NewInternalClient string
+
+		NewEnabled string
+
+		NewPortMappingDescription string
+
+		NewLeaseDuration string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetSpecificPortMappingEntry", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+		return
+	}
+	if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+		return
+	}
+	if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+		return
+	}
+	if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+		return
+	}
+	if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// AddPortMapping performs the "AddPortMapping" SOAP action, creating a new
+// NAT port mapping on the gateway (no output arguments).
+func (client *WANIPConnection2) AddPortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+
+		NewInternalPort string
+
+		NewInternalClient string
+
+		NewEnabled string
+
+		NewPortMappingDescription string
+
+		NewLeaseDuration string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+		return
+	}
+	if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	if request.NewInternalPort, err = soap.MarshalUi2(NewInternalPort); err != nil {
+		return
+	}
+	if request.NewInternalClient, err = soap.MarshalString(NewInternalClient); err != nil {
+		return
+	}
+	if request.NewEnabled, err = soap.MarshalBoolean(NewEnabled); err != nil {
+		return
+	}
+	if request.NewPortMappingDescription, err = soap.MarshalString(NewPortMappingDescription); err != nil {
+		return
+	}
+	if request.NewLeaseDuration, err = soap.MarshalUi4(NewLeaseDuration); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "AddPortMapping", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// DeletePortMapping performs the "DeletePortMapping" SOAP action, removing
+// the mapping identified by (remote host, external port, protocol).
+func (client *WANIPConnection2) DeletePortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) {
+	// Request structure.
+	request := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+		return
+	}
+	if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "DeletePortMapping", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// DeletePortMappingRange performs the "DeletePortMappingRange" SOAP action,
+// removing all mappings in [NewStartPort, NewEndPort] for the given protocol.
+func (client *WANIPConnection2) DeletePortMappingRange(NewStartPort uint16, NewEndPort uint16, NewProtocol string, NewManage bool) (err error) {
+	// Request structure.
+	request := &struct {
+		NewStartPort string
+
+		NewEndPort string
+
+		NewProtocol string
+
+		NewManage string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewStartPort, err = soap.MarshalUi2(NewStartPort); err != nil {
+		return
+	}
+	if request.NewEndPort, err = soap.MarshalUi2(NewEndPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	if request.NewManage, err = soap.MarshalBoolean(NewManage); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "DeletePortMappingRange", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetExternalIPAddress performs the "GetExternalIPAddress" SOAP action,
+// returning the gateway's current external (WAN-side) IP address.
+func (client *WANIPConnection2) GetExternalIPAddress() (NewExternalIPAddress string, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewExternalIPAddress string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetExternalIPAddress", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewExternalIPAddress, err = soap.UnmarshalString(response.NewExternalIPAddress); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// GetListOfPortMappings performs the "GetListOfPortMappings" SOAP action.
+// NewPortListing is returned as a raw string (an XML port-listing document
+// per the WANIPConnection:2 spec — not parsed here).
+func (client *WANIPConnection2) GetListOfPortMappings(NewStartPort uint16, NewEndPort uint16, NewProtocol string, NewManage bool, NewNumberOfPorts uint16) (NewPortListing string, err error) {
+	// Request structure.
+	request := &struct {
+		NewStartPort string
+
+		NewEndPort string
+
+		NewProtocol string
+
+		NewManage string
+
+		NewNumberOfPorts string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewStartPort, err = soap.MarshalUi2(NewStartPort); err != nil {
+		return
+	}
+	if request.NewEndPort, err = soap.MarshalUi2(NewEndPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	if request.NewManage, err = soap.MarshalBoolean(NewManage); err != nil {
+		return
+	}
+	if request.NewNumberOfPorts, err = soap.MarshalUi2(NewNumberOfPorts); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewPortListing string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "GetListOfPortMappings", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewPortListing, err = soap.UnmarshalString(response.NewPortListing); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+// AddAnyPortMapping performs the "AddAnyPortMapping" SOAP action. Unlike
+// AddPortMapping, the gateway may choose a different external port and
+// reports the one actually reserved in NewReservedPort.
+func (client *WANIPConnection2) AddAnyPortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32) (NewReservedPort uint16, err error) {
+	// Request structure.
+	request := &struct {
+		NewRemoteHost string
+
+		NewExternalPort string
+
+		NewProtocol string
+
+		NewInternalPort string
+
+		NewInternalClient string
+
+		NewEnabled string
+
+		NewPortMappingDescription string
+
+		NewLeaseDuration string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+		return
+	}
+	if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+		return
+	}
+	if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+		return
+	}
+	if request.NewInternalPort, err = soap.MarshalUi2(NewInternalPort); err != nil {
+		return
+	}
+	if request.NewInternalClient, err = soap.MarshalString(NewInternalClient); err != nil {
+		return
+	}
+	if request.NewEnabled, err = soap.MarshalBoolean(NewEnabled); err != nil {
+		return
+	}
+	if request.NewPortMappingDescription, err = soap.MarshalString(NewPortMappingDescription); err != nil {
+		return
+	}
+	if request.NewLeaseDuration, err = soap.MarshalUi4(NewLeaseDuration); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		NewReservedPort string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPConnection_2, "AddAnyPortMapping", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if NewReservedPort, err = soap.UnmarshalUi2(response.NewReservedPort); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// WANIPv6FirewallControl1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANIPv6FirewallControl:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANIPv6FirewallControl1 struct {
+	goupnp.ServiceClient // embedded generic client; provides the SOAPClient used by the action methods
+}
+
+// NewWANIPv6FirewallControl1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANIPv6FirewallControl1Clients() (clients []*WANIPv6FirewallControl1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPv6FirewallControl_1); err != nil {
+		return
+	}
+	// Wrap each discovered generic client in the service-specific type.
+	clients = newWANIPv6FirewallControl1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewWANIPv6FirewallControl1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANIPv6FirewallControl1ClientsByURL(loc *url.URL) ([]*WANIPv6FirewallControl1, error) {
+	// Probe the service at the given URL, then wrap the generic clients.
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPv6FirewallControl_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANIPv6FirewallControl1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANIPv6FirewallControl1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANIPv6FirewallControl1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANIPv6FirewallControl1, error) {
+	// Locate matching service instances within the already-discovered root device.
+	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANIPv6FirewallControl_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANIPv6FirewallControl1ClientsFromGenericClients(genericClients), nil
+}
+
+// newWANIPv6FirewallControl1ClientsFromGenericClients wraps each generic
+// goupnp.ServiceClient in the service-specific WANIPv6FirewallControl1 type.
+func newWANIPv6FirewallControl1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANIPv6FirewallControl1 {
+	clients := make([]*WANIPv6FirewallControl1, len(genericClients))
+	for i := range genericClients {
+		clients[i] = &WANIPv6FirewallControl1{genericClients[i]}
+	}
+	return clients
+}
+
+// GetFirewallStatus performs the "GetFirewallStatus" SOAP action, reporting
+// whether the IPv6 firewall is enabled and whether inbound pinholes are allowed.
+func (client *WANIPv6FirewallControl1) GetFirewallStatus() (FirewallEnabled bool, InboundPinholeAllowed bool, err error) {
+	// Request structure.
+	request := interface{}(nil)
+	// BEGIN Marshal arguments into request.
+
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		FirewallEnabled string
+
+		InboundPinholeAllowed string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPv6FirewallControl_1, "GetFirewallStatus", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if FirewallEnabled, err = soap.UnmarshalBoolean(response.FirewallEnabled); err != nil {
+		return
+	}
+	if InboundPinholeAllowed, err = soap.UnmarshalBoolean(response.InboundPinholeAllowed); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetOutboundPinholeTimeout performs the "GetOutboundPinholeTimeout" SOAP
+// action for the given 5-tuple. Note Protocol is a uint16 here (IANA protocol
+// number), unlike the TCP/UDP strings used by the WANIPConnection actions.
+func (client *WANIPv6FirewallControl1) GetOutboundPinholeTimeout(RemoteHost string, RemotePort uint16, InternalClient string, InternalPort uint16, Protocol uint16) (OutboundPinholeTimeout uint32, err error) {
+	// Request structure.
+	request := &struct {
+		RemoteHost string
+
+		RemotePort string
+
+		InternalClient string
+
+		InternalPort string
+
+		Protocol string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.RemoteHost, err = soap.MarshalString(RemoteHost); err != nil {
+		return
+	}
+	if request.RemotePort, err = soap.MarshalUi2(RemotePort); err != nil {
+		return
+	}
+	if request.InternalClient, err = soap.MarshalString(InternalClient); err != nil {
+		return
+	}
+	if request.InternalPort, err = soap.MarshalUi2(InternalPort); err != nil {
+		return
+	}
+	if request.Protocol, err = soap.MarshalUi2(Protocol); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		OutboundPinholeTimeout string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPv6FirewallControl_1, "GetOutboundPinholeTimeout", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if OutboundPinholeTimeout, err = soap.UnmarshalUi4(response.OutboundPinholeTimeout); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * LeaseTime: allowed value range: minimum=1, maximum=86400
+
+// AddPinhole performs the "AddPinhole" SOAP action, opening an inbound IPv6
+// firewall pinhole and returning the gateway-assigned UniqueID for it.
+func (client *WANIPv6FirewallControl1) AddPinhole(RemoteHost string, RemotePort uint16, InternalClient string, InternalPort uint16, Protocol uint16, LeaseTime uint32) (UniqueID uint16, err error) {
+	// Request structure.
+	request := &struct {
+		RemoteHost string
+
+		RemotePort string
+
+		InternalClient string
+
+		InternalPort string
+
+		Protocol string
+
+		LeaseTime string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.RemoteHost, err = soap.MarshalString(RemoteHost); err != nil {
+		return
+	}
+	if request.RemotePort, err = soap.MarshalUi2(RemotePort); err != nil {
+		return
+	}
+	if request.InternalClient, err = soap.MarshalString(InternalClient); err != nil {
+		return
+	}
+	if request.InternalPort, err = soap.MarshalUi2(InternalPort); err != nil {
+		return
+	}
+	if request.Protocol, err = soap.MarshalUi2(Protocol); err != nil {
+		return
+	}
+	if request.LeaseTime, err = soap.MarshalUi4(LeaseTime); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		UniqueID string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPv6FirewallControl_1, "AddPinhole", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if UniqueID, err = soap.UnmarshalUi2(response.UniqueID); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+//
+// Arguments:
+//
+// * NewLeaseTime: allowed value range: minimum=1, maximum=86400
+
+// UpdatePinhole performs the "UpdatePinhole" SOAP action, renewing the lease
+// on an existing pinhole identified by UniqueID.
+func (client *WANIPv6FirewallControl1) UpdatePinhole(UniqueID uint16, NewLeaseTime uint32) (err error) {
+	// Request structure.
+	request := &struct {
+		UniqueID string
+
+		NewLeaseTime string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.UniqueID, err = soap.MarshalUi2(UniqueID); err != nil {
+		return
+	}
+	if request.NewLeaseTime, err = soap.MarshalUi4(NewLeaseTime); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPv6FirewallControl_1, "UpdatePinhole", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// DeletePinhole performs the "DeletePinhole" SOAP action, closing the
+// pinhole identified by UniqueID.
+func (client *WANIPv6FirewallControl1) DeletePinhole(UniqueID uint16) (err error) {
+	// Request structure.
+	request := &struct {
+		UniqueID string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.UniqueID, err = soap.MarshalUi2(UniqueID); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPv6FirewallControl_1, "DeletePinhole", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+// GetPinholePackets performs the "GetPinholePackets" SOAP action, returning
+// the packet count observed for the pinhole identified by UniqueID.
+func (client *WANIPv6FirewallControl1) GetPinholePackets(UniqueID uint16) (PinholePackets uint32, err error) {
+	// Request structure.
+	request := &struct {
+		UniqueID string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.UniqueID, err = soap.MarshalUi2(UniqueID); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		PinholePackets string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPv6FirewallControl_1, "GetPinholePackets", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if PinholePackets, err = soap.UnmarshalUi4(response.PinholePackets); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// CheckPinholeWorking performs the "CheckPinholeWorking" SOAP action,
+// reporting whether the pinhole identified by UniqueID is passing traffic.
+func (client *WANIPv6FirewallControl1) CheckPinholeWorking(UniqueID uint16) (IsWorking bool, err error) {
+	// Request structure.
+	request := &struct {
+		UniqueID string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.UniqueID, err = soap.MarshalUi2(UniqueID); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := &struct {
+		IsWorking string
+	}{}
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANIPv6FirewallControl_1, "CheckPinholeWorking", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	if IsWorking, err = soap.UnmarshalBoolean(response.IsWorking); err != nil {
+		return
+	}
+	// END Unmarshal arguments from response.
+	return
+}
+
+// WANPOTSLinkConfig1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANPOTSLinkConfig:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANPOTSLinkConfig1 struct {
+	goupnp.ServiceClient // embedded generic client; provides the SOAPClient used by the action methods
+}
+
+// NewWANPOTSLinkConfig1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) {
+	var genericClients []goupnp.ServiceClient
+	if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPOTSLinkConfig_1); err != nil {
+		return
+	}
+	// Wrap each discovered generic client in the service-specific type.
+	clients = newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients)
+	return
+}
+
+// NewWANPOTSLinkConfig1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
+	// Probe the service at the given URL, then wrap the generic clients.
+	genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPOTSLinkConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANPOTSLinkConfig1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANPOTSLinkConfig1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
+	// Locate matching service instances within the already-discovered root device.
+	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANPOTSLinkConfig_1)
+	if err != nil {
+		return nil, err
+	}
+	return newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients), nil
+}
+
+// newWANPOTSLinkConfig1ClientsFromGenericClients wraps each generic
+// goupnp.ServiceClient in the service-specific WANPOTSLinkConfig1 type.
+func newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANPOTSLinkConfig1 {
+	clients := make([]*WANPOTSLinkConfig1, len(genericClients))
+	for i := range genericClients {
+		clients[i] = &WANPOTSLinkConfig1{genericClients[i]}
+	}
+	return clients
+}
+
+//
+// Arguments:
+//
+// * NewLinkType: allowed values: PPP_Dialup
+
+// SetISPInfo performs the "SetISPInfo" SOAP action (no output arguments);
+// see the comment block above for the allowed NewLinkType values.
+func (client *WANPOTSLinkConfig1) SetISPInfo(NewISPPhoneNumber string, NewISPInfo string, NewLinkType string) (err error) {
+	// Request structure.
+	request := &struct {
+		NewISPPhoneNumber string
+
+		NewISPInfo string
+
+		NewLinkType string
+	}{}
+	// BEGIN Marshal arguments into request.
+
+	if request.NewISPPhoneNumber, err = soap.MarshalString(NewISPPhoneNumber); err != nil {
+		return
+	}
+	if request.NewISPInfo, err = soap.MarshalString(NewISPInfo); err != nil {
+		return
+	}
+	if request.NewLinkType, err = soap.MarshalString(NewLinkType); err != nil {
+		return
+	}
+	// END Marshal arguments into request.
+
+	// Response structure.
+	response := interface{}(nil)
+
+	// Perform the SOAP call.
+	if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "SetISPInfo", request, response); err != nil {
+		return
+	}
+
+	// BEGIN Unmarshal arguments from response.
+
+	// END Unmarshal arguments from response.
+	return
+}
+
+func (client *WANPOTSLinkConfig1) SetCallRetryInfo(NewNumberOfRetries uint32, NewDelayBetweenRetries uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewNumberOfRetries string
+
+ NewDelayBetweenRetries string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewNumberOfRetries, err = soap.MarshalUi4(NewNumberOfRetries); err != nil {
+ return
+ }
+ if request.NewDelayBetweenRetries, err = soap.MarshalUi4(NewDelayBetweenRetries); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "SetCallRetryInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewLinkType: allowed values: PPP_Dialup
+func (client *WANPOTSLinkConfig1) GetISPInfo() (NewISPPhoneNumber string, NewISPInfo string, NewLinkType string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewISPPhoneNumber string
+
+ NewISPInfo string
+
+ NewLinkType string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetISPInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewISPPhoneNumber, err = soap.UnmarshalString(response.NewISPPhoneNumber); err != nil {
+ return
+ }
+ if NewISPInfo, err = soap.UnmarshalString(response.NewISPInfo); err != nil {
+ return
+ }
+ if NewLinkType, err = soap.UnmarshalString(response.NewLinkType); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPOTSLinkConfig1) GetCallRetryInfo() (NewNumberOfRetries uint32, NewDelayBetweenRetries uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewNumberOfRetries string
+
+ NewDelayBetweenRetries string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetCallRetryInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewNumberOfRetries, err = soap.UnmarshalUi4(response.NewNumberOfRetries); err != nil {
+ return
+ }
+ if NewDelayBetweenRetries, err = soap.UnmarshalUi4(response.NewDelayBetweenRetries); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPOTSLinkConfig1) GetFclass() (NewFclass string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewFclass string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetFclass", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewFclass, err = soap.UnmarshalString(response.NewFclass); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPOTSLinkConfig1) GetDataModulationSupported() (NewDataModulationSupported string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDataModulationSupported string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetDataModulationSupported", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDataModulationSupported, err = soap.UnmarshalString(response.NewDataModulationSupported); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPOTSLinkConfig1) GetDataProtocol() (NewDataProtocol string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDataProtocol string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetDataProtocol", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDataProtocol, err = soap.UnmarshalString(response.NewDataProtocol); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPOTSLinkConfig1) GetDataCompression() (NewDataCompression string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewDataCompression string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetDataCompression", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewDataCompression, err = soap.UnmarshalString(response.NewDataCompression); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPOTSLinkConfig1) GetPlusVTRCommandSupported() (NewPlusVTRCommandSupported bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPlusVTRCommandSupported string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPOTSLinkConfig_1, "GetPlusVTRCommandSupported", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPlusVTRCommandSupported, err = soap.UnmarshalBoolean(response.NewPlusVTRCommandSupported); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+// WANPPPConnection1 is a client for UPnP SOAP service with URN "urn:schemas-upnp-org:service:WANPPPConnection:1". See
+// goupnp.ServiceClient, which contains RootDevice and Service attributes which
+// are provided for informational value.
+type WANPPPConnection1 struct {
+ goupnp.ServiceClient
+}
+
+// NewWANPPPConnection1Clients discovers instances of the service on the network,
+// and returns clients to any that are found. errors will contain an error for
+// any devices that replied but which could not be queried, and err will be set
+// if the discovery process failed outright.
+//
+// This is a typical entry calling point into this package.
+func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) {
+ var genericClients []goupnp.ServiceClient
+ if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPPPConnection_1); err != nil {
+ return
+ }
+ clients = newWANPPPConnection1ClientsFromGenericClients(genericClients)
+ return
+}
+
+// NewWANPPPConnection1ClientsByURL discovers instances of the service at the given
+// URL, and returns clients to any that are found. An error is returned if
+// there was an error probing the service.
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered service URL.
+func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPPPConnection_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANPPPConnection1ClientsFromGenericClients(genericClients), nil
+}
+
+// NewWANPPPConnection1ClientsFromRootDevice discovers instances of the service in
+// a given root device, and returns clients to any that are found. An error is
+// returned if there was not at least one instance of the service within the
+// device. The location parameter is simply assigned to the Location attribute
+// of the wrapped ServiceClient(s).
+//
+// This is a typical entry calling point into this package when reusing an
+// previously discovered root device.
+func NewWANPPPConnection1ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*WANPPPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, URN_WANPPPConnection_1)
+ if err != nil {
+ return nil, err
+ }
+ return newWANPPPConnection1ClientsFromGenericClients(genericClients), nil
+}
+
+func newWANPPPConnection1ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*WANPPPConnection1 {
+ clients := make([]*WANPPPConnection1, len(genericClients))
+ for i := range genericClients {
+ clients[i] = &WANPPPConnection1{genericClients[i]}
+ }
+ return clients
+}
+
+func (client *WANPPPConnection1) SetConnectionType(NewConnectionType string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewConnectionType string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewConnectionType, err = soap.MarshalString(NewConnectionType); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "SetConnectionType", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewPossibleConnectionTypes: allowed values: Unconfigured, IP_Routed, DHCP_Spoofed, PPPoE_Bridged, PPTP_Relay, L2TP_Relay, PPPoE_Relay
+func (client *WANPPPConnection1) GetConnectionTypeInfo() (NewConnectionType string, NewPossibleConnectionTypes string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewConnectionType string
+
+ NewPossibleConnectionTypes string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetConnectionTypeInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewConnectionType, err = soap.UnmarshalString(response.NewConnectionType); err != nil {
+ return
+ }
+ if NewPossibleConnectionTypes, err = soap.UnmarshalString(response.NewPossibleConnectionTypes); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) ConfigureConnection(NewUserName string, NewPassword string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewUserName string
+
+ NewPassword string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewUserName, err = soap.MarshalString(NewUserName); err != nil {
+ return
+ }
+ if request.NewPassword, err = soap.MarshalString(NewPassword); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "ConfigureConnection", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) RequestConnection() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "RequestConnection", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) RequestTermination() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "RequestTermination", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) ForceTermination() (err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "ForceTermination", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) SetAutoDisconnectTime(NewAutoDisconnectTime uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewAutoDisconnectTime string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewAutoDisconnectTime, err = soap.MarshalUi4(NewAutoDisconnectTime); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "SetAutoDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) SetIdleDisconnectTime(NewIdleDisconnectTime uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewIdleDisconnectTime string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewIdleDisconnectTime, err = soap.MarshalUi4(NewIdleDisconnectTime); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "SetIdleDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) SetWarnDisconnectDelay(NewWarnDisconnectDelay uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewWarnDisconnectDelay string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewWarnDisconnectDelay, err = soap.MarshalUi4(NewWarnDisconnectDelay); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "SetWarnDisconnectDelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewConnectionStatus: allowed values: Unconfigured, Connected, Disconnected
+//
+// * NewLastConnectionError: allowed values: ERROR_NONE
+func (client *WANPPPConnection1) GetStatusInfo() (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewConnectionStatus string
+
+ NewLastConnectionError string
+
+ NewUptime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetStatusInfo", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewConnectionStatus, err = soap.UnmarshalString(response.NewConnectionStatus); err != nil {
+ return
+ }
+ if NewLastConnectionError, err = soap.UnmarshalString(response.NewLastConnectionError); err != nil {
+ return
+ }
+ if NewUptime, err = soap.UnmarshalUi4(response.NewUptime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetLinkLayerMaxBitRates() (NewUpstreamMaxBitRate uint32, NewDownstreamMaxBitRate uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewUpstreamMaxBitRate string
+
+ NewDownstreamMaxBitRate string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetLinkLayerMaxBitRates", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewUpstreamMaxBitRate, err = soap.UnmarshalUi4(response.NewUpstreamMaxBitRate); err != nil {
+ return
+ }
+ if NewDownstreamMaxBitRate, err = soap.UnmarshalUi4(response.NewDownstreamMaxBitRate); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetPPPEncryptionProtocol() (NewPPPEncryptionProtocol string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPPPEncryptionProtocol string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetPPPEncryptionProtocol", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPPPEncryptionProtocol, err = soap.UnmarshalString(response.NewPPPEncryptionProtocol); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetPPPCompressionProtocol() (NewPPPCompressionProtocol string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPPPCompressionProtocol string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetPPPCompressionProtocol", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPPPCompressionProtocol, err = soap.UnmarshalString(response.NewPPPCompressionProtocol); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetPPPAuthenticationProtocol() (NewPPPAuthenticationProtocol string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPPPAuthenticationProtocol string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetPPPAuthenticationProtocol", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPPPAuthenticationProtocol, err = soap.UnmarshalString(response.NewPPPAuthenticationProtocol); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetUserName() (NewUserName string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewUserName string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetUserName", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewUserName, err = soap.UnmarshalString(response.NewUserName); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetPassword() (NewPassword string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewPassword string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetPassword", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewPassword, err = soap.UnmarshalString(response.NewPassword); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetAutoDisconnectTime() (NewAutoDisconnectTime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewAutoDisconnectTime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetAutoDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewAutoDisconnectTime, err = soap.UnmarshalUi4(response.NewAutoDisconnectTime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetIdleDisconnectTime() (NewIdleDisconnectTime uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewIdleDisconnectTime string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetIdleDisconnectTime", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewIdleDisconnectTime, err = soap.UnmarshalUi4(response.NewIdleDisconnectTime); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetWarnDisconnectDelay() (NewWarnDisconnectDelay uint32, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewWarnDisconnectDelay string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetWarnDisconnectDelay", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewWarnDisconnectDelay, err = soap.UnmarshalUi4(response.NewWarnDisconnectDelay); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetNATRSIPStatus() (NewRSIPAvailable bool, NewNATEnabled bool, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewRSIPAvailable string
+
+ NewNATEnabled string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetNATRSIPStatus", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewRSIPAvailable, err = soap.UnmarshalBoolean(response.NewRSIPAvailable); err != nil {
+ return
+ }
+ if NewNATEnabled, err = soap.UnmarshalBoolean(response.NewNATEnabled); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Return values:
+//
+// * NewProtocol: allowed values: TCP, UDP
+func (client *WANPPPConnection1) GetGenericPortMappingEntry(NewPortMappingIndex uint16) (NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+ // Request structure.
+ request := &struct {
+ NewPortMappingIndex string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewPortMappingIndex, err = soap.MarshalUi2(NewPortMappingIndex); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetGenericPortMappingEntry", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewRemoteHost, err = soap.UnmarshalString(response.NewRemoteHost); err != nil {
+ return
+ }
+ if NewExternalPort, err = soap.UnmarshalUi2(response.NewExternalPort); err != nil {
+ return
+ }
+ if NewProtocol, err = soap.UnmarshalString(response.NewProtocol); err != nil {
+ return
+ }
+ if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+ return
+ }
+ if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+ return
+ }
+ if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+ return
+ }
+ if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+ return
+ }
+ if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANPPPConnection1) GetSpecificPortMappingEntry(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32, err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetSpecificPortMappingEntry", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewInternalPort, err = soap.UnmarshalUi2(response.NewInternalPort); err != nil {
+ return
+ }
+ if NewInternalClient, err = soap.UnmarshalString(response.NewInternalClient); err != nil {
+ return
+ }
+ if NewEnabled, err = soap.UnmarshalBoolean(response.NewEnabled); err != nil {
+ return
+ }
+ if NewPortMappingDescription, err = soap.UnmarshalString(response.NewPortMappingDescription); err != nil {
+ return
+ }
+ if NewLeaseDuration, err = soap.UnmarshalUi4(response.NewLeaseDuration); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANPPPConnection1) AddPortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string, NewInternalPort uint16, NewInternalClient string, NewEnabled bool, NewPortMappingDescription string, NewLeaseDuration uint32) (err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+
+ NewInternalPort string
+
+ NewInternalClient string
+
+ NewEnabled string
+
+ NewPortMappingDescription string
+
+ NewLeaseDuration string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ if request.NewInternalPort, err = soap.MarshalUi2(NewInternalPort); err != nil {
+ return
+ }
+ if request.NewInternalClient, err = soap.MarshalString(NewInternalClient); err != nil {
+ return
+ }
+ if request.NewEnabled, err = soap.MarshalBoolean(NewEnabled); err != nil {
+ return
+ }
+ if request.NewPortMappingDescription, err = soap.MarshalString(NewPortMappingDescription); err != nil {
+ return
+ }
+ if request.NewLeaseDuration, err = soap.MarshalUi4(NewLeaseDuration); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "AddPortMapping", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+//
+// Arguments:
+//
+// * NewProtocol: allowed values: TCP, UDP
+
+func (client *WANPPPConnection1) DeletePortMapping(NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) {
+ // Request structure.
+ request := &struct {
+ NewRemoteHost string
+
+ NewExternalPort string
+
+ NewProtocol string
+ }{}
+ // BEGIN Marshal arguments into request.
+
+ if request.NewRemoteHost, err = soap.MarshalString(NewRemoteHost); err != nil {
+ return
+ }
+ if request.NewExternalPort, err = soap.MarshalUi2(NewExternalPort); err != nil {
+ return
+ }
+ if request.NewProtocol, err = soap.MarshalString(NewProtocol); err != nil {
+ return
+ }
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := interface{}(nil)
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "DeletePortMapping", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ // END Unmarshal arguments from response.
+ return
+}
+
+func (client *WANPPPConnection1) GetExternalIPAddress() (NewExternalIPAddress string, err error) {
+ // Request structure.
+ request := interface{}(nil)
+ // BEGIN Marshal arguments into request.
+
+ // END Marshal arguments into request.
+
+ // Response structure.
+ response := &struct {
+ NewExternalIPAddress string
+ }{}
+
+ // Perform the SOAP call.
+ if err = client.SOAPClient.PerformAction(URN_WANPPPConnection_1, "GetExternalIPAddress", request, response); err != nil {
+ return
+ }
+
+ // BEGIN Unmarshal arguments from response.
+
+ if NewExternalIPAddress, err = soap.UnmarshalString(response.NewExternalIPAddress); err != nil {
+ return
+ }
+ // END Unmarshal arguments from response.
+ return
+}
diff --git a/vendor/github.com/huin/goupnp/device.go b/vendor/github.com/huin/goupnp/device.go
new file mode 100644
index 000000000..e5b658b21
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/device.go
@@ -0,0 +1,184 @@
+// This file contains XML structures for communicating with UPnP devices.
+
+package goupnp
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "net/url"
+
+ "github.com/huin/goupnp/scpd"
+ "github.com/huin/goupnp/soap"
+)
+
+const (
+	// DeviceXMLNamespace is the XML namespace of UPnP device descriptions.
+	DeviceXMLNamespace = "urn:schemas-upnp-org:device-1-0"
+)
+
+// RootDevice is the device description as described by section 2.3 "Device
+// description" in
+// http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf
+type RootDevice struct {
+	XMLName     xml.Name    `xml:"root"`
+	SpecVersion SpecVersion `xml:"specVersion"`
+	URLBase     url.URL     `xml:"-"`       // resolved base URL; not part of the XML document
+	URLBaseStr  string      `xml:"URLBase"` // raw URLBase element, if the device supplied one
+	Device      Device      `xml:"device"`
+}
+
+// SetURLBase sets the URLBase for the RootDevice and its underlying components.
+func (root *RootDevice) SetURLBase(urlBase *url.URL) {
+	root.URLBase = *urlBase
+	root.URLBaseStr = urlBase.String()
+	root.Device.SetURLBase(urlBase)
+}
+
+// SpecVersion is part of a RootDevice, describes the version of the
+// specification that the data adheres to.
+type SpecVersion struct {
+	Major int32 `xml:"major"`
+	Minor int32 `xml:"minor"`
+}
+
+// Device is a UPnP device. It can have child devices.
+type Device struct {
+	DeviceType       string    `xml:"deviceType"`
+	FriendlyName     string    `xml:"friendlyName"`
+	Manufacturer     string    `xml:"manufacturer"`
+	ManufacturerURL  URLField  `xml:"manufacturerURL"`
+	ModelDescription string    `xml:"modelDescription"`
+	ModelName        string    `xml:"modelName"`
+	ModelNumber      string    `xml:"modelNumber"`
+	ModelURL         URLField  `xml:"modelURL"`
+	SerialNumber     string    `xml:"serialNumber"`
+	UDN              string    `xml:"UDN"`
+	UPC              string    `xml:"UPC,omitempty"`
+	Icons            []Icon    `xml:"iconList>icon,omitempty"`
+	Services         []Service `xml:"serviceList>service,omitempty"`
+	Devices          []Device  `xml:"deviceList>device,omitempty"`
+
+	// Extra observed elements:
+	PresentationURL URLField `xml:"presentationURL"`
+}
+
+// VisitDevices calls visitor for the device, and all its descendent devices.
+func (device *Device) VisitDevices(visitor func(*Device)) {
+	visitor(device)
+	for idx := 0; idx < len(device.Devices); idx++ {
+		device.Devices[idx].VisitDevices(visitor)
+	}
+}
+
+// VisitServices calls visitor for all Services under the device and all its
+// descendent devices.
+func (device *Device) VisitServices(visitor func(*Service)) {
+	device.VisitDevices(func(dev *Device) {
+		for idx := 0; idx < len(dev.Services); idx++ {
+			visitor(&dev.Services[idx])
+		}
+	})
+}
+
+// FindService finds all (if any) Services under the device and its descendents
+// that have the given ServiceType.
+func (device *Device) FindService(serviceType string) []*Service {
+	var matches []*Service
+	device.VisitServices(func(srv *Service) {
+		if srv.ServiceType != serviceType {
+			return
+		}
+		matches = append(matches, srv)
+	})
+	return matches
+}
+
+// SetURLBase sets the URLBase for the Device and its underlying components.
+func (device *Device) SetURLBase(urlBase *url.URL) {
+	device.ManufacturerURL.SetURLBase(urlBase)
+	device.ModelURL.SetURLBase(urlBase)
+	device.PresentationURL.SetURLBase(urlBase)
+	for idx := 0; idx < len(device.Icons); idx++ {
+		device.Icons[idx].SetURLBase(urlBase)
+	}
+	for idx := 0; idx < len(device.Services); idx++ {
+		device.Services[idx].SetURLBase(urlBase)
+	}
+	for idx := 0; idx < len(device.Devices); idx++ {
+		device.Devices[idx].SetURLBase(urlBase)
+	}
+}
+
+// String returns a short human-readable identification of the device.
+func (device *Device) String() string {
+	return fmt.Sprintf("Device ID %s : %s (%s)", device.UDN, device.DeviceType, device.FriendlyName)
+}
+
+// Icon is a representative image that a device might include in its
+// description.
+type Icon struct {
+	Mimetype string   `xml:"mimetype"`
+	Width    int32    `xml:"width"`
+	Height   int32    `xml:"height"`
+	Depth    int32    `xml:"depth"`
+	URL      URLField `xml:"url"`
+}
+
+// SetURLBase sets the URLBase for the Icon.
+func (icon *Icon) SetURLBase(url *url.URL) {
+	icon.URL.SetURLBase(url)
+}
+
+// Service is a service provided by a UPnP Device.
+type Service struct {
+	ServiceType string   `xml:"serviceType"`
+	ServiceId   string   `xml:"serviceId"`
+	SCPDURL     URLField `xml:"SCPDURL"`
+	ControlURL  URLField `xml:"controlURL"`
+	EventSubURL URLField `xml:"eventSubURL"`
+}
+
+// SetURLBase sets the URLBase for the Service's three URL fields.
+func (srv *Service) SetURLBase(urlBase *url.URL) {
+	srv.SCPDURL.SetURLBase(urlBase)
+	srv.ControlURL.SetURLBase(urlBase)
+	srv.EventSubURL.SetURLBase(urlBase)
+}
+
+// String returns a short human-readable identification of the service.
+func (srv *Service) String() string {
+	return fmt.Sprintf("Service ID %s : %s", srv.ServiceId, srv.ServiceType)
+}
+
+// RequestSCDP requests the SCPD (soap actions and state variables description)
+// for the service.
+func (srv *Service) RequestSCDP() (*scpd.SCPD, error) {
+	if !srv.SCPDURL.Ok {
+		return nil, errors.New("bad/missing SCPD URL, or no URLBase has been set")
+	}
+	description := new(scpd.SCPD)
+	err := requestXml(srv.SCPDURL.URL.String(), scpd.SCPDXMLNamespace, description)
+	if err != nil {
+		return nil, err
+	}
+	return description, nil
+}
+
+// NewSOAPClient builds a SOAP client aimed at the service's control URL.
+func (srv *Service) NewSOAPClient() *soap.SOAPClient {
+	return soap.NewSOAPClient(srv.ControlURL.URL)
+}
+
+// URLField is a URL that is part of a device description.
+type URLField struct {
+	URL url.URL `xml:"-"`
+	Ok  bool    `xml:"-"`
+	Str string  `xml:",chardata"`
+}
+
+// SetURLBase resolves the raw Str value against urlBase, recording the result
+// in URL and its validity in Ok.
+func (uf *URLField) SetURLBase(urlBase *url.URL) {
+	refUrl, err := url.Parse(uf.Str)
+	if err == nil {
+		uf.URL = *urlBase.ResolveReference(refUrl)
+		uf.Ok = true
+		return
+	}
+	uf.URL = url.URL{}
+	uf.Ok = false
+}
diff --git a/vendor/github.com/huin/goupnp/goupnp.go b/vendor/github.com/huin/goupnp/goupnp.go
new file mode 100644
index 000000000..fcb8dcd23
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/goupnp.go
@@ -0,0 +1,131 @@
+// goupnp is an implementation of a client for various UPnP services.
+//
+// For most uses, it is recommended to use the code-generated packages under
+// github.com/huin/goupnp/dcps. Example use is shown at
+// http://godoc.org/github.com/huin/goupnp/example
+//
+// A commonly used client is internetgateway1.WANPPPConnection1:
+// http://godoc.org/github.com/huin/goupnp/dcps/internetgateway1#WANPPPConnection1
+//
+// Currently only a couple of schemas have code generated for them from the
+// UPnP example XML specifications. Not all methods will work on these clients,
+// because the generated stubs contain the full set of specified methods from
+// the XML specifications, and the discovered services will likely support a
+// subset of those methods.
+package goupnp
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/html/charset"
+
+ "github.com/huin/goupnp/httpu"
+ "github.com/huin/goupnp/ssdp"
+)
+
+// ContextError is an error that wraps an error with some context information.
+type ContextError struct {
+	Context string // human-readable description of what was being attempted
+	Err     error  // the underlying error
+}
+
+// Error formats the error as "<context>: <underlying error>".
+func (err ContextError) Error() string {
+	return fmt.Sprintf("%s: %v", err.Context, err.Err)
+}
+
+// MaybeRootDevice contains either a RootDevice or an error.
+type MaybeRootDevice struct {
+	// Set iff Err == nil.
+	Root *RootDevice
+
+	// The location the device was discovered at. This can be used with
+	// DeviceByURL, assuming the device is still present. A location represents
+	// the discovery of a device, regardless of if there was an error probing it.
+	Location *url.URL
+
+	// Any error encountered probing a discovered device.
+	Err error
+}
+
+// DiscoverDevices attempts to find targets of the given type. This is
+// typically the entry-point for this package. searchTarget is typically a URN
+// in the form "urn:schemas-upnp-org:device:..." or
+// "urn:schemas-upnp-org:service:...". A single error is returned for errors
+// while attempting to send the query. An error or RootDevice is returned for
+// each discovered RootDevice.
+func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) {
+	// Named hc rather than httpu so the variable does not shadow the
+	// httpu package name.
+	hc, err := httpu.NewHTTPUClient()
+	if err != nil {
+		return nil, err
+	}
+	defer hc.Close()
+	// searchTarget is already a string; the previous redundant
+	// string(...) conversion is removed.
+	// NOTE(review): the literal arguments 2 and 3 look like wait-seconds
+	// and send-count — confirm against ssdp.SSDPRawSearch.
+	responses, err := ssdp.SSDPRawSearch(hc, searchTarget, 2, 3)
+	if err != nil {
+		return nil, err
+	}
+
+	results := make([]MaybeRootDevice, len(responses))
+	for i, response := range responses {
+		maybe := &results[i]
+		loc, err := response.Location()
+		if err != nil {
+			maybe.Err = ContextError{"unexpected bad location from search", err}
+			continue
+		}
+		maybe.Location = loc
+		// Probe the device description; record either the root or the error.
+		if root, err := DeviceByURL(loc); err != nil {
+			maybe.Err = err
+		} else {
+			maybe.Root = root
+		}
+	}
+
+	return results, nil
+}
+
+func DeviceByURL(loc *url.URL) (*RootDevice, error) {
+ locStr := loc.String()
+ root := new(RootDevice)
+ if err := requestXml(locStr, DeviceXMLNamespace, root); err != nil {
+ return nil, ContextError{fmt.Sprintf("error requesting root device details from %q", locStr), err}
+ }
+ var urlBaseStr string
+ if root.URLBaseStr != "" {
+ urlBaseStr = root.URLBaseStr
+ } else {
+ urlBaseStr = locStr
+ }
+ urlBase, err := url.Parse(urlBaseStr)
+ if err != nil {
+ return nil, ContextError{fmt.Sprintf("error parsing location URL %q", locStr), err}
+ }
+ root.SetURLBase(urlBase)
+ return root, nil
+}
+
+// requestXml fetches the document at url with a 3-second timeout and decodes
+// the XML response body into doc, using defaultSpace as the decoder's default
+// XML namespace.
+func requestXml(url string, defaultSpace string, doc interface{}) error {
+	client := http.Client{
+		// Bound the whole request so a stalled device cannot hang discovery.
+		// (3 * time.Second is already a time.Duration; the former
+		// time.Duration(...) conversion was redundant.)
+		Timeout: 3 * time.Second,
+	}
+	resp, err := client.Get(url)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("goupnp: got response status %s from %q",
+			resp.Status, url)
+	}
+
+	decoder := xml.NewDecoder(resp.Body)
+	decoder.DefaultSpace = defaultSpace
+	// Devices commonly respond in non-UTF-8 charsets; translate as needed.
+	decoder.CharsetReader = charset.NewReaderLabel
+
+	return decoder.Decode(doc)
+}
diff --git a/vendor/github.com/huin/goupnp/httpu/httpu.go b/vendor/github.com/huin/goupnp/httpu/httpu.go
new file mode 100644
index 000000000..f52dad68b
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/httpu/httpu.go
@@ -0,0 +1,132 @@
+package httpu
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+)
+
+// HTTPUClient is a client for dealing with HTTPU (HTTP over UDP). Its typical
+// function is for HTTPMU, and particularly SSDP.
+type HTTPUClient struct {
+	connLock sync.Mutex // Protects use of conn.
+	conn     net.PacketConn
+}
+
+// NewHTTPUClient creates a new HTTPUClient, opening up a new UDP socket for the
+// purpose.
+func NewHTTPUClient() (*HTTPUClient, error) {
+	// Port 0 lets the OS pick an ephemeral local port.
+	conn, err := net.ListenPacket("udp", ":0")
+	if err != nil {
+		return nil, err
+	}
+	return &HTTPUClient{conn: conn}, nil
+}
+
+// NewHTTPUClientAddr creates a new HTTPUClient which will broadcast packets
+// from the specified address, opening up a new UDP socket for the purpose.
+func NewHTTPUClientAddr(addr string) (*HTTPUClient, error) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		// Lowercase error string, per Go convention.
+		return nil, errors.New("invalid listening address")
+	}
+	conn, err := net.ListenPacket("udp", ip.String()+":0")
+	if err != nil {
+		return nil, err
+	}
+	return &HTTPUClient{conn: conn}, nil
+}
+
+// Close shuts down the client. The client will no longer be useful following
+// this.
+func (httpu *HTTPUClient) Close() error {
+	// Take the lock so closing cannot race with an in-flight Do.
+	httpu.connLock.Lock()
+	defer httpu.connLock.Unlock()
+	return httpu.conn.Close()
+}
+
+// Do performs a request. The timeout is how long to wait for before returning
+// the responses that were received. An error is only returned for failing to
+// send the request. Failures in receipt simply do not add to the resulting
+// responses.
+//
+// Note that at present only one concurrent connection will happen per
+// HTTPUClient.
+func (httpu *HTTPUClient) Do(req *http.Request, timeout time.Duration, numSends int) ([]*http.Response, error) {
+	httpu.connLock.Lock()
+	defer httpu.connLock.Unlock()
+
+	// Create the request. This is a subset of what http.Request.Write does
+	// deliberately to avoid creating extra fields which may confuse some
+	// devices.
+	var requestBuf bytes.Buffer
+	method := req.Method
+	if method == "" {
+		method = "GET"
+	}
+	if _, err := fmt.Fprintf(&requestBuf, "%s %s HTTP/1.1\r\n", method, req.URL.RequestURI()); err != nil {
+		return nil, err
+	}
+	if err := req.Header.Write(&requestBuf); err != nil {
+		return nil, err
+	}
+	if _, err := requestBuf.Write([]byte{'\r', '\n'}); err != nil {
+		return nil, err
+	}
+
+	destAddr, err := net.ResolveUDPAddr("udp", req.Host)
+	if err != nil {
+		return nil, err
+	}
+	// The deadline bounds the receive loop below.
+	if err = httpu.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
+		return nil, err
+	}
+
+	// Send request numSends times; UDP is lossy and duplicates are tolerated.
+	for i := 0; i < numSends; i++ {
+		if n, err := httpu.conn.WriteTo(requestBuf.Bytes(), destAddr); err != nil {
+			return nil, err
+		} else if n < len(requestBuf.Bytes()) {
+			return nil, fmt.Errorf("httpu: wrote %d bytes rather than full %d in request",
+				n, len(requestBuf.Bytes()))
+		}
+		time.Sleep(5 * time.Millisecond)
+	}
+
+	// Await responses until timeout.
+	var responses []*http.Response
+	responseBytes := make([]byte, 2048)
+	for {
+		// 2048 bytes should be sufficient for most networks.
+		n, _, err := httpu.conn.ReadFrom(responseBytes)
+		if err != nil {
+			if err, ok := err.(net.Error); ok {
+				if err.Timeout() {
+					break
+				}
+				if err.Temporary() {
+					// Sleep in case this is a persistent error to avoid pegging CPU until deadline.
+					time.Sleep(10 * time.Millisecond)
+					continue
+				}
+			}
+			return nil, err
+		}
+
+		// Parse response.
+		// NOTE(review): responseBytes is reused on the next iteration; this
+		// assumes ReadResponse fully consumes the buffer here — confirm.
+		response, err := http.ReadResponse(bufio.NewReader(bytes.NewBuffer(responseBytes[:n])), req)
+		if err != nil {
+			// Printf, not Print: the message contains a %v format verb.
+			log.Printf("httpu: error while parsing response: %v", err)
+			continue
+		}
+
+		responses = append(responses, response)
+	}
+	// The only way out of the loop without returning is the deadline timeout,
+	// at which point the outer err is provably nil (the loop's err is a
+	// shadowing variable). Return nil explicitly for clarity.
+	return responses, nil
+}
diff --git a/vendor/github.com/huin/goupnp/httpu/serve.go b/vendor/github.com/huin/goupnp/httpu/serve.go
new file mode 100644
index 000000000..9f67af85b
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/httpu/serve.go
@@ -0,0 +1,108 @@
+package httpu
+
+import (
+	"bufio"
+	"bytes"
+	"log"
+	"net"
+	"net/http"
+	"regexp"
+)
+
+const (
+	// DefaultMaxMessageBytes is the default per-datagram read-buffer size.
+	DefaultMaxMessageBytes = 2048
+)
+
+var (
+	// trailingWhitespaceRx matches spaces immediately before a CRLF; used by
+	// Serve to work around buggy router request lines.
+	trailingWhitespaceRx = regexp.MustCompile(" +\r\n")
+	crlf                 = []byte("\r\n")
+)
+
+// Handler is the interface by which received HTTPU messages are passed to
+// handling code.
+type Handler interface {
+	// ServeMessage is called for each HTTPU message received. peerAddr contains
+	// the address that the message was received from.
+	ServeMessage(r *http.Request)
+}
+
+// HandlerFunc is a function-to-Handler adapter.
+type HandlerFunc func(r *http.Request)
+
+// ServeMessage calls the adapted function with the request.
+func (f HandlerFunc) ServeMessage(r *http.Request) {
+	f(r)
+}
+
+// A Server defines parameters for running an HTTPU server.
+type Server struct {
+	Addr            string         // UDP address to listen on
+	Multicast       bool           // Should listen for multicast?
+	Interface       *net.Interface // Network interface to listen on for multicast, nil for default multicast interface
+	Handler         Handler        // handler to invoke
+	MaxMessageBytes int            // maximum number of bytes to read from a packet, DefaultMaxMessageBytes if 0
+}
+
+// ListenAndServe listens on the UDP network address srv.Addr. If srv.Multicast
+// is true, then a multicast UDP listener will be used on srv.Interface (or
+// default interface if nil).
+func (srv *Server) ListenAndServe() error {
+	var err error
+
+	var addr *net.UDPAddr
+	if addr, err = net.ResolveUDPAddr("udp", srv.Addr); err != nil {
+		// Return the error rather than log.Fatal: library code must not
+		// terminate the host process on a bad address.
+		return err
+	}
+
+	var conn net.PacketConn
+	if srv.Multicast {
+		if conn, err = net.ListenMulticastUDP("udp", srv.Interface, addr); err != nil {
+			return err
+		}
+	} else {
+		if conn, err = net.ListenUDP("udp", addr); err != nil {
+			return err
+		}
+	}
+
+	return srv.Serve(conn)
+}
+
+// Serve messages received on the given packet listener to the srv.Handler.
+func (srv *Server) Serve(l net.PacketConn) error {
+	maxMessageBytes := DefaultMaxMessageBytes
+	if srv.MaxMessageBytes != 0 {
+		maxMessageBytes = srv.MaxMessageBytes
+	}
+	for {
+		// A fresh buffer per datagram: the handler goroutine below retains it.
+		buf := make([]byte, maxMessageBytes)
+		n, peerAddr, err := l.ReadFrom(buf)
+		if err != nil {
+			return err
+		}
+		buf = buf[:n]
+
+		// Handle each message concurrently so a slow handler cannot block reads.
+		go func(buf []byte, peerAddr net.Addr) {
+			// At least one router's UPnP implementation has added a trailing space
+			// after "HTTP/1.1" - trim it.
+			buf = trailingWhitespaceRx.ReplaceAllLiteral(buf, crlf)
+
+			req, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(buf)))
+			if err != nil {
+				log.Printf("httpu: Failed to parse request: %v", err)
+				return
+			}
+			req.RemoteAddr = peerAddr.String()
+			srv.Handler.ServeMessage(req)
+			// No need to call req.Body.Close - underlying reader is bytes.Buffer.
+		}(buf, peerAddr)
+	}
+}
+
+// Serve messages received on the given packet listener to the given handler.
+func Serve(l net.PacketConn, handler Handler) error {
+	srv := &Server{
+		Handler:         handler,
+		MaxMessageBytes: DefaultMaxMessageBytes,
+	}
+	return srv.Serve(l)
+}
diff --git a/vendor/github.com/huin/goupnp/scpd/scpd.go b/vendor/github.com/huin/goupnp/scpd/scpd.go
new file mode 100644
index 000000000..c9d2e69e8
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/scpd/scpd.go
@@ -0,0 +1,167 @@
+package scpd
+
+import (
+	"encoding/xml"
+	"strings"
+)
+
+const (
+	// SCPDXMLNamespace is the XML namespace of UPnP service descriptions.
+	SCPDXMLNamespace = "urn:schemas-upnp-org:service-1-0"
+)
+
+// cleanWhitespace trims leading/trailing whitespace from *s in place.
+func cleanWhitespace(s *string) {
+	*s = strings.TrimSpace(*s)
+}
+
+// SCPD is the service description as described by section 2.5 "Service
+// description" in
+// http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf
+type SCPD struct {
+	XMLName        xml.Name        `xml:"scpd"`
+	ConfigId       string          `xml:"configId,attr"`
+	SpecVersion    SpecVersion     `xml:"specVersion"`
+	Actions        []Action        `xml:"actionList>action"`
+	StateVariables []StateVariable `xml:"serviceStateTable>stateVariable"`
+}
+
+// Clean attempts to remove stray whitespace etc. in the structure. It seems
+// unfortunately common for stray whitespace to be present in SCPD documents,
+// this method attempts to make it easy to clean them out.
+func (scpd *SCPD) Clean() {
+	cleanWhitespace(&scpd.ConfigId)
+	for i := range scpd.Actions {
+		scpd.Actions[i].clean()
+	}
+	for i := range scpd.StateVariables {
+		scpd.StateVariables[i].clean()
+	}
+}
+
+// GetStateVariable returns the state variable with the given name, or nil if
+// no such variable is present.
+func (scpd *SCPD) GetStateVariable(variable string) *StateVariable {
+	for i, sv := range scpd.StateVariables {
+		if sv.Name == variable {
+			return &scpd.StateVariables[i]
+		}
+	}
+	return nil
+}
+
+// GetAction returns the action with the given name, or nil if no such action
+// is present.
+func (scpd *SCPD) GetAction(action string) *Action {
+	for i, act := range scpd.Actions {
+		if act.Name == action {
+			return &scpd.Actions[i]
+		}
+	}
+	return nil
+}
+
+// SpecVersion is part of a SCPD document, describes the version of the
+// specification that the data adheres to.
+type SpecVersion struct {
+	Major int32 `xml:"major"`
+	Minor int32 `xml:"minor"`
+}
+
+// Action describes a single SOAP action and its arguments.
+type Action struct {
+	Name      string     `xml:"name"`
+	Arguments []Argument `xml:"argumentList>argument"`
+}
+
+// clean trims stray whitespace from the action and all of its arguments.
+func (action *Action) clean() {
+	cleanWhitespace(&action.Name)
+	for idx := 0; idx < len(action.Arguments); idx++ {
+		action.Arguments[idx].clean()
+	}
+}
+
+// InputArguments returns pointers to the arguments with direction "in".
+func (action *Action) InputArguments() []*Argument {
+	var in []*Argument
+	for idx := range action.Arguments {
+		if arg := &action.Arguments[idx]; arg.IsInput() {
+			in = append(in, arg)
+		}
+	}
+	return in
+}
+
+// OutputArguments returns pointers to the arguments with direction "out".
+func (action *Action) OutputArguments() []*Argument {
+	var out []*Argument
+	for idx := range action.Arguments {
+		if arg := &action.Arguments[idx]; arg.IsOutput() {
+			out = append(out, arg)
+		}
+	}
+	return out
+}
+
+// Argument describes a single argument of a SOAP action.
+type Argument struct {
+	Name                 string `xml:"name"`
+	Direction            string `xml:"direction"`            // in|out
+	RelatedStateVariable string `xml:"relatedStateVariable"` // ?
+	Retval               string `xml:"retval"`               // ?
+}
+
+// clean trims stray whitespace from every text field of the argument.
+func (arg *Argument) clean() {
+	for _, field := range []*string{&arg.Name, &arg.Direction, &arg.RelatedStateVariable, &arg.Retval} {
+		cleanWhitespace(field)
+	}
+}
+
+// IsInput reports whether the argument's direction is "in".
+func (arg *Argument) IsInput() bool {
+	return arg.Direction == "in"
+}
+
+// IsOutput reports whether the argument's direction is "out".
+func (arg *Argument) IsOutput() bool {
+	return arg.Direction == "out"
+}
+
+// StateVariable describes a single entry of a service's state table.
+type StateVariable struct {
+	Name              string             `xml:"name"`
+	SendEvents        string             `xml:"sendEvents,attr"` // yes|no
+	Multicast         string             `xml:"multicast,attr"`  // yes|no
+	DataType          DataType           `xml:"dataType"`
+	DefaultValue      string             `xml:"defaultValue"`
+	AllowedValueRange *AllowedValueRange `xml:"allowedValueRange"` // nil when absent
+	AllowedValues     []string           `xml:"allowedValueList>allowedValue"`
+}
+
+// clean trims stray whitespace from all textual parts of the state variable.
+func (v *StateVariable) clean() {
+	cleanWhitespace(&v.Name)
+	cleanWhitespace(&v.SendEvents)
+	cleanWhitespace(&v.Multicast)
+	v.DataType.clean()
+	cleanWhitespace(&v.DefaultValue)
+	if v.AllowedValueRange != nil {
+		v.AllowedValueRange.clean()
+	}
+	for i := range v.AllowedValues {
+		cleanWhitespace(&v.AllowedValues[i])
+	}
+}
+
+// AllowedValueRange bounds a numeric state variable (values kept as strings).
+type AllowedValueRange struct {
+	Minimum string `xml:"minimum"`
+	Maximum string `xml:"maximum"`
+	Step    string `xml:"step"`
+}
+
+// clean trims stray whitespace from the range's fields.
+func (r *AllowedValueRange) clean() {
+	cleanWhitespace(&r.Minimum)
+	cleanWhitespace(&r.Maximum)
+	cleanWhitespace(&r.Step)
+}
+
+// DataType describes a state variable's UPnP data type.
+type DataType struct {
+	Name string `xml:",chardata"`
+	Type string `xml:"type,attr"`
+}
+
+// clean trims stray whitespace from the data type's fields.
+func (dt *DataType) clean() {
+	cleanWhitespace(&dt.Name)
+	cleanWhitespace(&dt.Type)
+}
diff --git a/vendor/github.com/huin/goupnp/service_client.go b/vendor/github.com/huin/goupnp/service_client.go
new file mode 100644
index 000000000..9111c93cb
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/service_client.go
@@ -0,0 +1,88 @@
+package goupnp
+
+import (
+ "fmt"
+ "net/url"
+
+ "github.com/huin/goupnp/soap"
+)
+
+// ServiceClient is a SOAP client, root device and the service for the SOAP
+// client rolled into one value. The root device, location, and service are
+// intended to be informational. Location can be used to later recreate a
+// ServiceClient with NewServiceClientByURL if the service is still present;
+// bypassing the discovery process.
+type ServiceClient struct {
+	SOAPClient *soap.SOAPClient // client bound to the service's control URL
+	RootDevice *RootDevice      // root device the service belongs to
+	Location   *url.URL         // description URL the device was discovered at
+	Service    *Service         // the matched service entry
+}
+
+// NewServiceClients discovers services, and returns clients for them. err will
+// report any error with the discovery process (blocking any device/service
+// discovery), errors reports errors on a per-root-device basis.
+func NewServiceClients(searchTarget string) (clients []ServiceClient, errors []error, err error) {
+	var candidates []MaybeRootDevice
+	if candidates, err = DiscoverDevices(searchTarget); err != nil {
+		return
+	}
+
+	clients = make([]ServiceClient, 0, len(candidates))
+
+	for _, candidate := range candidates {
+		if candidate.Err != nil {
+			errors = append(errors, candidate.Err)
+			continue
+		}
+
+		deviceClients, cerr := NewServiceClientsFromRootDevice(candidate.Root, candidate.Location, searchTarget)
+		if cerr != nil {
+			errors = append(errors, cerr)
+			continue
+		}
+		clients = append(clients, deviceClients...)
+	}
+
+	return
+}
+
+// NewServiceClientsByURL creates client(s) for the given service URN, for a
+// root device at the given URL.
+func NewServiceClientsByURL(loc *url.URL, searchTarget string) ([]ServiceClient, error) {
+	rootDevice, err := DeviceByURL(loc)
+	if err != nil {
+		return nil, err
+	}
+	return NewServiceClientsFromRootDevice(rootDevice, loc, searchTarget)
+}
+
+// NewServiceClientsFromRootDevice creates client(s) for the given service URN,
+// in a given root device. The loc parameter is simply assigned to the
+// Location attribute of the returned ServiceClient(s).
+func NewServiceClientsFromRootDevice(rootDevice *RootDevice, loc *url.URL, searchTarget string) ([]ServiceClient, error) {
+	device := &rootDevice.Device
+	srvs := device.FindService(searchTarget)
+	if len(srvs) == 0 {
+		return nil, fmt.Errorf("goupnp: service %q not found within device %q (UDN=%q)",
+			searchTarget, device.FriendlyName, device.UDN)
+	}
+
+	clients := make([]ServiceClient, 0, len(srvs))
+	for _, srv := range srvs {
+		client := ServiceClient{
+			SOAPClient: srv.NewSOAPClient(),
+			RootDevice: rootDevice,
+			Location:   loc,
+			Service:    srv,
+		}
+		clients = append(clients, client)
+	}
+	return clients, nil
+}
+
+// GetServiceClient returns the ServiceClient itself. This is provided so that the
+// service client attributes can be accessed via an interface method on a
+// wrapping type.
+func (client *ServiceClient) GetServiceClient() *ServiceClient {
+	return client
+}
diff --git a/vendor/github.com/huin/goupnp/soap/soap.go b/vendor/github.com/huin/goupnp/soap/soap.go
new file mode 100644
index 000000000..815610734
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/soap/soap.go
@@ -0,0 +1,157 @@
+// Definition for the SOAP structure required for UPnP's SOAP usage.
+
+package soap
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+)
+
+const (
+	soapEncodingStyle = "http://schemas.xmlsoap.org/soap/encoding/"
+	soapPrefix        = xml.Header + `<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"><s:Body>`
+	soapSuffix        = `</s:Body></s:Envelope>`
+)
+
+// SOAPClient performs SOAP actions against a single endpoint URL.
+type SOAPClient struct {
+	EndpointURL url.URL
+	HTTPClient  http.Client
+}
+
+// NewSOAPClient returns a SOAPClient for the given endpoint. The embedded
+// http.Client is left at its zero value (no timeout is configured here).
+func NewSOAPClient(endpointURL url.URL) *SOAPClient {
+	return &SOAPClient{
+		EndpointURL: endpointURL,
+	}
+}
+
+// PerformAction makes a SOAP request, with the given action.
+// inAction and outAction must both be pointers to structs with string fields
+// only. (The doc comment previously named the wrong function,
+// "PerformSOAPAction".)
+func (client *SOAPClient) PerformAction(actionNamespace, actionName string, inAction interface{}, outAction interface{}) error {
+	requestBytes, err := encodeRequestAction(actionNamespace, actionName, inAction)
+	if err != nil {
+		return err
+	}
+
+	response, err := client.HTTPClient.Do(&http.Request{
+		Method: "POST",
+		URL:    &client.EndpointURL,
+		Header: http.Header{
+			"SOAPACTION":   []string{`"` + actionNamespace + "#" + actionName + `"`},
+			"CONTENT-TYPE": []string{"text/xml; charset=\"utf-8\""},
+		},
+		Body: ioutil.NopCloser(bytes.NewBuffer(requestBytes)),
+		// Set ContentLength to avoid chunked encoding - some servers might not support it.
+		ContentLength: int64(len(requestBytes)),
+	})
+	if err != nil {
+		return fmt.Errorf("goupnp: error performing SOAP HTTP request: %v", err)
+	}
+	defer response.Body.Close()
+	if response.StatusCode != http.StatusOK {
+		return fmt.Errorf("goupnp: SOAP request got HTTP %s", response.Status)
+	}
+
+	responseEnv := newSOAPEnvelope()
+	decoder := xml.NewDecoder(response.Body)
+	if err := decoder.Decode(responseEnv); err != nil {
+		return fmt.Errorf("goupnp: error decoding response body: %v", err)
+	}
+
+	// A SOAP fault in the body takes precedence over any payload.
+	if responseEnv.Body.Fault != nil {
+		return responseEnv.Body.Fault
+	}
+
+	if outAction != nil {
+		if err := xml.Unmarshal(responseEnv.Body.RawAction, outAction); err != nil {
+			return fmt.Errorf("goupnp: error unmarshalling out action: %v, %v", err, responseEnv.Body.RawAction)
+		}
+	}
+
+	return nil
+}
+
+// newSOAPEnvelope creates a soapEnvelope with the standard SOAP encoding
+// style set, ready to receive a decoded response. (The comment previously
+// referred to the function by the wrong name, "newSOAPAction".)
+func newSOAPEnvelope() *soapEnvelope {
+	return &soapEnvelope{
+		EncodingStyle: soapEncodingStyle,
+	}
+}
+
+// encodeRequestAction is a hacky way to create an encoded SOAP envelope
+// containing the given action. Experiments with one router have shown that it
+// 500s for requests where the outer default xmlns is set to the SOAP
+// namespace, and then reassigning the default namespace within that to the
+// service namespace. Hand-coding the outer XML to work-around this.
+func encodeRequestAction(actionNamespace, actionName string, inAction interface{}) ([]byte, error) {
+	requestBuf := new(bytes.Buffer)
+	requestBuf.WriteString(soapPrefix)
+	requestBuf.WriteString(`<u:`)
+	xml.EscapeText(requestBuf, []byte(actionName))
+	requestBuf.WriteString(` xmlns:u="`)
+	xml.EscapeText(requestBuf, []byte(actionNamespace))
+	requestBuf.WriteString(`">`)
+	// nil means the action has no input arguments; emit an empty element.
+	if inAction != nil {
+		if err := encodeRequestArgs(requestBuf, inAction); err != nil {
+			return nil, err
+		}
+	}
+	requestBuf.WriteString(`</u:`)
+	xml.EscapeText(requestBuf, []byte(actionName))
+	requestBuf.WriteString(`>`)
+	requestBuf.WriteString(soapSuffix)
+	return requestBuf.Bytes(), nil
+}
+
+// encodeRequestArgs writes each string field of the struct pointed to by
+// inAction as an XML element into w. The element name is the field name,
+// unless overridden by a `soap:"..."` struct tag.
+func encodeRequestArgs(w *bytes.Buffer, inAction interface{}) error {
+	in := reflect.Indirect(reflect.ValueOf(inAction))
+	if in.Kind() != reflect.Struct {
+		return fmt.Errorf("goupnp: SOAP inAction is not a struct but of type %v", in.Type())
+	}
+	enc := xml.NewEncoder(w)
+	nFields := in.NumField()
+	inType := in.Type()
+	for i := 0; i < nFields; i++ {
+		field := inType.Field(i)
+		argName := field.Name
+		if nameOverride := field.Tag.Get("soap"); nameOverride != "" {
+			argName = nameOverride
+		}
+		value := in.Field(i)
+		if value.Kind() != reflect.String {
+			return fmt.Errorf("goupnp: SOAP arg %q is not of type string, but of type %v", argName, value.Type())
+		}
+		// Keyed composite literal (the unkeyed form trips go vet).
+		elem := xml.StartElement{Name: xml.Name{Local: argName}}
+		if err := enc.EncodeElement(value.Interface(), elem); err != nil {
+			return fmt.Errorf("goupnp: error encoding SOAP arg %q: %v", argName, err)
+		}
+	}
+	// The encoder buffers output; propagate any error from the final flush
+	// rather than silently discarding it.
+	return enc.Flush()
+}
+
+// soapEnvelope models the outer SOAP Envelope element of a response.
+type soapEnvelope struct {
+	XMLName       xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"`
+	EncodingStyle string   `xml:"http://schemas.xmlsoap.org/soap/envelope/ encodingStyle,attr"`
+	Body          soapBody `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"`
+}
+
+// soapBody holds either a fault or the raw (not-yet-decoded) action response.
+type soapBody struct {
+	Fault     *SOAPFaultError `xml:"Fault"`
+	RawAction []byte          `xml:",innerxml"`
+}
+
+// SOAPFaultError implements error, and contains SOAP fault information.
+type SOAPFaultError struct {
+	FaultCode   string `xml:"faultcode"`
+	FaultString string `xml:"faultstring"`
+	Detail      string `xml:"detail"`
+}
+
+// Error returns the fault string prefixed with "SOAP fault: ".
+func (err *SOAPFaultError) Error() string {
+	return fmt.Sprintf("SOAP fault: %s", err.FaultString)
+}
diff --git a/vendor/github.com/huin/goupnp/soap/types.go b/vendor/github.com/huin/goupnp/soap/types.go
new file mode 100644
index 000000000..fdbeec8d4
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/soap/types.go
@@ -0,0 +1,519 @@
+package soap
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+var (
+ // localLoc acts like time.Local for this package, but is faked out by the
+ // unit tests to ensure that things stay constant (especially when running
+ // this test in a place where local time is UTC which might mask bugs).
+ localLoc = time.Local
+)
+
+// MarshalUi1 marshals uint8 to the SOAP "ui1" type.
+func MarshalUi1(v uint8) (string, error) {
+	return strconv.FormatUint(uint64(v), 10), nil
+}
+
+// UnmarshalUi1 unmarshals uint8 from the SOAP "ui1" type.
+func UnmarshalUi1(s string) (uint8, error) {
+	v, err := strconv.ParseUint(s, 10, 8)
+	return uint8(v), err
+}
+
+// MarshalUi2 marshals uint16 to the SOAP "ui2" type.
+func MarshalUi2(v uint16) (string, error) {
+	return strconv.FormatUint(uint64(v), 10), nil
+}
+
+// UnmarshalUi2 unmarshals uint16 from the SOAP "ui2" type.
+func UnmarshalUi2(s string) (uint16, error) {
+	v, err := strconv.ParseUint(s, 10, 16)
+	return uint16(v), err
+}
+
+// MarshalUi4 marshals uint32 to the SOAP "ui4" type.
+func MarshalUi4(v uint32) (string, error) {
+	return strconv.FormatUint(uint64(v), 10), nil
+}
+
+// UnmarshalUi4 unmarshals uint32 from the SOAP "ui4" type.
+func UnmarshalUi4(s string) (uint32, error) {
+	v, err := strconv.ParseUint(s, 10, 32)
+	return uint32(v), err
+}
+
+// MarshalI1 marshals int8 to the SOAP "i1" type.
+func MarshalI1(v int8) (string, error) {
+	return strconv.FormatInt(int64(v), 10), nil
+}
+
+// UnmarshalI1 unmarshals int8 from the SOAP "i1" type.
+func UnmarshalI1(s string) (int8, error) {
+	v, err := strconv.ParseInt(s, 10, 8)
+	return int8(v), err
+}
+
+// MarshalI2 marshals int16 to the SOAP "i2" type.
+func MarshalI2(v int16) (string, error) {
+	return strconv.FormatInt(int64(v), 10), nil
+}
+
+// UnmarshalI2 unmarshals int16 from the SOAP "i2" type.
+func UnmarshalI2(s string) (int16, error) {
+	v, err := strconv.ParseInt(s, 10, 16)
+	return int16(v), err
+}
+
+// MarshalI4 marshals int32 to the SOAP "i4" type.
+func MarshalI4(v int32) (string, error) {
+	return strconv.FormatInt(int64(v), 10), nil
+}
+
+// UnmarshalI4 unmarshals int32 from the SOAP "i4" type.
+func UnmarshalI4(s string) (int32, error) {
+	v, err := strconv.ParseInt(s, 10, 32)
+	return int32(v), err
+}
+
+// MarshalInt marshals int64 to the SOAP "int" type.
+func MarshalInt(v int64) (string, error) {
+	return strconv.FormatInt(v, 10), nil
+}
+
+// UnmarshalInt unmarshals int64 from the SOAP "int" type.
+func UnmarshalInt(s string) (int64, error) {
+	return strconv.ParseInt(s, 10, 64)
+}
+
+// MarshalR4 marshals float32 to the SOAP "r4" type.
+func MarshalR4(v float32) (string, error) {
+	return strconv.FormatFloat(float64(v), 'G', -1, 32), nil
+}
+
+// UnmarshalR4 unmarshals float32 from the SOAP "r4" type.
+func UnmarshalR4(s string) (float32, error) {
+	v, err := strconv.ParseFloat(s, 32)
+	return float32(v), err
+}
+
+// MarshalR8 marshals float64 to the SOAP "r8" type.
+func MarshalR8(v float64) (string, error) {
+	return strconv.FormatFloat(v, 'G', -1, 64), nil
+}
+
+// UnmarshalR8 unmarshals float64 from the SOAP "r8" type.
+func UnmarshalR8(s string) (float64, error) {
+	v, err := strconv.ParseFloat(s, 64)
+	return float64(v), err
+}
+
+// MarshalFixed14_4 marshals float64 to SOAP "fixed.14.4" type. Values at or
+// beyond +/-1e14 cannot be represented in 14 integer digits and are rejected.
+func MarshalFixed14_4(v float64) (string, error) {
+	if v >= 1e14 || v <= -1e14 {
+		return "", fmt.Errorf("soap fixed14.4: value %v out of bounds", v)
+	}
+	return strconv.FormatFloat(v, 'f', 4, 64), nil
+}
+
+// UnmarshalFixed14_4 unmarshals float64 from SOAP "fixed.14.4" type, applying
+// the same +/-1e14 bounds check as MarshalFixed14_4.
+func UnmarshalFixed14_4(s string) (float64, error) {
+	v, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return 0, err
+	}
+	if v >= 1e14 || v <= -1e14 {
+		return 0, fmt.Errorf("soap fixed14.4: value %q out of bounds", s)
+	}
+	return v, nil
+}
+
+// MarshalChar marshals rune to SOAP "char" type.
+func MarshalChar(v rune) (string, error) {
+	if v == 0 {
+		return "", errors.New("soap char: rune 0 is not allowed")
+	}
+	return string(v), nil
+}
+
+// UnmarshalChar unmarshals rune from SOAP "char" type. The input must be
+// exactly one UTF-8 encoded rune.
+func UnmarshalChar(s string) (rune, error) {
+	if len(s) == 0 {
+		return 0, errors.New("soap char: got empty string")
+	}
+	r, n := utf8.DecodeRune([]byte(s))
+	if n != len(s) {
+		return 0, fmt.Errorf("soap char: value %q is not a single rune", s)
+	}
+	return r, nil
+}
+
+// MarshalString marshals string to the SOAP "string" type (identity).
+func MarshalString(v string) (string, error) {
+	return v, nil
+}
+
+// UnmarshalString unmarshals string from the SOAP "string" type (identity).
+func UnmarshalString(v string) (string, error) {
+	return v, nil
+}
+
+// parseInt parses s as a base-10 int, storing any parse failure in *err.
+// This lets callers batch several parses and check *err once; note that a
+// later failure overwrites an earlier one. Returns 0 on failure.
+func parseInt(s string, err *error) int {
+	v, parseErr := strconv.ParseInt(s, 10, 64)
+	if parseErr != nil {
+		*err = parseErr
+	}
+	return int(v)
+}
+
+// dateRegexps matches ISO8601 dates in extended and basic forms; the month
+// and day components are optional in both.
+var dateRegexps = []*regexp.Regexp{
+	// yyyy[-mm[-dd]]
+	regexp.MustCompile(`^(\d{4})(?:-(\d{2})(?:-(\d{2}))?)?$`),
+	// yyyy[mm[dd]]
+	regexp.MustCompile(`^(\d{4})(?:(\d{2})(?:(\d{2}))?)?$`),
+}
+
+// parseDateParts extracts year, month and day from an ISO8601 date string.
+// Absent month/day components default to 1. Note that component ranges are
+// not validated here (e.g. month 13 parses successfully).
+func parseDateParts(s string) (year, month, day int, err error) {
+	var parts []string
+	for _, re := range dateRegexps {
+		parts = re.FindStringSubmatch(s)
+		if parts != nil {
+			break
+		}
+	}
+	if parts == nil {
+		err = fmt.Errorf("soap date: value %q is not in a recognized ISO8601 date format", s)
+		return
+	}
+
+	year = parseInt(parts[1], &err)
+	month = 1
+	day = 1
+	if len(parts[2]) != 0 {
+		month = parseInt(parts[2], &err)
+		if len(parts[3]) != 0 {
+			day = parseInt(parts[3], &err)
+		}
+	}
+
+	if err != nil {
+		err = fmt.Errorf("soap date: %q: %v", s, err)
+	}
+
+	return
+}
+
+// timeRegexps matches ISO8601 times in extended and basic forms; the minute
+// and second components are optional in both.
+var timeRegexps = []*regexp.Regexp{
+	// hh[:mm[:ss]]
+	regexp.MustCompile(`^(\d{2})(?::(\d{2})(?::(\d{2}))?)?$`),
+	// hh[mm[ss]]
+	regexp.MustCompile(`^(\d{2})(?:(\d{2})(?:(\d{2}))?)?$`),
+}
+
+// parseTimeParts extracts hour, minute and second from an ISO8601 time
+// string. Absent minute/second components default to 0. Component ranges are
+// not validated here; callers are responsible for bounds checks.
+func parseTimeParts(s string) (hour, minute, second int, err error) {
+	var parts []string
+	for _, re := range timeRegexps {
+		parts = re.FindStringSubmatch(s)
+		if parts != nil {
+			break
+		}
+	}
+	if parts == nil {
+		err = fmt.Errorf("soap time: value %q is not in ISO8601 time format", s)
+		return
+	}
+
+	hour = parseInt(parts[1], &err)
+	if len(parts[2]) != 0 {
+		minute = parseInt(parts[2], &err)
+		if len(parts[3]) != 0 {
+			second = parseInt(parts[3], &err)
+		}
+	}
+
+	if err != nil {
+		err = fmt.Errorf("soap time: %q: %v", s, err)
+	}
+
+	return
+}
+
+// timezoneRegexp matches an ISO8601 timezone offset: (+|-)hh[[:]mm]
+var timezoneRegexp = regexp.MustCompile(`^([+-])(\d{2})(?::?(\d{2}))?$`)
+
+// parseTimezone converts an ISO8601 timezone string ("Z" or an
+// hour/minute offset) into a UTC offset in seconds.
+func parseTimezone(s string) (offset int, err error) {
+	if s == "Z" {
+		return 0, nil
+	}
+	parts := timezoneRegexp.FindStringSubmatch(s)
+	if parts == nil {
+		err = fmt.Errorf("soap timezone: value %q is not in ISO8601 timezone format", s)
+		return
+	}
+
+	offset = parseInt(parts[2], &err) * 3600
+	if len(parts[3]) != 0 {
+		offset += parseInt(parts[3], &err) * 60
+	}
+	// Apply the sign last so it covers both the hour and minute components.
+	if parts[1] == "-" {
+		offset = -offset
+	}
+
+	if err != nil {
+		err = fmt.Errorf("soap timezone: %q: %v", s, err)
+	}
+
+	return
+}
+
+var completeDateTimeZoneRegexp = regexp.MustCompile(`^([^T]+)(?:T([^-+Z]+)(.+)?)?$`)
+
+// splitCompleteDateTimeZone splits date, time and timezone apart from an
+// ISO8601 string. It does not ensure that the contents of each part are
+// correct, it merely splits on certain delimiters.
+// e.g "2010-09-08T12:15:10+0700" => "2010-09-08", "12:15:10", "+0700".
+// Timezone can only be present if time is also present. Absent parts are
+// returned as empty strings.
+func splitCompleteDateTimeZone(s string) (dateStr, timeStr, zoneStr string, err error) {
+	parts := completeDateTimeZoneRegexp.FindStringSubmatch(s)
+	if parts == nil {
+		err = fmt.Errorf("soap date/time/zone: value %q is not in ISO8601 datetime format", s)
+		return
+	}
+	dateStr = parts[1]
+	timeStr = parts[2]
+	zoneStr = parts[3]
+	return
+}
+
+// MarshalDate marshals time.Time to SOAP "date" type. Note that this converts
+// to local time, and discards the time-of-day components.
+func MarshalDate(v time.Time) (string, error) {
+	return v.In(localLoc).Format("2006-01-02"), nil
+}
+
+// dateFmts lists accepted date layouts.
+// NOTE(review): not referenced by UnmarshalDate below (which parses via
+// parseDateParts); confirm whether it is used elsewhere before removing.
+var dateFmts = []string{"2006-01-02", "20060102"}
+
+// UnmarshalDate unmarshals time.Time from SOAP "date" type. This outputs the
+// date as midnight in the local time zone.
+func UnmarshalDate(s string) (time.Time, error) {
+	year, month, day, err := parseDateParts(s)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return time.Date(year, time.Month(month), day, 0, 0, 0, 0, localLoc), nil
+}
+
+// TimeOfDay is used in cases where SOAP "time" or "time.tz" is used.
+type TimeOfDay struct {
+	// Duration of time since midnight.
+	FromMidnight time.Duration
+
+	// Set to true if Offset is specified. If false, then the timezone is
+	// unspecified (and by ISO8601 - implies some "local" time).
+	HasOffset bool
+
+	// Offset is non-zero only if time.tz is used. It is otherwise ignored. If
+	// non-zero, then it is regarded as a UTC offset in seconds. Note that the
+	// sub-minutes is ignored by the marshal function.
+	Offset int
+}
+
+// MarshalTimeOfDay marshals TimeOfDay to the "time" type. HasOffset and
+// Offset are ignored here; use MarshalTimeOfDayTz for "time.tz".
+func MarshalTimeOfDay(v TimeOfDay) (string, error) {
+	// Decompose whole seconds since midnight into hh/mm/ss.
+	d := int64(v.FromMidnight / time.Second)
+	hour := d / 3600
+	d = d % 3600
+	minute := d / 60
+	second := d % 60
+
+	return fmt.Sprintf("%02d:%02d:%02d", hour, minute, second), nil
+}
+
+// UnmarshalTimeOfDay unmarshals TimeOfDay from the "time" type. A value that
+// carries a timezone is rejected; use UnmarshalTimeOfDayTz for "time.tz".
+func UnmarshalTimeOfDay(s string) (TimeOfDay, error) {
+	t, err := UnmarshalTimeOfDayTz(s)
+	if err != nil {
+		return TimeOfDay{}, err
+	} else if t.HasOffset {
+		// BUG fix: the format string used %q but no argument was supplied
+		// (a go vet error); pass the offending value s.
+		return TimeOfDay{}, fmt.Errorf("soap time: value %q contains unexpected timezone", s)
+	}
+	return t, nil
+}
+
+// MarshalTimeOfDayTz marshals TimeOfDay to the "time.tz" type, appending a
+// "Z" or "+/-hh:mm" suffix when HasOffset is set.
+func MarshalTimeOfDayTz(v TimeOfDay) (string, error) {
+	// Decompose whole seconds since midnight into hh/mm/ss.
+	d := int64(v.FromMidnight / time.Second)
+	hour := d / 3600
+	d = d % 3600
+	minute := d / 60
+	second := d % 60
+
+	tz := ""
+	if v.HasOffset {
+		if v.Offset == 0 {
+			tz = "Z"
+		} else {
+			// Sub-minute precision of Offset is discarded here, as documented
+			// on TimeOfDay.Offset.
+			offsetMins := v.Offset / 60
+			sign := '+'
+			// NOTE(review): "< 1" (rather than "< 0") means a positive
+			// sub-minute offset renders as "-00:00"; both denote a zero
+			// offset, but confirm this is intentional.
+			if offsetMins < 1 {
+				offsetMins = -offsetMins
+				sign = '-'
+			}
+			tz = fmt.Sprintf("%c%02d:%02d", sign, offsetMins/60, offsetMins%60)
+		}
+	}
+
+	return fmt.Sprintf("%02d:%02d:%02d%s", hour, minute, second, tz), nil
+}
+
+// UnmarshalTimeOfDayTz unmarshals TimeOfDay from the "time.tz" type. The
+// timezone suffix ("Z" or "+/-hh[:mm]") is optional; when absent, HasOffset
+// is false in the result.
+func UnmarshalTimeOfDayTz(s string) (tod TimeOfDay, err error) {
+	// The first of 'Z', '+' or '-' marks the start of the timezone suffix.
+	zoneIndex := strings.IndexAny(s, "Z+-")
+	var timePart string
+	var hasOffset bool
+	var offset int
+	if zoneIndex == -1 {
+		hasOffset = false
+		timePart = s
+	} else {
+		hasOffset = true
+		timePart = s[:zoneIndex]
+		if offset, err = parseTimezone(s[zoneIndex:]); err != nil {
+			return
+		}
+	}
+
+	hour, minute, second, err := parseTimeParts(timePart)
+	if err != nil {
+		return
+	}
+
+	fromMidnight := time.Duration(hour*3600+minute*60+second) * time.Second
+
+	// ISO8601 special case - values up to 24:00:00 are allowed, so using
+	// strictly greater-than for the maximum value.
+	if fromMidnight > 24*time.Hour || minute >= 60 || second >= 60 {
+		return TimeOfDay{}, fmt.Errorf("soap time.tz: value %q has value(s) out of range", s)
+	}
+
+	// NOTE(review): fromMidnight is recomputed here; it is identical to the
+	// value computed above.
+	return TimeOfDay{
+		FromMidnight: time.Duration(hour*3600+minute*60+second) * time.Second,
+		HasOffset:    hasOffset,
+		Offset:       offset,
+	}, nil
+}
+
+// MarshalDateTime marshals time.Time to SOAP "dateTime" type. Note that this
+// converts to local time.
+func MarshalDateTime(v time.Time) (string, error) {
+	return v.In(localLoc).Format("2006-01-02T15:04:05"), nil
+}
+
+// UnmarshalDateTime unmarshals time.Time from the SOAP "dateTime" type. This
+// returns a value in the local timezone. A timezone suffix in the input is
+// rejected; use UnmarshalDateTimeTz for "dateTime.tz".
+func UnmarshalDateTime(s string) (result time.Time, err error) {
+	dateStr, timeStr, zoneStr, err := splitCompleteDateTimeZone(s)
+	if err != nil {
+		return
+	}
+
+	if len(zoneStr) != 0 {
+		err = fmt.Errorf("soap datetime: unexpected timezone in %q", s)
+		return
+	}
+
+	year, month, day, err := parseDateParts(dateStr)
+	if err != nil {
+		return
+	}
+
+	// The time part is optional; absent components default to midnight.
+	var hour, minute, second int
+	if len(timeStr) != 0 {
+		hour, minute, second, err = parseTimeParts(timeStr)
+		if err != nil {
+			return
+		}
+	}
+
+	result = time.Date(year, time.Month(month), day, hour, minute, second, 0, localLoc)
+	return
+}
+
+// MarshalDateTimeTz marshals time.Time to SOAP "dateTime.tz" type, retaining
+// the value's own timezone offset.
+func MarshalDateTimeTz(v time.Time) (string, error) {
+	return v.Format("2006-01-02T15:04:05-07:00"), nil
+}
+
+// UnmarshalDateTimeTz unmarshals time.Time from the SOAP "dateTime.tz" type.
+// This returns a value in the local timezone when the timezone is unspecified.
+func UnmarshalDateTimeTz(s string) (result time.Time, err error) {
+	dateStr, timeStr, zoneStr, err := splitCompleteDateTimeZone(s)
+	if err != nil {
+		return
+	}
+
+	year, month, day, err := parseDateParts(dateStr)
+	if err != nil {
+		return
+	}
+
+	// The time part is optional; absent components default to midnight.
+	var hour, minute, second int
+	var location *time.Location = localLoc
+	if len(timeStr) != 0 {
+		hour, minute, second, err = parseTimeParts(timeStr)
+		if err != nil {
+			return
+		}
+		if len(zoneStr) != 0 {
+			var offset int
+			offset, err = parseTimezone(zoneStr)
+			// BUG fix: previously the error from parseTimezone was not
+			// checked, so a populated result was returned alongside a
+			// non-nil error.
+			if err != nil {
+				return
+			}
+			if offset == 0 {
+				location = time.UTC
+			} else {
+				location = time.FixedZone("", offset)
+			}
+		}
+	}
+
+	result = time.Date(year, time.Month(month), day, hour, minute, second, 0, location)
+	return
+}
+
+// MarshalBoolean marshals bool to SOAP "boolean" type, using the canonical
+// "1"/"0" representation.
+func MarshalBoolean(v bool) (string, error) {
+	if v {
+		return "1", nil
+	}
+	return "0", nil
+}
+
+// UnmarshalBoolean unmarshals bool from the SOAP "boolean" type. Accepts
+// "0"/"false"/"no" and "1"/"true"/"yes" (case-sensitive).
+func UnmarshalBoolean(s string) (bool, error) {
+	switch s {
+	case "0", "false", "no":
+		return false, nil
+	case "1", "true", "yes":
+		return true, nil
+	}
+	return false, fmt.Errorf("soap boolean: %q is not a valid boolean value", s)
+}
+
+// MarshalBinBase64 marshals []byte to SOAP "bin.base64" type.
+func MarshalBinBase64(v []byte) (string, error) {
+	return base64.StdEncoding.EncodeToString(v), nil
+}
+
+// UnmarshalBinBase64 unmarshals []byte from the SOAP "bin.base64" type.
+func UnmarshalBinBase64(s string) ([]byte, error) {
+	return base64.StdEncoding.DecodeString(s)
+}
+
+// MarshalBinHex marshals []byte to SOAP "bin.hex" type.
+func MarshalBinHex(v []byte) (string, error) {
+	return hex.EncodeToString(v), nil
+}
+
+// UnmarshalBinHex unmarshals []byte from the SOAP "bin.hex" type.
+func UnmarshalBinHex(s string) ([]byte, error) {
+	return hex.DecodeString(s)
+}
+
+// MarshalURI marshals *url.URL to SOAP "uri" type.
+func MarshalURI(v *url.URL) (string, error) {
+	return v.String(), nil
+}
+
+// UnmarshalURI unmarshals *url.URL from the SOAP "uri" type.
+func UnmarshalURI(s string) (*url.URL, error) {
+	return url.Parse(s)
+}
diff --git a/vendor/github.com/huin/goupnp/ssdp/registry.go b/vendor/github.com/huin/goupnp/ssdp/registry.go
new file mode 100644
index 000000000..2f84beaae
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/ssdp/registry.go
@@ -0,0 +1,312 @@
+package ssdp
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/huin/goupnp/httpu"
+)
+
+const (
+	// maxExpiryTimeSeconds caps acceptable CACHE-CONTROL max-age values (24h).
+	maxExpiryTimeSeconds = 24 * 60 * 60
+)
+
+var (
+	// maxAgeRx extracts the max-age directive from a CACHE-CONTROL header.
+	maxAgeRx = regexp.MustCompile("max-age=([0-9]+)")
+)
+
+const (
+	EventAlive = EventType(iota)
+	EventUpdate
+	EventByeBye
+)
+
+// EventType identifies the kind of SSDP notification that produced an Update.
+type EventType int8
+
+// String returns a human-readable name for the event type.
+func (et EventType) String() string {
+	switch et {
+	case EventAlive:
+		return "EventAlive"
+	case EventUpdate:
+		return "EventUpdate"
+	case EventByeBye:
+		return "EventByeBye"
+	default:
+		return fmt.Sprintf("EventUnknown(%d)", int8(et))
+	}
+}
+
+// Update describes a change in the registry caused by an SSDP notification.
+type Update struct {
+	// The USN of the service.
+	USN string
+	// What happened.
+	EventType EventType
+	// The entry, which is nil if the service was not known and
+	// EventType==EventByeBye. The contents of this must not be modified as it is
+	// shared with the registry and other listeners. Once created, the Registry
+	// does not modify the Entry value - any updates are replaced with a new
+	// Entry value.
+	Entry *Entry
+}
+
+// Entry records the last-known state of a single advertised device/service.
+type Entry struct {
+	// The address that the entry data was actually received from.
+	RemoteAddr string
+	// Unique Service Name. Identifies a unique instance of a device or service.
+	USN string
+	// Notification Type. The type of device or service being announced.
+	NT string
+	// Server's self-identifying string.
+	Server string
+	Host   string
+	// Location of the UPnP root device description.
+	Location url.URL
+
+	// Despite BOOTID,CONFIGID being required fields, apparently they are not
+	// always set by devices. Set to -1 if not present.
+
+	BootID   int32
+	ConfigID int32
+
+	SearchPort uint16
+
+	// When the last update was received for this entry identified by this USN.
+	LastUpdate time.Time
+	// When the last update's cached values are advised to expire.
+	CacheExpiry time.Time
+}
+
+// newEntryFromRequest builds an Entry from the headers of an SSDP NOTIFY
+// request, validating CACHE-CONTROL, LOCATION and the *.UPNP.ORG headers.
+func newEntryFromRequest(r *http.Request) (*Entry, error) {
+	now := time.Now()
+	expiryDuration, err := parseCacheControlMaxAge(r.Header.Get("CACHE-CONTROL"))
+	if err != nil {
+		return nil, fmt.Errorf("ssdp: error parsing CACHE-CONTROL max age: %v", err)
+	}
+
+	loc, err := url.Parse(r.Header.Get("LOCATION"))
+	if err != nil {
+		return nil, fmt.Errorf("ssdp: error parsing entry Location URL: %v", err)
+	}
+
+	// BOOTID/CONFIGID default to -1 when absent (see Entry comments);
+	// SEARCHPORT defaults to the standard SSDP port.
+	bootID, err := parseUpnpIntHeader(r.Header, "BOOTID.UPNP.ORG", -1)
+	if err != nil {
+		return nil, err
+	}
+	configID, err := parseUpnpIntHeader(r.Header, "CONFIGID.UPNP.ORG", -1)
+	if err != nil {
+		return nil, err
+	}
+	searchPort, err := parseUpnpIntHeader(r.Header, "SEARCHPORT.UPNP.ORG", ssdpSearchPort)
+	if err != nil {
+		return nil, err
+	}
+
+	if searchPort < 1 || searchPort > 65535 {
+		return nil, fmt.Errorf("ssdp: search port %d is out of range", searchPort)
+	}
+
+	return &Entry{
+		RemoteAddr:  r.RemoteAddr,
+		USN:         r.Header.Get("USN"),
+		NT:          r.Header.Get("NT"),
+		Server:      r.Header.Get("SERVER"),
+		Host:        r.Header.Get("HOST"),
+		Location:    *loc,
+		BootID:      bootID,
+		ConfigID:    configID,
+		SearchPort:  uint16(searchPort),
+		LastUpdate:  now,
+		CacheExpiry: now.Add(expiryDuration),
+	}, nil
+}
+
+// parseCacheControlMaxAge extracts the max-age directive (in seconds) from a
+// CACHE-CONTROL header value and returns it as a duration. Values outside
+// [1, maxExpiryTimeSeconds] are rejected.
+func parseCacheControlMaxAge(cc string) (time.Duration, error) {
+	matches := maxAgeRx.FindStringSubmatch(cc)
+	if len(matches) != 2 {
+		return 0, fmt.Errorf("did not find exactly one max-age in cache control header: %q", cc)
+	}
+	// BUG fix: a bit size of 16 capped parseable values at 32767, wrongly
+	// rejecting valid max-age values up to maxExpiryTimeSeconds (86400).
+	expirySeconds, err := strconv.ParseInt(matches[1], 10, 32)
+	if err != nil {
+		return 0, err
+	}
+	if expirySeconds < 1 || expirySeconds > maxExpiryTimeSeconds {
+		return 0, fmt.Errorf("rejecting bad expiry time of %d seconds", expirySeconds)
+	}
+	return time.Duration(expirySeconds) * time.Second, nil
+}
+
+// parseUpnpIntHeader is intended to parse the
+// {BOOT,CONFIGID,SEARCHPORT}.UPNP.ORG header fields. It returns the def if
+// the header is empty or missing.
+func parseUpnpIntHeader(headers http.Header, headerName string, def int32) (int32, error) {
+	s := headers.Get(headerName)
+	if s == "" {
+		return def, nil
+	}
+	v, err := strconv.ParseInt(s, 10, 32)
+	if err != nil {
+		return 0, fmt.Errorf("ssdp: could not parse header %s: %v", headerName, err)
+	}
+	return int32(v), nil
+}
+
+// Compile-time check that Registry satisfies httpu.Handler.
+var _ httpu.Handler = new(Registry)
+
+// Registry maintains knowledge of discovered devices and services.
+//
+// NOTE: the interface for this is experimental and may change, or go away
+// entirely.
+type Registry struct {
+	// lock guards byUSN.
+	lock  sync.Mutex
+	byUSN map[string]*Entry
+
+	// listenersLock guards listeners.
+	listenersLock sync.RWMutex
+	listeners     map[chan<- Update]struct{}
+}
+
+// NewRegistry creates an empty Registry.
+func NewRegistry() *Registry {
+	return &Registry{
+		byUSN:     make(map[string]*Entry),
+		listeners: make(map[chan<- Update]struct{}),
+	}
+}
+
+// NewServerAndRegistry is a convenience function to create a registry, and an
+// httpu server to pass it messages. Call ListenAndServe on the server for
+// messages to be processed.
+func NewServerAndRegistry() (*httpu.Server, *Registry) {
+	reg := NewRegistry()
+	srv := &httpu.Server{
+		Addr:      ssdpUDP4Addr,
+		Multicast: true,
+		Handler:   reg,
+	}
+	return srv, reg
+}
+
+// AddListener registers a channel to receive future registry Updates.
+func (reg *Registry) AddListener(c chan<- Update) {
+	reg.listenersLock.Lock()
+	defer reg.listenersLock.Unlock()
+	reg.listeners[c] = struct{}{}
+}
+
+// RemoveListener unregisters a channel previously passed to AddListener.
+func (reg *Registry) RemoveListener(c chan<- Update) {
+	reg.listenersLock.Lock()
+	defer reg.listenersLock.Unlock()
+	delete(reg.listeners, c)
+}
+
+// sendUpdate delivers u to every registered listener. Note that the send is
+// unbuffered from the registry's perspective: a listener that does not drain
+// its channel blocks delivery to the remaining listeners.
+func (reg *Registry) sendUpdate(u Update) {
+	reg.listenersLock.RLock()
+	defer reg.listenersLock.RUnlock()
+	for c := range reg.listeners {
+		c <- u
+	}
+}
+
+// GetService returns known service (or device) entries for the given service
+// URN. The returned entries are shared; callers must not modify them.
+func (reg *Registry) GetService(serviceURN string) []*Entry {
+	// Currently assumes that the map is small, so we do a linear search rather
+	// than indexed to avoid maintaining two maps.
+	var results []*Entry
+	reg.lock.Lock()
+	defer reg.lock.Unlock()
+	for _, entry := range reg.byUSN {
+		if entry.NT == serviceURN {
+			results = append(results, entry)
+		}
+	}
+	return results
+}
+
+// ServeMessage implements httpu.Handler, and uses SSDP NOTIFY requests to
+// maintain the registry of devices and services. Non-NOTIFY requests are
+// ignored; handler errors are logged rather than propagated.
+func (reg *Registry) ServeMessage(r *http.Request) {
+	if r.Method != methodNotify {
+		return
+	}
+
+	nts := r.Header.Get("nts")
+
+	var err error
+	switch nts {
+	case ntsAlive:
+		err = reg.handleNTSAlive(r)
+	case ntsUpdate:
+		err = reg.handleNTSUpdate(r)
+	case ntsByebye:
+		err = reg.handleNTSByebye(r)
+	default:
+		err = fmt.Errorf("unknown NTS value: %q", nts)
+	}
+	if err != nil {
+		log.Printf("goupnp/ssdp: failed to handle %s message from %s: %v", nts, r.RemoteAddr, err)
+	}
+}
+
+// handleNTSAlive records (or refreshes) an entry from an ssdp:alive NOTIFY
+// and broadcasts an EventAlive update.
+func (reg *Registry) handleNTSAlive(r *http.Request) error {
+	entry, err := newEntryFromRequest(r)
+	if err != nil {
+		return err
+	}
+
+	reg.lock.Lock()
+	reg.byUSN[entry.USN] = entry
+	reg.lock.Unlock()
+
+	reg.sendUpdate(Update{
+		USN:       entry.USN,
+		EventType: EventAlive,
+		Entry:     entry,
+	})
+
+	return nil
+}
+
+// handleNTSUpdate records an entry from an ssdp:update NOTIFY, applying the
+// NEXTBOOTID.UPNP.ORG header as the entry's BootID, and broadcasts an
+// EventUpdate update.
+func (reg *Registry) handleNTSUpdate(r *http.Request) error {
+	entry, err := newEntryFromRequest(r)
+	if err != nil {
+		return err
+	}
+	nextBootID, err := parseUpnpIntHeader(r.Header, "NEXTBOOTID.UPNP.ORG", -1)
+	if err != nil {
+		return err
+	}
+	entry.BootID = nextBootID
+
+	reg.lock.Lock()
+	reg.byUSN[entry.USN] = entry
+	reg.lock.Unlock()
+
+	reg.sendUpdate(Update{
+		USN:       entry.USN,
+		EventType: EventUpdate,
+		Entry:     entry,
+	})
+
+	return nil
+}
+
+// handleNTSByebye removes the entry named by the USN header (if known) and
+// broadcasts an EventByeBye update; Update.Entry is nil when the USN was not
+// previously known.
+func (reg *Registry) handleNTSByebye(r *http.Request) error {
+	usn := r.Header.Get("USN")
+
+	reg.lock.Lock()
+	entry := reg.byUSN[usn]
+	delete(reg.byUSN, usn)
+	reg.lock.Unlock()
+
+	reg.sendUpdate(Update{
+		USN:       usn,
+		EventType: EventByeBye,
+		Entry:     entry,
+	})
+
+	return nil
+}
diff --git a/vendor/github.com/huin/goupnp/ssdp/ssdp.go b/vendor/github.com/huin/goupnp/ssdp/ssdp.go
new file mode 100644
index 000000000..8178f5d94
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/ssdp/ssdp.go
@@ -0,0 +1,83 @@
+package ssdp
+
+import (
+ "errors"
+ "log"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/huin/goupnp/httpu"
+)
+
+const (
+ ssdpDiscover = `"ssdp:discover"`
+ ntsAlive = `ssdp:alive`
+ ntsByebye = `ssdp:byebye`
+ ntsUpdate = `ssdp:update`
+ ssdpUDP4Addr = "239.255.255.250:1900"
+ ssdpSearchPort = 1900
+ methodSearch = "M-SEARCH"
+ methodNotify = "NOTIFY"
+)
+
+// SSDPRawSearch performs a fairly raw SSDP search request, and returns the
+// unique response(s) that it receives. Each response has the requested
+// searchTarget, a USN, and a valid location. maxWaitSeconds states how long to
+// wait for responses in seconds, and must be a minimum of 1 (the
+// implementation waits an additional 100ms for responses to arrive), 2 is a
+// reasonable value for this. numSends is the number of requests to send - 3 is
+// a reasonable value for this.
+func SSDPRawSearch(httpu *httpu.HTTPUClient, searchTarget string, maxWaitSeconds int, numSends int) ([]*http.Response, error) {
+	if maxWaitSeconds < 1 {
+		return nil, errors.New("ssdp: maxWaitSeconds must be >= 1")
+	}
+
+	seenUsns := make(map[string]bool)
+	var responses []*http.Response
+	req := http.Request{
+		Method: methodSearch,
+		// TODO: Support both IPv4 and IPv6.
+		Host: ssdpUDP4Addr,
+		URL:  &url.URL{Opaque: "*"},
+		Header: http.Header{
+			// Putting headers in here avoids them being title-cased.
+			// (The UPnP discovery protocol uses case-sensitive headers)
+			"HOST": []string{ssdpUDP4Addr},
+			"MX":   []string{strconv.FormatInt(int64(maxWaitSeconds), 10)},
+			"MAN":  []string{ssdpDiscover},
+			"ST":   []string{searchTarget},
+		},
+	}
+	allResponses, err := httpu.Do(&req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends)
+	if err != nil {
+		return nil, err
+	}
+	// Filter: keep only 200 responses for the requested search target that
+	// carry a usable location, de-duplicated by USN.
+	for _, response := range allResponses {
+		if response.StatusCode != 200 {
+			log.Printf("ssdp: got response status code %q in search response", response.Status)
+			continue
+		}
+		if st := response.Header.Get("ST"); st != searchTarget {
+			log.Printf("ssdp: got unexpected search target result %q", st)
+			continue
+		}
+		location, err := response.Location()
+		if err != nil {
+			log.Printf("ssdp: no usable location in search response (discarding): %v", err)
+			continue
+		}
+		usn := response.Header.Get("USN")
+		if usn == "" {
+			// BUG fix: this log previously printed err with %v, but err is
+			// always nil at this point; report the substituted location
+			// instead.
+			log.Printf("ssdp: empty/missing USN in search response (using location %q instead)", location.String())
+			usn = location.String()
+		}
+		if _, alreadySeen := seenUsns[usn]; !alreadySeen {
+			seenUsns[usn] = true
+			responses = append(responses, response)
+		}
+	}
+
+	return responses, nil
+}
diff --git a/vendor/github.com/jackpal/go-nat-pmp/.travis.yml b/vendor/github.com/jackpal/go-nat-pmp/.travis.yml
new file mode 100644
index 000000000..9c3f6547d
--- /dev/null
+++ b/vendor/github.com/jackpal/go-nat-pmp/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - 1.6.2
+ - tip
+
+allowed_failures:
+ - go: tip
+
+install:
+ - go get -d -v ./... && go install -race -v ./...
+
+script: go test -race -v ./...
diff --git a/vendor/github.com/jackpal/go-nat-pmp/LICENSE b/vendor/github.com/jackpal/go-nat-pmp/LICENSE
new file mode 100644
index 000000000..249514b0f
--- /dev/null
+++ b/vendor/github.com/jackpal/go-nat-pmp/LICENSE
@@ -0,0 +1,13 @@
+ Copyright 2013 John Howard Palevich
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/jackpal/go-nat-pmp/README.md b/vendor/github.com/jackpal/go-nat-pmp/README.md
new file mode 100644
index 000000000..3ca687f0b
--- /dev/null
+++ b/vendor/github.com/jackpal/go-nat-pmp/README.md
@@ -0,0 +1,52 @@
+go-nat-pmp
+==========
+
+A Go language client for the NAT-PMP internet protocol for port mapping and discovering the external
+IP address of a firewall.
+
+NAT-PMP is supported by Apple brand routers and open source routers like Tomato and DD-WRT.
+
+See http://tools.ietf.org/html/draft-cheshire-nat-pmp-03
+
+
+[![Build Status](https://travis-ci.org/jackpal/go-nat-pmp.svg)](https://travis-ci.org/jackpal/go-nat-pmp)
+
+Get the package
+---------------
+
+ go get -u github.com/jackpal/go-nat-pmp
+
+Usage
+-----
+
+ import (
+ "github.com/jackpal/gateway"
+ natpmp "github.com/jackpal/go-nat-pmp"
+ )
+
+ gatewayIP, err = gateway.DiscoverGateway()
+ if err != nil {
+ return
+ }
+
+ client := natpmp.NewClient(gatewayIP)
+ response, err := client.GetExternalAddress()
+ if err != nil {
+ return
+ }
+ print("External IP address:", response.ExternalIPAddress)
+
+Clients
+-------
+
+This library is used in the Taipei Torrent BitTorrent client http://github.com/jackpal/Taipei-Torrent
+
+Complete documentation
+----------------------
+
+ http://godoc.org/github.com/jackpal/go-nat-pmp
+
+License
+-------
+
+This project is licensed under the Apache License 2.0.
diff --git a/vendor/github.com/jackpal/go-nat-pmp/natpmp.go b/vendor/github.com/jackpal/go-nat-pmp/natpmp.go
new file mode 100644
index 000000000..5ca7680e4
--- /dev/null
+++ b/vendor/github.com/jackpal/go-nat-pmp/natpmp.go
@@ -0,0 +1,153 @@
+package natpmp
+
+import (
+ "fmt"
+ "net"
+ "time"
+)
+
+// Implement the NAT-PMP protocol, typically supported by Apple routers and open source
+// routers such as DD-WRT and Tomato.
+//
+// See http://tools.ietf.org/html/draft-cheshire-nat-pmp-03
+//
+// Usage:
+//
+// client := natpmp.NewClient(gatewayIP)
+// response, err := client.GetExternalAddress()
+
+// The recommended mapping lifetime for AddPortMapping
+const RECOMMENDED_MAPPING_LIFETIME_SECONDS = 3600
+
+// Interface used to make remote procedure calls.
+type caller interface {
+	call(msg []byte, timeout time.Duration) (result []byte, err error)
+}
+
+// Client is a NAT-PMP protocol client.
+type Client struct {
+	// caller performs the underlying UDP request/response exchange.
+	caller caller
+	// timeout bounds total retry time; 0 means the protocol default
+	// (see NewClient).
+	timeout time.Duration
+}
+
+// Create a NAT-PMP client for the NAT-PMP server at the gateway.
+// Uses default timeout which is around 128 seconds.
+func NewClient(gateway net.IP) (nat *Client) {
+	return &Client{&network{gateway}, 0}
+}
+
+// Create a NAT-PMP client for the NAT-PMP server at the gateway, with a timeout.
+// Timeout defines the total amount of time we will keep retrying before giving up.
+func NewClientWithTimeout(gateway net.IP, timeout time.Duration) (nat *Client) {
+	return &Client{&network{gateway}, timeout}
+}
+
+// Results of the NAT-PMP GetExternalAddress operation.
+type GetExternalAddressResult struct {
+	SecondsSinceStartOfEpoc uint32
+	ExternalIPAddress       [4]byte
+}
+
+// Get the external address of the router. Sends a version-0, opcode-0
+// request and decodes the 12-byte response.
+func (n *Client) GetExternalAddress() (result *GetExternalAddressResult, err error) {
+	msg := make([]byte, 2)
+	msg[0] = 0 // Version 0
+	msg[1] = 0 // OP Code 0
+	response, err := n.rpc(msg, 12)
+	if err != nil {
+		return
+	}
+	result = &GetExternalAddressResult{}
+	result.SecondsSinceStartOfEpoc = readNetworkOrderUint32(response[4:8])
+	copy(result.ExternalIPAddress[:], response[8:12])
+	return
+}
+
+// Results of the NAT-PMP AddPortMapping operation
+type AddPortMappingResult struct {
+	SecondsSinceStartOfEpoc      uint32
+	InternalPort                 uint16
+	MappedExternalPort           uint16
+	PortMappingLifetimeInSeconds uint32
+}
+
+// Add (or delete) a port mapping. To delete a mapping, set the requestedExternalPort and lifetime to 0.
+// protocol must be "udp" or "tcp"; lifetime is in seconds (see
+// RECOMMENDED_MAPPING_LIFETIME_SECONDS).
+func (n *Client) AddPortMapping(protocol string, internalPort, requestedExternalPort int, lifetime int) (result *AddPortMappingResult, err error) {
+	// NAT-PMP opcodes: 1 = map UDP, 2 = map TCP.
+	var opcode byte
+	if protocol == "udp" {
+		opcode = 1
+	} else if protocol == "tcp" {
+		opcode = 2
+	} else {
+		err = fmt.Errorf("unknown protocol %v", protocol)
+		return
+	}
+	msg := make([]byte, 12)
+	msg[0] = 0 // Version 0
+	msg[1] = opcode
+	writeNetworkOrderUint16(msg[4:6], uint16(internalPort))
+	writeNetworkOrderUint16(msg[6:8], uint16(requestedExternalPort))
+	writeNetworkOrderUint32(msg[8:12], uint32(lifetime))
+	response, err := n.rpc(msg, 16)
+	if err != nil {
+		return
+	}
+	result = &AddPortMappingResult{}
+	result.SecondsSinceStartOfEpoc = readNetworkOrderUint32(response[4:8])
+	result.InternalPort = readNetworkOrderUint16(response[8:10])
+	result.MappedExternalPort = readNetworkOrderUint16(response[10:12])
+	result.PortMappingLifetimeInSeconds = readNetworkOrderUint32(response[12:16])
+	return
+}
+
+// rpc sends msg to the gateway via the caller and validates the response has
+// the expected size, version, opcode, and a zero result code.
+func (n *Client) rpc(msg []byte, resultSize int) (result []byte, err error) {
+	result, err = n.caller.call(msg, n.timeout)
+	if err != nil {
+		return
+	}
+	err = protocolChecks(msg, resultSize, result)
+	return
+}
+
+// protocolChecks validates a NAT-PMP response against the request: exact
+// result size, protocol version 0, response opcode = request opcode | 0x80,
+// and a zero (success) result code in bytes 2-3.
+func protocolChecks(msg []byte, resultSize int, result []byte) (err error) {
+	if len(result) != resultSize {
+		err = fmt.Errorf("unexpected result size %d, expected %d", len(result), resultSize)
+		return
+	}
+	if result[0] != 0 {
+		err = fmt.Errorf("unknown protocol version %d", result[0])
+		return
+	}
+	// Responses echo the request opcode with the high bit set.
+	expectedOp := msg[1] | 0x80
+	if result[1] != expectedOp {
+		err = fmt.Errorf("Unexpected opcode %d. Expected %d", result[1], expectedOp)
+		return
+	}
+	resultCode := readNetworkOrderUint16(result[2:4])
+	if resultCode != 0 {
+		err = fmt.Errorf("Non-zero result code %d", resultCode)
+		return
+	}
+	// If we got here the RPC is good.
+	return
+}
+
+func writeNetworkOrderUint16(buf []byte, d uint16) {
+ buf[0] = byte(d >> 8)
+ buf[1] = byte(d)
+}
+
+func writeNetworkOrderUint32(buf []byte, d uint32) {
+ buf[0] = byte(d >> 24)
+ buf[1] = byte(d >> 16)
+ buf[2] = byte(d >> 8)
+ buf[3] = byte(d)
+}
+
+func readNetworkOrderUint16(buf []byte) uint16 {
+ return (uint16(buf[0]) << 8) | uint16(buf[1])
+}
+
+func readNetworkOrderUint32(buf []byte) uint32 {
+ return (uint32(buf[0]) << 24) | (uint32(buf[1]) << 16) | (uint32(buf[2]) << 8) | uint32(buf[3])
+}
diff --git a/vendor/github.com/jackpal/go-nat-pmp/network.go b/vendor/github.com/jackpal/go-nat-pmp/network.go
new file mode 100644
index 000000000..c42b4fee9
--- /dev/null
+++ b/vendor/github.com/jackpal/go-nat-pmp/network.go
@@ -0,0 +1,89 @@
+package natpmp
+
+import (
+ "fmt"
+ "net"
+ "time"
+)
+
+const nAT_PMP_PORT = 5351
+const nAT_TRIES = 9
+const nAT_INITIAL_MS = 250
+
+// A caller that implements the NAT-PMP RPC protocol.
+type network struct {
+ gateway net.IP
+}
+
+func (n *network) call(msg []byte, timeout time.Duration) (result []byte, err error) {
+ var server net.UDPAddr
+ server.IP = n.gateway
+ server.Port = nAT_PMP_PORT
+ conn, err := net.DialUDP("udp", nil, &server)
+ if err != nil {
+ return
+ }
+ defer conn.Close()
+
+ // 16 bytes is the maximum result size.
+ result = make([]byte, 16)
+
+ var finalTimeout time.Time
+ if timeout != 0 {
+ finalTimeout = time.Now().Add(timeout)
+ }
+
+ needNewDeadline := true
+
+ var tries uint
+ for tries = 0; (tries < nAT_TRIES && finalTimeout.IsZero()) || time.Now().Before(finalTimeout); {
+ if needNewDeadline {
+ nextDeadline := time.Now().Add((nAT_INITIAL_MS << tries) * time.Millisecond)
+ err = conn.SetDeadline(minTime(nextDeadline, finalTimeout))
+ if err != nil {
+ return
+ }
+ needNewDeadline = false
+ }
+ _, err = conn.Write(msg)
+ if err != nil {
+ return
+ }
+ var bytesRead int
+ var remoteAddr *net.UDPAddr
+ bytesRead, remoteAddr, err = conn.ReadFromUDP(result)
+ if err != nil {
+ if err.(net.Error).Timeout() {
+ tries++
+ needNewDeadline = true
+ continue
+ }
+ return
+ }
+ if !remoteAddr.IP.Equal(n.gateway) {
+ // Ignore this packet.
+ // Continue without increasing retransmission timeout or deadline.
+ continue
+ }
+ // Trim result to actual number of bytes received
+ if bytesRead < len(result) {
+ result = result[:bytesRead]
+ }
+ return
+ }
+ err = fmt.Errorf("Timed out trying to contact gateway")
+ return
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.IsZero() {
+ return b
+ }
+ if b.IsZero() {
+ return a
+ }
+ if a.Before(b) {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/jackpal/go-nat-pmp/recorder.go b/vendor/github.com/jackpal/go-nat-pmp/recorder.go
new file mode 100644
index 000000000..845703672
--- /dev/null
+++ b/vendor/github.com/jackpal/go-nat-pmp/recorder.go
@@ -0,0 +1,19 @@
+package natpmp
+
+import "time"
+
+type callObserver interface {
+ observeCall(msg []byte, result []byte, err error)
+}
+
+// A caller that records the RPC call.
+type recorder struct {
+ child caller
+ observer callObserver
+}
+
+func (n *recorder) call(msg []byte, timeout time.Duration) (result []byte, err error) {
+ result, err = n.child.call(msg, timeout)
+ n.observer.observeCall(msg, result, err)
+ return
+}
diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml
new file mode 100644
index 000000000..42768b8bf
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - tip
+
+sudo: false
+
+script:
+ - go test -v
diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE
new file mode 100644
index 000000000..91b5cef30
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 000000000..e84226a73
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,43 @@
+# go-colorable
+
+Colorable writer for windows.
+
+For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
+This package is possible to handle escape sequence for ansi color on windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+You can compile above code on non-windows OSs.
+
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
new file mode 100644
index 000000000..52d6653b3
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -0,0 +1,24 @@
+// +build !windows
+
+package colorable
+
+import (
+ "io"
+ "os"
+)
+
+func NewColorable(file *os.File) io.Writer {
+ if file == nil {
+ panic("nil passed instead of *os.File to NewColorable()")
+ }
+
+ return file
+}
+
+func NewColorableStdout() io.Writer {
+ return os.Stdout
+}
+
+func NewColorableStderr() io.Writer {
+ return os.Stderr
+}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go
new file mode 100644
index 000000000..bc84adfaf
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -0,0 +1,810 @@
+package colorable
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+ "unsafe"
+
+ "github.com/mattn/go-isatty"
+)
+
+const (
+ foregroundBlue = 0x1
+ foregroundGreen = 0x2
+ foregroundRed = 0x4
+ foregroundIntensity = 0x8
+ foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+ backgroundBlue = 0x10
+ backgroundGreen = 0x20
+ backgroundRed = 0x40
+ backgroundIntensity = 0x80
+ backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+type coord struct {
+ x short
+ y short
+}
+
+type smallRect struct {
+ left short
+ top short
+ right short
+ bottom short
+}
+
+type consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes word
+ window smallRect
+ maximumWindowSize coord
+}
+
+type consoleCursorInfo struct {
+ size dword
+ visible int32
+}
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+ procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo")
+ procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo")
+)
+
+type Writer struct {
+ out io.Writer
+ handle syscall.Handle
+ lastbuf bytes.Buffer
+ oldattr word
+}
+
+func NewColorable(file *os.File) io.Writer {
+ if file == nil {
+ panic("nil passed instead of *os.File to NewColorable()")
+ }
+
+ if isatty.IsTerminal(file.Fd()) {
+ var csbi consoleScreenBufferInfo
+ handle := syscall.Handle(file.Fd())
+ procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+ return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
+ } else {
+ return file
+ }
+}
+
+func NewColorableStdout() io.Writer {
+ return NewColorable(os.Stdout)
+}
+
+func NewColorableStderr() io.Writer {
+ return NewColorable(os.Stderr)
+}
+
+var color256 = map[int]int{
+ 0: 0x000000,
+ 1: 0x800000,
+ 2: 0x008000,
+ 3: 0x808000,
+ 4: 0x000080,
+ 5: 0x800080,
+ 6: 0x008080,
+ 7: 0xc0c0c0,
+ 8: 0x808080,
+ 9: 0xff0000,
+ 10: 0x00ff00,
+ 11: 0xffff00,
+ 12: 0x0000ff,
+ 13: 0xff00ff,
+ 14: 0x00ffff,
+ 15: 0xffffff,
+ 16: 0x000000,
+ 17: 0x00005f,
+ 18: 0x000087,
+ 19: 0x0000af,
+ 20: 0x0000d7,
+ 21: 0x0000ff,
+ 22: 0x005f00,
+ 23: 0x005f5f,
+ 24: 0x005f87,
+ 25: 0x005faf,
+ 26: 0x005fd7,
+ 27: 0x005fff,
+ 28: 0x008700,
+ 29: 0x00875f,
+ 30: 0x008787,
+ 31: 0x0087af,
+ 32: 0x0087d7,
+ 33: 0x0087ff,
+ 34: 0x00af00,
+ 35: 0x00af5f,
+ 36: 0x00af87,
+ 37: 0x00afaf,
+ 38: 0x00afd7,
+ 39: 0x00afff,
+ 40: 0x00d700,
+ 41: 0x00d75f,
+ 42: 0x00d787,
+ 43: 0x00d7af,
+ 44: 0x00d7d7,
+ 45: 0x00d7ff,
+ 46: 0x00ff00,
+ 47: 0x00ff5f,
+ 48: 0x00ff87,
+ 49: 0x00ffaf,
+ 50: 0x00ffd7,
+ 51: 0x00ffff,
+ 52: 0x5f0000,
+ 53: 0x5f005f,
+ 54: 0x5f0087,
+ 55: 0x5f00af,
+ 56: 0x5f00d7,
+ 57: 0x5f00ff,
+ 58: 0x5f5f00,
+ 59: 0x5f5f5f,
+ 60: 0x5f5f87,
+ 61: 0x5f5faf,
+ 62: 0x5f5fd7,
+ 63: 0x5f5fff,
+ 64: 0x5f8700,
+ 65: 0x5f875f,
+ 66: 0x5f8787,
+ 67: 0x5f87af,
+ 68: 0x5f87d7,
+ 69: 0x5f87ff,
+ 70: 0x5faf00,
+ 71: 0x5faf5f,
+ 72: 0x5faf87,
+ 73: 0x5fafaf,
+ 74: 0x5fafd7,
+ 75: 0x5fafff,
+ 76: 0x5fd700,
+ 77: 0x5fd75f,
+ 78: 0x5fd787,
+ 79: 0x5fd7af,
+ 80: 0x5fd7d7,
+ 81: 0x5fd7ff,
+ 82: 0x5fff00,
+ 83: 0x5fff5f,
+ 84: 0x5fff87,
+ 85: 0x5fffaf,
+ 86: 0x5fffd7,
+ 87: 0x5fffff,
+ 88: 0x870000,
+ 89: 0x87005f,
+ 90: 0x870087,
+ 91: 0x8700af,
+ 92: 0x8700d7,
+ 93: 0x8700ff,
+ 94: 0x875f00,
+ 95: 0x875f5f,
+ 96: 0x875f87,
+ 97: 0x875faf,
+ 98: 0x875fd7,
+ 99: 0x875fff,
+ 100: 0x878700,
+ 101: 0x87875f,
+ 102: 0x878787,
+ 103: 0x8787af,
+ 104: 0x8787d7,
+ 105: 0x8787ff,
+ 106: 0x87af00,
+ 107: 0x87af5f,
+ 108: 0x87af87,
+ 109: 0x87afaf,
+ 110: 0x87afd7,
+ 111: 0x87afff,
+ 112: 0x87d700,
+ 113: 0x87d75f,
+ 114: 0x87d787,
+ 115: 0x87d7af,
+ 116: 0x87d7d7,
+ 117: 0x87d7ff,
+ 118: 0x87ff00,
+ 119: 0x87ff5f,
+ 120: 0x87ff87,
+ 121: 0x87ffaf,
+ 122: 0x87ffd7,
+ 123: 0x87ffff,
+ 124: 0xaf0000,
+ 125: 0xaf005f,
+ 126: 0xaf0087,
+ 127: 0xaf00af,
+ 128: 0xaf00d7,
+ 129: 0xaf00ff,
+ 130: 0xaf5f00,
+ 131: 0xaf5f5f,
+ 132: 0xaf5f87,
+ 133: 0xaf5faf,
+ 134: 0xaf5fd7,
+ 135: 0xaf5fff,
+ 136: 0xaf8700,
+ 137: 0xaf875f,
+ 138: 0xaf8787,
+ 139: 0xaf87af,
+ 140: 0xaf87d7,
+ 141: 0xaf87ff,
+ 142: 0xafaf00,
+ 143: 0xafaf5f,
+ 144: 0xafaf87,
+ 145: 0xafafaf,
+ 146: 0xafafd7,
+ 147: 0xafafff,
+ 148: 0xafd700,
+ 149: 0xafd75f,
+ 150: 0xafd787,
+ 151: 0xafd7af,
+ 152: 0xafd7d7,
+ 153: 0xafd7ff,
+ 154: 0xafff00,
+ 155: 0xafff5f,
+ 156: 0xafff87,
+ 157: 0xafffaf,
+ 158: 0xafffd7,
+ 159: 0xafffff,
+ 160: 0xd70000,
+ 161: 0xd7005f,
+ 162: 0xd70087,
+ 163: 0xd700af,
+ 164: 0xd700d7,
+ 165: 0xd700ff,
+ 166: 0xd75f00,
+ 167: 0xd75f5f,
+ 168: 0xd75f87,
+ 169: 0xd75faf,
+ 170: 0xd75fd7,
+ 171: 0xd75fff,
+ 172: 0xd78700,
+ 173: 0xd7875f,
+ 174: 0xd78787,
+ 175: 0xd787af,
+ 176: 0xd787d7,
+ 177: 0xd787ff,
+ 178: 0xd7af00,
+ 179: 0xd7af5f,
+ 180: 0xd7af87,
+ 181: 0xd7afaf,
+ 182: 0xd7afd7,
+ 183: 0xd7afff,
+ 184: 0xd7d700,
+ 185: 0xd7d75f,
+ 186: 0xd7d787,
+ 187: 0xd7d7af,
+ 188: 0xd7d7d7,
+ 189: 0xd7d7ff,
+ 190: 0xd7ff00,
+ 191: 0xd7ff5f,
+ 192: 0xd7ff87,
+ 193: 0xd7ffaf,
+ 194: 0xd7ffd7,
+ 195: 0xd7ffff,
+ 196: 0xff0000,
+ 197: 0xff005f,
+ 198: 0xff0087,
+ 199: 0xff00af,
+ 200: 0xff00d7,
+ 201: 0xff00ff,
+ 202: 0xff5f00,
+ 203: 0xff5f5f,
+ 204: 0xff5f87,
+ 205: 0xff5faf,
+ 206: 0xff5fd7,
+ 207: 0xff5fff,
+ 208: 0xff8700,
+ 209: 0xff875f,
+ 210: 0xff8787,
+ 211: 0xff87af,
+ 212: 0xff87d7,
+ 213: 0xff87ff,
+ 214: 0xffaf00,
+ 215: 0xffaf5f,
+ 216: 0xffaf87,
+ 217: 0xffafaf,
+ 218: 0xffafd7,
+ 219: 0xffafff,
+ 220: 0xffd700,
+ 221: 0xffd75f,
+ 222: 0xffd787,
+ 223: 0xffd7af,
+ 224: 0xffd7d7,
+ 225: 0xffd7ff,
+ 226: 0xffff00,
+ 227: 0xffff5f,
+ 228: 0xffff87,
+ 229: 0xffffaf,
+ 230: 0xffffd7,
+ 231: 0xffffff,
+ 232: 0x080808,
+ 233: 0x121212,
+ 234: 0x1c1c1c,
+ 235: 0x262626,
+ 236: 0x303030,
+ 237: 0x3a3a3a,
+ 238: 0x444444,
+ 239: 0x4e4e4e,
+ 240: 0x585858,
+ 241: 0x626262,
+ 242: 0x6c6c6c,
+ 243: 0x767676,
+ 244: 0x808080,
+ 245: 0x8a8a8a,
+ 246: 0x949494,
+ 247: 0x9e9e9e,
+ 248: 0xa8a8a8,
+ 249: 0xb2b2b2,
+ 250: 0xbcbcbc,
+ 251: 0xc6c6c6,
+ 252: 0xd0d0d0,
+ 253: 0xdadada,
+ 254: 0xe4e4e4,
+ 255: 0xeeeeee,
+}
+
+func (w *Writer) Write(data []byte) (n int, err error) {
+ var csbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+ er := bytes.NewReader(data)
+ var bw [1]byte
+loop:
+ for {
+ r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ if r1 == 0 {
+ break loop
+ }
+
+ c1, err := er.ReadByte()
+ if err != nil {
+ break loop
+ }
+ if c1 != 0x1b {
+ bw[0] = c1
+ w.out.Write(bw[:])
+ continue
+ }
+ c2, err := er.ReadByte()
+ if err != nil {
+ w.lastbuf.WriteByte(c1)
+ break loop
+ }
+ if c2 != 0x5b {
+ w.lastbuf.WriteByte(c1)
+ w.lastbuf.WriteByte(c2)
+ continue
+ }
+
+ var buf bytes.Buffer
+ var m byte
+ for {
+ c, err := er.ReadByte()
+ if err != nil {
+ w.lastbuf.WriteByte(c1)
+ w.lastbuf.WriteByte(c2)
+ w.lastbuf.Write(buf.Bytes())
+ break loop
+ }
+ if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+ m = c
+ break
+ }
+ buf.Write([]byte(string(c)))
+ }
+
+ var csbi consoleScreenBufferInfo
+ switch m {
+ case 'A':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.y -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'B':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.y += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'C':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'D':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ if n, err = strconv.Atoi(buf.String()); err == nil {
+ var csbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ }
+ case 'E':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = 0
+ csbi.cursorPosition.y += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'F':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = 0
+ csbi.cursorPosition.y -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'G':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = short(n - 1)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'H':
+ token := strings.Split(buf.String(), ";")
+ if len(token) != 2 {
+ continue
+ }
+ n1, err := strconv.Atoi(token[0])
+ if err != nil {
+ continue
+ }
+ n2, err := strconv.Atoi(token[1])
+ if err != nil {
+ continue
+ }
+ csbi.cursorPosition.x = short(n2 - 1)
+ csbi.cursorPosition.y = short(n1 - 1)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'J':
+ n, err := strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ var cursor coord
+ switch n {
+ case 0:
+ cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+ case 1:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top}
+ case 2:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top}
+ }
+ var count, written dword
+ count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ case 'K':
+ n, err := strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ var cursor coord
+ switch n {
+ case 0:
+ cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+ case 1:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+ case 2:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+ }
+ var count, written dword
+ count = dword(csbi.size.x - csbi.cursorPosition.x)
+ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ case 'm':
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ attr := csbi.attributes
+ cs := buf.String()
+ if cs == "" {
+ procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+ continue
+ }
+ token := strings.Split(cs, ";")
+ for i := 0; i < len(token); i++ {
+ ns := token[i]
+ if n, err = strconv.Atoi(ns); err == nil {
+ switch {
+ case n == 0 || n == 100:
+ attr = w.oldattr
+ case 1 <= n && n <= 5:
+ attr |= foregroundIntensity
+ case n == 7:
+ attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case 22 == n || n == 25 || n == 25:
+ attr |= foregroundIntensity
+ case n == 27:
+ attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case 30 <= n && n <= 37:
+ attr &= backgroundMask
+ if (n-30)&1 != 0 {
+ attr |= foregroundRed
+ }
+ if (n-30)&2 != 0 {
+ attr |= foregroundGreen
+ }
+ if (n-30)&4 != 0 {
+ attr |= foregroundBlue
+ }
+ case n == 38: // set foreground color.
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+ if n256, err := strconv.Atoi(token[i+2]); err == nil {
+ if n256foreAttr == nil {
+ n256setup()
+ }
+ attr &= backgroundMask
+ attr |= n256foreAttr[n256]
+ i += 2
+ }
+ } else {
+ attr = attr & (w.oldattr & backgroundMask)
+ }
+ case n == 39: // reset foreground color.
+ attr &= backgroundMask
+ attr |= w.oldattr & foregroundMask
+ case 40 <= n && n <= 47:
+ attr &= foregroundMask
+ if (n-40)&1 != 0 {
+ attr |= backgroundRed
+ }
+ if (n-40)&2 != 0 {
+ attr |= backgroundGreen
+ }
+ if (n-40)&4 != 0 {
+ attr |= backgroundBlue
+ }
+ case n == 48: // set background color.
+ if i < len(token)-2 && token[i+1] == "5" {
+ if n256, err := strconv.Atoi(token[i+2]); err == nil {
+ if n256backAttr == nil {
+ n256setup()
+ }
+ attr &= foregroundMask
+ attr |= n256backAttr[n256]
+ i += 2
+ }
+ } else {
+ attr = attr & (w.oldattr & foregroundMask)
+ }
+ case n == 49: // reset foreground color.
+ attr &= foregroundMask
+ attr |= w.oldattr & backgroundMask
+ case 90 <= n && n <= 97:
+ attr = (attr & backgroundMask)
+ attr |= foregroundIntensity
+ if (n-90)&1 != 0 {
+ attr |= foregroundRed
+ }
+ if (n-90)&2 != 0 {
+ attr |= foregroundGreen
+ }
+ if (n-90)&4 != 0 {
+ attr |= foregroundBlue
+ }
+ case 100 <= n && n <= 107:
+ attr = (attr & foregroundMask)
+ attr |= backgroundIntensity
+ if (n-100)&1 != 0 {
+ attr |= backgroundRed
+ }
+ if (n-100)&2 != 0 {
+ attr |= backgroundGreen
+ }
+ if (n-100)&4 != 0 {
+ attr |= backgroundBlue
+ }
+ }
+ procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+ }
+ }
+ case 'h':
+ cs := buf.String()
+ if cs == "?25" {
+ var ci consoleCursorInfo
+ procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ ci.visible = 1
+ procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ }
+ case 'l':
+ cs := buf.String()
+ if cs == "?25" {
+ var ci consoleCursorInfo
+ procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ ci.visible = 0
+ procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+ }
+ }
+ }
+ return len(data) - w.lastbuf.Len(), nil
+}
+
+type consoleColor struct {
+ rgb int
+ red bool
+ green bool
+ blue bool
+ intensity bool
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+ if c.red {
+ attr |= foregroundRed
+ }
+ if c.green {
+ attr |= foregroundGreen
+ }
+ if c.blue {
+ attr |= foregroundBlue
+ }
+ if c.intensity {
+ attr |= foregroundIntensity
+ }
+ return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+ if c.red {
+ attr |= backgroundRed
+ }
+ if c.green {
+ attr |= backgroundGreen
+ }
+ if c.blue {
+ attr |= backgroundBlue
+ }
+ if c.intensity {
+ attr |= backgroundIntensity
+ }
+ return
+}
+
+var color16 = []consoleColor{
+ consoleColor{0x000000, false, false, false, false},
+ consoleColor{0x000080, false, false, true, false},
+ consoleColor{0x008000, false, true, false, false},
+ consoleColor{0x008080, false, true, true, false},
+ consoleColor{0x800000, true, false, false, false},
+ consoleColor{0x800080, true, false, true, false},
+ consoleColor{0x808000, true, true, false, false},
+ consoleColor{0xc0c0c0, true, true, true, false},
+ consoleColor{0x808080, false, false, false, true},
+ consoleColor{0x0000ff, false, false, true, true},
+ consoleColor{0x00ff00, false, true, false, true},
+ consoleColor{0x00ffff, false, true, true, true},
+ consoleColor{0xff0000, true, false, false, true},
+ consoleColor{0xff00ff, true, false, true, true},
+ consoleColor{0xffff00, true, true, false, true},
+ consoleColor{0xffffff, true, true, true, true},
+}
+
+type hsv struct {
+ h, s, v float32
+}
+
+func (a hsv) dist(b hsv) float32 {
+ dh := a.h - b.h
+ switch {
+ case dh > 0.5:
+ dh = 1 - dh
+ case dh < -0.5:
+ dh = -1 - dh
+ }
+ ds := a.s - b.s
+ dv := a.v - b.v
+ return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
+}
+
+func toHSV(rgb int) hsv {
+ r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
+ float32((rgb&0x00FF00)>>8)/256.0,
+ float32(rgb&0x0000FF)/256.0
+ min, max := minmax3f(r, g, b)
+ h := max - min
+ if h > 0 {
+ if max == r {
+ h = (g - b) / h
+ if h < 0 {
+ h += 6
+ }
+ } else if max == g {
+ h = 2 + (b-r)/h
+ } else {
+ h = 4 + (r-g)/h
+ }
+ }
+ h /= 6.0
+ s := max - min
+ if max != 0 {
+ s /= max
+ }
+ v := max
+ return hsv{h: h, s: s, v: v}
+}
+
+type hsvTable []hsv
+
+func toHSVTable(rgbTable []consoleColor) hsvTable {
+ t := make(hsvTable, len(rgbTable))
+ for i, c := range rgbTable {
+ t[i] = toHSV(c.rgb)
+ }
+ return t
+}
+
+func (t hsvTable) find(rgb int) consoleColor {
+ hsv := toHSV(rgb)
+ n := 7
+ l := float32(5.0)
+ for i, p := range t {
+ d := hsv.dist(p)
+ if d < l {
+ l, n = d, i
+ }
+ }
+ return color16[n]
+}
+
+func minmax3f(a, b, c float32) (min, max float32) {
+ if a < b {
+ if b < c {
+ return a, c
+ } else if a < c {
+ return a, b
+ } else {
+ return c, b
+ }
+ } else {
+ if a < c {
+ return b, c
+ } else if b < c {
+ return b, a
+ } else {
+ return c, a
+ }
+ }
+}
+
+var n256foreAttr []word
+var n256backAttr []word
+
+func n256setup() {
+ n256foreAttr = make([]word, 256)
+ n256backAttr = make([]word, 256)
+ t := toHSVTable(color16)
+ for i, rgb := range color256 {
+ c := t.find(rgb)
+ n256foreAttr[i] = c.foregroundAttr()
+ n256backAttr[i] = c.backgroundAttr()
+ }
+}
diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go
new file mode 100644
index 000000000..b60801dcd
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/noncolorable.go
@@ -0,0 +1,58 @@
+package colorable
+
+import (
+ "bytes"
+ "io"
+)
+
+type NonColorable struct {
+ out io.Writer
+ lastbuf bytes.Buffer
+}
+
+func NewNonColorable(w io.Writer) io.Writer {
+ return &NonColorable{out: w}
+}
+
+func (w *NonColorable) Write(data []byte) (n int, err error) {
+ er := bytes.NewReader(data)
+ var bw [1]byte
+loop:
+ for {
+ c1, err := er.ReadByte()
+ if err != nil {
+ break loop
+ }
+ if c1 != 0x1b {
+ bw[0] = c1
+ w.out.Write(bw[:])
+ continue
+ }
+ c2, err := er.ReadByte()
+ if err != nil {
+ w.lastbuf.WriteByte(c1)
+ break loop
+ }
+ if c2 != 0x5b {
+ w.lastbuf.WriteByte(c1)
+ w.lastbuf.WriteByte(c2)
+ continue
+ }
+
+ var buf bytes.Buffer
+ for {
+ c, err := er.ReadByte()
+ if err != nil {
+ w.lastbuf.WriteByte(c1)
+ w.lastbuf.WriteByte(c2)
+ w.lastbuf.Write(buf.Bytes())
+ break loop
+ }
+ if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+ break
+ }
+ buf.Write([]byte(string(c)))
+ }
+ }
+ return len(data) - w.lastbuf.Len(), nil
+}
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 000000000..65dc692b6
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 000000000..74845de4a
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,37 @@
+# go-isatty
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/mattn/go-isatty"
+ "os"
+)
+
+func main() {
+ if isatty.IsTerminal(os.Stdout.Fd()) {
+ fmt.Println("Is Terminal")
+ } else {
+ fmt.Println("Is Not Terminal")
+ }
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 000000000..17d4f90eb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements interface to isatty
+package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 000000000..83c588773
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,9 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is terminal which
+// is always false on on appengine classic which is a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 000000000..42f2514d1
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 000000000..9d24bac1d
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 000000000..1f0c6bf53
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+ var termio unix.Termio
+ err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+ return err == nil
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 000000000..83c398b16
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+
+// IsTerminal return true if the file descriptor is terminal.
+func IsTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml
new file mode 100644
index 000000000..5c9c2a30f
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - tip
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL
diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE
new file mode 100644
index 000000000..91b5cef30
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-runewidth/README.mkd b/vendor/github.com/mattn/go-runewidth/README.mkd
new file mode 100644
index 000000000..66663a94b
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/README.mkd
@@ -0,0 +1,27 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
+[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
+
+Provides functions to get fixed width of the character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
new file mode 100644
index 000000000..3e730656b
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -0,0 +1,481 @@
+package runewidth
+
+var (
+ // EastAsianWidth will be set true if the current locale is CJK
+ EastAsianWidth = IsEastAsian()
+
+ // DefaultCondition is a condition in current locale
+ DefaultCondition = &Condition{EastAsianWidth}
+)
+
+type interval struct {
+ first rune
+ last rune
+}
+
+var combining = []interval{
+ {0x0300, 0x036F}, {0x0483, 0x0486}, {0x0488, 0x0489},
+ {0x0591, 0x05BD}, {0x05BF, 0x05BF}, {0x05C1, 0x05C2},
+ {0x05C4, 0x05C5}, {0x05C7, 0x05C7}, {0x0600, 0x0603},
+ {0x0610, 0x0615}, {0x064B, 0x065E}, {0x0670, 0x0670},
+ {0x06D6, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED},
+ {0x070F, 0x070F}, {0x0711, 0x0711}, {0x0730, 0x074A},
+ {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, {0x0901, 0x0902},
+ {0x093C, 0x093C}, {0x0941, 0x0948}, {0x094D, 0x094D},
+ {0x0951, 0x0954}, {0x0962, 0x0963}, {0x0981, 0x0981},
+ {0x09BC, 0x09BC}, {0x09C1, 0x09C4}, {0x09CD, 0x09CD},
+ {0x09E2, 0x09E3}, {0x0A01, 0x0A02}, {0x0A3C, 0x0A3C},
+ {0x0A41, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D},
+ {0x0A70, 0x0A71}, {0x0A81, 0x0A82}, {0x0ABC, 0x0ABC},
+ {0x0AC1, 0x0AC5}, {0x0AC7, 0x0AC8}, {0x0ACD, 0x0ACD},
+ {0x0AE2, 0x0AE3}, {0x0B01, 0x0B01}, {0x0B3C, 0x0B3C},
+ {0x0B3F, 0x0B3F}, {0x0B41, 0x0B43}, {0x0B4D, 0x0B4D},
+ {0x0B56, 0x0B56}, {0x0B82, 0x0B82}, {0x0BC0, 0x0BC0},
+ {0x0BCD, 0x0BCD}, {0x0C3E, 0x0C40}, {0x0C46, 0x0C48},
+ {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0CBC, 0x0CBC},
+ {0x0CBF, 0x0CBF}, {0x0CC6, 0x0CC6}, {0x0CCC, 0x0CCD},
+ {0x0CE2, 0x0CE3}, {0x0D41, 0x0D43}, {0x0D4D, 0x0D4D},
+ {0x0DCA, 0x0DCA}, {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6},
+ {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E},
+ {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC},
+ {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35},
+ {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F71, 0x0F7E},
+ {0x0F80, 0x0F84}, {0x0F86, 0x0F87}, {0x0F90, 0x0F97},
+ {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102D, 0x1030},
+ {0x1032, 0x1032}, {0x1036, 0x1037}, {0x1039, 0x1039},
+ {0x1058, 0x1059}, {0x1160, 0x11FF}, {0x135F, 0x135F},
+ {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753},
+ {0x1772, 0x1773}, {0x17B4, 0x17B5}, {0x17B7, 0x17BD},
+ {0x17C6, 0x17C6}, {0x17C9, 0x17D3}, {0x17DD, 0x17DD},
+ {0x180B, 0x180D}, {0x18A9, 0x18A9}, {0x1920, 0x1922},
+ {0x1927, 0x1928}, {0x1932, 0x1932}, {0x1939, 0x193B},
+ {0x1A17, 0x1A18}, {0x1B00, 0x1B03}, {0x1B34, 0x1B34},
+ {0x1B36, 0x1B3A}, {0x1B3C, 0x1B3C}, {0x1B42, 0x1B42},
+ {0x1B6B, 0x1B73}, {0x1DC0, 0x1DCA}, {0x1DFE, 0x1DFF},
+ {0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x2063},
+ {0x206A, 0x206F}, {0x20D0, 0x20EF}, {0x302A, 0x302F},
+ {0x3099, 0x309A}, {0xA806, 0xA806}, {0xA80B, 0xA80B},
+ {0xA825, 0xA826}, {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F},
+ {0xFE20, 0xFE23}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB},
+ {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F},
+ {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x1D167, 0x1D169},
+ {0x1D173, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+ {0x1D242, 0x1D244}, {0xE0001, 0xE0001}, {0xE0020, 0xE007F},
+ {0xE0100, 0xE01EF},
+}
+
+type ctype int
+
+const (
+ narrow ctype = iota
+ ambiguous
+ wide
+ halfwidth
+ fullwidth
+ neutral
+)
+
+type intervalType struct {
+ first rune
+ last rune
+ ctype ctype
+}
+
+var ctypes = []intervalType{
+ {0x0020, 0x007E, narrow},
+ {0x00A1, 0x00A1, ambiguous},
+ {0x00A2, 0x00A3, narrow},
+ {0x00A4, 0x00A4, ambiguous},
+ {0x00A5, 0x00A6, narrow},
+ {0x00A7, 0x00A8, ambiguous},
+ {0x00AA, 0x00AA, ambiguous},
+ {0x00AC, 0x00AC, narrow},
+ {0x00AD, 0x00AE, ambiguous},
+ {0x00AF, 0x00AF, narrow},
+ {0x00B0, 0x00B4, ambiguous},
+ {0x00B6, 0x00BA, ambiguous},
+ {0x00BC, 0x00BF, ambiguous},
+ {0x00C6, 0x00C6, ambiguous},
+ {0x00D0, 0x00D0, ambiguous},
+ {0x00D7, 0x00D8, ambiguous},
+ {0x00DE, 0x00E1, ambiguous},
+ {0x00E6, 0x00E6, ambiguous},
+ {0x00E8, 0x00EA, ambiguous},
+ {0x00EC, 0x00ED, ambiguous},
+ {0x00F0, 0x00F0, ambiguous},
+ {0x00F2, 0x00F3, ambiguous},
+ {0x00F7, 0x00FA, ambiguous},
+ {0x00FC, 0x00FC, ambiguous},
+ {0x00FE, 0x00FE, ambiguous},
+ {0x0101, 0x0101, ambiguous},
+ {0x0111, 0x0111, ambiguous},
+ {0x0113, 0x0113, ambiguous},
+ {0x011B, 0x011B, ambiguous},
+ {0x0126, 0x0127, ambiguous},
+ {0x012B, 0x012B, ambiguous},
+ {0x0131, 0x0133, ambiguous},
+ {0x0138, 0x0138, ambiguous},
+ {0x013F, 0x0142, ambiguous},
+ {0x0144, 0x0144, ambiguous},
+ {0x0148, 0x014B, ambiguous},
+ {0x014D, 0x014D, ambiguous},
+ {0x0152, 0x0153, ambiguous},
+ {0x0166, 0x0167, ambiguous},
+ {0x016B, 0x016B, ambiguous},
+ {0x01CE, 0x01CE, ambiguous},
+ {0x01D0, 0x01D0, ambiguous},
+ {0x01D2, 0x01D2, ambiguous},
+ {0x01D4, 0x01D4, ambiguous},
+ {0x01D6, 0x01D6, ambiguous},
+ {0x01D8, 0x01D8, ambiguous},
+ {0x01DA, 0x01DA, ambiguous},
+ {0x01DC, 0x01DC, ambiguous},
+ {0x0251, 0x0251, ambiguous},
+ {0x0261, 0x0261, ambiguous},
+ {0x02C4, 0x02C4, ambiguous},
+ {0x02C7, 0x02C7, ambiguous},
+ {0x02C9, 0x02CB, ambiguous},
+ {0x02CD, 0x02CD, ambiguous},
+ {0x02D0, 0x02D0, ambiguous},
+ {0x02D8, 0x02DB, ambiguous},
+ {0x02DD, 0x02DD, ambiguous},
+ {0x02DF, 0x02DF, ambiguous},
+ {0x0300, 0x036F, ambiguous},
+ {0x0391, 0x03A2, ambiguous},
+ {0x03A3, 0x03A9, ambiguous},
+ {0x03B1, 0x03C1, ambiguous},
+ {0x03C3, 0x03C9, ambiguous},
+ {0x0401, 0x0401, ambiguous},
+ {0x0410, 0x044F, ambiguous},
+ {0x0451, 0x0451, ambiguous},
+ {0x1100, 0x115F, wide},
+ {0x2010, 0x2010, ambiguous},
+ {0x2013, 0x2016, ambiguous},
+ {0x2018, 0x2019, ambiguous},
+ {0x201C, 0x201D, ambiguous},
+ {0x2020, 0x2022, ambiguous},
+ {0x2024, 0x2027, ambiguous},
+ {0x2030, 0x2030, ambiguous},
+ {0x2032, 0x2033, ambiguous},
+ {0x2035, 0x2035, ambiguous},
+ {0x203B, 0x203B, ambiguous},
+ {0x203E, 0x203E, ambiguous},
+ {0x2074, 0x2074, ambiguous},
+ {0x207F, 0x207F, ambiguous},
+ {0x2081, 0x2084, ambiguous},
+ {0x20A9, 0x20A9, halfwidth},
+ {0x20AC, 0x20AC, ambiguous},
+ {0x2103, 0x2103, ambiguous},
+ {0x2105, 0x2105, ambiguous},
+ {0x2109, 0x2109, ambiguous},
+ {0x2113, 0x2113, ambiguous},
+ {0x2116, 0x2116, ambiguous},
+ {0x2121, 0x2122, ambiguous},
+ {0x2126, 0x2126, ambiguous},
+ {0x212B, 0x212B, ambiguous},
+ {0x2153, 0x2154, ambiguous},
+ {0x215B, 0x215E, ambiguous},
+ {0x2160, 0x216B, ambiguous},
+ {0x2170, 0x2179, ambiguous},
+ {0x2189, 0x218A, ambiguous},
+ {0x2190, 0x2199, ambiguous},
+ {0x21B8, 0x21B9, ambiguous},
+ {0x21D2, 0x21D2, ambiguous},
+ {0x21D4, 0x21D4, ambiguous},
+ {0x21E7, 0x21E7, ambiguous},
+ {0x2200, 0x2200, ambiguous},
+ {0x2202, 0x2203, ambiguous},
+ {0x2207, 0x2208, ambiguous},
+ {0x220B, 0x220B, ambiguous},
+ {0x220F, 0x220F, ambiguous},
+ {0x2211, 0x2211, ambiguous},
+ {0x2215, 0x2215, ambiguous},
+ {0x221A, 0x221A, ambiguous},
+ {0x221D, 0x2220, ambiguous},
+ {0x2223, 0x2223, ambiguous},
+ {0x2225, 0x2225, ambiguous},
+ {0x2227, 0x222C, ambiguous},
+ {0x222E, 0x222E, ambiguous},
+ {0x2234, 0x2237, ambiguous},
+ {0x223C, 0x223D, ambiguous},
+ {0x2248, 0x2248, ambiguous},
+ {0x224C, 0x224C, ambiguous},
+ {0x2252, 0x2252, ambiguous},
+ {0x2260, 0x2261, ambiguous},
+ {0x2264, 0x2267, ambiguous},
+ {0x226A, 0x226B, ambiguous},
+ {0x226E, 0x226F, ambiguous},
+ {0x2282, 0x2283, ambiguous},
+ {0x2286, 0x2287, ambiguous},
+ {0x2295, 0x2295, ambiguous},
+ {0x2299, 0x2299, ambiguous},
+ {0x22A5, 0x22A5, ambiguous},
+ {0x22BF, 0x22BF, ambiguous},
+ {0x2312, 0x2312, ambiguous},
+ {0x2329, 0x232A, wide},
+ {0x2460, 0x24E9, ambiguous},
+ {0x24EB, 0x254B, ambiguous},
+ {0x2550, 0x2573, ambiguous},
+ {0x2580, 0x258F, ambiguous},
+ {0x2592, 0x2595, ambiguous},
+ {0x25A0, 0x25A1, ambiguous},
+ {0x25A3, 0x25A9, ambiguous},
+ {0x25B2, 0x25B3, ambiguous},
+ {0x25B6, 0x25B7, ambiguous},
+ {0x25BC, 0x25BD, ambiguous},
+ {0x25C0, 0x25C1, ambiguous},
+ {0x25C6, 0x25C8, ambiguous},
+ {0x25CB, 0x25CB, ambiguous},
+ {0x25CE, 0x25D1, ambiguous},
+ {0x25E2, 0x25E5, ambiguous},
+ {0x25EF, 0x25EF, ambiguous},
+ {0x2605, 0x2606, ambiguous},
+ {0x2609, 0x2609, ambiguous},
+ {0x260E, 0x260F, ambiguous},
+ {0x2614, 0x2615, ambiguous},
+ {0x261C, 0x261C, ambiguous},
+ {0x261E, 0x261E, ambiguous},
+ {0x2640, 0x2640, ambiguous},
+ {0x2642, 0x2642, ambiguous},
+ {0x2660, 0x2661, ambiguous},
+ {0x2663, 0x2665, ambiguous},
+ {0x2667, 0x266A, ambiguous},
+ {0x266C, 0x266D, ambiguous},
+ {0x266F, 0x266F, ambiguous},
+ {0x269E, 0x269F, ambiguous},
+ {0x26BE, 0x26BF, ambiguous},
+ {0x26C4, 0x26CD, ambiguous},
+ {0x26CF, 0x26E1, ambiguous},
+ {0x26E3, 0x26E3, ambiguous},
+ {0x26E8, 0x26FF, ambiguous},
+ {0x273D, 0x273D, ambiguous},
+ {0x2757, 0x2757, ambiguous},
+ {0x2776, 0x277F, ambiguous},
+ {0x27E6, 0x27ED, narrow},
+ {0x2985, 0x2986, narrow},
+ {0x2B55, 0x2B59, ambiguous},
+ {0x2E80, 0x2E9A, wide},
+ {0x2E9B, 0x2EF4, wide},
+ {0x2F00, 0x2FD6, wide},
+ {0x2FF0, 0x2FFC, wide},
+ {0x3000, 0x3000, fullwidth},
+ {0x3001, 0x303E, wide},
+ {0x3041, 0x3097, wide},
+ {0x3099, 0x3100, wide},
+ {0x3105, 0x312E, wide},
+ {0x3131, 0x318F, wide},
+ {0x3190, 0x31BB, wide},
+ {0x31C0, 0x31E4, wide},
+ {0x31F0, 0x321F, wide},
+ {0x3220, 0x3247, wide},
+ {0x3248, 0x324F, ambiguous},
+ {0x3250, 0x32FF, wide},
+ {0x3300, 0x4DBF, wide},
+ {0x4E00, 0xA48D, wide},
+ {0xA490, 0xA4C7, wide},
+ {0xA960, 0xA97D, wide},
+ {0xAC00, 0xD7A4, wide},
+ {0xE000, 0xF8FF, ambiguous},
+ {0xF900, 0xFAFF, wide},
+ {0xFE00, 0xFE0F, ambiguous},
+ {0xFE10, 0xFE1A, wide},
+ {0xFE30, 0xFE53, wide},
+ {0xFE54, 0xFE67, wide},
+ {0xFE68, 0xFE6C, wide},
+ {0xFF01, 0xFF60, fullwidth},
+ {0xFF61, 0xFFBF, halfwidth},
+ {0xFFC2, 0xFFC8, halfwidth},
+ {0xFFCA, 0xFFD0, halfwidth},
+ {0xFFD2, 0xFFD8, halfwidth},
+ {0xFFDA, 0xFFDD, halfwidth},
+ {0xFFE0, 0xFFE7, fullwidth},
+ {0xFFE8, 0xFFEF, halfwidth},
+ {0xFFFD, 0xFFFE, ambiguous},
+ {0x1B000, 0x1B002, wide},
+ {0x1F100, 0x1F10A, ambiguous},
+ {0x1F110, 0x1F12D, ambiguous},
+ {0x1F130, 0x1F169, ambiguous},
+ {0x1F170, 0x1F19B, ambiguous},
+ {0x1F200, 0x1F203, wide},
+ {0x1F210, 0x1F23B, wide},
+ {0x1F240, 0x1F249, wide},
+ {0x1F250, 0x1F252, wide},
+ {0x20000, 0x2FFFE, wide},
+ {0x30000, 0x3FFFE, wide},
+ {0xE0100, 0xE01F0, ambiguous},
+ {0xF0000, 0xFFFFD, ambiguous},
+ {0x100000, 0x10FFFE, ambiguous},
+}
+
+// Condition holds the EastAsianWidth flag indicating whether the current locale is CJK.
+type Condition struct {
+ EastAsianWidth bool
+}
+
+// NewCondition returns a new Condition initialized from the current locale.
+func NewCondition() *Condition {
+ return &Condition{EastAsianWidth}
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+ if r == 0 {
+ return 0
+ }
+ if r < 32 || (r >= 0x7f && r < 0xa0) {
+ return 1
+ }
+ for _, iv := range combining {
+ if iv.first <= r && r <= iv.last {
+ return 0
+ }
+ }
+
+ if c.EastAsianWidth && IsAmbiguousWidth(r) {
+ return 2
+ }
+
+ if r >= 0x1100 &&
+ (r <= 0x115f || r == 0x2329 || r == 0x232a ||
+ (r >= 0x2e80 && r <= 0xa4cf && r != 0x303f) ||
+ (r >= 0xac00 && r <= 0xd7a3) ||
+ (r >= 0xf900 && r <= 0xfaff) ||
+ (r >= 0xfe30 && r <= 0xfe6f) ||
+ (r >= 0xff00 && r <= 0xff60) ||
+ (r >= 0xffe0 && r <= 0xffe6) ||
+ (r >= 0x20000 && r <= 0x2fffd) ||
+ (r >= 0x30000 && r <= 0x3fffd)) {
+ return 2
+ }
+ return 1
+}
+
+// StringWidth returns the display width of s in cells.
+func (c *Condition) StringWidth(s string) (width int) {
+ for _, r := range []rune(s) {
+ width += c.RuneWidth(r)
+ }
+ return width
+}
+
+// Truncate returns s truncated to at most w cells, with tail appended when truncation occurs.
+func (c *Condition) Truncate(s string, w int, tail string) string {
+ if c.StringWidth(s) <= w {
+ return s
+ }
+ r := []rune(s)
+ tw := c.StringWidth(tail)
+ w -= tw
+ width := 0
+ i := 0
+ for ; i < len(r); i++ {
+ cw := c.RuneWidth(r[i])
+ if width+cw > w {
+ break
+ }
+ width += cw
+ }
+ return string(r[0:i]) + tail
+}
+
+// Wrap returns s wrapped so that no line exceeds w cells.
+func (c *Condition) Wrap(s string, w int) string {
+ width := 0
+ out := ""
+ for _, r := range []rune(s) {
+ cw := RuneWidth(r)
+ if r == '\n' {
+ out += string(r)
+ width = 0
+ continue
+ } else if width+cw > w {
+ out += "\n"
+ width = 0
+ out += string(r)
+ width += cw
+ continue
+ }
+ out += string(r)
+ width += cw
+ }
+ return out
+}
+
+// FillLeft returns s left-padded with spaces to a width of w cells.
+func (c *Condition) FillLeft(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return string(b) + s
+ }
+ return s
+}
+
+// FillRight returns s right-padded with spaces to a width of w cells.
+func (c *Condition) FillRight(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return s + string(b)
+ }
+ return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+ return DefaultCondition.RuneWidth(r)
+}
+
+func ct(r rune) ctype {
+ for _, iv := range ctypes {
+ if iv.first <= r && r <= iv.last {
+ return iv.ctype
+ }
+ }
+ return neutral
+}
+
+// IsAmbiguousWidth returns whether is ambiguous width or not.
+func IsAmbiguousWidth(r rune) bool {
+ return ct(r) == ambiguous
+}
+
+// IsNeutralWidth returns whether is neutral width or not.
+func IsNeutralWidth(r rune) bool {
+ return ct(r) == neutral
+}
+
+// StringWidth returns the display width of s in cells.
+func StringWidth(s string) (width int) {
+ return DefaultCondition.StringWidth(s)
+}
+
+// Truncate returns s truncated to at most w cells, with tail appended when truncation occurs.
+func Truncate(s string, w int, tail string) string {
+ return DefaultCondition.Truncate(s, w, tail)
+}
+
+// Wrap returns s wrapped so that no line exceeds w cells.
+func Wrap(s string, w int) string {
+ return DefaultCondition.Wrap(s, w)
+}
+
+// FillLeft returns s left-padded with spaces to a width of w cells.
+func FillLeft(s string, w int) string {
+ return DefaultCondition.FillLeft(s, w)
+}
+
+// FillRight returns s right-padded with spaces to a width of w cells.
+func FillRight(s string, w int) string {
+ return DefaultCondition.FillRight(s, w)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 000000000..0ce32c5e7
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,8 @@
+// +build js
+
+package runewidth
+
+func IsEastAsian() bool {
+ // TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
+ return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 000000000..c579e9a31
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,77 @@
+// +build !windows,!js
+
+package runewidth
+
+import (
+ "os"
+ "regexp"
+ "strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+var mblenTable = map[string]int{
+ "utf-8": 6,
+ "utf8": 6,
+ "jis": 8,
+ "eucjp": 3,
+ "euckr": 2,
+ "euccn": 2,
+ "sjis": 2,
+ "cp932": 2,
+ "cp51932": 2,
+ "cp936": 2,
+ "cp949": 2,
+ "cp950": 2,
+ "big5": 2,
+ "gbk": 2,
+ "gb2312": 2,
+}
+
+func isEastAsian(locale string) bool {
+ charset := strings.ToLower(locale)
+ r := reLoc.FindStringSubmatch(locale)
+ if len(r) == 2 {
+ charset = strings.ToLower(r[1])
+ }
+
+ if strings.HasSuffix(charset, "@cjk_narrow") {
+ return false
+ }
+
+ for pos, b := range []byte(charset) {
+ if b == '@' {
+ charset = charset[:pos]
+ break
+ }
+ }
+ max := 1
+ if m, ok := mblenTable[charset]; ok {
+ max = m
+ }
+ if max > 1 && (charset[0] != 'u' ||
+ strings.HasPrefix(locale, "ja") ||
+ strings.HasPrefix(locale, "ko") ||
+ strings.HasPrefix(locale, "zh")) {
+ return true
+ }
+ return false
+}
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+ locale := os.Getenv("LC_CTYPE")
+ if locale == "" {
+ locale = os.Getenv("LANG")
+ }
+
+ // ignore C locale
+ if locale == "POSIX" || locale == "C" {
+ return false
+ }
+ if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+ return false
+ }
+
+ return isEastAsian(locale)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
new file mode 100644
index 000000000..0258876b9
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
@@ -0,0 +1,25 @@
+package runewidth
+
+import (
+ "syscall"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32")
+ procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
+)
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+ r1, _, _ := procGetConsoleOutputCP.Call()
+ if r1 == 0 {
+ return false
+ }
+
+ switch int(r1) {
+ case 932, 51932, 936, 949, 950:
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
new file mode 100644
index 000000000..229851590
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md
new file mode 100644
index 000000000..60ae31170
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/README.md
@@ -0,0 +1,39 @@
+# go-wordwrap
+
+`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
+automatically wraps words into multiple lines. The primary use case for this
+is in formatting CLI output, but of course word wrapping is a generally useful
+thing to do.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-wordwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-wordwrap
+
+Below is an example of its usage ignoring errors:
+
+```go
+wrapped := wordwrap.WrapString("foo bar baz", 3)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+foo
+bar
+baz
+```
+
+## Word Wrap Algorithm
+
+This library doesn't use any clever algorithm for word wrapping. The wrapping
+is actually very naive: it breaks lines only at whitespace or explicit linebreaks.
+The goal of this library is for word wrapping CLI output, so the input is
+typically pretty well controlled human language. Because of this, the naive
+approach typically works just fine.
+
+In the future, we'd like to make the algorithm more advanced. We would do
+so without breaking the API.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
new file mode 100644
index 000000000..ac67205bc
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
@@ -0,0 +1,73 @@
+package wordwrap
+
+import (
+ "bytes"
+ "unicode"
+)
+
+// WrapString wraps the given string within lim width in characters.
+//
+// Wrapping is currently naive and only happens at white-space. A future
+// version of the library will implement smarter wrapping. This means that
+// pathological cases can dramatically reach past the limit, such as a very
+// long word.
+func WrapString(s string, lim uint) string {
+ // Initialize a buffer with a slightly larger size to account for breaks
+ init := make([]byte, 0, len(s))
+ buf := bytes.NewBuffer(init)
+
+ var current uint
+ var wordBuf, spaceBuf bytes.Buffer
+
+ for _, char := range s {
+ if char == '\n' {
+ if wordBuf.Len() == 0 {
+ if current+uint(spaceBuf.Len()) > lim {
+ current = 0
+ } else {
+ current += uint(spaceBuf.Len())
+ spaceBuf.WriteTo(buf)
+ }
+ spaceBuf.Reset()
+ } else {
+ current += uint(spaceBuf.Len() + wordBuf.Len())
+ spaceBuf.WriteTo(buf)
+ spaceBuf.Reset()
+ wordBuf.WriteTo(buf)
+ wordBuf.Reset()
+ }
+ buf.WriteRune(char)
+ current = 0
+ } else if unicode.IsSpace(char) {
+ if spaceBuf.Len() == 0 || wordBuf.Len() > 0 {
+ current += uint(spaceBuf.Len() + wordBuf.Len())
+ spaceBuf.WriteTo(buf)
+ spaceBuf.Reset()
+ wordBuf.WriteTo(buf)
+ wordBuf.Reset()
+ }
+
+ spaceBuf.WriteRune(char)
+ } else {
+
+ wordBuf.WriteRune(char)
+
+ if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim {
+ buf.WriteRune('\n')
+ current = 0
+ spaceBuf.Reset()
+ }
+ }
+ }
+
+ if wordBuf.Len() == 0 {
+ if current+uint(spaceBuf.Len()) <= lim {
+ spaceBuf.WriteTo(buf)
+ }
+ } else {
+ spaceBuf.WriteTo(buf)
+ wordBuf.WriteTo(buf)
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/nsf/termbox-go/AUTHORS b/vendor/github.com/nsf/termbox-go/AUTHORS
new file mode 100644
index 000000000..fe26fb0fb
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/AUTHORS
@@ -0,0 +1,4 @@
+# Please keep this file sorted.
+
+Georg Reinke <guelfey@googlemail.com>
+nsf <no.smile.face@gmail.com>
diff --git a/vendor/github.com/nsf/termbox-go/LICENSE b/vendor/github.com/nsf/termbox-go/LICENSE
new file mode 100644
index 000000000..d9bc068ce
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2012 termbox-go authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/nsf/termbox-go/README.md b/vendor/github.com/nsf/termbox-go/README.md
new file mode 100644
index 000000000..d152da407
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/README.md
@@ -0,0 +1,29 @@
+## Termbox
+Termbox is a library that provides a minimalistic API which allows the programmer to write text-based user interfaces. The library is cross-platform and has both terminal-based implementations on *nix operating systems and a winapi console based implementation for windows operating systems. The basic idea is an abstraction of the greatest common subset of features available on all major terminals and other terminal-like APIs in a minimalistic fashion. Small API means it is easy to implement, test, maintain and learn it, that's what makes the termbox a distinct library in its area.
+
+### Installation
+Install and update this go package with `go get -u github.com/nsf/termbox-go`
+
+### Examples
+For examples of what can be done take a look at demos in the _demos directory. You can try them with go run: `go run _demos/keyboard.go`
+
+There are also some interesting projects using termbox-go:
+ - [godit](https://github.com/nsf/godit) is an emacsish lightweight text editor written using termbox.
+ - [gomatrix](https://github.com/GeertJohan/gomatrix) connects to The Matrix and displays its data streams in your terminal.
+ - [gotetris](https://github.com/jjinux/gotetris) is an implementation of Tetris.
+ - [sokoban-go](https://github.com/rn2dy/sokoban-go) is an implementation of sokoban game.
+ - [hecate](https://github.com/evanmiller/hecate) is a hex editor designed by Satan.
+ - [httopd](https://github.com/verdverm/httopd) is top for httpd logs.
+ - [mop](https://github.com/michaeldv/mop) is stock market tracker for hackers.
+ - [termui](https://github.com/gizak/termui) is a terminal dashboard.
+ - [termloop](https://github.com/JoelOtter/termloop) is a terminal game engine.
+ - [xterm-color-chart](https://github.com/kutuluk/xterm-color-chart) is a XTerm 256 color chart.
+ - [gocui](https://github.com/jroimartin/gocui) is a minimalist Go library aimed at creating console user interfaces.
+ - [dry](https://github.com/moncho/dry) is an interactive cli to manage Docker containers.
+ - [pxl](https://github.com/ichinaski/pxl) displays images in the terminal.
+ - [snake-game](https://github.com/DyegoCosta/snake-game) is an implementation of the Snake game.
+ - [gone](https://github.com/guillaumebreton/gone) is a CLI pomodoro® timer.
+ - [Spoof.go](https://github.com/sabey/spoofgo) controllable movement spoofing from the cli
+
+### API reference
+[godoc.org/github.com/nsf/termbox-go](http://godoc.org/github.com/nsf/termbox-go)
diff --git a/vendor/github.com/nsf/termbox-go/api.go b/vendor/github.com/nsf/termbox-go/api.go
new file mode 100644
index 000000000..b339e532f
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/api.go
@@ -0,0 +1,458 @@
+// +build !windows
+
+package termbox
+
+import "github.com/mattn/go-runewidth"
+import "fmt"
+import "os"
+import "os/signal"
+import "syscall"
+import "runtime"
+
+// public API
+
+// Initializes termbox library. This function should be called before any other functions.
+// After successful initialization, the library must be finalized using 'Close' function.
+//
+// Example usage:
+// err := termbox.Init()
+// if err != nil {
+// panic(err)
+// }
+// defer termbox.Close()
+func Init() error {
+ var err error
+
+ out, err = os.OpenFile("/dev/tty", syscall.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ in, err = syscall.Open("/dev/tty", syscall.O_RDONLY, 0)
+ if err != nil {
+ return err
+ }
+
+ err = setup_term()
+ if err != nil {
+ return fmt.Errorf("termbox: error while reading terminfo data: %v", err)
+ }
+
+ signal.Notify(sigwinch, syscall.SIGWINCH)
+ signal.Notify(sigio, syscall.SIGIO)
+
+ _, err = fcntl(in, syscall.F_SETFL, syscall.O_ASYNC|syscall.O_NONBLOCK)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(in, syscall.F_SETOWN, syscall.Getpid())
+ if runtime.GOOS != "darwin" && err != nil {
+ return err
+ }
+ err = tcgetattr(out.Fd(), &orig_tios)
+ if err != nil {
+ return err
+ }
+
+ tios := orig_tios
+ tios.Iflag &^= syscall_IGNBRK | syscall_BRKINT | syscall_PARMRK |
+ syscall_ISTRIP | syscall_INLCR | syscall_IGNCR |
+ syscall_ICRNL | syscall_IXON
+ tios.Lflag &^= syscall_ECHO | syscall_ECHONL | syscall_ICANON |
+ syscall_ISIG | syscall_IEXTEN
+ tios.Cflag &^= syscall_CSIZE | syscall_PARENB
+ tios.Cflag |= syscall_CS8
+ tios.Cc[syscall_VMIN] = 1
+ tios.Cc[syscall_VTIME] = 0
+
+ err = tcsetattr(out.Fd(), &tios)
+ if err != nil {
+ return err
+ }
+
+ out.WriteString(funcs[t_enter_ca])
+ out.WriteString(funcs[t_enter_keypad])
+ out.WriteString(funcs[t_hide_cursor])
+ out.WriteString(funcs[t_clear_screen])
+
+ termw, termh = get_term_size(out.Fd())
+ back_buffer.init(termw, termh)
+ front_buffer.init(termw, termh)
+ back_buffer.clear()
+ front_buffer.clear()
+
+ go func() {
+ buf := make([]byte, 128)
+ for {
+ select {
+ case <-sigio:
+ for {
+ n, err := syscall.Read(in, buf)
+ if err == syscall.EAGAIN || err == syscall.EWOULDBLOCK {
+ break
+ }
+ select {
+ case input_comm <- input_event{buf[:n], err}:
+ ie := <-input_comm
+ buf = ie.data[:128]
+ case <-quit:
+ return
+ }
+ }
+ case <-quit:
+ return
+ }
+ }
+ }()
+
+ IsInit = true
+ return nil
+}
+
+// Interrupt an in-progress call to PollEvent by causing it to return
+// EventInterrupt. Note that this function will block until the PollEvent
+// function has successfully been interrupted.
+func Interrupt() {
+ interrupt_comm <- struct{}{}
+}
+
+// Finalizes termbox library, should be called after successful initialization
+// when termbox's functionality isn't required anymore.
+func Close() {
+ quit <- 1
+ out.WriteString(funcs[t_show_cursor])
+ out.WriteString(funcs[t_sgr0])
+ out.WriteString(funcs[t_clear_screen])
+ out.WriteString(funcs[t_exit_ca])
+ out.WriteString(funcs[t_exit_keypad])
+ out.WriteString(funcs[t_exit_mouse])
+ tcsetattr(out.Fd(), &orig_tios)
+
+ out.Close()
+ syscall.Close(in)
+
+ // reset the state, so that on next Init() it will work again
+ termw = 0
+ termh = 0
+ input_mode = InputEsc
+ out = nil
+ in = 0
+ lastfg = attr_invalid
+ lastbg = attr_invalid
+ lastx = coord_invalid
+ lasty = coord_invalid
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ IsInit = false
+}
+
+// Synchronizes the internal back buffer with the terminal.
+func Flush() error {
+ // invalidate cursor position
+ lastx = coord_invalid
+ lasty = coord_invalid
+
+ update_size_maybe()
+
+ for y := 0; y < front_buffer.height; y++ {
+ line_offset := y * front_buffer.width
+ for x := 0; x < front_buffer.width; {
+ cell_offset := line_offset + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ if back.Ch < ' ' {
+ back.Ch = ' '
+ }
+ w := runewidth.RuneWidth(back.Ch)
+ if w == 0 || w == 2 && runewidth.IsAmbiguousWidth(back.Ch) {
+ w = 1
+ }
+ if *back == *front {
+ x += w
+ continue
+ }
+ *front = *back
+ send_attr(back.Fg, back.Bg)
+
+ if w == 2 && x == front_buffer.width-1 {
+ // there's not enough space for 2-cells rune,
+ // let's just put a space in there
+ send_char(x, y, ' ')
+ } else {
+ send_char(x, y, back.Ch)
+ if w == 2 {
+ next := cell_offset + 1
+ front_buffer.cells[next] = Cell{
+ Ch: 0,
+ Fg: back.Fg,
+ Bg: back.Bg,
+ }
+ }
+ }
+ x += w
+ }
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+ return flush()
+}
+
+// Sets the position of the cursor. See also HideCursor().
+func SetCursor(x, y int) {
+ if is_cursor_hidden(cursor_x, cursor_y) && !is_cursor_hidden(x, y) {
+ outbuf.WriteString(funcs[t_show_cursor])
+ }
+
+ if !is_cursor_hidden(cursor_x, cursor_y) && is_cursor_hidden(x, y) {
+ outbuf.WriteString(funcs[t_hide_cursor])
+ }
+
+ cursor_x, cursor_y = x, y
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+}
+
+// The shortcut for SetCursor(-1, -1).
+func HideCursor() {
+ SetCursor(cursor_hidden, cursor_hidden)
+}
+
+// Changes cell's parameters in the internal back buffer at the specified
+// position.
+func SetCell(x, y int, ch rune, fg, bg Attribute) {
+ if x < 0 || x >= back_buffer.width {
+ return
+ }
+ if y < 0 || y >= back_buffer.height {
+ return
+ }
+
+ back_buffer.cells[y*back_buffer.width+x] = Cell{ch, fg, bg}
+}
+
+// Returns a slice into the termbox's back buffer. You can get its dimensions
+// using 'Size' function. The slice remains valid as long as no 'Clear' or
+// 'Flush' function calls were made after call to this function.
+func CellBuffer() []Cell {
+ return back_buffer.cells
+}
+
+// After getting a raw event from PollRawEvent function call, you can parse it
+// again into an ordinary one using termbox logic. That is parse an event as
+// termbox would do it. Returned event in addition to usual Event struct fields
+// sets N field to the amount of bytes used within 'data' slice. If the length
+// of 'data' slice is zero or event cannot be parsed for some other reason, the
+// function will return a special event type: EventNone.
+//
+// IMPORTANT: EventNone may contain a non-zero N, which means you should skip
+// these bytes, because termbox cannot recognize them.
+//
+// NOTE: This API is experimental and may change in future.
+func ParseEvent(data []byte) Event {
+ event := Event{Type: EventKey}
+ ok := extract_event(data, &event)
+ if !ok {
+ return Event{Type: EventNone, N: event.N}
+ }
+ return event
+}
+
+// Wait for an event and return it. This is a blocking function call. Instead
+// of EventKey and EventMouse it returns EventRaw events. Raw event is written
+// into `data` slice and Event's N field is set to the amount of bytes written.
+// The minimum required length of the 'data' slice is 1. This requirement may
+// vary on different platforms.
+//
+// NOTE: This API is experimental and may change in future.
+func PollRawEvent(data []byte) Event {
+ if len(data) == 0 {
+ panic("len(data) >= 1 is a requirement")
+ }
+
+ var event Event
+ if extract_raw_event(data, &event) {
+ return event
+ }
+
+ for {
+ select {
+ case ev := <-input_comm:
+ if ev.err != nil {
+ return Event{Type: EventError, Err: ev.err}
+ }
+
+ inbuf = append(inbuf, ev.data...)
+ input_comm <- ev
+ if extract_raw_event(data, &event) {
+ return event
+ }
+ case <-interrupt_comm:
+ event.Type = EventInterrupt
+ return event
+
+ case <-sigwinch:
+ event.Type = EventResize
+ event.Width, event.Height = get_term_size(out.Fd())
+ return event
+ }
+ }
+}
+
+// Wait for an event and return it. This is a blocking function call.
+func PollEvent() Event {
+ var event Event
+
+ // try to extract event from input buffer, return on success
+ event.Type = EventKey
+ ok := extract_event(inbuf, &event)
+ if event.N != 0 {
+ copy(inbuf, inbuf[event.N:])
+ inbuf = inbuf[:len(inbuf)-event.N]
+ }
+ if ok {
+ return event
+ }
+
+ for {
+ select {
+ case ev := <-input_comm:
+ if ev.err != nil {
+ return Event{Type: EventError, Err: ev.err}
+ }
+
+ inbuf = append(inbuf, ev.data...)
+ input_comm <- ev
+ ok := extract_event(inbuf, &event)
+ if event.N != 0 {
+ copy(inbuf, inbuf[event.N:])
+ inbuf = inbuf[:len(inbuf)-event.N]
+ }
+ if ok {
+ return event
+ }
+ case <-interrupt_comm:
+ event.Type = EventInterrupt
+ return event
+
+ case <-sigwinch:
+ event.Type = EventResize
+ event.Width, event.Height = get_term_size(out.Fd())
+ return event
+ }
+ }
+ panic("unreachable")
+}
+
+// Returns the size of the internal back buffer (which is mostly the same as
+// terminal's window size in characters). But it doesn't always match the size
+// of the terminal window, after the terminal size has changed, the internal
+// back buffer will get in sync only after Clear or Flush function calls.
+func Size() (width int, height int) {
+ return termw, termh
+}
+
+// Clears the internal back buffer.
+func Clear(fg, bg Attribute) error {
+ foreground, background = fg, bg
+ err := update_size_maybe()
+ back_buffer.clear()
+ return err
+}
+
+// Sets termbox input mode. Termbox has two input modes:
+//
+// 1. Esc input mode. When ESC sequence is in the buffer and it doesn't match
+// any known sequence. ESC means KeyEsc. This is the default input mode.
+//
+// 2. Alt input mode. When ESC sequence is in the buffer and it doesn't match
+// any known sequence. ESC enables ModAlt modifier for the next keyboard event.
+//
+// Both input modes can be OR'ed with Mouse mode. Setting Mouse mode bit up will
+// enable mouse button press/release and drag events.
+//
+// If 'mode' is InputCurrent, returns the current input mode. See also Input*
+// constants.
+func SetInputMode(mode InputMode) InputMode {
+ if mode == InputCurrent {
+ return input_mode
+ }
+ if mode&(InputEsc|InputAlt) == 0 {
+ mode |= InputEsc
+ }
+ if mode&(InputEsc|InputAlt) == InputEsc|InputAlt {
+ mode &^= InputAlt
+ }
+ if mode&InputMouse != 0 {
+ out.WriteString(funcs[t_enter_mouse])
+ } else {
+ out.WriteString(funcs[t_exit_mouse])
+ }
+
+ input_mode = mode
+ return input_mode
+}
+
+// Sets the termbox output mode. Termbox has four output options:
+//
+// 1. OutputNormal => [1..8]
+// This mode provides 8 different colors:
+// black, red, green, yellow, blue, magenta, cyan, white
+// Shortcut: ColorBlack, ColorRed, ...
+// Attributes: AttrBold, AttrUnderline, AttrReverse
+//
+// Example usage:
+// SetCell(x, y, '@', ColorBlack | AttrBold, ColorRed);
+//
+// 2. Output256 => [1..256]
+// In this mode you can leverage the 256 terminal mode:
+// 0x01 - 0x08: the 8 colors as in OutputNormal
+// 0x09 - 0x10: Color* | AttrBold
+// 0x11 - 0xe8: 216 different colors
+// 0xe9 - 0x1ff: 24 different shades of grey
+//
+// Example usage:
+// SetCell(x, y, '@', 184, 240);
+// SetCell(x, y, '@', 0xb8, 0xf0);
+//
+// 3. Output216 => [1..216]
+// This mode supports the 3rd range of the 256 mode only.
+// But you don't need to provide an offset.
+//
+// 4. OutputGrayscale => [1..26]
+// This mode supports the 4th range of the 256 mode
+// and black and white colors from the 3rd range of the 256 mode
+// But you don't need to provide an offset.
+//
+// In all modes, 0x00 represents the default color.
+//
+// `go run _demos/output.go` to see its impact on your terminal.
+//
+// If 'mode' is OutputCurrent, it returns the current output mode.
+//
+// Note that this may return a different OutputMode than the one requested,
+// as the requested mode may not be available on the target platform.
+func SetOutputMode(mode OutputMode) OutputMode {
+ if mode == OutputCurrent {
+ return output_mode
+ }
+
+ output_mode = mode
+ return output_mode
+}
+
+// Sync comes handy when something causes desync between termbox's understanding
+// of a terminal buffer and the reality. Such as a third party process. Sync
+// forces a complete resync between the termbox and a terminal, it may not be
+// visually pretty though.
+func Sync() error {
+ front_buffer.clear()
+ err := send_clear()
+ if err != nil {
+ return err
+ }
+
+ return Flush()
+}
diff --git a/vendor/github.com/nsf/termbox-go/api_common.go b/vendor/github.com/nsf/termbox-go/api_common.go
new file mode 100644
index 000000000..9f23661f5
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/api_common.go
@@ -0,0 +1,187 @@
+// termbox is a library for creating cross-platform text-based interfaces
+package termbox
+
+// public API, common OS agnostic part
+
+type (
+ InputMode int
+ OutputMode int
+ EventType uint8
+ Modifier uint8
+ Key uint16
+ Attribute uint16
+)
+
+// This type represents a termbox event. The 'Mod', 'Key' and 'Ch' fields are
+// valid if 'Type' is EventKey. The 'Width' and 'Height' fields are valid if
+// 'Type' is EventResize. The 'Err' field is valid if 'Type' is EventError.
+type Event struct {
+ Type EventType // one of Event* constants
+ Mod Modifier // one of Mod* constants or 0
+ Key Key // one of Key* constants, invalid if 'Ch' is not 0
+ Ch rune // a unicode character
+ Width int // width of the screen
+ Height int // height of the screen
+ Err error // error in case if input failed
+ MouseX int // x coord of mouse
+ MouseY int // y coord of mouse
+ N int // number of bytes written when getting a raw event
+}
+
+// A cell, single conceptual entity on the screen. The screen is basically a 2d
+// array of cells. 'Ch' is a unicode character, 'Fg' and 'Bg' are foreground
+// and background attributes respectively.
+type Cell struct {
+ Ch rune
+ Fg Attribute
+ Bg Attribute
+}
+
+// To know if termbox has been initialized or not
+var (
+ IsInit bool = false
+)
+
+// Key constants, see Event.Key field.
+const (
+ KeyF1 Key = 0xFFFF - iota
+ KeyF2
+ KeyF3
+ KeyF4
+ KeyF5
+ KeyF6
+ KeyF7
+ KeyF8
+ KeyF9
+ KeyF10
+ KeyF11
+ KeyF12
+ KeyInsert
+ KeyDelete
+ KeyHome
+ KeyEnd
+ KeyPgup
+ KeyPgdn
+ KeyArrowUp
+ KeyArrowDown
+ KeyArrowLeft
+ KeyArrowRight
+ key_min // see terminfo
+ MouseLeft
+ MouseMiddle
+ MouseRight
+ MouseRelease
+ MouseWheelUp
+ MouseWheelDown
+)
+
+const (
+ KeyCtrlTilde Key = 0x00
+ KeyCtrl2 Key = 0x00
+ KeyCtrlSpace Key = 0x00
+ KeyCtrlA Key = 0x01
+ KeyCtrlB Key = 0x02
+ KeyCtrlC Key = 0x03
+ KeyCtrlD Key = 0x04
+ KeyCtrlE Key = 0x05
+ KeyCtrlF Key = 0x06
+ KeyCtrlG Key = 0x07
+ KeyBackspace Key = 0x08
+ KeyCtrlH Key = 0x08
+ KeyTab Key = 0x09
+ KeyCtrlI Key = 0x09
+ KeyCtrlJ Key = 0x0A
+ KeyCtrlK Key = 0x0B
+ KeyCtrlL Key = 0x0C
+ KeyEnter Key = 0x0D
+ KeyCtrlM Key = 0x0D
+ KeyCtrlN Key = 0x0E
+ KeyCtrlO Key = 0x0F
+ KeyCtrlP Key = 0x10
+ KeyCtrlQ Key = 0x11
+ KeyCtrlR Key = 0x12
+ KeyCtrlS Key = 0x13
+ KeyCtrlT Key = 0x14
+ KeyCtrlU Key = 0x15
+ KeyCtrlV Key = 0x16
+ KeyCtrlW Key = 0x17
+ KeyCtrlX Key = 0x18
+ KeyCtrlY Key = 0x19
+ KeyCtrlZ Key = 0x1A
+ KeyEsc Key = 0x1B
+ KeyCtrlLsqBracket Key = 0x1B
+ KeyCtrl3 Key = 0x1B
+ KeyCtrl4 Key = 0x1C
+ KeyCtrlBackslash Key = 0x1C
+ KeyCtrl5 Key = 0x1D
+ KeyCtrlRsqBracket Key = 0x1D
+ KeyCtrl6 Key = 0x1E
+ KeyCtrl7 Key = 0x1F
+ KeyCtrlSlash Key = 0x1F
+ KeyCtrlUnderscore Key = 0x1F
+ KeySpace Key = 0x20
+ KeyBackspace2 Key = 0x7F
+ KeyCtrl8 Key = 0x7F
+)
+
+// Alt modifier constant, see Event.Mod field and SetInputMode function.
+const (
+ ModAlt Modifier = 1 << iota
+ ModMotion
+)
+
+// Cell colors, you can combine a color with multiple attributes using bitwise
+// OR ('|').
+const (
+ ColorDefault Attribute = iota
+ ColorBlack
+ ColorRed
+ ColorGreen
+ ColorYellow
+ ColorBlue
+ ColorMagenta
+ ColorCyan
+ ColorWhite
+)
+
+// Cell attributes, it is possible to use multiple attributes by combining them
+// using bitwise OR ('|'). Although, colors cannot be combined. But you can
+// combine attributes and a single color.
+//
+// It's worth mentioning that some platforms don't support certain attributes.
+// For example windows console doesn't support AttrUnderline. And on some
+// terminals applying AttrBold to background may result in blinking text. Use
+// them with caution and test your code on various terminals.
+const (
+ AttrBold Attribute = 1 << (iota + 9)
+ AttrUnderline
+ AttrReverse
+)
+
+// Input mode. See SetInputMode function.
+const (
+ InputEsc InputMode = 1 << iota
+ InputAlt
+ InputMouse
+ InputCurrent InputMode = 0
+)
+
+// Output mode. See SetOutputMode function.
+const (
+ OutputCurrent OutputMode = iota
+ OutputNormal
+ Output256
+ Output216
+ OutputGrayscale
+)
+
+// Event type. See Event.Type field.
+const (
+ EventKey EventType = iota
+ EventResize
+ EventMouse
+ EventError
+ EventInterrupt
+ EventRaw
+ EventNone
+)
diff --git a/vendor/github.com/nsf/termbox-go/api_windows.go b/vendor/github.com/nsf/termbox-go/api_windows.go
new file mode 100644
index 000000000..7def30a67
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/api_windows.go
@@ -0,0 +1,239 @@
+package termbox
+
+import (
+ "syscall"
+)
+
+// public API
+
+// Initializes termbox library. This function should be called before any other functions.
+// After successful initialization, the library must be finalized using 'Close' function.
+//
+// Example usage:
+// err := termbox.Init()
+// if err != nil {
+// panic(err)
+// }
+// defer termbox.Close()
+func Init() error {
+ var err error
+
+ interrupt, err = create_event()
+ if err != nil {
+ return err
+ }
+
+ in, err = syscall.Open("CONIN$", syscall.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ out, err = syscall.Open("CONOUT$", syscall.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+
+ err = get_console_mode(in, &orig_mode)
+ if err != nil {
+ return err
+ }
+
+ err = set_console_mode(in, enable_window_input)
+ if err != nil {
+ return err
+ }
+
+ orig_size = get_term_size(out)
+ win_size := get_win_size(out)
+
+ err = set_console_screen_buffer_size(out, win_size)
+ if err != nil {
+ return err
+ }
+
+ err = get_console_cursor_info(out, &orig_cursor_info)
+ if err != nil {
+ return err
+ }
+
+ show_cursor(false)
+ term_size = get_term_size(out)
+ back_buffer.init(int(term_size.x), int(term_size.y))
+ front_buffer.init(int(term_size.x), int(term_size.y))
+ back_buffer.clear()
+ front_buffer.clear()
+ clear()
+
+ diffbuf = make([]diff_msg, 0, 32)
+
+ go input_event_producer()
+ IsInit = true
+ return nil
+}
+
+// Finalizes termbox library, should be called after successful initialization
+// when termbox's functionality isn't required anymore.
+func Close() {
+ // we ignore errors here, because we can't really do anything about them
+ Clear(0, 0)
+ Flush()
+
+ // stop event producer
+ cancel_comm <- true
+ set_event(interrupt)
+ select {
+ case <-input_comm:
+ default:
+ }
+ <-cancel_done_comm
+
+ set_console_cursor_info(out, &orig_cursor_info)
+ set_console_cursor_position(out, coord{})
+ set_console_screen_buffer_size(out, orig_size)
+ set_console_mode(in, orig_mode)
+ syscall.Close(in)
+ syscall.Close(out)
+ syscall.Close(interrupt)
+ IsInit = false
+}
+
+// Interrupt an in-progress call to PollEvent by causing it to return
+// EventInterrupt. Note that this function will block until the PollEvent
+// function has successfully been interrupted.
+func Interrupt() {
+ interrupt_comm <- struct{}{}
+}
+
+// Synchronizes the internal back buffer with the terminal.
+func Flush() error {
+ update_size_maybe()
+ prepare_diff_messages()
+ for _, diff := range diffbuf {
+ r := small_rect{
+ left: 0,
+ top: diff.pos,
+ right: term_size.x - 1,
+ bottom: diff.pos + diff.lines - 1,
+ }
+ write_console_output(out, diff.chars, r)
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+ return nil
+}
+
+// Sets the position of the cursor. See also HideCursor().
+func SetCursor(x, y int) {
+ if is_cursor_hidden(cursor_x, cursor_y) && !is_cursor_hidden(x, y) {
+ show_cursor(true)
+ }
+
+ if !is_cursor_hidden(cursor_x, cursor_y) && is_cursor_hidden(x, y) {
+ show_cursor(false)
+ }
+
+ cursor_x, cursor_y = x, y
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+}
+
+// The shortcut for SetCursor(-1, -1).
+func HideCursor() {
+ SetCursor(cursor_hidden, cursor_hidden)
+}
+
+// Changes cell's parameters in the internal back buffer at the specified
+// position.
+func SetCell(x, y int, ch rune, fg, bg Attribute) {
+ if x < 0 || x >= back_buffer.width {
+ return
+ }
+ if y < 0 || y >= back_buffer.height {
+ return
+ }
+
+ back_buffer.cells[y*back_buffer.width+x] = Cell{ch, fg, bg}
+}
+
+// Returns a slice into the termbox's back buffer. You can get its dimensions
+// using 'Size' function. The slice remains valid as long as no 'Clear' or
+// 'Flush' function calls were made after call to this function.
+func CellBuffer() []Cell {
+ return back_buffer.cells
+}
+
+// Wait for an event and return it. This is a blocking function call.
+func PollEvent() Event {
+ select {
+ case ev := <-input_comm:
+ return ev
+ case <-interrupt_comm:
+ return Event{Type: EventInterrupt}
+ }
+}
+
+// Returns the size of the internal back buffer (which is mostly the same as
+// console's window size in characters). But it doesn't always match the size
+// of the console window, after the console size has changed, the internal back
+// buffer will get in sync only after Clear or Flush function calls.
+func Size() (int, int) {
+ return int(term_size.x), int(term_size.y)
+}
+
+// Clears the internal back buffer.
+func Clear(fg, bg Attribute) error {
+ foreground, background = fg, bg
+ update_size_maybe()
+ back_buffer.clear()
+ return nil
+}
+
+// Sets termbox input mode. Termbox has two input modes:
+//
+// 1. Esc input mode. When ESC sequence is in the buffer and it doesn't match
+// any known sequence. ESC means KeyEsc. This is the default input mode.
+//
+// 2. Alt input mode. When ESC sequence is in the buffer and it doesn't match
+// any known sequence. ESC enables ModAlt modifier for the next keyboard event.
+//
+// Both input modes can be OR'ed with Mouse mode. Setting Mouse mode bit up will
+// enable mouse button press/release and drag events.
+//
+// If 'mode' is InputCurrent, returns the current input mode. See also Input*
+// constants.
+func SetInputMode(mode InputMode) InputMode {
+ if mode == InputCurrent {
+ return input_mode
+ }
+ if mode&InputMouse != 0 {
+ err := set_console_mode(in, enable_window_input|enable_mouse_input|enable_extended_flags)
+ if err != nil {
+ panic(err)
+ }
+ } else {
+ err := set_console_mode(in, enable_window_input)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ input_mode = mode
+ return input_mode
+}
+
+// Sets the termbox output mode.
+//
+// Windows console does not support extra colour modes,
+// so this will always set and return OutputNormal.
+func SetOutputMode(mode OutputMode) OutputMode {
+ return OutputNormal
+}
+
+// Sync comes handy when something causes desync between termbox's understanding
+// of a terminal buffer and the reality. Such as a third party process. Sync
+// forces a complete resync between the termbox and a terminal, it may not be
+// visually pretty though. At the moment on Windows it does nothing.
+func Sync() error {
+ return nil
+}
diff --git a/vendor/github.com/nsf/termbox-go/collect_terminfo.py b/vendor/github.com/nsf/termbox-go/collect_terminfo.py
new file mode 100755
index 000000000..5e50975e6
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/collect_terminfo.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+import sys, os, subprocess
+
+def escaped(s):
+ return repr(s)[1:-1]
+
+def tput(term, name):
+ try:
+ return subprocess.check_output(['tput', '-T%s' % term, name]).decode()
+ except subprocess.CalledProcessError as e:
+ return e.output.decode()
+
+
+def w(s):
+ if s == None:
+ return
+ sys.stdout.write(s)
+
+terminals = {
+ 'xterm' : 'xterm',
+ 'rxvt-256color' : 'rxvt_256color',
+ 'rxvt-unicode' : 'rxvt_unicode',
+ 'linux' : 'linux',
+ 'Eterm' : 'eterm',
+ 'screen' : 'screen'
+}
+
+keys = [
+ "F1", "kf1",
+ "F2", "kf2",
+ "F3", "kf3",
+ "F4", "kf4",
+ "F5", "kf5",
+ "F6", "kf6",
+ "F7", "kf7",
+ "F8", "kf8",
+ "F9", "kf9",
+ "F10", "kf10",
+ "F11", "kf11",
+ "F12", "kf12",
+ "INSERT", "kich1",
+ "DELETE", "kdch1",
+ "HOME", "khome",
+ "END", "kend",
+ "PGUP", "kpp",
+ "PGDN", "knp",
+ "KEY_UP", "kcuu1",
+ "KEY_DOWN", "kcud1",
+ "KEY_LEFT", "kcub1",
+ "KEY_RIGHT", "kcuf1"
+]
+
+funcs = [
+ "T_ENTER_CA", "smcup",
+ "T_EXIT_CA", "rmcup",
+ "T_SHOW_CURSOR", "cnorm",
+ "T_HIDE_CURSOR", "civis",
+ "T_CLEAR_SCREEN", "clear",
+ "T_SGR0", "sgr0",
+ "T_UNDERLINE", "smul",
+ "T_BOLD", "bold",
+ "T_BLINK", "blink",
+ "T_REVERSE", "rev",
+ "T_ENTER_KEYPAD", "smkx",
+ "T_EXIT_KEYPAD", "rmkx"
+]
+
+def iter_pairs(iterable):
+ iterable = iter(iterable)
+ while True:
+ yield (next(iterable), next(iterable))
+
+def do_term(term, nick):
+ w("// %s\n" % term)
+ w("var %s_keys = []string{\n\t" % nick)
+ for k, v in iter_pairs(keys):
+ w('"')
+ w(escaped(tput(term, v)))
+ w('",')
+ w("\n}\n")
+ w("var %s_funcs = []string{\n\t" % nick)
+ for k,v in iter_pairs(funcs):
+ w('"')
+ if v == "sgr":
+ w("\\033[3%d;4%dm")
+ elif v == "cup":
+ w("\\033[%d;%dH")
+ else:
+ w(escaped(tput(term, v)))
+ w('", ')
+ w("\n}\n\n")
+
+def do_terms(d):
+ w("var terms = []struct {\n")
+ w("\tname string\n")
+ w("\tkeys []string\n")
+ w("\tfuncs []string\n")
+ w("}{\n")
+ for k, v in d.items():
+ w('\t{"%s", %s_keys, %s_funcs},\n' % (k, v, v))
+ w("}\n\n")
+
+w("// +build !windows\n\npackage termbox\n\n")
+
+for k,v in terminals.items():
+ do_term(k, v)
+
+do_terms(terminals)
+
diff --git a/vendor/github.com/nsf/termbox-go/syscalls.go b/vendor/github.com/nsf/termbox-go/syscalls.go
new file mode 100644
index 000000000..4f52bb9af
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls.go
@@ -0,0 +1,39 @@
+// +build ignore
+
+package termbox
+
+/*
+#include <termios.h>
+#include <sys/ioctl.h>
+*/
+import "C"
+
+type syscall_Termios C.struct_termios
+
+const (
+ syscall_IGNBRK = C.IGNBRK
+ syscall_BRKINT = C.BRKINT
+ syscall_PARMRK = C.PARMRK
+ syscall_ISTRIP = C.ISTRIP
+ syscall_INLCR = C.INLCR
+ syscall_IGNCR = C.IGNCR
+ syscall_ICRNL = C.ICRNL
+ syscall_IXON = C.IXON
+ syscall_OPOST = C.OPOST
+ syscall_ECHO = C.ECHO
+ syscall_ECHONL = C.ECHONL
+ syscall_ICANON = C.ICANON
+ syscall_ISIG = C.ISIG
+ syscall_IEXTEN = C.IEXTEN
+ syscall_CSIZE = C.CSIZE
+ syscall_PARENB = C.PARENB
+ syscall_CS8 = C.CS8
+ syscall_VMIN = C.VMIN
+ syscall_VTIME = C.VTIME
+
+ // on darwin change these to (on *bsd too?):
+ // C.TIOCGETA
+ // C.TIOCSETA
+ syscall_TCGETS = C.TCGETS
+ syscall_TCSETS = C.TCSETS
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_darwin.go b/vendor/github.com/nsf/termbox-go/syscalls_darwin.go
new file mode 100644
index 000000000..25b78f7ab
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_darwin.go
@@ -0,0 +1,41 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+// +build !amd64
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go b/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go
new file mode 100644
index 000000000..11f25be79
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_darwin_amd64.go
@@ -0,0 +1,40 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint64
+ Oflag uint64
+ Cflag uint64
+ Lflag uint64
+ Cc [20]uint8
+ Pad_cgo_0 [4]byte
+ Ispeed uint64
+ Ospeed uint64
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x40487413
+ syscall_TCSETS = 0x80487414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go b/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go
new file mode 100644
index 000000000..e03624ebc
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_dragonfly.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go b/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go
new file mode 100644
index 000000000..e03624ebc
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_freebsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_linux.go b/vendor/github.com/nsf/termbox-go/syscalls_linux.go
new file mode 100644
index 000000000..b88960de6
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_linux.go
@@ -0,0 +1,33 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+import "syscall"
+
+type syscall_Termios syscall.Termios
+
+const (
+ syscall_IGNBRK = syscall.IGNBRK
+ syscall_BRKINT = syscall.BRKINT
+ syscall_PARMRK = syscall.PARMRK
+ syscall_ISTRIP = syscall.ISTRIP
+ syscall_INLCR = syscall.INLCR
+ syscall_IGNCR = syscall.IGNCR
+ syscall_ICRNL = syscall.ICRNL
+ syscall_IXON = syscall.IXON
+ syscall_OPOST = syscall.OPOST
+ syscall_ECHO = syscall.ECHO
+ syscall_ECHONL = syscall.ECHONL
+ syscall_ICANON = syscall.ICANON
+ syscall_ISIG = syscall.ISIG
+ syscall_IEXTEN = syscall.IEXTEN
+ syscall_CSIZE = syscall.CSIZE
+ syscall_PARENB = syscall.PARENB
+ syscall_CS8 = syscall.CS8
+ syscall_VMIN = syscall.VMIN
+ syscall_VTIME = syscall.VTIME
+
+ syscall_TCGETS = syscall.TCGETS
+ syscall_TCSETS = syscall.TCSETS
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go b/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go
new file mode 100644
index 000000000..49a3355b9
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_netbsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go b/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go
new file mode 100644
index 000000000..49a3355b9
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_openbsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/vendor/github.com/nsf/termbox-go/syscalls_windows.go b/vendor/github.com/nsf/termbox-go/syscalls_windows.go
new file mode 100644
index 000000000..472d002a5
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/syscalls_windows.go
@@ -0,0 +1,61 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- -DUNICODE syscalls.go
+
+package termbox
+
+const (
+ foreground_blue = 0x1
+ foreground_green = 0x2
+ foreground_red = 0x4
+ foreground_intensity = 0x8
+ background_blue = 0x10
+ background_green = 0x20
+ background_red = 0x40
+ background_intensity = 0x80
+ std_input_handle = -0xa
+ std_output_handle = -0xb
+ key_event = 0x1
+ mouse_event = 0x2
+ window_buffer_size_event = 0x4
+ enable_window_input = 0x8
+ enable_mouse_input = 0x10
+ enable_extended_flags = 0x80
+
+ vk_f1 = 0x70
+ vk_f2 = 0x71
+ vk_f3 = 0x72
+ vk_f4 = 0x73
+ vk_f5 = 0x74
+ vk_f6 = 0x75
+ vk_f7 = 0x76
+ vk_f8 = 0x77
+ vk_f9 = 0x78
+ vk_f10 = 0x79
+ vk_f11 = 0x7a
+ vk_f12 = 0x7b
+ vk_insert = 0x2d
+ vk_delete = 0x2e
+ vk_home = 0x24
+ vk_end = 0x23
+ vk_pgup = 0x21
+ vk_pgdn = 0x22
+ vk_arrow_up = 0x26
+ vk_arrow_down = 0x28
+ vk_arrow_left = 0x25
+ vk_arrow_right = 0x27
+ vk_backspace = 0x8
+ vk_tab = 0x9
+ vk_enter = 0xd
+ vk_esc = 0x1b
+ vk_space = 0x20
+
+ left_alt_pressed = 0x2
+ left_ctrl_pressed = 0x8
+ right_alt_pressed = 0x1
+ right_ctrl_pressed = 0x4
+ shift_pressed = 0x10
+
+ generic_read = 0x80000000
+ generic_write = 0x40000000
+ console_textmode_buffer = 0x1
+)
diff --git a/vendor/github.com/nsf/termbox-go/termbox.go b/vendor/github.com/nsf/termbox-go/termbox.go
new file mode 100644
index 000000000..6e5ba6c8f
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/termbox.go
@@ -0,0 +1,514 @@
+// +build !windows
+
+package termbox
+
+import "unicode/utf8"
+import "bytes"
+import "syscall"
+import "unsafe"
+import "strings"
+import "strconv"
+import "os"
+import "io"
+
+// private API
+
+const (
+ t_enter_ca = iota
+ t_exit_ca
+ t_show_cursor
+ t_hide_cursor
+ t_clear_screen
+ t_sgr0
+ t_underline
+ t_bold
+ t_blink
+ t_reverse
+ t_enter_keypad
+ t_exit_keypad
+ t_enter_mouse
+ t_exit_mouse
+ t_max_funcs
+)
+
+const (
+ coord_invalid = -2
+ attr_invalid = Attribute(0xFFFF)
+)
+
+type input_event struct {
+ data []byte
+ err error
+}
+
+var (
+ // term specific sequences
+ keys []string
+ funcs []string
+
+ // termbox inner state
+ orig_tios syscall_Termios
+ back_buffer cellbuf
+ front_buffer cellbuf
+ termw int
+ termh int
+ input_mode = InputEsc
+ output_mode = OutputNormal
+ out *os.File
+ in int
+ lastfg = attr_invalid
+ lastbg = attr_invalid
+ lastx = coord_invalid
+ lasty = coord_invalid
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ inbuf = make([]byte, 0, 64)
+ outbuf bytes.Buffer
+ sigwinch = make(chan os.Signal, 1)
+ sigio = make(chan os.Signal, 1)
+ quit = make(chan int)
+ input_comm = make(chan input_event)
+ interrupt_comm = make(chan struct{})
+ intbuf = make([]byte, 0, 16)
+
+ // grayscale indexes
+ grayscale = []Attribute{
+ 0, 17, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 232,
+ }
+)
+
+func write_cursor(x, y int) {
+ outbuf.WriteString("\033[")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(y+1), 10))
+ outbuf.WriteString(";")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(x+1), 10))
+ outbuf.WriteString("H")
+}
+
+func write_sgr_fg(a Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[38;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[3")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+func write_sgr_bg(a Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[48;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[4")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+func write_sgr(fg, bg Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[38;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10))
+ outbuf.WriteString("m")
+ outbuf.WriteString("\033[48;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[3")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10))
+ outbuf.WriteString(";4")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+type winsize struct {
+ rows uint16
+ cols uint16
+ xpixels uint16
+ ypixels uint16
+}
+
+func get_term_size(fd uintptr) (int, int) {
+ var sz winsize
+ _, _, _ = syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz)))
+ return int(sz.cols), int(sz.rows)
+}
+
+func send_attr(fg, bg Attribute) {
+ if fg == lastfg && bg == lastbg {
+ return
+ }
+
+ outbuf.WriteString(funcs[t_sgr0])
+
+ var fgcol, bgcol Attribute
+
+ switch output_mode {
+ case Output256:
+ fgcol = fg & 0x1FF
+ bgcol = bg & 0x1FF
+ case Output216:
+ fgcol = fg & 0xFF
+ bgcol = bg & 0xFF
+ if fgcol > 216 {
+ fgcol = ColorDefault
+ }
+ if bgcol > 216 {
+ bgcol = ColorDefault
+ }
+ if fgcol != ColorDefault {
+ fgcol += 0x10
+ }
+ if bgcol != ColorDefault {
+ bgcol += 0x10
+ }
+ case OutputGrayscale:
+ fgcol = fg & 0x1F
+ bgcol = bg & 0x1F
+ if fgcol > 26 {
+ fgcol = ColorDefault
+ }
+ if bgcol > 26 {
+ bgcol = ColorDefault
+ }
+ if fgcol != ColorDefault {
+ fgcol = grayscale[fgcol]
+ }
+ if bgcol != ColorDefault {
+ bgcol = grayscale[bgcol]
+ }
+ default:
+ fgcol = fg & 0x0F
+ bgcol = bg & 0x0F
+ }
+
+ if fgcol != ColorDefault {
+ if bgcol != ColorDefault {
+ write_sgr(fgcol, bgcol)
+ } else {
+ write_sgr_fg(fgcol)
+ }
+ } else if bgcol != ColorDefault {
+ write_sgr_bg(bgcol)
+ }
+
+ if fg&AttrBold != 0 {
+ outbuf.WriteString(funcs[t_bold])
+ }
+ if bg&AttrBold != 0 {
+ outbuf.WriteString(funcs[t_blink])
+ }
+ if fg&AttrUnderline != 0 {
+ outbuf.WriteString(funcs[t_underline])
+ }
+ if fg&AttrReverse|bg&AttrReverse != 0 {
+ outbuf.WriteString(funcs[t_reverse])
+ }
+
+ lastfg, lastbg = fg, bg
+}
+
+func send_char(x, y int, ch rune) {
+ var buf [8]byte
+ n := utf8.EncodeRune(buf[:], ch)
+ if x-1 != lastx || y != lasty {
+ write_cursor(x, y)
+ }
+ lastx, lasty = x, y
+ outbuf.Write(buf[:n])
+}
+
+func flush() error {
+ _, err := io.Copy(out, &outbuf)
+ outbuf.Reset()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func send_clear() error {
+ send_attr(foreground, background)
+ outbuf.WriteString(funcs[t_clear_screen])
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+
+ // we need to invalidate cursor position too and these two vars are
+ // used only for simple cursor positioning optimization, cursor
+ // actually may be in the correct place, but we simply discard
+ // optimization once and it gives us simple solution for the case when
+ // cursor moved
+ lastx = coord_invalid
+ lasty = coord_invalid
+
+ return flush()
+}
+
+func update_size_maybe() error {
+ w, h := get_term_size(out.Fd())
+ if w != termw || h != termh {
+ termw, termh = w, h
+ back_buffer.resize(termw, termh)
+ front_buffer.resize(termw, termh)
+ front_buffer.clear()
+ return send_clear()
+ }
+ return nil
+}
+
+func tcsetattr(fd uintptr, termios *syscall_Termios) error {
+ r, _, e := syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall_TCSETS), uintptr(unsafe.Pointer(termios)))
+ if r != 0 {
+ return os.NewSyscallError("SYS_IOCTL", e)
+ }
+ return nil
+}
+
+func tcgetattr(fd uintptr, termios *syscall_Termios) error {
+ r, _, e := syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall_TCGETS), uintptr(unsafe.Pointer(termios)))
+ if r != 0 {
+ return os.NewSyscallError("SYS_IOCTL", e)
+ }
+ return nil
+}
+
+func parse_mouse_event(event *Event, buf string) (int, bool) {
+ if strings.HasPrefix(buf, "\033[M") && len(buf) >= 6 {
+ // X10 mouse encoding, the simplest one
+ // \033 [ M Cb Cx Cy
+ b := buf[3] - 32
+ switch b & 3 {
+ case 0:
+ if b&64 != 0 {
+ event.Key = MouseWheelUp
+ } else {
+ event.Key = MouseLeft
+ }
+ case 1:
+ if b&64 != 0 {
+ event.Key = MouseWheelDown
+ } else {
+ event.Key = MouseMiddle
+ }
+ case 2:
+ event.Key = MouseRight
+ case 3:
+ event.Key = MouseRelease
+ default:
+ return 6, false
+ }
+ event.Type = EventMouse // KeyEvent by default
+ if b&32 != 0 {
+ event.Mod |= ModMotion
+ }
+
+ // the coord is 1,1 for upper left
+ event.MouseX = int(buf[4]) - 1 - 32
+ event.MouseY = int(buf[5]) - 1 - 32
+ return 6, true
+ } else if strings.HasPrefix(buf, "\033[<") || strings.HasPrefix(buf, "\033[") {
+ // xterm 1006 extended mode or urxvt 1015 extended mode
+ // xterm: \033 [ < Cb ; Cx ; Cy (M or m)
+ // urxvt: \033 [ Cb ; Cx ; Cy M
+
+ // find the first M or m, that's where we stop
+ mi := strings.IndexAny(buf, "Mm")
+ if mi == -1 {
+ return 0, false
+ }
+
+ // whether it's a capital M or not
+ isM := buf[mi] == 'M'
+
+ // whether it's urxvt or not
+ isU := false
+
+ // buf[2] is safe here, because having M or m found means we have at
+ // least 3 bytes in a string
+ if buf[2] == '<' {
+ buf = buf[3:mi]
+ } else {
+ isU = true
+ buf = buf[2:mi]
+ }
+
+ s1 := strings.Index(buf, ";")
+ s2 := strings.LastIndex(buf, ";")
+ // not found or only one ';'
+ if s1 == -1 || s2 == -1 || s1 == s2 {
+ return 0, false
+ }
+
+ n1, err := strconv.ParseInt(buf[0:s1], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ n2, err := strconv.ParseInt(buf[s1+1:s2], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ n3, err := strconv.ParseInt(buf[s2+1:], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+
+ // on urxvt, first number is encoded exactly as in X10, but we need to
+ // make it zero-based, on xterm it is zero-based already
+ if isU {
+ n1 -= 32
+ }
+ switch n1 & 3 {
+ case 0:
+ if n1&64 != 0 {
+ event.Key = MouseWheelUp
+ } else {
+ event.Key = MouseLeft
+ }
+ case 1:
+ if n1&64 != 0 {
+ event.Key = MouseWheelDown
+ } else {
+ event.Key = MouseMiddle
+ }
+ case 2:
+ event.Key = MouseRight
+ case 3:
+ event.Key = MouseRelease
+ default:
+ return mi + 1, false
+ }
+ if !isM {
+ // on xterm mouse release is signaled by lowercase m
+ event.Key = MouseRelease
+ }
+
+ event.Type = EventMouse // KeyEvent by default
+ if n1&32 != 0 {
+ event.Mod |= ModMotion
+ }
+
+ event.MouseX = int(n2) - 1
+ event.MouseY = int(n3) - 1
+ return mi + 1, true
+ }
+
+ return 0, false
+}
+
+func parse_escape_sequence(event *Event, buf []byte) (int, bool) {
+ bufstr := string(buf)
+ for i, key := range keys {
+ if strings.HasPrefix(bufstr, key) {
+ event.Ch = 0
+ event.Key = Key(0xFFFF - i)
+ return len(key), true
+ }
+ }
+
+ // if none of the keys match, let's try mouse seqences
+ return parse_mouse_event(event, bufstr)
+}
+
+func extract_raw_event(data []byte, event *Event) bool {
+ if len(inbuf) == 0 {
+ return false
+ }
+
+ n := len(data)
+ if n == 0 {
+ return false
+ }
+
+ n = copy(data, inbuf)
+ copy(inbuf, inbuf[n:])
+ inbuf = inbuf[:len(inbuf)-n]
+
+ event.N = n
+ event.Type = EventRaw
+ return true
+}
+
+func extract_event(inbuf []byte, event *Event) bool {
+ if len(inbuf) == 0 {
+ event.N = 0
+ return false
+ }
+
+ if inbuf[0] == '\033' {
+ // possible escape sequence
+ if n, ok := parse_escape_sequence(event, inbuf); n != 0 {
+ event.N = n
+ return ok
+ }
+
+ // it's not escape sequence, then it's Alt or Esc, check input_mode
+ switch {
+ case input_mode&InputEsc != 0:
+ // if we're in escape mode, fill Esc event, pop buffer, return success
+ event.Ch = 0
+ event.Key = KeyEsc
+ event.Mod = 0
+ event.N = 1
+ return true
+ case input_mode&InputAlt != 0:
+ // if we're in alt mode, set Alt modifier to event and redo parsing
+ event.Mod = ModAlt
+ ok := extract_event(inbuf[1:], event)
+ if ok {
+ event.N++
+ } else {
+ event.N = 0
+ }
+ return ok
+ default:
+ panic("unreachable")
+ }
+ }
+
+ // if we're here, this is not an escape sequence and not an alt sequence
+ // so, it's a FUNCTIONAL KEY or a UNICODE character
+
+ // first of all check if it's a functional key
+ if Key(inbuf[0]) <= KeySpace || Key(inbuf[0]) == KeyBackspace2 {
+ // fill event, pop buffer, return success
+ event.Ch = 0
+ event.Key = Key(inbuf[0])
+ event.N = 1
+ return true
+ }
+
+ // the only possible option is utf8 rune
+ if r, n := utf8.DecodeRune(inbuf); r != utf8.RuneError {
+ event.Ch = r
+ event.Key = 0
+ event.N = n
+ return true
+ }
+
+ return false
+}
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r, _, e := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd),
+ uintptr(arg))
+ val = int(r)
+ if e != 0 {
+ err = e
+ }
+ return
+}
diff --git a/vendor/github.com/nsf/termbox-go/termbox_common.go b/vendor/github.com/nsf/termbox-go/termbox_common.go
new file mode 100644
index 000000000..c3355cc25
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/termbox_common.go
@@ -0,0 +1,59 @@
+package termbox
+
+// private API, common OS agnostic part
+
+type cellbuf struct {
+ width int
+ height int
+ cells []Cell
+}
+
+func (this *cellbuf) init(width, height int) {
+ this.width = width
+ this.height = height
+ this.cells = make([]Cell, width*height)
+}
+
+func (this *cellbuf) resize(width, height int) {
+ if this.width == width && this.height == height {
+ return
+ }
+
+ oldw := this.width
+ oldh := this.height
+ oldcells := this.cells
+
+ this.init(width, height)
+ this.clear()
+
+ minw, minh := oldw, oldh
+
+ if width < minw {
+ minw = width
+ }
+ if height < minh {
+ minh = height
+ }
+
+ for i := 0; i < minh; i++ {
+ srco, dsto := i*oldw, i*width
+ src := oldcells[srco : srco+minw]
+ dst := this.cells[dsto : dsto+minw]
+ copy(dst, src)
+ }
+}
+
+func (this *cellbuf) clear() {
+ for i := range this.cells {
+ c := &this.cells[i]
+ c.Ch = ' '
+ c.Fg = foreground
+ c.Bg = background
+ }
+}
+
+const cursor_hidden = -1
+
+func is_cursor_hidden(x, y int) bool {
+ return x == cursor_hidden || y == cursor_hidden
+}
diff --git a/vendor/github.com/nsf/termbox-go/termbox_windows.go b/vendor/github.com/nsf/termbox-go/termbox_windows.go
new file mode 100644
index 000000000..f7dad7b8a
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/termbox_windows.go
@@ -0,0 +1,856 @@
+package termbox
+
+import "syscall"
+import "unsafe"
+import "unicode/utf16"
+import "github.com/mattn/go-runewidth"
+
+type (
+ wchar uint16
+ short int16
+ dword uint32
+ word uint16
+ char_info struct {
+ char wchar
+ attr word
+ }
+ coord struct {
+ x short
+ y short
+ }
+ small_rect struct {
+ left short
+ top short
+ right short
+ bottom short
+ }
+ console_screen_buffer_info struct {
+ size coord
+ cursor_position coord
+ attributes word
+ window small_rect
+ maximum_window_size coord
+ }
+ console_cursor_info struct {
+ size dword
+ visible int32
+ }
+ input_record struct {
+ event_type word
+ _ [2]byte
+ event [16]byte
+ }
+ key_event_record struct {
+ key_down int32
+ repeat_count word
+ virtual_key_code word
+ virtual_scan_code word
+ unicode_char wchar
+ control_key_state dword
+ }
+ window_buffer_size_record struct {
+ size coord
+ }
+ mouse_event_record struct {
+ mouse_pos coord
+ button_state dword
+ control_key_state dword
+ event_flags dword
+ }
+)
+
+const (
+ mouse_lmb = 0x1
+ mouse_rmb = 0x2
+ mouse_mmb = 0x4 | 0x8 | 0x10
+)
+
+func (this coord) uintptr() uintptr {
+ return uintptr(*(*int32)(unsafe.Pointer(&this)))
+}
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var is_cjk = runewidth.IsEastAsian()
+
+var (
+ proc_set_console_active_screen_buffer = kernel32.NewProc("SetConsoleActiveScreenBuffer")
+ proc_set_console_screen_buffer_size = kernel32.NewProc("SetConsoleScreenBufferSize")
+ proc_create_console_screen_buffer = kernel32.NewProc("CreateConsoleScreenBuffer")
+ proc_get_console_screen_buffer_info = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ proc_write_console_output = kernel32.NewProc("WriteConsoleOutputW")
+ proc_write_console_output_character = kernel32.NewProc("WriteConsoleOutputCharacterW")
+ proc_write_console_output_attribute = kernel32.NewProc("WriteConsoleOutputAttribute")
+ proc_set_console_cursor_info = kernel32.NewProc("SetConsoleCursorInfo")
+ proc_set_console_cursor_position = kernel32.NewProc("SetConsoleCursorPosition")
+ proc_get_console_cursor_info = kernel32.NewProc("GetConsoleCursorInfo")
+ proc_read_console_input = kernel32.NewProc("ReadConsoleInputW")
+ proc_get_console_mode = kernel32.NewProc("GetConsoleMode")
+ proc_set_console_mode = kernel32.NewProc("SetConsoleMode")
+ proc_fill_console_output_character = kernel32.NewProc("FillConsoleOutputCharacterW")
+ proc_fill_console_output_attribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ proc_create_event = kernel32.NewProc("CreateEventW")
+ proc_wait_for_multiple_objects = kernel32.NewProc("WaitForMultipleObjects")
+ proc_set_event = kernel32.NewProc("SetEvent")
+)
+
+func set_console_active_screen_buffer(h syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_active_screen_buffer.Addr(),
+ 1, uintptr(h), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_screen_buffer_size(h syscall.Handle, size coord) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_screen_buffer_size.Addr(),
+ 2, uintptr(h), size.uintptr(), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func create_console_screen_buffer() (h syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(proc_create_console_screen_buffer.Addr(),
+ 5, uintptr(generic_read|generic_write), 0, 0, console_textmode_buffer, 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return syscall.Handle(r0), err
+}
+
+func get_console_screen_buffer_info(h syscall.Handle, info *console_screen_buffer_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_screen_buffer_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output(h syscall.Handle, chars []char_info, dst small_rect) (err error) {
+ tmp_coord = coord{dst.right - dst.left + 1, dst.bottom - dst.top + 1}
+ tmp_rect = dst
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&chars[0])), tmp_coord.uintptr(),
+ tmp_coord0.uintptr(), uintptr(unsafe.Pointer(&tmp_rect)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output_character(h syscall.Handle, chars []wchar, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output_character.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&chars[0])), uintptr(len(chars)),
+ pos.uintptr(), uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output_attribute(h syscall.Handle, attrs []word, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output_attribute.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&attrs[0])), uintptr(len(attrs)),
+ pos.uintptr(), uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_cursor_info(h syscall.Handle, info *console_cursor_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_cursor_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func get_console_cursor_info(h syscall.Handle, info *console_cursor_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_cursor_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_cursor_position(h syscall.Handle, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_cursor_position.Addr(),
+ 2, uintptr(h), pos.uintptr(), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func read_console_input(h syscall.Handle, record *input_record) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_read_console_input.Addr(),
+ 4, uintptr(h), uintptr(unsafe.Pointer(record)), 1, uintptr(unsafe.Pointer(&tmp_arg)), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func get_console_mode(h syscall.Handle, mode *dword) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_mode.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(mode)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_mode(h syscall.Handle, mode dword) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_mode.Addr(),
+ 2, uintptr(h), uintptr(mode), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func fill_console_output_character(h syscall.Handle, char wchar, n int) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_fill_console_output_character.Addr(),
+ 5, uintptr(h), uintptr(char), uintptr(n), tmp_coord.uintptr(),
+ uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func fill_console_output_attribute(h syscall.Handle, attr word, n int) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_fill_console_output_attribute.Addr(),
+ 5, uintptr(h), uintptr(attr), uintptr(n), tmp_coord.uintptr(),
+ uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func create_event() (out syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(proc_create_event.Addr(),
+ 4, 0, 0, 0, 0, 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return syscall.Handle(r0), err
+}
+
+func wait_for_multiple_objects(objects []syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_wait_for_multiple_objects.Addr(),
+ 4, uintptr(len(objects)), uintptr(unsafe.Pointer(&objects[0])),
+ 0, 0xFFFFFFFF, 0, 0)
+ if uint32(r0) == 0xFFFFFFFF {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_event(ev syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_event.Addr(),
+ 1, uintptr(ev), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+type diff_msg struct {
+ pos short
+ lines short
+ chars []char_info
+}
+
+type input_event struct {
+ event Event
+ err error
+}
+
+var (
+ orig_cursor_info console_cursor_info
+ orig_size coord
+ orig_mode dword
+ orig_screen syscall.Handle
+ back_buffer cellbuf
+ front_buffer cellbuf
+ term_size coord
+ input_mode = InputEsc
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ in syscall.Handle
+ out syscall.Handle
+ interrupt syscall.Handle
+ charbuf []char_info
+ diffbuf []diff_msg
+ beg_x = -1
+ beg_y = -1
+ beg_i = -1
+ input_comm = make(chan Event)
+ interrupt_comm = make(chan struct{})
+ cancel_comm = make(chan bool, 1)
+ cancel_done_comm = make(chan bool)
+ alt_mode_esc = false
+
+ // these ones just to prevent heap allocs at all costs
+ tmp_info console_screen_buffer_info
+ tmp_arg dword
+ tmp_coord0 = coord{0, 0}
+ tmp_coord = coord{0, 0}
+ tmp_rect = small_rect{0, 0, 0, 0}
+)
+
+func get_cursor_position(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return tmp_info.cursor_position
+}
+
+func get_term_size(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return tmp_info.size
+}
+
+func get_win_size(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return coord{
+ x: tmp_info.window.right - tmp_info.window.left + 1,
+ y: tmp_info.window.bottom - tmp_info.window.top + 1,
+ }
+}
+
+func update_size_maybe() {
+ size := get_term_size(out)
+ if size.x != term_size.x || size.y != term_size.y {
+ term_size = size
+ back_buffer.resize(int(size.x), int(size.y))
+ front_buffer.resize(int(size.x), int(size.y))
+ front_buffer.clear()
+ clear()
+
+ area := int(size.x) * int(size.y)
+ if cap(charbuf) < area {
+ charbuf = make([]char_info, 0, area)
+ }
+ }
+}
+
+var color_table_bg = []word{
+ 0, // default (black)
+ 0, // black
+ background_red,
+ background_green,
+ background_red | background_green, // yellow
+ background_blue,
+ background_red | background_blue, // magenta
+ background_green | background_blue, // cyan
+ background_red | background_blue | background_green, // white
+}
+
+var color_table_fg = []word{
+ foreground_red | foreground_blue | foreground_green, // default (white)
+ 0,
+ foreground_red,
+ foreground_green,
+ foreground_red | foreground_green, // yellow
+ foreground_blue,
+ foreground_red | foreground_blue, // magenta
+ foreground_green | foreground_blue, // cyan
+ foreground_red | foreground_blue | foreground_green, // white
+}
+
+const (
+ replacement_char = '\uFFFD'
+ max_rune = '\U0010FFFF'
+ surr1 = 0xd800
+ surr2 = 0xdc00
+ surr3 = 0xe000
+ surr_self = 0x10000
+)
+
+func append_diff_line(y int) int {
+ n := 0
+ for x := 0; x < front_buffer.width; {
+ cell_offset := y*front_buffer.width + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ attr, char := cell_to_char_info(*back)
+ charbuf = append(charbuf, char_info{attr: attr, char: char[0]})
+ *front = *back
+ n++
+ w := runewidth.RuneWidth(back.Ch)
+ if w == 0 || w == 2 && runewidth.IsAmbiguousWidth(back.Ch) {
+ w = 1
+ }
+ x += w
+ // If not CJK, fill trailing space with whitespace
+ if !is_cjk && w == 2 {
+ charbuf = append(charbuf, char_info{attr: attr, char: ' '})
+ }
+ }
+ return n
+}
+
+// compares 'back_buffer' with 'front_buffer' and prepares all changes in the form of
+// 'diff_msg's in the 'diff_buf'
+func prepare_diff_messages() {
+ // clear buffers
+ diffbuf = diffbuf[:0]
+ charbuf = charbuf[:0]
+
+ var diff diff_msg
+ gbeg := 0
+ for y := 0; y < front_buffer.height; y++ {
+ same := true
+ line_offset := y * front_buffer.width
+ for x := 0; x < front_buffer.width; x++ {
+ cell_offset := line_offset + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ if *back != *front {
+ same = false
+ break
+ }
+ }
+ if same && diff.lines > 0 {
+ diffbuf = append(diffbuf, diff)
+ diff = diff_msg{}
+ }
+ if !same {
+ beg := len(charbuf)
+ end := beg + append_diff_line(y)
+ if diff.lines == 0 {
+ diff.pos = short(y)
+ gbeg = beg
+ }
+ diff.lines++
+ diff.chars = charbuf[gbeg:end]
+ }
+ }
+ if diff.lines > 0 {
+ diffbuf = append(diffbuf, diff)
+ diff = diff_msg{}
+ }
+}
+
+func get_ct(table []word, idx int) word {
+ idx = idx & 0x0F
+ if idx >= len(table) {
+ idx = len(table) - 1
+ }
+ return table[idx]
+}
+
+func cell_to_char_info(c Cell) (attr word, wc [2]wchar) {
+ attr = get_ct(color_table_fg, int(c.Fg)) | get_ct(color_table_bg, int(c.Bg))
+ if c.Fg&AttrReverse|c.Bg&AttrReverse != 0 {
+ attr = (attr&0xF0)>>4 | (attr&0x0F)<<4
+ }
+ if c.Fg&AttrBold != 0 {
+ attr |= foreground_intensity
+ }
+ if c.Bg&AttrBold != 0 {
+ attr |= background_intensity
+ }
+
+ r0, r1 := utf16.EncodeRune(c.Ch)
+ if r0 == 0xFFFD {
+ wc[0] = wchar(c.Ch)
+ wc[1] = ' '
+ } else {
+ wc[0] = wchar(r0)
+ wc[1] = wchar(r1)
+ }
+ return
+}
+
+func move_cursor(x, y int) {
+ err := set_console_cursor_position(out, coord{short(x), short(y)})
+ if err != nil {
+ panic(err)
+ }
+}
+
+func show_cursor(visible bool) {
+ var v int32
+ if visible {
+ v = 1
+ }
+
+ var info console_cursor_info
+ info.size = 100
+ info.visible = v
+ err := set_console_cursor_info(out, &info)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func clear() {
+ var err error
+ attr, char := cell_to_char_info(Cell{
+ ' ',
+ foreground,
+ background,
+ })
+
+ area := int(term_size.x) * int(term_size.y)
+ err = fill_console_output_attribute(out, attr, area)
+ if err != nil {
+ panic(err)
+ }
+ err = fill_console_output_character(out, char[0], area)
+ if err != nil {
+ panic(err)
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+}
+
+func key_event_record_to_event(r *key_event_record) (Event, bool) {
+ if r.key_down == 0 {
+ return Event{}, false
+ }
+
+ e := Event{Type: EventKey}
+ if input_mode&InputAlt != 0 {
+ if alt_mode_esc {
+ e.Mod = ModAlt
+ alt_mode_esc = false
+ }
+ if r.control_key_state&(left_alt_pressed|right_alt_pressed) != 0 {
+ e.Mod = ModAlt
+ }
+ }
+
+ ctrlpressed := r.control_key_state&(left_ctrl_pressed|right_ctrl_pressed) != 0
+
+ if r.virtual_key_code >= vk_f1 && r.virtual_key_code <= vk_f12 {
+ switch r.virtual_key_code {
+ case vk_f1:
+ e.Key = KeyF1
+ case vk_f2:
+ e.Key = KeyF2
+ case vk_f3:
+ e.Key = KeyF3
+ case vk_f4:
+ e.Key = KeyF4
+ case vk_f5:
+ e.Key = KeyF5
+ case vk_f6:
+ e.Key = KeyF6
+ case vk_f7:
+ e.Key = KeyF7
+ case vk_f8:
+ e.Key = KeyF8
+ case vk_f9:
+ e.Key = KeyF9
+ case vk_f10:
+ e.Key = KeyF10
+ case vk_f11:
+ e.Key = KeyF11
+ case vk_f12:
+ e.Key = KeyF12
+ default:
+ panic("unreachable")
+ }
+
+ return e, true
+ }
+
+ if r.virtual_key_code <= vk_delete {
+ switch r.virtual_key_code {
+ case vk_insert:
+ e.Key = KeyInsert
+ case vk_delete:
+ e.Key = KeyDelete
+ case vk_home:
+ e.Key = KeyHome
+ case vk_end:
+ e.Key = KeyEnd
+ case vk_pgup:
+ e.Key = KeyPgup
+ case vk_pgdn:
+ e.Key = KeyPgdn
+ case vk_arrow_up:
+ e.Key = KeyArrowUp
+ case vk_arrow_down:
+ e.Key = KeyArrowDown
+ case vk_arrow_left:
+ e.Key = KeyArrowLeft
+ case vk_arrow_right:
+ e.Key = KeyArrowRight
+ case vk_backspace:
+ if ctrlpressed {
+ e.Key = KeyBackspace2
+ } else {
+ e.Key = KeyBackspace
+ }
+ case vk_tab:
+ e.Key = KeyTab
+ case vk_enter:
+ e.Key = KeyEnter
+ case vk_esc:
+ switch {
+ case input_mode&InputEsc != 0:
+ e.Key = KeyEsc
+ case input_mode&InputAlt != 0:
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ case vk_space:
+ if ctrlpressed {
+ // manual return here, because KeyCtrlSpace is zero
+ e.Key = KeyCtrlSpace
+ return e, true
+ } else {
+ e.Key = KeySpace
+ }
+ }
+
+ if e.Key != 0 {
+ return e, true
+ }
+ }
+
+ if ctrlpressed {
+ if Key(r.unicode_char) >= KeyCtrlA && Key(r.unicode_char) <= KeyCtrlRsqBracket {
+ e.Key = Key(r.unicode_char)
+ if input_mode&InputAlt != 0 && e.Key == KeyEsc {
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ return e, true
+ }
+ switch r.virtual_key_code {
+ case 192, 50:
+ // manual return here, because KeyCtrl2 is zero
+ e.Key = KeyCtrl2
+ return e, true
+ case 51:
+ if input_mode&InputAlt != 0 {
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ e.Key = KeyCtrl3
+ case 52:
+ e.Key = KeyCtrl4
+ case 53:
+ e.Key = KeyCtrl5
+ case 54:
+ e.Key = KeyCtrl6
+ case 189, 191, 55:
+ e.Key = KeyCtrl7
+ case 8, 56:
+ e.Key = KeyCtrl8
+ }
+
+ if e.Key != 0 {
+ return e, true
+ }
+ }
+
+ if r.unicode_char != 0 {
+ e.Ch = rune(r.unicode_char)
+ return e, true
+ }
+
+ return Event{}, false
+}
+
+func input_event_producer() {
+ var r input_record
+ var err error
+ var last_button Key
+ var last_button_pressed Key
+ var last_state = dword(0)
+ var last_x, last_y = -1, -1
+ handles := []syscall.Handle{in, interrupt}
+ for {
+ err = wait_for_multiple_objects(handles)
+ if err != nil {
+ input_comm <- Event{Type: EventError, Err: err}
+ }
+
+ select {
+ case <-cancel_comm:
+ cancel_done_comm <- true
+ return
+ default:
+ }
+
+ err = read_console_input(in, &r)
+ if err != nil {
+ input_comm <- Event{Type: EventError, Err: err}
+ }
+
+ switch r.event_type {
+ case key_event:
+ kr := (*key_event_record)(unsafe.Pointer(&r.event))
+ ev, ok := key_event_record_to_event(kr)
+ if ok {
+ for i := 0; i < int(kr.repeat_count); i++ {
+ input_comm <- ev
+ }
+ }
+ case window_buffer_size_event:
+ sr := *(*window_buffer_size_record)(unsafe.Pointer(&r.event))
+ input_comm <- Event{
+ Type: EventResize,
+ Width: int(sr.size.x),
+ Height: int(sr.size.y),
+ }
+ case mouse_event:
+ mr := *(*mouse_event_record)(unsafe.Pointer(&r.event))
+ ev := Event{Type: EventMouse}
+ switch mr.event_flags {
+ case 0, 2:
+ // single or double click
+ cur_state := mr.button_state
+ switch {
+ case last_state&mouse_lmb == 0 && cur_state&mouse_lmb != 0:
+ last_button = MouseLeft
+ last_button_pressed = last_button
+ case last_state&mouse_rmb == 0 && cur_state&mouse_rmb != 0:
+ last_button = MouseRight
+ last_button_pressed = last_button
+ case last_state&mouse_mmb == 0 && cur_state&mouse_mmb != 0:
+ last_button = MouseMiddle
+ last_button_pressed = last_button
+ case last_state&mouse_lmb != 0 && cur_state&mouse_lmb == 0:
+ last_button = MouseRelease
+ case last_state&mouse_rmb != 0 && cur_state&mouse_rmb == 0:
+ last_button = MouseRelease
+ case last_state&mouse_mmb != 0 && cur_state&mouse_mmb == 0:
+ last_button = MouseRelease
+ default:
+ last_state = cur_state
+ continue
+ }
+ last_state = cur_state
+ ev.Key = last_button
+ last_x, last_y = int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ ev.MouseX = last_x
+ ev.MouseY = last_y
+ case 1:
+ // mouse motion
+ x, y := int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ if last_state != 0 && (last_x != x || last_y != y) {
+ ev.Key = last_button_pressed
+ ev.Mod = ModMotion
+ ev.MouseX = x
+ ev.MouseY = y
+ last_x, last_y = x, y
+ } else {
+ ev.Type = EventNone
+ }
+ case 4:
+ // mouse wheel
+ n := int16(mr.button_state >> 16)
+ if n > 0 {
+ ev.Key = MouseWheelUp
+ } else {
+ ev.Key = MouseWheelDown
+ }
+ last_x, last_y = int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ ev.MouseX = last_x
+ ev.MouseY = last_y
+ default:
+ ev.Type = EventNone
+ }
+ if ev.Type != EventNone {
+ input_comm <- ev
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/nsf/termbox-go/terminfo.go b/vendor/github.com/nsf/termbox-go/terminfo.go
new file mode 100644
index 000000000..35dbd70b8
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/terminfo.go
@@ -0,0 +1,221 @@
+// +build !windows
+// This file contains a simple and incomplete implementation of the terminfo
+// database. Information was taken from the ncurses manpages term(5) and
+// terminfo(5). Currently, only the string capabilities for special keys and for
+// functions without parameters are actually used. Colors are still done with
+// ANSI escape sequences. Other special features that are not (yet?) supported
+// are reading from ~/.terminfo, the TERMINFO_DIRS variable, Berkeley database
+// format and extended capabilities.
+
+package termbox
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+const (
+ ti_magic = 0432
+ ti_header_length = 12
+ ti_mouse_enter = "\x1b[?1000h\x1b[?1002h\x1b[?1015h\x1b[?1006h"
+ ti_mouse_leave = "\x1b[?1006l\x1b[?1015l\x1b[?1002l\x1b[?1000l"
+)
+
+func load_terminfo() ([]byte, error) {
+ var data []byte
+ var err error
+
+ term := os.Getenv("TERM")
+ if term == "" {
+ return nil, fmt.Errorf("termbox: TERM not set")
+ }
+
+ // The following behaviour follows the one described in terminfo(5) as
+ // distributed by ncurses.
+
+ terminfo := os.Getenv("TERMINFO")
+ if terminfo != "" {
+ // if TERMINFO is set, no other directory should be searched
+ return ti_try_path(terminfo)
+ }
+
+ // next, consider ~/.terminfo
+ home := os.Getenv("HOME")
+ if home != "" {
+ data, err = ti_try_path(home + "/.terminfo")
+ if err == nil {
+ return data, nil
+ }
+ }
+
+ // next, TERMINFO_DIRS
+ dirs := os.Getenv("TERMINFO_DIRS")
+ if dirs != "" {
+ for _, dir := range strings.Split(dirs, ":") {
+ if dir == "" {
+ // "" -> "/usr/share/terminfo"
+ dir = "/usr/share/terminfo"
+ }
+ data, err = ti_try_path(dir)
+ if err == nil {
+ return data, nil
+ }
+ }
+ }
+
+ // fall back to /usr/share/terminfo
+ return ti_try_path("/usr/share/terminfo")
+}
+
+func ti_try_path(path string) (data []byte, err error) {
+ // load_terminfo already made sure it is set
+ term := os.Getenv("TERM")
+
+ // first try, the typical *nix path
+ terminfo := path + "/" + term[0:1] + "/" + term
+ data, err = ioutil.ReadFile(terminfo)
+ if err == nil {
+ return
+ }
+
+ // fallback to darwin specific dirs structure
+ terminfo = path + "/" + hex.EncodeToString([]byte(term[:1])) + "/" + term
+ data, err = ioutil.ReadFile(terminfo)
+ return
+}
+
+func setup_term_builtin() error {
+ name := os.Getenv("TERM")
+ if name == "" {
+ return errors.New("termbox: TERM environment variable not set")
+ }
+
+ for _, t := range terms {
+ if t.name == name {
+ keys = t.keys
+ funcs = t.funcs
+ return nil
+ }
+ }
+
+ compat_table := []struct {
+ partial string
+ keys []string
+ funcs []string
+ }{
+ {"xterm", xterm_keys, xterm_funcs},
+ {"rxvt", rxvt_unicode_keys, rxvt_unicode_funcs},
+ {"linux", linux_keys, linux_funcs},
+ {"Eterm", eterm_keys, eterm_funcs},
+ {"screen", screen_keys, screen_funcs},
+ // let's assume that 'cygwin' is xterm compatible
+ {"cygwin", xterm_keys, xterm_funcs},
+ {"st", xterm_keys, xterm_funcs},
+ }
+
+ // try compatibility variants
+ for _, it := range compat_table {
+ if strings.Contains(name, it.partial) {
+ keys = it.keys
+ funcs = it.funcs
+ return nil
+ }
+ }
+
+ return errors.New("termbox: unsupported terminal")
+}
+
+func setup_term() (err error) {
+ var data []byte
+ var header [6]int16
+ var str_offset, table_offset int16
+
+ data, err = load_terminfo()
+ if err != nil {
+ return setup_term_builtin()
+ }
+
+ rd := bytes.NewReader(data)
+ // 0: magic number, 1: size of names section, 2: size of boolean section, 3:
+ // size of numbers section (in integers), 4: size of the strings section (in
+ // integers), 5: size of the string table
+
+ err = binary.Read(rd, binary.LittleEndian, header[:])
+ if err != nil {
+ return
+ }
+
+ if (header[1]+header[2])%2 != 0 {
+ // old quirk to align everything on word boundaries
+ header[2] += 1
+ }
+ str_offset = ti_header_length + header[1] + header[2] + 2*header[3]
+ table_offset = str_offset + 2*header[4]
+
+ keys = make([]string, 0xFFFF-key_min)
+ for i, _ := range keys {
+ keys[i], err = ti_read_string(rd, str_offset+2*ti_keys[i], table_offset)
+ if err != nil {
+ return
+ }
+ }
+ funcs = make([]string, t_max_funcs)
+ // the last two entries are reserved for mouse. because the table offset is
+ // not there, the two entries have to fill in manually
+ for i, _ := range funcs[:len(funcs)-2] {
+ funcs[i], err = ti_read_string(rd, str_offset+2*ti_funcs[i], table_offset)
+ if err != nil {
+ return
+ }
+ }
+ funcs[t_max_funcs-2] = ti_mouse_enter
+ funcs[t_max_funcs-1] = ti_mouse_leave
+ return nil
+}
+
+func ti_read_string(rd *bytes.Reader, str_off, table int16) (string, error) {
+ var off int16
+
+ _, err := rd.Seek(int64(str_off), 0)
+ if err != nil {
+ return "", err
+ }
+ err = binary.Read(rd, binary.LittleEndian, &off)
+ if err != nil {
+ return "", err
+ }
+ _, err = rd.Seek(int64(table+off), 0)
+ if err != nil {
+ return "", err
+ }
+ var bs []byte
+ for {
+ b, err := rd.ReadByte()
+ if err != nil {
+ return "", err
+ }
+ if b == byte(0x00) {
+ break
+ }
+ bs = append(bs, b)
+ }
+ return string(bs), nil
+}
+
+// "Maps" the function constants from termbox.go to the number of the respective
+// string capability in the terminfo file. Taken from (ncurses) term.h.
+var ti_funcs = []int16{
+ 28, 40, 16, 13, 5, 39, 36, 27, 26, 34, 89, 88,
+}
+
+// Same as above for the special keys.
+var ti_keys = []int16{
+ 66, 68 /* apparently not a typo; 67 is F10 for whatever reason */, 69, 70,
+ 71, 72, 73, 74, 75, 67, 216, 217, 77, 59, 76, 164, 82, 81, 87, 61, 79, 83,
+}
diff --git a/vendor/github.com/nsf/termbox-go/terminfo_builtin.go b/vendor/github.com/nsf/termbox-go/terminfo_builtin.go
new file mode 100644
index 000000000..a94866067
--- /dev/null
+++ b/vendor/github.com/nsf/termbox-go/terminfo_builtin.go
@@ -0,0 +1,64 @@
+// +build !windows
+
+package termbox
+
+// Eterm
+var eterm_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var eterm_funcs = []string{
+ "\x1b7\x1b[?47h", "\x1b[2J\x1b[?47l\x1b8", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "", "", "", "",
+}
+
+// screen
+var screen_keys = []string{
+ "\x1bOP", "\x1bOQ", "\x1bOR", "\x1bOS", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[1~", "\x1b[4~", "\x1b[5~", "\x1b[6~", "\x1bOA", "\x1bOB", "\x1bOD", "\x1bOC",
+}
+var screen_funcs = []string{
+ "\x1b[?1049h", "\x1b[?1049l", "\x1b[34h\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b[?1h\x1b=", "\x1b[?1l\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// xterm
+var xterm_keys = []string{
+ "\x1bOP", "\x1bOQ", "\x1bOR", "\x1bOS", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1bOH", "\x1bOF", "\x1b[5~", "\x1b[6~", "\x1bOA", "\x1bOB", "\x1bOD", "\x1bOC",
+}
+var xterm_funcs = []string{
+ "\x1b[?1049h", "\x1b[?1049l", "\x1b[?12l\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b(B\x1b[m", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b[?1h\x1b=", "\x1b[?1l\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// rxvt-unicode
+var rxvt_unicode_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var rxvt_unicode_funcs = []string{
+ "\x1b[?1049h", "\x1b[r\x1b[?1049l", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x1b(B", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b=", "\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// linux
+var linux_keys = []string{
+ "\x1b[[A", "\x1b[[B", "\x1b[[C", "\x1b[[D", "\x1b[[E", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[1~", "\x1b[4~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var linux_funcs = []string{
+ "", "", "\x1b[?25h\x1b[?0c", "\x1b[?25l\x1b[?1c", "\x1b[H\x1b[J", "\x1b[0;10m", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "", "", "", "",
+}
+
+// rxvt-256color
+var rxvt_256color_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var rxvt_256color_funcs = []string{
+ "\x1b7\x1b[?47h", "\x1b[2J\x1b[?47l\x1b8", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b=", "\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+var terms = []struct {
+ name string
+ keys []string
+ funcs []string
+}{
+ {"Eterm", eterm_keys, eterm_funcs},
+ {"screen", screen_keys, screen_funcs},
+ {"xterm", xterm_keys, xterm_funcs},
+ {"rxvt-unicode", rxvt_unicode_keys, rxvt_unicode_funcs},
+ {"linux", linux_keys, linux_funcs},
+ {"rxvt-256color", rxvt_256color_keys, rxvt_256color_funcs},
+}
diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml
new file mode 100644
index 000000000..d8156a60b
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+ - 1.4.3
+ - 1.5.3
+ - tip
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md
new file mode 100644
index 000000000..04fdf09f1
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/CONTRIBUTING.md
@@ -0,0 +1,10 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS
new file mode 100644
index 000000000..b382a04ed
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/CONTRIBUTORS
@@ -0,0 +1 @@
+Paul Borman <borman@google.com>
diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE
new file mode 100644
index 000000000..5dc68268d
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md
new file mode 100644
index 000000000..f023d47ca
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/README.md
@@ -0,0 +1,13 @@
+This project was automatically exported from code.google.com/p/go-uuid
+
+# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on [RFC 412](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services.
+
+###### Install
+`go get github.com/pborman/uuid`
+
+###### Documentation
+[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here:
+http://godoc.org/github.com/pborman/uuid
diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go
new file mode 100644
index 000000000..50a0f2d09
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the users UID for the Person
+// domain and the users GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+ uuid := NewUUID()
+ if uuid != nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCEPerson(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCEGroup(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID or false.
+func (uuid UUID) Domain() (Domain, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID or false.
+func (uuid UUID) Id() (uint32, bool) {
+ if v, _ := uuid.Version(); v != 2 {
+ return 0, false
+ }
+ return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go
new file mode 100644
index 000000000..d8bd013e6
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
+package uuid
diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go
new file mode 100644
index 000000000..a0420c1ef
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+ NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+ NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NIL = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 byte in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space)
+ h.Write([]byte(data))
+ s := h.Sum(nil)
+ uuid := make([]byte, 16)
+ copy(uuid, s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
diff --git a/vendor/github.com/pborman/uuid/json.go b/vendor/github.com/pborman/uuid/json.go
new file mode 100644
index 000000000..9dda1dfba
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/json.go
@@ -0,0 +1,34 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "errors"
+
+func (u UUID) MarshalJSON() ([]byte, error) {
+ if len(u) != 16 {
+ return []byte(`""`), nil
+ }
+ var js [38]byte
+ js[0] = '"'
+ encodeHex(js[1:], u)
+ js[37] = '"'
+ return js[:], nil
+}
+
+func (u *UUID) UnmarshalJSON(data []byte) error {
+ if string(data) == `""` {
+ return nil
+ }
+ if data[0] != '"' {
+ return errors.New("invalid UUID format")
+ }
+ data = data[1 : len(data)-1]
+ uu := Parse(string(data))
+ if uu == nil {
+ return errors.New("invalid UUID format")
+ }
+ *u = uu
+ return nil
+}
diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go
new file mode 100644
index 000000000..42d60da8f
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/node.go
@@ -0,0 +1,117 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "net"
+ "sync"
+)
+
+var (
+ nodeMu sync.Mutex
+ interfaces []net.Interface // cached list of interfaces
+ ifname string // name of interface being used
+ nodeID []byte // hardware for version 1 UUIDs
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil && name != "" {
+ return false
+ }
+ }
+
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ if setNodeID(ifs.HardwareAddr) {
+ ifname = ifs.Name
+ return true
+ }
+ }
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ randomBits(nodeID)
+ return true
+ }
+ return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ if nodeID == nil {
+ setNodeInterface("")
+ }
+ nid := make([]byte, 6)
+ copy(nid, nodeID)
+ return nid
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ if setNodeID(id) {
+ ifname = "user"
+ return true
+ }
+ return false
+}
+
+func setNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ if nodeID == nil {
+ nodeID = make([]byte, 6)
+ }
+ copy(nodeID, id)
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ if len(uuid) != 16 {
+ return nil
+ }
+ node := make([]byte, 6)
+ copy(node, uuid[10:])
+ return node
+}
diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go
new file mode 100644
index 000000000..d015bfd13
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/sql.go
@@ -0,0 +1,66 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "errors"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src.(type) {
+ case string:
+ // if an empty UUID comes from a table, we return a null UUID
+ if src.(string) == "" {
+ return nil
+ }
+
+ // see uuid.Parse for required string format
+ parsed := Parse(src.(string))
+
+ if parsed == nil {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = parsed
+ case []byte:
+ b := src.([]byte)
+
+ // if an empty UUID comes from a table, we return a null UUID
+ if len(b) == 0 {
+ return nil
+ }
+
+ // assumes a simple slice of bytes if 16 bytes
+ // otherwise attempts to parse
+ if len(b) == 16 {
+ *uuid = UUID(b)
+ } else {
+ u := Parse(string(b))
+
+ if u == nil {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = u
+ }
+
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+ return uuid.String(), nil
+}
diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go
new file mode 100644
index 000000000..eedf24219
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/time.go
@@ -0,0 +1,132 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
+)
+
+var (
+ timeMu sync.Mutex
+ lasttime uint64 // last time we returned
+ clock_seq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence a new random
+// clock sequence is generated the first time a clock sequence is requested by
+// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated
+// for
+func ClockSequence() int {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clock_seq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clock_seq & 0x3fff)
+}
+
+// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ old_seq := clock_seq
+ clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if old_seq != clock_seq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 000000000..fc8e052c7
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+ b1 := xvalues[x[0]]
+ b2 := xvalues[x[1]]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go
new file mode 100644
index 000000000..7c643cf0a
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,201 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Array is a pass-by-value UUID that can be used as an effecient key in a map.
+type Array [16]byte
+
+// UUID converts uuid into a slice.
+func (uuid Array) UUID() UUID {
+ return uuid[:]
+}
+
+// String returns the string representation of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (uuid Array) String() string {
+ return uuid.UUID().String()
+}
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUIDs version.
+type Version byte
+
+// A Variant represents a UUIDs variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+ return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+ if len(s) == 36+9 {
+ if strings.ToLower(s[:9]) != "urn:uuid:" {
+ return nil
+ }
+ s = s[9:]
+ } else if len(s) != 36 {
+ return nil
+ }
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return nil
+ }
+ var uuid [16]byte
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ if v, ok := xtob(s[x:]); !ok {
+ return nil
+ } else {
+ uuid[i] = v
+ }
+ }
+ return uuid[:]
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+ return bytes.Equal(uuid1, uuid2)
+}
+
+// Array returns an array representation of uuid that can be used as a map key.
+// Array panics if uuid is not valid.
+func (uuid UUID) Array() Array {
+ if len(uuid) != 16 {
+ panic("invalid uuid")
+ }
+ var a Array
+ copy(a[:], uuid)
+ return a
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// , or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ if len(uuid) != 16 {
+ return ""
+ }
+ var buf [36]byte
+ encodeHex(buf[:], uuid)
+ return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ if len(uuid) != 16 {
+ return ""
+ }
+ var buf [36 + 9]byte
+ copy(buf[:], "urn:uuid:")
+ encodeHex(buf[9:], uuid)
+ return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+ hex.Encode(dst[:], uuid[:4])
+ dst[8] = '-'
+ hex.Encode(dst[9:13], uuid[4:6])
+ dst[13] = '-'
+ hex.Encode(dst[14:18], uuid[6:8])
+ dst[18] = '-'
+ hex.Encode(dst[19:23], uuid[8:10])
+ dst[23] = '-'
+ hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+ if len(uuid) != 16 {
+ return Invalid
+ }
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+ if len(uuid) != 16 {
+ return 0, false
+ }
+ return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 000000000..0127eacfa
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current NewUUID returns nil.
+func NewUUID() UUID {
+ if nodeID == nil {
+ SetNodeInterface("")
+ }
+
+ now, seq, err := GetTime()
+ if err != nil {
+ return nil
+ }
+
+ uuid := make([]byte, 16)
+
+ time_low := uint32(now & 0xffffffff)
+ time_mid := uint16((now >> 32) & 0xffff)
+ time_hi := uint16((now >> 48) & 0x0fff)
+ time_hi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], time_low)
+ binary.BigEndian.PutUint16(uuid[4:], time_mid)
+ binary.BigEndian.PutUint16(uuid[6:], time_hi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+ copy(uuid[10:], nodeID)
+
+ return uuid
+}
diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 000000000..b3d4a368d
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// Random returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, that
+// means the probability is about 0.00000000006 (6 × 10−11),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+ uuid := make([]byte, 16)
+ randomBits([]byte(uuid))
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid
+}
diff --git a/vendor/github.com/peterh/liner/COPYING b/vendor/github.com/peterh/liner/COPYING
new file mode 100644
index 000000000..9e8c9f206
--- /dev/null
+++ b/vendor/github.com/peterh/liner/COPYING
@@ -0,0 +1,21 @@
+Copyright © 2012 Peter Harris
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/peterh/liner/README.md b/vendor/github.com/peterh/liner/README.md
new file mode 100644
index 000000000..9148b2492
--- /dev/null
+++ b/vendor/github.com/peterh/liner/README.md
@@ -0,0 +1,100 @@
+Liner
+=====
+
+Liner is a command line editor with history. It was inspired by linenoise;
+everything Unix-like is a VT100 (or is trying very hard to be). If your
+terminal is not pretending to be a VT100, change it. Liner also support
+Windows.
+
+Liner is released under the X11 license (which is similar to the new BSD
+license).
+
+Line Editing
+------------
+
+The following line editing commands are supported on platforms and terminals
+that Liner supports:
+
+Keystroke | Action
+--------- | ------
+Ctrl-A, Home | Move cursor to beginning of line
+Ctrl-E, End | Move cursor to end of line
+Ctrl-B, Left | Move cursor one character left
+Ctrl-F, Right| Move cursor one character right
+Ctrl-Left, Alt-B | Move cursor to previous word
+Ctrl-Right, Alt-F | Move cursor to next word
+Ctrl-D, Del | (if line is *not* empty) Delete character under cursor
+Ctrl-D | (if line *is* empty) End of File - usually quits application
+Ctrl-C | Reset input (create new empty prompt)
+Ctrl-L | Clear screen (line is unmodified)
+Ctrl-T | Transpose previous character with current character
+Ctrl-H, BackSpace | Delete character before cursor
+Ctrl-W | Delete word leading up to cursor
+Ctrl-K | Delete from cursor to end of line
+Ctrl-U | Delete from start of line to cursor
+Ctrl-P, Up | Previous match from history
+Ctrl-N, Down | Next match from history
+Ctrl-R | Reverse Search history (Ctrl-S forward, Ctrl-G cancel)
+Ctrl-Y | Paste from Yank buffer (Alt-Y to paste next yank instead)
+Tab | Next completion
+Shift-Tab | (after Tab) Previous completion
+
+Getting started
+-----------------
+
+```go
+package main
+
+import (
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/peterh/liner"
+)
+
+var (
+ history_fn = filepath.Join(os.TempDir(), ".liner_example_history")
+ names = []string{"john", "james", "mary", "nancy"}
+)
+
+func main() {
+ line := liner.NewLiner()
+ defer line.Close()
+
+ line.SetCtrlCAborts(true)
+
+ line.SetCompleter(func(line string) (c []string) {
+ for _, n := range names {
+ if strings.HasPrefix(n, strings.ToLower(line)) {
+ c = append(c, n)
+ }
+ }
+ return
+ })
+
+ if f, err := os.Open(history_fn); err == nil {
+ line.ReadHistory(f)
+ f.Close()
+ }
+
+ if name, err := line.Prompt("What is your name? "); err == nil {
+ log.Print("Got: ", name)
+ line.AppendHistory(name)
+ } else if err == liner.ErrPromptAborted {
+ log.Print("Aborted")
+ } else {
+ log.Print("Error reading line: ", err)
+ }
+
+ if f, err := os.Create(history_fn); err != nil {
+ log.Print("Error writing history file: ", err)
+ } else {
+ line.WriteHistory(f)
+ f.Close()
+ }
+}
+```
+
+For documentation, see http://godoc.org/github.com/peterh/liner
diff --git a/vendor/github.com/peterh/liner/bsdinput.go b/vendor/github.com/peterh/liner/bsdinput.go
new file mode 100644
index 000000000..35933982f
--- /dev/null
+++ b/vendor/github.com/peterh/liner/bsdinput.go
@@ -0,0 +1,41 @@
+// +build openbsd freebsd netbsd
+
+package liner
+
+import "syscall"
+
+const (
+ getTermios = syscall.TIOCGETA
+ setTermios = syscall.TIOCSETA
+)
+
+const (
+ // Input flags
+ inpck = 0x010
+ istrip = 0x020
+ icrnl = 0x100
+ ixon = 0x200
+
+ // Output flags
+ opost = 0x1
+
+ // Control flags
+ cs8 = 0x300
+
+ // Local flags
+ isig = 0x080
+ icanon = 0x100
+ iexten = 0x400
+)
+
+type termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]byte
+ Ispeed int32
+ Ospeed int32
+}
+
+const cursorColumn = false
diff --git a/vendor/github.com/peterh/liner/common.go b/vendor/github.com/peterh/liner/common.go
new file mode 100644
index 000000000..b6162b624
--- /dev/null
+++ b/vendor/github.com/peterh/liner/common.go
@@ -0,0 +1,237 @@
+/*
+Package liner implements a simple command line editor, inspired by linenoise
+(https://github.com/antirez/linenoise/). This package supports WIN32 in
+addition to the xterm codes supported by everything else.
+*/
+package liner
+
+import (
+ "bufio"
+ "container/ring"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+type commonState struct {
+ terminalSupported bool
+ outputRedirected bool
+ inputRedirected bool
+ history []string
+ historyMutex sync.RWMutex
+ completer WordCompleter
+ columns int
+ killRing *ring.Ring
+ ctrlCAborts bool
+ r *bufio.Reader
+ tabStyle TabStyle
+ multiLineMode bool
+ cursorRows int
+ maxRows int
+ shouldRestart ShouldRestart
+}
+
+// TabStyle is used to select how tab completions are displayed.
+type TabStyle int
+
+// Two tab styles are currently available:
+//
+// TabCircular cycles through each completion item and displays it directly on
+// the prompt
+//
+// TabPrints prints the list of completion items to the screen after a second
+// tab key is pressed. This behaves similar to GNU readline and BASH (which
+// uses readline)
+const (
+ TabCircular TabStyle = iota
+ TabPrints
+)
+
+// ErrPromptAborted is returned from Prompt or PasswordPrompt when the user presses Ctrl-C
+// if SetCtrlCAborts(true) has been called on the State
+var ErrPromptAborted = errors.New("prompt aborted")
+
+// ErrNotTerminalOutput is returned from Prompt or PasswordPrompt if the
+// platform is normally supported, but stdout has been redirected
+var ErrNotTerminalOutput = errors.New("standard output is not a terminal")
+
+// Max elements to save on the killring
+const KillRingMax = 60
+
+// HistoryLimit is the maximum number of entries saved in the scrollback history.
+const HistoryLimit = 1000
+
+// ReadHistory reads scrollback history from r. Returns the number of lines
+// read, and any read error (except io.EOF).
+func (s *State) ReadHistory(r io.Reader) (num int, err error) {
+ s.historyMutex.Lock()
+ defer s.historyMutex.Unlock()
+
+ in := bufio.NewReader(r)
+ num = 0
+ for {
+ line, part, err := in.ReadLine()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return num, err
+ }
+ if part {
+ return num, fmt.Errorf("line %d is too long", num+1)
+ }
+ if !utf8.Valid(line) {
+ return num, fmt.Errorf("invalid string at line %d", num+1)
+ }
+ num++
+ s.history = append(s.history, string(line))
+ if len(s.history) > HistoryLimit {
+ s.history = s.history[1:]
+ }
+ }
+ return num, nil
+}
+
+// WriteHistory writes scrollback history to w. Returns the number of lines
+// successfully written, and any write error.
+//
+// Unlike the rest of liner's API, WriteHistory is safe to call
+// from another goroutine while Prompt is in progress.
+// This exception is to facilitate the saving of the history buffer
+// during an unexpected exit (for example, due to Ctrl-C being invoked)
+func (s *State) WriteHistory(w io.Writer) (num int, err error) {
+ s.historyMutex.RLock()
+ defer s.historyMutex.RUnlock()
+
+ for _, item := range s.history {
+ _, err := fmt.Fprintln(w, item)
+ if err != nil {
+ return num, err
+ }
+ num++
+ }
+ return num, nil
+}
+
+// AppendHistory appends an entry to the scrollback history. AppendHistory
+// should be called iff Prompt returns a valid command.
+func (s *State) AppendHistory(item string) {
+ s.historyMutex.Lock()
+ defer s.historyMutex.Unlock()
+
+ if len(s.history) > 0 {
+ if item == s.history[len(s.history)-1] {
+ return
+ }
+ }
+ s.history = append(s.history, item)
+ if len(s.history) > HistoryLimit {
+ s.history = s.history[1:]
+ }
+}
+
+// Returns the history lines starting with prefix
+func (s *State) getHistoryByPrefix(prefix string) (ph []string) {
+ for _, h := range s.history {
+ if strings.HasPrefix(h, prefix) {
+ ph = append(ph, h)
+ }
+ }
+ return
+}
+
+// Returns the history lines matching the inteligent search
+func (s *State) getHistoryByPattern(pattern string) (ph []string, pos []int) {
+ if pattern == "" {
+ return
+ }
+ for _, h := range s.history {
+ if i := strings.Index(h, pattern); i >= 0 {
+ ph = append(ph, h)
+ pos = append(pos, i)
+ }
+ }
+ return
+}
+
+// Completer takes the currently edited line content at the left of the cursor
+// and returns a list of completion candidates.
+// If the line is "Hello, wo!!!" and the cursor is before the first '!', "Hello, wo" is passed
+// to the completer which may return {"Hello, world", "Hello, Word"} to have "Hello, world!!!".
+type Completer func(line string) []string
+
+// WordCompleter takes the currently edited line with the cursor position and
+// returns the completion candidates for the partial word to be completed.
+// If the line is "Hello, wo!!!" and the cursor is before the first '!', ("Hello, wo!!!", 9) is passed
+// to the completer which may returns ("Hello, ", {"world", "Word"}, "!!!") to have "Hello, world!!!".
+type WordCompleter func(line string, pos int) (head string, completions []string, tail string)
+
+// SetCompleter sets the completion function that Liner will call to
+// fetch completion candidates when the user presses tab.
+func (s *State) SetCompleter(f Completer) {
+ if f == nil {
+ s.completer = nil
+ return
+ }
+ s.completer = func(line string, pos int) (string, []string, string) {
+ return "", f(string([]rune(line)[:pos])), string([]rune(line)[pos:])
+ }
+}
+
+// SetWordCompleter sets the completion function that Liner will call to
+// fetch completion candidates when the user presses tab.
+func (s *State) SetWordCompleter(f WordCompleter) {
+ s.completer = f
+}
+
+// SetTabCompletionStyle sets the behvavior when the Tab key is pressed
+// for auto-completion. TabCircular is the default behavior and cycles
+// through the list of candidates at the prompt. TabPrints will print
+// the available completion candidates to the screen similar to BASH
+// and GNU Readline
+func (s *State) SetTabCompletionStyle(tabStyle TabStyle) {
+ s.tabStyle = tabStyle
+}
+
+// ModeApplier is the interface that wraps a representation of the terminal
+// mode. ApplyMode sets the terminal to this mode.
+type ModeApplier interface {
+ ApplyMode() error
+}
+
+// SetCtrlCAborts sets whether Prompt on a supported terminal will return an
+// ErrPromptAborted when Ctrl-C is pressed. The default is false (will not
+// return when Ctrl-C is pressed). Unsupported terminals typically raise SIGINT
+// (and Prompt does not return) regardless of the value passed to SetCtrlCAborts.
+func (s *State) SetCtrlCAborts(aborts bool) {
+ s.ctrlCAborts = aborts
+}
+
+// SetMultiLineMode sets whether line is auto-wrapped. The default is false (single line).
+func (s *State) SetMultiLineMode(mlmode bool) {
+ s.multiLineMode = mlmode
+}
+
+// ShouldRestart is passed the error generated by readNext and returns true if
+// the the read should be restarted or false if the error should be returned.
+type ShouldRestart func(err error) bool
+
+// SetShouldRestart sets the restart function that Liner will call to determine
+// whether to retry the call to, or return the error returned by, readNext.
+func (s *State) SetShouldRestart(f ShouldRestart) {
+ s.shouldRestart = f
+}
+
+func (s *State) promptUnsupported(p string) (string, error) {
+ if !s.inputRedirected || !s.terminalSupported {
+ fmt.Print(p)
+ }
+ linebuf, _, err := s.r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+ return string(linebuf), nil
+}
diff --git a/vendor/github.com/peterh/liner/fallbackinput.go b/vendor/github.com/peterh/liner/fallbackinput.go
new file mode 100644
index 000000000..d9eb79d9e
--- /dev/null
+++ b/vendor/github.com/peterh/liner/fallbackinput.go
@@ -0,0 +1,57 @@
+// +build !windows,!linux,!darwin,!openbsd,!freebsd,!netbsd
+
+package liner
+
+import (
+ "bufio"
+ "errors"
+ "os"
+)
+
+// State represents an open terminal
+type State struct {
+ commonState
+}
+
+// Prompt displays p, and then waits for user input. Prompt does not support
+// line editing on this operating system.
+func (s *State) Prompt(p string) (string, error) {
+ return s.promptUnsupported(p)
+}
+
+// PasswordPrompt is not supported in this OS.
+func (s *State) PasswordPrompt(p string) (string, error) {
+ return "", errors.New("liner: function not supported in this terminal")
+}
+
+// NewLiner initializes a new *State
+//
+// Note that this operating system uses a fallback mode without line
+// editing. Patches welcome.
+func NewLiner() *State {
+ var s State
+ s.r = bufio.NewReader(os.Stdin)
+ return &s
+}
+
+// Close returns the terminal to its previous mode
+func (s *State) Close() error {
+ return nil
+}
+
+// TerminalSupported returns false because line editing is not
+// supported on this platform.
+func TerminalSupported() bool {
+ return false
+}
+
+type noopMode struct{}
+
+func (n noopMode) ApplyMode() error {
+ return nil
+}
+
+// TerminalMode returns a noop InputModeSetter on this platform.
+func TerminalMode() (ModeApplier, error) {
+ return noopMode{}, nil
+}
diff --git a/vendor/github.com/peterh/liner/input.go b/vendor/github.com/peterh/liner/input.go
new file mode 100644
index 000000000..0ee6be7af
--- /dev/null
+++ b/vendor/github.com/peterh/liner/input.go
@@ -0,0 +1,367 @@
+// +build linux darwin openbsd freebsd netbsd
+
+package liner
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+)
+
+type nexter struct {
+ r rune
+ err error
+}
+
+// State represents an open terminal
+type State struct {
+ commonState
+ origMode termios
+ defaultMode termios
+ next <-chan nexter
+ winch chan os.Signal
+ pending []rune
+ useCHA bool
+}
+
+// NewLiner initializes a new *State, and sets the terminal into raw mode. To
+// restore the terminal to its previous state, call State.Close().
+//
+// Note if you are still using Go 1.0: NewLiner handles SIGWINCH, so it will
+// leak a channel every time you call it. Therefore, it is recommened that you
+// upgrade to a newer release of Go, or ensure that NewLiner is only called
+// once.
+func NewLiner() *State {
+ var s State
+ s.r = bufio.NewReader(os.Stdin)
+
+ s.terminalSupported = TerminalSupported()
+ if m, err := TerminalMode(); err == nil {
+ s.origMode = *m.(*termios)
+ } else {
+ s.inputRedirected = true
+ }
+ if _, err := getMode(syscall.Stdout); err != 0 {
+ s.outputRedirected = true
+ }
+ if s.inputRedirected && s.outputRedirected {
+ s.terminalSupported = false
+ }
+ if s.terminalSupported && !s.inputRedirected && !s.outputRedirected {
+ mode := s.origMode
+ mode.Iflag &^= icrnl | inpck | istrip | ixon
+ mode.Cflag |= cs8
+ mode.Lflag &^= syscall.ECHO | icanon | iexten
+ mode.ApplyMode()
+
+ winch := make(chan os.Signal, 1)
+ signal.Notify(winch, syscall.SIGWINCH)
+ s.winch = winch
+
+ s.checkOutput()
+ }
+
+ if !s.outputRedirected {
+ s.outputRedirected = !s.getColumns()
+ }
+
+ return &s
+}
+
+var errTimedOut = errors.New("timeout")
+
+func (s *State) startPrompt() {
+ if s.terminalSupported {
+ if m, err := TerminalMode(); err == nil {
+ s.defaultMode = *m.(*termios)
+ mode := s.defaultMode
+ mode.Lflag &^= isig
+ mode.ApplyMode()
+ }
+ }
+ s.restartPrompt()
+}
+
+func (s *State) restartPrompt() {
+ next := make(chan nexter)
+ go func() {
+ for {
+ var n nexter
+ n.r, _, n.err = s.r.ReadRune()
+ next <- n
+ // Shut down nexter loop when an end condition has been reached
+ if n.err != nil || n.r == '\n' || n.r == '\r' || n.r == ctrlC || n.r == ctrlD {
+ close(next)
+ return
+ }
+ }
+ }()
+ s.next = next
+}
+
+func (s *State) stopPrompt() {
+ if s.terminalSupported {
+ s.defaultMode.ApplyMode()
+ }
+}
+
+func (s *State) nextPending(timeout <-chan time.Time) (rune, error) {
+ select {
+ case thing, ok := <-s.next:
+ if !ok {
+ return 0, errors.New("liner: internal error")
+ }
+ if thing.err != nil {
+ return 0, thing.err
+ }
+ s.pending = append(s.pending, thing.r)
+ return thing.r, nil
+ case <-timeout:
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, errTimedOut
+ }
+ // not reached
+ return 0, nil
+}
+
+func (s *State) readNext() (interface{}, error) {
+ if len(s.pending) > 0 {
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ var r rune
+ select {
+ case thing, ok := <-s.next:
+ if !ok {
+ return 0, errors.New("liner: internal error")
+ }
+ if thing.err != nil {
+ return nil, thing.err
+ }
+ r = thing.r
+ case <-s.winch:
+ s.getColumns()
+ return winch, nil
+ }
+ if r != esc {
+ return r, nil
+ }
+ s.pending = append(s.pending, r)
+
+ // Wait at most 50 ms for the rest of the escape sequence
+ // If nothing else arrives, it was an actual press of the esc key
+ timeout := time.After(50 * time.Millisecond)
+ flag, err := s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ return flag, nil
+ }
+ return unknown, err
+ }
+
+ switch flag {
+ case '[':
+ code, err := s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ return code, nil
+ }
+ return unknown, err
+ }
+ switch code {
+ case 'A':
+ s.pending = s.pending[:0] // escape code complete
+ return up, nil
+ case 'B':
+ s.pending = s.pending[:0] // escape code complete
+ return down, nil
+ case 'C':
+ s.pending = s.pending[:0] // escape code complete
+ return right, nil
+ case 'D':
+ s.pending = s.pending[:0] // escape code complete
+ return left, nil
+ case 'F':
+ s.pending = s.pending[:0] // escape code complete
+ return end, nil
+ case 'H':
+ s.pending = s.pending[:0] // escape code complete
+ return home, nil
+ case 'Z':
+ s.pending = s.pending[:0] // escape code complete
+ return shiftTab, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ num := []rune{code}
+ for {
+ code, err := s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ return code, nil
+ }
+ return nil, err
+ }
+ switch code {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ num = append(num, code)
+ case ';':
+ // Modifier code to follow
+ // This only supports Ctrl-left and Ctrl-right for now
+ x, _ := strconv.ParseInt(string(num), 10, 32)
+ if x != 1 {
+ // Can't be left or right
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ num = num[:0]
+ for {
+ code, err = s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ return nil, err
+ }
+ switch code {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ num = append(num, code)
+ case 'C', 'D':
+ // right, left
+ mod, _ := strconv.ParseInt(string(num), 10, 32)
+ if mod != 5 {
+ // Not bare Ctrl
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ s.pending = s.pending[:0] // escape code complete
+ if code == 'C' {
+ return wordRight, nil
+ }
+ return wordLeft, nil
+ default:
+ // Not left or right
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ }
+ case '~':
+ s.pending = s.pending[:0] // escape code complete
+ x, _ := strconv.ParseInt(string(num), 10, 32)
+ switch x {
+ case 2:
+ return insert, nil
+ case 3:
+ return del, nil
+ case 5:
+ return pageUp, nil
+ case 6:
+ return pageDown, nil
+ case 7:
+ return home, nil
+ case 8:
+ return end, nil
+ case 15:
+ return f5, nil
+ case 17:
+ return f6, nil
+ case 18:
+ return f7, nil
+ case 19:
+ return f8, nil
+ case 20:
+ return f9, nil
+ case 21:
+ return f10, nil
+ case 23:
+ return f11, nil
+ case 24:
+ return f12, nil
+ default:
+ return unknown, nil
+ }
+ default:
+ // unrecognized escape code
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+ }
+ }
+
+ case 'O':
+ code, err := s.nextPending(timeout)
+ if err != nil {
+ if err == errTimedOut {
+ return code, nil
+ }
+ return nil, err
+ }
+ s.pending = s.pending[:0] // escape code complete
+ switch code {
+ case 'c':
+ return wordRight, nil
+ case 'd':
+ return wordLeft, nil
+ case 'H':
+ return home, nil
+ case 'F':
+ return end, nil
+ case 'P':
+ return f1, nil
+ case 'Q':
+ return f2, nil
+ case 'R':
+ return f3, nil
+ case 'S':
+ return f4, nil
+ default:
+ return unknown, nil
+ }
+ case 'b':
+ s.pending = s.pending[:0] // escape code complete
+ return altB, nil
+ case 'f':
+ s.pending = s.pending[:0] // escape code complete
+ return altF, nil
+ case 'y':
+ s.pending = s.pending[:0] // escape code complete
+ return altY, nil
+ default:
+ rv := s.pending[0]
+ s.pending = s.pending[1:]
+ return rv, nil
+ }
+
+ // not reached
+ return r, nil
+}
+
+// Close returns the terminal to its previous mode
+func (s *State) Close() error {
+ stopSignal(s.winch)
+ if !s.inputRedirected {
+ s.origMode.ApplyMode()
+ }
+ return nil
+}
+
+// TerminalSupported returns true if the current terminal supports
+// line editing features, and false if liner will use the 'dumb'
+// fallback for input.
+// Note that TerminalSupported does not check all factors that may
+// cause liner to not fully support the terminal (such as stdin redirection)
+func TerminalSupported() bool {
+ bad := map[string]bool{"": true, "dumb": true, "cons25": true}
+ return !bad[strings.ToLower(os.Getenv("TERM"))]
+}
diff --git a/vendor/github.com/peterh/liner/input_darwin.go b/vendor/github.com/peterh/liner/input_darwin.go
new file mode 100644
index 000000000..e98ab4a49
--- /dev/null
+++ b/vendor/github.com/peterh/liner/input_darwin.go
@@ -0,0 +1,43 @@
+// +build darwin
+
+package liner
+
+import "syscall"
+
+const (
+ getTermios = syscall.TIOCGETA
+ setTermios = syscall.TIOCSETA
+)
+
+const (
+ // Input flags
+ inpck = 0x010
+ istrip = 0x020
+ icrnl = 0x100
+ ixon = 0x200
+
+ // Output flags
+ opost = 0x1
+
+ // Control flags
+ cs8 = 0x300
+
+ // Local flags
+ isig = 0x080
+ icanon = 0x100
+ iexten = 0x400
+)
+
+type termios struct {
+ Iflag uintptr
+ Oflag uintptr
+ Cflag uintptr
+ Lflag uintptr
+ Cc [20]byte
+ Ispeed uintptr
+ Ospeed uintptr
+}
+
+// Terminal.app needs a column for the cursor when the input line is at the
+// bottom of the window.
+const cursorColumn = true
diff --git a/vendor/github.com/peterh/liner/input_linux.go b/vendor/github.com/peterh/liner/input_linux.go
new file mode 100644
index 000000000..56ed185fa
--- /dev/null
+++ b/vendor/github.com/peterh/liner/input_linux.go
@@ -0,0 +1,28 @@
+// +build linux
+
+package liner
+
+import "syscall"
+
+const (
+ getTermios = syscall.TCGETS
+ setTermios = syscall.TCSETS
+)
+
+const (
+ icrnl = syscall.ICRNL
+ inpck = syscall.INPCK
+ istrip = syscall.ISTRIP
+ ixon = syscall.IXON
+ opost = syscall.OPOST
+ cs8 = syscall.CS8
+ isig = syscall.ISIG
+ icanon = syscall.ICANON
+ iexten = syscall.IEXTEN
+)
+
+type termios struct {
+ syscall.Termios
+}
+
+const cursorColumn = false
diff --git a/vendor/github.com/peterh/liner/input_windows.go b/vendor/github.com/peterh/liner/input_windows.go
new file mode 100644
index 000000000..199b9428e
--- /dev/null
+++ b/vendor/github.com/peterh/liner/input_windows.go
@@ -0,0 +1,324 @@
+package liner
+
+import (
+ "bufio"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGetStdHandle = kernel32.NewProc("GetStdHandle")
+ procReadConsoleInput = kernel32.NewProc("ReadConsoleInputW")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+)
+
+// These names are from the Win32 api, so they use underscores (contrary to
+// what golint suggests)
+const (
+ std_input_handle = uint32(-10 & 0xFFFFFFFF)
+ std_output_handle = uint32(-11 & 0xFFFFFFFF)
+ std_error_handle = uint32(-12 & 0xFFFFFFFF)
+ invalid_handle_value = ^uintptr(0)
+)
+
+type inputMode uint32
+
+// State represents an open terminal
+type State struct {
+ commonState
+ handle syscall.Handle
+ hOut syscall.Handle
+ origMode inputMode
+ defaultMode inputMode
+ key interface{}
+ repeat uint16
+}
+
+const (
+ enableEchoInput = 0x4
+ enableInsertMode = 0x20
+ enableLineInput = 0x2
+ enableMouseInput = 0x10
+ enableProcessedInput = 0x1
+ enableQuickEditMode = 0x40
+ enableWindowInput = 0x8
+)
+
+// NewLiner initializes a new *State, and sets the terminal into raw mode. To
+// restore the terminal to its previous state, call State.Close().
+func NewLiner() *State {
+ var s State
+ hIn, _, _ := procGetStdHandle.Call(uintptr(std_input_handle))
+ s.handle = syscall.Handle(hIn)
+ hOut, _, _ := procGetStdHandle.Call(uintptr(std_output_handle))
+ s.hOut = syscall.Handle(hOut)
+
+ s.terminalSupported = true
+ if m, err := TerminalMode(); err == nil {
+ s.origMode = m.(inputMode)
+ mode := s.origMode
+ mode &^= enableEchoInput
+ mode &^= enableInsertMode
+ mode &^= enableLineInput
+ mode &^= enableMouseInput
+ mode |= enableWindowInput
+ mode.ApplyMode()
+ } else {
+ s.inputRedirected = true
+ s.r = bufio.NewReader(os.Stdin)
+ }
+
+ s.getColumns()
+ s.outputRedirected = s.columns <= 0
+
+ return &s
+}
+
+// These names are from the Win32 api, so they use underscores (contrary to
+// what golint suggests)
+const (
+ focus_event = 0x0010
+ key_event = 0x0001
+ menu_event = 0x0008
+ mouse_event = 0x0002
+ window_buffer_size_event = 0x0004
+)
+
+type input_record struct {
+ eventType uint16
+ pad uint16
+ blob [16]byte
+}
+
+type key_event_record struct {
+ KeyDown int32
+ RepeatCount uint16
+ VirtualKeyCode uint16
+ VirtualScanCode uint16
+ Char int16
+ ControlKeyState uint32
+}
+
+// These names are from the Win32 api, so they use underscores (contrary to
+// what golint suggests)
+const (
+ vk_tab = 0x09
+ vk_prior = 0x21
+ vk_next = 0x22
+ vk_end = 0x23
+ vk_home = 0x24
+ vk_left = 0x25
+ vk_up = 0x26
+ vk_right = 0x27
+ vk_down = 0x28
+ vk_insert = 0x2d
+ vk_delete = 0x2e
+ vk_f1 = 0x70
+ vk_f2 = 0x71
+ vk_f3 = 0x72
+ vk_f4 = 0x73
+ vk_f5 = 0x74
+ vk_f6 = 0x75
+ vk_f7 = 0x76
+ vk_f8 = 0x77
+ vk_f9 = 0x78
+ vk_f10 = 0x79
+ vk_f11 = 0x7a
+ vk_f12 = 0x7b
+ bKey = 0x42
+ fKey = 0x46
+ yKey = 0x59
+)
+
+const (
+ shiftPressed = 0x0010
+ leftAltPressed = 0x0002
+ leftCtrlPressed = 0x0008
+ rightAltPressed = 0x0001
+ rightCtrlPressed = 0x0004
+
+ modKeys = shiftPressed | leftAltPressed | rightAltPressed | leftCtrlPressed | rightCtrlPressed
+)
+
+func (s *State) readNext() (interface{}, error) {
+ if s.repeat > 0 {
+ s.repeat--
+ return s.key, nil
+ }
+
+ var input input_record
+ pbuf := uintptr(unsafe.Pointer(&input))
+ var rv uint32
+ prv := uintptr(unsafe.Pointer(&rv))
+
+ for {
+ ok, _, err := procReadConsoleInput.Call(uintptr(s.handle), pbuf, 1, prv)
+
+ if ok == 0 {
+ return nil, err
+ }
+
+ if input.eventType == window_buffer_size_event {
+ xy := (*coord)(unsafe.Pointer(&input.blob[0]))
+ s.columns = int(xy.x)
+ if s.columns > 1 {
+ s.columns--
+ }
+ return winch, nil
+ }
+ if input.eventType != key_event {
+ continue
+ }
+ ke := (*key_event_record)(unsafe.Pointer(&input.blob[0]))
+ if ke.KeyDown == 0 {
+ continue
+ }
+
+ if ke.VirtualKeyCode == vk_tab && ke.ControlKeyState&modKeys == shiftPressed {
+ s.key = shiftTab
+ } else if ke.VirtualKeyCode == bKey && (ke.ControlKeyState&modKeys == leftAltPressed ||
+ ke.ControlKeyState&modKeys == rightAltPressed) {
+ s.key = altB
+ } else if ke.VirtualKeyCode == fKey && (ke.ControlKeyState&modKeys == leftAltPressed ||
+ ke.ControlKeyState&modKeys == rightAltPressed) {
+ s.key = altF
+ } else if ke.VirtualKeyCode == yKey && (ke.ControlKeyState&modKeys == leftAltPressed ||
+ ke.ControlKeyState&modKeys == rightAltPressed) {
+ s.key = altY
+ } else if ke.Char > 0 {
+ s.key = rune(ke.Char)
+ } else {
+ switch ke.VirtualKeyCode {
+ case vk_prior:
+ s.key = pageUp
+ case vk_next:
+ s.key = pageDown
+ case vk_end:
+ s.key = end
+ case vk_home:
+ s.key = home
+ case vk_left:
+ s.key = left
+ if ke.ControlKeyState&(leftCtrlPressed|rightCtrlPressed) != 0 {
+ if ke.ControlKeyState&modKeys == ke.ControlKeyState&(leftCtrlPressed|rightCtrlPressed) {
+ s.key = wordLeft
+ }
+ }
+ case vk_right:
+ s.key = right
+ if ke.ControlKeyState&(leftCtrlPressed|rightCtrlPressed) != 0 {
+ if ke.ControlKeyState&modKeys == ke.ControlKeyState&(leftCtrlPressed|rightCtrlPressed) {
+ s.key = wordRight
+ }
+ }
+ case vk_up:
+ s.key = up
+ case vk_down:
+ s.key = down
+ case vk_insert:
+ s.key = insert
+ case vk_delete:
+ s.key = del
+ case vk_f1:
+ s.key = f1
+ case vk_f2:
+ s.key = f2
+ case vk_f3:
+ s.key = f3
+ case vk_f4:
+ s.key = f4
+ case vk_f5:
+ s.key = f5
+ case vk_f6:
+ s.key = f6
+ case vk_f7:
+ s.key = f7
+ case vk_f8:
+ s.key = f8
+ case vk_f9:
+ s.key = f9
+ case vk_f10:
+ s.key = f10
+ case vk_f11:
+ s.key = f11
+ case vk_f12:
+ s.key = f12
+ default:
+ // Eat modifier keys
+ // TODO: return Action(Unknown) if the key isn't a
+ // modifier.
+ continue
+ }
+ }
+
+ if ke.RepeatCount > 1 {
+ s.repeat = ke.RepeatCount - 1
+ }
+ return s.key, nil
+ }
+ return unknown, nil
+}
+
+// Close returns the terminal to its previous mode
+func (s *State) Close() error {
+ s.origMode.ApplyMode()
+ return nil
+}
+
+func (s *State) startPrompt() {
+ if m, err := TerminalMode(); err == nil {
+ s.defaultMode = m.(inputMode)
+ mode := s.defaultMode
+ mode &^= enableProcessedInput
+ mode.ApplyMode()
+ }
+}
+
+func (s *State) restartPrompt() {
+}
+
+func (s *State) stopPrompt() {
+ s.defaultMode.ApplyMode()
+}
+
+// TerminalSupported returns true because line editing is always
+// supported on Windows.
+func TerminalSupported() bool {
+ return true
+}
+
+func (mode inputMode) ApplyMode() error {
+ hIn, _, err := procGetStdHandle.Call(uintptr(std_input_handle))
+ if hIn == invalid_handle_value || hIn == 0 {
+ return err
+ }
+ ok, _, err := procSetConsoleMode.Call(hIn, uintptr(mode))
+ if ok != 0 {
+ err = nil
+ }
+ return err
+}
+
+// TerminalMode returns the current terminal input mode as a ModeApplier.
+//
+// This function is provided for convenience, and should
+// not be necessary for most users of liner.
+func TerminalMode() (ModeApplier, error) {
+ var mode inputMode
+ hIn, _, err := procGetStdHandle.Call(uintptr(std_input_handle))
+ if hIn == invalid_handle_value || hIn == 0 {
+ return nil, err
+ }
+ ok, _, err := procGetConsoleMode.Call(hIn, uintptr(unsafe.Pointer(&mode)))
+ if ok != 0 {
+ err = nil
+ }
+ return mode, err
+}
diff --git a/vendor/github.com/peterh/liner/line.go b/vendor/github.com/peterh/liner/line.go
new file mode 100644
index 000000000..903dd240d
--- /dev/null
+++ b/vendor/github.com/peterh/liner/line.go
@@ -0,0 +1,1033 @@
+// +build windows linux darwin openbsd freebsd netbsd
+
+package liner
+
+import (
+ "container/ring"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type action int
+
+const (
+ left action = iota
+ right
+ up
+ down
+ home
+ end
+ insert
+ del
+ pageUp
+ pageDown
+ f1
+ f2
+ f3
+ f4
+ f5
+ f6
+ f7
+ f8
+ f9
+ f10
+ f11
+ f12
+ altB
+ altF
+ altY
+ shiftTab
+ wordLeft
+ wordRight
+ winch
+ unknown
+)
+
+const (
+ ctrlA = 1
+ ctrlB = 2
+ ctrlC = 3
+ ctrlD = 4
+ ctrlE = 5
+ ctrlF = 6
+ ctrlG = 7
+ ctrlH = 8
+ tab = 9
+ lf = 10
+ ctrlK = 11
+ ctrlL = 12
+ cr = 13
+ ctrlN = 14
+ ctrlO = 15
+ ctrlP = 16
+ ctrlQ = 17
+ ctrlR = 18
+ ctrlS = 19
+ ctrlT = 20
+ ctrlU = 21
+ ctrlV = 22
+ ctrlW = 23
+ ctrlX = 24
+ ctrlY = 25
+ ctrlZ = 26
+ esc = 27
+ bs = 127
+)
+
+const (
+ beep = "\a"
+)
+
+type tabDirection int
+
+const (
+ tabForward tabDirection = iota
+ tabReverse
+)
+
+func (s *State) refresh(prompt []rune, buf []rune, pos int) error {
+ if s.multiLineMode {
+ return s.refreshMultiLine(prompt, buf, pos)
+ } else {
+ return s.refreshSingleLine(prompt, buf, pos)
+ }
+}
+
+func (s *State) refreshSingleLine(prompt []rune, buf []rune, pos int) error {
+ s.cursorPos(0)
+ _, err := fmt.Print(string(prompt))
+ if err != nil {
+ return err
+ }
+
+ pLen := countGlyphs(prompt)
+ bLen := countGlyphs(buf)
+ pos = countGlyphs(buf[:pos])
+ if pLen+bLen < s.columns {
+ _, err = fmt.Print(string(buf))
+ s.eraseLine()
+ s.cursorPos(pLen + pos)
+ } else {
+ // Find space available
+ space := s.columns - pLen
+ space-- // space for cursor
+ start := pos - space/2
+ end := start + space
+ if end > bLen {
+ end = bLen
+ start = end - space
+ }
+ if start < 0 {
+ start = 0
+ end = space
+ }
+ pos -= start
+
+ // Leave space for markers
+ if start > 0 {
+ start++
+ }
+ if end < bLen {
+ end--
+ }
+ startRune := len(getPrefixGlyphs(buf, start))
+ line := getPrefixGlyphs(buf[startRune:], end-start)
+
+ // Output
+ if start > 0 {
+ fmt.Print("{")
+ }
+ fmt.Print(string(line))
+ if end < bLen {
+ fmt.Print("}")
+ }
+
+ // Set cursor position
+ s.eraseLine()
+ s.cursorPos(pLen + pos)
+ }
+ return err
+}
+
+func (s *State) refreshMultiLine(prompt []rune, buf []rune, pos int) error {
+ promptColumns := countMultiLineGlyphs(prompt, s.columns, 0)
+ totalColumns := countMultiLineGlyphs(buf, s.columns, promptColumns)
+ totalRows := (totalColumns + s.columns - 1) / s.columns
+ maxRows := s.maxRows
+ if totalRows > s.maxRows {
+ s.maxRows = totalRows
+ }
+ cursorRows := s.cursorRows
+ if cursorRows == 0 {
+ cursorRows = 1
+ }
+
+ /* First step: clear all the lines used before. To do so start by
+ * going to the last row. */
+ if maxRows-cursorRows > 0 {
+ s.moveDown(maxRows - cursorRows)
+ }
+
+ /* Now for every row clear it, go up. */
+ for i := 0; i < maxRows-1; i++ {
+ s.cursorPos(0)
+ s.eraseLine()
+ s.moveUp(1)
+ }
+
+ /* Clean the top line. */
+ s.cursorPos(0)
+ s.eraseLine()
+
+ /* Write the prompt and the current buffer content */
+ if _, err := fmt.Print(string(prompt)); err != nil {
+ return err
+ }
+ if _, err := fmt.Print(string(buf)); err != nil {
+ return err
+ }
+
+ /* If we are at the very end of the screen with our prompt, we need to
+ * emit a newline and move the prompt to the first column. */
+ cursorColumns := countMultiLineGlyphs(buf[:pos], s.columns, promptColumns)
+ if cursorColumns == totalColumns && totalColumns%s.columns == 0 {
+ s.emitNewLine()
+ s.cursorPos(0)
+ totalRows++
+ if totalRows > s.maxRows {
+ s.maxRows = totalRows
+ }
+ }
+
+ /* Move cursor to right position. */
+ cursorRows = (cursorColumns + s.columns) / s.columns
+ if s.cursorRows > 0 && totalRows-cursorRows > 0 {
+ s.moveUp(totalRows - cursorRows)
+ }
+ /* Set column. */
+ s.cursorPos(cursorColumns % s.columns)
+
+ s.cursorRows = cursorRows
+ return nil
+}
+
+func (s *State) resetMultiLine(prompt []rune, buf []rune, pos int) {
+ columns := countMultiLineGlyphs(prompt, s.columns, 0)
+ columns = countMultiLineGlyphs(buf[:pos], s.columns, columns)
+ columns += 2 // ^C
+ cursorRows := (columns + s.columns) / s.columns
+ if s.maxRows-cursorRows > 0 {
+ for i := 0; i < s.maxRows-cursorRows; i++ {
+ fmt.Println() // always moves the cursor down or scrolls the window up as needed
+ }
+ }
+ s.maxRows = 1
+ s.cursorRows = 0
+}
+
+func longestCommonPrefix(strs []string) string {
+ if len(strs) == 0 {
+ return ""
+ }
+ longest := strs[0]
+
+ for _, str := range strs[1:] {
+ for !strings.HasPrefix(str, longest) {
+ longest = longest[:len(longest)-1]
+ }
+ }
+ // Remove trailing partial runes
+ longest = strings.TrimRight(longest, "\uFFFD")
+ return longest
+}
+
+func (s *State) circularTabs(items []string) func(tabDirection) (string, error) {
+ item := -1
+ return func(direction tabDirection) (string, error) {
+ if direction == tabForward {
+ if item < len(items)-1 {
+ item++
+ } else {
+ item = 0
+ }
+ } else if direction == tabReverse {
+ if item > 0 {
+ item--
+ } else {
+ item = len(items) - 1
+ }
+ }
+ return items[item], nil
+ }
+}
+
+func calculateColumns(screenWidth int, items []string) (numColumns, numRows, maxWidth int) {
+ for _, item := range items {
+ if len(item) >= screenWidth {
+ return 1, len(items), screenWidth - 1
+ }
+ if len(item) >= maxWidth {
+ maxWidth = len(item) + 1
+ }
+ }
+
+ numColumns = screenWidth / maxWidth
+ numRows = len(items) / numColumns
+ if len(items)%numColumns > 0 {
+ numRows++
+ }
+
+ if len(items) <= numColumns {
+ maxWidth = 0
+ }
+
+ return
+}
+
+func (s *State) printedTabs(items []string) func(tabDirection) (string, error) {
+ numTabs := 1
+ prefix := longestCommonPrefix(items)
+ return func(direction tabDirection) (string, error) {
+ if len(items) == 1 {
+ return items[0], nil
+ }
+
+ if numTabs == 2 {
+ if len(items) > 100 {
+ fmt.Printf("\nDisplay all %d possibilities? (y or n) ", len(items))
+ prompt:
+ for {
+ next, err := s.readNext()
+ if err != nil {
+ return prefix, err
+ }
+
+ if key, ok := next.(rune); ok {
+ switch key {
+ case 'n', 'N':
+ return prefix, nil
+ case 'y', 'Y':
+ break prompt
+ case ctrlC, ctrlD, cr, lf:
+ s.restartPrompt()
+ }
+ }
+ }
+ }
+ fmt.Println("")
+
+ numColumns, numRows, maxWidth := calculateColumns(s.columns, items)
+
+ for i := 0; i < numRows; i++ {
+ for j := 0; j < numColumns*numRows; j += numRows {
+ if i+j < len(items) {
+ if maxWidth > 0 {
+ fmt.Printf("%-*.[1]*s", maxWidth, items[i+j])
+ } else {
+ fmt.Printf("%v ", items[i+j])
+ }
+ }
+ }
+ fmt.Println("")
+ }
+ } else {
+ numTabs++
+ }
+ return prefix, nil
+ }
+}
+
+func (s *State) tabComplete(p []rune, line []rune, pos int) ([]rune, int, interface{}, error) {
+ if s.completer == nil {
+ return line, pos, rune(esc), nil
+ }
+ head, list, tail := s.completer(string(line), pos)
+ if len(list) <= 0 {
+ return line, pos, rune(esc), nil
+ }
+ hl := utf8.RuneCountInString(head)
+ if len(list) == 1 {
+ s.refresh(p, []rune(head+list[0]+tail), hl+utf8.RuneCountInString(list[0]))
+ return []rune(head + list[0] + tail), hl + utf8.RuneCountInString(list[0]), rune(esc), nil
+ }
+
+ direction := tabForward
+ tabPrinter := s.circularTabs(list)
+ if s.tabStyle == TabPrints {
+ tabPrinter = s.printedTabs(list)
+ }
+
+ for {
+ pick, err := tabPrinter(direction)
+ if err != nil {
+ return line, pos, rune(esc), err
+ }
+ s.refresh(p, []rune(head+pick+tail), hl+utf8.RuneCountInString(pick))
+
+ next, err := s.readNext()
+ if err != nil {
+ return line, pos, rune(esc), err
+ }
+ if key, ok := next.(rune); ok {
+ if key == tab {
+ direction = tabForward
+ continue
+ }
+ if key == esc {
+ return line, pos, rune(esc), nil
+ }
+ }
+ if a, ok := next.(action); ok && a == shiftTab {
+ direction = tabReverse
+ continue
+ }
+ return []rune(head + pick + tail), hl + utf8.RuneCountInString(pick), next, nil
+ }
+ // Not reached
+ return line, pos, rune(esc), nil
+}
+
+// reverse intelligent search, implements a bash-like history search.
+func (s *State) reverseISearch(origLine []rune, origPos int) ([]rune, int, interface{}, error) {
+ p := "(reverse-i-search)`': "
+ s.refresh([]rune(p), origLine, origPos)
+
+ line := []rune{}
+ pos := 0
+ foundLine := string(origLine)
+ foundPos := origPos
+
+ getLine := func() ([]rune, []rune, int) {
+ search := string(line)
+ prompt := "(reverse-i-search)`%s': "
+ return []rune(fmt.Sprintf(prompt, search)), []rune(foundLine), foundPos
+ }
+
+ history, positions := s.getHistoryByPattern(string(line))
+ historyPos := len(history) - 1
+
+ for {
+ next, err := s.readNext()
+ if err != nil {
+ return []rune(foundLine), foundPos, rune(esc), err
+ }
+
+ switch v := next.(type) {
+ case rune:
+ switch v {
+ case ctrlR: // Search backwards
+ if historyPos > 0 && historyPos < len(history) {
+ historyPos--
+ foundLine = history[historyPos]
+ foundPos = positions[historyPos]
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlS: // Search forward
+ if historyPos < len(history)-1 && historyPos >= 0 {
+ historyPos++
+ foundLine = history[historyPos]
+ foundPos = positions[historyPos]
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlH, bs: // Backspace
+ if pos <= 0 {
+ fmt.Print(beep)
+ } else {
+ n := len(getSuffixGlyphs(line[:pos], 1))
+ line = append(line[:pos-n], line[pos:]...)
+ pos -= n
+
+ // For each char deleted, display the last matching line of history
+ history, positions := s.getHistoryByPattern(string(line))
+ historyPos = len(history) - 1
+ if len(history) > 0 {
+ foundLine = history[historyPos]
+ foundPos = positions[historyPos]
+ } else {
+ foundLine = ""
+ foundPos = 0
+ }
+ }
+ case ctrlG: // Cancel
+ return origLine, origPos, rune(esc), err
+
+ case tab, cr, lf, ctrlA, ctrlB, ctrlD, ctrlE, ctrlF, ctrlK,
+ ctrlL, ctrlN, ctrlO, ctrlP, ctrlQ, ctrlT, ctrlU, ctrlV, ctrlW, ctrlX, ctrlY, ctrlZ:
+ fallthrough
+ case 0, ctrlC, esc, 28, 29, 30, 31:
+ return []rune(foundLine), foundPos, next, err
+ default:
+ line = append(line[:pos], append([]rune{v}, line[pos:]...)...)
+ pos++
+
+ // For each keystroke typed, display the last matching line of history
+ history, positions = s.getHistoryByPattern(string(line))
+ historyPos = len(history) - 1
+ if len(history) > 0 {
+ foundLine = history[historyPos]
+ foundPos = positions[historyPos]
+ } else {
+ foundLine = ""
+ foundPos = 0
+ }
+ }
+ case action:
+ return []rune(foundLine), foundPos, next, err
+ }
+ s.refresh(getLine())
+ }
+}
+
+// addToKillRing adds some text to the kill ring. If mode is 0 it adds it to a
+// new node at the end of the kill ring, and moves the current pointer to the
+// new node. If mode is 1 or 2 it appends or prepends the text to the current
+// entry of the killRing.
+func (s *State) addToKillRing(text []rune, mode int) {
+ // Don't use the same underlying array as text
+ killLine := make([]rune, len(text))
+ copy(killLine, text)
+
+ // Point killRing to a newNode, procedure depends on the killring state and
+ // append mode.
+ if mode == 0 { // Add new node to killRing
+ if s.killRing == nil { // if killring is empty, create a new one
+ s.killRing = ring.New(1)
+ } else if s.killRing.Len() >= KillRingMax { // if killring is "full"
+ s.killRing = s.killRing.Next()
+ } else { // Normal case
+ s.killRing.Link(ring.New(1))
+ s.killRing = s.killRing.Next()
+ }
+ } else {
+ if s.killRing == nil { // if killring is empty, create a new one
+ s.killRing = ring.New(1)
+ s.killRing.Value = []rune{}
+ }
+ if mode == 1 { // Append to last entry
+ killLine = append(s.killRing.Value.([]rune), killLine...)
+ } else if mode == 2 { // Prepend to last entry
+ killLine = append(killLine, s.killRing.Value.([]rune)...)
+ }
+ }
+
+ // Save text in the current killring node
+ s.killRing.Value = killLine
+}
+
+func (s *State) yank(p []rune, text []rune, pos int) ([]rune, int, interface{}, error) {
+ if s.killRing == nil {
+ return text, pos, rune(esc), nil
+ }
+
+ lineStart := text[:pos]
+ lineEnd := text[pos:]
+ var line []rune
+
+ for {
+ value := s.killRing.Value.([]rune)
+ line = make([]rune, 0)
+ line = append(line, lineStart...)
+ line = append(line, value...)
+ line = append(line, lineEnd...)
+
+ pos = len(lineStart) + len(value)
+ s.refresh(p, line, pos)
+
+ next, err := s.readNext()
+ if err != nil {
+ return line, pos, next, err
+ }
+
+ switch v := next.(type) {
+ case rune:
+ return line, pos, next, nil
+ case action:
+ switch v {
+ case altY:
+ s.killRing = s.killRing.Prev()
+ default:
+ return line, pos, next, nil
+ }
+ }
+ }
+
+ return line, pos, esc, nil
+}
+
+// Prompt displays p and returns a line of user input, not including a trailing
+// newline character. An io.EOF error is returned if the user signals end-of-file
+// by pressing Ctrl-D. Prompt allows line editing if the terminal supports it.
+func (s *State) Prompt(prompt string) (string, error) {
+ return s.PromptWithSuggestion(prompt, "", 0)
+}
+
+// PromptWithSuggestion displays prompt and an editable text with cursor at
+// given position. The cursor will be set to the end of the line if given position
+// is negative or greater than length of text. Returns a line of user input, not
+// including a trailing newline character. An io.EOF error is returned if the user
+// signals end-of-file by pressing Ctrl-D.
+func (s *State) PromptWithSuggestion(prompt string, text string, pos int) (string, error) {
+ if s.inputRedirected || !s.terminalSupported {
+ return s.promptUnsupported(prompt)
+ }
+ if s.outputRedirected {
+ return "", ErrNotTerminalOutput
+ }
+
+ s.historyMutex.RLock()
+ defer s.historyMutex.RUnlock()
+
+ fmt.Print(prompt)
+ p := []rune(prompt)
+ var line = []rune(text)
+ historyEnd := ""
+ prefixHistory := s.getHistoryByPrefix(string(line))
+ historyPos := len(prefixHistory)
+ historyAction := false // used to mark history related actions
+ killAction := 0 // used to mark kill related actions
+
+ defer s.stopPrompt()
+
+ if pos < 0 || len(text) < pos {
+ pos = len(text)
+ }
+ if len(line) > 0 {
+ s.refresh(p, line, pos)
+ }
+
+restart:
+ s.startPrompt()
+ s.getColumns()
+
+mainLoop:
+ for {
+ next, err := s.readNext()
+ haveNext:
+ if err != nil {
+ if s.shouldRestart != nil && s.shouldRestart(err) {
+ goto restart
+ }
+ return "", err
+ }
+
+ historyAction = false
+ switch v := next.(type) {
+ case rune:
+ switch v {
+ case cr, lf:
+ if s.multiLineMode {
+ s.resetMultiLine(p, line, pos)
+ }
+ fmt.Println()
+ break mainLoop
+ case ctrlA: // Start of line
+ pos = 0
+ s.refresh(p, line, pos)
+ case ctrlE: // End of line
+ pos = len(line)
+ s.refresh(p, line, pos)
+ case ctrlB: // left
+ if pos > 0 {
+ pos -= len(getSuffixGlyphs(line[:pos], 1))
+ s.refresh(p, line, pos)
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlF: // right
+ if pos < len(line) {
+ pos += len(getPrefixGlyphs(line[pos:], 1))
+ s.refresh(p, line, pos)
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlD: // del
+ if pos == 0 && len(line) == 0 {
+ // exit
+ return "", io.EOF
+ }
+
+ // ctrlD is a potential EOF, so the rune reader shuts down.
+ // Therefore, if it isn't actually an EOF, we must re-startPrompt.
+ s.restartPrompt()
+
+ if pos >= len(line) {
+ fmt.Print(beep)
+ } else {
+ n := len(getPrefixGlyphs(line[pos:], 1))
+ line = append(line[:pos], line[pos+n:]...)
+ s.refresh(p, line, pos)
+ }
+ case ctrlK: // delete remainder of line
+ if pos >= len(line) {
+ fmt.Print(beep)
+ } else {
+ if killAction > 0 {
+					s.addToKillRing(line[pos:], 1) // Add in append mode
+ } else {
+ s.addToKillRing(line[pos:], 0) // Add in normal mode
+ }
+
+ killAction = 2 // Mark that there was a kill action
+ line = line[:pos]
+ s.refresh(p, line, pos)
+ }
+ case ctrlP: // up
+ historyAction = true
+ if historyPos > 0 {
+ if historyPos == len(prefixHistory) {
+ historyEnd = string(line)
+ }
+ historyPos--
+ line = []rune(prefixHistory[historyPos])
+ pos = len(line)
+ s.refresh(p, line, pos)
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlN: // down
+ historyAction = true
+ if historyPos < len(prefixHistory) {
+ historyPos++
+ if historyPos == len(prefixHistory) {
+ line = []rune(historyEnd)
+ } else {
+ line = []rune(prefixHistory[historyPos])
+ }
+ pos = len(line)
+ s.refresh(p, line, pos)
+ } else {
+ fmt.Print(beep)
+ }
+ case ctrlT: // transpose prev glyph with glyph under cursor
+ if len(line) < 2 || pos < 1 {
+ fmt.Print(beep)
+ } else {
+ if pos == len(line) {
+ pos -= len(getSuffixGlyphs(line, 1))
+ }
+ prev := getSuffixGlyphs(line[:pos], 1)
+ next := getPrefixGlyphs(line[pos:], 1)
+ scratch := make([]rune, len(prev))
+ copy(scratch, prev)
+ copy(line[pos-len(prev):], next)
+ copy(line[pos-len(prev)+len(next):], scratch)
+ pos += len(next)
+ s.refresh(p, line, pos)
+ }
+ case ctrlL: // clear screen
+ s.eraseScreen()
+ s.refresh(p, line, pos)
+ case ctrlC: // reset
+ fmt.Println("^C")
+ if s.multiLineMode {
+ s.resetMultiLine(p, line, pos)
+ }
+ if s.ctrlCAborts {
+ return "", ErrPromptAborted
+ }
+ line = line[:0]
+ pos = 0
+ fmt.Print(prompt)
+ s.restartPrompt()
+ case ctrlH, bs: // Backspace
+ if pos <= 0 {
+ fmt.Print(beep)
+ } else {
+ n := len(getSuffixGlyphs(line[:pos], 1))
+ line = append(line[:pos-n], line[pos:]...)
+ pos -= n
+ s.refresh(p, line, pos)
+ }
+ case ctrlU: // Erase line before cursor
+ if killAction > 0 {
+ s.addToKillRing(line[:pos], 2) // Add in prepend mode
+ } else {
+ s.addToKillRing(line[:pos], 0) // Add in normal mode
+ }
+
+ killAction = 2 // Mark that there was some killing
+ line = line[pos:]
+ pos = 0
+ s.refresh(p, line, pos)
+ case ctrlW: // Erase word
+ if pos == 0 {
+ fmt.Print(beep)
+ break
+ }
+ // Remove whitespace to the left
+ var buf []rune // Store the deleted chars in a buffer
+ for {
+ if pos == 0 || !unicode.IsSpace(line[pos-1]) {
+ break
+ }
+ buf = append(buf, line[pos-1])
+ line = append(line[:pos-1], line[pos:]...)
+ pos--
+ }
+ // Remove non-whitespace to the left
+ for {
+ if pos == 0 || unicode.IsSpace(line[pos-1]) {
+ break
+ }
+ buf = append(buf, line[pos-1])
+ line = append(line[:pos-1], line[pos:]...)
+ pos--
+ }
+ // Invert the buffer and save the result on the killRing
+ var newBuf []rune
+ for i := len(buf) - 1; i >= 0; i-- {
+ newBuf = append(newBuf, buf[i])
+ }
+ if killAction > 0 {
+ s.addToKillRing(newBuf, 2) // Add in prepend mode
+ } else {
+ s.addToKillRing(newBuf, 0) // Add in normal mode
+ }
+ killAction = 2 // Mark that there was some killing
+
+ s.refresh(p, line, pos)
+ case ctrlY: // Paste from Yank buffer
+ line, pos, next, err = s.yank(p, line, pos)
+ goto haveNext
+ case ctrlR: // Reverse Search
+ line, pos, next, err = s.reverseISearch(line, pos)
+ s.refresh(p, line, pos)
+ goto haveNext
+ case tab: // Tab completion
+ line, pos, next, err = s.tabComplete(p, line, pos)
+ goto haveNext
+ // Catch keys that do nothing, but you don't want them to beep
+ case esc:
+ // DO NOTHING
+ // Unused keys
+ case ctrlG, ctrlO, ctrlQ, ctrlS, ctrlV, ctrlX, ctrlZ:
+ fallthrough
+ // Catch unhandled control codes (anything <= 31)
+ case 0, 28, 29, 30, 31:
+ fmt.Print(beep)
+ default:
+ if pos == len(line) && !s.multiLineMode && countGlyphs(p)+countGlyphs(line) < s.columns-1 {
+ line = append(line, v)
+ fmt.Printf("%c", v)
+ pos++
+ } else {
+ line = append(line[:pos], append([]rune{v}, line[pos:]...)...)
+ pos++
+ s.refresh(p, line, pos)
+ }
+ }
+ case action:
+ switch v {
+ case del:
+ if pos >= len(line) {
+ fmt.Print(beep)
+ } else {
+ n := len(getPrefixGlyphs(line[pos:], 1))
+ line = append(line[:pos], line[pos+n:]...)
+ }
+ case left:
+ if pos > 0 {
+ pos -= len(getSuffixGlyphs(line[:pos], 1))
+ } else {
+ fmt.Print(beep)
+ }
+ case wordLeft, altB:
+ if pos > 0 {
+ var spaceHere, spaceLeft, leftKnown bool
+ for {
+ pos--
+ if pos == 0 {
+ break
+ }
+ if leftKnown {
+ spaceHere = spaceLeft
+ } else {
+ spaceHere = unicode.IsSpace(line[pos])
+ }
+ spaceLeft, leftKnown = unicode.IsSpace(line[pos-1]), true
+ if !spaceHere && spaceLeft {
+ break
+ }
+ }
+ } else {
+ fmt.Print(beep)
+ }
+ case right:
+ if pos < len(line) {
+ pos += len(getPrefixGlyphs(line[pos:], 1))
+ } else {
+ fmt.Print(beep)
+ }
+ case wordRight, altF:
+ if pos < len(line) {
+ var spaceHere, spaceLeft, hereKnown bool
+ for {
+ pos++
+ if pos == len(line) {
+ break
+ }
+ if hereKnown {
+ spaceLeft = spaceHere
+ } else {
+ spaceLeft = unicode.IsSpace(line[pos-1])
+ }
+ spaceHere, hereKnown = unicode.IsSpace(line[pos]), true
+ if spaceHere && !spaceLeft {
+ break
+ }
+ }
+ } else {
+ fmt.Print(beep)
+ }
+ case up:
+ historyAction = true
+ if historyPos > 0 {
+ if historyPos == len(prefixHistory) {
+ historyEnd = string(line)
+ }
+ historyPos--
+ line = []rune(prefixHistory[historyPos])
+ pos = len(line)
+ } else {
+ fmt.Print(beep)
+ }
+ case down:
+ historyAction = true
+ if historyPos < len(prefixHistory) {
+ historyPos++
+ if historyPos == len(prefixHistory) {
+ line = []rune(historyEnd)
+ } else {
+ line = []rune(prefixHistory[historyPos])
+ }
+ pos = len(line)
+ } else {
+ fmt.Print(beep)
+ }
+ case home: // Start of line
+ pos = 0
+ case end: // End of line
+ pos = len(line)
+ case winch: // Window change
+ if s.multiLineMode {
+ if s.maxRows-s.cursorRows > 0 {
+ s.moveDown(s.maxRows - s.cursorRows)
+ }
+ for i := 0; i < s.maxRows-1; i++ {
+ s.cursorPos(0)
+ s.eraseLine()
+ s.moveUp(1)
+ }
+ s.maxRows = 1
+ s.cursorRows = 1
+ }
+ }
+ s.refresh(p, line, pos)
+ }
+ if !historyAction {
+ prefixHistory = s.getHistoryByPrefix(string(line))
+ historyPos = len(prefixHistory)
+ }
+ if killAction > 0 {
+ killAction--
+ }
+ }
+ return string(line), nil
+}
+
+// PasswordPrompt displays p, and then waits for user input. The input typed by
+// the user is not displayed in the terminal.
+func (s *State) PasswordPrompt(prompt string) (string, error) {
+ if !s.terminalSupported {
+ return "", errors.New("liner: function not supported in this terminal")
+ }
+ if s.inputRedirected {
+ return s.promptUnsupported(prompt)
+ }
+ if s.outputRedirected {
+ return "", ErrNotTerminalOutput
+ }
+
+ defer s.stopPrompt()
+
+restart:
+ s.startPrompt()
+ s.getColumns()
+
+ fmt.Print(prompt)
+ p := []rune(prompt)
+ var line []rune
+ pos := 0
+
+mainLoop:
+ for {
+ next, err := s.readNext()
+ if err != nil {
+ if s.shouldRestart != nil && s.shouldRestart(err) {
+ goto restart
+ }
+ return "", err
+ }
+
+ switch v := next.(type) {
+ case rune:
+ switch v {
+ case cr, lf:
+ if s.multiLineMode {
+ s.resetMultiLine(p, line, pos)
+ }
+ fmt.Println()
+ break mainLoop
+ case ctrlD: // del
+ if pos == 0 && len(line) == 0 {
+ // exit
+ return "", io.EOF
+ }
+
+ // ctrlD is a potential EOF, so the rune reader shuts down.
+ // Therefore, if it isn't actually an EOF, we must re-startPrompt.
+ s.restartPrompt()
+ case ctrlL: // clear screen
+ s.eraseScreen()
+ s.refresh(p, []rune{}, 0)
+ case ctrlH, bs: // Backspace
+ if pos <= 0 {
+ fmt.Print(beep)
+ } else {
+ n := len(getSuffixGlyphs(line[:pos], 1))
+ line = append(line[:pos-n], line[pos:]...)
+ pos -= n
+ }
+ case ctrlC:
+ fmt.Println("^C")
+ if s.multiLineMode {
+ s.resetMultiLine(p, line, pos)
+ }
+ if s.ctrlCAborts {
+ return "", ErrPromptAborted
+ }
+ line = line[:0]
+ pos = 0
+ fmt.Print(prompt)
+ s.restartPrompt()
+ // Unused keys
+ case esc, tab, ctrlA, ctrlB, ctrlE, ctrlF, ctrlG, ctrlK, ctrlN, ctrlO, ctrlP, ctrlQ, ctrlR, ctrlS,
+ ctrlT, ctrlU, ctrlV, ctrlW, ctrlX, ctrlY, ctrlZ:
+ fallthrough
+ // Catch unhandled control codes (anything <= 31)
+ case 0, 28, 29, 30, 31:
+ fmt.Print(beep)
+ default:
+ line = append(line[:pos], append([]rune{v}, line[pos:]...)...)
+ pos++
+ }
+ }
+ }
+ return string(line), nil
+}
diff --git a/vendor/github.com/peterh/liner/output.go b/vendor/github.com/peterh/liner/output.go
new file mode 100644
index 000000000..6d83d4ebf
--- /dev/null
+++ b/vendor/github.com/peterh/liner/output.go
@@ -0,0 +1,79 @@
+// +build linux darwin openbsd freebsd netbsd
+
+package liner
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+func (s *State) cursorPos(x int) {
+ if s.useCHA {
+ // 'G' is "Cursor Character Absolute (CHA)"
+ fmt.Printf("\x1b[%dG", x+1)
+ } else {
+ // 'C' is "Cursor Forward (CUF)"
+ fmt.Print("\r")
+ if x > 0 {
+ fmt.Printf("\x1b[%dC", x)
+ }
+ }
+}
+
+func (s *State) eraseLine() {
+ fmt.Print("\x1b[0K")
+}
+
+func (s *State) eraseScreen() {
+ fmt.Print("\x1b[H\x1b[2J")
+}
+
+func (s *State) moveUp(lines int) {
+ fmt.Printf("\x1b[%dA", lines)
+}
+
+func (s *State) moveDown(lines int) {
+ fmt.Printf("\x1b[%dB", lines)
+}
+
+func (s *State) emitNewLine() {
+ fmt.Print("\n")
+}
+
+type winSize struct {
+ row, col uint16
+ xpixel, ypixel uint16
+}
+
+func (s *State) getColumns() bool {
+ var ws winSize
+ ok, _, _ := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdout),
+ syscall.TIOCGWINSZ, uintptr(unsafe.Pointer(&ws)))
+ if int(ok) < 0 {
+ return false
+ }
+ s.columns = int(ws.col)
+ if cursorColumn && s.columns > 1 {
+ s.columns--
+ }
+ return true
+}
+
+func (s *State) checkOutput() {
+ // xterm is known to support CHA
+ if strings.Contains(strings.ToLower(os.Getenv("TERM")), "xterm") {
+ s.useCHA = true
+ return
+ }
+
+ // The test for functional ANSI CHA is unreliable (eg the Windows
+ // telnet command does not support reading the cursor position with
+ // an ANSI DSR request, despite setting TERM=ansi)
+
+ // Assume CHA isn't supported (which should be safe, although it
+ // does result in occasional visible cursor jitter)
+ s.useCHA = false
+}
diff --git a/vendor/github.com/peterh/liner/output_windows.go b/vendor/github.com/peterh/liner/output_windows.go
new file mode 100644
index 000000000..63c9c5d75
--- /dev/null
+++ b/vendor/github.com/peterh/liner/output_windows.go
@@ -0,0 +1,76 @@
+package liner
+
+import (
+ "unsafe"
+)
+
+type coord struct {
+ x, y int16
+}
+type smallRect struct {
+ left, top, right, bottom int16
+}
+
+type consoleScreenBufferInfo struct {
+ dwSize coord
+ dwCursorPosition coord
+ wAttributes int16
+ srWindow smallRect
+ dwMaximumWindowSize coord
+}
+
+func (s *State) cursorPos(x int) {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ procSetConsoleCursorPosition.Call(uintptr(s.hOut),
+ uintptr(int(x)&0xFFFF|int(sbi.dwCursorPosition.y)<<16))
+}
+
+func (s *State) eraseLine() {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ var numWritten uint32
+ procFillConsoleOutputCharacter.Call(uintptr(s.hOut), uintptr(' '),
+ uintptr(sbi.dwSize.x-sbi.dwCursorPosition.x),
+ uintptr(int(sbi.dwCursorPosition.x)&0xFFFF|int(sbi.dwCursorPosition.y)<<16),
+ uintptr(unsafe.Pointer(&numWritten)))
+}
+
+func (s *State) eraseScreen() {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ var numWritten uint32
+ procFillConsoleOutputCharacter.Call(uintptr(s.hOut), uintptr(' '),
+ uintptr(sbi.dwSize.x)*uintptr(sbi.dwSize.y),
+ 0,
+ uintptr(unsafe.Pointer(&numWritten)))
+ procSetConsoleCursorPosition.Call(uintptr(s.hOut), 0)
+}
+
+func (s *State) moveUp(lines int) {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ procSetConsoleCursorPosition.Call(uintptr(s.hOut),
+ uintptr(int(sbi.dwCursorPosition.x)&0xFFFF|(int(sbi.dwCursorPosition.y)-lines)<<16))
+}
+
+func (s *State) moveDown(lines int) {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ procSetConsoleCursorPosition.Call(uintptr(s.hOut),
+ uintptr(int(sbi.dwCursorPosition.x)&0xFFFF|(int(sbi.dwCursorPosition.y)+lines)<<16))
+}
+
+func (s *State) emitNewLine() {
+	// windows doesn't need to emit a new line
+}
+
+func (s *State) getColumns() {
+ var sbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
+ s.columns = int(sbi.dwSize.x)
+ if s.columns > 1 {
+ // Windows 10 needs a spare column for the cursor
+ s.columns--
+ }
+}
diff --git a/vendor/github.com/peterh/liner/signal.go b/vendor/github.com/peterh/liner/signal.go
new file mode 100644
index 000000000..0cba79e7f
--- /dev/null
+++ b/vendor/github.com/peterh/liner/signal.go
@@ -0,0 +1,12 @@
+// +build go1.1,!windows
+
+package liner
+
+import (
+ "os"
+ "os/signal"
+)
+
+func stopSignal(c chan<- os.Signal) {
+ signal.Stop(c)
+}
diff --git a/vendor/github.com/peterh/liner/signal_legacy.go b/vendor/github.com/peterh/liner/signal_legacy.go
new file mode 100644
index 000000000..fa3672daa
--- /dev/null
+++ b/vendor/github.com/peterh/liner/signal_legacy.go
@@ -0,0 +1,11 @@
+// +build !go1.1,!windows
+
+package liner
+
+import (
+ "os"
+)
+
+func stopSignal(c chan<- os.Signal) {
+ // signal.Stop does not exist before Go 1.1
+}
diff --git a/vendor/github.com/peterh/liner/unixmode.go b/vendor/github.com/peterh/liner/unixmode.go
new file mode 100644
index 000000000..9838923f5
--- /dev/null
+++ b/vendor/github.com/peterh/liner/unixmode.go
@@ -0,0 +1,37 @@
+// +build linux darwin freebsd openbsd netbsd
+
+package liner
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (mode *termios) ApplyMode() error {
+ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdin), setTermios, uintptr(unsafe.Pointer(mode)))
+
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+// TerminalMode returns the current terminal input mode as a ModeApplier.
+//
+// This function is provided for convenience, and should
+// not be necessary for most users of liner.
+func TerminalMode() (ModeApplier, error) {
+ mode, errno := getMode(syscall.Stdin)
+
+ if errno != 0 {
+ return nil, errno
+ }
+ return mode, nil
+}
+
+func getMode(handle int) (*termios, syscall.Errno) {
+ var mode termios
+ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(handle), getTermios, uintptr(unsafe.Pointer(&mode)))
+
+ return &mode, errno
+}
diff --git a/vendor/github.com/peterh/liner/width.go b/vendor/github.com/peterh/liner/width.go
new file mode 100644
index 000000000..d8984aae9
--- /dev/null
+++ b/vendor/github.com/peterh/liner/width.go
@@ -0,0 +1,79 @@
+package liner
+
+import "unicode"
+
+// These character classes are mostly zero width (when combined).
+// A few might not be, depending on the user's font. Fixing this
+// is non-trivial, given that some terminals don't support
+// ANSI DSR/CPR
+var zeroWidth = []*unicode.RangeTable{
+ unicode.Mn,
+ unicode.Me,
+ unicode.Cc,
+ unicode.Cf,
+}
+
+var doubleWidth = []*unicode.RangeTable{
+ unicode.Han,
+ unicode.Hangul,
+ unicode.Hiragana,
+ unicode.Katakana,
+}
+
+// countGlyphs considers zero-width characters to be zero glyphs wide,
+// and members of Chinese, Japanese, and Korean scripts to be 2 glyphs wide.
+func countGlyphs(s []rune) int {
+ n := 0
+ for _, r := range s {
+ switch {
+ case unicode.IsOneOf(zeroWidth, r):
+ case unicode.IsOneOf(doubleWidth, r):
+ n += 2
+ default:
+ n++
+ }
+ }
+ return n
+}
+
+func countMultiLineGlyphs(s []rune, columns int, start int) int {
+ n := start
+ for _, r := range s {
+ switch {
+ case unicode.IsOneOf(zeroWidth, r):
+ case unicode.IsOneOf(doubleWidth, r):
+ n += 2
+ // no room for a 2-glyphs-wide char in the ending
+ // so skip a column and display it at the beginning
+ if n%columns == 1 {
+ n++
+ }
+ default:
+ n++
+ }
+ }
+ return n
+}
+
+func getPrefixGlyphs(s []rune, num int) []rune {
+ p := 0
+ for n := 0; n < num && p < len(s); p++ {
+ if !unicode.IsOneOf(zeroWidth, s[p]) {
+ n++
+ }
+ }
+ for p < len(s) && unicode.IsOneOf(zeroWidth, s[p]) {
+ p++
+ }
+ return s[:p]
+}
+
+func getSuffixGlyphs(s []rune, num int) []rune {
+ p := len(s)
+ for n := 0; n < num && p > 0; p-- {
+ if !unicode.IsOneOf(zeroWidth, s[p-1]) {
+ n++
+ }
+ }
+ return s[p:]
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore
new file mode 100644
index 000000000..83c8f8237
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.gitignore
@@ -0,0 +1,9 @@
+*.[68]
+*.a
+*.out
+*.swp
+_obj
+_testmain.go
+cmd/metrics-bench/metrics-bench
+cmd/metrics-example/metrics-example
+cmd/never-read/never-read
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
new file mode 100644
index 000000000..20aa5d042
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+
+script:
+ - ./validate.sh
+
+# this should give us faster builds according to
+# http://docs.travis-ci.com/user/migrating-from-legacy/
+sudo: false
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
new file mode 100644
index 000000000..363fa9ee7
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 000000000..2d1a6dcfa
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,153 @@
+go-metrics
+==========
+
+![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
+
+Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := NewRegistry()
+g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not threadsafe. For threadsafe metric registration use
+GetOrRegister:
+
+```
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** this has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. See
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
+ Host: "127.0.0.1:8086",
+ Database: "metrics",
+ Username: "test",
+ Password: "test",
+})
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+
+```go
+import "github.com/mihasya/go-metrics-librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+ 10e9, // interval
+ "example@example.com", // account owner email address
+ "token", // Librato API token
+ "hostname", // source
+ []float64{0.95}, // percentiles to send
+ time.Millisecond, // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Maintain all metrics along with expvars at `/debug/metrics`:
+
+This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars
+as well as all your go-metrics.
+
+
+```go
+import "github.com/rcrowley/go-metrics/exp"
+
+exp.Exp(metrics.DefaultRegistry)
+```
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
+
+Publishing Metrics
+------------------
+
+Clients are available for the following destinations:
+
+* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato)
+* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite)
+* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb)
+* Ganglia - [https://github.com/appscode/metlia](https://github.com/appscode/metlia)
+* Prometheus - [https://github.com/deathowl/go-metrics-prometheus](https://github.com/deathowl/go-metrics-prometheus)
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 000000000..bb7b039cb
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+ Clear()
+ Count() int64
+ Dec(int64)
+ Inc(int64)
+ Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+ if UseNilMetrics {
+ return NilCounter{}
+ }
+ return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+ c := NewCounter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+ panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+ panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+ panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+ count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+ atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+ return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+ atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+ atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+ return CounterSnapshot(c.Count())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
new file mode 100644
index 000000000..043ccefab
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+ "runtime/debug"
+ "time"
+)
+
+var (
+ debugMetrics struct {
+ GCStats struct {
+ LastGC Gauge
+ NumGC Gauge
+ Pause Histogram
+ //PauseQuantiles Histogram
+ PauseTotal Gauge
+ }
+ ReadGCStats Timer
+ }
+ gcStats debug.GCStats
+)
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called as a goroutine.
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureDebugGCStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called in a background goroutine.
+// Giving a registry which has not been given to RegisterDebugGCStats will
+// panic.
+//
+// Be careful (but much less so) with this because debug.ReadGCStats calls
+// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
+// operation, isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+ lastGC := gcStats.LastGC
+ t := time.Now()
+ debug.ReadGCStats(&gcStats)
+ debugMetrics.ReadGCStats.UpdateSince(t)
+
+ debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+ debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+ if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+ debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+ }
+ //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+ debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// Register metrics for the Go garbage collector statistics exported in
+// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
+// i.e. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+ debugMetrics.GCStats.LastGC = NewGauge()
+ debugMetrics.GCStats.NumGC = NewGauge()
+ debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+ //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+ debugMetrics.GCStats.PauseTotal = NewGauge()
+ debugMetrics.ReadGCStats = NewTimer()
+
+ r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+ r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+ r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+ //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+ r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+ r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+}
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+ gcStats.Pause = make([]time.Duration, 11)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
new file mode 100644
index 000000000..694a1d033
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -0,0 +1,118 @@
+package metrics
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+ Rate() float64
+ Snapshot() EWMA
+ Tick()
+ Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+ if UseNilMetrics {
+ return NilEWMA{}
+ }
+ return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+ panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+ panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick. It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+ uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+ alpha float64
+ rate float64
+ init bool
+ mutex sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ return a.rate * float64(1e9)
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+ return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average. It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+ count := atomic.LoadInt64(&a.uncounted)
+ atomic.AddInt64(&a.uncounted, -count)
+ instantRate := float64(count) / float64(5e9)
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ if a.init {
+ a.rate += a.alpha * (instantRate - a.rate)
+ } else {
+ a.init = true
+ a.rate = instantRate
+ }
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+ atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/exp/exp.go b/vendor/github.com/rcrowley/go-metrics/exp/exp.go
new file mode 100644
index 000000000..11dd3f898
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/exp/exp.go
@@ -0,0 +1,156 @@
+// Hook go-metrics into expvar
+// on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler
+package exp
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+ "sync"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type exp struct {
+ expvarLock sync.Mutex // expvar panics if you try to register the same var twice, so we must probe it safely
+ registry metrics.Registry
+}
+
+func (exp *exp) expHandler(w http.ResponseWriter, r *http.Request) {
+ // load our variables into expvar
+ exp.syncToExpvar()
+
+ // now just run the official expvar handler code (which is not publicly callable, so pasted inline)
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprintf(w, "{\n")
+ first := true
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
+
+// Exp will register an expvar powered metrics handler with http.DefaultServeMux on "/debug/vars"
+func Exp(r metrics.Registry) {
+ h := ExpHandler(r)
+ // this would cause a panic:
+ // panic: http: multiple registrations for /debug/vars
+ // http.HandleFunc("/debug/vars", e.expHandler)
+ // haven't found an elegant way, so just use a different endpoint
+ http.Handle("/debug/metrics", h)
+}
+
+// ExpHandler will return an expvar powered metrics handler.
+func ExpHandler(r metrics.Registry) http.Handler {
+ e := exp{sync.Mutex{}, r}
+ return http.HandlerFunc(e.expHandler)
+}
+
+func (exp *exp) getInt(name string) *expvar.Int {
+ var v *expvar.Int
+ exp.expvarLock.Lock()
+ p := expvar.Get(name)
+ if p != nil {
+ v = p.(*expvar.Int)
+ } else {
+ v = new(expvar.Int)
+ expvar.Publish(name, v)
+ }
+ exp.expvarLock.Unlock()
+ return v
+}
+
+func (exp *exp) getFloat(name string) *expvar.Float {
+ var v *expvar.Float
+ exp.expvarLock.Lock()
+ p := expvar.Get(name)
+ if p != nil {
+ v = p.(*expvar.Float)
+ } else {
+ v = new(expvar.Float)
+ expvar.Publish(name, v)
+ }
+ exp.expvarLock.Unlock()
+ return v
+}
+
+func (exp *exp) publishCounter(name string, metric metrics.Counter) {
+ v := exp.getInt(name)
+ v.Set(metric.Count())
+}
+
+func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
+ v := exp.getInt(name)
+ v.Set(metric.Value())
+}
+func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
+ exp.getFloat(name).Set(metric.Value())
+}
+
+func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ exp.getInt(name + ".count").Set(h.Count())
+ exp.getFloat(name + ".min").Set(float64(h.Min()))
+ exp.getFloat(name + ".max").Set(float64(h.Max()))
+ exp.getFloat(name + ".mean").Set(float64(h.Mean()))
+ exp.getFloat(name + ".std-dev").Set(float64(h.StdDev()))
+ exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
+ exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
+ exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
+ exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
+ exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
+}
+
+func (exp *exp) publishMeter(name string, metric metrics.Meter) {
+ m := metric.Snapshot()
+ exp.getInt(name + ".count").Set(m.Count())
+ exp.getFloat(name + ".one-minute").Set(float64(m.Rate1()))
+ exp.getFloat(name + ".five-minute").Set(float64(m.Rate5()))
+ exp.getFloat(name + ".fifteen-minute").Set(float64((m.Rate15())))
+ exp.getFloat(name + ".mean").Set(float64(m.RateMean()))
+}
+
+func (exp *exp) publishTimer(name string, metric metrics.Timer) {
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ exp.getInt(name + ".count").Set(t.Count())
+ exp.getFloat(name + ".min").Set(float64(t.Min()))
+ exp.getFloat(name + ".max").Set(float64(t.Max()))
+ exp.getFloat(name + ".mean").Set(float64(t.Mean()))
+ exp.getFloat(name + ".std-dev").Set(float64(t.StdDev()))
+ exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
+ exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
+ exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
+ exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
+ exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
+ exp.getFloat(name + ".one-minute").Set(float64(t.Rate1()))
+ exp.getFloat(name + ".five-minute").Set(float64(t.Rate5()))
+ exp.getFloat(name + ".fifteen-minute").Set(float64((t.Rate15())))
+ exp.getFloat(name + ".mean-rate").Set(float64(t.RateMean()))
+}
+
+func (exp *exp) syncToExpvar() {
+ exp.registry.Each(func(name string, i interface{}) {
+ switch i.(type) {
+ case metrics.Counter:
+ exp.publishCounter(name, i.(metrics.Counter))
+ case metrics.Gauge:
+ exp.publishGauge(name, i.(metrics.Gauge))
+ case metrics.GaugeFloat64:
+ exp.publishGaugeFloat64(name, i.(metrics.GaugeFloat64))
+ case metrics.Histogram:
+ exp.publishHistogram(name, i.(metrics.Histogram))
+ case metrics.Meter:
+ exp.publishMeter(name, i.(metrics.Meter))
+ case metrics.Timer:
+ exp.publishTimer(name, i.(metrics.Timer))
+ default:
+ panic(fmt.Sprintf("unsupported type for '%s': %T", name, i))
+ }
+ })
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 000000000..d618c4553
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+ Snapshot() Gauge
+ Update(int64)
+ Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+ c := NewGauge()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &FunctionalGauge{value: f}
+}
+
+
+// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+ c := NewFunctionalGauge(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+ panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+ value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+ return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+ atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+ return atomic.LoadInt64(&g.value)
+}
+// FunctionalGauge returns value from given function
+type FunctionalGauge struct {
+ value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+ panic("Update called on a FunctionalGauge")
+} \ No newline at end of file
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 000000000..6f93920b2
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,127 @@
+package metrics
+
+import "sync"
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+ Snapshot() GaugeFloat64
+ Update(float64)
+ Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &StandardGaugeFloat64{
+ value: 0.0,
+ }
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ c := NewGaugeFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+ c := NewFunctionalGaugeFloat64(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+ panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// sync.Mutex to manage a single float64 value.
+type StandardGaugeFloat64 struct {
+ mutex sync.Mutex
+ value float64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+ return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ g.value = v
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ return g.value
+}
+
+// FunctionalGaugeFloat64 returns value from given function
+type FunctionalGaugeFloat64 struct {
+ value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+ panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 000000000..abd0a7d29
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+ Percentiles []float64 // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ GraphiteWithConfig(GraphiteConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+ })
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := graphite(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+// GraphiteOnce performs a single submission to Graphite, returning a
+// non-nil error on failed connections. This can be used in a loop
+// similar to GraphiteWithConfig for custom error handling.
+func GraphiteOnce(c GraphiteConfig) error {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+ return graphite(&c)
+}
+
+func graphite(c *GraphiteConfig) error {
+ now := time.Now().Unix()
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+ case Gauge:
+ fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+ }
+ w.Flush()
+ })
+ return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
new file mode 100644
index 000000000..445131cae
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+ Check()
+ Error() error
+ Healthy()
+ Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+ if UseNilMetrics {
+ return NilHealthcheck{}
+ }
+ return &StandardHealthcheck{nil, f}
+}
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+ err error
+ f func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+ h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+ return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+ h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy. The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+ h.err = err
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
new file mode 100644
index 000000000..dbc837fe4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/histogram.go
@@ -0,0 +1,202 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Sample() Sample
+ Snapshot() Histogram
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+ if UseNilMetrics {
+ return NilHistogram{}
+ }
+ return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+ c := NewHistogram(s)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+ sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+ panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+ panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+ sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+ return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
new file mode 100644
index 000000000..2fdcbcfbf
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/json.go
@@ -0,0 +1,87 @@
+package metrics
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+ data := make(map[string]map[string]interface{})
+ r.Each(func(name string, i interface{}) {
+ values := make(map[string]interface{})
+ switch metric := i.(type) {
+ case Counter:
+ values["count"] = metric.Count()
+ case Gauge:
+ values["value"] = metric.Value()
+ case GaugeFloat64:
+ values["value"] = metric.Value()
+ case Healthcheck:
+ values["error"] = nil
+ metric.Check()
+ if err := metric.Error(); nil != err {
+ values["error"] = metric.Error().Error()
+ }
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ values["count"] = h.Count()
+ values["min"] = h.Min()
+ values["max"] = h.Max()
+ values["mean"] = h.Mean()
+ values["stddev"] = h.StdDev()
+ values["median"] = ps[0]
+ values["75%"] = ps[1]
+ values["95%"] = ps[2]
+ values["99%"] = ps[3]
+ values["99.9%"] = ps[4]
+ case Meter:
+ m := metric.Snapshot()
+ values["count"] = m.Count()
+ values["1m.rate"] = m.Rate1()
+ values["5m.rate"] = m.Rate5()
+ values["15m.rate"] = m.Rate15()
+ values["mean.rate"] = m.RateMean()
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ values["count"] = t.Count()
+ values["min"] = t.Min()
+ values["max"] = t.Max()
+ values["mean"] = t.Mean()
+ values["stddev"] = t.StdDev()
+ values["median"] = ps[0]
+ values["75%"] = ps[1]
+ values["95%"] = ps[2]
+ values["99%"] = ps[3]
+ values["99.9%"] = ps[4]
+ values["1m.rate"] = t.Rate1()
+ values["5m.rate"] = t.Rate5()
+ values["15m.rate"] = t.Rate15()
+ values["mean.rate"] = t.RateMean()
+ }
+ data[name] = values
+ })
+ return json.Marshal(data)
+}
+
+// WriteJSON writes metrics from the given registry periodically to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteJSONOnce(r, w)
+ }
+}
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+ json.NewEncoder(w).Encode(r)
+}
+
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.underlying)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
new file mode 100644
index 000000000..f8074c045
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -0,0 +1,80 @@
+package metrics
+
+import (
+ "time"
+)
+
+type Logger interface {
+ Printf(format string, v ...interface{})
+}
+
+func Log(r Registry, freq time.Duration, l Logger) {
+ LogScaled(r, freq, time.Nanosecond, l)
+}
+
+// Output each metric in the given registry periodically using the given
+// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+ du := float64(scale)
+ duSuffix := scale.String()[1:]
+
+ for _ = range time.Tick(freq) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ l.Printf("counter %s\n", name)
+ l.Printf(" count: %9d\n", metric.Count())
+ case Gauge:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ l.Printf("healthcheck %s\n", name)
+ l.Printf(" error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("histogram %s\n", name)
+ l.Printf(" count: %9d\n", h.Count())
+ l.Printf(" min: %9d\n", h.Min())
+ l.Printf(" max: %9d\n", h.Max())
+ l.Printf(" mean: %12.2f\n", h.Mean())
+ l.Printf(" stddev: %12.2f\n", h.StdDev())
+ l.Printf(" median: %12.2f\n", ps[0])
+ l.Printf(" 75%%: %12.2f\n", ps[1])
+ l.Printf(" 95%%: %12.2f\n", ps[2])
+ l.Printf(" 99%%: %12.2f\n", ps[3])
+ l.Printf(" 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ l.Printf("meter %s\n", name)
+ l.Printf(" count: %9d\n", m.Count())
+ l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
+ l.Printf(" mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("timer %s\n", name)
+ l.Printf(" count: %9d\n", t.Count())
+ l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
+ l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
+ l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
+ l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
+ l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
+ l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
+ l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
+ l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
+ l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
+ l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
+ l.Printf(" mean rate: %12.2f\n", t.RateMean())
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
new file mode 100644
index 000000000..47454f54b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+ time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak: 42604 kB
+VmSize: 42604 kB
+VmLck: 0 kB
+VmHWM: 1120 kB
+VmRSS: 1120 kB
+VmData: 35460 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 36 kB
+VmSwap: 0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+ "fmt"
+ "metrics"
+ "time"
+)
+
+func main() {
+ fmt.Sprintf("foo")
+ metrics.NewRegistry()
+ time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak: 44016 kB
+VmSize: 44016 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak: 55024 kB
+VmSize: 55024 kB
+VmLck: 0 kB
+VmHWM: 12440 kB
+VmRSS: 12440 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak: 44012 kB
+VmSize: 44012 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per counter.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak: 55020 kB
+VmSize: 55020 kB
+VmLck: 0 kB
+VmHWM: 12432 kB
+VmRSS: 12432 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 60 kB
+VmSwap: 0 kB
+```
+
+**0.12416 kB virtual, 0.11312 resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak: 72272 kB
+VmSize: 72272 kB
+VmLck: 0 kB
+VmHWM: 16204 kB
+VmRSS: 16204 kB
+VmData: 65100 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 80 kB
+VmSwap: 0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 256912 kB
+VmSize: 256912 kB
+VmLck: 0 kB
+VmHWM: 146204 kB
+VmRSS: 146204 kB
+VmData: 249740 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 448 kB
+VmSwap: 0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 908112 kB
+VmSize: 908112 kB
+VmLck: 0 kB
+VmHWM: 645832 kB
+VmRSS: 645588 kB
+VmData: 900940 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1716 kB
+VmSwap: 1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak: 62480 kB
+VmSize: 62480 kB
+VmLck: 0 kB
+VmHWM: 11572 kB
+VmRSS: 11572 kB
+VmData: 55308 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 153296 kB
+VmSize: 153296 kB
+VmLck: 0 kB
+VmHWM: 101176 kB
+VmRSS: 101176 kB
+VmData: 146124 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 240 kB
+VmSwap: 0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 557264 kB
+VmSize: 557264 kB
+VmLck: 0 kB
+VmHWM: 501056 kB
+VmRSS: 501056 kB
+VmData: 550092 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1032 kB
+VmSwap: 0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak: 74504 kB
+VmSize: 74504 kB
+VmLck: 0 kB
+VmHWM: 24124 kB
+VmRSS: 24124 kB
+VmData: 67340 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 92 kB
+VmSwap: 0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak: 278920 kB
+VmSize: 278920 kB
+VmLck: 0 kB
+VmHWM: 227300 kB
+VmRSS: 227300 kB
+VmData: 271756 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 488 kB
+VmSwap: 0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 000000000..0389ab0b8
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,233 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+ Count() int64
+ Mark(int64)
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Meter
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+func NewMeter() Meter {
+ if UseNilMetrics {
+ return NilMeter{}
+ }
+ m := newStandardMeter()
+ arbiter.Lock()
+ defer arbiter.Unlock()
+ arbiter.meters = append(arbiter.meters, m)
+ if !arbiter.started {
+ arbiter.started = true
+ go arbiter.tick()
+ }
+ return m
+}
+
+// NewMeter constructs and registers a new StandardMeter and launches a
+// goroutine.
+func NewRegisteredMeter(name string, r Registry) Meter {
+ c := NewMeter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+ count int64
+ rate1, rate5, rate15, rateMean float64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+ panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+ lock sync.RWMutex
+ snapshot *MeterSnapshot
+ a1, a5, a15 EWMA
+ startTime time.Time
+}
+
+func newStandardMeter() *StandardMeter {
+ return &StandardMeter{
+ snapshot: &MeterSnapshot{},
+ a1: NewEWMA1(),
+ a5: NewEWMA5(),
+ a15: NewEWMA15(),
+ startTime: time.Now(),
+ }
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+ m.lock.RLock()
+ count := m.snapshot.count
+ m.lock.RUnlock()
+ return count
+}
+
+// Mark records the occurance of n events.
+func (m *StandardMeter) Mark(n int64) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.snapshot.count += n
+ m.a1.Update(n)
+ m.a5.Update(n)
+ m.a15.Update(n)
+ m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+ m.lock.RLock()
+ rate1 := m.snapshot.rate1
+ m.lock.RUnlock()
+ return rate1
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+ m.lock.RLock()
+ rate5 := m.snapshot.rate5
+ m.lock.RUnlock()
+ return rate5
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+ m.lock.RLock()
+ rate15 := m.snapshot.rate15
+ m.lock.RUnlock()
+ return rate15
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+ m.lock.RLock()
+ rateMean := m.snapshot.rateMean
+ m.lock.RUnlock()
+ return rateMean
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+ m.lock.RLock()
+ snapshot := *m.snapshot
+ m.lock.RUnlock()
+ return &snapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+ // should run with write lock held on m.lock
+ snapshot := m.snapshot
+ snapshot.rate1 = m.a1.Rate()
+ snapshot.rate5 = m.a5.Rate()
+ snapshot.rate15 = m.a15.Rate()
+ snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
+}
+
+func (m *StandardMeter) tick() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.a1.Tick()
+ m.a5.Tick()
+ m.a15.Tick()
+ m.updateSnapshot()
+}
+
+type meterArbiter struct {
+ sync.RWMutex
+ started bool
+ meters []*StandardMeter
+ ticker *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+ for {
+ select {
+ case <-ma.ticker.C:
+ ma.tickMeters()
+ }
+ }
+}
+
+func (ma *meterArbiter) tickMeters() {
+ ma.RLock()
+ defer ma.RUnlock()
+ for _, meter := range ma.meters {
+ meter.tick()
+ }
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 000000000..b97a49ed1
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics. If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+//
+// NOTE(review): the constructors read this flag without synchronization,
+// so set it once at startup before any metrics are created.
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 000000000..266b6c93d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "strings"
+ "time"
+)
+
+// shortHostName caches the local host name, truncated at the first dot,
+// used to tag every exported data point.
+// NOTE(review): read and written without synchronization in
+// getShortHostname; assumes exporters run from a single goroutine — confirm
+// before adding concurrent callers.
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ OpenTSDBWithConfig(OpenTSDBConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ })
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes a OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+ // Runs forever; a failed flush is logged and retried on the next tick.
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := openTSDB(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+// getShortHostname returns the host name up to (but excluding) the first
+// dot, computing and caching it in shortHostName on first use. A failure
+// from os.Hostname is ignored and yields an empty name.
+func getShortHostname() string {
+ if shortHostName == "" {
+ host, _ := os.Hostname()
+ if index := strings.Index(host, "."); index > 0 {
+ shortHostName = host[:index]
+ } else {
+ shortHostName = host
+ }
+ }
+ return shortHostName
+}
+
+// openTSDB performs a single flush: it dials the configured address, writes
+// one line per metric field in the OpenTSDB telnet put format
+// ("put <name> <timestamp> <value> host=<host>") and closes the connection.
+// All points in one flush share the same Unix timestamp.
+func openTSDB(c *OpenTSDBConfig) error {
+ shortHostname := getShortHostname()
+ now := time.Now().Unix()
+ // du converts timer durations from nanoseconds to c.DurationUnit.
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+ case Gauge:
+ fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ case Histogram:
+ // Snapshot once so count/min/max/percentiles are consistent.
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+ // Durations and their percentiles are scaled by du; rates are not.
+ fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+ }
+ w.Flush()
+ })
+ return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
new file mode 100644
index 000000000..9086dcbdd
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -0,0 +1,270 @@
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists. If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+// Error implements the error interface for DuplicateMetric.
+func (err DuplicateMetric) Error() string {
+ return fmt.Sprintf("duplicate metric: %s", string(err))
+}
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+ // Call the given function for each registered metric.
+ Each(func(string, interface{}))
+
+ // Get the metric by the given name or nil if none is registered.
+ Get(string) interface{}
+
+ // Gets an existing metric or registers the given one.
+ // The interface can be the metric to register if not found in registry,
+ // or a function returning the metric for lazy instantiation.
+ GetOrRegister(string, interface{}) interface{}
+
+ // Register the given metric under the given name.
+ Register(string, interface{}) error
+
+ // Run all registered healthchecks.
+ RunHealthchecks()
+
+ // Unregister the metric with the given name.
+ Unregister(string)
+
+ // Unregister all metrics. (Mostly for testing.)
+ UnregisterAll()
+}
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics.
+type StandardRegistry struct {
+ metrics map[string]interface{}
+ mutex sync.Mutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+ return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+// registered() returns a copy taken under the mutex, so f is invoked
+// without the registry lock held and may itself use the registry.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+ for name, i := range r.registered() {
+ f(name, i)
+ }
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if metric, ok := r.metrics[name]; ok {
+ return metric
+ }
+ // Lazy instantiation: a func argument is called (with no arguments)
+ // to produce the metric only when it is actually needed.
+ if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+ i = v.Call(nil)[0].Interface()
+ }
+ r.register(name, i)
+ return i
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+func (r *StandardRegistry) RunHealthchecks() {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for _, i := range r.metrics {
+ if h, ok := i.(Healthcheck); ok {
+ h.Check()
+ }
+ }
+}
+
+// Unregister the metric with the given name.
+func (r *StandardRegistry) Unregister(name string) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ delete(r.metrics, name)
+}
+
+// Unregister all metrics. (Mostly for testing.)
+func (r *StandardRegistry) UnregisterAll() {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for name, _ := range r.metrics {
+ delete(r.metrics, name)
+ }
+}
+
+// register stores i under name; must be called with r.mutex held.
+// Note that values which are not one of the known metric types are
+// silently dropped: nil is returned but nothing is stored.
+func (r *StandardRegistry) register(name string, i interface{}) error {
+ if _, ok := r.metrics[name]; ok {
+ return DuplicateMetric(name)
+ }
+ switch i.(type) {
+ case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+ r.metrics[name] = i
+ }
+ return nil
+}
+
+// registered returns a shallow copy of the metrics map, taken under the
+// mutex, for lock-free iteration by Each.
+func (r *StandardRegistry) registered() map[string]interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ metrics := make(map[string]interface{}, len(r.metrics))
+ for name, i := range r.metrics {
+ metrics[name] = i
+ }
+ return metrics
+}
+
+// PrefixedRegistry delegates to an underlying Registry, prepending a fixed
+// prefix to every metric name it registers or looks up.
+type PrefixedRegistry struct {
+ underlying Registry
+ prefix string
+}
+
+// NewPrefixedRegistry creates a PrefixedRegistry over a fresh
+// StandardRegistry.
+func NewPrefixedRegistry(prefix string) Registry {
+ return &PrefixedRegistry{
+ underlying: NewRegistry(),
+ prefix: prefix,
+ }
+}
+
+// NewPrefixedChildRegistry creates a PrefixedRegistry over an existing
+// parent registry; prefixes accumulate when parents are themselves prefixed.
+func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
+ return &PrefixedRegistry{
+ underlying: parent,
+ prefix: prefix,
+ }
+}
+
+// Call the given function for each registered metric.
+// Only metrics whose name carries this registry's accumulated prefix are
+// visited. Note that fn receives the full, prefixed name — the prefix is
+// not stripped before the callback is invoked.
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
+ wrappedFn := func (prefix string) func(string, interface{}) {
+ return func(name string, iface interface{}) {
+ if strings.HasPrefix(name,prefix) {
+ fn(name, iface)
+ } else {
+ return
+ }
+ }
+ }
+
+ baseRegistry, prefix := findPrefix(r, "")
+ baseRegistry.Each(wrappedFn(prefix))
+}
+
+// findPrefix walks down a chain of nested PrefixedRegistries, prepending
+// each level's prefix to the accumulated one, until it reaches the
+// underlying StandardRegistry. Returns (nil, "") if the chain bottoms out
+// in some other Registry implementation.
+func findPrefix(registry Registry, prefix string) (Registry, string) {
+ switch r := registry.(type) {
+ case *PrefixedRegistry:
+ return findPrefix(r.underlying, r.prefix + prefix)
+ case *StandardRegistry:
+ return r, prefix
+ }
+ return nil, ""
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *PrefixedRegistry) Get(name string) interface{} {
+ realName := r.prefix + name
+ return r.underlying.Get(realName)
+}
+
+// Gets an existing metric or registers the given one.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+ realName := r.prefix + name
+ return r.underlying.GetOrRegister(realName, metric)
+}
+
+// Register the given metric under the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
+ realName := r.prefix + name
+ return r.underlying.Register(realName, metric)
+}
+
+// Run all registered healthchecks.
+func (r *PrefixedRegistry) RunHealthchecks() {
+ r.underlying.RunHealthchecks()
+}
+
+// Unregister the metric with the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Unregister(name string) {
+ realName := r.prefix + name
+ r.underlying.Unregister(realName)
+}
+
+// Unregister all metrics. (Mostly for testing.)
+// Note this clears the WHOLE underlying registry, not just names carrying
+// this registry's prefix.
+func (r *PrefixedRegistry) UnregisterAll() {
+ r.underlying.UnregisterAll()
+}
+
+// DefaultRegistry is the registry used by the package-level convenience
+// functions below.
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+ DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+ return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+ return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+ return DefaultRegistry.Register(name, i)
+}
+
+// Register the given metric under the given name. Panics if a metric by the
+// given name is already registered.
+func MustRegister(name string, i interface{}) {
+ if err := Register(name, i); err != nil {
+ panic(err)
+ }
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+ DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+ DefaultRegistry.Unregister(name)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
new file mode 100644
index 000000000..11c6b785a
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -0,0 +1,212 @@
+package metrics
+
+import (
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+var (
+ // memStats is a single package-level buffer reused by every call to
+ // CaptureRuntimeMemStatsOnce.
+ memStats runtime.MemStats
+ // runtimeMetrics mirrors runtime.MemStats field-for-field, plus a few
+ // extra runtime counters.
+ runtimeMetrics struct {
+ MemStats struct {
+ Alloc Gauge
+ BuckHashSys Gauge
+ DebugGC Gauge
+ EnableGC Gauge
+ Frees Gauge
+ HeapAlloc Gauge
+ HeapIdle Gauge
+ HeapInuse Gauge
+ HeapObjects Gauge
+ HeapReleased Gauge
+ HeapSys Gauge
+ LastGC Gauge
+ Lookups Gauge
+ Mallocs Gauge
+ MCacheInuse Gauge
+ MCacheSys Gauge
+ MSpanInuse Gauge
+ MSpanSys Gauge
+ NextGC Gauge
+ NumGC Gauge
+ GCCPUFraction GaugeFloat64
+ PauseNs Histogram
+ PauseTotalNs Gauge
+ StackInuse Gauge
+ StackSys Gauge
+ Sys Gauge
+ TotalAlloc Gauge
+ }
+ NumCgoCall Gauge
+ NumGoroutine Gauge
+ NumThread Gauge
+ ReadMemStats Timer
+ }
+ // The following hold the values seen at the previous capture, so the
+ // corresponding gauges report per-interval deltas rather than the
+ // runtime's cumulative totals.
+ frees uint64
+ lookups uint64
+ mallocs uint64
+ numGC uint32
+ numCgoCalls int64
+
+ threadCreateProfile = pprof.Lookup("threadcreate")
+)
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called as a goroutine.
+// It blocks forever, capturing once every d.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureRuntimeMemStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called in a background
+// goroutine. Giving a registry which has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+ t := time.Now()
+ runtime.ReadMemStats(&memStats) // This takes 50-200us.
+ runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+ runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+ runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+ // Booleans are exported as 0/1 gauges.
+ if memStats.DebugGC {
+ runtimeMetrics.MemStats.DebugGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.DebugGC.Update(0)
+ }
+ if memStats.EnableGC {
+ runtimeMetrics.MemStats.EnableGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.EnableGC.Update(0)
+ }
+
+ // Cumulative runtime counters are reported as deltas since the last
+ // capture (see the frees/lookups/mallocs/numGC package vars).
+ runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+ runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+ runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+ runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+ runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+ runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+ runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+ runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+ runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+ runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+ runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+ runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+ runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+ runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+ runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+ runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+ runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
+
+ // <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
+ // memStats.PauseNs is a circular buffer of recent GC pause times.
+ // i indexes the first pause not yet recorded at the previous capture;
+ // ii is where the runtime will write the next pause.
+ i := numGC % uint32(len(memStats.PauseNs))
+ ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+ if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+ // The buffer wrapped completely since last capture: record every
+ // slot once (older pauses were overwritten and are lost).
+ for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ } else {
+ // Record [i, ii), handling a wrap around the end of the buffer.
+ if i > ii {
+ for ; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ i = 0
+ }
+ for ; i < ii; i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ }
+ // Remember the cumulative totals for the next delta computation.
+ frees = memStats.Frees
+ lookups = memStats.Lookups
+ mallocs = memStats.Mallocs
+ numGC = memStats.NumGC
+
+ runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+ runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+ runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+ runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+ runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+ currentNumCgoCalls := numCgoCall()
+ runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+ numCgoCalls = currentNumCgoCalls
+
+ runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+
+ runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
+}
+
+// Register runtimeMetrics for the Go runtime statistics exported in runtime and
+// specifically runtime.MemStats. The runtimeMetrics are named by their
+// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
+//
+// Note that the metric objects live in package-level state, so calling this
+// with multiple registries registers the SAME metric instances in each, and
+// errors from Register (e.g. duplicates) are ignored.
+func RegisterRuntimeMemStats(r Registry) {
+ runtimeMetrics.MemStats.Alloc = NewGauge()
+ runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+ runtimeMetrics.MemStats.DebugGC = NewGauge()
+ runtimeMetrics.MemStats.EnableGC = NewGauge()
+ runtimeMetrics.MemStats.Frees = NewGauge()
+ runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+ runtimeMetrics.MemStats.HeapIdle = NewGauge()
+ runtimeMetrics.MemStats.HeapInuse = NewGauge()
+ runtimeMetrics.MemStats.HeapObjects = NewGauge()
+ runtimeMetrics.MemStats.HeapReleased = NewGauge()
+ runtimeMetrics.MemStats.HeapSys = NewGauge()
+ runtimeMetrics.MemStats.LastGC = NewGauge()
+ runtimeMetrics.MemStats.Lookups = NewGauge()
+ runtimeMetrics.MemStats.Mallocs = NewGauge()
+ runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+ runtimeMetrics.MemStats.MCacheSys = NewGauge()
+ runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+ runtimeMetrics.MemStats.MSpanSys = NewGauge()
+ runtimeMetrics.MemStats.NextGC = NewGauge()
+ runtimeMetrics.MemStats.NumGC = NewGauge()
+ runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+ runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+ runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+ runtimeMetrics.MemStats.StackInuse = NewGauge()
+ runtimeMetrics.MemStats.StackSys = NewGauge()
+ runtimeMetrics.MemStats.Sys = NewGauge()
+ runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+ runtimeMetrics.NumCgoCall = NewGauge()
+ runtimeMetrics.NumGoroutine = NewGauge()
+ runtimeMetrics.NumThread = NewGauge()
+ runtimeMetrics.ReadMemStats = NewTimer()
+
+ r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+ r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+ r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+ r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+ r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+ r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+ r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+ r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+ r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+ r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+ r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+ r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+ r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+ r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+ r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+ r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+ r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+ r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+ r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+ r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+ r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+ r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+ r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+ r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+ r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+ r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+ r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+ r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+ r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+ r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+ r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
new file mode 100644
index 000000000..e3391f4e8
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
@@ -0,0 +1,10 @@
+// +build cgo
+// +build !appengine
+
+package metrics
+
+import "runtime"
+
+// numCgoCall reports the cumulative number of cgo calls; built only when
+// cgo is enabled and not on App Engine (see runtime_no_cgo.go otherwise).
+func numCgoCall() int64 {
+ return runtime.NumCgoCall()
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
new file mode 100644
index 000000000..ca12c05ba
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build go1.5
+
+package metrics
+
+import "runtime"
+
+// gcCPUFraction returns MemStats.GCCPUFraction, a field added in Go 1.5
+// (see runtime_no_gccpufraction.go for the pre-1.5 stub).
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+ return memStats.GCCPUFraction
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
new file mode 100644
index 000000000..616a3b475
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo appengine
+
+package metrics
+
+// numCgoCall is a stub for builds without cgo (or on App Engine), where
+// runtime.NumCgoCall is unavailable or meaningless.
+func numCgoCall() int64 {
+ return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
new file mode 100644
index 000000000..be96aa6f1
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build !go1.5
+
+package metrics
+
+import "runtime"
+
+// gcCPUFraction is a stub for Go versions before 1.5, which lack the
+// MemStats.GCCPUFraction field.
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+ return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
new file mode 100644
index 000000000..5f6a37788
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/sample.go
@@ -0,0 +1,609 @@
+package metrics
+
+import (
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+// rescaleThreshold is how often ExpDecaySample rescales its priority keys
+// to keep them numerically stable.
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Size() int
+ Snapshot() Sample
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Values() []int64
+ Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+//
+// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
+type ExpDecaySample struct {
+ alpha float64
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ t0, t1 time.Time
+ values *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha. Returns a NilSample stub when
+// UseNilMetrics is set.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ s := &ExpDecaySample{
+ alpha: alpha,
+ reservoirSize: reservoirSize,
+ t0: time.Now(),
+ values: newExpDecaySampleHeap(reservoirSize),
+ }
+ // t1 is the deadline for the next key rescale in update.
+ s.t1 = s.t0.Add(rescaleThreshold)
+ return s
+}
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.t0 = time.Now()
+ s.t1 = s.t0.Add(rescaleThreshold)
+ s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+ return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+ return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+ return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+ return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ // Strip the priority keys; only the raw values go into the snapshot.
+ vals := s.values.Values()
+ values := make([]int64, len(vals))
+ for i, v := range vals {
+ values[i] = v.v
+ }
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+ return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+ return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+ s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ vals := s.values.Values()
+ values := make([]int64, len(vals))
+ for i, v := range vals {
+ values[i] = v.v
+ }
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+ return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp. This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
+ // When the reservoir is full, evict the lowest-priority entry to make
+ // room for the new one.
+ if s.values.Size() == s.reservoirSize {
+ s.values.Pop()
+ }
+ // Priority key per the forward-decay paper: exp(alpha*age)/u with
+ // u uniform in [0,1).
+ // NOTE(review): rand.Float64 can return exactly 0, which would make
+ // the key +Inf — upstream accepts this; confirm before relying on it.
+ s.values.Push(expDecaySample{
+ k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+ v: v,
+ })
+ if t.After(s.t1) {
+ // Periodic rescale: rebase all keys onto the new landmark time t
+ // to avoid the exponential terms overflowing.
+ values := s.values.Values()
+ t0 := s.t0
+ s.values.Clear()
+ s.t0 = t
+ s.t1 = s.t0.Add(rescaleThreshold)
+ for _, v := range values {
+ v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+ s.values.Push(v)
+ }
+ }
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+// An empty slice yields 0.
+func SampleMax(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var max int64 = math.MinInt64
+ for _, v := range values {
+ if max < v {
+ max = v
+ }
+ }
+ return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+// An empty slice yields 0.0.
+func SampleMean(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+// An empty slice yields 0.
+func SampleMin(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var min int64 = math.MaxInt64
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ }
+ return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+ return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64. Note that it sorts the caller's slice in place. Each percentile is
+// computed by linear interpolation between the two nearest ranks.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+ scores := make([]float64, len(ps))
+ size := len(values)
+ if size > 0 {
+ sort.Sort(values)
+ for i, p := range ps {
+ pos := p * float64(size+1)
+ if pos < 1.0 {
+ scores[i] = float64(values[0])
+ } else if pos >= float64(size) {
+ scores[i] = float64(values[size-1])
+ } else {
+ lower := float64(values[int(pos)-1])
+ upper := float64(values[int(pos)])
+ scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+ }
+ }
+ }
+ return scores
+}
+
// SampleSnapshot is a read-only copy of another Sample.
type SampleSnapshot struct {
	count  int64   // number of inputs the source sample had seen
	values []int64 // the source sample's values at snapshot time
}

// Clear panics.
func (*SampleSnapshot) Clear() {
	panic("Clear called on a SampleSnapshot")
}

// Count returns the count of inputs at the time the snapshot was taken.
func (s *SampleSnapshot) Count() int64 { return s.count }

// Max returns the maximal value at the time the snapshot was taken.
func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }

// Mean returns the mean value at the time the snapshot was taken.
func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }

// Min returns the minimal value at the time the snapshot was taken.
func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }

// Percentile returns an arbitrary percentile of values at the time the
// snapshot was taken.
//
// Note: SamplePercentiles sorts s.values in place, so the snapshot's
// internal ordering changes (the set of values does not).
func (s *SampleSnapshot) Percentile(p float64) float64 {
	return SamplePercentile(s.values, p)
}

// Percentiles returns a slice of arbitrary percentiles of values at the time
// the snapshot was taken. See Percentile for the in-place sorting caveat.
func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
	return SamplePercentiles(s.values, ps)
}

// Size returns the size of the sample at the time the snapshot was taken.
func (s *SampleSnapshot) Size() int { return len(s.values) }

// Snapshot returns the snapshot itself; it is already read-only.
func (s *SampleSnapshot) Snapshot() Sample { return s }

// StdDev returns the standard deviation of values at the time the snapshot was
// taken.
func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }

// Sum returns the sum of values at the time the snapshot was taken.
func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }

// Update panics.
func (*SampleSnapshot) Update(int64) {
	panic("Update called on a SampleSnapshot")
}

// Values returns a copy of the values in the sample.
func (s *SampleSnapshot) Values() []int64 {
	values := make([]int64, len(s.values))
	copy(values, s.values)
	return values
}

// Variance returns the variance of values at the time the snapshot was taken.
func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
// SampleStdDev returns the standard deviation of the slice of int64.
func SampleStdDev(values []int64) float64 {
	return math.Sqrt(SampleVariance(values))
}

// SampleSum returns the sum of the slice of int64.
func SampleSum(values []int64) int64 {
	var total int64
	for _, v := range values {
		total += v
	}
	return total
}

// SampleVariance returns the population variance of the slice of int64
// (sum of squared deviations divided by n), or 0 for an empty slice.
func SampleVariance(values []int64) float64 {
	n := len(values)
	if n == 0 {
		return 0.0
	}
	var total int64
	for _, v := range values {
		total += v
	}
	mean := float64(total) / float64(n)
	var squares float64
	for _, v := range values {
		d := float64(v) - mean
		squares += d * d
	}
	return squares / float64(n)
}
+
// UniformSample is a uniform sample using Vitter's Algorithm R: once the
// reservoir is full, each new value overwrites a random slot, keeping every
// value ever offered equally likely to be retained.
//
// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
type UniformSample struct {
	count         int64      // total number of values ever passed to Update
	mutex         sync.Mutex // guards count and values
	reservoirSize int        // maximum number of retained values
	values        []int64    // the reservoir; len(values) <= reservoirSize
}

// NewUniformSample constructs a new uniform sample with the given reservoir
// size, or the no-op NilSample when UseNilMetrics is set.
func NewUniformSample(reservoirSize int) Sample {
	if UseNilMetrics {
		return NilSample{}
	}
	return &UniformSample{
		reservoirSize: reservoirSize,
		values:        make([]int64, 0, reservoirSize),
	}
}

// Clear clears all samples.
func (s *UniformSample) Clear() {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.count = 0
	s.values = make([]int64, 0, s.reservoirSize)
}

// Count returns the number of samples recorded, which may exceed the
// reservoir size.
func (s *UniformSample) Count() int64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.count
}

// Max returns the maximum value in the sample, which may not be the maximum
// value ever to be part of the sample.
func (s *UniformSample) Max() int64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return SampleMax(s.values)
}

// Mean returns the mean of the values in the sample.
func (s *UniformSample) Mean() float64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return SampleMean(s.values)
}

// Min returns the minimum value in the sample, which may not be the minimum
// value ever to be part of the sample.
func (s *UniformSample) Min() int64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return SampleMin(s.values)
}

// Percentile returns an arbitrary percentile of values in the sample.
// Note that SamplePercentiles sorts the reservoir in place (under the lock).
func (s *UniformSample) Percentile(p float64) float64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return SamplePercentile(s.values, p)
}

// Percentiles returns a slice of arbitrary percentiles of values in the
// sample. See Percentile for the in-place sorting note.
func (s *UniformSample) Percentiles(ps []float64) []float64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return SamplePercentiles(s.values, ps)
}

// Size returns the size of the sample, which is at most the reservoir size.
func (s *UniformSample) Size() int {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return len(s.values)
}

// Snapshot returns a read-only copy of the sample.
func (s *UniformSample) Snapshot() Sample {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	values := make([]int64, len(s.values))
	copy(values, s.values)
	return &SampleSnapshot{
		count:  s.count,
		values: values,
	}
}

// StdDev returns the standard deviation of the values in the sample.
func (s *UniformSample) StdDev() float64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return SampleStdDev(s.values)
}

// Sum returns the sum of the values in the sample.
func (s *UniformSample) Sum() int64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return SampleSum(s.values)
}

// Update samples a new value.
func (s *UniformSample) Update(v int64) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.count++
	if len(s.values) < s.reservoirSize {
		s.values = append(s.values, v)
	} else {
		// Algorithm R: overwrite a uniformly random slot with probability
		// len(values)/count, which keeps every value seen so far equally
		// likely to remain in the reservoir.
		r := rand.Int63n(s.count)
		if r < int64(len(s.values)) {
			s.values[int(r)] = v
		}
	}
}

// Values returns a copy of the values in the sample.
func (s *UniformSample) Values() []int64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	values := make([]int64, len(s.values))
	copy(values, s.values)
	return values
}

// Variance returns the variance of the values in the sample.
func (s *UniformSample) Variance() float64 {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return SampleVariance(s.values)
}
+
// expDecaySample represents an individual sample in a heap.
type expDecaySample struct {
	k float64 // priority key used for heap ordering
	v int64   // the sampled value
}

// newExpDecaySampleHeap constructs an empty heap with capacity for
// reservoirSize samples.
func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
	return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
}

// expDecaySampleHeap is a min-heap of expDecaySamples, ordered by k.
// The internal implementation is copied from the standard library's container/heap
type expDecaySampleHeap struct {
	s []expDecaySample
}

// Clear empties the heap while keeping its backing storage.
func (h *expDecaySampleHeap) Clear() {
	h.s = h.s[:0]
}

// Push adds s to the heap and restores the heap invariant.
//
// NOTE(review): this reslices past len(h.s) instead of using append, so it
// assumes len(h.s) < cap(h.s) — i.e. callers never push more than the
// reservoir size given to newExpDecaySampleHeap. Confirm at call sites.
func (h *expDecaySampleHeap) Push(s expDecaySample) {
	n := len(h.s)
	h.s = h.s[0 : n+1]
	h.s[n] = s
	h.up(n)
}

// Pop removes and returns the sample with the smallest k.
func (h *expDecaySampleHeap) Pop() expDecaySample {
	// Move the root to the end, then sift the swapped element down over the
	// remaining n elements.
	n := len(h.s) - 1
	h.s[0], h.s[n] = h.s[n], h.s[0]
	h.down(0, n)

	n = len(h.s)
	s := h.s[n-1]
	h.s = h.s[0 : n-1]
	return s
}

// Size returns the number of samples currently in the heap.
func (h *expDecaySampleHeap) Size() int {
	return len(h.s)
}

// Values returns the heap's backing slice (not a copy), in heap order —
// not fully sorted.
func (h *expDecaySampleHeap) Values() []expDecaySample {
	return h.s
}

// up sifts the element at index j toward the root while its key is smaller
// than its parent's.
func (h *expDecaySampleHeap) up(j int) {
	for {
		i := (j - 1) / 2 // parent
		if i == j || !(h.s[j].k < h.s[i].k) {
			break
		}
		h.s[i], h.s[j] = h.s[j], h.s[i]
		j = i
	}
}

// down sifts the element at index i toward the leaves, swapping with its
// smaller child, restricted to the first n elements.
func (h *expDecaySampleHeap) down(i, n int) {
	for {
		j1 := 2*i + 1
		if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
			break
		}
		j := j1 // left child
		if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
			j = j2 // = 2*i + 2 // right child
		}
		if !(h.s[j].k < h.s[i].k) {
			break
		}
		h.s[i], h.s[j] = h.s[j], h.s[i]
		i = j
	}
}
+
// int64Slice implements sort.Interface for a slice of int64, ordering in
// increasing value.
type int64Slice []int64

func (s int64Slice) Len() int           { return len(s) }
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
new file mode 100644
index 000000000..693f19085
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+ "fmt"
+ "log/syslog"
+ "time"
+)
+
+// Output each metric in the given registry to syslog periodically using
+// the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+ for _ = range time.Tick(d) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+ case Gauge:
+ w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+ case GaugeFloat64:
+ w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+ case Healthcheck:
+ metric.Check()
+ w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+ name,
+ h.Count(),
+ h.Min(),
+ h.Max(),
+ h.Mean(),
+ h.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ ))
+ case Meter:
+ m := metric.Snapshot()
+ w.Info(fmt.Sprintf(
+ "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+ name,
+ m.Count(),
+ m.Rate1(),
+ m.Rate5(),
+ m.Rate15(),
+ m.RateMean(),
+ ))
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+ name,
+ t.Count(),
+ t.Min(),
+ t.Max(),
+ t.Mean(),
+ t.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ t.Rate1(),
+ t.Rate5(),
+ t.Rate15(),
+ t.RateMean(),
+ ))
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 000000000..17db8f8d2
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,311 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
// Timers capture the duration and rate of events.
//
// The distribution methods (Count, Max, Mean, Min, Percentile(s), StdDev,
// Sum, Variance) describe the recorded durations; the Rate methods describe
// the frequency of events per second.
type Timer interface {
	Count() int64
	Max() int64
	Mean() float64
	Min() int64
	Percentile(float64) float64
	Percentiles([]float64) []float64
	Rate1() float64
	Rate5() float64
	Rate15() float64
	RateMean() float64
	Snapshot() Timer
	StdDev() float64
	Sum() int64
	Time(func())
	Update(time.Duration)
	UpdateSince(time.Time)
	Variance() float64
}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: h,
+ meter: m,
+ }
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+func NewRegisteredTimer(name string, r Registry) Timer {
+ c := NewTimer()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+func NewTimer() Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+ meter: NewMeter(),
+ }
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+ h Histogram
+ m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
//
// The mutex guards the compound operations (Update, UpdateSince, Snapshot).
// NOTE(review): the simple accessors below read histogram/meter without
// taking the mutex; whether that is safe depends on those implementations'
// own locking — confirm before relying on it.
type StandardTimer struct {
	histogram Histogram
	meter     Meter
	mutex     sync.Mutex
}

// Count returns the number of events recorded.
func (t *StandardTimer) Count() int64 {
	return t.histogram.Count()
}

// Max returns the maximum value in the sample.
func (t *StandardTimer) Max() int64 {
	return t.histogram.Max()
}

// Mean returns the mean of the values in the sample.
func (t *StandardTimer) Mean() float64 {
	return t.histogram.Mean()
}

// Min returns the minimum value in the sample.
func (t *StandardTimer) Min() int64 {
	return t.histogram.Min()
}

// Percentile returns an arbitrary percentile of the values in the sample.
func (t *StandardTimer) Percentile(p float64) float64 {
	return t.histogram.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of the values in the
// sample.
func (t *StandardTimer) Percentiles(ps []float64) []float64 {
	return t.histogram.Percentiles(ps)
}

// Rate1 returns the one-minute moving average rate of events per second.
func (t *StandardTimer) Rate1() float64 {
	return t.meter.Rate1()
}

// Rate5 returns the five-minute moving average rate of events per second.
func (t *StandardTimer) Rate5() float64 {
	return t.meter.Rate5()
}

// Rate15 returns the fifteen-minute moving average rate of events per second.
func (t *StandardTimer) Rate15() float64 {
	return t.meter.Rate15()
}

// RateMean returns the meter's mean rate of events per second.
func (t *StandardTimer) RateMean() float64 {
	return t.meter.RateMean()
}

// Snapshot returns a read-only copy of the timer. It is taken under the
// lock so the histogram and meter snapshots are mutually consistent with
// respect to concurrent Update calls.
func (t *StandardTimer) Snapshot() Timer {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	return &TimerSnapshot{
		histogram: t.histogram.Snapshot().(*HistogramSnapshot),
		meter:     t.meter.Snapshot().(*MeterSnapshot),
	}
}

// StdDev returns the standard deviation of the values in the sample.
func (t *StandardTimer) StdDev() float64 {
	return t.histogram.StdDev()
}

// Sum returns the sum in the sample.
func (t *StandardTimer) Sum() int64 {
	return t.histogram.Sum()
}

// Time records the duration of the execution of the given function.
func (t *StandardTimer) Time(f func()) {
	ts := time.Now()
	f()
	t.Update(time.Since(ts))
}

// Update records the duration of an event (as nanoseconds in the histogram)
// and marks one event on the meter.
func (t *StandardTimer) Update(d time.Duration) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.histogram.Update(int64(d))
	t.meter.Mark(1)
}

// UpdateSince records the duration of an event that started at a time and
// ends now.
func (t *StandardTimer) UpdateSince(ts time.Time) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.histogram.Update(int64(time.Since(ts)))
	t.meter.Mark(1)
}

// Variance returns the variance of the values in the sample.
func (t *StandardTimer) Variance() float64 {
	return t.histogram.Variance()
}
+
// TimerSnapshot is a read-only copy of another Timer.
type TimerSnapshot struct {
	histogram *HistogramSnapshot // duration statistics at snapshot time
	meter     *MeterSnapshot     // rate statistics at snapshot time
}

// Count returns the number of events recorded at the time the snapshot was
// taken.
func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }

// Max returns the maximum value at the time the snapshot was taken.
func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }

// Mean returns the mean value at the time the snapshot was taken.
func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }

// Min returns the minimum value at the time the snapshot was taken.
func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }

// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
func (t *TimerSnapshot) Percentile(p float64) float64 {
	return t.histogram.Percentile(p)
}

// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
	return t.histogram.Percentiles(ps)
}

// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }

// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }

// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }

// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }

// Snapshot returns the snapshot itself; it is already read-only.
func (t *TimerSnapshot) Snapshot() Timer { return t }

// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }

// Sum returns the sum at the time the snapshot was taken.
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }

// Time panics: a snapshot is read-only.
func (*TimerSnapshot) Time(func()) {
	panic("Time called on a TimerSnapshot")
}

// Update panics: a snapshot is read-only.
func (*TimerSnapshot) Update(time.Duration) {
	panic("Update called on a TimerSnapshot")
}

// UpdateSince panics: a snapshot is read-only.
func (*TimerSnapshot) UpdateSince(time.Time) {
	panic("UpdateSince called on a TimerSnapshot")
}

// Variance returns the variance of the values at the time the snapshot was
// taken.
func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100755
index 000000000..f6499982e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 000000000..091e971d2
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "time"
+)
+
+// Write sorts writes each metric in the given registry periodically to the
+// given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteOnce(r, w)
+ }
+}
+
// WriteOnce sorts and writes metrics in the given registry to the given
// io.Writer. Metrics are ordered by name so output is deterministic.
func WriteOnce(r Registry, w io.Writer) {
	var namedMetrics namedMetricSlice
	r.Each(func(name string, i interface{}) {
		namedMetrics = append(namedMetrics, namedMetric{name, i})
	})

	sort.Sort(namedMetrics)
	for _, namedMetric := range namedMetrics {
		switch metric := namedMetric.m.(type) {
		case Counter:
			fmt.Fprintf(w, "counter %s\n", namedMetric.name)
			fmt.Fprintf(w, " count: %9d\n", metric.Count())
		case Gauge:
			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
			fmt.Fprintf(w, " value: %9d\n", metric.Value())
		case GaugeFloat64:
			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
			fmt.Fprintf(w, " value: %f\n", metric.Value())
		case Healthcheck:
			metric.Check()
			fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
			fmt.Fprintf(w, " error: %v\n", metric.Error())
		case Histogram:
			h := metric.Snapshot()
			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
			fmt.Fprintf(w, " count: %9d\n", h.Count())
			fmt.Fprintf(w, " min: %9d\n", h.Min())
			fmt.Fprintf(w, " max: %9d\n", h.Max())
			fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
			fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
			fmt.Fprintf(w, " median: %12.2f\n", ps[0])
			fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
			fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
			fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
			fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
		case Meter:
			m := metric.Snapshot()
			fmt.Fprintf(w, "meter %s\n", namedMetric.name)
			fmt.Fprintf(w, " count: %9d\n", m.Count())
			fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
			fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
			fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
			fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
		case Timer:
			t := metric.Snapshot()
			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
			fmt.Fprintf(w, "timer %s\n", namedMetric.name)
			fmt.Fprintf(w, " count: %9d\n", t.Count())
			fmt.Fprintf(w, " min: %9d\n", t.Min())
			fmt.Fprintf(w, " max: %9d\n", t.Max())
			fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
			fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
			fmt.Fprintf(w, " median: %12.2f\n", ps[0])
			fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
			fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
			fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
			fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
			fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
			fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
			fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
			fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
		}
	}
}
+
// namedMetric pairs a registered metric with its registry name.
type namedMetric struct {
	name string
	m    interface{}
}

// namedMetricSlice is a slice of namedMetrics that implements sort.Interface,
// ordering by name.
type namedMetricSlice []namedMetric

func (s namedMetricSlice) Len() int           { return len(s) }
func (s namedMetricSlice) Less(i, j int) bool { return s[i].name < s[j].name }
func (s namedMetricSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
diff --git a/vendor/github.com/rjeczalik/notify/.gitignore b/vendor/github.com/rjeczalik/notify/.gitignore
new file mode 100644
index 000000000..86d4fa8b1
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/.gitignore
@@ -0,0 +1,88 @@
+# Created by https://www.gitignore.io
+
+### OSX ###
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear on external disk
+.Spotlight-V100
+.Trashes
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+
+### Windows ###
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+
+### Linux ###
+*~
+
+# KDE directory preferences
+.directory
+
+
+### Go ###
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+
+### vim ###
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+*~
+
diff --git a/vendor/github.com/rjeczalik/notify/.travis.yml b/vendor/github.com/rjeczalik/notify/.travis.yml
new file mode 100644
index 000000000..c92863d50
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/.travis.yml
@@ -0,0 +1,29 @@
+language: go
+
+go:
+ - 1.4.3
+ - 1.6
+
+os:
+ - linux
+ - osx
+
+matrix:
+ include:
+ - os: osx
+ go: 1.6
+ env:
+ - GOFLAGS="-tags kqueue"
+
+env:
+ global:
+ - GOBIN=$HOME/bin
+ - PATH=$HOME/bin:$PATH
+
+install:
+ - go get -t -v ./...
+
+script:
+ - "(go version | grep -q 1.4) || go tool vet -all ."
+ - go install $GOFLAGS ./...
+ - go test -v -race $GOFLAGS ./...
diff --git a/vendor/github.com/rjeczalik/notify/AUTHORS b/vendor/github.com/rjeczalik/notify/AUTHORS
new file mode 100644
index 000000000..9262eae69
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/AUTHORS
@@ -0,0 +1,10 @@
+# List of individuals who contributed to the Notify package.
+#
+# The up-to-date list of the authors one may obtain with:
+#
+# ~ $ git shortlog -es | cut -f2 | rev | uniq -f1 | rev
+#
+
+Pawel Blaszczyk <blaszczykpb@gmail.com>
+Pawel Knap <pawelknap88@gmail.com>
+Rafal Jeczalik <rjeczalik@gmail.com>
diff --git a/vendor/github.com/rjeczalik/notify/LICENSE b/vendor/github.com/rjeczalik/notify/LICENSE
new file mode 100644
index 000000000..3e678817b
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2015 The Notify Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/rjeczalik/notify/README.md b/vendor/github.com/rjeczalik/notify/README.md
new file mode 100644
index 000000000..a728d1dd0
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/README.md
@@ -0,0 +1,21 @@
+notify [![GoDoc](https://godoc.org/github.com/rjeczalik/notify?status.svg)](https://godoc.org/github.com/rjeczalik/notify) [![Build Status](https://img.shields.io/travis/rjeczalik/notify/master.svg)](https://travis-ci.org/rjeczalik/notify "inotify + FSEvents + kqueue") [![Build status](https://img.shields.io/appveyor/ci/rjeczalik/notify-246.svg)](https://ci.appveyor.com/project/rjeczalik/notify-246 "ReadDirectoryChangesW") [![Coverage Status](https://img.shields.io/coveralls/rjeczalik/notify/master.svg)](https://coveralls.io/r/rjeczalik/notify?branch=master)
+======
+
+Filesystem event notification library on steroids. (under active development)
+
+*Documentation*
+
+[godoc.org/github.com/rjeczalik/notify](https://godoc.org/github.com/rjeczalik/notify)
+
+*Installation*
+
+```
+~ $ go get -u github.com/rjeczalik/notify
+```
+
+*Projects using notify*
+
+- [github.com/rjeczalik/cmd/notify](https://godoc.org/github.com/rjeczalik/cmd/notify)
+- [github.com/cortesi/devd](https://github.com/cortesi/devd)
+- [github.com/cortesi/modd](https://github.com/cortesi/modd)
+
diff --git a/vendor/github.com/rjeczalik/notify/appveyor.yml b/vendor/github.com/rjeczalik/notify/appveyor.yml
new file mode 100644
index 000000000..8e762d05c
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/appveyor.yml
@@ -0,0 +1,23 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\projects\src\github.com\rjeczalik\notify
+
+environment:
+ PATH: c:\projects\bin;%PATH%
+ GOPATH: c:\projects
+ NOTIFY_TIMEOUT: 5s
+
+install:
+ - go version
+ - go get -v -t ./...
+
+build_script:
+ - go tool vet -all .
+ - go build ./...
+ - go test -v -race ./...
+
+test: off
+
+deploy: off
diff --git a/vendor/github.com/rjeczalik/notify/debug.go b/vendor/github.com/rjeczalik/notify/debug.go
new file mode 100644
index 000000000..bd9bc468d
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/debug.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build !debug
+
+package notify
+
// dbgprint is a no-op; the active implementation lives in debug_debug.go
// behind the "debug" build tag.
func dbgprint(...interface{}) {}

// dbgprintf is a no-op; see dbgprint.
func dbgprintf(string, ...interface{}) {}
diff --git a/vendor/github.com/rjeczalik/notify/debug_debug.go b/vendor/github.com/rjeczalik/notify/debug_debug.go
new file mode 100644
index 000000000..f0622917f
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/debug_debug.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build debug
+
+package notify
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+)
+
// dbgprint prints its arguments prefixed with a "[D] " debug marker,
// followed by a blank line.
func dbgprint(v ...interface{}) {
	fmt.Printf("[D] ")
	fmt.Print(v...)
	fmt.Printf("\n\n")
}

// dbgprintf formats its arguments per format and prints the result prefixed
// with a "[D] " debug marker, followed by a blank line.
func dbgprintf(format string, v ...interface{}) {
	fmt.Printf("[D] ")
	fmt.Printf(format, v...)
	fmt.Printf("\n\n")
}
+
// dbgcallstack returns up to max frames of the current call stack (skipping
// itself) as function names, trimmed to the text after the last path
// separator.
func dbgcallstack(max int) []string {
	pc := make([]uintptr, max)
	// runtime.Callers reports how many entries it filled; iterating the
	// whole slice (as before) would also visit the zero PCs in the tail.
	n := runtime.Callers(2, pc)
	stack := make([]string, 0, n)
	for _, pc := range pc[:n] {
		f := runtime.FuncForPC(pc)
		if f == nil {
			continue
		}
		fname := f.Name()
		// NOTE(review): runtime function names always use '/' separators;
		// os.PathSeparator is '\\' on Windows, where this trim becomes a
		// no-op — confirm whether that is intended.
		if idx := strings.LastIndex(fname, string(os.PathSeparator)); idx != -1 {
			stack = append(stack, fname[idx+1:])
		} else {
			stack = append(stack, fname)
		}
	}
	return stack
}
diff --git a/vendor/github.com/rjeczalik/notify/doc.go b/vendor/github.com/rjeczalik/notify/doc.go
new file mode 100644
index 000000000..8a99ddda6
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/doc.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// Package notify implements access to filesystem events.
+//
+// Notify is a high-level abstraction over filesystem watchers like inotify,
+// kqueue, FSEvents, FEN or ReadDirectoryChangesW. Watcher implementations are
+// split into two groups: ones that natively support recursive notifications
+// (FSEvents and ReadDirectoryChangesW) and ones that do not (inotify, kqueue, FEN).
+// For more details see watcher and recursiveWatcher interfaces in watcher.go
+// source file.
+//
+// On top of filesystem watchers notify maintains a watchpoint tree, which provides
+// strategy for creating and closing filesystem watches and dispatching filesystem
+// events to user channels.
+//
+// An event set is just an event list joined using the bitwise OR operator
+// into a single event value.
+//
+// A filesystem watch or just a watch is platform-specific entity which represents
+// a single path registered for notifications for specific event set. Setting a watch
+// means using platform-specific API calls for creating / initializing said watch.
+// For each watcher the API call is:
+//
+// - FSEvents: FSEventStreamCreate
+// - inotify: inotify_add_watch
+// - kqueue: kevent
+// - ReadDirectoryChangesW: CreateFile+ReadDirectoryChangesW
+// - FEN: port_get
+//
+// To rewatch means to either shrink or expand an event set that was previously
+// registered during watch operation for particular filesystem watch.
+//
+// A watchpoint is a list of user channel and event set pairs for particular
+// path (watchpoint tree's node). A single watchpoint can contain multiple
+// different user channels registered to listen for one or more events. A single
+// user channel can be registered in one or more watchpoints, recursive and
+// non-recursive ones as well.
+package notify
diff --git a/vendor/github.com/rjeczalik/notify/event.go b/vendor/github.com/rjeczalik/notify/event.go
new file mode 100644
index 000000000..e045edcec
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/event.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+package notify
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Event represents the type of filesystem action.
+//
+// Number of available event values is dependent on the target system or the
+// watcher implementation used (e.g. it's possible to use either kqueue or
+// FSEvents on Darwin).
+//
+// Please consult documentation for your target platform to see list of all
+// available events.
+type Event uint32
+
+// Create, Remove, Write and Rename are the only event values guaranteed to be
+// present on all platforms.
+const (
+ Create = osSpecificCreate
+ Remove = osSpecificRemove
+ Write = osSpecificWrite
+ Rename = osSpecificRename
+
+	// All is a handy alias for all platform-independent event values.
+ All = Create | Remove | Write | Rename
+)
+
+const internal = recursive | omit
+
+// String implements fmt.Stringer interface.
+func (e Event) String() string {
+ var s []string
+ for _, strmap := range []map[Event]string{estr, osestr} {
+ for ev, str := range strmap {
+ if e&ev == ev {
+ s = append(s, str)
+ }
+ }
+ }
+ return strings.Join(s, "|")
+}
+
+// EventInfo describes an event reported by the underlying filesystem notification
+// subsystem.
+//
+// It always describes single event, even if the OS reported a coalesced action.
+// Reported path is absolute and clean.
+//
+// For non-recursive watchpoints its base is always equal to the path passed
+// to corresponding Watch call.
+//
+// The value of Sys is system-dependent and can be nil.
+//
+// Sys
+//
+// Under Darwin (FSEvents) Sys() always returns a non-nil *notify.FSEvent value,
+// which is defined as:
+//
+// type FSEvent struct {
+// Path string // real path of the file or directory
+// ID uint64 // ID of the event (FSEventStreamEventId)
+// Flags uint32 // joint FSEvents* flags (FSEventStreamEventFlags)
+// }
+//
+// For possible values of Flags see Darwin godoc for notify or FSEvents
+// documentation for FSEventStreamEventFlags constants:
+//
+// https://developer.apple.com/library/mac/documentation/Darwin/Reference/FSEvents_Ref/index.html#//apple_ref/doc/constant_group/FSEventStreamEventFlags
+//
+// Under Linux (inotify) Sys() always returns a non-nil *syscall.InotifyEvent
+// value, defined as:
+//
+// type InotifyEvent struct {
+// Wd int32 // Watch descriptor
+// Mask uint32 // Mask describing event
+// Cookie uint32 // Unique cookie associating related events (for rename(2))
+// Len uint32 // Size of name field
+// Name [0]uint8 // Optional null-terminated name
+// }
+//
+// More information about inotify masks and the usage of inotify_event structure
+// can be found at:
+//
+// http://man7.org/linux/man-pages/man7/inotify.7.html
+//
+// Under Darwin, DragonFlyBSD, FreeBSD, NetBSD, OpenBSD (kqueue) Sys() always
+// returns a non-nil *notify.Kevent value, which is defined as:
+//
+// type Kevent struct {
+// Kevent *syscall.Kevent_t // Kevent is a kqueue specific structure
+// FI os.FileInfo // FI describes file/dir
+// }
+//
+// More information about syscall.Kevent_t can be found at:
+//
+// https://www.freebsd.org/cgi/man.cgi?query=kqueue
+//
+// Under Windows (ReadDirectoryChangesW) Sys() always returns nil. The documentation
+// of watcher's WinAPI function can be found at:
+//
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365465%28v=vs.85%29.aspx
+type EventInfo interface {
+ Event() Event // event value for the filesystem action
+ Path() string // real path of the file or directory
+ Sys() interface{} // underlying data source (can return nil)
+}
+
+type isDirer interface {
+ isDir() (bool, error)
+}
+
+var _ fmt.Stringer = (*event)(nil)
+var _ isDirer = (*event)(nil)
+
+// String implements fmt.Stringer interface.
+func (e *event) String() string {
+ return e.Event().String() + `: "` + e.Path() + `"`
+}
+
+var estr = map[Event]string{
+ Create: "notify.Create",
+ Remove: "notify.Remove",
+ Write: "notify.Write",
+ Rename: "notify.Rename",
+ // Display name for recursive event is added only for debugging
+ // purposes. It's an internal event after all and won't be exposed to the
+ // user. Having Recursive event printable is helpful, e.g. for reading
+ // testing failure messages:
+ //
+ // --- FAIL: TestWatchpoint (0.00 seconds)
+ // watchpoint_test.go:64: want diff=[notify.Remove notify.Create|notify.Remove];
+ // got [notify.Remove notify.Remove|notify.Create] (i=1)
+ //
+ // Yup, here the diff have Recursive event inside. Go figure.
+ recursive: "recursive",
+ omit: "omit",
+}
diff --git a/vendor/github.com/rjeczalik/notify/event_fen.go b/vendor/github.com/rjeczalik/notify/event_fen.go
new file mode 100644
index 000000000..a3079385d
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/event_fen.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package notify
+
+const (
+ osSpecificCreate Event = 0x00000100 << iota
+ osSpecificRemove
+ osSpecificWrite
+ osSpecificRename
+ // internal
+ // recursive is used to distinguish recursive eventsets from non-recursive ones
+ recursive
+ // omit is used for dispatching internal events; only those events are sent
+	// for which both the event and the watchpoint have omit in their event sets.
+ omit
+)
+
+const (
+ FileAccess = fileAccess
+ FileModified = fileModified
+ FileAttrib = fileAttrib
+ FileDelete = fileDelete
+ FileRenameTo = fileRenameTo
+ FileRenameFrom = fileRenameFrom
+ FileTrunc = fileTrunc
+ FileNoFollow = fileNoFollow
+ Unmounted = unmounted
+ MountedOver = mountedOver
+)
+
+var osestr = map[Event]string{
+ FileAccess: "notify.FileAccess",
+ FileModified: "notify.FileModified",
+ FileAttrib: "notify.FileAttrib",
+ FileDelete: "notify.FileDelete",
+ FileRenameTo: "notify.FileRenameTo",
+ FileRenameFrom: "notify.FileRenameFrom",
+ FileTrunc: "notify.FileTrunc",
+ FileNoFollow: "notify.FileNoFollow",
+ Unmounted: "notify.Unmounted",
+ MountedOver: "notify.MountedOver",
+}
diff --git a/vendor/github.com/rjeczalik/notify/event_fsevents.go b/vendor/github.com/rjeczalik/notify/event_fsevents.go
new file mode 100644
index 000000000..6ded80b2c
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/event_fsevents.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,!kqueue
+
+package notify
+
+const (
+ osSpecificCreate = Event(FSEventsCreated)
+ osSpecificRemove = Event(FSEventsRemoved)
+ osSpecificWrite = Event(FSEventsModified)
+ osSpecificRename = Event(FSEventsRenamed)
+ // internal = Event(0x100000)
+ // recursive is used to distinguish recursive eventsets from non-recursive ones
+ recursive = Event(0x200000)
+ // omit is used for dispatching internal events; only those events are sent
+	// for which both the event and the watchpoint have omit in their event sets.
+ omit = Event(0x400000)
+)
+
+// FSEvents specific event values.
+const (
+ FSEventsMustScanSubDirs Event = 0x00001
+ FSEventsUserDropped = 0x00002
+ FSEventsKernelDropped = 0x00004
+ FSEventsEventIdsWrapped = 0x00008
+ FSEventsHistoryDone = 0x00010
+ FSEventsRootChanged = 0x00020
+ FSEventsMount = 0x00040
+ FSEventsUnmount = 0x00080
+ FSEventsCreated = 0x00100
+ FSEventsRemoved = 0x00200
+ FSEventsInodeMetaMod = 0x00400
+ FSEventsRenamed = 0x00800
+ FSEventsModified = 0x01000
+ FSEventsFinderInfoMod = 0x02000
+ FSEventsChangeOwner = 0x04000
+ FSEventsXattrMod = 0x08000
+ FSEventsIsFile = 0x10000
+ FSEventsIsDir = 0x20000
+ FSEventsIsSymlink = 0x40000
+)
+
+var osestr = map[Event]string{
+ FSEventsMustScanSubDirs: "notify.FSEventsMustScanSubDirs",
+ FSEventsUserDropped: "notify.FSEventsUserDropped",
+ FSEventsKernelDropped: "notify.FSEventsKernelDropped",
+ FSEventsEventIdsWrapped: "notify.FSEventsEventIdsWrapped",
+ FSEventsHistoryDone: "notify.FSEventsHistoryDone",
+ FSEventsRootChanged: "notify.FSEventsRootChanged",
+ FSEventsMount: "notify.FSEventsMount",
+ FSEventsUnmount: "notify.FSEventsUnmount",
+ FSEventsInodeMetaMod: "notify.FSEventsInodeMetaMod",
+ FSEventsFinderInfoMod: "notify.FSEventsFinderInfoMod",
+ FSEventsChangeOwner: "notify.FSEventsChangeOwner",
+ FSEventsXattrMod: "notify.FSEventsXattrMod",
+ FSEventsIsFile: "notify.FSEventsIsFile",
+ FSEventsIsDir: "notify.FSEventsIsDir",
+ FSEventsIsSymlink: "notify.FSEventsIsSymlink",
+}
+
+type event struct {
+ fse FSEvent
+ event Event
+}
+
+func (ei *event) Event() Event { return ei.event }
+func (ei *event) Path() string { return ei.fse.Path }
+func (ei *event) Sys() interface{} { return &ei.fse }
+func (ei *event) isDir() (bool, error) { return ei.fse.Flags&FSEventsIsDir != 0, nil }
diff --git a/vendor/github.com/rjeczalik/notify/event_inotify.go b/vendor/github.com/rjeczalik/notify/event_inotify.go
new file mode 100644
index 000000000..82954a9b3
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/event_inotify.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build linux
+
+package notify
+
+import "syscall"
+
+// Platform independent event values.
+const (
+ osSpecificCreate Event = 0x100000 << iota
+ osSpecificRemove
+ osSpecificWrite
+ osSpecificRename
+ // internal
+ // recursive is used to distinguish recursive eventsets from non-recursive ones
+ recursive
+ // omit is used for dispatching internal events; only those events are sent
+	// for which both the event and the watchpoint have omit in their event sets.
+ omit
+)
+
+// Inotify specific masks are legal, implemented events that are guaranteed to
+// work with notify package on linux-based systems.
+const (
+ InAccess = Event(syscall.IN_ACCESS) // File was accessed
+ InModify = Event(syscall.IN_MODIFY) // File was modified
+ InAttrib = Event(syscall.IN_ATTRIB) // Metadata changed
+ InCloseWrite = Event(syscall.IN_CLOSE_WRITE) // Writtable file was closed
+ InCloseNowrite = Event(syscall.IN_CLOSE_NOWRITE) // Unwrittable file closed
+ InOpen = Event(syscall.IN_OPEN) // File was opened
+ InMovedFrom = Event(syscall.IN_MOVED_FROM) // File was moved from X
+ InMovedTo = Event(syscall.IN_MOVED_TO) // File was moved to Y
+ InCreate = Event(syscall.IN_CREATE) // Subfile was created
+ InDelete = Event(syscall.IN_DELETE) // Subfile was deleted
+ InDeleteSelf = Event(syscall.IN_DELETE_SELF) // Self was deleted
+ InMoveSelf = Event(syscall.IN_MOVE_SELF) // Self was moved
+)
+
+var osestr = map[Event]string{
+ InAccess: "notify.InAccess",
+ InModify: "notify.InModify",
+ InAttrib: "notify.InAttrib",
+ InCloseWrite: "notify.InCloseWrite",
+ InCloseNowrite: "notify.InCloseNowrite",
+ InOpen: "notify.InOpen",
+ InMovedFrom: "notify.InMovedFrom",
+ InMovedTo: "notify.InMovedTo",
+ InCreate: "notify.InCreate",
+ InDelete: "notify.InDelete",
+ InDeleteSelf: "notify.InDeleteSelf",
+ InMoveSelf: "notify.InMoveSelf",
+}
+
+// Inotify behavior events are not **currently** supported by notify package.
+const (
+ inDontFollow = Event(syscall.IN_DONT_FOLLOW)
+ inExclUnlink = Event(syscall.IN_EXCL_UNLINK)
+ inMaskAdd = Event(syscall.IN_MASK_ADD)
+ inOneshot = Event(syscall.IN_ONESHOT)
+ inOnlydir = Event(syscall.IN_ONLYDIR)
+)
+
+type event struct {
+ sys syscall.InotifyEvent
+ path string
+ event Event
+}
+
+func (e *event) Event() Event { return e.event }
+func (e *event) Path() string { return e.path }
+func (e *event) Sys() interface{} { return &e.sys }
+func (e *event) isDir() (bool, error) { return e.sys.Mask&syscall.IN_ISDIR != 0, nil }
diff --git a/vendor/github.com/rjeczalik/notify/event_kqueue.go b/vendor/github.com/rjeczalik/notify/event_kqueue.go
new file mode 100644
index 000000000..82e2d8cca
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/event_kqueue.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,kqueue dragonfly freebsd netbsd openbsd
+
+package notify
+
+import "syscall"
+
+// TODO(pblaszczyk): ensure in runtime notify built-in event values do not
+// overlap with platform-defined ones.
+
+// Platform independent event values.
+const (
+ osSpecificCreate Event = 0x0100 << iota
+ osSpecificRemove
+ osSpecificWrite
+ osSpecificRename
+ // internal
+ // recursive is used to distinguish recursive eventsets from non-recursive ones
+ recursive
+ // omit is used for dispatching internal events; only those events are sent
+	// for which both the event and the watchpoint have omit in their event sets.
+ omit
+)
+
+const (
+	// NoteDelete is an event reported when the unlink() system call was called
+ // on the file referenced by the descriptor.
+ NoteDelete = Event(syscall.NOTE_DELETE)
+ // NoteWrite is an event reported when a write occurred on the file
+ // referenced by the descriptor.
+ NoteWrite = Event(syscall.NOTE_WRITE)
+ // NoteExtend is an event reported when the file referenced by the
+ // descriptor was extended.
+ NoteExtend = Event(syscall.NOTE_EXTEND)
+ // NoteAttrib is an event reported when the file referenced
+ // by the descriptor had its attributes changed.
+ NoteAttrib = Event(syscall.NOTE_ATTRIB)
+ // NoteLink is an event reported when the link count on the file changed.
+ NoteLink = Event(syscall.NOTE_LINK)
+ // NoteRename is an event reported when the file referenced
+ // by the descriptor was renamed.
+ NoteRename = Event(syscall.NOTE_RENAME)
+ // NoteRevoke is an event reported when access to the file was revoked via
+ // revoke(2) or the underlying file system was unmounted.
+ NoteRevoke = Event(syscall.NOTE_REVOKE)
+)
+
+var osestr = map[Event]string{
+ NoteDelete: "notify.NoteDelete",
+ NoteWrite: "notify.NoteWrite",
+ NoteExtend: "notify.NoteExtend",
+ NoteAttrib: "notify.NoteAttrib",
+ NoteLink: "notify.NoteLink",
+ NoteRename: "notify.NoteRename",
+ NoteRevoke: "notify.NoteRevoke",
+}
diff --git a/vendor/github.com/rjeczalik/notify/event_readdcw.go b/vendor/github.com/rjeczalik/notify/event_readdcw.go
new file mode 100644
index 000000000..11ead9e29
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/event_readdcw.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build windows
+
+package notify
+
+import (
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// Platform independent event values.
+const (
+ osSpecificCreate Event = 1 << (20 + iota)
+ osSpecificRemove
+ osSpecificWrite
+ osSpecificRename
+ // recursive is used to distinguish recursive eventsets from non-recursive ones
+ recursive
+ // omit is used for dispatching internal events; only those events are sent
+	// for which both the event and the watchpoint have omit in their event sets.
+ omit
+ // dirmarker TODO(pknap)
+ dirmarker
+)
+
+// ReadDirectoryChangesW filters.
+const (
+ FileNotifyChangeFileName = Event(syscall.FILE_NOTIFY_CHANGE_FILE_NAME)
+ FileNotifyChangeDirName = Event(syscall.FILE_NOTIFY_CHANGE_DIR_NAME)
+ FileNotifyChangeAttributes = Event(syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES)
+ FileNotifyChangeSize = Event(syscall.FILE_NOTIFY_CHANGE_SIZE)
+ FileNotifyChangeLastWrite = Event(syscall.FILE_NOTIFY_CHANGE_LAST_WRITE)
+ FileNotifyChangeLastAccess = Event(syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS)
+ FileNotifyChangeCreation = Event(syscall.FILE_NOTIFY_CHANGE_CREATION)
+ FileNotifyChangeSecurity = Event(syscallFileNotifyChangeSecurity)
+)
+
+const (
+ fileNotifyChangeAll = 0x17f // logical sum of all FileNotifyChange* events.
+ fileNotifyChangeModified = fileNotifyChangeAll &^ (FileNotifyChangeFileName | FileNotifyChangeDirName)
+)
+
+// according to: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365465(v=vs.85).aspx
+// this flag should be declared in: http://golang.org/src/pkg/syscall/ztypes_windows.go
+const syscallFileNotifyChangeSecurity = 0x00000100
+
+// ReadDirectoryChangesW actions.
+const (
+ FileActionAdded = Event(syscall.FILE_ACTION_ADDED) << 12
+ FileActionRemoved = Event(syscall.FILE_ACTION_REMOVED) << 12
+ FileActionModified = Event(syscall.FILE_ACTION_MODIFIED) << 14
+ FileActionRenamedOldName = Event(syscall.FILE_ACTION_RENAMED_OLD_NAME) << 15
+ FileActionRenamedNewName = Event(syscall.FILE_ACTION_RENAMED_NEW_NAME) << 16
+)
+
+const fileActionAll = 0x7f000 // logical sum of all FileAction* events.
+
+var osestr = map[Event]string{
+ FileNotifyChangeFileName: "notify.FileNotifyChangeFileName",
+ FileNotifyChangeDirName: "notify.FileNotifyChangeDirName",
+ FileNotifyChangeAttributes: "notify.FileNotifyChangeAttributes",
+ FileNotifyChangeSize: "notify.FileNotifyChangeSize",
+ FileNotifyChangeLastWrite: "notify.FileNotifyChangeLastWrite",
+ FileNotifyChangeLastAccess: "notify.FileNotifyChangeLastAccess",
+ FileNotifyChangeCreation: "notify.FileNotifyChangeCreation",
+ FileNotifyChangeSecurity: "notify.FileNotifyChangeSecurity",
+
+ FileActionAdded: "notify.FileActionAdded",
+ FileActionRemoved: "notify.FileActionRemoved",
+ FileActionModified: "notify.FileActionModified",
+ FileActionRenamedOldName: "notify.FileActionRenamedOldName",
+ FileActionRenamedNewName: "notify.FileActionRenamedNewName",
+}
+
+const (
+ fTypeUnknown uint8 = iota
+ fTypeFile
+ fTypeDirectory
+)
+
+// TODO(ppknap) : doc.
+type event struct {
+ pathw []uint16
+ name string
+ ftype uint8
+ action uint32
+ filter uint32
+ e Event
+}
+
+func (e *event) Event() Event { return e.e }
+func (e *event) Path() string { return filepath.Join(syscall.UTF16ToString(e.pathw), e.name) }
+func (e *event) Sys() interface{} { return e.ftype }
+
+func (e *event) isDir() (bool, error) {
+ if e.ftype != fTypeUnknown {
+ return e.ftype == fTypeDirectory, nil
+ }
+ fi, err := os.Stat(e.Path())
+ if err != nil {
+ return false, err
+ }
+ return fi.IsDir(), nil
+}
diff --git a/vendor/github.com/rjeczalik/notify/event_stub.go b/vendor/github.com/rjeczalik/notify/event_stub.go
new file mode 100644
index 000000000..faac7c7cb
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/event_stub.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build !darwin,!linux,!freebsd,!dragonfly,!netbsd,!openbsd,!windows
+// +build !kqueue,!solaris
+
+package notify
+
+// Platform independent event values.
+const (
+ osSpecificCreate Event = 1 << iota
+ osSpecificRemove
+ osSpecificWrite
+ osSpecificRename
+ // internal
+ // recursive is used to distinguish recursive eventsets from non-recursive ones
+ recursive
+ // omit is used for dispatching internal events; only those events are sent
+	// for which both the event and the watchpoint have omit in their event sets.
+ omit
+)
+
+var osestr = map[Event]string{}
+
+type event struct{}
+
+func (e *event) Event() (_ Event) { return }
+func (e *event) Path() (_ string) { return }
+func (e *event) Sys() (_ interface{}) { return }
+func (e *event) isDir() (_ bool, _ error) { return }
diff --git a/vendor/github.com/rjeczalik/notify/event_trigger.go b/vendor/github.com/rjeczalik/notify/event_trigger.go
new file mode 100644
index 000000000..94470fd37
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/event_trigger.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,kqueue dragonfly freebsd netbsd openbsd solaris
+
+package notify
+
+type event struct {
+ p string
+ e Event
+ d bool
+ pe interface{}
+}
+
+func (e *event) Event() Event { return e.e }
+
+func (e *event) Path() string { return e.p }
+
+func (e *event) Sys() interface{} { return e.pe }
+
+func (e *event) isDir() (bool, error) { return e.d, nil }
diff --git a/vendor/github.com/rjeczalik/notify/node.go b/vendor/github.com/rjeczalik/notify/node.go
new file mode 100644
index 000000000..29c1bb20a
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/node.go
@@ -0,0 +1,272 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+package notify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+var errSkip = errors.New("notify: skip")
+
+type walkPathFunc func(nd node, isbase bool) error
+
+type walkFunc func(node) error
+
+func errnotexist(name string) error {
+ return &os.PathError{
+ Op: "Node",
+ Path: name,
+ Err: os.ErrNotExist,
+ }
+}
+
+type node struct {
+ Name string
+ Watch watchpoint
+ Child map[string]node
+}
+
+func newnode(name string) node {
+ return node{
+ Name: name,
+ Watch: make(watchpoint),
+ Child: make(map[string]node),
+ }
+}
+
+func (nd node) addchild(name, base string) node {
+ child, ok := nd.Child[base]
+ if !ok {
+ child = newnode(name)
+ nd.Child[base] = child
+ }
+ return child
+}
+
+func (nd node) Add(name string) node {
+ i := indexbase(nd.Name, name)
+ if i == -1 {
+ return node{}
+ }
+ for j := indexSep(name[i:]); j != -1; j = indexSep(name[i:]) {
+ nd = nd.addchild(name[:i+j], name[i:i+j])
+ i += j + 1
+ }
+ return nd.addchild(name, name[i:])
+}
+
+func (nd node) AddDir(fn walkFunc) error {
+ stack := []node{nd}
+Traverse:
+ for n := len(stack); n != 0; n = len(stack) {
+ nd, stack = stack[n-1], stack[:n-1]
+ switch err := fn(nd); err {
+ case nil:
+ case errSkip:
+ continue Traverse
+ default:
+ return fmt.Errorf("error while traversing %q: %v", nd.Name, err)
+ }
+ // TODO(rjeczalik): tolerate open failures - add failed names to
+ // AddDirError and notify users which names are not added to the tree.
+ fi, err := ioutil.ReadDir(nd.Name)
+ if err != nil {
+ return err
+ }
+ for _, fi := range fi {
+ if fi.Mode()&(os.ModeSymlink|os.ModeDir) == os.ModeDir {
+ name := filepath.Join(nd.Name, fi.Name())
+ stack = append(stack, nd.addchild(name, name[len(nd.Name)+1:]))
+ }
+ }
+ }
+ return nil
+}
+
+func (nd node) Get(name string) (node, error) {
+ i := indexbase(nd.Name, name)
+ if i == -1 {
+ return node{}, errnotexist(name)
+ }
+ ok := false
+ for j := indexSep(name[i:]); j != -1; j = indexSep(name[i:]) {
+ if nd, ok = nd.Child[name[i:i+j]]; !ok {
+ return node{}, errnotexist(name)
+ }
+ i += j + 1
+ }
+ if nd, ok = nd.Child[name[i:]]; !ok {
+ return node{}, errnotexist(name)
+ }
+ return nd, nil
+}
+
+func (nd node) Del(name string) error {
+ i := indexbase(nd.Name, name)
+ if i == -1 {
+ return errnotexist(name)
+ }
+ stack := []node{nd}
+ ok := false
+ for j := indexSep(name[i:]); j != -1; j = indexSep(name[i:]) {
+ if nd, ok = nd.Child[name[i:i+j]]; !ok {
+ return errnotexist(name[:i+j])
+ }
+ stack = append(stack, nd)
+ }
+ if nd, ok = nd.Child[name[i:]]; !ok {
+ return errnotexist(name)
+ }
+ nd.Child = nil
+ nd.Watch = nil
+ for name, i = base(nd.Name), len(stack); i != 0; name, i = base(nd.Name), i-1 {
+ nd = stack[i-1]
+ if nd := nd.Child[name]; len(nd.Watch) > 1 || len(nd.Child) != 0 {
+ break
+ } else {
+ nd.Child = nil
+ nd.Watch = nil
+ }
+ delete(nd.Child, name)
+ }
+ return nil
+}
+
+func (nd node) Walk(fn walkFunc) error {
+ stack := []node{nd}
+Traverse:
+ for n := len(stack); n != 0; n = len(stack) {
+ nd, stack = stack[n-1], stack[:n-1]
+ switch err := fn(nd); err {
+ case nil:
+ case errSkip:
+ continue Traverse
+ default:
+ return err
+ }
+ for name, nd := range nd.Child {
+ if name == "" {
+ // Node storing inactive watchpoints has empty name, skip it
+				// from traversing. Root node has also an empty name, but it
+ // never has a parent node.
+ continue
+ }
+ stack = append(stack, nd)
+ }
+ }
+ return nil
+}
+
+func (nd node) WalkPath(name string, fn walkPathFunc) error {
+ i := indexbase(nd.Name, name)
+ if i == -1 {
+ return errnotexist(name)
+ }
+ ok := false
+ for j := indexSep(name[i:]); j != -1; j = indexSep(name[i:]) {
+ switch err := fn(nd, false); err {
+ case nil:
+ case errSkip:
+ return nil
+ default:
+ return err
+ }
+ if nd, ok = nd.Child[name[i:i+j]]; !ok {
+ return errnotexist(name[:i+j])
+ }
+ i += j + 1
+ }
+ switch err := fn(nd, false); err {
+ case nil:
+ case errSkip:
+ return nil
+ default:
+ return err
+ }
+ if nd, ok = nd.Child[name[i:]]; !ok {
+ return errnotexist(name)
+ }
+ switch err := fn(nd, true); err {
+ case nil, errSkip:
+ return nil
+ default:
+ return err
+ }
+}
+
+type root struct {
+ nd node
+}
+
+func (r root) addroot(name string) node {
+ if vol := filepath.VolumeName(name); vol != "" {
+ root, ok := r.nd.Child[vol]
+ if !ok {
+ root = r.nd.addchild(vol, vol)
+ }
+ return root
+ }
+ return r.nd
+}
+
+func (r root) root(name string) (node, error) {
+ if vol := filepath.VolumeName(name); vol != "" {
+ nd, ok := r.nd.Child[vol]
+ if !ok {
+ return node{}, errnotexist(name)
+ }
+ return nd, nil
+ }
+ return r.nd, nil
+}
+
+func (r root) Add(name string) node {
+ return r.addroot(name).Add(name)
+}
+
+func (r root) AddDir(dir string, fn walkFunc) error {
+ return r.Add(dir).AddDir(fn)
+}
+
+func (r root) Del(name string) error {
+ nd, err := r.root(name)
+ if err != nil {
+ return err
+ }
+ return nd.Del(name)
+}
+
+func (r root) Get(name string) (node, error) {
+ nd, err := r.root(name)
+ if err != nil {
+ return node{}, err
+ }
+ if nd.Name != name {
+ if nd, err = nd.Get(name); err != nil {
+ return node{}, err
+ }
+ }
+ return nd, nil
+}
+
+func (r root) Walk(name string, fn walkFunc) error {
+ nd, err := r.Get(name)
+ if err != nil {
+ return err
+ }
+ return nd.Walk(fn)
+}
+
+func (r root) WalkPath(name string, fn walkPathFunc) error {
+ nd, err := r.root(name)
+ if err != nil {
+ return err
+ }
+ return nd.WalkPath(name, fn)
+}
diff --git a/vendor/github.com/rjeczalik/notify/notify.go b/vendor/github.com/rjeczalik/notify/notify.go
new file mode 100644
index 000000000..dbf1e7bc2
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/notify.go
@@ -0,0 +1,74 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// BUG(rjeczalik): Notify does not collect watchpoints, when underlying watches
+// were removed by their os-specific watcher implementations. Instead users are
+// advised to listen on persistant paths to have guarantee they receive events
+// for the whole lifetime of their applications (to discuss see #69).
+
+// BUG(ppknap): Linux (inotify) does not support watcher behavior masks like
+// InOneshot, InOnlydir etc. Instead users are advised to perform the filtering
+// themselves (to discuss see #71).
+
+// BUG(ppknap): Notify was not tested for short path name support under Windows
+// (ReadDirectoryChangesW).
+
+// BUG(ppknap): Windows (ReadDirectoryChangesW) cannot recognize which notification
+// triggers FileActionModified event. (to discuss see #75).
+
+package notify
+
+var defaultTree = newTree()
+
+// Watch sets up a watchpoint on path listening for events given by the events
+// argument.
+//
+// File or directory given by the path must exist, otherwise Watch will fail
+// with non-nil error. Notify resolves, for its internal purpose, any symlinks
+// the provided path may contain, so it may fail if the symlinks form a cycle.
+// It does so, since not all watcher implementations treat passed paths as-is.
+// E.g. FSEvents reports a real path for every event, setting a watchpoint
+// on /tmp will report events with paths rooted at /private/tmp etc.
+//
+// The c almost always is a buffered channel. Watch will not block sending to c
+// - the caller must ensure that c has sufficient buffer space to keep up with
+// the expected event rate.
+//
+// It is allowed to pass the same channel multiple times with different event
+// list or different paths. Calling Watch with different event lists for a single
+// watchpoint expands its event set. The only way to shrink it, is to call
+// Stop on its channel.
+//
+// Calling Watch with empty event list does expand nor shrink watchpoint's event
+// set. If c is the first channel to listen for events on the given path, Watch
+// will seamlessly create a watch on the filesystem.
+//
+// Notify dispatches copies of single filesystem event to all channels registered
+// for each path. If a single filesystem event contains multiple coalesced events,
+// each of them is dispatched separately. E.g. the following filesystem change:
+//
+// ~ $ echo Hello > Notify.txt
+//
+// dispatches two events - notify.Create and notify.Write. However, it may depend
+// on the underlying watcher implementation whether OS reports both of them.
+//
+// Windows and recursive watches
+//
+// If a directory which path was used to create recursive watch under Windows
+// gets deleted, the OS will not report such event. It is advised to keep in
+// mind this limitation while setting recursive watchpoints for your application,
+// e.g. use persistant paths like %userprofile% or watch additionally parent
+// directory of a recursive watchpoint in order to receive delete events for it.
+func Watch(path string, c chan<- EventInfo, events ...Event) error {
+ return defaultTree.Watch(path, c, events...)
+}
+
+// Stop removes all watchpoints registered for c. All underlying watches are
+// also removed, for which c was the last channel listening for events.
+//
+// Stop does not close c. When Stop returns, it is guranteed that c will
+// receive no more signals.
+func Stop(c chan<- EventInfo) {
+ defaultTree.Stop(c)
+}
diff --git a/vendor/github.com/rjeczalik/notify/tree.go b/vendor/github.com/rjeczalik/notify/tree.go
new file mode 100644
index 000000000..cd6afd60d
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/tree.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+package notify
+
+const buffer = 128
+
+type tree interface {
+ Watch(string, chan<- EventInfo, ...Event) error
+ Stop(chan<- EventInfo)
+ Close() error
+}
+
+func newTree() tree {
+ c := make(chan EventInfo, buffer)
+ w := newWatcher(c)
+ if rw, ok := w.(recursiveWatcher); ok {
+ return newRecursiveTree(rw, c)
+ }
+ return newNonrecursiveTree(w, c, make(chan EventInfo, buffer))
+}
diff --git a/vendor/github.com/rjeczalik/notify/tree_nonrecursive.go b/vendor/github.com/rjeczalik/notify/tree_nonrecursive.go
new file mode 100644
index 000000000..dfa72d1d2
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/tree_nonrecursive.go
@@ -0,0 +1,292 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+package notify
+
+import "sync"
+
+// nonrecursiveTree TODO(rjeczalik)
+type nonrecursiveTree struct {
+ rw sync.RWMutex // protects root
+ root root
+ w watcher
+ c chan EventInfo
+ rec chan EventInfo
+}
+
+// newNonrecursiveTree TODO(rjeczalik)
+func newNonrecursiveTree(w watcher, c, rec chan EventInfo) *nonrecursiveTree {
+ if rec == nil {
+ rec = make(chan EventInfo, buffer)
+ }
+ t := &nonrecursiveTree{
+ root: root{nd: newnode("")},
+ w: w,
+ c: c,
+ rec: rec,
+ }
+ go t.dispatch(c)
+ go t.internal(rec)
+ return t
+}
+
+// dispatch TODO(rjeczalik)
+func (t *nonrecursiveTree) dispatch(c <-chan EventInfo) {
+ for ei := range c {
+ dbgprintf("dispatching %v on %q", ei.Event(), ei.Path())
+ go func(ei EventInfo) {
+ var nd node
+ var isrec bool
+ dir, base := split(ei.Path())
+ fn := func(it node, isbase bool) error {
+ isrec = isrec || it.Watch.IsRecursive()
+ if isbase {
+ nd = it
+ } else {
+ it.Watch.Dispatch(ei, recursive)
+ }
+ return nil
+ }
+ t.rw.RLock()
+ // Notify recursive watchpoints found on the path.
+ if err := t.root.WalkPath(dir, fn); err != nil {
+ dbgprint("dispatch did not reach leaf:", err)
+ t.rw.RUnlock()
+ return
+ }
+ // Notify parent watchpoint.
+ nd.Watch.Dispatch(ei, 0)
+ isrec = isrec || nd.Watch.IsRecursive()
+ // If leaf watchpoint exists, notify it.
+ if nd, ok := nd.Child[base]; ok {
+ isrec = isrec || nd.Watch.IsRecursive()
+ nd.Watch.Dispatch(ei, 0)
+ }
+ t.rw.RUnlock()
+ // If the event describes newly leaf directory created within
+ if !isrec || ei.Event() != Create {
+ return
+ }
+ if ok, err := ei.(isDirer).isDir(); !ok || err != nil {
+ return
+ }
+ t.rec <- ei
+ }(ei)
+ }
+}
+
+// internal TODO(rjeczalik)
+func (t *nonrecursiveTree) internal(rec <-chan EventInfo) {
+ for ei := range rec {
+ var nd node
+ var eset = internal
+ t.rw.Lock()
+ t.root.WalkPath(ei.Path(), func(it node, _ bool) error {
+ if e := it.Watch[t.rec]; e != 0 && e > eset {
+ eset = e
+ }
+ nd = it
+ return nil
+ })
+ if eset == internal {
+ t.rw.Unlock()
+ continue
+ }
+ err := nd.Add(ei.Path()).AddDir(t.recFunc(eset))
+ t.rw.Unlock()
+ if err != nil {
+ dbgprintf("internal(%p) error: %v", rec, err)
+ }
+ }
+}
+
+// watchAdd TODO(rjeczalik)
+func (t *nonrecursiveTree) watchAdd(nd node, c chan<- EventInfo, e Event) eventDiff {
+ if e&recursive != 0 {
+ diff := nd.Watch.Add(t.rec, e|Create|omit)
+ nd.Watch.Add(c, e)
+ return diff
+ }
+ return nd.Watch.Add(c, e)
+}
+
+// watchDelMin TODO(rjeczalik)
+func (t *nonrecursiveTree) watchDelMin(min Event, nd node, c chan<- EventInfo, e Event) eventDiff {
+ old, ok := nd.Watch[t.rec]
+ if ok {
+ nd.Watch[t.rec] = min
+ }
+ diff := nd.Watch.Del(c, e)
+ if ok {
+ switch old &^= diff[0] &^ diff[1]; {
+ case old|internal == internal:
+ delete(nd.Watch, t.rec)
+ if set, ok := nd.Watch[nil]; ok && len(nd.Watch) == 1 && set == 0 {
+ delete(nd.Watch, nil)
+ }
+ default:
+ nd.Watch.Add(t.rec, old|Create)
+ switch {
+ case diff == none:
+ case diff[1]|Create == diff[0]:
+ diff = none
+ default:
+ diff[1] |= Create
+ }
+ }
+ }
+ return diff
+}
+
+// watchDel TODO(rjeczalik)
+func (t *nonrecursiveTree) watchDel(nd node, c chan<- EventInfo, e Event) eventDiff {
+ return t.watchDelMin(0, nd, c, e)
+}
+
+// Watch TODO(rjeczalik)
+func (t *nonrecursiveTree) Watch(path string, c chan<- EventInfo, events ...Event) error {
+ if c == nil {
+ panic("notify: Watch using nil channel")
+ }
+ // Expanding with empty event set is a nop.
+ if len(events) == 0 {
+ return nil
+ }
+ path, isrec, err := cleanpath(path)
+ if err != nil {
+ return err
+ }
+ eset := joinevents(events)
+ t.rw.Lock()
+ defer t.rw.Unlock()
+ nd := t.root.Add(path)
+ if isrec {
+ return t.watchrec(nd, c, eset|recursive)
+ }
+ return t.watch(nd, c, eset)
+}
+
+func (t *nonrecursiveTree) watch(nd node, c chan<- EventInfo, e Event) (err error) {
+ diff := nd.Watch.Add(c, e)
+ switch {
+ case diff == none:
+ return nil
+ case diff[1] == 0:
+ // TODO(rjeczalik): cleanup this panic after implementation is stable
+ panic("eset is empty: " + nd.Name)
+ case diff[0] == 0:
+ err = t.w.Watch(nd.Name, diff[1])
+ default:
+ err = t.w.Rewatch(nd.Name, diff[0], diff[1])
+ }
+ if err != nil {
+ nd.Watch.Del(c, diff.Event())
+ return err
+ }
+ return nil
+}
+
+func (t *nonrecursiveTree) recFunc(e Event) walkFunc {
+ return func(nd node) error {
+ switch diff := nd.Watch.Add(t.rec, e|omit|Create); {
+ case diff == none:
+ case diff[1] == 0:
+ // TODO(rjeczalik): cleanup this panic after implementation is stable
+ panic("eset is empty: " + nd.Name)
+ case diff[0] == 0:
+ t.w.Watch(nd.Name, diff[1])
+ default:
+ t.w.Rewatch(nd.Name, diff[0], diff[1])
+ }
+ return nil
+ }
+}
+
+func (t *nonrecursiveTree) watchrec(nd node, c chan<- EventInfo, e Event) error {
+ var traverse func(walkFunc) error
+ // Non-recursive tree listens on Create event for every recursive
+ // watchpoint in order to automagically set a watch for every
+ // created directory.
+ switch diff := nd.Watch.dryAdd(t.rec, e|Create); {
+ case diff == none:
+ t.watchAdd(nd, c, e)
+ nd.Watch.Add(t.rec, e|omit|Create)
+ return nil
+ case diff[1] == 0:
+ // TODO(rjeczalik): cleanup this panic after implementation is stable
+ panic("eset is empty: " + nd.Name)
+ case diff[0] == 0:
+ // TODO(rjeczalik): BFS into directories and skip subtree as soon as first
+ // recursive watchpoint is encountered.
+ traverse = nd.AddDir
+ default:
+ traverse = nd.Walk
+ }
+ // TODO(rjeczalik): account every path that failed to be (re)watched
+ // and retry.
+ if err := traverse(t.recFunc(e)); err != nil {
+ return err
+ }
+ t.watchAdd(nd, c, e)
+ return nil
+}
+
+type walkWatchpointFunc func(Event, node) error
+
+func (t *nonrecursiveTree) walkWatchpoint(nd node, fn walkWatchpointFunc) error {
+ type minode struct {
+ min Event
+ nd node
+ }
+ mnd := minode{nd: nd}
+ stack := []minode{mnd}
+Traverse:
+ for n := len(stack); n != 0; n = len(stack) {
+ mnd, stack = stack[n-1], stack[:n-1]
+ // There must be no recursive watchpoints if the node has no watchpoints
+ // itself (every node in subtree rooted at recursive watchpoints must
+ // have at least nil (total) and t.rec watchpoints).
+ if len(mnd.nd.Watch) != 0 {
+ switch err := fn(mnd.min, mnd.nd); err {
+ case nil:
+ case errSkip:
+ continue Traverse
+ default:
+ return err
+ }
+ }
+ for _, nd := range mnd.nd.Child {
+ stack = append(stack, minode{mnd.nd.Watch[t.rec], nd})
+ }
+ }
+ return nil
+}
+
+// Stop TODO(rjeczalik)
+func (t *nonrecursiveTree) Stop(c chan<- EventInfo) {
+ fn := func(min Event, nd node) error {
+ // TODO(rjeczalik): aggregate watcher errors and retry; in worst case
+ // forward to the user.
+ switch diff := t.watchDelMin(min, nd, c, all); {
+ case diff == none:
+ return nil
+ case diff[1] == 0:
+ t.w.Unwatch(nd.Name)
+ default:
+ t.w.Rewatch(nd.Name, diff[0], diff[1])
+ }
+ return nil
+ }
+ t.rw.Lock()
+ err := t.walkWatchpoint(t.root.nd, fn) // TODO(rjeczalik): store max root per c
+ t.rw.Unlock()
+ dbgprintf("Stop(%p) error: %v\n", c, err)
+}
+
+// Close TODO(rjeczalik)
+func (t *nonrecursiveTree) Close() error {
+ err := t.w.Close()
+ close(t.c)
+ return err
+}
diff --git a/vendor/github.com/rjeczalik/notify/tree_recursive.go b/vendor/github.com/rjeczalik/notify/tree_recursive.go
new file mode 100644
index 000000000..7f00dfe35
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/tree_recursive.go
@@ -0,0 +1,354 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+package notify
+
+import "sync"
+
+// watchAdd TODO(rjeczalik)
+func watchAdd(nd node, c chan<- EventInfo, e Event) eventDiff {
+ diff := nd.Watch.Add(c, e)
+ if wp := nd.Child[""].Watch; len(wp) != 0 {
+ e = wp.Total()
+ diff[0] |= e
+ diff[1] |= e
+ if diff[0] == diff[1] {
+ return none
+ }
+ }
+ return diff
+}
+
+// watchAddInactive TODO(rjeczalik)
+func watchAddInactive(nd node, c chan<- EventInfo, e Event) eventDiff {
+ wp := nd.Child[""].Watch
+ if wp == nil {
+ wp = make(watchpoint)
+ nd.Child[""] = node{Watch: wp}
+ }
+ diff := wp.Add(c, e)
+ e = nd.Watch.Total()
+ diff[0] |= e
+ diff[1] |= e
+ if diff[0] == diff[1] {
+ return none
+ }
+ return diff
+}
+
+// watchCopy TODO(rjeczalik)
+func watchCopy(src, dst node) {
+ for c, e := range src.Watch {
+ if c == nil {
+ continue
+ }
+ watchAddInactive(dst, c, e)
+ }
+ if wpsrc := src.Child[""].Watch; len(wpsrc) != 0 {
+ wpdst := dst.Child[""].Watch
+ for c, e := range wpsrc {
+ if c == nil {
+ continue
+ }
+ wpdst.Add(c, e)
+ }
+ }
+}
+
+// watchDel TODO(rjeczalik)
+func watchDel(nd node, c chan<- EventInfo, e Event) eventDiff {
+ diff := nd.Watch.Del(c, e)
+ if wp := nd.Child[""].Watch; len(wp) != 0 {
+ diffInactive := wp.Del(c, e)
+ e = wp.Total()
+ // TODO(rjeczalik): add e if e != all?
+ diff[0] |= diffInactive[0] | e
+ diff[1] |= diffInactive[1] | e
+ if diff[0] == diff[1] {
+ return none
+ }
+ }
+ return diff
+}
+
+// watchTotal TODO(rjeczalik)
+func watchTotal(nd node) Event {
+ e := nd.Watch.Total()
+ if wp := nd.Child[""].Watch; len(wp) != 0 {
+ e |= wp.Total()
+ }
+ return e
+}
+
+// watchIsRecursive TODO(rjeczalik)
+func watchIsRecursive(nd node) bool {
+ ok := nd.Watch.IsRecursive()
+ // TODO(rjeczalik): add a test for len(wp) != 0 change the condition.
+ if wp := nd.Child[""].Watch; len(wp) != 0 {
+ // If a watchpoint holds inactive watchpoints, it means it's a parent
+ // one, which is recursive by nature even though it may be not recursive
+ // itself.
+ ok = true
+ }
+ return ok
+}
+
+// recursiveTree TODO(rjeczalik)
+type recursiveTree struct {
+ rw sync.RWMutex // protects root
+ root root
+ // TODO(rjeczalik): merge watcher + recursiveWatcher after #5 and #6
+ w interface {
+ watcher
+ recursiveWatcher
+ }
+ c chan EventInfo
+}
+
+// newRecursiveTree TODO(rjeczalik)
+func newRecursiveTree(w recursiveWatcher, c chan EventInfo) *recursiveTree {
+ t := &recursiveTree{
+ root: root{nd: newnode("")},
+ w: struct {
+ watcher
+ recursiveWatcher
+ }{w.(watcher), w},
+ c: c,
+ }
+ go t.dispatch()
+ return t
+}
+
+// dispatch TODO(rjeczalik)
+func (t *recursiveTree) dispatch() {
+ for ei := range t.c {
+ dbgprintf("dispatching %v on %q", ei.Event(), ei.Path())
+ go func(ei EventInfo) {
+ nd, ok := node{}, false
+ dir, base := split(ei.Path())
+ fn := func(it node, isbase bool) error {
+ if isbase {
+ nd = it
+ } else {
+ it.Watch.Dispatch(ei, recursive)
+ }
+ return nil
+ }
+ t.rw.RLock()
+ defer t.rw.RUnlock()
+ // Notify recursive watchpoints found on the path.
+ if err := t.root.WalkPath(dir, fn); err != nil {
+ dbgprint("dispatch did not reach leaf:", err)
+ return
+ }
+ // Notify parent watchpoint.
+ nd.Watch.Dispatch(ei, 0)
+ // If leaf watchpoint exists, notify it.
+ if nd, ok = nd.Child[base]; ok {
+ nd.Watch.Dispatch(ei, 0)
+ }
+ }(ei)
+ }
+}
+
+// Watch TODO(rjeczalik)
+func (t *recursiveTree) Watch(path string, c chan<- EventInfo, events ...Event) error {
+ if c == nil {
+ panic("notify: Watch using nil channel")
+ }
+ // Expanding with empty event set is a nop.
+ if len(events) == 0 {
+ return nil
+ }
+ path, isrec, err := cleanpath(path)
+ if err != nil {
+ return err
+ }
+ eventset := joinevents(events)
+ if isrec {
+ eventset |= recursive
+ }
+ t.rw.Lock()
+ defer t.rw.Unlock()
+ // case 1: cur is a child
+ //
+ // Look for parent watch which already covers the given path.
+ parent := node{}
+ self := false
+ err = t.root.WalkPath(path, func(nd node, isbase bool) error {
+ if watchTotal(nd) != 0 {
+ parent = nd
+ self = isbase
+ return errSkip
+ }
+ return nil
+ })
+ cur := t.root.Add(path) // add after the walk, so it's less to traverse
+ if err == nil && parent.Watch != nil {
+ // Parent watch found. Register inactive watchpoint, so we have enough
+ // information to shrink the eventset on eventual Stop.
+ // return t.resetwatchpoint(parent, parent, c, eventset|inactive)
+ var diff eventDiff
+ if self {
+ diff = watchAdd(cur, c, eventset)
+ } else {
+ diff = watchAddInactive(parent, c, eventset)
+ }
+ switch {
+ case diff == none:
+ // the parent watchpoint already covers requested subtree with its
+ // eventset
+ case diff[0] == 0:
+ // TODO(rjeczalik): cleanup this panic after implementation is stable
+ panic("dangling watchpoint: " + parent.Name)
+ default:
+ if isrec || watchIsRecursive(parent) {
+ err = t.w.RecursiveRewatch(parent.Name, parent.Name, diff[0], diff[1])
+ } else {
+ err = t.w.Rewatch(parent.Name, diff[0], diff[1])
+ }
+ if err != nil {
+ watchDel(parent, c, diff.Event())
+ return err
+ }
+ watchAdd(cur, c, eventset)
+ // TODO(rjeczalik): account top-most path for c
+ return nil
+ }
+ if !self {
+ watchAdd(cur, c, eventset)
+ }
+ return nil
+ }
+ // case 2: cur is new parent
+ //
+ // Look for children nodes, unwatch n-1 of them and rewatch the last one.
+ var children []node
+ fn := func(nd node) error {
+ if len(nd.Watch) == 0 {
+ return nil
+ }
+ children = append(children, nd)
+ return errSkip
+ }
+ switch must(cur.Walk(fn)); len(children) {
+ case 0:
+ // no child watches, cur holds a new watch
+ case 1:
+ watchAdd(cur, c, eventset) // TODO(rjeczalik): update cache c subtree root?
+ watchCopy(children[0], cur)
+ err = t.w.RecursiveRewatch(children[0].Name, cur.Name, watchTotal(children[0]),
+ watchTotal(cur))
+ if err != nil {
+ // Clean inactive watchpoint. The c chan did not exist before.
+ cur.Child[""] = node{}
+ delete(cur.Watch, c)
+ return err
+ }
+ return nil
+ default:
+ watchAdd(cur, c, eventset)
+ // Copy children inactive watchpoints to the new parent.
+ for _, nd := range children {
+ watchCopy(nd, cur)
+ }
+ // Watch parent subtree.
+ if err = t.w.RecursiveWatch(cur.Name, watchTotal(cur)); err != nil {
+ // Clean inactive watchpoint. The c chan did not exist before.
+ cur.Child[""] = node{}
+ delete(cur.Watch, c)
+ return err
+ }
+ // Unwatch children subtrees.
+ var e error
+ for _, nd := range children {
+ if watchIsRecursive(nd) {
+ e = t.w.RecursiveUnwatch(nd.Name)
+ } else {
+ e = t.w.Unwatch(nd.Name)
+ }
+ if e != nil {
+ err = nonil(err, e)
+ // TODO(rjeczalik): child is still watched, warn all its watchpoints
+ // about possible duplicate events via Error event
+ }
+ }
+ return err
+ }
+ // case 3: cur is new, alone node
+ switch diff := watchAdd(cur, c, eventset); {
+ case diff == none:
+ // TODO(rjeczalik): cleanup this panic after implementation is stable
+ panic("watch requested but no parent watchpoint found: " + cur.Name)
+ case diff[0] == 0:
+ if isrec {
+ err = t.w.RecursiveWatch(cur.Name, diff[1])
+ } else {
+ err = t.w.Watch(cur.Name, diff[1])
+ }
+ if err != nil {
+ watchDel(cur, c, diff.Event())
+ return err
+ }
+ default:
+ // TODO(rjeczalik): cleanup this panic after implementation is stable
+ panic("watch requested but no parent watchpoint found: " + cur.Name)
+ }
+ return nil
+}
+
+// Stop TODO(rjeczalik)
+//
+// TODO(rjeczalik): Split parent watchpoint - transfer watches to children
+// if parent is no longer needed. This carries a risk that underlying
+// watcher calls could fail - reconsider if it's worth the effort.
+func (t *recursiveTree) Stop(c chan<- EventInfo) {
+ var err error
+ fn := func(nd node) (e error) {
+ diff := watchDel(nd, c, all)
+ switch {
+ case diff == none && watchTotal(nd) == 0:
+ // TODO(rjeczalik): There's no watchpoints deeper in the tree,
+ // probably we should remove the nodes as well.
+ return nil
+ case diff == none:
+ // Removing c from nd does not require shrinking its eventset.
+ case diff[1] == 0:
+ if watchIsRecursive(nd) {
+ e = t.w.RecursiveUnwatch(nd.Name)
+ } else {
+ e = t.w.Unwatch(nd.Name)
+ }
+ default:
+ if watchIsRecursive(nd) {
+ e = t.w.RecursiveRewatch(nd.Name, nd.Name, diff[0], diff[1])
+ } else {
+ e = t.w.Rewatch(nd.Name, diff[0], diff[1])
+ }
+ }
+ fn := func(nd node) error {
+ watchDel(nd, c, all)
+ return nil
+ }
+ err = nonil(err, e, nd.Walk(fn))
+ // TODO(rjeczalik): if e != nil store dummy chan in nd.Watch just to
+ // retry un/rewatching next time and/or let the user handle the failure
+	// via Error event?
+ return errSkip
+ }
+ t.rw.Lock()
+ e := t.root.Walk("", fn) // TODO(rjeczalik): use max root per c
+ t.rw.Unlock()
+ if e != nil {
+ err = nonil(err, e)
+ }
+ dbgprintf("Stop(%p) error: %v\n", c, err)
+}
+
+// Close TODO(rjeczalik)
+func (t *recursiveTree) Close() error {
+ err := t.w.Close()
+ close(t.c)
+ return err
+}
diff --git a/vendor/github.com/rjeczalik/notify/util.go b/vendor/github.com/rjeczalik/notify/util.go
new file mode 100644
index 000000000..67e01fbbd
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/util.go
@@ -0,0 +1,150 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+package notify
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+const all = ^Event(0)
+const sep = string(os.PathSeparator)
+
+var errDepth = errors.New("exceeded allowed iteration count (circular symlink?)")
+
+func min(i, j int) int {
+ if i > j {
+ return j
+ }
+ return i
+}
+
+func max(i, j int) int {
+ if i < j {
+ return j
+ }
+ return i
+}
+
+// must panics if err is non-nil.
+func must(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+// nonil gives first non-nil error from the given arguments.
+func nonil(err ...error) error {
+ for _, err := range err {
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func cleanpath(path string) (realpath string, isrec bool, err error) {
+ if strings.HasSuffix(path, "...") {
+ isrec = true
+ path = path[:len(path)-3]
+ }
+ if path, err = filepath.Abs(path); err != nil {
+ return "", false, err
+ }
+ if path, err = canonical(path); err != nil {
+ return "", false, err
+ }
+ return path, isrec, nil
+}
+
+// canonical resolves any symlink in the given path and returns it in a clean form.
+// It expects the path to be absolute. It fails to resolve circular symlinks by
+// maintaining a simple iteration limit.
+func canonical(p string) (string, error) {
+ p, err := filepath.Abs(p)
+ if err != nil {
+ return "", err
+ }
+ for i, j, depth := 1, 0, 1; i < len(p); i, depth = i+1, depth+1 {
+ if depth > 128 {
+ return "", &os.PathError{Op: "canonical", Path: p, Err: errDepth}
+ }
+ if j = strings.IndexRune(p[i:], '/'); j == -1 {
+ j, i = i, len(p)
+ } else {
+ j, i = i, i+j
+ }
+ fi, err := os.Lstat(p[:i])
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ s, err := os.Readlink(p[:i])
+ if err != nil {
+ return "", err
+ }
+ if filepath.IsAbs(s) {
+ p = "/" + s + p[i:]
+ } else {
+ p = p[:j] + s + p[i:]
+ }
+ i = 1 // no guarantee s is canonical, start all over
+ }
+ }
+ return filepath.Clean(p), nil
+}
+
+func joinevents(events []Event) (e Event) {
+ if len(events) == 0 {
+ e = All
+ } else {
+ for _, event := range events {
+ e |= event
+ }
+ }
+ return
+}
+
+func split(s string) (string, string) {
+ if i := lastIndexSep(s); i != -1 {
+ return s[:i], s[i+1:]
+ }
+ return "", s
+}
+
+func base(s string) string {
+ if i := lastIndexSep(s); i != -1 {
+ return s[i+1:]
+ }
+ return s
+}
+
+func indexbase(root, name string) int {
+ if n, m := len(root), len(name); m >= n && name[:n] == root &&
+ (n == m || name[n] == os.PathSeparator) {
+ return min(n+1, m)
+ }
+ return -1
+}
+
+func indexSep(s string) int {
+ for i := 0; i < len(s); i++ {
+ if s[i] == os.PathSeparator {
+ return i
+ }
+ }
+ return -1
+}
+
+func lastIndexSep(s string) int {
+ for i := len(s) - 1; i >= 0; i-- {
+ if s[i] == os.PathSeparator {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher.go b/vendor/github.com/rjeczalik/notify/watcher.go
new file mode 100644
index 000000000..34148eff3
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher.go
@@ -0,0 +1,85 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+package notify
+
+import "errors"
+
+var (
+ errAlreadyWatched = errors.New("path is already watched")
+ errNotWatched = errors.New("path is not being watched")
+ errInvalidEventSet = errors.New("invalid event set provided")
+)
+
+// Watcher is an intermediate interface for wrapping inotify, ReadDirChangesW,
+// FSEvents, kqueue and poller implementations.
+//
+// The watcher implementation is expected to do its own mapping between paths and
+// create watchers if underlying event notification does not support it. For
+// the ease of implementation it is guaranteed that paths provided via Watch and
+// Unwatch methods are absolute and clean.
+type watcher interface {
+ // Watch requests a watcher creation for the given path and given event set.
+ Watch(path string, event Event) error
+
+ // Unwatch requests a watcher deletion for the given path and given event set.
+ Unwatch(path string) error
+
+ // Rewatch provides a functionality for modifying existing watch-points, like
+ // expanding its event set.
+ //
+ // Rewatch modifies existing watch-point under for the given path. It passes
+ // the existing event set currently registered for the given path, and the
+ // new, requested event set.
+ //
+ // It is guaranteed that Tree will not pass to Rewatch zero value for any
+ // of its arguments. If old == new and watcher can be upgraded to
+ // recursiveWatcher interface, a watch for the corresponding path is expected
+ // to be changed from recursive to the non-recursive one.
+ Rewatch(path string, old, new Event) error
+
+ // Close unwatches all paths that are registered. When Close returns, it
+ // is expected it will report no more events.
+ Close() error
+}
+
+// RecursiveWatcher is an interface for a Watcher for those OS, which do support
+// recursive watching over directories.
+type recursiveWatcher interface {
+ RecursiveWatch(path string, event Event) error
+
+ // RecursiveUnwatch removes a recursive watch-point given by the path. For
+ // native recursive implementation there is no difference in functionality
+ // between Unwatch and RecursiveUnwatch, however for those platforms, that
+ // requires emulation for recursive watch-points, the implementation differs.
+ RecursiveUnwatch(path string) error
+
+ // RecursiveRewatcher provides a functionality for modifying and/or relocating
+ // existing recursive watch-points.
+ //
+ // To relocate a watch-point means to unwatch oldpath and set a watch-point on
+ // newpath.
+ //
+ // To modify a watch-point means either to expand or shrink its event set.
+ //
+ // Tree can want to either relocate, modify or relocate and modify a watch-point
+ // via single RecursiveRewatch call.
+ //
+ // If oldpath == newpath, the watch-point is expected to change its event set value
+ // from oldevent to newevent.
+ //
+ // If oldevent == newevent, the watch-point is expected to relocate from oldpath
+ // to the newpath.
+ //
+ // If oldpath != newpath and oldevent != newevent, the watch-point is expected
+ // to relocate from oldpath to the newpath first and then change its event set
+ // value from oldevent to the newevent. In other words the end result must be
+ // a watch-point set on newpath with newevent value of its event set.
+ //
+	// It is guaranteed that Tree will not pass to RecursiveRewatch a zero value
+ // for any of its arguments. If oldpath == newpath and oldevent == newevent,
+ // a watch for the corresponding path is expected to be changed for
+ // non-recursive to the recursive one.
+ RecursiveRewatch(oldpath, newpath string, oldevent, newevent Event) error
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher_fen.go b/vendor/github.com/rjeczalik/notify/watcher_fen.go
new file mode 100644
index 000000000..60e9a36da
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_fen.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package notify
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// newTrigger returns implementation of trigger.
+func newTrigger(pthLkp map[string]*watched) trigger {
+ return &fen{
+ pthLkp: pthLkp,
+ cf: newCfen(),
+ }
+}
+
+// fen is a structure implementing trigger for FEN.
+type fen struct {
+ // p is a FEN port identifier
+ p int
+ // pthLkp is a structure mapping monitored files/dir with data about them,
+ // shared with parent trg structure
+ pthLkp map[string]*watched
+ // cf wraps C operations for FEN
+ cf cfen
+}
+
+// watched is a data structure representing watched file/directory.
+type watched struct {
+ // p is a path to watched file/directory
+ p string
+ // fi provides information about watched file/dir
+ fi os.FileInfo
+ // eDir represents events watched directly
+ eDir Event
+ // eNonDir represents events watched indirectly
+ eNonDir Event
+}
+
+// Stop implements trigger.
+func (f *fen) Stop() error {
+ return f.cf.port_alert(f.p)
+}
+
+// Close implements trigger.
+func (f *fen) Close() (err error) {
+ return syscall.Close(f.p)
+}
+
+// NewWatched implements trigger.
+func (*fen) NewWatched(p string, fi os.FileInfo) (*watched, error) {
+ return &watched{p: p, fi: fi}, nil
+}
+
+// Record implements trigger.
+func (f *fen) Record(w *watched) {
+ f.pthLkp[w.p] = w
+}
+
+// Del implements trigger.
+func (f *fen) Del(w *watched) {
+ delete(f.pthLkp, w.p)
+}
+
+func inter2pe(n interface{}) PortEvent {
+ pe, ok := n.(PortEvent)
+ if !ok {
+ panic(fmt.Sprintf("fen: type should be PortEvent, %T instead", n))
+ }
+ return pe
+}
+
+// Watched implements trigger.
+func (f *fen) Watched(n interface{}) (*watched, int64, error) {
+ pe := inter2pe(n)
+ fo, ok := pe.PortevObject.(*FileObj)
+ if !ok || fo == nil {
+ panic(fmt.Sprintf("fen: type should be *FileObj, %T instead", fo))
+ }
+ w, ok := f.pthLkp[fo.Name]
+ if !ok {
+ return nil, 0, errNotWatched
+ }
+ return w, int64(pe.PortevEvents), nil
+}
+
+// init initializes FEN.
+func (f *fen) Init() (err error) {
+ f.p, err = f.cf.port_create()
+ return
+}
+
+func fi2fo(fi os.FileInfo, p string) FileObj {
+ st, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ panic(fmt.Sprintf("fen: type should be *syscall.Stat_t, %T instead", st))
+ }
+ return FileObj{Name: p, Atim: st.Atim, Mtim: st.Mtim, Ctim: st.Ctim}
+}
+
+// Unwatch implements trigger.
+func (f *fen) Unwatch(w *watched) error {
+ return f.cf.port_dissociate(f.p, FileObj{Name: w.p})
+}
+
+// Watch implements trigger.
+func (f *fen) Watch(fi os.FileInfo, w *watched, e int64) error {
+ return f.cf.port_associate(f.p, fi2fo(fi, w.p), int(e))
+}
+
+// Wait implements trigger.
+func (f *fen) Wait() (interface{}, error) {
+ var (
+ pe PortEvent
+ err error
+ )
+ err = f.cf.port_get(f.p, &pe)
+ return pe, err
+}
+
+// IsStop implements trigger.
+func (f *fen) IsStop(n interface{}, err error) bool {
+ return err == syscall.EBADF || inter2pe(n).PortevSource == srcAlert
+}
+
+func init() {
+ encode = func(e Event) (o int64) {
+ // Create event is not supported by FEN. Instead FileModified event will
+ // be registered. If this event will be reported on dir which is to be
+ // monitored for Create, dir will be rescanned and Create events will
+ // be generated and returned for new files. In case of files,
+ // if not requested FileModified event is reported, it will be ignored.
+ if e&Create != 0 {
+ o = (o &^ int64(Create)) | int64(FileModified)
+ }
+ if e&Write != 0 {
+ o = (o &^ int64(Write)) | int64(FileModified)
+ }
+ // Following events are 'exception events' and as such cannot be requested
+		// explicitly for monitoring or filtered out. If they will be reported
+ // by FEN and not subscribed with by user, they will be filtered out by
+ // watcher's logic.
+ o &= int64(^Rename & ^Remove &^ FileDelete &^ FileRenameTo &^
+ FileRenameFrom &^ Unmounted &^ MountedOver)
+ return
+ }
+ nat2not = map[Event]Event{
+ FileModified: Write,
+ FileRenameFrom: Rename,
+ FileDelete: Remove,
+ FileAccess: Event(0),
+ FileAttrib: Event(0),
+ FileRenameTo: Event(0),
+ FileTrunc: Event(0),
+ FileNoFollow: Event(0),
+ Unmounted: Event(0),
+ MountedOver: Event(0),
+ }
+ not2nat = map[Event]Event{
+ Write: FileModified,
+ Rename: FileRenameFrom,
+ Remove: FileDelete,
+ }
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher_fen_cgo.go b/vendor/github.com/rjeczalik/notify/watcher_fen_cgo.go
new file mode 100644
index 000000000..58ac8e8c6
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_fen_cgo.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package notify
+
+// #include <port.h>
+// #include <stdio.h>
+// #include <stdlib.h>
+// struct file_obj* newFo() { return (struct file_obj*) malloc(sizeof(struct file_obj)); }
+// port_event_t* newPe() { return (port_event_t*) malloc(sizeof(port_event_t)); }
+// uintptr_t conv(struct file_obj* fo) { return (uintptr_t) fo; }
+// struct file_obj* dconv(uintptr_t fo) { return (struct file_obj*) fo; }
+import "C"
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ fileAccess = Event(C.FILE_ACCESS)
+ fileModified = Event(C.FILE_MODIFIED)
+ fileAttrib = Event(C.FILE_ATTRIB)
+ fileDelete = Event(C.FILE_DELETE)
+ fileRenameTo = Event(C.FILE_RENAME_TO)
+ fileRenameFrom = Event(C.FILE_RENAME_FROM)
+ fileTrunc = Event(C.FILE_TRUNC)
+ fileNoFollow = Event(C.FILE_NOFOLLOW)
+ unmounted = Event(C.UNMOUNTED)
+ mountedOver = Event(C.MOUNTEDOVER)
+)
+
+// PortEvent is a notify's equivalent of port_event_t.
+type PortEvent struct {
+ PortevEvents int // PortevEvents is an equivalent of portev_events.
+ PortevSource uint8 // PortevSource is an equivalent of portev_source.
+ PortevPad uint8 // Portevpad is an equivalent of portev_pad.
+ PortevObject interface{} // PortevObject is an equivalent of portev_object.
+ PortevUser uintptr // PortevUser is an equivalent of portev_user.
+}
+
+// FileObj is a notify's equivalent of file_obj.
+type FileObj struct {
+ Atim syscall.Timespec // Atim is an equivalent of fo_atime.
+ Mtim syscall.Timespec // Mtim is an equivalent of fo_mtime.
+ Ctim syscall.Timespec // Ctim is an equivalent of fo_ctime.
+ Pad [3]uintptr // Pad is an equivalent of fo_pad.
+ Name string // Name is an equivalent of fo_name.
+}
+
+type cfen struct {
+ p2pe map[string]*C.port_event_t
+ p2fo map[string]*C.struct_file_obj
+}
+
+func newCfen() cfen {
+ return cfen{
+ p2pe: make(map[string]*C.port_event_t),
+ p2fo: make(map[string]*C.struct_file_obj),
+ }
+}
+
+func unix2C(sec int64, nsec int64) (C.time_t, C.long) {
+ return C.time_t(sec), C.long(nsec)
+}
+
+func (c *cfen) port_associate(p int, fo FileObj, e int) (err error) {
+ cfo := C.newFo()
+ cfo.fo_atime.tv_sec, cfo.fo_atime.tv_nsec = unix2C(fo.Atim.Unix())
+ cfo.fo_mtime.tv_sec, cfo.fo_mtime.tv_nsec = unix2C(fo.Mtim.Unix())
+ cfo.fo_ctime.tv_sec, cfo.fo_ctime.tv_nsec = unix2C(fo.Ctim.Unix())
+ cfo.fo_name = C.CString(fo.Name)
+ c.p2fo[fo.Name] = cfo
+ _, err = C.port_associate(C.int(p), srcFile, C.conv(cfo), C.int(e), nil)
+ return
+}
+
+func (c *cfen) port_dissociate(port int, fo FileObj) (err error) {
+ cfo, ok := c.p2fo[fo.Name]
+ if !ok {
+ return errNotWatched
+ }
+ _, err = C.port_dissociate(C.int(port), srcFile, C.conv(cfo))
+ C.free(unsafe.Pointer(cfo.fo_name))
+ C.free(unsafe.Pointer(cfo))
+ delete(c.p2fo, fo.Name)
+ return
+}
+
+const srcAlert = C.PORT_SOURCE_ALERT
+const srcFile = C.PORT_SOURCE_FILE
+const alertSet = C.PORT_ALERT_SET
+
+func cfo2fo(cfo *C.struct_file_obj) *FileObj {
+ // Currently remaining attributes are not used.
+ if cfo == nil {
+ return nil
+ }
+ var fo FileObj
+ fo.Name = C.GoString(cfo.fo_name)
+ return &fo
+}
+
+func (c *cfen) port_get(port int, pe *PortEvent) (err error) {
+ cpe := C.newPe()
+ if _, err = C.port_get(C.int(port), cpe, nil); err != nil {
+ C.free(unsafe.Pointer(cpe))
+ return
+ }
+ pe.PortevEvents, pe.PortevSource, pe.PortevPad =
+ int(cpe.portev_events), uint8(cpe.portev_source), uint8(cpe.portev_pad)
+ pe.PortevObject = cfo2fo(C.dconv(cpe.portev_object))
+ pe.PortevUser = uintptr(cpe.portev_user)
+ C.free(unsafe.Pointer(cpe))
+ return
+}
+
+func (c *cfen) port_create() (int, error) {
+ p, err := C.port_create()
+ return int(p), err
+}
+
+func (c *cfen) port_alert(p int) (err error) {
+ _, err = C.port_alert(C.int(p), alertSet, C.int(666), nil)
+ return
+}
+
+func (c *cfen) free() {
+ for i := range c.p2fo {
+ C.free(unsafe.Pointer(c.p2fo[i].fo_name))
+ C.free(unsafe.Pointer(c.p2fo[i]))
+ }
+ for i := range c.p2pe {
+ C.free(unsafe.Pointer(c.p2pe[i]))
+ }
+ c.p2fo = make(map[string]*C.struct_file_obj)
+ c.p2pe = make(map[string]*C.port_event_t)
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher_fsevents.go b/vendor/github.com/rjeczalik/notify/watcher_fsevents.go
new file mode 100644
index 000000000..9062c17c7
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_fsevents.go
@@ -0,0 +1,319 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,!kqueue
+
+package notify
+
+import (
+ "errors"
+ "strings"
+ "sync/atomic"
+)
+
+// TODO(rjeczalik): get rid of calls to canonical, it's tree responsibility
+
+const (
+ failure = uint32(FSEventsMustScanSubDirs | FSEventsUserDropped | FSEventsKernelDropped)
+ filter = uint32(FSEventsCreated | FSEventsRemoved | FSEventsRenamed |
+ FSEventsModified | FSEventsInodeMetaMod)
+)
+
+// FSEvent represents single file event. It is created out of values passed by
+// FSEvents to FSEventStreamCallback function.
+type FSEvent struct {
+ Path string // real path of the file or directory
+ ID uint64 // ID of the event (FSEventStreamEventId)
+ Flags uint32 // joint FSEvents* flags (FSEventStreamEventFlags)
+}
+
+// splitflags separates event flags from single set into slice of flags.
+func splitflags(set uint32) (e []uint32) {
+ for i := uint32(1); set != 0; i, set = i<<1, set>>1 {
+ if (set & 1) != 0 {
+ e = append(e, i)
+ }
+ }
+ return
+}
+
+// watch represents a filesystem watchpoint. It is a higher level abstraction
+// over FSEvents' stream, which implements filtering of file events based
+// on path and event set. It emulates non-recursive watch-point by filtering out
+// events which paths are more than 1 level deeper than the watched path.
+type watch struct {
+ // prev stores last event set per path in order to filter out old flags
+ // for new events, which apparently FSEvents likes to retain. It's a disgusting
+ // hack, it should be researched how to get rid of it.
+ prev map[string]uint32
+ c chan<- EventInfo
+ stream *stream
+ path string
+ events uint32
+ isrec int32
+ flushed bool
+}
+
+// Example format:
+//
+// ~ $ (trigger command) # (event set) -> (effective event set)
+//
+// Heuristics:
+//
+// 1. Create event is removed when it was present in previous event set.
+// Example:
+//
+// ~ $ echo > file # Create|Write -> Create|Write
+// ~ $ echo > file # Create|Write|InodeMetaMod -> Write|InodeMetaMod
+//
+ // 2. Remove event is removed if it was present in previous event set.
+// Example:
+//
+// ~ $ touch file # Create -> Create
+// ~ $ rm file # Create|Remove -> Remove
+// ~ $ touch file # Create|Remove -> Create
+//
+// 3. Write event is removed if not followed by InodeMetaMod on existing
+// file. Example:
+//
+// ~ $ echo > file # Create|Write -> Create|Write
+// ~ $ chmod +x file # Create|Write|ChangeOwner -> ChangeOwner
+//
+// 4. Write&InodeMetaMod is removed when effective event set contain Remove event.
+// Example:
+//
+// ~ $ echo > file # Write|InodeMetaMod -> Write|InodeMetaMod
+// ~ $ rm file # Remove|Write|InodeMetaMod -> Remove
+//
+func (w *watch) strip(base string, set uint32) uint32 {
+ const (
+ write = FSEventsModified | FSEventsInodeMetaMod
+ both = FSEventsCreated | FSEventsRemoved
+ )
+ switch w.prev[base] {
+ case FSEventsCreated:
+ set &^= FSEventsCreated
+ if set&FSEventsRemoved != 0 {
+ w.prev[base] = FSEventsRemoved
+ set &^= write
+ }
+ case FSEventsRemoved:
+ set &^= FSEventsRemoved
+ if set&FSEventsCreated != 0 {
+ w.prev[base] = FSEventsCreated
+ }
+ default:
+ switch set & both {
+ case FSEventsCreated:
+ w.prev[base] = FSEventsCreated
+ case FSEventsRemoved:
+ w.prev[base] = FSEventsRemoved
+ set &^= write
+ }
+ }
+ dbgprintf("split()=%v\n", Event(set))
+ return set
+}
+
+// Dispatch is a stream function which forwards given file events for the watched
+// path to underlying FileInfo channel.
+func (w *watch) Dispatch(ev []FSEvent) {
+ events := atomic.LoadUint32(&w.events)
+ isrec := (atomic.LoadInt32(&w.isrec) == 1)
+ for i := range ev {
+ if ev[i].Flags&FSEventsHistoryDone != 0 {
+ w.flushed = true
+ continue
+ }
+ if !w.flushed {
+ continue
+ }
+ dbgprintf("%v (0x%x) (%s, i=%d, ID=%d, len=%d)\n", Event(ev[i].Flags),
+ ev[i].Flags, ev[i].Path, i, ev[i].ID, len(ev))
+ if ev[i].Flags&failure != 0 {
+ // TODO(rjeczalik): missing error handling
+ continue
+ }
+ if !strings.HasPrefix(ev[i].Path, w.path) {
+ continue
+ }
+ n := len(w.path)
+ base := ""
+ if len(ev[i].Path) > n {
+ if ev[i].Path[n] != '/' {
+ continue
+ }
+ base = ev[i].Path[n+1:]
+ if !isrec && strings.IndexByte(base, '/') != -1 {
+ continue
+ }
+ }
+ // TODO(rjeczalik): get diff only from filtered events?
+ e := w.strip(string(base), ev[i].Flags) & events
+ if e == 0 {
+ continue
+ }
+ for _, e := range splitflags(e) {
+ dbgprintf("%d: single event: %v", ev[i].ID, Event(e))
+ w.c <- &event{
+ fse: ev[i],
+ event: Event(e),
+ }
+ }
+ }
+}
+
+// Stop closes underlying FSEvents stream and stops dispatching events.
+func (w *watch) Stop() {
+ w.stream.Stop()
+ // TODO(rjeczalik): make (*stream).Stop flush synchronously undelivered events,
+ // so the following hack can be removed. It should flush all the streams
+ // concurrently as we care not to block too much here.
+ atomic.StoreUint32(&w.events, 0)
+ atomic.StoreInt32(&w.isrec, 0)
+}
+
+// fsevents implements Watcher and RecursiveWatcher interfaces backed by FSEvents
+// framework.
+type fsevents struct {
+ watches map[string]*watch
+ c chan<- EventInfo
+}
+
+func newWatcher(c chan<- EventInfo) watcher {
+ return &fsevents{
+ watches: make(map[string]*watch),
+ c: c,
+ }
+}
+
+func (fse *fsevents) watch(path string, event Event, isrec int32) (err error) {
+ if path, err = canonical(path); err != nil {
+ return err
+ }
+ if _, ok := fse.watches[path]; ok {
+ return errAlreadyWatched
+ }
+ w := &watch{
+ prev: make(map[string]uint32),
+ c: fse.c,
+ path: path,
+ events: uint32(event),
+ isrec: isrec,
+ }
+ w.stream = newStream(path, w.Dispatch)
+ if err = w.stream.Start(); err != nil {
+ return err
+ }
+ fse.watches[path] = w
+ return nil
+}
+
+func (fse *fsevents) unwatch(path string) (err error) {
+ if path, err = canonical(path); err != nil {
+ return
+ }
+ w, ok := fse.watches[path]
+ if !ok {
+ return errNotWatched
+ }
+ w.stream.Stop()
+ delete(fse.watches, path)
+ return nil
+}
+
+// Watch implements Watcher interface. It fails with non-nil error when setting
+// the watch-point by FSEvents fails or with errAlreadyWatched error when
+// the given path is already watched.
+func (fse *fsevents) Watch(path string, event Event) error {
+ return fse.watch(path, event, 0)
+}
+
+// Unwatch implements Watcher interface. It fails with errNotWatched when
+// the given path is not being watched.
+func (fse *fsevents) Unwatch(path string) error {
+ return fse.unwatch(path)
+}
+
+// Rewatch implements Watcher interface. It fails with errNotWatched when
+// the given path is not being watched or with errInvalidEventSet when oldevent
+// does not match event set the watch-point currently holds.
+func (fse *fsevents) Rewatch(path string, oldevent, newevent Event) error {
+ w, ok := fse.watches[path]
+ if !ok {
+ return errNotWatched
+ }
+ if !atomic.CompareAndSwapUint32(&w.events, uint32(oldevent), uint32(newevent)) {
+ return errInvalidEventSet
+ }
+ atomic.StoreInt32(&w.isrec, 0)
+ return nil
+}
+
+// RecursiveWatch implements RecursiveWatcher interface. It fails with non-nil
+// error when setting the watch-point by FSEvents fails or with errAlreadyWatched
+// error when the given path is already watched.
+func (fse *fsevents) RecursiveWatch(path string, event Event) error {
+ return fse.watch(path, event, 1)
+}
+
+// RecursiveUnwatch implements RecursiveWatcher interface. It fails with
+// errNotWatched when the given path is not being watched.
+//
+// TODO(rjeczalik): fail if w.isrec == 0?
+func (fse *fsevents) RecursiveUnwatch(path string) error {
+ return fse.unwatch(path)
+}
+
+ // RecursiveRewatch implements RecursiveWatcher interface. It fails:
+//
+// * with errNotWatched when the given path is not being watched
+// * with errInvalidEventSet when oldevent does not match the current event set
+// * with errAlreadyWatched when watch-point given by the oldpath was meant to
+// be relocated to newpath, but the newpath is already watched
+// * a non-nil error when setting the watch-point with FSEvents fails
+//
+// TODO(rjeczalik): Improve handling of watch-point relocation? See two TODOs
+// that follows.
+func (fse *fsevents) RecursiveRewatch(oldpath, newpath string, oldevent, newevent Event) error {
+ switch [2]bool{oldpath == newpath, oldevent == newevent} {
+ case [2]bool{true, true}:
+ w, ok := fse.watches[oldpath]
+ if !ok {
+ return errNotWatched
+ }
+ atomic.StoreInt32(&w.isrec, 1)
+ return nil
+ case [2]bool{true, false}:
+ w, ok := fse.watches[oldpath]
+ if !ok {
+ return errNotWatched
+ }
+ if !atomic.CompareAndSwapUint32(&w.events, uint32(oldevent), uint32(newevent)) {
+ return errors.New("invalid event state diff")
+ }
+ atomic.StoreInt32(&w.isrec, 1)
+ return nil
+ default:
+ // TODO(rjeczalik): rewatch newpath only if exists?
+ // TODO(rjeczalik): migrate w.prev to new watch?
+ if _, ok := fse.watches[newpath]; ok {
+ return errAlreadyWatched
+ }
+ if err := fse.Unwatch(oldpath); err != nil {
+ return err
+ }
+ // TODO(rjeczalik): revert unwatch if watch fails?
+ return fse.watch(newpath, newevent, 1)
+ }
+}
+
+// Close unwatches all watch-points.
+func (fse *fsevents) Close() error {
+ for _, w := range fse.watches {
+ w.Stop()
+ }
+ fse.watches = nil
+ return nil
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go b/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go
new file mode 100644
index 000000000..ee9631a61
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_fsevents_cgo.go
@@ -0,0 +1,190 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,!kqueue
+
+package notify
+
+/*
+#include <CoreServices/CoreServices.h>
+
+typedef void (*CFRunLoopPerformCallBack)(void*);
+
+void gosource(void *);
+void gostream(uintptr_t, uintptr_t, size_t, uintptr_t, uintptr_t, uintptr_t);
+
+static FSEventStreamRef EventStreamCreate(FSEventStreamContext * context, uintptr_t info, CFArrayRef paths, FSEventStreamEventId since, CFTimeInterval latency, FSEventStreamCreateFlags flags) {
+ context->info = (void*) info;
+ return FSEventStreamCreate(NULL, (FSEventStreamCallback) gostream, context, paths, since, latency, flags);
+}
+
+#cgo LDFLAGS: -framework CoreServices
+*/
+import "C"
+
+import (
+ "errors"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unsafe"
+)
+
+var nilstream C.FSEventStreamRef
+
+// Default arguments for FSEventStreamCreate function.
+var (
+ latency C.CFTimeInterval
+ flags = C.FSEventStreamCreateFlags(C.kFSEventStreamCreateFlagFileEvents | C.kFSEventStreamCreateFlagNoDefer)
+ since = uint64(C.FSEventsGetCurrentEventId())
+)
+
+var runloop C.CFRunLoopRef // global runloop which all streams are registered with
+var wg sync.WaitGroup // used to wait until the runloop starts
+
+// source is used for synchronization purposes - it signals when runloop has
+// started and is ready via the wg. It also serves purpose of a dummy source,
+// thanks to it the runloop does not return as it also has at least one source
+// registered.
+var source = C.CFRunLoopSourceCreate(nil, 0, &C.CFRunLoopSourceContext{
+ perform: (C.CFRunLoopPerformCallBack)(C.gosource),
+})
+
+// Errors returned when FSEvents functions fail.
+var (
+ errCreate = os.NewSyscallError("FSEventStreamCreate", errors.New("NULL"))
+ errStart = os.NewSyscallError("FSEventStreamStart", errors.New("false"))
+)
+
+// initializes the global runloop and ensures any created stream awaits its
+// readiness.
+func init() {
+ wg.Add(1)
+ go func() {
+ runloop = C.CFRunLoopGetCurrent()
+ C.CFRunLoopAddSource(runloop, source, C.kCFRunLoopDefaultMode)
+ C.CFRunLoopRun()
+ panic("runloop has just unexpectedly stopped")
+ }()
+ C.CFRunLoopSourceSignal(source)
+}
+
+//export gosource
+func gosource(unsafe.Pointer) {
+ time.Sleep(time.Second)
+ wg.Done()
+}
+
+//export gostream
+func gostream(_, info uintptr, n C.size_t, paths, flags, ids uintptr) {
+ const (
+ offchar = unsafe.Sizeof((*C.char)(nil))
+ offflag = unsafe.Sizeof(C.FSEventStreamEventFlags(0))
+ offid = unsafe.Sizeof(C.FSEventStreamEventId(0))
+ )
+ if n == 0 {
+ return
+ }
+ ev := make([]FSEvent, 0, int(n))
+ for i := uintptr(0); i < uintptr(n); i++ {
+ switch flags := *(*uint32)(unsafe.Pointer((flags + i*offflag))); {
+ case flags&uint32(FSEventsEventIdsWrapped) != 0:
+ atomic.StoreUint64(&since, uint64(C.FSEventsGetCurrentEventId()))
+ default:
+ ev = append(ev, FSEvent{
+ Path: C.GoString(*(**C.char)(unsafe.Pointer(paths + i*offchar))),
+ Flags: flags,
+ ID: *(*uint64)(unsafe.Pointer(ids + i*offid)),
+ })
+ }
+
+ }
+ streamFuncs.get(info)(ev)
+}
+
+// StreamFunc is a callback called when stream receives file events.
+type streamFunc func([]FSEvent)
+
+var streamFuncs = streamFuncRegistry{m: map[uintptr]streamFunc{}}
+
+type streamFuncRegistry struct {
+ mu sync.Mutex
+ m map[uintptr]streamFunc
+ i uintptr
+}
+
+func (r *streamFuncRegistry) get(id uintptr) streamFunc {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return r.m[id]
+}
+
+func (r *streamFuncRegistry) add(fn streamFunc) uintptr {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.i++
+ r.m[r.i] = fn
+ return r.i
+}
+
+func (r *streamFuncRegistry) delete(id uintptr) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ delete(r.m, id)
+}
+
+// Stream represents single watch-point which listens for events scheduled by
+// the global runloop.
+type stream struct {
+ path string
+ ref C.FSEventStreamRef
+ info uintptr
+}
+
+// NewStream creates a stream for given path, listening for file events and
+ // calling fn upon receiving any.
+func newStream(path string, fn streamFunc) *stream {
+ return &stream{
+ path: path,
+ info: streamFuncs.add(fn),
+ }
+}
+
+// Start creates a FSEventStream for the given path and schedules it with
+// global runloop. It's a nop if the stream was already started.
+func (s *stream) Start() error {
+ if s.ref != nilstream {
+ return nil
+ }
+ wg.Wait()
+ p := C.CFStringCreateWithCStringNoCopy(nil, C.CString(s.path), C.kCFStringEncodingUTF8, nil)
+ path := C.CFArrayCreate(nil, (*unsafe.Pointer)(unsafe.Pointer(&p)), 1, nil)
+ ctx := C.FSEventStreamContext{}
+ ref := C.EventStreamCreate(&ctx, C.uintptr_t(s.info), path, C.FSEventStreamEventId(atomic.LoadUint64(&since)), latency, flags)
+ if ref == nilstream {
+ return errCreate
+ }
+ C.FSEventStreamScheduleWithRunLoop(ref, runloop, C.kCFRunLoopDefaultMode)
+ if C.FSEventStreamStart(ref) == C.Boolean(0) {
+ C.FSEventStreamInvalidate(ref)
+ return errStart
+ }
+ C.CFRunLoopWakeUp(runloop)
+ s.ref = ref
+ return nil
+}
+
+// Stop stops underlying FSEventStream and unregisters it from global runloop.
+func (s *stream) Stop() {
+ if s.ref == nilstream {
+ return
+ }
+ wg.Wait()
+ C.FSEventStreamStop(s.ref)
+ C.FSEventStreamInvalidate(s.ref)
+ C.CFRunLoopWakeUp(runloop)
+ s.ref = nilstream
+ streamFuncs.delete(s.info)
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher_inotify.go b/vendor/github.com/rjeczalik/notify/watcher_inotify.go
new file mode 100644
index 000000000..3ceaa8f3a
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_inotify.go
@@ -0,0 +1,396 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build linux
+
+package notify
+
+import (
+ "bytes"
+ "errors"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+// eventBufferSize defines the size of the buffer given to read(2) function. One
+ // should not depend on this value, since it was arbitrarily chosen and may be
+// changed in the future.
+const eventBufferSize = 64 * (syscall.SizeofInotifyEvent + syscall.PathMax + 1)
+
+// consumersCount defines the number of consumers in producer-consumer based
+// implementation. Each consumer is run in a separate goroutine and has read
+// access to watched files map.
+const consumersCount = 2
+
+const invalidDescriptor = -1
+
+// watched is a pair of file path and inotify mask used as a value in
+// watched files map.
+type watched struct {
+ path string
+ mask uint32
+}
+
+// inotify implements Watcher interface.
+type inotify struct {
+ sync.RWMutex // protects inotify.m map
+ m map[int32]*watched // watch descriptor to watched object
+ fd int32 // inotify file descriptor
+ pipefd []int // pipe's read and write descriptors
+ epfd int // epoll descriptor
+ epes []syscall.EpollEvent // epoll events
+ buffer [eventBufferSize]byte // inotify event buffer
+ wg sync.WaitGroup // wait group used to close main loop
+ c chan<- EventInfo // event dispatcher channel
+}
+
+// NewWatcher creates new non-recursive inotify backed by inotify.
+func newWatcher(c chan<- EventInfo) watcher {
+ i := &inotify{
+ m: make(map[int32]*watched),
+ fd: invalidDescriptor,
+ pipefd: []int{invalidDescriptor, invalidDescriptor},
+ epfd: invalidDescriptor,
+ epes: make([]syscall.EpollEvent, 0),
+ c: c,
+ }
+ runtime.SetFinalizer(i, func(i *inotify) {
+ i.epollclose()
+ if i.fd != invalidDescriptor {
+ syscall.Close(int(i.fd))
+ }
+ })
+ return i
+}
+
+// Watch implements notify.watcher interface.
+func (i *inotify) Watch(path string, e Event) error {
+ return i.watch(path, e)
+}
+
+// Rewatch implements notify.watcher interface.
+func (i *inotify) Rewatch(path string, _, newevent Event) error {
+ return i.watch(path, newevent)
+}
+
+// watch adds a new watcher to the set of watched objects or modifies the existing
+// one. If called for the first time, this function initializes inotify filesystem
+// monitor and starts producer-consumers goroutines.
+func (i *inotify) watch(path string, e Event) (err error) {
+ if e&^(All|Event(syscall.IN_ALL_EVENTS)) != 0 {
+ return errors.New("notify: unknown event")
+ }
+ if err = i.lazyinit(); err != nil {
+ return
+ }
+ iwd, err := syscall.InotifyAddWatch(int(i.fd), path, encode(e))
+ if err != nil {
+ return
+ }
+ i.RLock()
+ wd := i.m[int32(iwd)]
+ i.RUnlock()
+ if wd == nil {
+ i.Lock()
+ if i.m[int32(iwd)] == nil {
+ i.m[int32(iwd)] = &watched{path: path, mask: uint32(e)}
+ }
+ i.Unlock()
+ } else {
+ i.Lock()
+ wd.mask = uint32(e)
+ i.Unlock()
+ }
+ return nil
+}
+
+// lazyinit sets up all required file descriptors and starts 1+consumersCount
+// goroutines. The producer goroutine blocks until file-system notifications
+// occur. Then, all events are read from system buffer and sent to consumer
+// goroutines which construct valid notify events. This method uses
+// Double-Checked Locking optimization.
+func (i *inotify) lazyinit() error {
+ if atomic.LoadInt32(&i.fd) == invalidDescriptor {
+ i.Lock()
+ defer i.Unlock()
+ if atomic.LoadInt32(&i.fd) == invalidDescriptor {
+ fd, err := syscall.InotifyInit()
+ if err != nil {
+ return err
+ }
+ i.fd = int32(fd)
+ if err = i.epollinit(); err != nil {
+ _, _ = i.epollclose(), syscall.Close(int(fd)) // Ignore errors.
+ i.fd = invalidDescriptor
+ return err
+ }
+ esch := make(chan []*event)
+ go i.loop(esch)
+ i.wg.Add(consumersCount)
+ for n := 0; n < consumersCount; n++ {
+ go i.send(esch)
+ }
+ }
+ }
+ return nil
+}
+
+// epollinit opens an epoll file descriptor and creates a pipe which will be
+// used to wake up the epoll_wait(2) function. Then, file descriptor associated
+// with inotify event queue and the read end of the pipe are added to epoll set.
+// Note that `fd` member must be set before this function is called.
+func (i *inotify) epollinit() (err error) {
+ if i.epfd, err = syscall.EpollCreate1(0); err != nil {
+ return
+ }
+ if err = syscall.Pipe(i.pipefd); err != nil {
+ return
+ }
+ i.epes = []syscall.EpollEvent{
+ {Events: syscall.EPOLLIN, Fd: i.fd},
+ {Events: syscall.EPOLLIN, Fd: int32(i.pipefd[0])},
+ }
+ if err = syscall.EpollCtl(i.epfd, syscall.EPOLL_CTL_ADD, int(i.fd), &i.epes[0]); err != nil {
+ return
+ }
+ return syscall.EpollCtl(i.epfd, syscall.EPOLL_CTL_ADD, i.pipefd[0], &i.epes[1])
+}
+
+// epollclose closes the file descriptor created by the call to epoll_create(2)
+// and two file descriptors opened by pipe(2) function.
+func (i *inotify) epollclose() (err error) {
+ if i.epfd != invalidDescriptor {
+ if err = syscall.Close(i.epfd); err == nil {
+ i.epfd = invalidDescriptor
+ }
+ }
+ for n, fd := range i.pipefd {
+ if fd != invalidDescriptor {
+ switch e := syscall.Close(fd); {
+ case e != nil && err == nil:
+ err = e
+ case e == nil:
+ i.pipefd[n] = invalidDescriptor
+ }
+ }
+ }
+ return
+}
+
+// loop blocks until either inotify or pipe file descriptor is ready for I/O.
+// All read operations triggered by filesystem notifications are forwarded to
+// one of the event's consumers. If pipe fd became ready, loop function closes
+// all file descriptors opened by lazyinit method and returns afterwards.
+func (i *inotify) loop(esch chan<- []*event) {
+ epes := make([]syscall.EpollEvent, 1)
+ fd := atomic.LoadInt32(&i.fd)
+ for {
+ switch _, err := syscall.EpollWait(i.epfd, epes, -1); err {
+ case nil:
+ switch epes[0].Fd {
+ case fd:
+ esch <- i.read()
+ epes[0].Fd = 0
+ case int32(i.pipefd[0]):
+ i.Lock()
+ defer i.Unlock()
+ if err = syscall.Close(int(fd)); err != nil && err != syscall.EINTR {
+ panic("notify: close(2) error " + err.Error())
+ }
+ atomic.StoreInt32(&i.fd, invalidDescriptor)
+ if err = i.epollclose(); err != nil && err != syscall.EINTR {
+ panic("notify: epollclose error " + err.Error())
+ }
+ close(esch)
+ return
+ }
+ case syscall.EINTR:
+ continue
+ default: // We should never reach this line.
+ panic("notify: epoll_wait(2) error " + err.Error())
+ }
+ }
+}
+
+// read reads events from an inotify file descriptor. It does not handle errors
+// returned from read(2) function since they are not critical to watcher logic.
+func (i *inotify) read() (es []*event) {
+ n, err := syscall.Read(int(i.fd), i.buffer[:])
+ if err != nil || n < syscall.SizeofInotifyEvent {
+ return
+ }
+ var sys *syscall.InotifyEvent
+ nmin := n - syscall.SizeofInotifyEvent
+ for pos, path := 0, ""; pos <= nmin; {
+ sys = (*syscall.InotifyEvent)(unsafe.Pointer(&i.buffer[pos]))
+ pos += syscall.SizeofInotifyEvent
+ if path = ""; sys.Len > 0 {
+ endpos := pos + int(sys.Len)
+ path = string(bytes.TrimRight(i.buffer[pos:endpos], "\x00"))
+ pos = endpos
+ }
+ es = append(es, &event{
+ sys: syscall.InotifyEvent{
+ Wd: sys.Wd,
+ Mask: sys.Mask,
+ Cookie: sys.Cookie,
+ },
+ path: path,
+ })
+ }
+ return
+}
+
+// send is a consumer function which sends events to event dispatcher channel.
+// It is run in a separate goroutine in order to not block loop method when
+// possibly expensive write operations are performed on inotify map.
+func (i *inotify) send(esch <-chan []*event) {
+ for es := range esch {
+ for _, e := range i.transform(es) {
+ if e != nil {
+ i.c <- e
+ }
+ }
+ }
+ i.wg.Done()
+}
+
+// transform prepares events read from inotify file descriptor for sending to
+ // user. It removes invalid events and those which are no longer present in
+// inotify map. This method may also split one raw event into two different ones
+// when system-dependent result is required.
+func (i *inotify) transform(es []*event) []*event {
+ var multi []*event
+ i.RLock()
+ for idx, e := range es {
+ if e.sys.Mask&(syscall.IN_IGNORED|syscall.IN_Q_OVERFLOW) != 0 {
+ es[idx] = nil
+ continue
+ }
+ wd, ok := i.m[e.sys.Wd]
+ if !ok || e.sys.Mask&encode(Event(wd.mask)) == 0 {
+ es[idx] = nil
+ continue
+ }
+ if e.path == "" {
+ e.path = wd.path
+ } else {
+ e.path = filepath.Join(wd.path, e.path)
+ }
+ multi = append(multi, decode(Event(wd.mask), e))
+ if e.event == 0 {
+ es[idx] = nil
+ }
+ }
+ i.RUnlock()
+ es = append(es, multi...)
+ return es
+}
+
+// encode converts notify system-independent events to valid inotify mask
+// which can be passed to inotify_add_watch(2) function.
+func encode(e Event) uint32 {
+ if e&Create != 0 {
+ e = (e ^ Create) | InCreate | InMovedTo
+ }
+ if e&Remove != 0 {
+ e = (e ^ Remove) | InDelete | InDeleteSelf
+ }
+ if e&Write != 0 {
+ e = (e ^ Write) | InModify
+ }
+ if e&Rename != 0 {
+ e = (e ^ Rename) | InMovedFrom | InMoveSelf
+ }
+ return uint32(e)
+}
+
+// decode uses internally stored mask to distinguish whether system-independent
+// or system-dependent event is requested. The first one is created by modifying
+// `e` argument. decode method sets e.event value to 0 when an event should be
+// skipped. System-dependent event is set as the function's return value which
+// can be nil when the event should not be passed on.
+func decode(mask Event, e *event) (syse *event) {
+ if sysmask := uint32(mask) & e.sys.Mask; sysmask != 0 {
+ syse = &event{sys: syscall.InotifyEvent{
+ Wd: e.sys.Wd,
+ Mask: e.sys.Mask,
+ Cookie: e.sys.Cookie,
+ }, event: Event(sysmask), path: e.path}
+ }
+ imask := encode(mask)
+ switch {
+ case mask&Create != 0 && imask&uint32(InCreate|InMovedTo)&e.sys.Mask != 0:
+ e.event = Create
+ case mask&Remove != 0 && imask&uint32(InDelete|InDeleteSelf)&e.sys.Mask != 0:
+ e.event = Remove
+ case mask&Write != 0 && imask&uint32(InModify)&e.sys.Mask != 0:
+ e.event = Write
+ case mask&Rename != 0 && imask&uint32(InMovedFrom|InMoveSelf)&e.sys.Mask != 0:
+ e.event = Rename
+ default:
+ e.event = 0
+ }
+ return
+}
+
+// Unwatch implements notify.watcher interface. It looks for watch descriptor
+// related to registered path and if found, calls inotify_rm_watch(2) function.
+// This method is allowed to return EINVAL error when concurrently requested to
+// delete identical path.
+func (i *inotify) Unwatch(path string) (err error) {
+ iwd := int32(invalidDescriptor)
+ i.RLock()
+ for iwdkey, wd := range i.m {
+ if wd.path == path {
+ iwd = iwdkey
+ break
+ }
+ }
+ i.RUnlock()
+ if iwd == invalidDescriptor {
+ return errors.New("notify: path " + path + " is already watched")
+ }
+ fd := atomic.LoadInt32(&i.fd)
+ if _, err = syscall.InotifyRmWatch(int(fd), uint32(iwd)); err != nil {
+ return
+ }
+ i.Lock()
+ delete(i.m, iwd)
+ i.Unlock()
+ return nil
+}
+
+// Close implements notify.watcher interface. It removes all existing watch
+// descriptors and wakes up producer goroutine by sending data to the write end
+// of the pipe. The function waits for a signal from producer which means that
+// all operations on current monitoring instance are done.
+func (i *inotify) Close() (err error) {
+ i.Lock()
+ if fd := atomic.LoadInt32(&i.fd); fd == invalidDescriptor {
+ i.Unlock()
+ return nil
+ }
+ for iwd := range i.m {
+ if _, e := syscall.InotifyRmWatch(int(i.fd), uint32(iwd)); e != nil && err == nil {
+ err = e
+ }
+ delete(i.m, iwd)
+ }
+ switch _, errwrite := syscall.Write(i.pipefd[1], []byte{0x00}); {
+ case errwrite != nil && err == nil:
+ err = errwrite
+ fallthrough
+ case errwrite != nil:
+ i.Unlock()
+ default:
+ i.Unlock()
+ i.wg.Wait()
+ }
+ return
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher_kqueue.go b/vendor/github.com/rjeczalik/notify/watcher_kqueue.go
new file mode 100644
index 000000000..d5f7788c4
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_kqueue.go
@@ -0,0 +1,192 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,kqueue dragonfly freebsd netbsd openbsd
+
+package notify
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// newTrigger returns implementation of trigger.
+func newTrigger(pthLkp map[string]*watched) trigger {
+ return &kq{
+ pthLkp: pthLkp,
+ idLkp: make(map[int]*watched),
+ }
+}
+
+// kq is a structure implementing trigger for kqueue.
+type kq struct {
+ // fd is a kqueue file descriptor
+ fd int
+ // pipefds are file descriptors used to stop `Kevent` call.
+ pipefds [2]int
+ // idLkp is a data structure mapping file descriptors with data about watching
+ // represented by them files/directories.
+ idLkp map[int]*watched
+ // pthLkp is a structure mapping monitored files/dir with data about them,
+ // shared with parent trg structure
+ pthLkp map[string]*watched
+}
+
+// watched is a data structure representing watched file/directory.
+type watched struct {
+ // p is a path to watched file/directory.
+ p string
+ // fd is a file descriptor for watched file/directory.
+ fd int
+ // fi provides information about watched file/dir.
+ fi os.FileInfo
+ // eDir represents events watched directly.
+ eDir Event
+ // eNonDir represents events watched indirectly.
+ eNonDir Event
+}
+
+// Stop implements trigger.
+func (k *kq) Stop() (err error) {
+ // trigger event used to interrupt Kevent call.
+ _, err = syscall.Write(k.pipefds[1], []byte{0x00})
+ return
+}
+
+// Close implements trigger.
+func (k *kq) Close() error {
+ return syscall.Close(k.fd)
+}
+
+// NewWatched implements trigger.
+func (*kq) NewWatched(p string, fi os.FileInfo) (*watched, error) {
+ fd, err := syscall.Open(p, syscall.O_NONBLOCK|syscall.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ return &watched{fd: fd, p: p, fi: fi}, nil
+}
+
+// Record implements trigger.
+func (k *kq) Record(w *watched) {
+ k.idLkp[w.fd], k.pthLkp[w.p] = w, w
+}
+
+// Del implements trigger.
+func (k *kq) Del(w *watched) {
+ syscall.Close(w.fd)
+ delete(k.idLkp, w.fd)
+ delete(k.pthLkp, w.p)
+}
+
+func inter2kq(n interface{}) syscall.Kevent_t {
+ kq, ok := n.(syscall.Kevent_t)
+ if !ok {
+ panic(fmt.Sprintf("kqueue: type should be Kevent_t, %T instead", n))
+ }
+ return kq
+}
+
+// Init implements trigger.
+func (k *kq) Init() (err error) {
+ if k.fd, err = syscall.Kqueue(); err != nil {
+ return
+ }
+ // Creates pipe used to stop `Kevent` call by registering it,
+ // watching read end and writing to other end of it.
+ if err = syscall.Pipe(k.pipefds[:]); err != nil {
+ return nonil(err, k.Close())
+ }
+ var kevn [1]syscall.Kevent_t
+ syscall.SetKevent(&kevn[0], k.pipefds[0], syscall.EVFILT_READ, syscall.EV_ADD)
+ if _, err = syscall.Kevent(k.fd, kevn[:], nil, nil); err != nil {
+ return nonil(err, k.Close())
+ }
+ return
+}
+
+// Unwatch implements trigger.
+func (k *kq) Unwatch(w *watched) (err error) {
+ var kevn [1]syscall.Kevent_t
+ syscall.SetKevent(&kevn[0], w.fd, syscall.EVFILT_VNODE, syscall.EV_DELETE)
+
+ _, err = syscall.Kevent(k.fd, kevn[:], nil, nil)
+ return
+}
+
+// Watch implements trigger.
+func (k *kq) Watch(fi os.FileInfo, w *watched, e int64) (err error) {
+ var kevn [1]syscall.Kevent_t
+ syscall.SetKevent(&kevn[0], w.fd, syscall.EVFILT_VNODE,
+ syscall.EV_ADD|syscall.EV_CLEAR)
+ kevn[0].Fflags = uint32(e)
+
+ _, err = syscall.Kevent(k.fd, kevn[:], nil, nil)
+ return
+}
+
+// Wait implements trigger.
+func (k *kq) Wait() (interface{}, error) {
+ var (
+ kevn [1]syscall.Kevent_t
+ err error
+ )
+ kevn[0] = syscall.Kevent_t{}
+ _, err = syscall.Kevent(k.fd, nil, kevn[:], nil)
+
+ return kevn[0], err
+}
+
+// Watched implements trigger.
+func (k *kq) Watched(n interface{}) (*watched, int64, error) {
+ kevn, ok := n.(syscall.Kevent_t)
+ if !ok {
+ panic(fmt.Sprintf("kq: type should be syscall.Kevent_t, %T instead", kevn))
+ }
+ if _, ok = k.idLkp[int(kevn.Ident)]; !ok {
+ return nil, 0, errNotWatched
+ }
+ return k.idLkp[int(kevn.Ident)], int64(kevn.Fflags), nil
+}
+
+// IsStop implements trigger.
+func (k *kq) IsStop(n interface{}, err error) bool {
+ return int(inter2kq(n).Ident) == k.pipefds[0]
+}
+
+func init() {
+ encode = func(e Event) (o int64) {
+ // Create event is not supported by kqueue. Instead NoteWrite event will
+ // be registered. If this event will be reported on dir which is to be
+ // monitored for Create, dir will be rescanned and Create events will
+ // be generated and returned for new files. In case of files,
+ // if not requested NoteRename event is reported, it will be ignored.
+ o = int64(e &^ Create)
+ if e&Write != 0 {
+ o = (o &^ int64(Write)) | int64(NoteWrite)
+ }
+ if e&Rename != 0 {
+ o = (o &^ int64(Rename)) | int64(NoteRename)
+ }
+ if e&Remove != 0 {
+ o = (o &^ int64(Remove)) | int64(NoteDelete)
+ }
+ return
+ }
+ nat2not = map[Event]Event{
+ NoteWrite: Write,
+ NoteRename: Rename,
+ NoteDelete: Remove,
+ NoteExtend: Event(0),
+ NoteAttrib: Event(0),
+ NoteRevoke: Event(0),
+ NoteLink: Event(0),
+ }
+ not2nat = map[Event]Event{
+ Write: NoteWrite,
+ Rename: NoteRename,
+ Remove: NoteDelete,
+ }
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher_readdcw.go b/vendor/github.com/rjeczalik/notify/watcher_readdcw.go
new file mode 100644
index 000000000..5923bfdda
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_readdcw.go
@@ -0,0 +1,574 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build windows
+
+package notify
+
+import (
+ "errors"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+// readBufferSize defines the size of an array in which read statuses are stored.
+// The buffer has to be DWORD-aligned and, if notify is used in monitoring a
+// directory over the network, its size must not be greater than 64KB. Each of
+// watched directories uses its own buffer for storing events.
+const readBufferSize = 4096
+
+// Since all operations which go through the Windows completion routine are done
+// asynchronously, filter may set one of the constants below. They were defined
+// in order to distinguish whether current folder should be re-registered in
+// ReadDirectoryChangesW function or some control operations need to be executed.
+const (
+ stateRewatch uint32 = 1 << (28 + iota)
+ stateUnwatch
+ stateCPClose
+)
+
+// Filter used in current implementation was split into four segments:
+// - bits 0-11 store ReadDirectoryChangesW filters,
+// - bits 12-19 store File notify actions,
+// - bits 20-27 store notify specific events and flags,
+// - bits 28-31 store states which are used in loop's FSM.
+// Constants below are used as masks to retrieve only specific filter parts.
+const (
+ onlyNotifyChanges uint32 = 0x00000FFF
+ onlyNGlobalEvents uint32 = 0x0FF00000
+ onlyMachineStates uint32 = 0xF0000000
+)
+
+// grip represents a single watched directory. It stores the data required by
+// ReadDirectoryChangesW function. Only the filter, recursive, and handle members
+// may be modified by the watcher implementation. The rest of them have to remain
+// constant since they are used by Windows completion routine. This indicates that
+// grip can be removed only when all operations on the file handle are finished.
+type grip struct {
+ handle syscall.Handle
+ filter uint32
+ recursive bool
+ pathw []uint16
+ buffer [readBufferSize]byte
+ parent *watched
+ ovlapped *overlappedEx
+}
+
+// overlappedEx stores information used in asynchronous input and output.
+// Additionally, overlappedEx contains a pointer to 'grip' item which is used in
+// order to gather the structure in which the overlappedEx object was created.
+type overlappedEx struct {
+ syscall.Overlapped
+ parent *grip
+}
+
+// newGrip creates a new file handle that can be used in overlapped operations.
+// Then, the handle is associated with I/O completion port 'cph' and its value
+// is stored in newly created 'grip' object.
+func newGrip(cph syscall.Handle, parent *watched, filter uint32) (*grip, error) {
+ g := &grip{
+ handle: syscall.InvalidHandle,
+ filter: filter,
+ recursive: parent.recursive,
+ pathw: parent.pathw,
+ parent: parent,
+ ovlapped: &overlappedEx{},
+ }
+ if err := g.register(cph); err != nil {
+ return nil, err
+ }
+ g.ovlapped.parent = g
+ return g, nil
+}
+
+// NOTE : Thread safe
+func (g *grip) register(cph syscall.Handle) (err error) {
+ if g.handle, err = syscall.CreateFile(
+ &g.pathw[0],
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil,
+ syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED,
+ 0,
+ ); err != nil {
+ return
+ }
+ if _, err = syscall.CreateIoCompletionPort(g.handle, cph, 0, 0); err != nil {
+ syscall.CloseHandle(g.handle)
+ return
+ }
+ return g.readDirChanges()
+}
+
+// readDirChanges tells the system to store file change information in grip's
+// buffer. Directory changes that occur between calls to this function are added
+// to the buffer and then, returned with the next call.
+func (g *grip) readDirChanges() error {
+ return syscall.ReadDirectoryChanges(
+ g.handle,
+ &g.buffer[0],
+ uint32(unsafe.Sizeof(g.buffer)),
+ g.recursive,
+ encode(g.filter),
+ nil,
+ (*syscall.Overlapped)(unsafe.Pointer(g.ovlapped)),
+ 0,
+ )
+}
+
+// encode transforms a generic filter, which contains platform independent and
+// implementation specific bit fields, to value that can be used as NotifyFilter
+// parameter in ReadDirectoryChangesW function.
+func encode(filter uint32) uint32 {
+ e := Event(filter & (onlyNGlobalEvents | onlyNotifyChanges))
+ if e&dirmarker != 0 {
+ return uint32(FileNotifyChangeDirName)
+ }
+ if e&Create != 0 {
+ e = (e ^ Create) | FileNotifyChangeFileName
+ }
+ if e&Remove != 0 {
+ e = (e ^ Remove) | FileNotifyChangeFileName
+ }
+ if e&Write != 0 {
+ e = (e ^ Write) | FileNotifyChangeAttributes | FileNotifyChangeSize |
+ FileNotifyChangeCreation | FileNotifyChangeSecurity
+ }
+ if e&Rename != 0 {
+ e = (e ^ Rename) | FileNotifyChangeFileName
+ }
+ return uint32(e)
+}
+
+// watched is made in order to check whether an action comes from a directory or
+// file. This approach requires two file handlers per single monitored folder. The
+// second grip handles actions which include creating or deleting a directory. If
+// these processes are not monitored, only the first grip is created.
+type watched struct {
+ filter uint32
+ recursive bool
+ count uint8
+ pathw []uint16
+ digrip [2]*grip
+}
+
+// newWatched creates a new watched instance. It splits the filter variable into
+// two parts. The first part is responsible for watching all events which can be
+// created for a file in watched directory structure and the second one watches
+// only directory Create/Remove actions. If all operations succeed, the Create
+// message is sent to I/O completion port queue for further processing.
+func newWatched(cph syscall.Handle, filter uint32, recursive bool,
+ path string) (wd *watched, err error) {
+ wd = &watched{
+ filter: filter,
+ recursive: recursive,
+ }
+ if wd.pathw, err = syscall.UTF16FromString(path); err != nil {
+ return
+ }
+ if err = wd.recreate(cph); err != nil {
+ return
+ }
+ return wd, nil
+}
+
+// TODO : doc
+func (wd *watched) recreate(cph syscall.Handle) (err error) {
+ filefilter := wd.filter &^ uint32(FileNotifyChangeDirName)
+ if err = wd.updateGrip(0, cph, filefilter == 0, filefilter); err != nil {
+ return
+ }
+ dirfilter := wd.filter & uint32(FileNotifyChangeDirName|Create|Remove)
+ if err = wd.updateGrip(1, cph, dirfilter == 0, wd.filter|uint32(dirmarker)); err != nil {
+ return
+ }
+ wd.filter &^= onlyMachineStates
+ return
+}
+
+// TODO : doc
+func (wd *watched) updateGrip(idx int, cph syscall.Handle, reset bool,
+ newflag uint32) (err error) {
+ if reset {
+ wd.digrip[idx] = nil
+ } else {
+ if wd.digrip[idx] == nil {
+ if wd.digrip[idx], err = newGrip(cph, wd, newflag); err != nil {
+ wd.closeHandle()
+ return
+ }
+ } else {
+ wd.digrip[idx].filter = newflag
+ wd.digrip[idx].recursive = wd.recursive
+ if err = wd.digrip[idx].register(cph); err != nil {
+ wd.closeHandle()
+ return
+ }
+ }
+ wd.count++
+ }
+ return
+}
+
+// closeHandle closes handles that are stored in digrip array. Function always
+// tries to close all of the handlers before it exits, even when there are errors
+// returned from the operating system kernel.
+func (wd *watched) closeHandle() (err error) {
+ for _, g := range wd.digrip {
+ if g != nil && g.handle != syscall.InvalidHandle {
+ switch suberr := syscall.CloseHandle(g.handle); {
+ case suberr == nil:
+ g.handle = syscall.InvalidHandle
+ case err == nil:
+ err = suberr
+ }
+ }
+ }
+ return
+}
+
+// watcher implements Watcher interface. It stores a set of watched directories.
+// All operations which remove watched objects from map `m` must be performed in
+// loop goroutine since these structures are used internally by operating system.
+type readdcw struct {
+ sync.Mutex
+ m map[string]*watched
+ cph syscall.Handle
+ start bool
+ wg sync.WaitGroup
+ c chan<- EventInfo
+}
+
+// NewWatcher creates new non-recursive watcher backed by ReadDirectoryChangesW.
+func newWatcher(c chan<- EventInfo) watcher {
+ r := &readdcw{
+ m: make(map[string]*watched),
+ cph: syscall.InvalidHandle,
+ c: c,
+ }
+ runtime.SetFinalizer(r, func(r *readdcw) {
+ if r.cph != syscall.InvalidHandle {
+ syscall.CloseHandle(r.cph)
+ }
+ })
+ return r
+}
+
+// Watch implements notify.Watcher interface.
+func (r *readdcw) Watch(path string, event Event) error {
+ return r.watch(path, event, false)
+}
+
+// RecursiveWatch implements notify.RecursiveWatcher interface.
+func (r *readdcw) RecursiveWatch(path string, event Event) error {
+ return r.watch(path, event, true)
+}
+
+// watch inserts a directory to the group of watched folders. If watched folder
+// already exists, function tries to rewatch it with new filters(NOT VALID). Moreover,
+// watch starts the main event loop goroutine when called for the first time.
+func (r *readdcw) watch(path string, event Event, recursive bool) (err error) {
+ if event&^(All|fileNotifyChangeAll) != 0 {
+ return errors.New("notify: unknown event")
+ }
+ r.Lock()
+ wd, ok := r.m[path]
+ r.Unlock()
+ if !ok {
+ if err = r.lazyinit(); err != nil {
+ return
+ }
+ r.Lock()
+ if wd, ok = r.m[path]; ok {
+ r.Unlock()
+ return
+ }
+ if wd, err = newWatched(r.cph, uint32(event), recursive, path); err != nil {
+ r.Unlock()
+ return
+ }
+ r.m[path] = wd
+ r.Unlock()
+ }
+ return nil
+}
+
+// lazyinit creates an I/O completion port and starts the main event processing
+// loop. This method uses Double-Checked Locking optimization.
+func (r *readdcw) lazyinit() (err error) {
+ invalid := uintptr(syscall.InvalidHandle)
+ if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid {
+ r.Lock()
+ defer r.Unlock()
+ if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid {
+ cph := syscall.InvalidHandle
+ if cph, err = syscall.CreateIoCompletionPort(cph, 0, 0, 0); err != nil {
+ return
+ }
+ r.cph, r.start = cph, true
+ go r.loop()
+ }
+ }
+ return
+}
+
+// TODO(pknap) : doc
+func (r *readdcw) loop() {
+ var n, key uint32
+ var overlapped *syscall.Overlapped
+ for {
+ err := syscall.GetQueuedCompletionStatus(r.cph, &n, &key, &overlapped, syscall.INFINITE)
+ if key == stateCPClose {
+ r.Lock()
+ handle := r.cph
+ r.cph = syscall.InvalidHandle
+ r.Unlock()
+ syscall.CloseHandle(handle)
+ r.wg.Done()
+ return
+ }
+ if overlapped == nil {
+ // TODO: check key == rewatch delete or 0(panic)
+ continue
+ }
+ overEx := (*overlappedEx)(unsafe.Pointer(overlapped))
+ if n == 0 {
+ r.loopstate(overEx)
+ } else {
+ r.loopevent(n, overEx)
+ if err = overEx.parent.readDirChanges(); err != nil {
+ // TODO: error handling
+ }
+ }
+ }
+}
+
+// TODO(pknap) : doc
+func (r *readdcw) loopstate(overEx *overlappedEx) {
+ filter := atomic.LoadUint32(&overEx.parent.parent.filter)
+ if filter&onlyMachineStates == 0 {
+ return
+ }
+ if overEx.parent.parent.count--; overEx.parent.parent.count == 0 {
+ switch filter & onlyMachineStates {
+ case stateRewatch:
+ r.Lock()
+ overEx.parent.parent.recreate(r.cph)
+ r.Unlock()
+ case stateUnwatch:
+ r.Lock()
+ delete(r.m, syscall.UTF16ToString(overEx.parent.pathw))
+ r.Unlock()
+ case stateCPClose:
+ default:
+ panic(`notify: windows loopstate logic error`)
+ }
+ }
+}
+
+// TODO(pknap) : doc
+func (r *readdcw) loopevent(n uint32, overEx *overlappedEx) {
+ events := []*event{}
+ var currOffset uint32
+ for {
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&overEx.parent.buffer[currOffset]))
+ name := syscall.UTF16ToString((*[syscall.MAX_LONG_PATH]uint16)(unsafe.Pointer(&raw.FileName))[:raw.FileNameLength>>1])
+ events = append(events, &event{
+ pathw: overEx.parent.pathw,
+ filter: overEx.parent.filter,
+ action: raw.Action,
+ name: name,
+ })
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ if currOffset += raw.NextEntryOffset; currOffset >= n {
+ break
+ }
+ }
+ r.send(events)
+}
+
+// TODO(pknap) : doc
+func (r *readdcw) send(es []*event) {
+ for _, e := range es {
+ var syse Event
+ if e.e, syse = decode(e.filter, e.action); e.e == 0 && syse == 0 {
+ continue
+ }
+ switch {
+ case e.action == syscall.FILE_ACTION_MODIFIED:
+ e.ftype = fTypeUnknown
+ case e.filter&uint32(dirmarker) != 0:
+ e.ftype = fTypeDirectory
+ default:
+ e.ftype = fTypeFile
+ }
+ switch {
+ case e.e == 0:
+ e.e = syse
+ case syse != 0:
+ r.c <- &event{
+ pathw: e.pathw,
+ name: e.name,
+ ftype: e.ftype,
+ action: e.action,
+ filter: e.filter,
+ e: syse,
+ }
+ }
+ r.c <- e
+ }
+}
+
+// Rewatch implements notify.Rewatcher interface.
+func (r *readdcw) Rewatch(path string, oldevent, newevent Event) error {
+ return r.rewatch(path, uint32(oldevent), uint32(newevent), false)
+}
+
+// RecursiveRewatch implements notify.RecursiveRewatcher interface.
+func (r *readdcw) RecursiveRewatch(oldpath, newpath string, oldevent,
+ newevent Event) error {
+ if oldpath != newpath {
+ if err := r.unwatch(oldpath); err != nil {
+ return err
+ }
+ return r.watch(newpath, newevent, true)
+ }
+ return r.rewatch(newpath, uint32(oldevent), uint32(newevent), true)
+}
+
+// TODO : (pknap) doc.
+func (r *readdcw) rewatch(path string, oldevent, newevent uint32, recursive bool) (err error) {
+ if Event(newevent)&^(All|fileNotifyChangeAll) != 0 {
+ return errors.New("notify: unknown event")
+ }
+ var wd *watched
+ r.Lock()
+ if wd, err = r.nonStateWatched(path); err != nil {
+ r.Unlock()
+ return
+ }
+ if wd.filter&(onlyNotifyChanges|onlyNGlobalEvents) != oldevent {
+ panic(`notify: windows re-watcher logic error`)
+ }
+ wd.filter = stateRewatch | newevent
+ wd.recursive, recursive = recursive, wd.recursive
+ if err = wd.closeHandle(); err != nil {
+ wd.filter = oldevent
+ wd.recursive = recursive
+ r.Unlock()
+ return
+ }
+ r.Unlock()
+ return
+}
+
+// TODO : pknap
+func (r *readdcw) nonStateWatched(path string) (wd *watched, err error) {
+ wd, ok := r.m[path]
+ if !ok || wd == nil {
+ err = errors.New(`notify: ` + path + ` path is unwatched`)
+ return
+ }
+ if filter := atomic.LoadUint32(&wd.filter); filter&onlyMachineStates != 0 {
+ err = errors.New(`notify: another re/unwatching operation in progress`)
+ return
+ }
+ return
+}
+
+// Unwatch implements notify.Watcher interface.
+func (r *readdcw) Unwatch(path string) error {
+ return r.unwatch(path)
+}
+
+// RecursiveUnwatch implements notify.RecursiveWatcher interface.
+func (r *readdcw) RecursiveUnwatch(path string) error {
+ return r.unwatch(path)
+}
+
+// TODO : pknap
+func (r *readdcw) unwatch(path string) (err error) {
+ var wd *watched
+ r.Lock()
+ if wd, err = r.nonStateWatched(path); err != nil {
+ r.Unlock()
+ return
+ }
+ wd.filter |= stateUnwatch
+ if err = wd.closeHandle(); err != nil {
+ wd.filter &^= stateUnwatch
+ r.Unlock()
+ return
+ }
+ r.Unlock()
+ return
+}
+
+// Close resets the whole watcher object, closes all existing file descriptors,
+// and sends stateCPClose state as completion key to the main watcher's loop.
+func (r *readdcw) Close() (err error) {
+ r.Lock()
+ if !r.start {
+ r.Unlock()
+ return nil
+ }
+ for _, wd := range r.m {
+ wd.filter &^= onlyMachineStates
+ wd.filter |= stateCPClose
+ if e := wd.closeHandle(); e != nil && err == nil {
+ err = e
+ }
+ }
+ r.start = false
+ r.Unlock()
+ r.wg.Add(1)
+ if e := syscall.PostQueuedCompletionStatus(r.cph, 0, stateCPClose, nil); e != nil && err == nil {
+ return e
+ }
+ r.wg.Wait()
+ return
+}
+
+// decode creates a notify event from both non-raw filter and action which was
+// returned from completion routine. Function may return Event(0) in case when
+// filter was replaced by a new value which does not contain fields that are
+// valid with passed action.
+func decode(filter, action uint32) (Event, Event) {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return gensys(filter, Create, FileActionAdded)
+ case syscall.FILE_ACTION_REMOVED:
+ return gensys(filter, Remove, FileActionRemoved)
+ case syscall.FILE_ACTION_MODIFIED:
+ return gensys(filter, Write, FileActionModified)
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return gensys(filter, Rename, FileActionRenamedOldName)
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return gensys(filter, Rename, FileActionRenamedNewName)
+ }
+ panic(`notify: cannot decode internal mask`)
+}
+
+// gensys decides whether the Windows action, system-independent event or both
+// of them should be returned. Since the grip's filter may be atomically changed
+// during watcher lifetime, it is possible that neither Windows nor notify masks
+// are watched by the user when this function is called.
+func gensys(filter uint32, ge, se Event) (gene, syse Event) {
+ isdir := filter&uint32(dirmarker) != 0
+ if isdir && filter&uint32(FileNotifyChangeDirName) != 0 ||
+ !isdir && filter&uint32(FileNotifyChangeFileName) != 0 ||
+ filter&uint32(fileNotifyChangeModified) != 0 {
+ syse = se
+ }
+ if filter&uint32(ge) != 0 {
+ gene = ge
+ }
+ return
+}
diff --git a/vendor/github.com/rjeczalik/notify/watcher_stub.go b/vendor/github.com/rjeczalik/notify/watcher_stub.go
new file mode 100644
index 000000000..68b9c135b
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_stub.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build !darwin,!linux,!freebsd,!dragonfly,!netbsd,!openbsd,!windows
+// +build !kqueue,!solaris
+
+package notify
+
+import "errors"
+
+type stub struct{ error }
+
+// newWatcher stub.
+func newWatcher(chan<- EventInfo) watcher {
+ return stub{errors.New("notify: not implemented")}
+}
+
+// Following methods implement notify.watcher interface.
+func (s stub) Watch(string, Event) error { return s }
+func (s stub) Rewatch(string, Event, Event) error { return s }
+func (s stub) Unwatch(string) (err error) { return s }
+func (s stub) Close() error { return s }
diff --git a/vendor/github.com/rjeczalik/notify/watcher_trigger.go b/vendor/github.com/rjeczalik/notify/watcher_trigger.go
new file mode 100644
index 000000000..f25a6fcbf
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watcher_trigger.go
@@ -0,0 +1,438 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build darwin,kqueue dragonfly freebsd netbsd openbsd solaris
+
+// watcher_trigger is used for FEN and kqueue which behave similarly:
+// only files and dirs can be watched directly, but not files inside dirs.
+// As a result Create events have to be generated by implementation when
+// after Write event is returned for watched dir, it is rescanned and Create
+// event is returned for new files and these are automatically added
+// to watchlist. In case of removal of watched directory, native system returns
+// events for all files, but for Rename, they also need to be generated.
+// As a result native system works as something like trigger for rescan,
+// but contains additional data about dir in which changes occurred. For files
+// detailed data is returned.
+// Usage of watcher_trigger requires:
+// - trigger implementation,
+// - encode func,
+// - not2nat, nat2not maps.
+// Required manual operations on filesystem can lead to loss of precision.
+
+package notify
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "syscall"
+)
+
+// trigger is to be implemented by platform implementation like FEN or kqueue.
+type trigger interface {
+ // Close closes watcher's main native file descriptor.
+ Close() error
+ // Stop waiting for new events.
+ Stop() error
+ // Create new instance of watched.
+ NewWatched(string, os.FileInfo) (*watched, error)
+ // Record internally new *watched instance.
+ Record(*watched)
+ // Del removes internal copy of *watched instance.
+ Del(*watched)
+ // Watched returns *watched instance and native events for native type.
+ Watched(interface{}) (*watched, int64, error)
+ // Init initializes native watcher call.
+ Init() error
+ // Watch starts watching provided file/dir.
+ Watch(os.FileInfo, *watched, int64) error
+ // Unwatch stops watching provided file/dir.
+ Unwatch(*watched) error
+ // Wait for new events.
+ Wait() (interface{}, error)
+ // IsStop checks if Wait finished because of request watcher's stop.
+ IsStop(n interface{}, err error) bool
+}
+
+// encode Event to native representation. Implementation is to be provided by
+// platform specific implementation.
+var encode func(Event) int64
+
+var (
+ // nat2not matches native events to notify's ones. To be initialized by
+ // platform dependent implementation.
+ nat2not map[Event]Event
+ // not2nat matches notify's events to native ones. To be initialized by
+ // platform dependent implementation.
+ not2nat map[Event]Event
+)
+
+// trg is a main structure implementing watcher.
+type trg struct {
+ sync.Mutex
+ // s is a channel used to stop monitoring.
+ s chan struct{}
+ // c is a channel used to pass events further.
+ c chan<- EventInfo
+ // pthLkp is a data structure mapping file names with data about watching
+ // represented by them files/directories.
+ pthLkp map[string]*watched
+ // t is a platform dependent implementation of trigger.
+ t trigger
+}
+
+// newWatcher returns new watcher's implementation.
+func newWatcher(c chan<- EventInfo) watcher {
+ t := &trg{
+ s: make(chan struct{}, 1),
+ pthLkp: make(map[string]*watched, 0),
+ c: c,
+ }
+ t.t = newTrigger(t.pthLkp)
+ if err := t.t.Init(); err != nil {
+ panic(err)
+ }
+ go t.monitor()
+ return t
+}
+
+// Close implements watcher.
+func (t *trg) Close() (err error) {
+ t.Lock()
+ if err = t.t.Stop(); err != nil {
+ t.Unlock()
+ return
+ }
+ <-t.s
+ var e error
+ for _, w := range t.pthLkp {
+ if e = t.unwatch(w.p, w.fi); e != nil {
+ dbgprintf("trg: unwatch %q failed: %q\n", w.p, e)
+ err = nonil(err, e)
+ }
+ }
+ if e = t.t.Close(); e != nil {
+ dbgprintf("trg: closing native watch failed: %q\n", e)
+ err = nonil(err, e)
+ }
+ t.Unlock()
+ return
+}
+
+// send reported events one by one through chan.
+func (t *trg) send(evn []event) {
+ for i := range evn {
+ t.c <- &evn[i]
+ }
+}
+
+// singlewatch starts to watch given p file/directory.
+func (t *trg) singlewatch(p string, e Event, direct mode, fi os.FileInfo) (err error) {
+ w, ok := t.pthLkp[p]
+ if !ok {
+ if w, err = t.t.NewWatched(p, fi); err != nil {
+ return
+ }
+ }
+ switch direct {
+ case dir:
+ w.eDir |= e
+ case ndir:
+ w.eNonDir |= e
+ case both:
+ w.eDir |= e
+ w.eNonDir |= e
+ }
+ var ee int64
+ // Native Write event is added to wait for Create events (Write event on
+ // directory triggers its rescan).
+ if e&Create != 0 && fi.IsDir() {
+ ee = int64(not2nat[Write])
+ }
+ if err = t.t.Watch(fi, w, encode(w.eDir|w.eNonDir)|ee); err != nil {
+ return
+ }
+ if !ok {
+ t.t.Record(w)
+ return nil
+ }
+ return errAlreadyWatched
+}
+
+// decode converts event received from native to notify.Event
+// representation taking into account requested events (w).
+func decode(o int64, w Event) (e Event) {
+ for f, n := range nat2not {
+ if o&int64(f) != 0 {
+ if w&f != 0 {
+ e |= f
+ }
+ if w&n != 0 {
+ e |= n
+ }
+ }
+ }
+
+ return
+}
+
+func (t *trg) watch(p string, e Event, fi os.FileInfo) error {
+ if err := t.singlewatch(p, e, dir, fi); err != nil {
+ if err != errAlreadyWatched {
+ return nil
+ }
+ }
+ if fi.IsDir() {
+ err := t.walk(p, func(fi os.FileInfo) (err error) {
+ if err = t.singlewatch(filepath.Join(p, fi.Name()), e, ndir,
+ fi); err != nil {
+ if err != errAlreadyWatched {
+ return
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// walk runs f func on each file/dir from p directory.
+func (t *trg) walk(p string, fn func(os.FileInfo) error) error {
+ fp, err := os.Open(p)
+ if err != nil {
+ return err
+ }
+ ls, err := fp.Readdir(0)
+ fp.Close()
+ if err != nil {
+ return err
+ }
+ for i := range ls {
+ if err := fn(ls[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *trg) unwatch(p string, fi os.FileInfo) error {
+ if fi.IsDir() {
+ err := t.walk(p, func(fi os.FileInfo) error {
+ err := t.singleunwatch(filepath.Join(p, fi.Name()), ndir)
+ if err != errNotWatched {
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return t.singleunwatch(p, dir)
+}
+
+// Watch implements Watcher interface.
+func (t *trg) Watch(p string, e Event) error {
+ fi, err := os.Stat(p)
+ if err != nil {
+ return err
+ }
+ t.Lock()
+ err = t.watch(p, e, fi)
+ t.Unlock()
+ return err
+}
+
+// Unwatch implements Watcher interface.
+func (t *trg) Unwatch(p string) error {
+ fi, err := os.Stat(p)
+ if err != nil {
+ return err
+ }
+ t.Lock()
+ err = t.unwatch(p, fi)
+ t.Unlock()
+ return err
+}
+
+// Rewatch implements Watcher interface.
+//
+// TODO(rjeczalik): This is a naive hack. Rewrite might help.
+func (t *trg) Rewatch(p string, _, e Event) error {
+ fi, err := os.Stat(p)
+ if err != nil {
+ return err
+ }
+ t.Lock()
+ if err = t.unwatch(p, fi); err == nil {
+ // TODO(rjeczalik): If watch fails then we leave trigger in inconsistent
+ // state. Handle? Panic? Native version of rewatch?
+ err = t.watch(p, e, fi)
+ }
+ t.Unlock()
+ return nil
+}
+
+func (*trg) file(w *watched, n interface{}, e Event) (evn []event) {
+ evn = append(evn, event{w.p, e, w.fi.IsDir(), n})
+ return
+}
+
+func (t *trg) dir(w *watched, n interface{}, e, ge Event) (evn []event) {
+ // If it's dir and delete we have to send it and continue, because
+ // other processing relies on opening (in this case not existing) dir.
+ // Events for contents of this dir are reported by native impl.
+ // However events for rename must be generated for all monitored files
+ // inside of moved directory, because native impl does not report it independently
+ // for each file descriptor being moved in result of move action on
+ // parent directory.
+ if (ge & (not2nat[Rename] | not2nat[Remove])) != 0 {
+ // Write is reported also for Remove on directory. Because of that
+ // we have to filter it out explicitly.
+ evn = append(evn, event{w.p, e & ^Write & ^not2nat[Write], true, n})
+ if ge&not2nat[Rename] != 0 {
+ for p := range t.pthLkp {
+ if strings.HasPrefix(p, w.p+string(os.PathSeparator)) {
+ if err := t.singleunwatch(p, both); err != nil && err != errNotWatched &&
+ !os.IsNotExist(err) {
+ dbgprintf("trg: failed stop watching moved file (%q): %q\n",
+ p, err)
+ }
+ if (w.eDir|w.eNonDir)&(not2nat[Rename]|Rename) != 0 {
+ evn = append(evn, event{
+ p, (w.eDir | w.eNonDir) & e &^ Write &^ not2nat[Write],
+ w.fi.IsDir(), nil,
+ })
+ }
+ }
+ }
+ }
+ t.t.Del(w)
+ return
+ }
+ if (ge & not2nat[Write]) != 0 {
+ switch err := t.walk(w.p, func(fi os.FileInfo) error {
+ p := filepath.Join(w.p, fi.Name())
+ switch err := t.singlewatch(p, w.eDir, ndir, fi); {
+ case os.IsNotExist(err) && ((w.eDir & Remove) != 0):
+ evn = append(evn, event{p, Remove, fi.IsDir(), n})
+ case err == errAlreadyWatched:
+ case err != nil:
+ dbgprintf("trg: watching %q failed: %q", p, err)
+ case (w.eDir & Create) != 0:
+ evn = append(evn, event{p, Create, fi.IsDir(), n})
+ default:
+ }
+ return nil
+ }); {
+ case os.IsNotExist(err):
+ return
+ case err != nil:
+ dbgprintf("trg: dir processing failed: %q", err)
+ default:
+ }
+ }
+ return
+}
+
+type mode uint
+
+const (
+ dir mode = iota
+ ndir
+ both
+)
+
+// unwatch stops watching p file/directory.
+func (t *trg) singleunwatch(p string, direct mode) error {
+ w, ok := t.pthLkp[p]
+ if !ok {
+ return errNotWatched
+ }
+ switch direct {
+ case dir:
+ w.eDir = 0
+ case ndir:
+ w.eNonDir = 0
+ case both:
+ w.eDir, w.eNonDir = 0, 0
+ }
+ if err := t.t.Unwatch(w); err != nil {
+ return err
+ }
+ if w.eNonDir|w.eDir != 0 {
+ mod := dir
+ if w.eNonDir == 0 {
+ mod = ndir
+ }
+ if err := t.singlewatch(p, w.eNonDir|w.eDir, mod,
+ w.fi); err != nil && err != errAlreadyWatched {
+ return err
+ }
+ } else {
+ t.t.Del(w)
+ }
+ return nil
+}
+
+func (t *trg) monitor() {
+ var (
+ n interface{}
+ err error
+ )
+ for {
+ switch n, err = t.t.Wait(); {
+ case err == syscall.EINTR:
+ case t.t.IsStop(n, err):
+ t.s <- struct{}{}
+ return
+ case err != nil:
+ dbgprintf("trg: failed to read events: %q\n", err)
+ default:
+ t.send(t.process(n))
+ }
+ }
+}
+
+// process event returned by port_get call.
+func (t *trg) process(n interface{}) (evn []event) {
+ t.Lock()
+ w, ge, err := t.t.Watched(n)
+ if err != nil {
+ t.Unlock()
+ dbgprintf("trg: %v event lookup failed: %q", Event(ge), err)
+ return
+ }
+
+ e := decode(ge, w.eDir|w.eNonDir)
+ if ge&int64(not2nat[Remove]|not2nat[Rename]) == 0 {
+ switch fi, err := os.Stat(w.p); {
+ case err != nil:
+ default:
+ if err = t.t.Watch(fi, w, (encode(w.eDir | w.eNonDir))); err != nil {
+ dbgprintf("trg: %q is no longer watched: %q", w.p, err)
+ t.t.Del(w)
+ }
+ }
+ }
+ if e == Event(0) {
+ t.Unlock()
+ return
+ }
+
+ if w.fi.IsDir() {
+ evn = append(evn, t.dir(w, n, e, Event(ge))...)
+ } else {
+ evn = append(evn, t.file(w, n, e)...)
+ }
+ if Event(ge)&(not2nat[Remove]|not2nat[Rename]) != 0 {
+ t.t.Del(w)
+ }
+ t.Unlock()
+ return
+}
diff --git a/vendor/github.com/rjeczalik/notify/watchpoint.go b/vendor/github.com/rjeczalik/notify/watchpoint.go
new file mode 100644
index 000000000..5afc914f4
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watchpoint.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+package notify
+
+// EventDiff describes a change to an event set - EventDiff[0] is an old state,
+// while EventDiff[1] is a new state. If event set has not changed (old == new),
+// functions typically return the None value.
+type eventDiff [2]Event
+
+func (diff eventDiff) Event() Event {
+ return diff[1] &^ diff[0]
+}
+
+// Watchpoint
+//
+// The nil key holds total event set - logical sum for all registered events.
+// It speeds up computing EventDiff for Add method.
+//
+// The rec key holds an event set for a watchpoints created by RecursiveWatch
+// for a Watcher implementation which is not natively recursive.
+type watchpoint map[chan<- EventInfo]Event
+
+// None is an empty event diff, think null object.
+var none eventDiff
+
+// rec is just a placeholder
+var rec = func() (ch chan<- EventInfo) {
+ ch = make(chan<- EventInfo)
+ close(ch)
+ return
+}()
+
+func (wp watchpoint) dryAdd(ch chan<- EventInfo, e Event) eventDiff {
+ if e &^= internal; wp[ch]&e == e {
+ return none
+ }
+ total := wp[ch] &^ internal
+ return eventDiff{total, total | e}
+}
+
+// Add assumes neither c nor e are nil or zero values.
+func (wp watchpoint) Add(c chan<- EventInfo, e Event) (diff eventDiff) {
+ wp[c] |= e
+ diff[0] = wp[nil]
+ diff[1] = diff[0] | e
+ wp[nil] = diff[1] &^ omit
+ // Strip diff from internal events.
+ diff[0] &^= internal
+ diff[1] &^= internal
+ if diff[0] == diff[1] {
+ return none
+ }
+ return
+}
+
+func (wp watchpoint) Del(c chan<- EventInfo, e Event) (diff eventDiff) {
+ wp[c] &^= e
+ if wp[c] == 0 {
+ delete(wp, c)
+ }
+ diff[0] = wp[nil]
+ delete(wp, nil)
+ if len(wp) != 0 {
+ // Recalculate total event set.
+ for _, e := range wp {
+ diff[1] |= e
+ }
+ wp[nil] = diff[1] &^ omit
+ }
+ // Strip diff from internal events.
+ diff[0] &^= internal
+ diff[1] &^= internal
+ if diff[0] == diff[1] {
+ return none
+ }
+ return
+}
+
+func (wp watchpoint) Dispatch(ei EventInfo, extra Event) {
+ e := eventmask(ei, extra)
+ if !matches(wp[nil], e) {
+ return
+ }
+ for ch, eset := range wp {
+ if ch != nil && matches(eset, e) {
+ select {
+ case ch <- ei:
+ default: // Drop event if receiver is too slow
+ dbgprintf("dropped %s on %q: receiver too slow", ei.Event(), ei.Path())
+ }
+ }
+ }
+}
+
+func (wp watchpoint) Total() Event {
+ return wp[nil] &^ internal
+}
+
+func (wp watchpoint) IsRecursive() bool {
+ return wp[nil]&recursive != 0
+}
diff --git a/vendor/github.com/rjeczalik/notify/watchpoint_other.go b/vendor/github.com/rjeczalik/notify/watchpoint_other.go
new file mode 100644
index 000000000..881631c99
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watchpoint_other.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build !windows
+
+package notify
+
+// eventmask uses ei to create a new event which contains internal flags used by
+// notify package logic.
+func eventmask(ei EventInfo, extra Event) Event {
+ return ei.Event() | extra
+}
+
+// matches reports a match only when:
+//
+// - for user events, when event is present in the given set
+// - for internal events, when additionally both event and set have omit bit set
+//
+// Internal events must not be sent to user channels and vice versa.
+func matches(set, event Event) bool {
+ return (set&omit)^(event&omit) == 0 && set&event == event
+}
diff --git a/vendor/github.com/rjeczalik/notify/watchpoint_readdcw.go b/vendor/github.com/rjeczalik/notify/watchpoint_readdcw.go
new file mode 100644
index 000000000..9fd1e1df3
--- /dev/null
+++ b/vendor/github.com/rjeczalik/notify/watchpoint_readdcw.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
+// Use of this source code is governed by the MIT license that can be
+// found in the LICENSE file.
+
+// +build windows
+
+package notify
+
+// eventmask uses ei to create a new event which contains internal flags used by
+// notify package logic. If one of FileAction* masks is detected, this function
+// adds corresponding FileNotifyChange* values. This allows non registered
+// FileAction* events to be passed on.
+func eventmask(ei EventInfo, extra Event) (e Event) {
+ if e = ei.Event() | extra; e&fileActionAll != 0 {
+ if ev, ok := ei.(*event); ok {
+ switch ev.ftype {
+ case fTypeFile:
+ e |= FileNotifyChangeFileName
+ case fTypeDirectory:
+ e |= FileNotifyChangeDirName
+ case fTypeUnknown:
+ e |= fileNotifyChangeModified
+ }
+ return e &^ fileActionAll
+ }
+ }
+ return
+}
+
+// matches reports a match only when:
+//
+// - for user events, when event is present in the given set
+// - for internal events, when additionally both event and set have omit bit set
+//
+// Internal events must not be sent to user channels and vice versa.
+func matches(set, event Event) bool {
+ return (set&omit)^(event&omit) == 0 && (set&event == event || set&fileNotifyChangeModified&event != 0)
+}
diff --git a/vendor/github.com/robertkrimen/otto/.gitignore b/vendor/github.com/robertkrimen/otto/.gitignore
new file mode 100644
index 000000000..8c2a16949
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/.gitignore
@@ -0,0 +1,5 @@
+/.test
+/otto/otto
+/otto/otto-*
+/test/test-*.js
+/test/tester
diff --git a/vendor/github.com/robertkrimen/otto/DESIGN.markdown b/vendor/github.com/robertkrimen/otto/DESIGN.markdown
new file mode 100644
index 000000000..288752987
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/DESIGN.markdown
@@ -0,0 +1 @@
+* Designate the filename of "anonymous" source code by the hash (md5/sha1, etc.)
diff --git a/vendor/github.com/robertkrimen/otto/LICENSE b/vendor/github.com/robertkrimen/otto/LICENSE
new file mode 100644
index 000000000..b6179fe38
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2012 Robert Krimen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/robertkrimen/otto/Makefile b/vendor/github.com/robertkrimen/otto/Makefile
new file mode 100644
index 000000000..9868db3ca
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/Makefile
@@ -0,0 +1,63 @@
+.PHONY: test test-race test-release release release-check test-262
+.PHONY: parser
+.PHONY: otto assets underscore
+
+TESTS := \
+ ~
+
+TEST := -v --run
+TEST := -v
+TEST := -v --run Test\($(subst $(eval) ,\|,$(TESTS))\)
+TEST := .
+
+test: parser inline.go
+ go test -i
+ go test $(TEST)
+ @echo PASS
+
+parser:
+ $(MAKE) -C parser
+
+inline.go: inline.pl
+ ./$< > $@
+
+#################
+# release, test #
+#################
+
+release: test-race test-release
+ for package in . parser token ast file underscore registry; do (cd $$package && godocdown --signature > README.markdown); done
+ @echo \*\*\* make release-check
+ @echo PASS
+
+release-check: .test
+ $(MAKE) -C test build test
+ $(MAKE) -C .test/test262 build test
+ @echo PASS
+
+test-262: .test
+ $(MAKE) -C .test/test262 build test
+ @echo PASS
+
+test-release:
+ go test -i
+ go test
+
+test-race:
+ go test -race -i
+ go test -race
+
+#################################
+# otto, assets, underscore, ... #
+#################################
+
+otto:
+ $(MAKE) -C otto
+
+assets:
+ mkdir -p .assets
+ for file in underscore/test/*.js; do tr "\`" "_" < $$file > .assets/`basename $$file`; done
+
+underscore:
+ $(MAKE) -C $@
+
diff --git a/vendor/github.com/robertkrimen/otto/README.markdown b/vendor/github.com/robertkrimen/otto/README.markdown
new file mode 100644
index 000000000..ed14f0ae9
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/README.markdown
@@ -0,0 +1,871 @@
+# otto
+--
+```go
+import "github.com/robertkrimen/otto"
+```
+
+Package otto is a JavaScript parser and interpreter written natively in Go.
+
+http://godoc.org/github.com/robertkrimen/otto
+
+```go
+import (
+ "github.com/robertkrimen/otto"
+)
+```
+
+Run something in the VM
+
+```go
+vm := otto.New()
+vm.Run(`
+ abc = 2 + 2;
+ console.log("The value of abc is " + abc); // 4
+`)
+```
+
+Get a value out of the VM
+
+```go
+if value, err := vm.Get("abc"); err == nil {
+ if value_int, err := value.ToInteger(); err == nil {
+        fmt.Printf("%v, %v\n", value_int, err)
+ }
+}
+```
+
+Set a number
+
+```go
+vm.Set("def", 11)
+vm.Run(`
+ console.log("The value of def is " + def);
+ // The value of def is 11
+`)
+```
+
+Set a string
+
+```go
+vm.Set("xyzzy", "Nothing happens.")
+vm.Run(`
+ console.log(xyzzy.length); // 16
+`)
+```
+
+Get the value of an expression
+
+```go
+value, _ = vm.Run("xyzzy.length")
+{
+ // value is an int64 with a value of 16
+ value, _ := value.ToInteger()
+}
+```
+
+An error happens
+
+```go
+value, err = vm.Run("abcdefghijlmnopqrstuvwxyz.length")
+if err != nil {
+ // err = ReferenceError: abcdefghijlmnopqrstuvwxyz is not defined
+ // If there is an error, then value.IsUndefined() is true
+ ...
+}
+```
+
+Set a Go function
+
+```go
+vm.Set("sayHello", func(call otto.FunctionCall) otto.Value {
+ fmt.Printf("Hello, %s.\n", call.Argument(0).String())
+ return otto.Value{}
+})
+```
+
+Set a Go function that returns something useful
+
+```go
+vm.Set("twoPlus", func(call otto.FunctionCall) otto.Value {
+ right, _ := call.Argument(0).ToInteger()
+ result, _ := vm.ToValue(2 + right)
+ return result
+})
+```
+
+Use the functions in JavaScript
+
+```go
+result, _ = vm.Run(`
+ sayHello("Xyzzy"); // Hello, Xyzzy.
+ sayHello(); // Hello, undefined
+
+ result = twoPlus(2.0); // 4
+`)
+```
+
+### Parser
+
+A separate parser is available in the parser package if you're just interested
+in building an AST.
+
+http://godoc.org/github.com/robertkrimen/otto/parser
+
+Parse and return an AST
+
+```go
+filename := "" // A filename is optional
+src := `
+ // Sample xyzzy example
+ (function(){
+ if (3.14159 > 0) {
+ console.log("Hello, World.");
+ return;
+ }
+
+ var xyzzy = NaN;
+ console.log("Nothing happens.");
+ return xyzzy;
+ })();
+`
+
+// Parse some JavaScript, yielding a *ast.Program and/or an ErrorList
+program, err := parser.ParseFile(nil, filename, src, 0)
+```
+
+### otto
+
+You can run (Go) JavaScript from the commandline with:
+http://github.com/robertkrimen/otto/tree/master/otto
+
+ $ go get -v github.com/robertkrimen/otto/otto
+
+Run JavaScript by entering some source on stdin or by giving otto a filename:
+
+ $ otto example.js
+
+### underscore
+
+Optionally include the JavaScript utility-belt library, underscore, with this
+import:
+
+```go
+import (
+ "github.com/robertkrimen/otto"
+ _ "github.com/robertkrimen/otto/underscore"
+)
+
+// Now every otto runtime will come loaded with underscore
+```
+
+For more information: http://github.com/robertkrimen/otto/tree/master/underscore
+
+
+### Caveat Emptor
+
+The following are some limitations with otto:
+
+ * "use strict" will parse, but does nothing.
+ * The regular expression engine (re2/regexp) is not fully compatible with the ECMA5 specification.
+ * Otto targets ES5. ES6 features (eg: Typed Arrays) are not supported.
+
+
+### Regular Expression Incompatibility
+
+Go translates JavaScript-style regular expressions into something that is
+"regexp" compatible via `parser.TransformRegExp`. Unfortunately, RegExp requires
+backtracking for some patterns, and backtracking is not supported by the
+standard Go engine: https://code.google.com/p/re2/wiki/Syntax
+
+Therefore, the following syntax is incompatible:
+
+ (?=) // Lookahead (positive), currently a parsing error
+    (?!)  // Lookahead (negative), currently a parsing error
+ \1 // Backreference (\1, \2, \3, ...), currently a parsing error
+
+A brief discussion of these limitations: "Regexp (?!re)"
+https://groups.google.com/forum/?fromgroups=#%21topic/golang-nuts/7qgSDWPIh_E
+
+More information about re2: https://code.google.com/p/re2/
+
+In addition to the above, re2 (Go) has a different definition for \s: [\t\n\f\r
+]. The JavaScript definition, on the other hand, also includes \v, Unicode
+"Separator, Space", etc.
+
+
+### Halting Problem
+
+If you want to stop long running executions (like third-party code), you can use
+the interrupt channel to do this:
+
+```go
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/robertkrimen/otto"
+)
+
+var halt = errors.New("Stahp")
+
+func main() {
+ runUnsafe(`var abc = [];`)
+ runUnsafe(`
+ while (true) {
+ // Loop forever
+ }`)
+}
+
+func runUnsafe(unsafe string) {
+ start := time.Now()
+ defer func() {
+ duration := time.Since(start)
+ if caught := recover(); caught != nil {
+ if caught == halt {
+            fmt.Fprintf(os.Stderr, "Some code took too long! Stopping after: %v\n", duration)
+ return
+ }
+ panic(caught) // Something else happened, repanic!
+ }
+ fmt.Fprintf(os.Stderr, "Ran code successfully: %v\n", duration)
+ }()
+
+ vm := otto.New()
+ vm.Interrupt = make(chan func(), 1) // The buffer prevents blocking
+
+ go func() {
+ time.Sleep(2 * time.Second) // Stop after two seconds
+ vm.Interrupt <- func() {
+ panic(halt)
+ }
+ }()
+
+ vm.Run(unsafe) // Here be dragons (risky code)
+}
+```
+
+Where is setTimeout/setInterval?
+
+These timing functions are not actually part of the ECMA-262 specification.
+Typically, they belong to the `window` object (in the browser). It would not be
+difficult to provide something like these via Go, but you probably want to wrap
+otto in an event loop in that case.
+
+For an example of how this could be done in Go with otto, see natto:
+
+http://github.com/robertkrimen/natto
+
+Here is some more discussion of the issue:
+
+* http://book.mixu.net/node/ch2.html
+
+* http://en.wikipedia.org/wiki/Reentrancy_%28computing%29
+
+* http://aaroncrane.co.uk/2009/02/perl_safe_signals/
+
+## Usage
+
+```go
+var ErrVersion = errors.New("version mismatch")
+```
+
+#### type Error
+
+```go
+type Error struct {
+}
+```
+
+An Error represents a runtime error, e.g. a TypeError, a ReferenceError, etc.
+
+#### func (Error) Error
+
+```go
+func (err Error) Error() string
+```
+Error returns a description of the error
+
+ TypeError: 'def' is not a function
+
+#### func (Error) String
+
+```go
+func (err Error) String() string
+```
+String returns a description of the error and a trace of where the error
+occurred.
+
+ TypeError: 'def' is not a function
+ at xyz (<anonymous>:3:9)
+        at <anonymous>:7:1
+
+#### type FunctionCall
+
+```go
+type FunctionCall struct {
+ This Value
+ ArgumentList []Value
+ Otto *Otto
+}
+```
+
+FunctionCall is an encapsulation of a JavaScript function call.
+
+#### func (FunctionCall) Argument
+
+```go
+func (self FunctionCall) Argument(index int) Value
+```
+Argument will return the value of the argument at the given index.
+
+If no such argument exists, undefined is returned.
+
+#### type Object
+
+```go
+type Object struct {
+}
+```
+
+Object is the representation of a JavaScript object.
+
+#### func (Object) Call
+
+```go
+func (self Object) Call(name string, argumentList ...interface{}) (Value, error)
+```
+Call a method on the object.
+
+It is essentially equivalent to:
+
+ var method, _ := object.Get(name)
+ method.Call(object, argumentList...)
+
+An undefined value and an error will result if:
+
+ 1. There is an error during conversion of the argument list
+ 2. The property is not actually a function
+ 3. An (uncaught) exception is thrown
+
+#### func (Object) Class
+
+```go
+func (self Object) Class() string
+```
+Class will return the class string of the object.
+
+The return value will (generally) be one of:
+
+ Object
+ Function
+ Array
+ String
+ Number
+ Boolean
+ Date
+ RegExp
+
+#### func (Object) Get
+
+```go
+func (self Object) Get(name string) (Value, error)
+```
+Get the value of the property with the given name.
+
+#### func (Object) Keys
+
+```go
+func (self Object) Keys() []string
+```
+Get the keys for the object
+
+Equivalent to calling Object.keys on the object
+
+#### func (Object) Set
+
+```go
+func (self Object) Set(name string, value interface{}) error
+```
+Set the property of the given name to the given value.
+
+An error will result if setting the property triggers an exception (i.e.
+read-only), or there is an error during conversion of the given value.
+
+#### func (Object) Value
+
+```go
+func (self Object) Value() Value
+```
+Value will return self as a value.
+
+#### type Otto
+
+```go
+type Otto struct {
+ // Interrupt is a channel for interrupting the runtime. You can use this to halt a long running execution, for example.
+ // See "Halting Problem" for more information.
+ Interrupt chan func()
+}
+```
+
+Otto is the representation of the JavaScript runtime. Each instance of Otto has
+a self-contained namespace.
+
+#### func New
+
+```go
+func New() *Otto
+```
+New will allocate a new JavaScript runtime
+
+#### func Run
+
+```go
+func Run(src interface{}) (*Otto, Value, error)
+```
+Run will allocate a new JavaScript runtime, run the given source on the
+allocated runtime, and return the runtime, resulting value, and error (if any).
+
+src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST
+always be in UTF-8.
+
+src may also be a Script.
+
+src may also be a Program, but if the AST has been modified, then runtime
+behavior is undefined.
+
+#### func (Otto) Call
+
+```go
+func (self Otto) Call(source string, this interface{}, argumentList ...interface{}) (Value, error)
+```
+Call the given JavaScript with a given this and arguments.
+
+If this is nil, then some special handling takes place to determine the proper
+this value, falling back to a "standard" invocation if necessary (where this is
+undefined).
+
+If source begins with "new " (A lowercase new followed by a space), then Call
+will invoke the function constructor rather than performing a function call. In
+this case, the this argument has no effect.
+
+```go
+// value is a String object
+value, _ := vm.Call("Object", nil, "Hello, World.")
+
+// Likewise...
+value, _ := vm.Call("new Object", nil, "Hello, World.")
+
+// This will perform a concat on the given array and return the result
+// value is [ 1, 2, 3, undefined, 4, 5, 6, 7, "abc" ]
+value, _ := vm.Call(`[ 1, 2, 3, undefined, 4 ].concat`, nil, 5, 6, 7, "abc")
+```
+
+#### func (*Otto) Compile
+
+```go
+func (self *Otto) Compile(filename string, src interface{}) (*Script, error)
+```
+Compile will parse the given source and return a Script value or nil and an
+error if there was a problem during compilation.
+
+```go
+script, err := vm.Compile("", `var abc; if (!abc) abc = 0; abc += 2; abc;`)
+vm.Run(script)
+```
+
+#### func (*Otto) Copy
+
+```go
+func (in *Otto) Copy() *Otto
+```
+Copy will create a copy/clone of the runtime.
+
+Copy is useful for saving some time when creating many similar runtimes.
+
+This method works by walking the original runtime and cloning each object,
+scope, stash, etc. into a new runtime.
+
+Be on the lookout for memory leaks or inadvertent sharing of resources.
+
+#### func (Otto) Get
+
+```go
+func (self Otto) Get(name string) (Value, error)
+```
+Get the value of the top-level binding of the given name.
+
+If there is an error (like the binding does not exist), then the value will be
+undefined.
+
+#### func (Otto) Object
+
+```go
+func (self Otto) Object(source string) (*Object, error)
+```
+Object will run the given source and return the result as an object.
+
+For example, accessing an existing object:
+
+```go
+object, _ := vm.Object(`Number`)
+```
+
+Or, creating a new object:
+
+```go
+object, _ := vm.Object(`({ xyzzy: "Nothing happens." })`)
+```
+
+Or, creating and assigning an object:
+
+```go
+object, _ := vm.Object(`xyzzy = {}`)
+object.Set("volume", 11)
+```
+
+If there is an error (like the source does not result in an object), then nil
+and an error is returned.
+
+#### func (Otto) Run
+
+```go
+func (self Otto) Run(src interface{}) (Value, error)
+```
+Run will run the given source (parsing it first if necessary), returning the
+resulting value and error (if any)
+
+src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST
+always be in UTF-8.
+
+If the runtime is unable to parse source, then this function will return
+undefined and the parse error (nothing will be evaluated in this case).
+
+src may also be a Script.
+
+src may also be a Program, but if the AST has been modified, then runtime
+behavior is undefined.
+
+#### func (Otto) Set
+
+```go
+func (self Otto) Set(name string, value interface{}) error
+```
+Set the top-level binding of the given name to the given value.
+
+Set will automatically apply ToValue to the given value in order to convert it
+to a JavaScript value (type Value).
+
+If there is an error (like the binding is read-only, or the ToValue conversion
+fails), then an error is returned.
+
+If the top-level binding does not exist, it will be created.
+
+#### func (Otto) ToValue
+
+```go
+func (self Otto) ToValue(value interface{}) (Value, error)
+```
+ToValue will convert an interface{} value to a value digestible by
+otto/JavaScript.
+
+#### type Script
+
+```go
+type Script struct {
+}
+```
+
+Script is a handle for some (reusable) JavaScript. Passing a Script value to a
+run method will evaluate the JavaScript.
+
+#### func (*Script) String
+
+```go
+func (self *Script) String() string
+```
+
+#### type Value
+
+```go
+type Value struct {
+}
+```
+
+Value is the representation of a JavaScript value.
+
+#### func FalseValue
+
+```go
+func FalseValue() Value
+```
+FalseValue will return a value representing false.
+
+It is equivalent to:
+
+```go
+ToValue(false)
+```
+
+#### func NaNValue
+
+```go
+func NaNValue() Value
+```
+NaNValue will return a value representing NaN.
+
+It is equivalent to:
+
+```go
+ToValue(math.NaN())
+```
+
+#### func NullValue
+
+```go
+func NullValue() Value
+```
+NullValue will return a Value representing null.
+
+#### func ToValue
+
+```go
+func ToValue(value interface{}) (Value, error)
+```
+ToValue will convert an interface{} value to a value digestible by
+otto/JavaScript
+
+This function will not work for advanced types (struct, map, slice/array, etc.)
+and you should use Otto.ToValue instead.
+
+#### func TrueValue
+
+```go
+func TrueValue() Value
+```
+TrueValue will return a value representing true.
+
+It is equivalent to:
+
+```go
+ToValue(true)
+```
+
+#### func UndefinedValue
+
+```go
+func UndefinedValue() Value
+```
+UndefinedValue will return a Value representing undefined.
+
+#### func (Value) Call
+
+```go
+func (value Value) Call(this Value, argumentList ...interface{}) (Value, error)
+```
+Call the value as a function with the given this value and argument list and
+return the result of invocation. It is essentially equivalent to:
+
+ value.apply(thisValue, argumentList)
+
+An undefined value and an error will result if:
+
+ 1. There is an error during conversion of the argument list
+ 2. The value is not actually a function
+ 3. An (uncaught) exception is thrown
+
+#### func (Value) Class
+
+```go
+func (value Value) Class() string
+```
+Class will return the class string of the value or the empty string if value is
+not an object.
+
+The return value will (generally) be one of:
+
+ Object
+ Function
+ Array
+ String
+ Number
+ Boolean
+ Date
+ RegExp
+
+#### func (Value) Export
+
+```go
+func (self Value) Export() (interface{}, error)
+```
+Export will attempt to convert the value to a Go representation and return it
+via an interface{} kind.
+
+Export returns an error, but it will always be nil. It is present for backwards
+compatibility.
+
+If a reasonable conversion is not possible, then the original value is returned.
+
+ undefined -> nil (FIXME?: Should be Value{})
+ null -> nil
+ boolean -> bool
+ number -> A number type (int, float32, uint64, ...)
+ string -> string
+ Array -> []interface{}
+ Object -> map[string]interface{}
+
+#### func (Value) IsBoolean
+
+```go
+func (value Value) IsBoolean() bool
+```
+IsBoolean will return true if value is a boolean (primitive).
+
+#### func (Value) IsDefined
+
+```go
+func (value Value) IsDefined() bool
+```
+IsDefined will return false if the value is undefined, and true otherwise.
+
+#### func (Value) IsFunction
+
+```go
+func (value Value) IsFunction() bool
+```
+IsFunction will return true if value is a function.
+
+#### func (Value) IsNaN
+
+```go
+func (value Value) IsNaN() bool
+```
+IsNaN will return true if value is NaN (or would convert to NaN).
+
+#### func (Value) IsNull
+
+```go
+func (value Value) IsNull() bool
+```
+IsNull will return true if the value is null, and false otherwise.
+
+#### func (Value) IsNumber
+
+```go
+func (value Value) IsNumber() bool
+```
+IsNumber will return true if value is a number (primitive).
+
+#### func (Value) IsObject
+
+```go
+func (value Value) IsObject() bool
+```
+IsObject will return true if value is an object.
+
+#### func (Value) IsPrimitive
+
+```go
+func (value Value) IsPrimitive() bool
+```
+IsPrimitive will return true if value is a primitive (any kind of primitive).
+
+#### func (Value) IsString
+
+```go
+func (value Value) IsString() bool
+```
+IsString will return true if value is a string (primitive).
+
+#### func (Value) IsUndefined
+
+```go
+func (value Value) IsUndefined() bool
+```
+IsUndefined will return true if the value is undefined, and false otherwise.
+
+#### func (Value) Object
+
+```go
+func (value Value) Object() *Object
+```
+Object will return the object of the value, or nil if value is not an object.
+
+This method will not do any implicit conversion. For example, calling this
+method on a string primitive value will not return a String object.
+
+#### func (Value) String
+
+```go
+func (value Value) String() string
+```
+String will return the value as a string.
+
+This method will return the empty string if there is an error.
+
+#### func (Value) ToBoolean
+
+```go
+func (value Value) ToBoolean() (bool, error)
+```
+ToBoolean will convert the value to a boolean (bool).
+
+ ToValue(0).ToBoolean() => false
+ ToValue("").ToBoolean() => false
+ ToValue(true).ToBoolean() => true
+ ToValue(1).ToBoolean() => true
+ ToValue("Nothing happens").ToBoolean() => true
+
+If there is an error during the conversion process (like an uncaught exception),
+then the result will be false and an error.
+
+#### func (Value) ToFloat
+
+```go
+func (value Value) ToFloat() (float64, error)
+```
+ToFloat will convert the value to a number (float64).
+
+ ToValue(0).ToFloat() => 0.
+ ToValue(1.1).ToFloat() => 1.1
+ ToValue("11").ToFloat() => 11.
+
+If there is an error during the conversion process (like an uncaught exception),
+then the result will be 0 and an error.
+
+#### func (Value) ToInteger
+
+```go
+func (value Value) ToInteger() (int64, error)
+```
+ToInteger will convert the value to a number (int64).
+
+ ToValue(0).ToInteger() => 0
+ ToValue(1.1).ToInteger() => 1
+ ToValue("11").ToInteger() => 11
+
+If there is an error during the conversion process (like an uncaught exception),
+then the result will be 0 and an error.
+
+#### func (Value) ToString
+
+```go
+func (value Value) ToString() (string, error)
+```
+ToString will convert the value to a string (string).
+
+ ToValue(0).ToString() => "0"
+ ToValue(false).ToString() => "false"
+ ToValue(1.1).ToString() => "1.1"
+ ToValue("11").ToString() => "11"
+ ToValue('Nothing happens.').ToString() => "Nothing happens."
+
+If there is an error during the conversion process (like an uncaught exception),
+then the result will be the empty string ("") and an error.
+
+--
+**godocdown** http://github.com/robertkrimen/godocdown
diff --git a/vendor/github.com/robertkrimen/otto/ast/README.markdown b/vendor/github.com/robertkrimen/otto/ast/README.markdown
new file mode 100644
index 000000000..a785da911
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/ast/README.markdown
@@ -0,0 +1,1068 @@
+# ast
+--
+ import "github.com/robertkrimen/otto/ast"
+
+Package ast declares types representing a JavaScript AST.
+
+
+### Warning
+
+The parser and AST interfaces are still works-in-progress (particularly where
+node types are concerned) and may change in the future.
+
+## Usage
+
+#### type ArrayLiteral
+
+```go
+type ArrayLiteral struct {
+ LeftBracket file.Idx
+ RightBracket file.Idx
+ Value []Expression
+}
+```
+
+
+#### func (*ArrayLiteral) Idx0
+
+```go
+func (self *ArrayLiteral) Idx0() file.Idx
+```
+
+#### func (*ArrayLiteral) Idx1
+
+```go
+func (self *ArrayLiteral) Idx1() file.Idx
+```
+
+#### type AssignExpression
+
+```go
+type AssignExpression struct {
+ Operator token.Token
+ Left Expression
+ Right Expression
+}
+```
+
+
+#### func (*AssignExpression) Idx0
+
+```go
+func (self *AssignExpression) Idx0() file.Idx
+```
+
+#### func (*AssignExpression) Idx1
+
+```go
+func (self *AssignExpression) Idx1() file.Idx
+```
+
+#### type BadExpression
+
+```go
+type BadExpression struct {
+ From file.Idx
+ To file.Idx
+}
+```
+
+
+#### func (*BadExpression) Idx0
+
+```go
+func (self *BadExpression) Idx0() file.Idx
+```
+
+#### func (*BadExpression) Idx1
+
+```go
+func (self *BadExpression) Idx1() file.Idx
+```
+
+#### type BadStatement
+
+```go
+type BadStatement struct {
+ From file.Idx
+ To file.Idx
+}
+```
+
+
+#### func (*BadStatement) Idx0
+
+```go
+func (self *BadStatement) Idx0() file.Idx
+```
+
+#### func (*BadStatement) Idx1
+
+```go
+func (self *BadStatement) Idx1() file.Idx
+```
+
+#### type BinaryExpression
+
+```go
+type BinaryExpression struct {
+ Operator token.Token
+ Left Expression
+ Right Expression
+ Comparison bool
+}
+```
+
+
+#### func (*BinaryExpression) Idx0
+
+```go
+func (self *BinaryExpression) Idx0() file.Idx
+```
+
+#### func (*BinaryExpression) Idx1
+
+```go
+func (self *BinaryExpression) Idx1() file.Idx
+```
+
+#### type BlockStatement
+
+```go
+type BlockStatement struct {
+ LeftBrace file.Idx
+ List []Statement
+ RightBrace file.Idx
+}
+```
+
+
+#### func (*BlockStatement) Idx0
+
+```go
+func (self *BlockStatement) Idx0() file.Idx
+```
+
+#### func (*BlockStatement) Idx1
+
+```go
+func (self *BlockStatement) Idx1() file.Idx
+```
+
+#### type BooleanLiteral
+
+```go
+type BooleanLiteral struct {
+ Idx file.Idx
+ Literal string
+ Value bool
+}
+```
+
+
+#### func (*BooleanLiteral) Idx0
+
+```go
+func (self *BooleanLiteral) Idx0() file.Idx
+```
+
+#### func (*BooleanLiteral) Idx1
+
+```go
+func (self *BooleanLiteral) Idx1() file.Idx
+```
+
+#### type BracketExpression
+
+```go
+type BracketExpression struct {
+ Left Expression
+ Member Expression
+ LeftBracket file.Idx
+ RightBracket file.Idx
+}
+```
+
+
+#### func (*BracketExpression) Idx0
+
+```go
+func (self *BracketExpression) Idx0() file.Idx
+```
+
+#### func (*BracketExpression) Idx1
+
+```go
+func (self *BracketExpression) Idx1() file.Idx
+```
+
+#### type BranchStatement
+
+```go
+type BranchStatement struct {
+ Idx file.Idx
+ Token token.Token
+ Label *Identifier
+}
+```
+
+
+#### func (*BranchStatement) Idx0
+
+```go
+func (self *BranchStatement) Idx0() file.Idx
+```
+
+#### func (*BranchStatement) Idx1
+
+```go
+func (self *BranchStatement) Idx1() file.Idx
+```
+
+#### type CallExpression
+
+```go
+type CallExpression struct {
+ Callee Expression
+ LeftParenthesis file.Idx
+ ArgumentList []Expression
+ RightParenthesis file.Idx
+}
+```
+
+
+#### func (*CallExpression) Idx0
+
+```go
+func (self *CallExpression) Idx0() file.Idx
+```
+
+#### func (*CallExpression) Idx1
+
+```go
+func (self *CallExpression) Idx1() file.Idx
+```
+
+#### type CaseStatement
+
+```go
+type CaseStatement struct {
+ Case file.Idx
+ Test Expression
+ Consequent []Statement
+}
+```
+
+
+#### func (*CaseStatement) Idx0
+
+```go
+func (self *CaseStatement) Idx0() file.Idx
+```
+
+#### func (*CaseStatement) Idx1
+
+```go
+func (self *CaseStatement) Idx1() file.Idx
+```
+
+#### type CatchStatement
+
+```go
+type CatchStatement struct {
+ Catch file.Idx
+ Parameter *Identifier
+ Body Statement
+}
+```
+
+
+#### func (*CatchStatement) Idx0
+
+```go
+func (self *CatchStatement) Idx0() file.Idx
+```
+
+#### func (*CatchStatement) Idx1
+
+```go
+func (self *CatchStatement) Idx1() file.Idx
+```
+
+#### type ConditionalExpression
+
+```go
+type ConditionalExpression struct {
+ Test Expression
+ Consequent Expression
+ Alternate Expression
+}
+```
+
+
+#### func (*ConditionalExpression) Idx0
+
+```go
+func (self *ConditionalExpression) Idx0() file.Idx
+```
+
+#### func (*ConditionalExpression) Idx1
+
+```go
+func (self *ConditionalExpression) Idx1() file.Idx
+```
+
+#### type DebuggerStatement
+
+```go
+type DebuggerStatement struct {
+ Debugger file.Idx
+}
+```
+
+
+#### func (*DebuggerStatement) Idx0
+
+```go
+func (self *DebuggerStatement) Idx0() file.Idx
+```
+
+#### func (*DebuggerStatement) Idx1
+
+```go
+func (self *DebuggerStatement) Idx1() file.Idx
+```
+
+#### type Declaration
+
+```go
+type Declaration interface {
+ // contains filtered or unexported methods
+}
+```
+
+All declaration nodes implement the Declaration interface.
+
+#### type DoWhileStatement
+
+```go
+type DoWhileStatement struct {
+ Do file.Idx
+ Test Expression
+ Body Statement
+}
+```
+
+
+#### func (*DoWhileStatement) Idx0
+
+```go
+func (self *DoWhileStatement) Idx0() file.Idx
+```
+
+#### func (*DoWhileStatement) Idx1
+
+```go
+func (self *DoWhileStatement) Idx1() file.Idx
+```
+
+#### type DotExpression
+
+```go
+type DotExpression struct {
+ Left Expression
+ Identifier Identifier
+}
+```
+
+
+#### func (*DotExpression) Idx0
+
+```go
+func (self *DotExpression) Idx0() file.Idx
+```
+
+#### func (*DotExpression) Idx1
+
+```go
+func (self *DotExpression) Idx1() file.Idx
+```
+
+#### type EmptyStatement
+
+```go
+type EmptyStatement struct {
+ Semicolon file.Idx
+}
+```
+
+
+#### func (*EmptyStatement) Idx0
+
+```go
+func (self *EmptyStatement) Idx0() file.Idx
+```
+
+#### func (*EmptyStatement) Idx1
+
+```go
+func (self *EmptyStatement) Idx1() file.Idx
+```
+
+#### type Expression
+
+```go
+type Expression interface {
+ Node
+ // contains filtered or unexported methods
+}
+```
+
+All expression nodes implement the Expression interface.
+
+#### type ExpressionStatement
+
+```go
+type ExpressionStatement struct {
+ Expression Expression
+}
+```
+
+
+#### func (*ExpressionStatement) Idx0
+
+```go
+func (self *ExpressionStatement) Idx0() file.Idx
+```
+
+#### func (*ExpressionStatement) Idx1
+
+```go
+func (self *ExpressionStatement) Idx1() file.Idx
+```
+
+#### type ForInStatement
+
+```go
+type ForInStatement struct {
+ For file.Idx
+ Into Expression
+ Source Expression
+ Body Statement
+}
+```
+
+
+#### func (*ForInStatement) Idx0
+
+```go
+func (self *ForInStatement) Idx0() file.Idx
+```
+
+#### func (*ForInStatement) Idx1
+
+```go
+func (self *ForInStatement) Idx1() file.Idx
+```
+
+#### type ForStatement
+
+```go
+type ForStatement struct {
+ For file.Idx
+ Initializer Expression
+ Update Expression
+ Test Expression
+ Body Statement
+}
+```
+
+
+#### func (*ForStatement) Idx0
+
+```go
+func (self *ForStatement) Idx0() file.Idx
+```
+
+#### func (*ForStatement) Idx1
+
+```go
+func (self *ForStatement) Idx1() file.Idx
+```
+
+#### type FunctionDeclaration
+
+```go
+type FunctionDeclaration struct {
+ Function *FunctionLiteral
+}
+```
+
+
+#### type FunctionLiteral
+
+```go
+type FunctionLiteral struct {
+ Function file.Idx
+ Name *Identifier
+ ParameterList *ParameterList
+ Body Statement
+ Source string
+
+ DeclarationList []Declaration
+}
+```
+
+
+#### func (*FunctionLiteral) Idx0
+
+```go
+func (self *FunctionLiteral) Idx0() file.Idx
+```
+
+#### func (*FunctionLiteral) Idx1
+
+```go
+func (self *FunctionLiteral) Idx1() file.Idx
+```
+
+#### type Identifier
+
+```go
+type Identifier struct {
+ Name string
+ Idx file.Idx
+}
+```
+
+
+#### func (*Identifier) Idx0
+
+```go
+func (self *Identifier) Idx0() file.Idx
+```
+
+#### func (*Identifier) Idx1
+
+```go
+func (self *Identifier) Idx1() file.Idx
+```
+
+#### type IfStatement
+
+```go
+type IfStatement struct {
+ If file.Idx
+ Test Expression
+ Consequent Statement
+ Alternate Statement
+}
+```
+
+
+#### func (*IfStatement) Idx0
+
+```go
+func (self *IfStatement) Idx0() file.Idx
+```
+
+#### func (*IfStatement) Idx1
+
+```go
+func (self *IfStatement) Idx1() file.Idx
+```
+
+#### type LabelledStatement
+
+```go
+type LabelledStatement struct {
+ Label *Identifier
+ Colon file.Idx
+ Statement Statement
+}
+```
+
+
+#### func (*LabelledStatement) Idx0
+
+```go
+func (self *LabelledStatement) Idx0() file.Idx
+```
+
+#### func (*LabelledStatement) Idx1
+
+```go
+func (self *LabelledStatement) Idx1() file.Idx
+```
+
+#### type NewExpression
+
+```go
+type NewExpression struct {
+ New file.Idx
+ Callee Expression
+ LeftParenthesis file.Idx
+ ArgumentList []Expression
+ RightParenthesis file.Idx
+}
+```
+
+
+#### func (*NewExpression) Idx0
+
+```go
+func (self *NewExpression) Idx0() file.Idx
+```
+
+#### func (*NewExpression) Idx1
+
+```go
+func (self *NewExpression) Idx1() file.Idx
+```
+
+#### type Node
+
+```go
+type Node interface {
+ Idx0() file.Idx // The index of the first character belonging to the node
+ Idx1() file.Idx // The index of the first character immediately after the node
+}
+```
+
+All nodes implement the Node interface.
+
+#### type NullLiteral
+
+```go
+type NullLiteral struct {
+ Idx file.Idx
+ Literal string
+}
+```
+
+
+#### func (*NullLiteral) Idx0
+
+```go
+func (self *NullLiteral) Idx0() file.Idx
+```
+
+#### func (*NullLiteral) Idx1
+
+```go
+func (self *NullLiteral) Idx1() file.Idx
+```
+
+#### type NumberLiteral
+
+```go
+type NumberLiteral struct {
+ Idx file.Idx
+ Literal string
+ Value interface{}
+}
+```
+
+
+#### func (*NumberLiteral) Idx0
+
+```go
+func (self *NumberLiteral) Idx0() file.Idx
+```
+
+#### func (*NumberLiteral) Idx1
+
+```go
+func (self *NumberLiteral) Idx1() file.Idx
+```
+
+#### type ObjectLiteral
+
+```go
+type ObjectLiteral struct {
+ LeftBrace file.Idx
+ RightBrace file.Idx
+ Value []Property
+}
+```
+
+
+#### func (*ObjectLiteral) Idx0
+
+```go
+func (self *ObjectLiteral) Idx0() file.Idx
+```
+
+#### func (*ObjectLiteral) Idx1
+
+```go
+func (self *ObjectLiteral) Idx1() file.Idx
+```
+
+#### type ParameterList
+
+```go
+type ParameterList struct {
+ Opening file.Idx
+ List []*Identifier
+ Closing file.Idx
+}
+```
+
+
+#### type Program
+
+```go
+type Program struct {
+ Body []Statement
+
+ DeclarationList []Declaration
+
+ File *file.File
+}
+```
+
+
+#### func (*Program) Idx0
+
+```go
+func (self *Program) Idx0() file.Idx
+```
+
+#### func (*Program) Idx1
+
+```go
+func (self *Program) Idx1() file.Idx
+```
+
+#### type Property
+
+```go
+type Property struct {
+ Key string
+ Kind string
+ Value Expression
+}
+```
+
+
+#### type RegExpLiteral
+
+```go
+type RegExpLiteral struct {
+ Idx file.Idx
+ Literal string
+ Pattern string
+ Flags string
+ Value string
+}
+```
+
+
+#### func (*RegExpLiteral) Idx0
+
+```go
+func (self *RegExpLiteral) Idx0() file.Idx
+```
+
+#### func (*RegExpLiteral) Idx1
+
+```go
+func (self *RegExpLiteral) Idx1() file.Idx
+```
+
+#### type ReturnStatement
+
+```go
+type ReturnStatement struct {
+ Return file.Idx
+ Argument Expression
+}
+```
+
+
+#### func (*ReturnStatement) Idx0
+
+```go
+func (self *ReturnStatement) Idx0() file.Idx
+```
+
+#### func (*ReturnStatement) Idx1
+
+```go
+func (self *ReturnStatement) Idx1() file.Idx
+```
+
+#### type SequenceExpression
+
+```go
+type SequenceExpression struct {
+ Sequence []Expression
+}
+```
+
+
+#### func (*SequenceExpression) Idx0
+
+```go
+func (self *SequenceExpression) Idx0() file.Idx
+```
+
+#### func (*SequenceExpression) Idx1
+
+```go
+func (self *SequenceExpression) Idx1() file.Idx
+```
+
+#### type Statement
+
+```go
+type Statement interface {
+ Node
+ // contains filtered or unexported methods
+}
+```
+
+All statement nodes implement the Statement interface.
+
+#### type StringLiteral
+
+```go
+type StringLiteral struct {
+ Idx file.Idx
+ Literal string
+ Value string
+}
+```
+
+
+#### func (*StringLiteral) Idx0
+
+```go
+func (self *StringLiteral) Idx0() file.Idx
+```
+
+#### func (*StringLiteral) Idx1
+
+```go
+func (self *StringLiteral) Idx1() file.Idx
+```
+
+#### type SwitchStatement
+
+```go
+type SwitchStatement struct {
+ Switch file.Idx
+ Discriminant Expression
+ Default int
+ Body []*CaseStatement
+}
+```
+
+
+#### func (*SwitchStatement) Idx0
+
+```go
+func (self *SwitchStatement) Idx0() file.Idx
+```
+
+#### func (*SwitchStatement) Idx1
+
+```go
+func (self *SwitchStatement) Idx1() file.Idx
+```
+
+#### type ThisExpression
+
+```go
+type ThisExpression struct {
+ Idx file.Idx
+}
+```
+
+
+#### func (*ThisExpression) Idx0
+
+```go
+func (self *ThisExpression) Idx0() file.Idx
+```
+
+#### func (*ThisExpression) Idx1
+
+```go
+func (self *ThisExpression) Idx1() file.Idx
+```
+
+#### type ThrowStatement
+
+```go
+type ThrowStatement struct {
+ Throw file.Idx
+ Argument Expression
+}
+```
+
+
+#### func (*ThrowStatement) Idx0
+
+```go
+func (self *ThrowStatement) Idx0() file.Idx
+```
+
+#### func (*ThrowStatement) Idx1
+
+```go
+func (self *ThrowStatement) Idx1() file.Idx
+```
+
+#### type TryStatement
+
+```go
+type TryStatement struct {
+ Try file.Idx
+ Body Statement
+ Catch *CatchStatement
+ Finally Statement
+}
+```
+
+
+#### func (*TryStatement) Idx0
+
+```go
+func (self *TryStatement) Idx0() file.Idx
+```
+
+#### func (*TryStatement) Idx1
+
+```go
+func (self *TryStatement) Idx1() file.Idx
+```
+
+#### type UnaryExpression
+
+```go
+type UnaryExpression struct {
+ Operator token.Token
+ Idx file.Idx // If a prefix operation
+ Operand Expression
+ Postfix bool
+}
+```
+
+
+#### func (*UnaryExpression) Idx0
+
+```go
+func (self *UnaryExpression) Idx0() file.Idx
+```
+
+#### func (*UnaryExpression) Idx1
+
+```go
+func (self *UnaryExpression) Idx1() file.Idx
+```
+
+#### type VariableDeclaration
+
+```go
+type VariableDeclaration struct {
+ Var file.Idx
+ List []*VariableExpression
+}
+```
+
+
+#### type VariableExpression
+
+```go
+type VariableExpression struct {
+ Name string
+ Idx file.Idx
+ Initializer Expression
+}
+```
+
+
+#### func (*VariableExpression) Idx0
+
+```go
+func (self *VariableExpression) Idx0() file.Idx
+```
+
+#### func (*VariableExpression) Idx1
+
+```go
+func (self *VariableExpression) Idx1() file.Idx
+```
+
+#### type VariableStatement
+
+```go
+type VariableStatement struct {
+ Var file.Idx
+ List []Expression
+}
+```
+
+
+#### func (*VariableStatement) Idx0
+
+```go
+func (self *VariableStatement) Idx0() file.Idx
+```
+
+#### func (*VariableStatement) Idx1
+
+```go
+func (self *VariableStatement) Idx1() file.Idx
+```
+
+#### type WhileStatement
+
+```go
+type WhileStatement struct {
+ While file.Idx
+ Test Expression
+ Body Statement
+}
+```
+
+
+#### func (*WhileStatement) Idx0
+
+```go
+func (self *WhileStatement) Idx0() file.Idx
+```
+
+#### func (*WhileStatement) Idx1
+
+```go
+func (self *WhileStatement) Idx1() file.Idx
+```
+
+#### type WithStatement
+
+```go
+type WithStatement struct {
+ With file.Idx
+ Object Expression
+ Body Statement
+}
+```
+
+
+#### func (*WithStatement) Idx0
+
+```go
+func (self *WithStatement) Idx0() file.Idx
+```
+
+#### func (*WithStatement) Idx1
+
+```go
+func (self *WithStatement) Idx1() file.Idx
+```
+
+--
+**godocdown** http://github.com/robertkrimen/godocdown
diff --git a/vendor/github.com/robertkrimen/otto/ast/comments.go b/vendor/github.com/robertkrimen/otto/ast/comments.go
new file mode 100644
index 000000000..ef2cc3d89
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/ast/comments.go
@@ -0,0 +1,278 @@
+package ast
+
+import (
+ "fmt"
+ "github.com/robertkrimen/otto/file"
+)
+
+// CommentPosition determines where the comment is in a given context
+type CommentPosition int
+
+const (
+ _ CommentPosition = iota
+ LEADING // Before the pertinent expression
+ TRAILING // After the pertinent expression
+ KEY // Before a key in an object
+ COLON // After a colon in a field declaration
+ FINAL // Final comments in a block, not belonging to a specific expression or the comment after a trailing , in an array or object literal
+ IF // After an if keyword
+ WHILE // After a while keyword
+ DO // After do keyword
+ FOR // After a for keyword
+ WITH // After a with keyword
+ TBD
+)
+
+// Comment contains the data of the comment
+type Comment struct {
+ Begin file.Idx
+ Text string
+ Position CommentPosition
+}
+
+// NewComment creates a new comment
+func NewComment(text string, idx file.Idx) *Comment {
+ comment := &Comment{
+ Begin: idx,
+ Text: text,
+ Position: TBD,
+ }
+
+ return comment
+}
+
+// String returns a stringified version of the position
+func (cp CommentPosition) String() string {
+ switch cp {
+ case LEADING:
+ return "Leading"
+ case TRAILING:
+ return "Trailing"
+ case KEY:
+ return "Key"
+ case COLON:
+ return "Colon"
+ case FINAL:
+ return "Final"
+ case IF:
+ return "If"
+ case WHILE:
+ return "While"
+ case DO:
+ return "Do"
+ case FOR:
+ return "For"
+ case WITH:
+ return "With"
+ default:
+ return "???"
+ }
+}
+
+// String returns a stringified version of the comment
+func (c Comment) String() string {
+ return fmt.Sprintf("Comment: %v", c.Text)
+}
+
+// Comments defines the current view of comments from the parser
+type Comments struct {
+ // CommentMap is a reference to the parser comment map
+ CommentMap CommentMap
+ // Comments lists the comments scanned, not linked to a node yet
+ Comments []*Comment
+ // future lists the comments after a line break during a sequence of comments
+ future []*Comment
+ // Current is node for which comments are linked to
+ Current Expression
+
+ // wasLineBreak determines if a line break occured while scanning for comments
+ wasLineBreak bool
+ // primary determines whether or not processing a primary expression
+ primary bool
+ // afterBlock determines whether or not being after a block statement
+ afterBlock bool
+}
+
+func NewComments() *Comments {
+ comments := &Comments{
+ CommentMap: CommentMap{},
+ }
+
+ return comments
+}
+
+func (c *Comments) String() string {
+ return fmt.Sprintf("NODE: %v, Comments: %v, Future: %v(LINEBREAK:%v)", c.Current, len(c.Comments), len(c.future), c.wasLineBreak)
+}
+
+// FetchAll returns all the currently scanned comments,
+// including those from the next line
+func (c *Comments) FetchAll() []*Comment {
+ defer func() {
+ c.Comments = nil
+ c.future = nil
+ }()
+
+ return append(c.Comments, c.future...)
+}
+
+// Fetch returns all the currently scanned comments
+func (c *Comments) Fetch() []*Comment {
+ defer func() {
+ c.Comments = nil
+ }()
+
+ return c.Comments
+}
+
+// ResetLineBreak marks the beginning of a new statement
+func (c *Comments) ResetLineBreak() {
+ c.wasLineBreak = false
+}
+
+// MarkPrimary will mark the context as processing a primary expression
+func (c *Comments) MarkPrimary() {
+ c.primary = true
+ c.wasLineBreak = false
+}
+
+// AfterBlock will mark the context as being after a block.
+func (c *Comments) AfterBlock() {
+ c.afterBlock = true
+}
+
+// AddComment adds a comment to the view.
+// Depending on the context, comments are added normally or as post line break.
+func (c *Comments) AddComment(comment *Comment) {
+ if c.primary {
+ if !c.wasLineBreak {
+ c.Comments = append(c.Comments, comment)
+ } else {
+ c.future = append(c.future, comment)
+ }
+ } else {
+ if !c.wasLineBreak || (c.Current == nil && !c.afterBlock) {
+ c.Comments = append(c.Comments, comment)
+ } else {
+ c.future = append(c.future, comment)
+ }
+ }
+}
+
+// MarkComments will mark the found comments as the given position.
+func (c *Comments) MarkComments(position CommentPosition) {
+ for _, comment := range c.Comments {
+ if comment.Position == TBD {
+ comment.Position = position
+ }
+ }
+ for _, c := range c.future {
+ if c.Position == TBD {
+ c.Position = position
+ }
+ }
+}
+
+// Unset the current node and apply the comments to the current expression.
+// Resets context variables.
+func (c *Comments) Unset() {
+ if c.Current != nil {
+ c.applyComments(c.Current, c.Current, TRAILING)
+ c.Current = nil
+ }
+ c.wasLineBreak = false
+ c.primary = false
+ c.afterBlock = false
+}
+
+// SetExpression sets the current expression.
+// It is applied the found comments, unless the previous expression has not been unset.
+// It is skipped if the node is already set or if it is a part of the previous node.
+func (c *Comments) SetExpression(node Expression) {
+ // Skipping same node
+ if c.Current == node {
+ return
+ }
+ if c.Current != nil && c.Current.Idx1() == node.Idx1() {
+ c.Current = node
+ return
+ }
+ previous := c.Current
+ c.Current = node
+
+ // Apply the found comments and futures to the node and the previous.
+ c.applyComments(node, previous, TRAILING)
+}
+
+// PostProcessNode applies all found comments to the given node
+func (c *Comments) PostProcessNode(node Node) {
+ c.applyComments(node, nil, TRAILING)
+}
+
+// applyComments applies both the comments and the future comments to the given node and the previous one,
+// based on the context.
+func (c *Comments) applyComments(node, previous Node, position CommentPosition) {
+ if previous != nil {
+ c.CommentMap.AddComments(previous, c.Comments, position)
+ c.Comments = nil
+ } else {
+ c.CommentMap.AddComments(node, c.Comments, position)
+ c.Comments = nil
+ }
+ // Only apply the future comments to the node if the previous is set.
+ // This is for detecting end of line comments and which node comments on the following lines belongs to
+ if previous != nil {
+ c.CommentMap.AddComments(node, c.future, position)
+ c.future = nil
+ }
+}
+
+// AtLineBreak will mark a line break
+func (c *Comments) AtLineBreak() {
+ c.wasLineBreak = true
+}
+
+// CommentMap is the data structure where all found comments are stored
+type CommentMap map[Node][]*Comment
+
+// AddComment adds a single comment to the map
+func (cm CommentMap) AddComment(node Node, comment *Comment) {
+ list := cm[node]
+ list = append(list, comment)
+
+ cm[node] = list
+}
+
+// AddComments adds a slice of comments, given a node and an updated position
+func (cm CommentMap) AddComments(node Node, comments []*Comment, position CommentPosition) {
+ for _, comment := range comments {
+ if comment.Position == TBD {
+ comment.Position = position
+ }
+ cm.AddComment(node, comment)
+ }
+}
+
+// Size returns the size of the map
+func (cm CommentMap) Size() int {
+ size := 0
+ for _, comments := range cm {
+ size += len(comments)
+ }
+
+ return size
+}
+
+// MoveComments moves comments with a given position from a node to another
+func (cm CommentMap) MoveComments(from, to Node, position CommentPosition) {
+ for i, c := range cm[from] {
+ if c.Position == position {
+ cm.AddComment(to, c)
+
+ // Remove the comment from the "from" slice
+ cm[from][i] = cm[from][len(cm[from])-1]
+ cm[from][len(cm[from])-1] = nil
+ cm[from] = cm[from][:len(cm[from])-1]
+ }
+ }
+}
diff --git a/vendor/github.com/robertkrimen/otto/ast/node.go b/vendor/github.com/robertkrimen/otto/ast/node.go
new file mode 100644
index 000000000..49ae375d1
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/ast/node.go
@@ -0,0 +1,515 @@
+/*
+Package ast declares types representing a JavaScript AST.
+
+Warning
+
+The parser and AST interfaces are still works-in-progress (particularly where
+node types are concerned) and may change in the future.
+
+*/
+package ast
+
+import (
+ "github.com/robertkrimen/otto/file"
+ "github.com/robertkrimen/otto/token"
+)
+
+// All nodes implement the Node interface.
+type Node interface {
+ Idx0() file.Idx // The index of the first character belonging to the node
+ Idx1() file.Idx // The index of the first character immediately after the node
+}
+
+// ========== //
+// Expression //
+// ========== //
+
+type (
+ // All expression nodes implement the Expression interface.
+ Expression interface {
+ Node
+ _expressionNode()
+ }
+
+ ArrayLiteral struct {
+ LeftBracket file.Idx
+ RightBracket file.Idx
+ Value []Expression
+ }
+
+ AssignExpression struct {
+ Operator token.Token
+ Left Expression
+ Right Expression
+ }
+
+ BadExpression struct {
+ From file.Idx
+ To file.Idx
+ }
+
+ BinaryExpression struct {
+ Operator token.Token
+ Left Expression
+ Right Expression
+ Comparison bool
+ }
+
+ BooleanLiteral struct {
+ Idx file.Idx
+ Literal string
+ Value bool
+ }
+
+ BracketExpression struct {
+ Left Expression
+ Member Expression
+ LeftBracket file.Idx
+ RightBracket file.Idx
+ }
+
+ CallExpression struct {
+ Callee Expression
+ LeftParenthesis file.Idx
+ ArgumentList []Expression
+ RightParenthesis file.Idx
+ }
+
+ ConditionalExpression struct {
+ Test Expression
+ Consequent Expression
+ Alternate Expression
+ }
+
+ DotExpression struct {
+ Left Expression
+ Identifier *Identifier
+ }
+
+ EmptyExpression struct {
+ Begin file.Idx
+ End file.Idx
+ }
+
+ FunctionLiteral struct {
+ Function file.Idx
+ Name *Identifier
+ ParameterList *ParameterList
+ Body Statement
+ Source string
+
+ DeclarationList []Declaration
+ }
+
+ Identifier struct {
+ Name string
+ Idx file.Idx
+ }
+
+ NewExpression struct {
+ New file.Idx
+ Callee Expression
+ LeftParenthesis file.Idx
+ ArgumentList []Expression
+ RightParenthesis file.Idx
+ }
+
+ NullLiteral struct {
+ Idx file.Idx
+ Literal string
+ }
+
+ NumberLiteral struct {
+ Idx file.Idx
+ Literal string
+ Value interface{}
+ }
+
+ ObjectLiteral struct {
+ LeftBrace file.Idx
+ RightBrace file.Idx
+ Value []Property
+ }
+
+ ParameterList struct {
+ Opening file.Idx
+ List []*Identifier
+ Closing file.Idx
+ }
+
+ Property struct {
+ Key string
+ Kind string
+ Value Expression
+ }
+
+ RegExpLiteral struct {
+ Idx file.Idx
+ Literal string
+ Pattern string
+ Flags string
+ Value string
+ }
+
+ SequenceExpression struct {
+ Sequence []Expression
+ }
+
+ StringLiteral struct {
+ Idx file.Idx
+ Literal string
+ Value string
+ }
+
+ ThisExpression struct {
+ Idx file.Idx
+ }
+
+ UnaryExpression struct {
+ Operator token.Token
+ Idx file.Idx // If a prefix operation
+ Operand Expression
+ Postfix bool
+ }
+
+ VariableExpression struct {
+ Name string
+ Idx file.Idx
+ Initializer Expression
+ }
+)
+
+// _expressionNode
+
+func (*ArrayLiteral) _expressionNode() {}
+func (*AssignExpression) _expressionNode() {}
+func (*BadExpression) _expressionNode() {}
+func (*BinaryExpression) _expressionNode() {}
+func (*BooleanLiteral) _expressionNode() {}
+func (*BracketExpression) _expressionNode() {}
+func (*CallExpression) _expressionNode() {}
+func (*ConditionalExpression) _expressionNode() {}
+func (*DotExpression) _expressionNode() {}
+func (*EmptyExpression) _expressionNode() {}
+func (*FunctionLiteral) _expressionNode() {}
+func (*Identifier) _expressionNode() {}
+func (*NewExpression) _expressionNode() {}
+func (*NullLiteral) _expressionNode() {}
+func (*NumberLiteral) _expressionNode() {}
+func (*ObjectLiteral) _expressionNode() {}
+func (*RegExpLiteral) _expressionNode() {}
+func (*SequenceExpression) _expressionNode() {}
+func (*StringLiteral) _expressionNode() {}
+func (*ThisExpression) _expressionNode() {}
+func (*UnaryExpression) _expressionNode() {}
+func (*VariableExpression) _expressionNode() {}
+
+// ========= //
+// Statement //
+// ========= //
+
+type (
+ // All statement nodes implement the Statement interface.
+ Statement interface {
+ Node
+ _statementNode()
+ }
+
+ BadStatement struct {
+ From file.Idx
+ To file.Idx
+ }
+
+ BlockStatement struct {
+ LeftBrace file.Idx
+ List []Statement
+ RightBrace file.Idx
+ }
+
+ BranchStatement struct {
+ Idx file.Idx
+ Token token.Token
+ Label *Identifier
+ }
+
+ CaseStatement struct {
+ Case file.Idx
+ Test Expression
+ Consequent []Statement
+ }
+
+ CatchStatement struct {
+ Catch file.Idx
+ Parameter *Identifier
+ Body Statement
+ }
+
+ DebuggerStatement struct {
+ Debugger file.Idx
+ }
+
+ DoWhileStatement struct {
+ Do file.Idx
+ Test Expression
+ Body Statement
+ }
+
+ EmptyStatement struct {
+ Semicolon file.Idx
+ }
+
+ ExpressionStatement struct {
+ Expression Expression
+ }
+
+ ForInStatement struct {
+ For file.Idx
+ Into Expression
+ Source Expression
+ Body Statement
+ }
+
+ ForStatement struct {
+ For file.Idx
+ Initializer Expression
+ Update Expression
+ Test Expression
+ Body Statement
+ }
+
+ FunctionStatement struct {
+ Function *FunctionLiteral
+ }
+
+ IfStatement struct {
+ If file.Idx
+ Test Expression
+ Consequent Statement
+ Alternate Statement
+ }
+
+ LabelledStatement struct {
+ Label *Identifier
+ Colon file.Idx
+ Statement Statement
+ }
+
+ ReturnStatement struct {
+ Return file.Idx
+ Argument Expression
+ }
+
+ SwitchStatement struct {
+ Switch file.Idx
+ Discriminant Expression
+ Default int
+ Body []*CaseStatement
+ }
+
+ ThrowStatement struct {
+ Throw file.Idx
+ Argument Expression
+ }
+
+ TryStatement struct {
+ Try file.Idx
+ Body Statement
+ Catch *CatchStatement
+ Finally Statement
+ }
+
+ VariableStatement struct {
+ Var file.Idx
+ List []Expression
+ }
+
+ WhileStatement struct {
+ While file.Idx
+ Test Expression
+ Body Statement
+ }
+
+ WithStatement struct {
+ With file.Idx
+ Object Expression
+ Body Statement
+ }
+)
+
+// _statementNode
+
+func (*BadStatement) _statementNode() {}
+func (*BlockStatement) _statementNode() {}
+func (*BranchStatement) _statementNode() {}
+func (*CaseStatement) _statementNode() {}
+func (*CatchStatement) _statementNode() {}
+func (*DebuggerStatement) _statementNode() {}
+func (*DoWhileStatement) _statementNode() {}
+func (*EmptyStatement) _statementNode() {}
+func (*ExpressionStatement) _statementNode() {}
+func (*ForInStatement) _statementNode() {}
+func (*ForStatement) _statementNode() {}
+func (*FunctionStatement) _statementNode() {}
+func (*IfStatement) _statementNode() {}
+func (*LabelledStatement) _statementNode() {}
+func (*ReturnStatement) _statementNode() {}
+func (*SwitchStatement) _statementNode() {}
+func (*ThrowStatement) _statementNode() {}
+func (*TryStatement) _statementNode() {}
+func (*VariableStatement) _statementNode() {}
+func (*WhileStatement) _statementNode() {}
+func (*WithStatement) _statementNode() {}
+
+// =========== //
+// Declaration //
+// =========== //
+
+type (
+ // All declaration nodes implement the Declaration interface.
+ Declaration interface {
+ _declarationNode()
+ }
+
+ FunctionDeclaration struct {
+ Function *FunctionLiteral
+ }
+
+ VariableDeclaration struct {
+ Var file.Idx
+ List []*VariableExpression
+ }
+)
+
+// _declarationNode
+
+func (*FunctionDeclaration) _declarationNode() {}
+func (*VariableDeclaration) _declarationNode() {}
+
+// ==== //
+// Node //
+// ==== //
+
+type Program struct {
+ Body []Statement
+
+ DeclarationList []Declaration
+
+ File *file.File
+
+ Comments CommentMap
+}
+
+// ==== //
+// Idx0 //
+// ==== //
+
+func (self *ArrayLiteral) Idx0() file.Idx { return self.LeftBracket }
+func (self *AssignExpression) Idx0() file.Idx { return self.Left.Idx0() }
+func (self *BadExpression) Idx0() file.Idx { return self.From }
+func (self *BinaryExpression) Idx0() file.Idx { return self.Left.Idx0() }
+func (self *BooleanLiteral) Idx0() file.Idx { return self.Idx }
+func (self *BracketExpression) Idx0() file.Idx { return self.Left.Idx0() }
+func (self *CallExpression) Idx0() file.Idx { return self.Callee.Idx0() }
+func (self *ConditionalExpression) Idx0() file.Idx { return self.Test.Idx0() }
+func (self *DotExpression) Idx0() file.Idx { return self.Left.Idx0() }
+func (self *EmptyExpression) Idx0() file.Idx { return self.Begin }
+func (self *FunctionLiteral) Idx0() file.Idx { return self.Function }
+func (self *Identifier) Idx0() file.Idx { return self.Idx }
+func (self *NewExpression) Idx0() file.Idx { return self.New }
+func (self *NullLiteral) Idx0() file.Idx { return self.Idx }
+func (self *NumberLiteral) Idx0() file.Idx { return self.Idx }
+func (self *ObjectLiteral) Idx0() file.Idx { return self.LeftBrace }
+func (self *RegExpLiteral) Idx0() file.Idx { return self.Idx }
+func (self *SequenceExpression) Idx0() file.Idx { return self.Sequence[0].Idx0() }
+func (self *StringLiteral) Idx0() file.Idx { return self.Idx }
+func (self *ThisExpression) Idx0() file.Idx { return self.Idx }
+func (self *UnaryExpression) Idx0() file.Idx { return self.Idx }
+func (self *VariableExpression) Idx0() file.Idx { return self.Idx }
+
+func (self *BadStatement) Idx0() file.Idx { return self.From }
+func (self *BlockStatement) Idx0() file.Idx { return self.LeftBrace }
+func (self *BranchStatement) Idx0() file.Idx { return self.Idx }
+func (self *CaseStatement) Idx0() file.Idx { return self.Case }
+func (self *CatchStatement) Idx0() file.Idx { return self.Catch }
+func (self *DebuggerStatement) Idx0() file.Idx { return self.Debugger }
+func (self *DoWhileStatement) Idx0() file.Idx { return self.Do }
+func (self *EmptyStatement) Idx0() file.Idx { return self.Semicolon }
+func (self *ExpressionStatement) Idx0() file.Idx { return self.Expression.Idx0() }
+func (self *ForInStatement) Idx0() file.Idx { return self.For }
+func (self *ForStatement) Idx0() file.Idx { return self.For }
+func (self *FunctionStatement) Idx0() file.Idx { return self.Function.Idx0() }
+func (self *IfStatement) Idx0() file.Idx { return self.If }
+func (self *LabelledStatement) Idx0() file.Idx { return self.Label.Idx0() }
+func (self *Program) Idx0() file.Idx { return self.Body[0].Idx0() }
+func (self *ReturnStatement) Idx0() file.Idx { return self.Return }
+func (self *SwitchStatement) Idx0() file.Idx { return self.Switch }
+func (self *ThrowStatement) Idx0() file.Idx { return self.Throw }
+func (self *TryStatement) Idx0() file.Idx { return self.Try }
+func (self *VariableStatement) Idx0() file.Idx { return self.Var }
+func (self *WhileStatement) Idx0() file.Idx { return self.While }
+func (self *WithStatement) Idx0() file.Idx { return self.With }
+
+// ==== //
+// Idx1 //
+// ==== //
+
+func (self *ArrayLiteral) Idx1() file.Idx { return self.RightBracket }
+func (self *AssignExpression) Idx1() file.Idx { return self.Right.Idx1() }
+func (self *BadExpression) Idx1() file.Idx { return self.To }
+func (self *BinaryExpression) Idx1() file.Idx { return self.Right.Idx1() }
+func (self *BooleanLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) }
+func (self *BracketExpression) Idx1() file.Idx { return self.RightBracket + 1 }
+func (self *CallExpression) Idx1() file.Idx { return self.RightParenthesis + 1 }
+func (self *ConditionalExpression) Idx1() file.Idx { return self.Test.Idx1() }
+func (self *DotExpression) Idx1() file.Idx { return self.Identifier.Idx1() }
+func (self *EmptyExpression) Idx1() file.Idx { return self.End }
+func (self *FunctionLiteral) Idx1() file.Idx { return self.Body.Idx1() }
+func (self *Identifier) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Name)) }
+func (self *NewExpression) Idx1() file.Idx { return self.RightParenthesis + 1 }
+func (self *NullLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + 4) } // "null"
+func (self *NumberLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) }
+func (self *ObjectLiteral) Idx1() file.Idx { return self.RightBrace }
+func (self *RegExpLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) }
+func (self *SequenceExpression) Idx1() file.Idx { return self.Sequence[0].Idx1() }
+func (self *StringLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) }
+func (self *ThisExpression) Idx1() file.Idx { return self.Idx }
+func (self *UnaryExpression) Idx1() file.Idx {
+ if self.Postfix {
+ return self.Operand.Idx1() + 2 // ++ --
+ }
+ return self.Operand.Idx1()
+}
+func (self *VariableExpression) Idx1() file.Idx {
+ if self.Initializer == nil {
+ return file.Idx(int(self.Idx) + len(self.Name) + 1)
+ }
+ return self.Initializer.Idx1()
+}
+
+func (self *BadStatement) Idx1() file.Idx { return self.To }
+func (self *BlockStatement) Idx1() file.Idx { return self.RightBrace + 1 }
+func (self *BranchStatement) Idx1() file.Idx { return self.Idx }
+func (self *CaseStatement) Idx1() file.Idx { return self.Consequent[len(self.Consequent)-1].Idx1() }
+func (self *CatchStatement) Idx1() file.Idx { return self.Body.Idx1() }
+func (self *DebuggerStatement) Idx1() file.Idx { return self.Debugger + 8 }
+func (self *DoWhileStatement) Idx1() file.Idx { return self.Test.Idx1() }
+func (self *EmptyStatement) Idx1() file.Idx { return self.Semicolon + 1 }
+func (self *ExpressionStatement) Idx1() file.Idx { return self.Expression.Idx1() }
+func (self *ForInStatement) Idx1() file.Idx { return self.Body.Idx1() }
+func (self *ForStatement) Idx1() file.Idx { return self.Body.Idx1() }
+func (self *FunctionStatement) Idx1() file.Idx { return self.Function.Idx1() }
+func (self *IfStatement) Idx1() file.Idx {
+ if self.Alternate != nil {
+ return self.Alternate.Idx1()
+ }
+ return self.Consequent.Idx1()
+}
+func (self *LabelledStatement) Idx1() file.Idx { return self.Colon + 1 }
+func (self *Program) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() }
+func (self *ReturnStatement) Idx1() file.Idx { return self.Return }
+func (self *SwitchStatement) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() }
+func (self *ThrowStatement) Idx1() file.Idx { return self.Throw }
+func (self *TryStatement) Idx1() file.Idx { return self.Try }
+func (self *VariableStatement) Idx1() file.Idx { return self.List[len(self.List)-1].Idx1() }
+func (self *WhileStatement) Idx1() file.Idx { return self.Body.Idx1() }
+func (self *WithStatement) Idx1() file.Idx { return self.Body.Idx1() }
diff --git a/vendor/github.com/robertkrimen/otto/builtin.go b/vendor/github.com/robertkrimen/otto/builtin.go
new file mode 100644
index 000000000..83f715083
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin.go
@@ -0,0 +1,353 @@
+package otto
+
+import (
+ "encoding/hex"
+ "math"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Global
+// builtinGlobal_eval implements the global eval function: a non-string
+// argument is returned unchanged; otherwise the argument is parsed and
+// evaluated as a program. An indirect call (call.eval == false) runs in
+// the global scope rather than the caller's scope.
+func builtinGlobal_eval(call FunctionCall) Value {
+	src := call.Argument(0)
+	if !src.IsString() {
+		return src
+	}
+	runtime := call.runtime
+	program := runtime.cmpl_parseOrThrow(src.string(), nil)
+	if !call.eval {
+		// Not a direct call to eval, so we enter the global ExecutionContext
+		runtime.enterGlobalScope()
+		defer runtime.leaveScope()
+	}
+	returnValue := runtime.cmpl_evaluate_nodeProgram(program, true)
+	if returnValue.isEmpty() {
+		// A program that produced no value evaluates to undefined.
+		return Value{}
+	}
+	return returnValue
+}
+
+// builtinGlobal_isNaN implements the global isNaN: the argument is
+// coerced to a number and tested for NaN.
+func builtinGlobal_isNaN(call FunctionCall) Value {
+	value := call.Argument(0).float64()
+	return toValue_bool(math.IsNaN(value))
+}
+
+// builtinGlobal_isFinite implements the global isFinite: true only when
+// the coerced number is neither NaN nor ±Inf.
+func builtinGlobal_isFinite(call FunctionCall) Value {
+	value := call.Argument(0).float64()
+	return toValue_bool(!math.IsNaN(value) && !math.IsInf(value, 0))
+}
+
+// radix 3 => 2 (ASCII 50) +47
+// radix 11 => A/a (ASCII 65/97) +54/+86
+// parseInt_alphabetTable[r] lists the characters that are valid digits in
+// radix r (r = 2..36); built once at package init using the ASCII offsets
+// noted above.
+var parseInt_alphabetTable = func() []string {
+	table := []string{"", "", "01"}
+	for radix := 3; radix <= 36; radix += 1 {
+		alphabet := table[radix-1]
+		if radix <= 10 {
+			alphabet += string(radix + 47)
+		} else {
+			// Letter digits: add both the upper- and lower-case forms.
+			alphabet += string(radix+54) + string(radix+86)
+		}
+		table = append(table, alphabet)
+	}
+	return table
+}()
+
+// digitValue maps an ASCII digit or letter to its numeric value,
+// returning 36 (larger than any legal digit) for anything else.
+func digitValue(chr rune) int {
+	switch {
+	case '0' <= chr && chr <= '9':
+		return int(chr - '0')
+	case 'a' <= chr && chr <= 'z':
+		return int(chr - 'a' + 10)
+	case 'A' <= chr && chr <= 'Z':
+		return int(chr - 'A' + 10)
+	}
+	return 36 // Larger than any legal digit value
+}
+
+// builtinGlobal_parseInt implements the global parseInt(string, radix):
+// whitespace is trimmed, an optional sign and 0x/0X prefix are handled,
+// the input is truncated at the first character that is not a valid digit
+// for the radix, and values that overflow int64 are re-accumulated as a
+// float64 (e.g. 0x8000000000000000).
+func builtinGlobal_parseInt(call FunctionCall) Value {
+	input := strings.TrimSpace(call.Argument(0).string())
+	if len(input) == 0 {
+		return NaNValue()
+	}
+
+	radix := int(toInt32(call.Argument(1)))
+
+	negative := false
+	switch input[0] {
+	case '+':
+		input = input[1:]
+	case '-':
+		negative = true
+		input = input[1:]
+	}
+
+	// strip: whether a leading 0x/0X prefix may still be recognized
+	// (only when the radix is unspecified or explicitly 16).
+	strip := true
+	if radix == 0 {
+		radix = 10
+	} else {
+		if radix < 2 || radix > 36 {
+			return NaNValue()
+		} else if radix != 16 {
+			strip = false
+		}
+	}
+
+	switch len(input) {
+	case 0:
+		return NaNValue()
+	case 1:
+	default:
+		if strip {
+			if input[0] == '0' && (input[1] == 'x' || input[1] == 'X') {
+				input = input[2:]
+				radix = 16
+			}
+		}
+	}
+
+	// Truncate the input at the first character that is not a digit in
+	// this base.
+	base := radix
+	index := 0
+	for ; index < len(input); index++ {
+		digit := digitValue(rune(input[index])) // If not ASCII, then an error anyway
+		if digit >= base {
+			break
+		}
+	}
+	input = input[0:index]
+
+	value, err := strconv.ParseInt(input, radix, 64)
+	if err != nil {
+		if err.(*strconv.NumError).Err == strconv.ErrRange {
+			base := float64(base)
+			// Could just be a very large number (e.g. 0x8000000000000000)
+			var value float64
+			for _, chr := range input {
+				digit := float64(digitValue(chr))
+				if digit >= base {
+					goto error
+				}
+				value = value*base + digit
+			}
+			if negative {
+				value *= -1
+			}
+			return toValue_float64(value)
+		}
+	error:
+		return NaNValue()
+	}
+	if negative {
+		value *= -1
+	}
+
+	return toValue_int64(value)
+}
+
+// parseFloat_matchBadSpecial matches infinity spellings (e.g. "inf",
+// "-Inf", lowercase "infinity") that strconv.ParseFloat would accept but
+// JavaScript's parseFloat must reject; the capitalized "Infinity" is
+// still admitted via parseFloat_matchValid below.
+var parseFloat_matchBadSpecial = regexp.MustCompile(`[\+\-]?(?:[Ii]nf$|infinity)`)
+// parseFloat_matchValid matches characters that may appear in a valid
+// numeric prefix.
+var parseFloat_matchValid = regexp.MustCompile(`[0-9eE\+\-\.]|Infinity`)
+
+func builtinGlobal_parseFloat(call FunctionCall) Value {
+	// Caveat emptor: This implementation does NOT match the specification
+	input := strings.TrimSpace(call.Argument(0).string())
+	if parseFloat_matchBadSpecial.MatchString(input) {
+		return NaNValue()
+	}
+	value, err := strconv.ParseFloat(input, 64)
+	if err != nil {
+		// Retry progressively shorter prefixes to emulate JavaScript's
+		// longest-valid-prefix parsing (e.g. "3.14abc" => 3.14).
+		for end := len(input); end > 0; end-- {
+			input := input[0:end]
+			if !parseFloat_matchValid.MatchString(input) {
+				return NaNValue()
+			}
+			value, err = strconv.ParseFloat(input, 64)
+			if err == nil {
+				break
+			}
+		}
+		if err != nil {
+			return NaNValue()
+		}
+	}
+	return toValue_float64(value)
+}
+
+// encodeURI/decodeURI
+
+// _builtinGlobal_encodeURI is the shared implementation of encodeURI and
+// encodeURIComponent: the input is taken as UTF-16 (lone surrogates raise
+// a URIError), re-encoded as UTF-8 bytes, and every byte matched by the
+// supplied escape regexp is percent-escaped.
+func _builtinGlobal_encodeURI(call FunctionCall, escape *regexp.Regexp) Value {
+	value := call.Argument(0)
+	var input []uint16
+	switch vl := value.value.(type) {
+	case []uint16:
+		input = vl
+	default:
+		input = utf16.Encode([]rune(value.string()))
+	}
+	if len(input) == 0 {
+		return toValue_string("")
+	}
+	output := []byte{}
+	length := len(input)
+	encode := make([]byte, 4)
+	for index := 0; index < length; {
+		value := input[index]
+		decode := utf16.Decode(input[index : index+1])
+		if value >= 0xDC00 && value <= 0xDFFF {
+			// A low surrogate with no preceding high surrogate.
+			panic(call.runtime.panicURIError("URI malformed"))
+		}
+		if value >= 0xD800 && value <= 0xDBFF {
+			index += 1
+			if index >= length {
+				// A high surrogate at the end of the input.
+				panic(call.runtime.panicURIError("URI malformed"))
+			}
+			// input = ..., value, value1, ...
+			value1 := input[index]
+			if value1 < 0xDC00 || value1 > 0xDFFF {
+				panic(call.runtime.panicURIError("URI malformed"))
+			}
+			// Combine the surrogate pair into a single code point.
+			decode = []rune{((rune(value) - 0xD800) * 0x400) + (rune(value1) - 0xDC00) + 0x10000}
+		}
+		index += 1
+		size := utf8.EncodeRune(encode, decode[0])
+		encode := encode[0:size]
+		output = append(output, encode...)
+	}
+	{
+		value := escape.ReplaceAllFunc(output, func(target []byte) []byte {
+			// Probably a better way of doing this
+			if target[0] == ' ' {
+				return []byte("%20")
+			}
+			return []byte(url.QueryEscape(string(target)))
+		})
+		return toValue_string(string(value))
+	}
+}
+
+// encodeURI_Regexp selects the characters eligible for escaping by
+// encodeURI; url.QueryEscape above still passes alphanumerics through.
+var encodeURI_Regexp = regexp.MustCompile(`([^~!@#$&*()=:/,;?+'])`)
+
+func builtinGlobal_encodeURI(call FunctionCall) Value {
+	return _builtinGlobal_encodeURI(call, encodeURI_Regexp)
+}
+
+// encodeURIComponent_Regexp additionally considers the URI reserved
+// characters for escaping, leaving only ~!*()' always unescaped.
+var encodeURIComponent_Regexp = regexp.MustCompile(`([^~!*()'])`)
+
+func builtinGlobal_encodeURIComponent(call FunctionCall) Value {
+	return _builtinGlobal_encodeURI(call, encodeURIComponent_Regexp)
+}
+
+// 3B/2F/3F/3A/40/26/3D/2B/24/2C/23
+// decodeURI_guard matches percent-escapes of the URI reserved characters
+// (';' '/' '?' ':' '@' '&' '=' '+' '$' ',' '#'), which decodeURI must
+// leave encoded.
+var decodeURI_guard = regexp.MustCompile(`(?i)(?:%)(3B|2F|3F|3A|40|26|3D|2B|24|2C|23)`)
+
+// _decodeURI percent-decodes input; when reserve is true, escapes of
+// reserved characters are protected first (rewritten %XX => %25XX) so
+// they survive decoding. The boolean result reports malformed input.
+func _decodeURI(input string, reserve bool) (string, bool) {
+	if reserve {
+		input = decodeURI_guard.ReplaceAllString(input, "%25$1")
+	}
+	input = strings.Replace(input, "+", "%2B", -1) // Ugly hack to make QueryUnescape work with our use case
+	output, err := url.QueryUnescape(input)
+	if err != nil || !utf8.ValidString(output) {
+		return "", true
+	}
+	return output, false
+}
+
+// builtinGlobal_decodeURI implements decodeURI (reserved escapes kept).
+func builtinGlobal_decodeURI(call FunctionCall) Value {
+	output, err := _decodeURI(call.Argument(0).string(), true)
+	if err {
+		panic(call.runtime.panicURIError("URI malformed"))
+	}
+	return toValue_string(output)
+}
+
+// builtinGlobal_decodeURIComponent decodes every escape, including those
+// of reserved characters.
+func builtinGlobal_decodeURIComponent(call FunctionCall) Value {
+	output, err := _decodeURI(call.Argument(0).string(), false)
+	if err {
+		panic(call.runtime.panicURIError("URI malformed"))
+	}
+	return toValue_string(output)
+}
+
+// escape/unescape
+
+// builtin_shouldEscape reports whether escape() must encode the byte:
+// ASCII alphanumerics and the characters *_+-./ pass through unescaped.
+func builtin_shouldEscape(chr byte) bool {
+	if 'A' <= chr && chr <= 'Z' || 'a' <= chr && chr <= 'z' || '0' <= chr && chr <= '9' {
+		return false
+	}
+	return !strings.ContainsRune("*_+-./", rune(chr))
+}
+
+// escapeBase16 provides the upper-case hex digits used by builtin_escape.
+const escapeBase16 = "0123456789ABCDEF"
+
+// builtin_escape implements the legacy global escape(): UTF-16 code units
+// below 256 become %XX, all others become %uXXXX.
+func builtin_escape(input string) string {
+	output := make([]byte, 0, len(input))
+	length := len(input)
+	for index := 0; index < length; {
+		if builtin_shouldEscape(input[index]) {
+			chr, width := utf8.DecodeRuneInString(input[index:])
+			chr16 := utf16.Encode([]rune{chr})[0]
+			if 256 > chr16 {
+				output = append(output, '%',
+					escapeBase16[chr16>>4],
+					escapeBase16[chr16&15],
+				)
+			} else {
+				output = append(output, '%', 'u',
+					escapeBase16[chr16>>12],
+					escapeBase16[(chr16>>8)&15],
+					escapeBase16[(chr16>>4)&15],
+					escapeBase16[chr16&15],
+				)
+			}
+			index += width
+
+		} else {
+			output = append(output, input[index])
+			index += 1
+		}
+	}
+	return string(output)
+}
+
+// builtin_unescape implements the legacy global unescape(): %uXXXX and
+// %XX sequences are decoded to the corresponding UTF-16 code unit;
+// malformed escapes are copied through verbatim.
+func builtin_unescape(input string) string {
+	output := make([]rune, 0, len(input))
+	length := len(input)
+	for index := 0; index < length; {
+		if input[index] == '%' {
+			// %uXXXX form (needs at least 6 bytes remaining).
+			if index <= length-6 && input[index+1] == 'u' {
+				byte16, err := hex.DecodeString(input[index+2 : index+6])
+				if err == nil {
+					value := uint16(byte16[0])<<8 + uint16(byte16[1])
+					chr := utf16.Decode([]uint16{value})[0]
+					output = append(output, chr)
+					index += 6
+					continue
+				}
+			}
+			// %XX form (needs at least 3 bytes remaining).
+			if index <= length-3 {
+				byte8, err := hex.DecodeString(input[index+1 : index+3])
+				if err == nil {
+					value := uint16(byte8[0])
+					chr := utf16.Decode([]uint16{value})[0]
+					output = append(output, chr)
+					index += 3
+					continue
+				}
+			}
+		}
+		output = append(output, rune(input[index]))
+		index += 1
+	}
+	return string(output)
+}
+
+// builtinGlobal_escape implements the global escape function.
+func builtinGlobal_escape(call FunctionCall) Value {
+	return toValue_string(builtin_escape(call.Argument(0).string()))
+}
+
+// builtinGlobal_unescape implements the global unescape function.
+func builtinGlobal_unescape(call FunctionCall) Value {
+	return toValue_string(builtin_unescape(call.Argument(0).string()))
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_array.go b/vendor/github.com/robertkrimen/otto/builtin_array.go
new file mode 100644
index 000000000..557ffc024
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_array.go
@@ -0,0 +1,681 @@
+package otto
+
+import (
+ "strconv"
+ "strings"
+)
+
+// Array
+
+// builtinArray implements Array invoked as a function; it behaves like
+// the constructor.
+func builtinArray(call FunctionCall) Value {
+	return toValue_object(builtinNewArrayNative(call.runtime, call.ArgumentList))
+}
+
+// builtinNewArray implements the Array constructor (new Array(...)).
+func builtinNewArray(self *_object, argumentList []Value) Value {
+	return toValue_object(builtinNewArrayNative(self.runtime, argumentList))
+}
+
+// builtinNewArrayNative: a single numeric argument becomes the length of
+// a new (empty) array; any other argument list becomes the elements.
+func builtinNewArrayNative(runtime *_runtime, argumentList []Value) *_object {
+	if len(argumentList) == 1 {
+		firstArgument := argumentList[0]
+		if firstArgument.IsNumber() {
+			return runtime.newArray(arrayUint32(runtime, firstArgument))
+		}
+	}
+	return runtime.newArrayOf(argumentList)
+}
+
+// builtinArray_toString delegates to a callable join property, falling
+// back to Object.prototype.toString when join is not callable.
+func builtinArray_toString(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	join := thisObject.get("join")
+	if join.isCallable() {
+		join := join._object()
+		return join.call(call.This, call.ArgumentList, false, nativeFrame)
+	}
+	return builtinObject_toString(call)
+}
+
+// builtinArray_toLocaleString joins the elements with "," using each
+// element's toLocaleString; empty/undefined/null entries contribute "".
+func builtinArray_toLocaleString(call FunctionCall) Value {
+	separator := ","
+	thisObject := call.thisObject()
+	length := int64(toUint32(thisObject.get("length")))
+	if length == 0 {
+		return toValue_string("")
+	}
+	stringList := make([]string, 0, length)
+	for index := int64(0); index < length; index += 1 {
+		value := thisObject.get(arrayIndexToString(index))
+		stringValue := ""
+		switch value.kind {
+		case valueEmpty, valueUndefined, valueNull:
+		default:
+			// An element without a callable toLocaleString is a TypeError.
+			object := call.runtime.toObject(value)
+			toLocaleString := object.get("toLocaleString")
+			if !toLocaleString.isCallable() {
+				panic(call.runtime.panicTypeError())
+			}
+			stringValue = toLocaleString.call(call.runtime, toValue_object(object)).string()
+		}
+		stringList = append(stringList, stringValue)
+	}
+	return toValue_string(strings.Join(stringList, separator))
+}
+
+// builtinArray_concat returns a new array built from this array followed
+// by the arguments; array arguments are flattened one level (holes are
+// preserved as empty values), everything else is appended as-is.
+func builtinArray_concat(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	valueArray := []Value{}
+	source := append([]Value{toValue_object(thisObject)}, call.ArgumentList...)
+	for _, item := range source {
+		switch item.kind {
+		case valueObject:
+			object := item._object()
+			if isArray(object) {
+				length := object.get("length").number().int64
+				for index := int64(0); index < length; index += 1 {
+					name := strconv.FormatInt(index, 10)
+					if object.hasProperty(name) {
+						valueArray = append(valueArray, object.get(name))
+					} else {
+						// Preserve the hole.
+						valueArray = append(valueArray, Value{})
+					}
+				}
+				continue
+			}
+			fallthrough
+		default:
+			valueArray = append(valueArray, item)
+		}
+	}
+	return toValue_object(call.runtime.newArrayOf(valueArray))
+}
+
+// builtinArray_shift removes and returns the first element, moving the
+// remaining elements down one index (preserving holes) and decrementing
+// length; returns undefined for an empty array.
+func builtinArray_shift(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	length := int64(toUint32(thisObject.get("length")))
+	if 0 == length {
+		thisObject.put("length", toValue_int64(0), true)
+		return Value{}
+	}
+	first := thisObject.get("0")
+	for index := int64(1); index < length; index++ {
+		from := arrayIndexToString(index)
+		to := arrayIndexToString(index - 1)
+		if thisObject.hasProperty(from) {
+			thisObject.put(to, thisObject.get(from), true)
+		} else {
+			thisObject.delete(to, true)
+		}
+	}
+	thisObject.delete(arrayIndexToString(length-1), true)
+	thisObject.put("length", toValue_int64(length-1), true)
+	return first
+}
+
+// builtinArray_push appends the arguments and returns the new length.
+func builtinArray_push(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	itemList := call.ArgumentList
+	index := int64(toUint32(thisObject.get("length")))
+	for len(itemList) > 0 {
+		thisObject.put(arrayIndexToString(index), itemList[0], true)
+		itemList = itemList[1:]
+		index += 1
+	}
+	length := toValue_int64(index)
+	thisObject.put("length", length, true)
+	return length
+}
+
+// builtinArray_pop removes and returns the last element, decrementing
+// length; returns undefined for an empty array.
+func builtinArray_pop(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	length := int64(toUint32(thisObject.get("length")))
+	if 0 == length {
+		thisObject.put("length", toValue_uint32(0), true)
+		return Value{}
+	}
+	last := thisObject.get(arrayIndexToString(length - 1))
+	thisObject.delete(arrayIndexToString(length-1), true)
+	thisObject.put("length", toValue_int64(length-1), true)
+	return last
+}
+
+// builtinArray_join concatenates the elements with the given separator
+// (default ","); empty/undefined/null entries contribute "".
+func builtinArray_join(call FunctionCall) Value {
+	separator := ","
+	{
+		argument := call.Argument(0)
+		if argument.IsDefined() {
+			separator = argument.string()
+		}
+	}
+	thisObject := call.thisObject()
+	length := int64(toUint32(thisObject.get("length")))
+	if length == 0 {
+		return toValue_string("")
+	}
+	stringList := make([]string, 0, length)
+	for index := int64(0); index < length; index += 1 {
+		value := thisObject.get(arrayIndexToString(index))
+		stringValue := ""
+		switch value.kind {
+		case valueEmpty, valueUndefined, valueNull:
+		default:
+			stringValue = value.string()
+		}
+		stringList = append(stringList, stringValue)
+	}
+	return toValue_string(strings.Join(stringList, separator))
+}
+
+// builtinArray_splice removes deleteCount elements starting at start,
+// inserts the remaining call arguments in their place, shifts the tail
+// left or right accordingly (preserving holes), fixes up length, and
+// returns the removed elements as a new array.
+func builtinArray_splice(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	length := int64(toUint32(thisObject.get("length")))
+
+	// start and deleteCount are range-clamped against the current length.
+	start := valueToRangeIndex(call.Argument(0), length, false)
+	deleteCount := valueToRangeIndex(call.Argument(1), int64(length)-start, true)
+	valueArray := make([]Value, deleteCount)
+
+	// Capture the removed elements (holes stay as empty Values).
+	for index := int64(0); index < deleteCount; index++ {
+		indexString := arrayIndexToString(int64(start + index))
+		if thisObject.hasProperty(indexString) {
+			valueArray[index] = thisObject.get(indexString)
+		}
+	}
+
+	// 0, <1, 2, 3, 4>, 5, 6, 7
+	// a, b
+	// length 8 - delete 4 @ start 1
+
+	itemList := []Value{}
+	itemCount := int64(len(call.ArgumentList))
+	if itemCount > 2 {
+		itemCount -= 2 // Less the first two arguments
+		itemList = call.ArgumentList[2:]
+	} else {
+		itemCount = 0
+	}
+	if itemCount < deleteCount {
+		// The Object/Array is shrinking
+		stop := int64(length) - deleteCount
+		// The new length of the Object/Array before
+		// appending the itemList remainder
+		// Stopping at the lower bound of the insertion:
+		// Move an item from the after the deleted portion
+		// to a position after the inserted portion
+		for index := start; index < stop; index++ {
+			from := arrayIndexToString(index + deleteCount) // Position just after deletion
+			to := arrayIndexToString(index + itemCount)     // Position just after splice (insertion)
+			if thisObject.hasProperty(from) {
+				thisObject.put(to, thisObject.get(from), true)
+			} else {
+				thisObject.delete(to, true)
+			}
+		}
+		// Delete off the end
+		// We don't bother to delete below <stop + itemCount> (if any) since those
+		// will be overwritten anyway
+		for index := int64(length); index > (stop + itemCount); index-- {
+			thisObject.delete(arrayIndexToString(index-1), true)
+		}
+	} else if itemCount > deleteCount {
+		// The Object/Array is growing
+		// The itemCount is greater than the deleteCount, so we do
+		// not have to worry about overwriting what we should be moving
+		// ---
+		// Starting from the upper bound of the deletion:
+		// Move an item from the after the deleted portion
+		// to a position after the inserted portion
+		for index := int64(length) - deleteCount; index > start; index-- {
+			from := arrayIndexToString(index + deleteCount - 1)
+			to := arrayIndexToString(index + itemCount - 1)
+			if thisObject.hasProperty(from) {
+				thisObject.put(to, thisObject.get(from), true)
+			} else {
+				thisObject.delete(to, true)
+			}
+		}
+	}
+
+	// Write the inserted items and the corrected length.
+	for index := int64(0); index < itemCount; index++ {
+		thisObject.put(arrayIndexToString(index+start), itemList[index], true)
+	}
+	thisObject.put("length", toValue_int64(int64(length)+itemCount-deleteCount), true)
+
+	return toValue_object(call.runtime.newArrayOf(valueArray))
+}
+
+// builtinArray_slice returns a new array holding the elements in
+// [start, end); missing elements remain missing (empty Values) in the
+// copy.
+func builtinArray_slice(call FunctionCall) Value {
+	thisObject := call.thisObject()
+
+	length := int64(toUint32(thisObject.get("length")))
+	start, end := rangeStartEnd(call.ArgumentList, length, false)
+
+	if start >= end {
+		// Always an empty array
+		return toValue_object(call.runtime.newArray(0))
+	}
+	sliceLength := end - start
+	sliceValueArray := make([]Value, sliceLength)
+
+	for index := int64(0); index < sliceLength; index++ {
+		from := arrayIndexToString(index + start)
+		if thisObject.hasProperty(from) {
+			sliceValueArray[index] = thisObject.get(from)
+		}
+	}
+
+	return toValue_object(call.runtime.newArrayOf(sliceValueArray))
+}
+
+// builtinArray_unshift shifts the existing elements right by the number
+// of arguments (preserving holes), writes the arguments at the front,
+// and returns the new length.
+func builtinArray_unshift(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	length := int64(toUint32(thisObject.get("length")))
+	itemList := call.ArgumentList
+	itemCount := int64(len(itemList))
+
+	// Shift right from the end so nothing is overwritten prematurely.
+	for index := length; index > 0; index-- {
+		from := arrayIndexToString(index - 1)
+		to := arrayIndexToString(index + itemCount - 1)
+		if thisObject.hasProperty(from) {
+			thisObject.put(to, thisObject.get(from), true)
+		} else {
+			thisObject.delete(to, true)
+		}
+	}
+
+	for index := int64(0); index < itemCount; index++ {
+		thisObject.put(arrayIndexToString(index), itemList[index], true)
+	}
+
+	newLength := toValue_int64(length + itemCount)
+	thisObject.put("length", newLength, true)
+	return newLength
+}
+
+// builtinArray_reverse reverses in place by swapping mirrored pairs,
+// taking care to preserve holes (missing properties), and returns this.
+func builtinArray_reverse(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	length := int64(toUint32(thisObject.get("length")))
+
+	// lower/upper track the mirrored pair being exchanged.
+	lower := struct {
+		name   string
+		index  int64
+		exists bool
+	}{}
+	upper := lower
+
+	lower.index = 0
+	middle := length / 2 // Division will floor
+
+	for lower.index != middle {
+		lower.name = arrayIndexToString(lower.index)
+		upper.index = length - lower.index - 1
+		upper.name = arrayIndexToString(upper.index)
+
+		lower.exists = thisObject.hasProperty(lower.name)
+		upper.exists = thisObject.hasProperty(upper.name)
+
+		if lower.exists && upper.exists {
+			lowerValue := thisObject.get(lower.name)
+			upperValue := thisObject.get(upper.name)
+			thisObject.put(lower.name, upperValue, true)
+			thisObject.put(upper.name, lowerValue, true)
+		} else if !lower.exists && upper.exists {
+			// Move the value and leave a hole on the other side.
+			value := thisObject.get(upper.name)
+			thisObject.delete(upper.name, true)
+			thisObject.put(lower.name, value, true)
+		} else if lower.exists && !upper.exists {
+			value := thisObject.get(lower.name)
+			thisObject.delete(lower.name, true)
+			thisObject.put(upper.name, value, true)
+		} else {
+			// Nothing happens.
+		}
+
+		lower.index += 1
+	}
+
+	return call.This
+}
+
+// sortCompare orders two elements for Array.prototype.sort: missing
+// properties sort after undefined values, which sort after everything
+// else; remaining pairs are compared as strings, or via the user-supplied
+// compare function when one is given.
+func sortCompare(thisObject *_object, index0, index1 uint, compare *_object) int {
+	j := struct {
+		name    string
+		exists  bool
+		defined bool
+		value   string
+	}{}
+	k := j
+	j.name = arrayIndexToString(int64(index0))
+	j.exists = thisObject.hasProperty(j.name)
+	k.name = arrayIndexToString(int64(index1))
+	k.exists = thisObject.hasProperty(k.name)
+
+	// Holes sort last.
+	if !j.exists && !k.exists {
+		return 0
+	} else if !j.exists {
+		return 1
+	} else if !k.exists {
+		return -1
+	}
+
+	x := thisObject.get(j.name)
+	y := thisObject.get(k.name)
+	j.defined = x.IsDefined()
+	k.defined = y.IsDefined()
+
+	// Undefined values sort after defined ones.
+	if !j.defined && !k.defined {
+		return 0
+	} else if !j.defined {
+		return 1
+	} else if !k.defined {
+		return -1
+	}
+
+	if compare == nil {
+		// Default ordering: compare the string conversions.
+		j.value = x.string()
+		k.value = y.string()
+
+		if j.value == k.value {
+			return 0
+		} else if j.value < k.value {
+			return -1
+		}
+
+		return 1
+	}
+
+	return int(toInt32(compare.call(Value{}, []Value{x, y}, false, nativeFrame)))
+}
+
+// arraySortSwap exchanges the elements at two indices, preserving holes
+// (a missing property stays missing after the swap).
+func arraySortSwap(thisObject *_object, index0, index1 uint) {
+
+	j := struct {
+		name   string
+		exists bool
+	}{}
+	k := j
+
+	j.name = arrayIndexToString(int64(index0))
+	j.exists = thisObject.hasProperty(j.name)
+	k.name = arrayIndexToString(int64(index1))
+	k.exists = thisObject.hasProperty(k.name)
+
+	if j.exists && k.exists {
+		jValue := thisObject.get(j.name)
+		kValue := thisObject.get(k.name)
+		thisObject.put(j.name, kValue, true)
+		thisObject.put(k.name, jValue, true)
+	} else if !j.exists && k.exists {
+		value := thisObject.get(k.name)
+		thisObject.delete(k.name, true)
+		thisObject.put(j.name, value, true)
+	} else if j.exists && !k.exists {
+		value := thisObject.get(j.name)
+		thisObject.delete(j.name, true)
+		thisObject.put(k.name, value, true)
+	} else {
+		// Nothing happens.
+	}
+}
+
+// arraySortQuickPartition performs a three-way partition around the value
+// at pivot: elements comparing less than the pivot end up before the
+// first returned index, elements comparing equal between the two returned
+// indices.
+func arraySortQuickPartition(thisObject *_object, left, right, pivot uint, compare *_object) (uint, uint) {
+	arraySortSwap(thisObject, pivot, right) // Right is now the pivot value
+	cursor := left
+	cursor2 := left
+	for index := left; index < right; index++ {
+		comparison := sortCompare(thisObject, index, right, compare) // Compare to the pivot value
+		if comparison < 0 {
+			arraySortSwap(thisObject, index, cursor)
+			if cursor < cursor2 {
+				arraySortSwap(thisObject, index, cursor2)
+			}
+			cursor += 1
+			cursor2 += 1
+		} else if comparison == 0 {
+			arraySortSwap(thisObject, index, cursor2)
+			cursor2 += 1
+		}
+	}
+	arraySortSwap(thisObject, cursor2, right)
+	return cursor, cursor2
+}
+
+// arraySortQuickSort recursively sorts the inclusive range [left, right];
+// the equal-to-pivot run between the two partition points is excluded
+// from the recursion.
+func arraySortQuickSort(thisObject *_object, left, right uint, compare *_object) {
+	if left < right {
+		middle := left + (right-left)/2
+		pivot, pivot2 := arraySortQuickPartition(thisObject, left, right, middle, compare)
+		if pivot > 0 {
+			arraySortQuickSort(thisObject, left, pivot-1, compare)
+		}
+		arraySortQuickSort(thisObject, pivot2+1, right, compare)
+	}
+}
+
+// builtinArray_sort implements Array.prototype.sort: the optional
+// argument must be callable (else TypeError); with no argument elements
+// compare as strings (see sortCompare). Returns this.
+func builtinArray_sort(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	length := uint(toUint32(thisObject.get("length")))
+	compareValue := call.Argument(0)
+	compare := compareValue._object()
+	if compareValue.IsUndefined() {
+	} else if !compareValue.isCallable() {
+		panic(call.runtime.panicTypeError())
+	}
+	if length > 1 {
+		arraySortQuickSort(thisObject, 0, length-1, compare)
+	}
+	return call.This
+}
+
+// builtinArray_isArray implements Array.isArray.
+func builtinArray_isArray(call FunctionCall) Value {
+	return toValue_bool(isArray(call.Argument(0)._object()))
+}
+
+// builtinArray_indexOf searches forward from the optional fromIndex
+// (negative values count back from the end) using strict equality;
+// returns -1 when not found. Holes are skipped.
+func builtinArray_indexOf(call FunctionCall) Value {
+	thisObject, matchValue := call.thisObject(), call.Argument(0)
+	if length := int64(toUint32(thisObject.get("length"))); length > 0 {
+		index := int64(0)
+		if len(call.ArgumentList) > 1 {
+			index = call.Argument(1).number().int64
+		}
+		if index < 0 {
+			if index += length; index < 0 {
+				index = 0
+			}
+		} else if index >= length {
+			index = -1
+		}
+		for ; index >= 0 && index < length; index++ {
+			name := arrayIndexToString(int64(index))
+			if !thisObject.hasProperty(name) {
+				continue
+			}
+			value := thisObject.get(name)
+			if strictEqualityComparison(matchValue, value) {
+				return toValue_uint32(uint32(index))
+			}
+		}
+	}
+	return toValue_int(-1)
+}
+
+// builtinArray_lastIndexOf is the backward-searching counterpart of
+// indexOf, starting at the optional fromIndex (default length-1).
+func builtinArray_lastIndexOf(call FunctionCall) Value {
+	thisObject, matchValue := call.thisObject(), call.Argument(0)
+	length := int64(toUint32(thisObject.get("length")))
+	index := length - 1
+	if len(call.ArgumentList) > 1 {
+		index = call.Argument(1).number().int64
+	}
+	if 0 > index {
+		index += length
+	}
+	if index > length {
+		index = length - 1
+	} else if 0 > index {
+		return toValue_int(-1)
+	}
+	for ; index >= 0; index-- {
+		name := arrayIndexToString(int64(index))
+		if !thisObject.hasProperty(name) {
+			continue
+		}
+		value := thisObject.get(name)
+		if strictEqualityComparison(matchValue, value) {
+			return toValue_uint32(uint32(index))
+		}
+	}
+	return toValue_int(-1)
+}
+
+// builtinArray_every returns false at the first present element for which
+// the callback is falsy, true otherwise; holes are skipped. A
+// non-callable callback raises a TypeError.
+func builtinArray_every(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	this := toValue_object(thisObject)
+	if iterator := call.Argument(0); iterator.isCallable() {
+		length := int64(toUint32(thisObject.get("length")))
+		callThis := call.Argument(1)
+		for index := int64(0); index < length; index++ {
+			if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+				if value := thisObject.get(key); iterator.call(call.runtime, callThis, value, toValue_int64(index), this).bool() {
+					continue
+				}
+				return falseValue
+			}
+		}
+		return trueValue
+	}
+	panic(call.runtime.panicTypeError())
+}
+
+// builtinArray_some returns true at the first present element for which
+// the callback is truthy; holes are skipped.
+func builtinArray_some(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	this := toValue_object(thisObject)
+	if iterator := call.Argument(0); iterator.isCallable() {
+		length := int64(toUint32(thisObject.get("length")))
+		callThis := call.Argument(1)
+		for index := int64(0); index < length; index++ {
+			if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+				if value := thisObject.get(key); iterator.call(call.runtime, callThis, value, toValue_int64(index), this).bool() {
+					return trueValue
+				}
+			}
+		}
+		return falseValue
+	}
+	panic(call.runtime.panicTypeError())
+}
+
+// builtinArray_forEach invokes the callback for every present element and
+// returns undefined.
+func builtinArray_forEach(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	this := toValue_object(thisObject)
+	if iterator := call.Argument(0); iterator.isCallable() {
+		length := int64(toUint32(thisObject.get("length")))
+		callThis := call.Argument(1)
+		for index := int64(0); index < length; index++ {
+			if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+				iterator.call(call.runtime, callThis, thisObject.get(key), toValue_int64(index), this)
+			}
+		}
+		return Value{}
+	}
+	panic(call.runtime.panicTypeError())
+}
+
+// builtinArray_map returns a new array of the callback's results; holes
+// map to empty values. A non-callable callback raises a TypeError.
+func builtinArray_map(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	this := toValue_object(thisObject)
+	if iterator := call.Argument(0); iterator.isCallable() {
+		length := int64(toUint32(thisObject.get("length")))
+		callThis := call.Argument(1)
+		values := make([]Value, length)
+		for index := int64(0); index < length; index++ {
+			if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+				values[index] = iterator.call(call.runtime, callThis, thisObject.get(key), index, this)
+			} else {
+				values[index] = Value{}
+			}
+		}
+		return toValue_object(call.runtime.newArrayOf(values))
+	}
+	panic(call.runtime.panicTypeError())
+}
+
+// builtinArray_filter returns a new array holding the present elements
+// for which the callback is truthy.
+func builtinArray_filter(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	this := toValue_object(thisObject)
+	if iterator := call.Argument(0); iterator.isCallable() {
+		length := int64(toUint32(thisObject.get("length")))
+		callThis := call.Argument(1)
+		values := make([]Value, 0)
+		for index := int64(0); index < length; index++ {
+			if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+				value := thisObject.get(key)
+				if iterator.call(call.runtime, callThis, value, index, this).bool() {
+					values = append(values, value)
+				}
+			}
+		}
+		return toValue_object(call.runtime.newArrayOf(values))
+	}
+	panic(call.runtime.panicTypeError())
+}
+
+// builtinArray_reduce folds the array left-to-right with the callback.
+// Without an initial value the first present element seeds the
+// accumulator. An empty array with no initial value, or a non-callable
+// callback, falls through to the TypeError panic.
+func builtinArray_reduce(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	this := toValue_object(thisObject)
+	if iterator := call.Argument(0); iterator.isCallable() {
+		initial := len(call.ArgumentList) > 1
+		start := call.Argument(1)
+		length := int64(toUint32(thisObject.get("length")))
+		index := int64(0)
+		if length > 0 || initial {
+			var accumulator Value
+			if !initial {
+				// Seed the accumulator from the first present element.
+				for ; index < length; index++ {
+					if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+						accumulator = thisObject.get(key)
+						index++
+						break
+					}
+				}
+			} else {
+				accumulator = start
+			}
+			for ; index < length; index++ {
+				if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+					// NOTE(review): the index argument is passed as its
+					// string key here, unlike map/filter — confirm intended.
+					accumulator = iterator.call(call.runtime, Value{}, accumulator, thisObject.get(key), key, this)
+				}
+			}
+			return accumulator
+		}
+	}
+	panic(call.runtime.panicTypeError())
+}
+
+// builtinArray_reduceRight is the right-to-left counterpart of reduce,
+// with the same seeding and TypeError behavior.
+func builtinArray_reduceRight(call FunctionCall) Value {
+	thisObject := call.thisObject()
+	this := toValue_object(thisObject)
+	if iterator := call.Argument(0); iterator.isCallable() {
+		initial := len(call.ArgumentList) > 1
+		start := call.Argument(1)
+		length := int64(toUint32(thisObject.get("length")))
+		if length > 0 || initial {
+			index := length - 1
+			var accumulator Value
+			if !initial {
+				// Seed the accumulator from the last present element.
+				for ; index >= 0; index-- {
+					if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+						accumulator = thisObject.get(key)
+						index--
+						break
+					}
+				}
+			} else {
+				accumulator = start
+			}
+			for ; index >= 0; index-- {
+				if key := arrayIndexToString(index); thisObject.hasProperty(key) {
+					accumulator = iterator.call(call.runtime, Value{}, accumulator, thisObject.get(key), key, this)
+				}
+			}
+			return accumulator
+		}
+	}
+	panic(call.runtime.panicTypeError())
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_boolean.go b/vendor/github.com/robertkrimen/otto/builtin_boolean.go
new file mode 100644
index 000000000..59b8e789b
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_boolean.go
@@ -0,0 +1,28 @@
+package otto
+
+// Boolean
+
+// builtinBoolean implements Boolean invoked as a function: a plain
+// ToBoolean conversion of the first argument.
+func builtinBoolean(call FunctionCall) Value {
+	return toValue_bool(call.Argument(0).bool())
+}
+
+// builtinNewBoolean implements the Boolean constructor, producing a
+// Boolean wrapper object.
+func builtinNewBoolean(self *_object, argumentList []Value) Value {
+	return toValue_object(self.runtime.newBoolean(valueOfArrayIndex(argumentList, 0)))
+}
+
+// builtinBoolean_toString returns "true"/"false"; a non-Boolean receiver
+// is unwrapped to its primitive value first.
+func builtinBoolean_toString(call FunctionCall) Value {
+	value := call.This
+	if !value.IsBoolean() {
+		// Will throw a TypeError if ThisObject is not a Boolean
+		value = call.thisClassObject("Boolean").primitiveValue()
+	}
+	return toValue_string(value.string())
+}
+
+// builtinBoolean_valueOf returns the receiver's primitive boolean value,
+// unwrapping a Boolean object when necessary.
+func builtinBoolean_valueOf(call FunctionCall) Value {
+	value := call.This
+	if !value.IsBoolean() {
+		value = call.thisClassObject("Boolean").primitiveValue()
+	}
+	return value
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_date.go b/vendor/github.com/robertkrimen/otto/builtin_date.go
new file mode 100644
index 000000000..f20bf8e3f
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_date.go
@@ -0,0 +1,615 @@
+package otto
+
+import (
+ "math"
+ Time "time"
+)
+
+// Date
+
+const (
+ // TODO Be like V8?
+ // builtinDate_goDateTimeLayout = "Mon Jan 2 2006 15:04:05 GMT-0700 (MST)"
+ builtinDate_goDateTimeLayout = Time.RFC1123 // "Mon, 02 Jan 2006 15:04:05 MST"
+ builtinDate_goDateLayout = "Mon, 02 Jan 2006"
+ builtinDate_goTimeLayout = "15:04:05 MST"
+)
+
+func builtinDate(call FunctionCall) Value {
+ date := &_dateObject{}
+ date.Set(newDateTime([]Value{}, Time.Local))
+ return toValue_string(date.Time().Format(builtinDate_goDateTimeLayout))
+}
+
+func builtinNewDate(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newDate(newDateTime(argumentList, Time.Local)))
+}
+
+func builtinDate_toString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Local().Format(builtinDate_goDateTimeLayout))
+}
+
+func builtinDate_toDateString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Local().Format(builtinDate_goDateLayout))
+}
+
+func builtinDate_toTimeString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Local().Format(builtinDate_goTimeLayout))
+}
+
+func builtinDate_toUTCString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Format(builtinDate_goDateTimeLayout))
+}
+
+func builtinDate_toISOString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Format("2006-01-02T15:04:05.000Z"))
+}
+
+func builtinDate_toJSON(call FunctionCall) Value {
+ object := call.thisObject()
+ value := object.DefaultValue(defaultValueHintNumber) // FIXME object.primitiveNumberValue
+ { // FIXME value.isFinite
+ value := value.float64()
+ if math.IsNaN(value) || math.IsInf(value, 0) {
+ return nullValue
+ }
+ }
+ toISOString := object.get("toISOString")
+ if !toISOString.isCallable() {
+ // FIXME
+ panic(call.runtime.panicTypeError())
+ }
+ return toISOString.call(call.runtime, toValue_object(object), []Value{})
+}
+
+func builtinDate_toGMTString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
+}
+
+func builtinDate_getTime(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ // We do this (convert away from a float) so the user
+ // does not get something back in exponential notation
+ return toValue_int64(int64(date.Epoch()))
+}
+
+func builtinDate_setTime(call FunctionCall) Value {
+ object := call.thisObject()
+ date := dateObjectOf(call.runtime, call.thisObject())
+ date.Set(call.Argument(0).float64())
+ object.value = date
+ return date.Value()
+}
+
+func _builtinDate_beforeSet(call FunctionCall, argumentLimit int, timeLocal bool) (*_object, *_dateObject, *_ecmaTime, []int) {
+ object := call.thisObject()
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return nil, nil, nil, nil
+ }
+
+ if argumentLimit > len(call.ArgumentList) {
+ argumentLimit = len(call.ArgumentList)
+ }
+
+ if argumentLimit == 0 {
+ object.value = invalidDateObject
+ return nil, nil, nil, nil
+ }
+
+ valueList := make([]int, argumentLimit)
+ for index := 0; index < argumentLimit; index++ {
+ value := call.ArgumentList[index]
+ nm := value.number()
+ switch nm.kind {
+ case numberInteger, numberFloat:
+ default:
+ object.value = invalidDateObject
+ return nil, nil, nil, nil
+ }
+ valueList[index] = int(nm.int64)
+ }
+ baseTime := date.Time()
+ if timeLocal {
+ baseTime = baseTime.Local()
+ }
+ ecmaTime := ecmaTime(baseTime)
+ return object, &date, &ecmaTime, valueList
+}
+
+func builtinDate_parse(call FunctionCall) Value {
+ date := call.Argument(0).string()
+ return toValue_float64(dateParse(date))
+}
+
+func builtinDate_UTC(call FunctionCall) Value {
+ return toValue_float64(newDateTime(call.ArgumentList, Time.UTC))
+}
+
+func builtinDate_now(call FunctionCall) Value {
+ call.ArgumentList = []Value(nil)
+ return builtinDate_UTC(call)
+}
+
+// This is a placeholder
+func builtinDate_toLocaleString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Local().Format("2006-01-02 15:04:05"))
+}
+
+// This is a placeholder
+func builtinDate_toLocaleDateString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Local().Format("2006-01-02"))
+}
+
+// This is a placeholder
+func builtinDate_toLocaleTimeString(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return toValue_string("Invalid Date")
+ }
+ return toValue_string(date.Time().Local().Format("15:04:05"))
+}
+
+func builtinDate_valueOf(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return date.Value()
+}
+
+func builtinDate_getYear(call FunctionCall) Value {
+ // Will throw a TypeError is ThisObject is nil or
+ // does not have Class of "Date"
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Local().Year() - 1900)
+}
+
+func builtinDate_getFullYear(call FunctionCall) Value {
+ // Will throw a TypeError is ThisObject is nil or
+ // does not have Class of "Date"
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Local().Year())
+}
+
+func builtinDate_getUTCFullYear(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Year())
+}
+
+func builtinDate_getMonth(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(dateFromGoMonth(date.Time().Local().Month()))
+}
+
+func builtinDate_getUTCMonth(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(dateFromGoMonth(date.Time().Month()))
+}
+
+func builtinDate_getDate(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Local().Day())
+}
+
+func builtinDate_getUTCDate(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Day())
+}
+
+func builtinDate_getDay(call FunctionCall) Value {
+ // Actually day of the week
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(dateFromGoDay(date.Time().Local().Weekday()))
+}
+
+func builtinDate_getUTCDay(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(dateFromGoDay(date.Time().Weekday()))
+}
+
+func builtinDate_getHours(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Local().Hour())
+}
+
+func builtinDate_getUTCHours(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Hour())
+}
+
+func builtinDate_getMinutes(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Local().Minute())
+}
+
+func builtinDate_getUTCMinutes(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Minute())
+}
+
+func builtinDate_getSeconds(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Local().Second())
+}
+
+func builtinDate_getUTCSeconds(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Second())
+}
+
+func builtinDate_getMilliseconds(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Local().Nanosecond() / (100 * 100 * 100))
+}
+
+func builtinDate_getUTCMilliseconds(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ return toValue_int(date.Time().Nanosecond() / (100 * 100 * 100))
+}
+
+func builtinDate_getTimezoneOffset(call FunctionCall) Value {
+ date := dateObjectOf(call.runtime, call.thisObject())
+ if date.isNaN {
+ return NaNValue()
+ }
+ timeLocal := date.Time().Local()
+ // Is this kosher?
+ timeLocalAsUTC := Time.Date(
+ timeLocal.Year(),
+ timeLocal.Month(),
+ timeLocal.Day(),
+ timeLocal.Hour(),
+ timeLocal.Minute(),
+ timeLocal.Second(),
+ timeLocal.Nanosecond(),
+ Time.UTC,
+ )
+ return toValue_float64(date.Time().Sub(timeLocalAsUTC).Seconds() / 60)
+}
+
+func builtinDate_setMilliseconds(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, true)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ ecmaTime.millisecond = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setUTCMilliseconds(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, false)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ ecmaTime.millisecond = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setSeconds(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 2, true)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 1 {
+ ecmaTime.millisecond = value[1]
+ }
+ ecmaTime.second = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setUTCSeconds(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 2, false)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 1 {
+ ecmaTime.millisecond = value[1]
+ }
+ ecmaTime.second = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setMinutes(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 3, true)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 2 {
+ ecmaTime.millisecond = value[2]
+ ecmaTime.second = value[1]
+ } else if len(value) > 1 {
+ ecmaTime.second = value[1]
+ }
+ ecmaTime.minute = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setUTCMinutes(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 3, false)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 2 {
+ ecmaTime.millisecond = value[2]
+ ecmaTime.second = value[1]
+ } else if len(value) > 1 {
+ ecmaTime.second = value[1]
+ }
+ ecmaTime.minute = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setHours(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 4, true)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 3 {
+ ecmaTime.millisecond = value[3]
+ ecmaTime.second = value[2]
+ ecmaTime.minute = value[1]
+ } else if len(value) > 2 {
+ ecmaTime.second = value[2]
+ ecmaTime.minute = value[1]
+ } else if len(value) > 1 {
+ ecmaTime.minute = value[1]
+ }
+ ecmaTime.hour = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setUTCHours(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 4, false)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 3 {
+ ecmaTime.millisecond = value[3]
+ ecmaTime.second = value[2]
+ ecmaTime.minute = value[1]
+ } else if len(value) > 2 {
+ ecmaTime.second = value[2]
+ ecmaTime.minute = value[1]
+ } else if len(value) > 1 {
+ ecmaTime.minute = value[1]
+ }
+ ecmaTime.hour = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setDate(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, true)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ ecmaTime.day = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setUTCDate(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, false)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ ecmaTime.day = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setMonth(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 2, true)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 1 {
+ ecmaTime.day = value[1]
+ }
+ ecmaTime.month = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setUTCMonth(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 2, false)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 1 {
+ ecmaTime.day = value[1]
+ }
+ ecmaTime.month = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setYear(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 1, true)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ year := value[0]
+ if 0 <= year && year <= 99 {
+ year += 1900
+ }
+ ecmaTime.year = year
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setFullYear(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 3, true)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 2 {
+ ecmaTime.day = value[2]
+ ecmaTime.month = value[1]
+ } else if len(value) > 1 {
+ ecmaTime.month = value[1]
+ }
+ ecmaTime.year = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+func builtinDate_setUTCFullYear(call FunctionCall) Value {
+ object, date, ecmaTime, value := _builtinDate_beforeSet(call, 3, false)
+ if ecmaTime == nil {
+ return NaNValue()
+ }
+
+ if len(value) > 2 {
+ ecmaTime.day = value[2]
+ ecmaTime.month = value[1]
+ } else if len(value) > 1 {
+ ecmaTime.month = value[1]
+ }
+ ecmaTime.year = value[0]
+
+ date.SetTime(ecmaTime.goTime())
+ object.value = *date
+ return date.Value()
+}
+
+// toUTCString
+// toISOString
+// toJSONString
+// toJSON
diff --git a/vendor/github.com/robertkrimen/otto/builtin_error.go b/vendor/github.com/robertkrimen/otto/builtin_error.go
new file mode 100644
index 000000000..41da84ef2
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_error.go
@@ -0,0 +1,126 @@
+package otto
+
+import (
+ "fmt"
+)
+
+func builtinError(call FunctionCall) Value {
+ return toValue_object(call.runtime.newError("Error", call.Argument(0), 1))
+}
+
+func builtinNewError(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newError("Error", valueOfArrayIndex(argumentList, 0), 0))
+}
+
+func builtinError_toString(call FunctionCall) Value {
+ thisObject := call.thisObject()
+ if thisObject == nil {
+ panic(call.runtime.panicTypeError())
+ }
+
+ name := "Error"
+ nameValue := thisObject.get("name")
+ if nameValue.IsDefined() {
+ name = nameValue.string()
+ }
+
+ message := ""
+ messageValue := thisObject.get("message")
+ if messageValue.IsDefined() {
+ message = messageValue.string()
+ }
+
+ if len(name) == 0 {
+ return toValue_string(message)
+ }
+
+ if len(message) == 0 {
+ return toValue_string(name)
+ }
+
+ return toValue_string(fmt.Sprintf("%s: %s", name, message))
+}
+
+func (runtime *_runtime) newEvalError(message Value) *_object {
+ self := runtime.newErrorObject("EvalError", message, 0)
+ self.prototype = runtime.global.EvalErrorPrototype
+ return self
+}
+
+func builtinEvalError(call FunctionCall) Value {
+ return toValue_object(call.runtime.newEvalError(call.Argument(0)))
+}
+
+func builtinNewEvalError(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newEvalError(valueOfArrayIndex(argumentList, 0)))
+}
+
+func (runtime *_runtime) newTypeError(message Value) *_object {
+ self := runtime.newErrorObject("TypeError", message, 0)
+ self.prototype = runtime.global.TypeErrorPrototype
+ return self
+}
+
+func builtinTypeError(call FunctionCall) Value {
+ return toValue_object(call.runtime.newTypeError(call.Argument(0)))
+}
+
+func builtinNewTypeError(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newTypeError(valueOfArrayIndex(argumentList, 0)))
+}
+
+func (runtime *_runtime) newRangeError(message Value) *_object {
+ self := runtime.newErrorObject("RangeError", message, 0)
+ self.prototype = runtime.global.RangeErrorPrototype
+ return self
+}
+
+func builtinRangeError(call FunctionCall) Value {
+ return toValue_object(call.runtime.newRangeError(call.Argument(0)))
+}
+
+func builtinNewRangeError(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newRangeError(valueOfArrayIndex(argumentList, 0)))
+}
+
+func (runtime *_runtime) newURIError(message Value) *_object {
+ self := runtime.newErrorObject("URIError", message, 0)
+ self.prototype = runtime.global.URIErrorPrototype
+ return self
+}
+
+func (runtime *_runtime) newReferenceError(message Value) *_object {
+ self := runtime.newErrorObject("ReferenceError", message, 0)
+ self.prototype = runtime.global.ReferenceErrorPrototype
+ return self
+}
+
+func builtinReferenceError(call FunctionCall) Value {
+ return toValue_object(call.runtime.newReferenceError(call.Argument(0)))
+}
+
+func builtinNewReferenceError(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newReferenceError(valueOfArrayIndex(argumentList, 0)))
+}
+
+func (runtime *_runtime) newSyntaxError(message Value) *_object {
+ self := runtime.newErrorObject("SyntaxError", message, 0)
+ self.prototype = runtime.global.SyntaxErrorPrototype
+ return self
+}
+
+func builtinSyntaxError(call FunctionCall) Value {
+ return toValue_object(call.runtime.newSyntaxError(call.Argument(0)))
+}
+
+func builtinNewSyntaxError(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newSyntaxError(valueOfArrayIndex(argumentList, 0)))
+}
+
+func builtinURIError(call FunctionCall) Value {
+ return toValue_object(call.runtime.newURIError(call.Argument(0)))
+}
+
+func builtinNewURIError(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newURIError(valueOfArrayIndex(argumentList, 0)))
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_function.go b/vendor/github.com/robertkrimen/otto/builtin_function.go
new file mode 100644
index 000000000..3d07566c6
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_function.go
@@ -0,0 +1,129 @@
+package otto
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode"
+
+ "github.com/robertkrimen/otto/parser"
+)
+
+// Function
+
+func builtinFunction(call FunctionCall) Value {
+ return toValue_object(builtinNewFunctionNative(call.runtime, call.ArgumentList))
+}
+
+func builtinNewFunction(self *_object, argumentList []Value) Value {
+ return toValue_object(builtinNewFunctionNative(self.runtime, argumentList))
+}
+
+func argumentList2parameterList(argumentList []Value) []string {
+ parameterList := make([]string, 0, len(argumentList))
+ for _, value := range argumentList {
+ tmp := strings.FieldsFunc(value.string(), func(chr rune) bool {
+ return chr == ',' || unicode.IsSpace(chr)
+ })
+ parameterList = append(parameterList, tmp...)
+ }
+ return parameterList
+}
+
+var matchIdentifier = regexp.MustCompile(`^[$_\p{L}][$_\p{L}\d}]*$`)
+
+func builtinNewFunctionNative(runtime *_runtime, argumentList []Value) *_object {
+ var parameterList, body string
+ count := len(argumentList)
+ if count > 0 {
+ tmp := make([]string, 0, count-1)
+ for _, value := range argumentList[0 : count-1] {
+ tmp = append(tmp, value.string())
+ }
+ parameterList = strings.Join(tmp, ",")
+ body = argumentList[count-1].string()
+ }
+
+ // FIXME
+ function, err := parser.ParseFunction(parameterList, body)
+ runtime.parseThrow(err) // Will panic/throw appropriately
+ cmpl := _compiler{}
+ cmpl_function := cmpl.parseExpression(function)
+
+ return runtime.newNodeFunction(cmpl_function.(*_nodeFunctionLiteral), runtime.globalStash)
+}
+
+func builtinFunction_toString(call FunctionCall) Value {
+ object := call.thisClassObject("Function") // Should throw a TypeError unless Function
+ switch fn := object.value.(type) {
+ case _nativeFunctionObject:
+ return toValue_string(fmt.Sprintf("function %s() { [native code] }", fn.name))
+ case _nodeFunctionObject:
+ return toValue_string(fn.node.source)
+ case _bindFunctionObject:
+ return toValue_string("function () { [native code] }")
+ }
+
+ panic(call.runtime.panicTypeError("Function.toString()"))
+}
+
+func builtinFunction_apply(call FunctionCall) Value {
+ if !call.This.isCallable() {
+ panic(call.runtime.panicTypeError())
+ }
+ this := call.Argument(0)
+ if this.IsUndefined() {
+ // FIXME Not ECMA5
+ this = toValue_object(call.runtime.globalObject)
+ }
+ argumentList := call.Argument(1)
+ switch argumentList.kind {
+ case valueUndefined, valueNull:
+ return call.thisObject().call(this, nil, false, nativeFrame)
+ case valueObject:
+ default:
+ panic(call.runtime.panicTypeError())
+ }
+
+ arrayObject := argumentList._object()
+ thisObject := call.thisObject()
+ length := int64(toUint32(arrayObject.get("length")))
+ valueArray := make([]Value, length)
+ for index := int64(0); index < length; index++ {
+ valueArray[index] = arrayObject.get(arrayIndexToString(index))
+ }
+ return thisObject.call(this, valueArray, false, nativeFrame)
+}
+
+func builtinFunction_call(call FunctionCall) Value {
+ if !call.This.isCallable() {
+ panic(call.runtime.panicTypeError())
+ }
+ thisObject := call.thisObject()
+ this := call.Argument(0)
+ if this.IsUndefined() {
+ // FIXME Not ECMA5
+ this = toValue_object(call.runtime.globalObject)
+ }
+ if len(call.ArgumentList) >= 1 {
+ return thisObject.call(this, call.ArgumentList[1:], false, nativeFrame)
+ }
+ return thisObject.call(this, nil, false, nativeFrame)
+}
+
+func builtinFunction_bind(call FunctionCall) Value {
+ target := call.This
+ if !target.isCallable() {
+ panic(call.runtime.panicTypeError())
+ }
+ targetObject := target._object()
+
+ this := call.Argument(0)
+ argumentList := call.slice(1)
+ if this.IsUndefined() {
+ // FIXME Do this elsewhere?
+ this = toValue_object(call.runtime.globalObject)
+ }
+
+ return toValue_object(call.runtime.newBoundFunction(targetObject, this, argumentList))
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_json.go b/vendor/github.com/robertkrimen/otto/builtin_json.go
new file mode 100644
index 000000000..aed54bf12
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_json.go
@@ -0,0 +1,299 @@
+package otto
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type _builtinJSON_parseContext struct {
+ call FunctionCall
+ reviver Value
+}
+
+func builtinJSON_parse(call FunctionCall) Value {
+ ctx := _builtinJSON_parseContext{
+ call: call,
+ }
+ revive := false
+ if reviver := call.Argument(1); reviver.isCallable() {
+ revive = true
+ ctx.reviver = reviver
+ }
+
+ var root interface{}
+ err := json.Unmarshal([]byte(call.Argument(0).string()), &root)
+ if err != nil {
+ panic(call.runtime.panicSyntaxError(err.Error()))
+ }
+ value, exists := builtinJSON_parseWalk(ctx, root)
+ if !exists {
+ value = Value{}
+ }
+ if revive {
+ root := ctx.call.runtime.newObject()
+ root.put("", value, false)
+ return builtinJSON_reviveWalk(ctx, root, "")
+ }
+ return value
+}
+
+func builtinJSON_reviveWalk(ctx _builtinJSON_parseContext, holder *_object, name string) Value {
+ value := holder.get(name)
+ if object := value._object(); object != nil {
+ if isArray(object) {
+ length := int64(objectLength(object))
+ for index := int64(0); index < length; index += 1 {
+ name := arrayIndexToString(index)
+ value := builtinJSON_reviveWalk(ctx, object, name)
+ if value.IsUndefined() {
+ object.delete(name, false)
+ } else {
+ object.defineProperty(name, value, 0111, false)
+ }
+ }
+ } else {
+ object.enumerate(false, func(name string) bool {
+ value := builtinJSON_reviveWalk(ctx, object, name)
+ if value.IsUndefined() {
+ object.delete(name, false)
+ } else {
+ object.defineProperty(name, value, 0111, false)
+ }
+ return true
+ })
+ }
+ }
+ return ctx.reviver.call(ctx.call.runtime, toValue_object(holder), name, value)
+}
+
+func builtinJSON_parseWalk(ctx _builtinJSON_parseContext, rawValue interface{}) (Value, bool) {
+ switch value := rawValue.(type) {
+ case nil:
+ return nullValue, true
+ case bool:
+ return toValue_bool(value), true
+ case string:
+ return toValue_string(value), true
+ case float64:
+ return toValue_float64(value), true
+ case []interface{}:
+ arrayValue := make([]Value, len(value))
+ for index, rawValue := range value {
+ if value, exists := builtinJSON_parseWalk(ctx, rawValue); exists {
+ arrayValue[index] = value
+ }
+ }
+ return toValue_object(ctx.call.runtime.newArrayOf(arrayValue)), true
+ case map[string]interface{}:
+ object := ctx.call.runtime.newObject()
+ for name, rawValue := range value {
+ if value, exists := builtinJSON_parseWalk(ctx, rawValue); exists {
+ object.put(name, value, false)
+ }
+ }
+ return toValue_object(object), true
+ }
+ return Value{}, false
+}
+
+type _builtinJSON_stringifyContext struct {
+ call FunctionCall
+ stack []*_object
+ propertyList []string
+ replacerFunction *Value
+ gap string
+}
+
+func builtinJSON_stringify(call FunctionCall) Value {
+ ctx := _builtinJSON_stringifyContext{
+ call: call,
+ stack: []*_object{nil},
+ }
+ replacer := call.Argument(1)._object()
+ if replacer != nil {
+ if isArray(replacer) {
+ length := objectLength(replacer)
+ seen := map[string]bool{}
+ propertyList := make([]string, length)
+ length = 0
+ for index, _ := range propertyList {
+ value := replacer.get(arrayIndexToString(int64(index)))
+ switch value.kind {
+ case valueObject:
+ switch value.value.(*_object).class {
+ case "String":
+ case "Number":
+ default:
+ continue
+ }
+ case valueString:
+ case valueNumber:
+ default:
+ continue
+ }
+ name := value.string()
+ if seen[name] {
+ continue
+ }
+ seen[name] = true
+ length += 1
+ propertyList[index] = name
+ }
+ ctx.propertyList = propertyList[0:length]
+ } else if replacer.class == "Function" {
+ value := toValue_object(replacer)
+ ctx.replacerFunction = &value
+ }
+ }
+ if spaceValue, exists := call.getArgument(2); exists {
+ if spaceValue.kind == valueObject {
+ switch spaceValue.value.(*_object).class {
+ case "String":
+ spaceValue = toValue_string(spaceValue.string())
+ case "Number":
+ spaceValue = spaceValue.numberValue()
+ }
+ }
+ switch spaceValue.kind {
+ case valueString:
+ value := spaceValue.string()
+ if len(value) > 10 {
+ ctx.gap = value[0:10]
+ } else {
+ ctx.gap = value
+ }
+ case valueNumber:
+ value := spaceValue.number().int64
+ if value > 10 {
+ value = 10
+ } else if value < 0 {
+ value = 0
+ }
+ ctx.gap = strings.Repeat(" ", int(value))
+ }
+ }
+ holder := call.runtime.newObject()
+ holder.put("", call.Argument(0), false)
+ value, exists := builtinJSON_stringifyWalk(ctx, "", holder)
+ if !exists {
+ return Value{}
+ }
+ valueJSON, err := json.Marshal(value)
+ if err != nil {
+ panic(call.runtime.panicTypeError(err.Error()))
+ }
+ if ctx.gap != "" {
+ valueJSON1 := bytes.Buffer{}
+ json.Indent(&valueJSON1, valueJSON, "", ctx.gap)
+ valueJSON = valueJSON1.Bytes()
+ }
+ return toValue_string(string(valueJSON))
+}
+
+func builtinJSON_stringifyWalk(ctx _builtinJSON_stringifyContext, key string, holder *_object) (interface{}, bool) {
+ value := holder.get(key)
+
+ if value.IsObject() {
+ object := value._object()
+ if toJSON := object.get("toJSON"); toJSON.IsFunction() {
+ value = toJSON.call(ctx.call.runtime, value, key)
+ } else {
+ // If the object is a GoStruct or something that implements json.Marshaler
+ if object.objectClass.marshalJSON != nil {
+ marshaler := object.objectClass.marshalJSON(object)
+ if marshaler != nil {
+ return marshaler, true
+ }
+ }
+ }
+ }
+
+ if ctx.replacerFunction != nil {
+ value = (*ctx.replacerFunction).call(ctx.call.runtime, toValue_object(holder), key, value)
+ }
+
+ if value.kind == valueObject {
+ switch value.value.(*_object).class {
+ case "Boolean":
+ value = value._object().value.(Value)
+ case "String":
+ value = toValue_string(value.string())
+ case "Number":
+ value = value.numberValue()
+ }
+ }
+
+ switch value.kind {
+ case valueBoolean:
+ return value.bool(), true
+ case valueString:
+ return value.string(), true
+ case valueNumber:
+ integer := value.number()
+ switch integer.kind {
+ case numberInteger:
+ return integer.int64, true
+ case numberFloat:
+ return integer.float64, true
+ default:
+ return nil, true
+ }
+ case valueNull:
+ return nil, true
+ case valueObject:
+ holder := value._object()
+ if value := value._object(); nil != value {
+ for _, object := range ctx.stack {
+ if holder == object {
+ panic(ctx.call.runtime.panicTypeError("Converting circular structure to JSON"))
+ }
+ }
+ ctx.stack = append(ctx.stack, value)
+ defer func() { ctx.stack = ctx.stack[:len(ctx.stack)-1] }()
+ }
+ if isArray(holder) {
+ var length uint32
+ switch value := holder.get("length").value.(type) {
+ case uint32:
+ length = value
+ case int:
+ if value >= 0 {
+ length = uint32(value)
+ }
+ default:
+ panic(ctx.call.runtime.panicTypeError(fmt.Sprintf("JSON.stringify: invalid length: %v (%[1]T)", value)))
+ }
+ array := make([]interface{}, length)
+ for index, _ := range array {
+ name := arrayIndexToString(int64(index))
+ value, _ := builtinJSON_stringifyWalk(ctx, name, holder)
+ array[index] = value
+ }
+ return array, true
+ } else if holder.class != "Function" {
+ object := map[string]interface{}{}
+ if ctx.propertyList != nil {
+ for _, name := range ctx.propertyList {
+ value, exists := builtinJSON_stringifyWalk(ctx, name, holder)
+ if exists {
+ object[name] = value
+ }
+ }
+ } else {
+ // Go maps are without order, so this doesn't conform to the ECMA ordering
+ // standard, but oh well...
+ holder.enumerate(false, func(name string) bool {
+ value, exists := builtinJSON_stringifyWalk(ctx, name, holder)
+ if exists {
+ object[name] = value
+ }
+ return true
+ })
+ }
+ return object, true
+ }
+ }
+ return nil, false
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_math.go b/vendor/github.com/robertkrimen/otto/builtin_math.go
new file mode 100644
index 000000000..7ce90c339
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_math.go
@@ -0,0 +1,151 @@
+package otto
+
+import (
+ "math"
+ "math/rand"
+)
+
+// Math
+
+func builtinMath_abs(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Abs(number))
+}
+
+func builtinMath_acos(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Acos(number))
+}
+
+func builtinMath_asin(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Asin(number))
+}
+
+func builtinMath_atan(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Atan(number))
+}
+
+func builtinMath_atan2(call FunctionCall) Value {
+ y := call.Argument(0).float64()
+ if math.IsNaN(y) {
+ return NaNValue()
+ }
+ x := call.Argument(1).float64()
+ if math.IsNaN(x) {
+ return NaNValue()
+ }
+ return toValue_float64(math.Atan2(y, x))
+}
+
+func builtinMath_cos(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Cos(number))
+}
+
+func builtinMath_ceil(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Ceil(number))
+}
+
+func builtinMath_exp(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Exp(number))
+}
+
+func builtinMath_floor(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Floor(number))
+}
+
+func builtinMath_log(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Log(number))
+}
+
+func builtinMath_max(call FunctionCall) Value {
+ switch len(call.ArgumentList) {
+ case 0:
+ return negativeInfinityValue()
+ case 1:
+ return toValue_float64(call.ArgumentList[0].float64())
+ }
+ result := call.ArgumentList[0].float64()
+ if math.IsNaN(result) {
+ return NaNValue()
+ }
+ for _, value := range call.ArgumentList[1:] {
+ value := value.float64()
+ if math.IsNaN(value) {
+ return NaNValue()
+ }
+ result = math.Max(result, value)
+ }
+ return toValue_float64(result)
+}
+
+func builtinMath_min(call FunctionCall) Value {
+ switch len(call.ArgumentList) {
+ case 0:
+ return positiveInfinityValue()
+ case 1:
+ return toValue_float64(call.ArgumentList[0].float64())
+ }
+ result := call.ArgumentList[0].float64()
+ if math.IsNaN(result) {
+ return NaNValue()
+ }
+ for _, value := range call.ArgumentList[1:] {
+ value := value.float64()
+ if math.IsNaN(value) {
+ return NaNValue()
+ }
+ result = math.Min(result, value)
+ }
+ return toValue_float64(result)
+}
+
+func builtinMath_pow(call FunctionCall) Value {
+ // TODO Make sure this works according to the specification (15.8.2.13)
+ x := call.Argument(0).float64()
+ y := call.Argument(1).float64()
+ if math.Abs(x) == 1 && math.IsInf(y, 0) {
+ return NaNValue()
+ }
+ return toValue_float64(math.Pow(x, y))
+}
+
+func builtinMath_random(call FunctionCall) Value {
+ var v float64
+ if call.runtime.random != nil {
+ v = call.runtime.random()
+ } else {
+ v = rand.Float64()
+ }
+ return toValue_float64(v)
+}
+
+func builtinMath_round(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ value := math.Floor(number + 0.5)
+ if value == 0 {
+ value = math.Copysign(0, number)
+ }
+ return toValue_float64(value)
+}
+
+func builtinMath_sin(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Sin(number))
+}
+
+func builtinMath_sqrt(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Sqrt(number))
+}
+
+func builtinMath_tan(call FunctionCall) Value {
+ number := call.Argument(0).float64()
+ return toValue_float64(math.Tan(number))
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_number.go b/vendor/github.com/robertkrimen/otto/builtin_number.go
new file mode 100644
index 000000000..f99a42a2f
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_number.go
@@ -0,0 +1,93 @@
+package otto
+
+import (
+ "math"
+ "strconv"
+)
+
+// Number
+
+func numberValueFromNumberArgumentList(argumentList []Value) Value {
+ if len(argumentList) > 0 {
+ return argumentList[0].numberValue()
+ }
+ return toValue_int(0)
+}
+
+func builtinNumber(call FunctionCall) Value {
+ return numberValueFromNumberArgumentList(call.ArgumentList)
+}
+
+func builtinNewNumber(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newNumber(numberValueFromNumberArgumentList(argumentList)))
+}
+
+func builtinNumber_toString(call FunctionCall) Value {
+ // Will throw a TypeError if ThisObject is not a Number
+ value := call.thisClassObject("Number").primitiveValue()
+ radix := 10
+ radixArgument := call.Argument(0)
+ if radixArgument.IsDefined() {
+ integer := toIntegerFloat(radixArgument)
+ if integer < 2 || integer > 36 {
+ panic(call.runtime.panicRangeError("toString() radix must be between 2 and 36"))
+ }
+ radix = int(integer)
+ }
+ if radix == 10 {
+ return toValue_string(value.string())
+ }
+ return toValue_string(numberToStringRadix(value, radix))
+}
+
+func builtinNumber_valueOf(call FunctionCall) Value {
+ return call.thisClassObject("Number").primitiveValue()
+}
+
+func builtinNumber_toFixed(call FunctionCall) Value {
+ precision := toIntegerFloat(call.Argument(0))
+ if 20 < precision || 0 > precision {
+ panic(call.runtime.panicRangeError("toFixed() precision must be between 0 and 20"))
+ }
+ if call.This.IsNaN() {
+ return toValue_string("NaN")
+ }
+ value := call.This.float64()
+ if math.Abs(value) >= 1e21 {
+ return toValue_string(floatToString(value, 64))
+ }
+ return toValue_string(strconv.FormatFloat(call.This.float64(), 'f', int(precision), 64))
+}
+
+func builtinNumber_toExponential(call FunctionCall) Value {
+ if call.This.IsNaN() {
+ return toValue_string("NaN")
+ }
+ precision := float64(-1)
+ if value := call.Argument(0); value.IsDefined() {
+ precision = toIntegerFloat(value)
+ if 0 > precision {
+ panic(call.runtime.panicRangeError("toString() radix must be between 2 and 36"))
+ }
+ }
+ return toValue_string(strconv.FormatFloat(call.This.float64(), 'e', int(precision), 64))
+}
+
+func builtinNumber_toPrecision(call FunctionCall) Value {
+ if call.This.IsNaN() {
+ return toValue_string("NaN")
+ }
+ value := call.Argument(0)
+ if value.IsUndefined() {
+ return toValue_string(call.This.string())
+ }
+ precision := toIntegerFloat(value)
+ if 1 > precision {
+ panic(call.runtime.panicRangeError("toPrecision() precision must be greater than 1"))
+ }
+ return toValue_string(strconv.FormatFloat(call.This.float64(), 'g', int(precision), 64))
+}
+
+func builtinNumber_toLocaleString(call FunctionCall) Value {
+ return builtinNumber_toString(call)
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_object.go b/vendor/github.com/robertkrimen/otto/builtin_object.go
new file mode 100644
index 000000000..c2433f7be
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_object.go
@@ -0,0 +1,289 @@
+package otto
+
+import (
+ "fmt"
+)
+
+// Object
+
+func builtinObject(call FunctionCall) Value {
+ value := call.Argument(0)
+ switch value.kind {
+ case valueUndefined, valueNull:
+ return toValue_object(call.runtime.newObject())
+ }
+
+ return toValue_object(call.runtime.toObject(value))
+}
+
+func builtinNewObject(self *_object, argumentList []Value) Value {
+ value := valueOfArrayIndex(argumentList, 0)
+ switch value.kind {
+ case valueNull, valueUndefined:
+ case valueNumber, valueString, valueBoolean:
+ return toValue_object(self.runtime.toObject(value))
+ case valueObject:
+ return value
+ default:
+ }
+ return toValue_object(self.runtime.newObject())
+}
+
+func builtinObject_valueOf(call FunctionCall) Value {
+ return toValue_object(call.thisObject())
+}
+
+func builtinObject_hasOwnProperty(call FunctionCall) Value {
+ propertyName := call.Argument(0).string()
+ thisObject := call.thisObject()
+ return toValue_bool(thisObject.hasOwnProperty(propertyName))
+}
+
+func builtinObject_isPrototypeOf(call FunctionCall) Value {
+ value := call.Argument(0)
+ if !value.IsObject() {
+ return falseValue
+ }
+ prototype := call.toObject(value).prototype
+ thisObject := call.thisObject()
+ for prototype != nil {
+ if thisObject == prototype {
+ return trueValue
+ }
+ prototype = prototype.prototype
+ }
+ return falseValue
+}
+
+func builtinObject_propertyIsEnumerable(call FunctionCall) Value {
+ propertyName := call.Argument(0).string()
+ thisObject := call.thisObject()
+ property := thisObject.getOwnProperty(propertyName)
+ if property != nil && property.enumerable() {
+ return trueValue
+ }
+ return falseValue
+}
+
+func builtinObject_toString(call FunctionCall) Value {
+ result := ""
+ if call.This.IsUndefined() {
+ result = "[object Undefined]"
+ } else if call.This.IsNull() {
+ result = "[object Null]"
+ } else {
+ result = fmt.Sprintf("[object %s]", call.thisObject().class)
+ }
+ return toValue_string(result)
+}
+
+func builtinObject_toLocaleString(call FunctionCall) Value {
+ toString := call.thisObject().get("toString")
+ if !toString.isCallable() {
+ panic(call.runtime.panicTypeError())
+ }
+ return toString.call(call.runtime, call.This)
+}
+
+func builtinObject_getPrototypeOf(call FunctionCall) Value {
+ objectValue := call.Argument(0)
+ object := objectValue._object()
+ if object == nil {
+ panic(call.runtime.panicTypeError())
+ }
+
+ if object.prototype == nil {
+ return nullValue
+ }
+
+ return toValue_object(object.prototype)
+}
+
+func builtinObject_getOwnPropertyDescriptor(call FunctionCall) Value {
+ objectValue := call.Argument(0)
+ object := objectValue._object()
+ if object == nil {
+ panic(call.runtime.panicTypeError())
+ }
+
+ name := call.Argument(1).string()
+ descriptor := object.getOwnProperty(name)
+ if descriptor == nil {
+ return Value{}
+ }
+ return toValue_object(call.runtime.fromPropertyDescriptor(*descriptor))
+}
+
+func builtinObject_defineProperty(call FunctionCall) Value {
+ objectValue := call.Argument(0)
+ object := objectValue._object()
+ if object == nil {
+ panic(call.runtime.panicTypeError())
+ }
+ name := call.Argument(1).string()
+ descriptor := toPropertyDescriptor(call.runtime, call.Argument(2))
+ object.defineOwnProperty(name, descriptor, true)
+ return objectValue
+}
+
+func builtinObject_defineProperties(call FunctionCall) Value {
+ objectValue := call.Argument(0)
+ object := objectValue._object()
+ if object == nil {
+ panic(call.runtime.panicTypeError())
+ }
+
+ properties := call.runtime.toObject(call.Argument(1))
+ properties.enumerate(false, func(name string) bool {
+ descriptor := toPropertyDescriptor(call.runtime, properties.get(name))
+ object.defineOwnProperty(name, descriptor, true)
+ return true
+ })
+
+ return objectValue
+}
+
+func builtinObject_create(call FunctionCall) Value {
+ prototypeValue := call.Argument(0)
+ if !prototypeValue.IsNull() && !prototypeValue.IsObject() {
+ panic(call.runtime.panicTypeError())
+ }
+
+ object := call.runtime.newObject()
+ object.prototype = prototypeValue._object()
+
+ propertiesValue := call.Argument(1)
+ if propertiesValue.IsDefined() {
+ properties := call.runtime.toObject(propertiesValue)
+ properties.enumerate(false, func(name string) bool {
+ descriptor := toPropertyDescriptor(call.runtime, properties.get(name))
+ object.defineOwnProperty(name, descriptor, true)
+ return true
+ })
+ }
+
+ return toValue_object(object)
+}
+
+func builtinObject_isExtensible(call FunctionCall) Value {
+ object := call.Argument(0)
+ if object := object._object(); object != nil {
+ return toValue_bool(object.extensible)
+ }
+ panic(call.runtime.panicTypeError())
+}
+
+func builtinObject_preventExtensions(call FunctionCall) Value {
+ object := call.Argument(0)
+ if object := object._object(); object != nil {
+ object.extensible = false
+ } else {
+ panic(call.runtime.panicTypeError())
+ }
+ return object
+}
+
+func builtinObject_isSealed(call FunctionCall) Value {
+ object := call.Argument(0)
+ if object := object._object(); object != nil {
+ if object.extensible {
+ return toValue_bool(false)
+ }
+ result := true
+ object.enumerate(true, func(name string) bool {
+ property := object.getProperty(name)
+ if property.configurable() {
+ result = false
+ }
+ return true
+ })
+ return toValue_bool(result)
+ }
+ panic(call.runtime.panicTypeError())
+}
+
+func builtinObject_seal(call FunctionCall) Value {
+ object := call.Argument(0)
+ if object := object._object(); object != nil {
+ object.enumerate(true, func(name string) bool {
+ if property := object.getOwnProperty(name); nil != property && property.configurable() {
+ property.configureOff()
+ object.defineOwnProperty(name, *property, true)
+ }
+ return true
+ })
+ object.extensible = false
+ } else {
+ panic(call.runtime.panicTypeError())
+ }
+ return object
+}
+
+func builtinObject_isFrozen(call FunctionCall) Value {
+ object := call.Argument(0)
+ if object := object._object(); object != nil {
+ if object.extensible {
+ return toValue_bool(false)
+ }
+ result := true
+ object.enumerate(true, func(name string) bool {
+ property := object.getProperty(name)
+ if property.configurable() || property.writable() {
+ result = false
+ }
+ return true
+ })
+ return toValue_bool(result)
+ }
+ panic(call.runtime.panicTypeError())
+}
+
+func builtinObject_freeze(call FunctionCall) Value {
+ object := call.Argument(0)
+ if object := object._object(); object != nil {
+ object.enumerate(true, func(name string) bool {
+ if property, update := object.getOwnProperty(name), false; nil != property {
+ if property.isDataDescriptor() && property.writable() {
+ property.writeOff()
+ update = true
+ }
+ if property.configurable() {
+ property.configureOff()
+ update = true
+ }
+ if update {
+ object.defineOwnProperty(name, *property, true)
+ }
+ }
+ return true
+ })
+ object.extensible = false
+ } else {
+ panic(call.runtime.panicTypeError())
+ }
+ return object
+}
+
+func builtinObject_keys(call FunctionCall) Value {
+ if object, keys := call.Argument(0)._object(), []Value(nil); nil != object {
+ object.enumerate(false, func(name string) bool {
+ keys = append(keys, toValue_string(name))
+ return true
+ })
+ return toValue_object(call.runtime.newArrayOf(keys))
+ }
+ panic(call.runtime.panicTypeError())
+}
+
+func builtinObject_getOwnPropertyNames(call FunctionCall) Value {
+ if object, propertyNames := call.Argument(0)._object(), []Value(nil); nil != object {
+ object.enumerate(true, func(name string) bool {
+ if object.hasOwnProperty(name) {
+ propertyNames = append(propertyNames, toValue_string(name))
+ }
+ return true
+ })
+ return toValue_object(call.runtime.newArrayOf(propertyNames))
+ }
+ panic(call.runtime.panicTypeError())
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_regexp.go b/vendor/github.com/robertkrimen/otto/builtin_regexp.go
new file mode 100644
index 000000000..99422510d
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_regexp.go
@@ -0,0 +1,65 @@
+package otto
+
+import (
+ "fmt"
+)
+
+// RegExp
+
+func builtinRegExp(call FunctionCall) Value {
+ pattern := call.Argument(0)
+ flags := call.Argument(1)
+ if object := pattern._object(); object != nil {
+ if object.class == "RegExp" && flags.IsUndefined() {
+ return pattern
+ }
+ }
+ return toValue_object(call.runtime.newRegExp(pattern, flags))
+}
+
+func builtinNewRegExp(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newRegExp(
+ valueOfArrayIndex(argumentList, 0),
+ valueOfArrayIndex(argumentList, 1),
+ ))
+}
+
+func builtinRegExp_toString(call FunctionCall) Value {
+ thisObject := call.thisObject()
+ source := thisObject.get("source").string()
+ flags := []byte{}
+ if thisObject.get("global").bool() {
+ flags = append(flags, 'g')
+ }
+ if thisObject.get("ignoreCase").bool() {
+ flags = append(flags, 'i')
+ }
+ if thisObject.get("multiline").bool() {
+ flags = append(flags, 'm')
+ }
+ return toValue_string(fmt.Sprintf("/%s/%s", source, flags))
+}
+
+func builtinRegExp_exec(call FunctionCall) Value {
+ thisObject := call.thisObject()
+ target := call.Argument(0).string()
+ match, result := execRegExp(thisObject, target)
+ if !match {
+ return nullValue
+ }
+ return toValue_object(execResultToArray(call.runtime, target, result))
+}
+
+func builtinRegExp_test(call FunctionCall) Value {
+ thisObject := call.thisObject()
+ target := call.Argument(0).string()
+ match, _ := execRegExp(thisObject, target)
+ return toValue_bool(match)
+}
+
+func builtinRegExp_compile(call FunctionCall) Value {
+ // This (useless) function is deprecated, but is here to provide some
+ // semblance of compatibility.
+ // Caveat emptor: it may not be around for long.
+ return Value{}
+}
diff --git a/vendor/github.com/robertkrimen/otto/builtin_string.go b/vendor/github.com/robertkrimen/otto/builtin_string.go
new file mode 100644
index 000000000..f5f09fee1
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/builtin_string.go
@@ -0,0 +1,495 @@
+package otto
+
+import (
+ "bytes"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// String
+
+func stringValueFromStringArgumentList(argumentList []Value) Value {
+ if len(argumentList) > 0 {
+ return toValue_string(argumentList[0].string())
+ }
+ return toValue_string("")
+}
+
+func builtinString(call FunctionCall) Value {
+ return stringValueFromStringArgumentList(call.ArgumentList)
+}
+
+func builtinNewString(self *_object, argumentList []Value) Value {
+ return toValue_object(self.runtime.newString(stringValueFromStringArgumentList(argumentList)))
+}
+
+func builtinString_toString(call FunctionCall) Value {
+ return call.thisClassObject("String").primitiveValue()
+}
+func builtinString_valueOf(call FunctionCall) Value {
+ return call.thisClassObject("String").primitiveValue()
+}
+
+func builtinString_fromCharCode(call FunctionCall) Value {
+ chrList := make([]uint16, len(call.ArgumentList))
+ for index, value := range call.ArgumentList {
+ chrList[index] = toUint16(value)
+ }
+ return toValue_string16(chrList)
+}
+
+func builtinString_charAt(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ idx := int(call.Argument(0).number().int64)
+ chr := stringAt(call.This._object().stringValue(), idx)
+ if chr == utf8.RuneError {
+ return toValue_string("")
+ }
+ return toValue_string(string(chr))
+}
+
+func builtinString_charCodeAt(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ idx := int(call.Argument(0).number().int64)
+ chr := stringAt(call.This._object().stringValue(), idx)
+ if chr == utf8.RuneError {
+ return NaNValue()
+ }
+ return toValue_uint16(uint16(chr))
+}
+
+func builtinString_concat(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ var value bytes.Buffer
+ value.WriteString(call.This.string())
+ for _, item := range call.ArgumentList {
+ value.WriteString(item.string())
+ }
+ return toValue_string(value.String())
+}
+
+func builtinString_indexOf(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ value := call.This.string()
+ target := call.Argument(0).string()
+ if 2 > len(call.ArgumentList) {
+ return toValue_int(strings.Index(value, target))
+ }
+ start := toIntegerFloat(call.Argument(1))
+ if 0 > start {
+ start = 0
+ } else if start >= float64(len(value)) {
+ if target == "" {
+ return toValue_int(len(value))
+ }
+ return toValue_int(-1)
+ }
+ index := strings.Index(value[int(start):], target)
+ if index >= 0 {
+ index += int(start)
+ }
+ return toValue_int(index)
+}
+
// builtinString_lastIndexOf implements String.prototype.lastIndexOf
// (ES5 15.5.4.8): the last occurrence of target starting at or before the
// optional position argument, or -1 when absent.
// NOTE(review): offsets here are byte offsets, not the UTF-16 code units the
// spec prescribes — verify behavior for non-ASCII input.
func builtinString_lastIndexOf(call FunctionCall) Value {
	checkObjectCoercible(call.runtime, call.This)
	value := call.This.string()
	target := call.Argument(0).string()
	// No position argument (or undefined): search the entire string.
	if 2 > len(call.ArgumentList) || call.ArgumentList[1].IsUndefined() {
		return toValue_int(strings.LastIndex(value, target))
	}
	length := len(value)
	if length == 0 {
		return toValue_int(strings.LastIndex(value, target))
	}
	start := call.ArgumentList[1].number()
	if start.kind == numberInfinity { // FIXME
		// startNumber is infinity, so start is the end of string (start = length)
		return toValue_int(strings.LastIndex(value, target))
	}
	if 0 > start.int64 {
		start.int64 = 0
	}
	// Restrict the search to the prefix in which a match could still begin
	// at or before start.
	end := int(start.int64) + len(target)
	if end > length {
		end = length
	}
	return toValue_int(strings.LastIndex(value[:end], target))
}
+
+func builtinString_match(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ target := call.This.string()
+ matcherValue := call.Argument(0)
+ matcher := matcherValue._object()
+ if !matcherValue.IsObject() || matcher.class != "RegExp" {
+ matcher = call.runtime.newRegExp(matcherValue, Value{})
+ }
+ global := matcher.get("global").bool()
+ if !global {
+ match, result := execRegExp(matcher, target)
+ if !match {
+ return nullValue
+ }
+ return toValue_object(execResultToArray(call.runtime, target, result))
+ }
+
+ {
+ result := matcher.regExpValue().regularExpression.FindAllStringIndex(target, -1)
+ matchCount := len(result)
+ if result == nil {
+ matcher.put("lastIndex", toValue_int(0), true)
+ return Value{} // !match
+ }
+ matchCount = len(result)
+ valueArray := make([]Value, matchCount)
+ for index := 0; index < matchCount; index++ {
+ valueArray[index] = toValue_string(target[result[index][0]:result[index][1]])
+ }
+ matcher.put("lastIndex", toValue_int(result[matchCount-1][1]), true)
+ return toValue_object(call.runtime.newArrayOf(valueArray))
+ }
+}
+
// builtinString_replace_Regexp matches the $-escape sequences that may appear
// in a string replaceValue: $$, $&, $`, $', and $1..$99.
var builtinString_replace_Regexp = regexp.MustCompile("\\$(?:[\\$\\&\\'\\`1-9]|0[1-9]|[1-9][0-9])")

// builtinString_findAndReplaceString appends to input the text of target
// between lastIndex and the current match, followed by replaceValue with its
// $-escapes expanded against match (pairs of start/end offsets into target,
// as produced by FindAllSubmatchIndex).
func builtinString_findAndReplaceString(input []byte, lastIndex int, match []int, target []byte, replaceValue []byte) (output []byte) {
	matchCount := len(match) / 2
	output = input
	// Copy the unmatched text between the previous match and this one.
	if match[0] != lastIndex {
		output = append(output, target[lastIndex:match[0]]...)
	}
	replacement := builtinString_replace_Regexp.ReplaceAllFunc(replaceValue, func(part []byte) []byte {
		// TODO Check if match[0] or match[1] can be -1 in this scenario
		switch part[1] {
		case '$':
			return []byte{'$'}
		case '&':
			return target[match[0]:match[1]]
		case '`':
			return target[:match[0]]
		case '\'':
			return target[match[1]:]
		}
		// $n / $nn: a numbered capture group.
		// FIX: the error variable was named `error`, shadowing the
		// predeclared error type; renamed to err.
		matchNumberParse, err := strconv.ParseInt(string(part[1:]), 10, 64)
		matchNumber := int(matchNumberParse)
		if err != nil || matchNumber >= matchCount {
			return []byte{}
		}
		offset := 2 * matchNumber
		if match[offset] != -1 {
			return target[match[offset]:match[offset+1]]
		}
		return []byte{} // A non-participating capture group: the empty string
	})
	output = append(output, replacement...)
	return output
}
+
// builtinString_replace implements String.prototype.replace(searchValue,
// replaceValue) (ES5 15.5.4.11) for string and RegExp search values, and for
// string and function replacements.
func builtinString_replace(call FunctionCall) Value {
	checkObjectCoercible(call.runtime, call.This)
	target := []byte(call.This.string())
	searchValue := call.Argument(0)
	searchObject := searchValue._object()

	// TODO If a capture is -1?
	var search *regexp.Regexp
	global := false
	find := 1
	// NOTE(review): `global` is never set to true anywhere below, so the
	// lastIndex update near the bottom of this function is unreachable —
	// presumably the global-RegExp branch was meant to set it; confirm intent.
	if searchValue.IsObject() && searchObject.class == "RegExp" {
		regExp := searchObject.regExpValue()
		search = regExp.regularExpression
		if regExp.global {
			find = -1
		}
	} else {
		// A plain-string search is compiled into a literal regexp so both
		// branches share the submatch-index representation.
		search = regexp.MustCompile(regexp.QuoteMeta(searchValue.string()))
	}

	found := search.FindAllSubmatchIndex(target, find)
	if found == nil {
		return toValue_string(string(target)) // !match
	}

	{
		lastIndex := 0
		result := []byte{}

		replaceValue := call.Argument(1)
		if replaceValue.isCallable() {
			// Function replacement: invoked as fn(match, p1..pn, offset, string).
			target := string(target)
			replace := replaceValue._object()
			for _, match := range found {
				if match[0] != lastIndex {
					result = append(result, target[lastIndex:match[0]]...)
				}
				matchCount := len(match) / 2
				argumentList := make([]Value, matchCount+2)
				for index := 0; index < matchCount; index++ {
					offset := 2 * index
					if match[offset] != -1 {
						argumentList[index] = toValue_string(target[match[offset]:match[offset+1]])
					} else {
						// A non-participating capture group: undefined.
						argumentList[index] = Value{}
					}
				}
				argumentList[matchCount+0] = toValue_int(match[0])
				argumentList[matchCount+1] = toValue_string(target)
				replacement := replace.call(Value{}, argumentList, false, nativeFrame).string()
				result = append(result, []byte(replacement)...)
				lastIndex = match[1]
			}

		} else {
			// String replacement: expand $-escapes ($$, $&, $`, $', $n).
			replace := []byte(replaceValue.string())
			for _, match := range found {
				result = builtinString_findAndReplaceString(result, lastIndex, match, target, replace)
				lastIndex = match[1]
			}
		}

		// Append the tail after the final match.
		if lastIndex != len(target) {
			result = append(result, target[lastIndex:]...)
		}

		if global && searchObject != nil {
			searchObject.put("lastIndex", toValue_int(lastIndex), true)
		}

		return toValue_string(string(result))
	}
}
+
+func builtinString_search(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ target := call.This.string()
+ searchValue := call.Argument(0)
+ search := searchValue._object()
+ if !searchValue.IsObject() || search.class != "RegExp" {
+ search = call.runtime.newRegExp(searchValue, Value{})
+ }
+ result := search.regExpValue().regularExpression.FindStringIndex(target)
+ if result == nil {
+ return toValue_int(-1)
+ }
+ return toValue_int(result[0])
+}
+
// stringSplitMatch reports whether search occurs in target at or after index,
// returning the offset of the first occurrence relative to index.
func stringSplitMatch(target string, targetLength int64, index uint, search string, searchLength int64) (bool, uint) {
	// BUG FIX: the original bounds check compared against searchLength
	// (int64(index)+searchLength > searchLength), which is trivially true for
	// any index > 0 and so rejected every non-zero starting position. The
	// intended bound is the length of the string being searched.
	if int64(index)+searchLength > targetLength {
		return false, 0
	}
	found := strings.Index(target[index:], search)
	if found < 0 {
		return false, 0
	}
	return true, uint(found)
}
+
// builtinString_split implements String.prototype.split(separator, limit)
// (ES5 15.5.4.14) for both string and RegExp separators.
func builtinString_split(call FunctionCall) Value {
	checkObjectCoercible(call.runtime, call.This)
	target := call.This.string()

	separatorValue := call.Argument(0)
	limitValue := call.Argument(1)
	// limit == -1 means "no limit".
	limit := -1
	if limitValue.IsDefined() {
		limit = int(toUint32(limitValue))
	}

	if limit == 0 {
		return toValue_object(call.runtime.newArray(0))
	}

	// An undefined separator yields a one-element array holding the whole
	// string.
	if separatorValue.IsUndefined() {
		return toValue_object(call.runtime.newArrayOf([]Value{toValue_string(target)}))
	}

	if separatorValue.isRegExp() {
		targetLength := len(target)
		search := separatorValue._object().regExpValue().regularExpression
		valueArray := []Value{}
		result := search.FindAllStringSubmatchIndex(target, -1)
		lastIndex := 0
		found := 0

		for _, match := range result {
			if match[0] == match[1] {
				// An empty match at either end of the string contributes
				// nothing.
				// FIXME Ugh, this is a hack
				if match[0] == 0 || match[0] == targetLength {
					continue
				}
			}

			if lastIndex != match[0] {
				valueArray = append(valueArray, toValue_string(target[lastIndex:match[0]]))
				found++
			} else if lastIndex == match[0] {
				if lastIndex != -1 {
					valueArray = append(valueArray, toValue_string(""))
					found++
				}
			}

			lastIndex = match[1]
			if found == limit {
				goto RETURN
			}

			// Capture groups of the separator also appear in the output
			// array (per the spec), each counted against limit.
			captureCount := len(match) / 2
			for index := 1; index < captureCount; index++ {
				offset := index * 2
				value := Value{}
				if match[offset] != -1 {
					value = toValue_string(target[match[offset]:match[offset+1]])
				}
				valueArray = append(valueArray, value)
				found++
				if found == limit {
					goto RETURN
				}
			}
		}

		// Append the trailing piece after the final separator match.
		if found != limit {
			if lastIndex != targetLength {
				valueArray = append(valueArray, toValue_string(target[lastIndex:targetLength]))
			} else {
				valueArray = append(valueArray, toValue_string(""))
			}
		}

	RETURN:
		return toValue_object(call.runtime.newArrayOf(valueArray))

	} else {
		separator := separatorValue.string()

		// SplitN with limit+1 lets us detect and drop the overflow piece.
		splitLimit := limit
		excess := false
		if limit > 0 {
			splitLimit = limit + 1
			excess = true
		}

		split := strings.SplitN(target, separator, splitLimit)

		if excess && len(split) > limit {
			split = split[:limit]
		}

		return call.runtime.toValue(split)
	}
}
+
+func builtinString_slice(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ target := call.This.string()
+
+ length := int64(len(target))
+ start, end := rangeStartEnd(call.ArgumentList, length, false)
+ if end-start <= 0 {
+ return toValue_string("")
+ }
+ return toValue_string(target[start:end])
+}
+
+func builtinString_substring(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ target := call.This.string()
+
+ length := int64(len(target))
+ start, end := rangeStartEnd(call.ArgumentList, length, true)
+ if start > end {
+ start, end = end, start
+ }
+ return toValue_string(target[start:end])
+}
+
+func builtinString_substr(call FunctionCall) Value {
+ target := call.This.string()
+
+ size := int64(len(target))
+ start, length := rangeStartLength(call.ArgumentList, size)
+
+ if start >= size {
+ return toValue_string("")
+ }
+
+ if length <= 0 {
+ return toValue_string("")
+ }
+
+ if start+length >= size {
+ // Cap length to be to the end of the string
+ // start = 3, length = 5, size = 4 [0, 1, 2, 3]
+ // 4 - 3 = 1
+ // target[3:4]
+ length = size - start
+ }
+
+ return toValue_string(target[start : start+length])
+}
+
+func builtinString_toLowerCase(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ return toValue_string(strings.ToLower(call.This.string()))
+}
+
+func builtinString_toUpperCase(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ return toValue_string(strings.ToUpper(call.This.string()))
+}
+
+// 7.2 Table 2 — Whitespace Characters & 7.3 Table 3 - Line Terminator Characters
+const builtinString_trim_whitespace = "\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF"
+
+func builtinString_trim(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ return toValue(strings.Trim(call.This.string(),
+ builtinString_trim_whitespace))
+}
+
+// Mozilla extension, not ECMAScript 5
+func builtinString_trimLeft(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ return toValue(strings.TrimLeft(call.This.string(),
+ builtinString_trim_whitespace))
+}
+
+// Mozilla extension, not ECMAScript 5
+func builtinString_trimRight(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ return toValue(strings.TrimRight(call.This.string(),
+ builtinString_trim_whitespace))
+}
+
+func builtinString_localeCompare(call FunctionCall) Value {
+ checkObjectCoercible(call.runtime, call.This)
+ this := call.This.string()
+ that := call.Argument(0).string()
+ if this < that {
+ return toValue_int(-1)
+ } else if this == that {
+ return toValue_int(0)
+ }
+ return toValue_int(1)
+}
+
+/*
+An alternate version of String.trim
+func builtinString_trim(call FunctionCall) Value {
+ checkObjectCoercible(call.This)
+ return toValue_string(strings.TrimFunc(call.string(.This), isWhiteSpaceOrLineTerminator))
+}
+*/
+
+func builtinString_toLocaleLowerCase(call FunctionCall) Value {
+ return builtinString_toLowerCase(call)
+}
+
+func builtinString_toLocaleUpperCase(call FunctionCall) Value {
+ return builtinString_toUpperCase(call)
+}
diff --git a/vendor/github.com/robertkrimen/otto/clone.go b/vendor/github.com/robertkrimen/otto/clone.go
new file mode 100644
index 000000000..23b59f8ac
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/clone.go
@@ -0,0 +1,173 @@
+package otto
+
+import (
+ "fmt"
+)
+
+type _clone struct {
+ runtime *_runtime
+ _object map[*_object]*_object
+ _objectStash map[*_objectStash]*_objectStash
+ _dclStash map[*_dclStash]*_dclStash
+ _fnStash map[*_fnStash]*_fnStash
+}
+
+func (in *_runtime) clone() *_runtime {
+
+ in.lck.Lock()
+ defer in.lck.Unlock()
+
+ out := &_runtime{
+ debugger: in.debugger,
+ random: in.random,
+ stackLimit: in.stackLimit,
+ traceLimit: in.traceLimit,
+ }
+
+ clone := _clone{
+ runtime: out,
+ _object: make(map[*_object]*_object),
+ _objectStash: make(map[*_objectStash]*_objectStash),
+ _dclStash: make(map[*_dclStash]*_dclStash),
+ _fnStash: make(map[*_fnStash]*_fnStash),
+ }
+
+ globalObject := clone.object(in.globalObject)
+ out.globalStash = out.newObjectStash(globalObject, nil)
+ out.globalObject = globalObject
+ out.global = _global{
+ clone.object(in.global.Object),
+ clone.object(in.global.Function),
+ clone.object(in.global.Array),
+ clone.object(in.global.String),
+ clone.object(in.global.Boolean),
+ clone.object(in.global.Number),
+ clone.object(in.global.Math),
+ clone.object(in.global.Date),
+ clone.object(in.global.RegExp),
+ clone.object(in.global.Error),
+ clone.object(in.global.EvalError),
+ clone.object(in.global.TypeError),
+ clone.object(in.global.RangeError),
+ clone.object(in.global.ReferenceError),
+ clone.object(in.global.SyntaxError),
+ clone.object(in.global.URIError),
+ clone.object(in.global.JSON),
+
+ clone.object(in.global.ObjectPrototype),
+ clone.object(in.global.FunctionPrototype),
+ clone.object(in.global.ArrayPrototype),
+ clone.object(in.global.StringPrototype),
+ clone.object(in.global.BooleanPrototype),
+ clone.object(in.global.NumberPrototype),
+ clone.object(in.global.DatePrototype),
+ clone.object(in.global.RegExpPrototype),
+ clone.object(in.global.ErrorPrototype),
+ clone.object(in.global.EvalErrorPrototype),
+ clone.object(in.global.TypeErrorPrototype),
+ clone.object(in.global.RangeErrorPrototype),
+ clone.object(in.global.ReferenceErrorPrototype),
+ clone.object(in.global.SyntaxErrorPrototype),
+ clone.object(in.global.URIErrorPrototype),
+ }
+
+ out.eval = out.globalObject.property["eval"].value.(Value).value.(*_object)
+ out.globalObject.prototype = out.global.ObjectPrototype
+
+ // Not sure if this is necessary, but give some help to the GC
+ clone.runtime = nil
+ clone._object = nil
+ clone._objectStash = nil
+ clone._dclStash = nil
+ clone._fnStash = nil
+
+ return out
+}
+
+func (clone *_clone) object(in *_object) *_object {
+ if out, exists := clone._object[in]; exists {
+ return out
+ }
+ out := &_object{}
+ clone._object[in] = out
+ return in.objectClass.clone(in, out, clone)
+}
+
+func (clone *_clone) dclStash(in *_dclStash) (*_dclStash, bool) {
+ if out, exists := clone._dclStash[in]; exists {
+ return out, true
+ }
+ out := &_dclStash{}
+ clone._dclStash[in] = out
+ return out, false
+}
+
+func (clone *_clone) objectStash(in *_objectStash) (*_objectStash, bool) {
+ if out, exists := clone._objectStash[in]; exists {
+ return out, true
+ }
+ out := &_objectStash{}
+ clone._objectStash[in] = out
+ return out, false
+}
+
+func (clone *_clone) fnStash(in *_fnStash) (*_fnStash, bool) {
+ if out, exists := clone._fnStash[in]; exists {
+ return out, true
+ }
+ out := &_fnStash{}
+ clone._fnStash[in] = out
+ return out, false
+}
+
+func (clone *_clone) value(in Value) Value {
+ out := in
+ switch value := in.value.(type) {
+ case *_object:
+ out.value = clone.object(value)
+ }
+ return out
+}
+
+func (clone *_clone) valueArray(in []Value) []Value {
+ out := make([]Value, len(in))
+ for index, value := range in {
+ out[index] = clone.value(value)
+ }
+ return out
+}
+
+func (clone *_clone) stash(in _stash) _stash {
+ if in == nil {
+ return nil
+ }
+ return in.clone(clone)
+}
+
+func (clone *_clone) property(in _property) _property {
+ out := in
+
+ switch value := in.value.(type) {
+ case Value:
+ out.value = clone.value(value)
+ case _propertyGetSet:
+ p := _propertyGetSet{}
+ if value[0] != nil {
+ p[0] = clone.object(value[0])
+ }
+ if value[1] != nil {
+ p[1] = clone.object(value[1])
+ }
+ out.value = p
+ default:
+ panic(fmt.Errorf("in.value.(Value) != true; in.value is %T", in.value))
+ }
+
+ return out
+}
+
+func (clone *_clone) dclProperty(in _dclProperty) _dclProperty {
+ out := in
+ out.value = clone.value(in.value)
+ return out
+}
diff --git a/vendor/github.com/robertkrimen/otto/cmpl.go b/vendor/github.com/robertkrimen/otto/cmpl.go
new file mode 100644
index 000000000..c191b4527
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/cmpl.go
@@ -0,0 +1,24 @@
+package otto
+
+import (
+ "github.com/robertkrimen/otto/ast"
+ "github.com/robertkrimen/otto/file"
+)
+
+type _file struct {
+ name string
+ src string
+ base int // This will always be 1 or greater
+}
+
+type _compiler struct {
+ file *file.File
+ program *ast.Program
+}
+
+func (cmpl *_compiler) parse() *_nodeProgram {
+ if cmpl.program != nil {
+ cmpl.file = cmpl.program.File
+ }
+ return cmpl._parse(cmpl.program)
+}
diff --git a/vendor/github.com/robertkrimen/otto/cmpl_evaluate.go b/vendor/github.com/robertkrimen/otto/cmpl_evaluate.go
new file mode 100644
index 000000000..6741bf394
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/cmpl_evaluate.go
@@ -0,0 +1,96 @@
+package otto
+
+import (
+ "strconv"
+)
+
+func (self *_runtime) cmpl_evaluate_nodeProgram(node *_nodeProgram, eval bool) Value {
+ if !eval {
+ self.enterGlobalScope()
+ defer func() {
+ self.leaveScope()
+ }()
+ }
+ self.cmpl_functionDeclaration(node.functionList)
+ self.cmpl_variableDeclaration(node.varList)
+ self.scope.frame.file = node.file
+ return self.cmpl_evaluate_nodeStatementList(node.body)
+}
+
+func (self *_runtime) cmpl_call_nodeFunction(function *_object, stash *_fnStash, node *_nodeFunctionLiteral, this Value, argumentList []Value) Value {
+
+ indexOfParameterName := make([]string, len(argumentList))
+ // function(abc, def, ghi)
+ // indexOfParameterName[0] = "abc"
+ // indexOfParameterName[1] = "def"
+ // indexOfParameterName[2] = "ghi"
+ // ...
+
+ argumentsFound := false
+ for index, name := range node.parameterList {
+ if name == "arguments" {
+ argumentsFound = true
+ }
+ value := Value{}
+ if index < len(argumentList) {
+ value = argumentList[index]
+ indexOfParameterName[index] = name
+ }
+ // strict = false
+ self.scope.lexical.setValue(name, value, false)
+ }
+
+ if !argumentsFound {
+ arguments := self.newArgumentsObject(indexOfParameterName, stash, len(argumentList))
+ arguments.defineProperty("callee", toValue_object(function), 0101, false)
+ stash.arguments = arguments
+ // strict = false
+ self.scope.lexical.setValue("arguments", toValue_object(arguments), false)
+ for index, _ := range argumentList {
+ if index < len(node.parameterList) {
+ continue
+ }
+ indexAsString := strconv.FormatInt(int64(index), 10)
+ arguments.defineProperty(indexAsString, argumentList[index], 0111, false)
+ }
+ }
+
+ self.cmpl_functionDeclaration(node.functionList)
+ self.cmpl_variableDeclaration(node.varList)
+
+ result := self.cmpl_evaluate_nodeStatement(node.body)
+ if result.kind == valueResult {
+ return result
+ }
+
+ return Value{}
+}
+
+func (self *_runtime) cmpl_functionDeclaration(list []*_nodeFunctionLiteral) {
+ executionContext := self.scope
+ eval := executionContext.eval
+ stash := executionContext.variable
+
+ for _, function := range list {
+ name := function.name
+ value := self.cmpl_evaluate_nodeExpression(function)
+ if !stash.hasBinding(name) {
+ stash.createBinding(name, eval == true, value)
+ } else {
+ // TODO 10.5.5.e
+ stash.setBinding(name, value, false) // TODO strict
+ }
+ }
+}
+
+func (self *_runtime) cmpl_variableDeclaration(list []string) {
+ executionContext := self.scope
+ eval := executionContext.eval
+ stash := executionContext.variable
+
+ for _, name := range list {
+ if !stash.hasBinding(name) {
+ stash.createBinding(name, eval == true, Value{}) // TODO strict?
+ }
+ }
+}
diff --git a/vendor/github.com/robertkrimen/otto/cmpl_evaluate_expression.go b/vendor/github.com/robertkrimen/otto/cmpl_evaluate_expression.go
new file mode 100644
index 000000000..8586a484f
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/cmpl_evaluate_expression.go
@@ -0,0 +1,460 @@
+package otto
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+
+ "github.com/robertkrimen/otto/token"
+)
+
+func (self *_runtime) cmpl_evaluate_nodeExpression(node _nodeExpression) Value {
+ // Allow interpreter interruption
+ // If the Interrupt channel is nil, then
+ // we avoid runtime.Gosched() overhead (if any)
+ // FIXME: Test this
+ if self.otto.Interrupt != nil {
+ runtime.Gosched()
+ select {
+ case value := <-self.otto.Interrupt:
+ value()
+ default:
+ }
+ }
+
+ switch node := node.(type) {
+
+ case *_nodeArrayLiteral:
+ return self.cmpl_evaluate_nodeArrayLiteral(node)
+
+ case *_nodeAssignExpression:
+ return self.cmpl_evaluate_nodeAssignExpression(node)
+
+ case *_nodeBinaryExpression:
+ if node.comparison {
+ return self.cmpl_evaluate_nodeBinaryExpression_comparison(node)
+ } else {
+ return self.cmpl_evaluate_nodeBinaryExpression(node)
+ }
+
+ case *_nodeBracketExpression:
+ return self.cmpl_evaluate_nodeBracketExpression(node)
+
+ case *_nodeCallExpression:
+ return self.cmpl_evaluate_nodeCallExpression(node, nil)
+
+ case *_nodeConditionalExpression:
+ return self.cmpl_evaluate_nodeConditionalExpression(node)
+
+ case *_nodeDotExpression:
+ return self.cmpl_evaluate_nodeDotExpression(node)
+
+ case *_nodeFunctionLiteral:
+ var local = self.scope.lexical
+ if node.name != "" {
+ local = self.newDeclarationStash(local)
+ }
+
+ value := toValue_object(self.newNodeFunction(node, local))
+ if node.name != "" {
+ local.createBinding(node.name, false, value)
+ }
+ return value
+
+ case *_nodeIdentifier:
+ name := node.name
+ // TODO Should be true or false (strictness) depending on context
+ // getIdentifierReference should not return nil, but we check anyway and panic
+ // so as not to propagate the nil into something else
+ reference := getIdentifierReference(self, self.scope.lexical, name, false, _at(node.idx))
+ if reference == nil {
+ // Should never get here!
+ panic(hereBeDragons("referenceError == nil: " + name))
+ }
+ return toValue(reference)
+
+ case *_nodeLiteral:
+ return node.value
+
+ case *_nodeNewExpression:
+ return self.cmpl_evaluate_nodeNewExpression(node)
+
+ case *_nodeObjectLiteral:
+ return self.cmpl_evaluate_nodeObjectLiteral(node)
+
+ case *_nodeRegExpLiteral:
+ return toValue_object(self._newRegExp(node.pattern, node.flags))
+
+ case *_nodeSequenceExpression:
+ return self.cmpl_evaluate_nodeSequenceExpression(node)
+
+ case *_nodeThisExpression:
+ return toValue_object(self.scope.this)
+
+ case *_nodeUnaryExpression:
+ return self.cmpl_evaluate_nodeUnaryExpression(node)
+
+ case *_nodeVariableExpression:
+ return self.cmpl_evaluate_nodeVariableExpression(node)
+ }
+
+ panic(fmt.Errorf("Here be dragons: evaluate_nodeExpression(%T)", node))
+}
+
+func (self *_runtime) cmpl_evaluate_nodeArrayLiteral(node *_nodeArrayLiteral) Value {
+
+ valueArray := []Value{}
+
+ for _, node := range node.value {
+ if node == nil {
+ valueArray = append(valueArray, emptyValue)
+ } else {
+ valueArray = append(valueArray, self.cmpl_evaluate_nodeExpression(node).resolve())
+ }
+ }
+
+ result := self.newArrayOf(valueArray)
+
+ return toValue_object(result)
+}
+
+func (self *_runtime) cmpl_evaluate_nodeAssignExpression(node *_nodeAssignExpression) Value {
+
+ left := self.cmpl_evaluate_nodeExpression(node.left)
+ right := self.cmpl_evaluate_nodeExpression(node.right)
+ rightValue := right.resolve()
+
+ result := rightValue
+ if node.operator != token.ASSIGN {
+ result = self.calculateBinaryExpression(node.operator, left, rightValue)
+ }
+
+ self.putValue(left.reference(), result)
+
+ return result
+}
+
+func (self *_runtime) cmpl_evaluate_nodeBinaryExpression(node *_nodeBinaryExpression) Value {
+
+ left := self.cmpl_evaluate_nodeExpression(node.left)
+ leftValue := left.resolve()
+
+ switch node.operator {
+ // Logical
+ case token.LOGICAL_AND:
+ if !leftValue.bool() {
+ return leftValue
+ }
+ right := self.cmpl_evaluate_nodeExpression(node.right)
+ return right.resolve()
+ case token.LOGICAL_OR:
+ if leftValue.bool() {
+ return leftValue
+ }
+ right := self.cmpl_evaluate_nodeExpression(node.right)
+ return right.resolve()
+ }
+
+ return self.calculateBinaryExpression(node.operator, leftValue, self.cmpl_evaluate_nodeExpression(node.right))
+}
+
+func (self *_runtime) cmpl_evaluate_nodeBinaryExpression_comparison(node *_nodeBinaryExpression) Value {
+
+ left := self.cmpl_evaluate_nodeExpression(node.left).resolve()
+ right := self.cmpl_evaluate_nodeExpression(node.right).resolve()
+
+ return toValue_bool(self.calculateComparison(node.operator, left, right))
+}
+
+func (self *_runtime) cmpl_evaluate_nodeBracketExpression(node *_nodeBracketExpression) Value {
+ target := self.cmpl_evaluate_nodeExpression(node.left)
+ targetValue := target.resolve()
+ member := self.cmpl_evaluate_nodeExpression(node.member)
+ memberValue := member.resolve()
+
+ // TODO Pass in base value as-is, and defer toObject till later?
+ object, err := self.objectCoerce(targetValue)
+ if err != nil {
+ panic(self.panicTypeError("Cannot access member '%s' of %s", memberValue.string(), err.Error(), _at(node.idx)))
+ }
+ return toValue(newPropertyReference(self, object, memberValue.string(), false, _at(node.idx)))
+}
+
+func (self *_runtime) cmpl_evaluate_nodeCallExpression(node *_nodeCallExpression, withArgumentList []interface{}) Value {
+ rt := self
+ this := Value{}
+ callee := self.cmpl_evaluate_nodeExpression(node.callee)
+
+ argumentList := []Value{}
+ if withArgumentList != nil {
+ argumentList = self.toValueArray(withArgumentList...)
+ } else {
+ for _, argumentNode := range node.argumentList {
+ argumentList = append(argumentList, self.cmpl_evaluate_nodeExpression(argumentNode).resolve())
+ }
+ }
+
+ rf := callee.reference()
+ vl := callee.resolve()
+
+ eval := false // Whether this call is a (candidate for) direct call to eval
+ name := ""
+ if rf != nil {
+ switch rf := rf.(type) {
+ case *_propertyReference:
+ name = rf.name
+ object := rf.base
+ this = toValue_object(object)
+ eval = rf.name == "eval" // Possible direct eval
+ case *_stashReference:
+ // TODO ImplicitThisValue
+ name = rf.name
+ eval = rf.name == "eval" // Possible direct eval
+ default:
+ // FIXME?
+ panic(rt.panicTypeError("Here be dragons"))
+ }
+ }
+
+ at := _at(-1)
+ switch callee := node.callee.(type) {
+ case *_nodeIdentifier:
+ at = _at(callee.idx)
+ case *_nodeDotExpression:
+ at = _at(callee.idx)
+ case *_nodeBracketExpression:
+ at = _at(callee.idx)
+ }
+
+ frame := _frame{
+ callee: name,
+ file: self.scope.frame.file,
+ }
+
+ if !vl.IsFunction() {
+ if name == "" {
+ // FIXME Maybe typeof?
+ panic(rt.panicTypeError("%v is not a function", vl, at))
+ }
+ panic(rt.panicTypeError("'%s' is not a function", name, at))
+ }
+
+ self.scope.frame.offset = int(at)
+
+ return vl._object().call(this, argumentList, eval, frame)
+}
+
+func (self *_runtime) cmpl_evaluate_nodeConditionalExpression(node *_nodeConditionalExpression) Value {
+ test := self.cmpl_evaluate_nodeExpression(node.test)
+ testValue := test.resolve()
+ if testValue.bool() {
+ return self.cmpl_evaluate_nodeExpression(node.consequent)
+ }
+ return self.cmpl_evaluate_nodeExpression(node.alternate)
+}
+
+func (self *_runtime) cmpl_evaluate_nodeDotExpression(node *_nodeDotExpression) Value {
+ target := self.cmpl_evaluate_nodeExpression(node.left)
+ targetValue := target.resolve()
+ // TODO Pass in base value as-is, and defer toObject till later?
+ object, err := self.objectCoerce(targetValue)
+ if err != nil {
+ panic(self.panicTypeError("Cannot access member '%s' of %s", node.identifier, err.Error(), _at(node.idx)))
+ }
+ return toValue(newPropertyReference(self, object, node.identifier, false, _at(node.idx)))
+}
+
+func (self *_runtime) cmpl_evaluate_nodeNewExpression(node *_nodeNewExpression) Value {
+ rt := self
+ callee := self.cmpl_evaluate_nodeExpression(node.callee)
+
+ argumentList := []Value{}
+ for _, argumentNode := range node.argumentList {
+ argumentList = append(argumentList, self.cmpl_evaluate_nodeExpression(argumentNode).resolve())
+ }
+
+ rf := callee.reference()
+ vl := callee.resolve()
+
+ name := ""
+ if rf != nil {
+ switch rf := rf.(type) {
+ case *_propertyReference:
+ name = rf.name
+ case *_stashReference:
+ name = rf.name
+ default:
+ panic(rt.panicTypeError("Here be dragons"))
+ }
+ }
+
+ at := _at(-1)
+ switch callee := node.callee.(type) {
+ case *_nodeIdentifier:
+ at = _at(callee.idx)
+ case *_nodeDotExpression:
+ at = _at(callee.idx)
+ case *_nodeBracketExpression:
+ at = _at(callee.idx)
+ }
+
+ if !vl.IsFunction() {
+ if name == "" {
+ // FIXME Maybe typeof?
+ panic(rt.panicTypeError("%v is not a function", vl, at))
+ }
+ panic(rt.panicTypeError("'%s' is not a function", name, at))
+ }
+
+ self.scope.frame.offset = int(at)
+
+ return vl._object().construct(argumentList)
+}
+
+func (self *_runtime) cmpl_evaluate_nodeObjectLiteral(node *_nodeObjectLiteral) Value {
+
+ result := self.newObject()
+
+ for _, property := range node.value {
+ switch property.kind {
+ case "value":
+ result.defineProperty(property.key, self.cmpl_evaluate_nodeExpression(property.value).resolve(), 0111, false)
+ case "get":
+ getter := self.newNodeFunction(property.value.(*_nodeFunctionLiteral), self.scope.lexical)
+ descriptor := _property{}
+ descriptor.mode = 0211
+ descriptor.value = _propertyGetSet{getter, nil}
+ result.defineOwnProperty(property.key, descriptor, false)
+ case "set":
+ setter := self.newNodeFunction(property.value.(*_nodeFunctionLiteral), self.scope.lexical)
+ descriptor := _property{}
+ descriptor.mode = 0211
+ descriptor.value = _propertyGetSet{nil, setter}
+ result.defineOwnProperty(property.key, descriptor, false)
+ default:
+ panic(fmt.Errorf("Here be dragons: evaluate_nodeObjectLiteral: invalid property.Kind: %v", property.kind))
+ }
+ }
+
+ return toValue_object(result)
+}
+
+func (self *_runtime) cmpl_evaluate_nodeSequenceExpression(node *_nodeSequenceExpression) Value {
+ var result Value
+ for _, node := range node.sequence {
+ result = self.cmpl_evaluate_nodeExpression(node)
+ result = result.resolve()
+ }
+ return result
+}
+
+func (self *_runtime) cmpl_evaluate_nodeUnaryExpression(node *_nodeUnaryExpression) Value {
+
+ target := self.cmpl_evaluate_nodeExpression(node.operand)
+ switch node.operator {
+ case token.TYPEOF, token.DELETE:
+ if target.kind == valueReference && target.reference().invalid() {
+ if node.operator == token.TYPEOF {
+ return toValue_string("undefined")
+ }
+ return trueValue
+ }
+ }
+
+ switch node.operator {
+ case token.NOT:
+ targetValue := target.resolve()
+ if targetValue.bool() {
+ return falseValue
+ }
+ return trueValue
+ case token.BITWISE_NOT:
+ targetValue := target.resolve()
+ integerValue := toInt32(targetValue)
+ return toValue_int32(^integerValue)
+ case token.PLUS:
+ targetValue := target.resolve()
+ return toValue_float64(targetValue.float64())
+ case token.MINUS:
+ targetValue := target.resolve()
+ value := targetValue.float64()
+ // TODO Test this
+ sign := float64(-1)
+ if math.Signbit(value) {
+ sign = 1
+ }
+ return toValue_float64(math.Copysign(value, sign))
+ case token.INCREMENT:
+ targetValue := target.resolve()
+ if node.postfix {
+ // Postfix++
+ oldValue := targetValue.float64()
+ newValue := toValue_float64(+1 + oldValue)
+ self.putValue(target.reference(), newValue)
+ return toValue_float64(oldValue)
+ } else {
+ // ++Prefix
+ newValue := toValue_float64(+1 + targetValue.float64())
+ self.putValue(target.reference(), newValue)
+ return newValue
+ }
+ case token.DECREMENT:
+ targetValue := target.resolve()
+ if node.postfix {
+ // Postfix--
+ oldValue := targetValue.float64()
+ newValue := toValue_float64(-1 + oldValue)
+ self.putValue(target.reference(), newValue)
+ return toValue_float64(oldValue)
+ } else {
+ // --Prefix
+ newValue := toValue_float64(-1 + targetValue.float64())
+ self.putValue(target.reference(), newValue)
+ return newValue
+ }
+ case token.VOID:
+ target.resolve() // FIXME Side effect?
+ return Value{}
+ case token.DELETE:
+ reference := target.reference()
+ if reference == nil {
+ return trueValue
+ }
+ return toValue_bool(target.reference().delete())
+ case token.TYPEOF:
+ targetValue := target.resolve()
+ switch targetValue.kind {
+ case valueUndefined:
+ return toValue_string("undefined")
+ case valueNull:
+ return toValue_string("object")
+ case valueBoolean:
+ return toValue_string("boolean")
+ case valueNumber:
+ return toValue_string("number")
+ case valueString:
+ return toValue_string("string")
+ case valueObject:
+ if targetValue._object().isCall() {
+ return toValue_string("function")
+ }
+ return toValue_string("object")
+ default:
+ // FIXME ?
+ }
+ }
+
+ panic(hereBeDragons())
+}
+
+func (self *_runtime) cmpl_evaluate_nodeVariableExpression(node *_nodeVariableExpression) Value {
+ if node.initializer != nil {
+ // FIXME If reference is nil
+ left := getIdentifierReference(self, self.scope.lexical, node.name, false, _at(node.idx))
+ right := self.cmpl_evaluate_nodeExpression(node.initializer)
+ rightValue := right.resolve()
+
+ self.putValue(left, rightValue)
+ }
+ return toValue_string(node.name)
+}
diff --git a/vendor/github.com/robertkrimen/otto/cmpl_evaluate_statement.go b/vendor/github.com/robertkrimen/otto/cmpl_evaluate_statement.go
new file mode 100644
index 000000000..e16c6ac6c
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/cmpl_evaluate_statement.go
@@ -0,0 +1,424 @@
+package otto
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/robertkrimen/otto/token"
+)
+
+func (self *_runtime) cmpl_evaluate_nodeStatement(node _nodeStatement) Value {
+ // Allow interpreter interruption
+ // If the Interrupt channel is nil, then
+ // we avoid runtime.Gosched() overhead (if any)
+ // FIXME: Test this
+ if self.otto.Interrupt != nil {
+ runtime.Gosched()
+ select {
+ case value := <-self.otto.Interrupt:
+ value()
+ default:
+ }
+ }
+
+ switch node := node.(type) {
+
+ case *_nodeBlockStatement:
+ labels := self.labels
+ self.labels = nil
+
+ value := self.cmpl_evaluate_nodeStatementList(node.list)
+ switch value.kind {
+ case valueResult:
+ switch value.evaluateBreak(labels) {
+ case resultBreak:
+ return emptyValue
+ }
+ }
+ return value
+
+ case *_nodeBranchStatement:
+ target := node.label
+ switch node.branch { // FIXME Maybe node.kind? node.operator?
+ case token.BREAK:
+ return toValue(newBreakResult(target))
+ case token.CONTINUE:
+ return toValue(newContinueResult(target))
+ }
+
+ case *_nodeDebuggerStatement:
+ if self.debugger != nil {
+ self.debugger(self.otto)
+ }
+ return emptyValue // Nothing happens.
+
+ case *_nodeDoWhileStatement:
+ return self.cmpl_evaluate_nodeDoWhileStatement(node)
+
+ case *_nodeEmptyStatement:
+ return emptyValue
+
+ case *_nodeExpressionStatement:
+ return self.cmpl_evaluate_nodeExpression(node.expression)
+
+ case *_nodeForInStatement:
+ return self.cmpl_evaluate_nodeForInStatement(node)
+
+ case *_nodeForStatement:
+ return self.cmpl_evaluate_nodeForStatement(node)
+
+ case *_nodeIfStatement:
+ return self.cmpl_evaluate_nodeIfStatement(node)
+
+ case *_nodeLabelledStatement:
+ self.labels = append(self.labels, node.label)
+ defer func() {
+ if len(self.labels) > 0 {
+ self.labels = self.labels[:len(self.labels)-1] // Pop the label
+ } else {
+ self.labels = nil
+ }
+ }()
+ return self.cmpl_evaluate_nodeStatement(node.statement)
+
+ case *_nodeReturnStatement:
+ if node.argument != nil {
+ return toValue(newReturnResult(self.cmpl_evaluate_nodeExpression(node.argument).resolve()))
+ }
+ return toValue(newReturnResult(Value{}))
+
+ case *_nodeSwitchStatement:
+ return self.cmpl_evaluate_nodeSwitchStatement(node)
+
+ case *_nodeThrowStatement:
+ value := self.cmpl_evaluate_nodeExpression(node.argument).resolve()
+ panic(newException(value))
+
+ case *_nodeTryStatement:
+ return self.cmpl_evaluate_nodeTryStatement(node)
+
+ case *_nodeVariableStatement:
+ // Variables are already defined, this is initialization only
+ for _, variable := range node.list {
+ self.cmpl_evaluate_nodeVariableExpression(variable.(*_nodeVariableExpression))
+ }
+ return emptyValue
+
+ case *_nodeWhileStatement:
+ return self.cmpl_evaluate_nodeWhileStatement(node)
+
+ case *_nodeWithStatement:
+ return self.cmpl_evaluate_nodeWithStatement(node)
+
+ }
+
+ panic(fmt.Errorf("Here be dragons: evaluate_nodeStatement(%T)", node))
+}
+
+func (self *_runtime) cmpl_evaluate_nodeStatementList(list []_nodeStatement) Value {
+ var result Value
+ for _, node := range list {
+ value := self.cmpl_evaluate_nodeStatement(node)
+ switch value.kind {
+ case valueResult:
+ return value
+ case valueEmpty:
+ default:
+ // We have getValue here to (for example) trigger a
+ // ReferenceError (of the not defined variety)
+ // Not sure if this is the best way to error out early
+ // for such errors or if there is a better way
+ // TODO Do we still need this?
+ result = value.resolve()
+ }
+ }
+ return result
+}
+
+func (self *_runtime) cmpl_evaluate_nodeDoWhileStatement(node *_nodeDoWhileStatement) Value {
+
+ labels := append(self.labels, "")
+ self.labels = nil
+
+ test := node.test
+
+ result := emptyValue
+resultBreak:
+ for {
+ for _, node := range node.body {
+ value := self.cmpl_evaluate_nodeStatement(node)
+ switch value.kind {
+ case valueResult:
+ switch value.evaluateBreakContinue(labels) {
+ case resultReturn:
+ return value
+ case resultBreak:
+ break resultBreak
+ case resultContinue:
+ goto resultContinue
+ }
+ case valueEmpty:
+ default:
+ result = value
+ }
+ }
+ resultContinue:
+ if !self.cmpl_evaluate_nodeExpression(test).resolve().bool() {
+ // Stahp: do ... while (false)
+ break
+ }
+ }
+ return result
+}
+
+func (self *_runtime) cmpl_evaluate_nodeForInStatement(node *_nodeForInStatement) Value {
+
+ labels := append(self.labels, "")
+ self.labels = nil
+
+ source := self.cmpl_evaluate_nodeExpression(node.source)
+ sourceValue := source.resolve()
+
+ switch sourceValue.kind {
+ case valueUndefined, valueNull:
+ return emptyValue
+ }
+
+ sourceObject := self.toObject(sourceValue)
+
+ into := node.into
+ body := node.body
+
+ result := emptyValue
+ object := sourceObject
+ for object != nil {
+ enumerateValue := emptyValue
+ object.enumerate(false, func(name string) bool {
+ into := self.cmpl_evaluate_nodeExpression(into)
+ // In the case of: for (var abc in def) ...
+ if into.reference() == nil {
+ identifier := into.string()
+ // TODO Should be true or false (strictness) depending on context
+ into = toValue(getIdentifierReference(self, self.scope.lexical, identifier, false, -1))
+ }
+ self.putValue(into.reference(), toValue_string(name))
+ for _, node := range body {
+ value := self.cmpl_evaluate_nodeStatement(node)
+ switch value.kind {
+ case valueResult:
+ switch value.evaluateBreakContinue(labels) {
+ case resultReturn:
+ enumerateValue = value
+ return false
+ case resultBreak:
+ object = nil
+ return false
+ case resultContinue:
+ return true
+ }
+ case valueEmpty:
+ default:
+ enumerateValue = value
+ }
+ }
+ return true
+ })
+ if object == nil {
+ break
+ }
+ object = object.prototype
+ if !enumerateValue.isEmpty() {
+ result = enumerateValue
+ }
+ }
+ return result
+}
+
+func (self *_runtime) cmpl_evaluate_nodeForStatement(node *_nodeForStatement) Value {
+
+ labels := append(self.labels, "")
+ self.labels = nil
+
+ initializer := node.initializer
+ test := node.test
+ update := node.update
+ body := node.body
+
+ if initializer != nil {
+ initialResult := self.cmpl_evaluate_nodeExpression(initializer)
+ initialResult.resolve() // Side-effect trigger
+ }
+
+ result := emptyValue
+resultBreak:
+ for {
+ if test != nil {
+ testResult := self.cmpl_evaluate_nodeExpression(test)
+ testResultValue := testResult.resolve()
+ if testResultValue.bool() == false {
+ break
+ }
+ }
+ for _, node := range body {
+ value := self.cmpl_evaluate_nodeStatement(node)
+ switch value.kind {
+ case valueResult:
+ switch value.evaluateBreakContinue(labels) {
+ case resultReturn:
+ return value
+ case resultBreak:
+ break resultBreak
+ case resultContinue:
+ goto resultContinue
+ }
+ case valueEmpty:
+ default:
+ result = value
+ }
+ }
+ resultContinue:
+ if update != nil {
+ updateResult := self.cmpl_evaluate_nodeExpression(update)
+ updateResult.resolve() // Side-effect trigger
+ }
+ }
+ return result
+}
+
+func (self *_runtime) cmpl_evaluate_nodeIfStatement(node *_nodeIfStatement) Value {
+ test := self.cmpl_evaluate_nodeExpression(node.test)
+ testValue := test.resolve()
+ if testValue.bool() {
+ return self.cmpl_evaluate_nodeStatement(node.consequent)
+ } else if node.alternate != nil {
+ return self.cmpl_evaluate_nodeStatement(node.alternate)
+ }
+
+ return emptyValue
+}
+
+func (self *_runtime) cmpl_evaluate_nodeSwitchStatement(node *_nodeSwitchStatement) Value {
+
+ labels := append(self.labels, "")
+ self.labels = nil
+
+ discriminantResult := self.cmpl_evaluate_nodeExpression(node.discriminant)
+ target := node.default_
+
+ for index, clause := range node.body {
+ test := clause.test
+ if test != nil {
+ if self.calculateComparison(token.STRICT_EQUAL, discriminantResult, self.cmpl_evaluate_nodeExpression(test)) {
+ target = index
+ break
+ }
+ }
+ }
+
+ result := emptyValue
+ if target != -1 {
+ for _, clause := range node.body[target:] {
+ for _, statement := range clause.consequent {
+ value := self.cmpl_evaluate_nodeStatement(statement)
+ switch value.kind {
+ case valueResult:
+ switch value.evaluateBreak(labels) {
+ case resultReturn:
+ return value
+ case resultBreak:
+ return emptyValue
+ }
+ case valueEmpty:
+ default:
+ result = value
+ }
+ }
+ }
+ }
+
+ return result
+}
+
+func (self *_runtime) cmpl_evaluate_nodeTryStatement(node *_nodeTryStatement) Value {
+ tryCatchValue, exception := self.tryCatchEvaluate(func() Value {
+ return self.cmpl_evaluate_nodeStatement(node.body)
+ })
+
+ if exception && node.catch != nil {
+ outer := self.scope.lexical
+ self.scope.lexical = self.newDeclarationStash(outer)
+ defer func() {
+ self.scope.lexical = outer
+ }()
+ // TODO If necessary, convert TypeError<runtime> => TypeError
+ // That, is, such errors can be thrown despite not being JavaScript "native"
+ // strict = false
+ self.scope.lexical.setValue(node.catch.parameter, tryCatchValue, false)
+
+ // FIXME node.CatchParameter
+ // FIXME node.Catch
+ tryCatchValue, exception = self.tryCatchEvaluate(func() Value {
+ return self.cmpl_evaluate_nodeStatement(node.catch.body)
+ })
+ }
+
+ if node.finally != nil {
+ finallyValue := self.cmpl_evaluate_nodeStatement(node.finally)
+ if finallyValue.kind == valueResult {
+ return finallyValue
+ }
+ }
+
+ if exception {
+ panic(newException(tryCatchValue))
+ }
+
+ return tryCatchValue
+}
+
+func (self *_runtime) cmpl_evaluate_nodeWhileStatement(node *_nodeWhileStatement) Value {
+
+ test := node.test
+ body := node.body
+ labels := append(self.labels, "")
+ self.labels = nil
+
+ result := emptyValue
+resultBreakContinue:
+ for {
+ if !self.cmpl_evaluate_nodeExpression(test).resolve().bool() {
+ // Stahp: while (false) ...
+ break
+ }
+ for _, node := range body {
+ value := self.cmpl_evaluate_nodeStatement(node)
+ switch value.kind {
+ case valueResult:
+ switch value.evaluateBreakContinue(labels) {
+ case resultReturn:
+ return value
+ case resultBreak:
+ break resultBreakContinue
+ case resultContinue:
+ continue resultBreakContinue
+ }
+ case valueEmpty:
+ default:
+ result = value
+ }
+ }
+ }
+ return result
+}
+
+func (self *_runtime) cmpl_evaluate_nodeWithStatement(node *_nodeWithStatement) Value {
+ object := self.cmpl_evaluate_nodeExpression(node.object)
+ outer := self.scope.lexical
+ lexical := self.newObjectStash(self.toObject(object.resolve()), outer)
+ self.scope.lexical = lexical
+ defer func() {
+ self.scope.lexical = outer
+ }()
+
+ return self.cmpl_evaluate_nodeStatement(node.body)
+}
diff --git a/vendor/github.com/robertkrimen/otto/cmpl_parse.go b/vendor/github.com/robertkrimen/otto/cmpl_parse.go
new file mode 100644
index 000000000..dc5baa12a
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/cmpl_parse.go
@@ -0,0 +1,656 @@
+package otto
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/robertkrimen/otto/ast"
+ "github.com/robertkrimen/otto/file"
+ "github.com/robertkrimen/otto/token"
+)
+
+var trueLiteral = &_nodeLiteral{value: toValue_bool(true)}
+var falseLiteral = &_nodeLiteral{value: toValue_bool(false)}
+var nullLiteral = &_nodeLiteral{value: nullValue}
+var emptyStatement = &_nodeEmptyStatement{}
+
+func (cmpl *_compiler) parseExpression(in ast.Expression) _nodeExpression {
+ if in == nil {
+ return nil
+ }
+
+ switch in := in.(type) {
+
+ case *ast.ArrayLiteral:
+ out := &_nodeArrayLiteral{
+ value: make([]_nodeExpression, len(in.Value)),
+ }
+ for i, value := range in.Value {
+ out.value[i] = cmpl.parseExpression(value)
+ }
+ return out
+
+ case *ast.AssignExpression:
+ return &_nodeAssignExpression{
+ operator: in.Operator,
+ left: cmpl.parseExpression(in.Left),
+ right: cmpl.parseExpression(in.Right),
+ }
+
+ case *ast.BinaryExpression:
+ return &_nodeBinaryExpression{
+ operator: in.Operator,
+ left: cmpl.parseExpression(in.Left),
+ right: cmpl.parseExpression(in.Right),
+ comparison: in.Comparison,
+ }
+
+ case *ast.BooleanLiteral:
+ if in.Value {
+ return trueLiteral
+ }
+ return falseLiteral
+
+ case *ast.BracketExpression:
+ return &_nodeBracketExpression{
+ idx: in.Left.Idx0(),
+ left: cmpl.parseExpression(in.Left),
+ member: cmpl.parseExpression(in.Member),
+ }
+
+ case *ast.CallExpression:
+ out := &_nodeCallExpression{
+ callee: cmpl.parseExpression(in.Callee),
+ argumentList: make([]_nodeExpression, len(in.ArgumentList)),
+ }
+ for i, value := range in.ArgumentList {
+ out.argumentList[i] = cmpl.parseExpression(value)
+ }
+ return out
+
+ case *ast.ConditionalExpression:
+ return &_nodeConditionalExpression{
+ test: cmpl.parseExpression(in.Test),
+ consequent: cmpl.parseExpression(in.Consequent),
+ alternate: cmpl.parseExpression(in.Alternate),
+ }
+
+ case *ast.DotExpression:
+ return &_nodeDotExpression{
+ idx: in.Left.Idx0(),
+ left: cmpl.parseExpression(in.Left),
+ identifier: in.Identifier.Name,
+ }
+
+ case *ast.EmptyExpression:
+ return nil
+
+ case *ast.FunctionLiteral:
+ name := ""
+ if in.Name != nil {
+ name = in.Name.Name
+ }
+ out := &_nodeFunctionLiteral{
+ name: name,
+ body: cmpl.parseStatement(in.Body),
+ source: in.Source,
+ file: cmpl.file,
+ }
+ if in.ParameterList != nil {
+ list := in.ParameterList.List
+ out.parameterList = make([]string, len(list))
+ for i, value := range list {
+ out.parameterList[i] = value.Name
+ }
+ }
+ for _, value := range in.DeclarationList {
+ switch value := value.(type) {
+ case *ast.FunctionDeclaration:
+ out.functionList = append(out.functionList, cmpl.parseExpression(value.Function).(*_nodeFunctionLiteral))
+ case *ast.VariableDeclaration:
+ for _, value := range value.List {
+ out.varList = append(out.varList, value.Name)
+ }
+ default:
+ panic(fmt.Errorf("Here be dragons: parseProgram.declaration(%T)", value))
+ }
+ }
+ return out
+
+ case *ast.Identifier:
+ return &_nodeIdentifier{
+ idx: in.Idx,
+ name: in.Name,
+ }
+
+ case *ast.NewExpression:
+ out := &_nodeNewExpression{
+ callee: cmpl.parseExpression(in.Callee),
+ argumentList: make([]_nodeExpression, len(in.ArgumentList)),
+ }
+ for i, value := range in.ArgumentList {
+ out.argumentList[i] = cmpl.parseExpression(value)
+ }
+ return out
+
+ case *ast.NullLiteral:
+ return nullLiteral
+
+ case *ast.NumberLiteral:
+ return &_nodeLiteral{
+ value: toValue(in.Value),
+ }
+
+ case *ast.ObjectLiteral:
+ out := &_nodeObjectLiteral{
+ value: make([]_nodeProperty, len(in.Value)),
+ }
+ for i, value := range in.Value {
+ out.value[i] = _nodeProperty{
+ key: value.Key,
+ kind: value.Kind,
+ value: cmpl.parseExpression(value.Value),
+ }
+ }
+ return out
+
+ case *ast.RegExpLiteral:
+ return &_nodeRegExpLiteral{
+ flags: in.Flags,
+ pattern: in.Pattern,
+ }
+
+ case *ast.SequenceExpression:
+ out := &_nodeSequenceExpression{
+ sequence: make([]_nodeExpression, len(in.Sequence)),
+ }
+ for i, value := range in.Sequence {
+ out.sequence[i] = cmpl.parseExpression(value)
+ }
+ return out
+
+ case *ast.StringLiteral:
+ return &_nodeLiteral{
+ value: toValue_string(in.Value),
+ }
+
+ case *ast.ThisExpression:
+ return &_nodeThisExpression{}
+
+ case *ast.UnaryExpression:
+ return &_nodeUnaryExpression{
+ operator: in.Operator,
+ operand: cmpl.parseExpression(in.Operand),
+ postfix: in.Postfix,
+ }
+
+ case *ast.VariableExpression:
+ return &_nodeVariableExpression{
+ idx: in.Idx0(),
+ name: in.Name,
+ initializer: cmpl.parseExpression(in.Initializer),
+ }
+
+ }
+
+ panic(fmt.Errorf("Here be dragons: cmpl.parseExpression(%T)", in))
+}
+
+func (cmpl *_compiler) parseStatement(in ast.Statement) _nodeStatement {
+ if in == nil {
+ return nil
+ }
+
+ switch in := in.(type) {
+
+ case *ast.BlockStatement:
+ out := &_nodeBlockStatement{
+ list: make([]_nodeStatement, len(in.List)),
+ }
+ for i, value := range in.List {
+ out.list[i] = cmpl.parseStatement(value)
+ }
+ return out
+
+ case *ast.BranchStatement:
+ out := &_nodeBranchStatement{
+ branch: in.Token,
+ }
+ if in.Label != nil {
+ out.label = in.Label.Name
+ }
+ return out
+
+ case *ast.DebuggerStatement:
+ return &_nodeDebuggerStatement{}
+
+ case *ast.DoWhileStatement:
+ out := &_nodeDoWhileStatement{
+ test: cmpl.parseExpression(in.Test),
+ }
+ body := cmpl.parseStatement(in.Body)
+ if block, ok := body.(*_nodeBlockStatement); ok {
+ out.body = block.list
+ } else {
+ out.body = append(out.body, body)
+ }
+ return out
+
+ case *ast.EmptyStatement:
+ return emptyStatement
+
+ case *ast.ExpressionStatement:
+ return &_nodeExpressionStatement{
+ expression: cmpl.parseExpression(in.Expression),
+ }
+
+ case *ast.ForInStatement:
+ out := &_nodeForInStatement{
+ into: cmpl.parseExpression(in.Into),
+ source: cmpl.parseExpression(in.Source),
+ }
+ body := cmpl.parseStatement(in.Body)
+ if block, ok := body.(*_nodeBlockStatement); ok {
+ out.body = block.list
+ } else {
+ out.body = append(out.body, body)
+ }
+ return out
+
+ case *ast.ForStatement:
+ out := &_nodeForStatement{
+ initializer: cmpl.parseExpression(in.Initializer),
+ update: cmpl.parseExpression(in.Update),
+ test: cmpl.parseExpression(in.Test),
+ }
+ body := cmpl.parseStatement(in.Body)
+ if block, ok := body.(*_nodeBlockStatement); ok {
+ out.body = block.list
+ } else {
+ out.body = append(out.body, body)
+ }
+ return out
+
+ case *ast.FunctionStatement:
+ return emptyStatement
+
+ case *ast.IfStatement:
+ return &_nodeIfStatement{
+ test: cmpl.parseExpression(in.Test),
+ consequent: cmpl.parseStatement(in.Consequent),
+ alternate: cmpl.parseStatement(in.Alternate),
+ }
+
+ case *ast.LabelledStatement:
+ return &_nodeLabelledStatement{
+ label: in.Label.Name,
+ statement: cmpl.parseStatement(in.Statement),
+ }
+
+ case *ast.ReturnStatement:
+ return &_nodeReturnStatement{
+ argument: cmpl.parseExpression(in.Argument),
+ }
+
+ case *ast.SwitchStatement:
+ out := &_nodeSwitchStatement{
+ discriminant: cmpl.parseExpression(in.Discriminant),
+ default_: in.Default,
+ body: make([]*_nodeCaseStatement, len(in.Body)),
+ }
+ for i, clause := range in.Body {
+ out.body[i] = &_nodeCaseStatement{
+ test: cmpl.parseExpression(clause.Test),
+ consequent: make([]_nodeStatement, len(clause.Consequent)),
+ }
+ for j, value := range clause.Consequent {
+ out.body[i].consequent[j] = cmpl.parseStatement(value)
+ }
+ }
+ return out
+
+ case *ast.ThrowStatement:
+ return &_nodeThrowStatement{
+ argument: cmpl.parseExpression(in.Argument),
+ }
+
+ case *ast.TryStatement:
+ out := &_nodeTryStatement{
+ body: cmpl.parseStatement(in.Body),
+ finally: cmpl.parseStatement(in.Finally),
+ }
+ if in.Catch != nil {
+ out.catch = &_nodeCatchStatement{
+ parameter: in.Catch.Parameter.Name,
+ body: cmpl.parseStatement(in.Catch.Body),
+ }
+ }
+ return out
+
+ case *ast.VariableStatement:
+ out := &_nodeVariableStatement{
+ list: make([]_nodeExpression, len(in.List)),
+ }
+ for i, value := range in.List {
+ out.list[i] = cmpl.parseExpression(value)
+ }
+ return out
+
+ case *ast.WhileStatement:
+ out := &_nodeWhileStatement{
+ test: cmpl.parseExpression(in.Test),
+ }
+ body := cmpl.parseStatement(in.Body)
+ if block, ok := body.(*_nodeBlockStatement); ok {
+ out.body = block.list
+ } else {
+ out.body = append(out.body, body)
+ }
+ return out
+
+ case *ast.WithStatement:
+ return &_nodeWithStatement{
+ object: cmpl.parseExpression(in.Object),
+ body: cmpl.parseStatement(in.Body),
+ }
+
+ }
+
+ panic(fmt.Errorf("Here be dragons: cmpl.parseStatement(%T)", in))
+}
+
+func cmpl_parse(in *ast.Program) *_nodeProgram {
+ cmpl := _compiler{
+ program: in,
+ }
+ return cmpl.parse()
+}
+
+func (cmpl *_compiler) _parse(in *ast.Program) *_nodeProgram {
+ out := &_nodeProgram{
+ body: make([]_nodeStatement, len(in.Body)),
+ file: in.File,
+ }
+ for i, value := range in.Body {
+ out.body[i] = cmpl.parseStatement(value)
+ }
+ for _, value := range in.DeclarationList {
+ switch value := value.(type) {
+ case *ast.FunctionDeclaration:
+ out.functionList = append(out.functionList, cmpl.parseExpression(value.Function).(*_nodeFunctionLiteral))
+ case *ast.VariableDeclaration:
+ for _, value := range value.List {
+ out.varList = append(out.varList, value.Name)
+ }
+ default:
+ panic(fmt.Errorf("Here be dragons: cmpl.parseProgram.DeclarationList(%T)", value))
+ }
+ }
+ return out
+}
+
+type _nodeProgram struct {
+ body []_nodeStatement
+
+ varList []string
+ functionList []*_nodeFunctionLiteral
+
+ variableList []_nodeDeclaration
+
+ file *file.File
+}
+
+type _nodeDeclaration struct {
+ name string
+ definition _node
+}
+
+type _node interface {
+}
+
+type (
+ _nodeExpression interface {
+ _node
+ _expressionNode()
+ }
+
+ _nodeArrayLiteral struct {
+ value []_nodeExpression
+ }
+
+ _nodeAssignExpression struct {
+ operator token.Token
+ left _nodeExpression
+ right _nodeExpression
+ }
+
+ _nodeBinaryExpression struct {
+ operator token.Token
+ left _nodeExpression
+ right _nodeExpression
+ comparison bool
+ }
+
+ _nodeBracketExpression struct {
+ idx file.Idx
+ left _nodeExpression
+ member _nodeExpression
+ }
+
+ _nodeCallExpression struct {
+ callee _nodeExpression
+ argumentList []_nodeExpression
+ }
+
+ _nodeConditionalExpression struct {
+ test _nodeExpression
+ consequent _nodeExpression
+ alternate _nodeExpression
+ }
+
+ _nodeDotExpression struct {
+ idx file.Idx
+ left _nodeExpression
+ identifier string
+ }
+
+ _nodeFunctionLiteral struct {
+ name string
+ body _nodeStatement
+ source string
+ parameterList []string
+ varList []string
+ functionList []*_nodeFunctionLiteral
+ file *file.File
+ }
+
+ _nodeIdentifier struct {
+ idx file.Idx
+ name string
+ }
+
+ _nodeLiteral struct {
+ value Value
+ }
+
+ _nodeNewExpression struct {
+ callee _nodeExpression
+ argumentList []_nodeExpression
+ }
+
+ _nodeObjectLiteral struct {
+ value []_nodeProperty
+ }
+
+ _nodeProperty struct {
+ key string
+ kind string
+ value _nodeExpression
+ }
+
+ _nodeRegExpLiteral struct {
+ flags string
+ pattern string // Value?
+ regexp *regexp.Regexp
+ }
+
+ _nodeSequenceExpression struct {
+ sequence []_nodeExpression
+ }
+
+ _nodeThisExpression struct {
+ }
+
+ _nodeUnaryExpression struct {
+ operator token.Token
+ operand _nodeExpression
+ postfix bool
+ }
+
+ _nodeVariableExpression struct {
+ idx file.Idx
+ name string
+ initializer _nodeExpression
+ }
+)
+
+type (
+ _nodeStatement interface {
+ _node
+ _statementNode()
+ }
+
+ _nodeBlockStatement struct {
+ list []_nodeStatement
+ }
+
+ _nodeBranchStatement struct {
+ branch token.Token
+ label string
+ }
+
+ _nodeCaseStatement struct {
+ test _nodeExpression
+ consequent []_nodeStatement
+ }
+
+ _nodeCatchStatement struct {
+ parameter string
+ body _nodeStatement
+ }
+
+ _nodeDebuggerStatement struct {
+ }
+
+ _nodeDoWhileStatement struct {
+ test _nodeExpression
+ body []_nodeStatement
+ }
+
+ _nodeEmptyStatement struct {
+ }
+
+ _nodeExpressionStatement struct {
+ expression _nodeExpression
+ }
+
+ _nodeForInStatement struct {
+ into _nodeExpression
+ source _nodeExpression
+ body []_nodeStatement
+ }
+
+ _nodeForStatement struct {
+ initializer _nodeExpression
+ update _nodeExpression
+ test _nodeExpression
+ body []_nodeStatement
+ }
+
+ _nodeIfStatement struct {
+ test _nodeExpression
+ consequent _nodeStatement
+ alternate _nodeStatement
+ }
+
+ _nodeLabelledStatement struct {
+ label string
+ statement _nodeStatement
+ }
+
+ _nodeReturnStatement struct {
+ argument _nodeExpression
+ }
+
+ _nodeSwitchStatement struct {
+ discriminant _nodeExpression
+ default_ int
+ body []*_nodeCaseStatement
+ }
+
+ _nodeThrowStatement struct {
+ argument _nodeExpression
+ }
+
+ _nodeTryStatement struct {
+ body _nodeStatement
+ catch *_nodeCatchStatement
+ finally _nodeStatement
+ }
+
+ _nodeVariableStatement struct {
+ list []_nodeExpression
+ }
+
+ _nodeWhileStatement struct {
+ test _nodeExpression
+ body []_nodeStatement
+ }
+
+ _nodeWithStatement struct {
+ object _nodeExpression
+ body _nodeStatement
+ }
+)
+
+// _expressionNode
+
+func (*_nodeArrayLiteral) _expressionNode() {}
+func (*_nodeAssignExpression) _expressionNode() {}
+func (*_nodeBinaryExpression) _expressionNode() {}
+func (*_nodeBracketExpression) _expressionNode() {}
+func (*_nodeCallExpression) _expressionNode() {}
+func (*_nodeConditionalExpression) _expressionNode() {}
+func (*_nodeDotExpression) _expressionNode() {}
+func (*_nodeFunctionLiteral) _expressionNode() {}
+func (*_nodeIdentifier) _expressionNode() {}
+func (*_nodeLiteral) _expressionNode() {}
+func (*_nodeNewExpression) _expressionNode() {}
+func (*_nodeObjectLiteral) _expressionNode() {}
+func (*_nodeRegExpLiteral) _expressionNode() {}
+func (*_nodeSequenceExpression) _expressionNode() {}
+func (*_nodeThisExpression) _expressionNode() {}
+func (*_nodeUnaryExpression) _expressionNode() {}
+func (*_nodeVariableExpression) _expressionNode() {}
+
+// _statementNode
+
+func (*_nodeBlockStatement) _statementNode() {}
+func (*_nodeBranchStatement) _statementNode() {}
+func (*_nodeCaseStatement) _statementNode() {}
+func (*_nodeCatchStatement) _statementNode() {}
+func (*_nodeDebuggerStatement) _statementNode() {}
+func (*_nodeDoWhileStatement) _statementNode() {}
+func (*_nodeEmptyStatement) _statementNode() {}
+func (*_nodeExpressionStatement) _statementNode() {}
+func (*_nodeForInStatement) _statementNode() {}
+func (*_nodeForStatement) _statementNode() {}
+func (*_nodeIfStatement) _statementNode() {}
+func (*_nodeLabelledStatement) _statementNode() {}
+func (*_nodeReturnStatement) _statementNode() {}
+func (*_nodeSwitchStatement) _statementNode() {}
+func (*_nodeThrowStatement) _statementNode() {}
+func (*_nodeTryStatement) _statementNode() {}
+func (*_nodeVariableStatement) _statementNode() {}
+func (*_nodeWhileStatement) _statementNode() {}
+func (*_nodeWithStatement) _statementNode() {}
diff --git a/vendor/github.com/robertkrimen/otto/console.go b/vendor/github.com/robertkrimen/otto/console.go
new file mode 100644
index 000000000..948face77
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/console.go
@@ -0,0 +1,51 @@
+package otto
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+func formatForConsole(argumentList []Value) string {
+ output := []string{}
+ for _, argument := range argumentList {
+ output = append(output, fmt.Sprintf("%v", argument))
+ }
+ return strings.Join(output, " ")
+}
+
+func builtinConsole_log(call FunctionCall) Value {
+ fmt.Fprintln(os.Stdout, formatForConsole(call.ArgumentList))
+ return Value{}
+}
+
+func builtinConsole_error(call FunctionCall) Value {
+ fmt.Fprintln(os.Stdout, formatForConsole(call.ArgumentList))
+ return Value{}
+}
+
+// Nothing happens.
+func builtinConsole_dir(call FunctionCall) Value {
+ return Value{}
+}
+
+func builtinConsole_time(call FunctionCall) Value {
+ return Value{}
+}
+
+func builtinConsole_timeEnd(call FunctionCall) Value {
+ return Value{}
+}
+
+func builtinConsole_trace(call FunctionCall) Value {
+ return Value{}
+}
+
+func builtinConsole_assert(call FunctionCall) Value {
+ return Value{}
+}
+
+func (runtime *_runtime) newConsole() *_object {
+
+ return newConsoleObject(runtime)
+}
diff --git a/vendor/github.com/robertkrimen/otto/dbg.go b/vendor/github.com/robertkrimen/otto/dbg.go
new file mode 100644
index 000000000..51fbdc206
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/dbg.go
@@ -0,0 +1,9 @@
+// This file was AUTOMATICALLY GENERATED by dbg-import (smuggol) for github.com/robertkrimen/dbg
+
+package otto
+
+import (
+ Dbg "github.com/robertkrimen/otto/dbg"
+)
+
+var dbg, dbgf = Dbg.New()
diff --git a/vendor/github.com/robertkrimen/otto/dbg/dbg.go b/vendor/github.com/robertkrimen/otto/dbg/dbg.go
new file mode 100644
index 000000000..8c27fa293
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/dbg/dbg.go
@@ -0,0 +1,387 @@
+// This file was AUTOMATICALLY GENERATED by dbg-import (smuggol) from github.com/robertkrimen/dbg
+
+/*
+Package dbg is a println/printf/log-debugging utility library.
+
+ import (
+ Dbg "github.com/robertkrimen/dbg"
+ )
+
+ dbg, dbgf := Dbg.New()
+
+ dbg("Emit some debug stuff", []byte{120, 121, 122, 122, 121}, math.Pi)
+ # "2013/01/28 16:50:03 Emit some debug stuff [120 121 122 122 121] 3.141592653589793"
+
+ dbgf("With a %s formatting %.2f", "little", math.Pi)
+ # "2013/01/28 16:51:55 With a little formatting (3.14)"
+
+ dbgf("%/fatal//A fatal debug statement: should not be here")
+ # "A fatal debug statement: should not be here"
+ # ...and then, os.Exit(1)
+
+ dbgf("%/panic//Can also panic %s", "this")
+ # "Can also panic this"
+ # ...as a panic, equivalent to: panic("Can also panic this")
+
+ dbgf("Any %s arguments without a corresponding %%", "extra", "are treated like arguments to dbg()")
+ # "2013/01/28 17:14:40 Any extra arguments (without a corresponding %) are treated like arguments to dbg()"
+
+ dbgf("%d %d", 1, 2, 3, 4, 5)
+ # "2013/01/28 17:16:32 Another example: 1 2 3 4 5"
+
+ dbgf("%@: Include the function name for a little context (via %s)", "%@")
+ # "2013... github.com/robertkrimen/dbg.TestSynopsis: Include the function name for a little context (via %@)"
+
+By default, dbg uses log (log.Println, log.Printf, log.Panic, etc.) for output.
+However, you can also provide your own output destination by invoking dbg.New with
+a customization function:
+
+ import (
+ "bytes"
+ Dbg "github.com/robertkrimen/dbg"
+ "os"
+ )
+
+ # dbg to os.Stderr
+ dbg, dbgf := Dbg.New(func(dbgr *Dbgr) {
+ dbgr.SetOutput(os.Stderr)
+ })
+
+ # A slightly contrived example:
+ var buffer bytes.Buffer
+ dbg, dbgf := New(func(dbgr *Dbgr) {
+ dbgr.SetOutput(&buffer)
+ })
+
+*/
+package dbg
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+ "unicode"
+)
+
+type _frmt struct {
+ ctl string
+ format string
+ operandCount int
+ panic bool
+ fatal bool
+ check bool
+}
+
+var (
+ ctlTest = regexp.MustCompile(`^\s*%/`)
+ ctlScan = regexp.MustCompile(`%?/(panic|fatal|check)(?:\s|$)`)
+)
+
+func operandCount(format string) int {
+ count := 0
+ end := len(format)
+ for at := 0; at < end; {
+ for at < end && format[at] != '%' {
+ at++
+ }
+ at++
+ if at < end {
+ if format[at] != '%' && format[at] != '@' {
+ count++
+ }
+ at++
+ }
+ }
+ return count
+}
+
+func parseFormat(format string) (frmt _frmt) {
+ if ctlTest.MatchString(format) {
+ format = strings.TrimLeftFunc(format, unicode.IsSpace)
+ index := strings.Index(format, "//")
+ if index != -1 {
+ frmt.ctl = format[0:index]
+ format = format[index+2:] // Skip the second slash via +2 (instead of +1)
+ } else {
+ frmt.ctl = format
+ format = ""
+ }
+ for _, tmp := range ctlScan.FindAllStringSubmatch(frmt.ctl, -1) {
+ for _, value := range tmp[1:] {
+ switch value {
+ case "panic":
+ frmt.panic = true
+ case "fatal":
+ frmt.fatal = true
+ case "check":
+ frmt.check = true
+ }
+ }
+ }
+ }
+ frmt.format = format
+ frmt.operandCount = operandCount(format)
+ return
+}
+
+type Dbgr struct {
+ emit _emit
+}
+
+type DbgFunction func(values ...interface{})
+
+func NewDbgr() *Dbgr {
+ self := &Dbgr{}
+ return self
+}
+
+/*
+New will create and return a pair of debugging functions. You can customize where
+they output to by passing in an (optional) customization function:
+
+ import (
+ Dbg "github.com/robertkrimen/dbg"
+ "os"
+ )
+
+ # dbg to os.Stderr
+ dbg, dbgf := Dbg.New(func(dbgr *Dbgr) {
+ dbgr.SetOutput(os.Stderr)
+ })
+
+*/
+func New(options ...interface{}) (dbg DbgFunction, dbgf DbgFunction) {
+ dbgr := NewDbgr()
+ if len(options) > 0 {
+ if fn, ok := options[0].(func(*Dbgr)); ok {
+ fn(dbgr)
+ }
+ }
+ return dbgr.DbgDbgf()
+}
+
+func (self Dbgr) Dbg(values ...interface{}) {
+ self.getEmit().emit(_frmt{}, "", values...)
+}
+
+func (self Dbgr) Dbgf(values ...interface{}) {
+ self.dbgf(values...)
+}
+
+func (self Dbgr) DbgDbgf() (dbg DbgFunction, dbgf DbgFunction) {
+ dbg = func(vl ...interface{}) {
+ self.Dbg(vl...)
+ }
+ dbgf = func(vl ...interface{}) {
+ self.dbgf(vl...)
+ }
+ return dbg, dbgf // Redundant, but...
+}
+
+func (self Dbgr) dbgf(values ...interface{}) {
+
+ var frmt _frmt
+ if len(values) > 0 {
+ tmp := fmt.Sprint(values[0])
+ frmt = parseFormat(tmp)
+ values = values[1:]
+ }
+
+ buffer_f := bytes.Buffer{}
+ format := frmt.format
+ end := len(format)
+ for at := 0; at < end; {
+ last := at
+ for at < end && format[at] != '%' {
+ at++
+ }
+ if at > last {
+ buffer_f.WriteString(format[last:at])
+ }
+ if at >= end {
+ break
+ }
+ // format[at] == '%'
+ at++
+ // format[at] == ?
+ if format[at] == '@' {
+ depth := 2
+ pc, _, _, _ := runtime.Caller(depth)
+ name := runtime.FuncForPC(pc).Name()
+ buffer_f.WriteString(name)
+ } else {
+ buffer_f.WriteString(format[at-1 : at+1])
+ }
+ at++
+ }
+
+ //values_f := append([]interface{}{}, values[0:frmt.operandCount]...)
+ values_f := values[0:frmt.operandCount]
+ values_dbg := values[frmt.operandCount:]
+ if len(values_dbg) > 0 {
+ // Adjust frmt.format:
+ // (%v instead of %s because: frmt.check)
+ {
+ tmp := format
+ if len(tmp) > 0 {
+ if unicode.IsSpace(rune(tmp[len(tmp)-1])) {
+ buffer_f.WriteString("%v")
+ } else {
+ buffer_f.WriteString(" %v")
+ }
+ } else if frmt.check {
+ // Performing a check, so no output
+ } else {
+ buffer_f.WriteString("%v")
+ }
+ }
+
+ // Adjust values_f:
+ if !frmt.check {
+ tmp := []string{}
+ for _, value := range values_dbg {
+ tmp = append(tmp, fmt.Sprintf("%v", value))
+ }
+ // First, make a copy of values_f, so we avoid overwriting values_dbg when appending
+ values_f = append([]interface{}{}, values_f...)
+ values_f = append(values_f, strings.Join(tmp, " "))
+ }
+ }
+
+ format = buffer_f.String()
+ if frmt.check {
+ // We do not actually emit to the log, but panic if
+ // a non-nil value is detected (e.g. a non-nil error)
+ for _, value := range values_dbg {
+ if value != nil {
+ if format == "" {
+ panic(value)
+ } else {
+ panic(fmt.Sprintf(format, append(values_f, value)...))
+ }
+ }
+ }
+ } else {
+ self.getEmit().emit(frmt, format, values_f...)
+ }
+}
+
+// Idiot-proof &Dbgr{}, etc.
+func (self *Dbgr) getEmit() _emit {
+ if self.emit == nil {
+ self.emit = standardEmit()
+ }
+ return self.emit
+}
+
+// SetOutput will accept the following as a destination for output:
+//
+// *log.Logger Print*/Panic*/Fatal* of the logger
+// io.Writer -
+// nil Reset to the default output (os.Stderr)
+// "log" Print*/Panic*/Fatal* via the "log" package
+//
+func (self *Dbgr) SetOutput(output interface{}) {
+ if output == nil {
+ self.emit = standardEmit()
+ return
+ }
+ switch output := output.(type) {
+ case *log.Logger:
+ self.emit = _emitLogger{
+ logger: output,
+ }
+ return
+ case io.Writer:
+ self.emit = _emitWriter{
+ writer: output,
+ }
+ return
+ case string:
+ if output == "log" {
+ self.emit = _emitLog{}
+ return
+ }
+ }
+ panic(output)
+}
+
+// ======== //
+// = emit = //
+// ======== //
+
+func standardEmit() _emit {
+ return _emitWriter{
+ writer: os.Stderr,
+ }
+}
+
+func ln(tmp string) string {
+ length := len(tmp)
+ if length > 0 && tmp[length-1] != '\n' {
+ return tmp + "\n"
+ }
+ return tmp
+}
+
+type _emit interface {
+ emit(_frmt, string, ...interface{})
+}
+
+type _emitWriter struct {
+ writer io.Writer
+}
+
+func (self _emitWriter) emit(frmt _frmt, format string, values ...interface{}) {
+ if format == "" {
+ fmt.Fprintln(self.writer, values...)
+ } else {
+ if frmt.panic {
+ panic(fmt.Sprintf(format, values...))
+ }
+ fmt.Fprintf(self.writer, ln(format), values...)
+ if frmt.fatal {
+ os.Exit(1)
+ }
+ }
+}
+
+type _emitLogger struct {
+ logger *log.Logger
+}
+
+func (self _emitLogger) emit(frmt _frmt, format string, values ...interface{}) {
+ if format == "" {
+ self.logger.Println(values...)
+ } else {
+ if frmt.panic {
+ self.logger.Panicf(format, values...)
+ } else if frmt.fatal {
+ self.logger.Fatalf(format, values...)
+ } else {
+ self.logger.Printf(format, values...)
+ }
+ }
+}
+
+type _emitLog struct {
+}
+
+func (self _emitLog) emit(frmt _frmt, format string, values ...interface{}) {
+ if format == "" {
+ log.Println(values...)
+ } else {
+ if frmt.panic {
+ log.Panicf(format, values...)
+ } else if frmt.fatal {
+ log.Fatalf(format, values...)
+ } else {
+ log.Printf(format, values...)
+ }
+ }
+}
diff --git a/vendor/github.com/robertkrimen/otto/error.go b/vendor/github.com/robertkrimen/otto/error.go
new file mode 100644
index 000000000..41a5c10e7
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/error.go
@@ -0,0 +1,252 @@
+package otto
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/robertkrimen/otto/file"
+)
+
+type _exception struct {
+ value interface{}
+}
+
+func newException(value interface{}) *_exception {
+ return &_exception{
+ value: value,
+ }
+}
+
+func (self *_exception) eject() interface{} {
+ value := self.value
+ self.value = nil // Prevent Go from holding on to the value, whatever it is
+ return value
+}
+
+type _error struct {
+ name string
+ message string
+ trace []_frame
+
+ offset int
+}
+
+func (err _error) format() string {
+ if len(err.name) == 0 {
+ return err.message
+ }
+ if len(err.message) == 0 {
+ return err.name
+ }
+ return fmt.Sprintf("%s: %s", err.name, err.message)
+}
+
+func (err _error) formatWithStack() string {
+ str := err.format() + "\n"
+ for _, frame := range err.trace {
+ str += " at " + frame.location() + "\n"
+ }
+ return str
+}
+
+type _frame struct {
+ native bool
+ nativeFile string
+ nativeLine int
+ file *file.File
+ offset int
+ callee string
+}
+
+var (
+ nativeFrame = _frame{}
+)
+
+type _at int
+
+func (fr _frame) location() string {
+ str := "<unknown>"
+
+ switch {
+ case fr.native:
+ str = "<native code>"
+ if fr.nativeFile != "" && fr.nativeLine != 0 {
+ str = fmt.Sprintf("%s:%d", fr.nativeFile, fr.nativeLine)
+ }
+ case fr.file != nil:
+ if p := fr.file.Position(file.Idx(fr.offset)); p != nil {
+ path, line, column := p.Filename, p.Line, p.Column
+
+ if path == "" {
+ path = "<anonymous>"
+ }
+
+ str = fmt.Sprintf("%s:%d:%d", path, line, column)
+ }
+ }
+
+ if fr.callee != "" {
+ str = fmt.Sprintf("%s (%s)", fr.callee, str)
+ }
+
+ return str
+}
+
+// An Error represents a runtime error, e.g. a TypeError, a ReferenceError, etc.
+type Error struct {
+ _error
+}
+
+// Error returns a description of the error
+//
+// TypeError: 'def' is not a function
+//
+func (err Error) Error() string {
+ return err.format()
+}
+
+// String returns a description of the error and a trace of where the
+// error occurred.
+//
+// TypeError: 'def' is not a function
+// at xyz (<anonymous>:3:9)
+// at <anonymous>:7:1/
+//
+func (err Error) String() string {
+ return err.formatWithStack()
+}
+
+func (err _error) describe(format string, in ...interface{}) string {
+ return fmt.Sprintf(format, in...)
+}
+
+func (self _error) messageValue() Value {
+ if self.message == "" {
+ return Value{}
+ }
+ return toValue_string(self.message)
+}
+
+func (rt *_runtime) typeErrorResult(throw bool) bool {
+ if throw {
+ panic(rt.panicTypeError())
+ }
+ return false
+}
+
+func newError(rt *_runtime, name string, stackFramesToPop int, in ...interface{}) _error {
+ err := _error{
+ name: name,
+ offset: -1,
+ }
+ description := ""
+ length := len(in)
+
+ if rt != nil && rt.scope != nil {
+ scope := rt.scope
+
+ for i := 0; i < stackFramesToPop; i++ {
+ if scope.outer != nil {
+ scope = scope.outer
+ }
+ }
+
+ frame := scope.frame
+
+ if length > 0 {
+ if at, ok := in[length-1].(_at); ok {
+ in = in[0 : length-1]
+ if scope != nil {
+ frame.offset = int(at)
+ }
+ length--
+ }
+ if length > 0 {
+ description, in = in[0].(string), in[1:]
+ }
+ }
+
+ limit := rt.traceLimit
+
+ err.trace = append(err.trace, frame)
+ if scope != nil {
+ for scope = scope.outer; scope != nil; scope = scope.outer {
+ if limit--; limit == 0 {
+ break
+ }
+
+ if scope.frame.offset >= 0 {
+ err.trace = append(err.trace, scope.frame)
+ }
+ }
+ }
+ } else {
+ if length > 0 {
+ description, in = in[0].(string), in[1:]
+ }
+ }
+ err.message = err.describe(description, in...)
+
+ return err
+}
+
+func (rt *_runtime) panicTypeError(argumentList ...interface{}) *_exception {
+ return &_exception{
+ value: newError(rt, "TypeError", 0, argumentList...),
+ }
+}
+
+func (rt *_runtime) panicReferenceError(argumentList ...interface{}) *_exception {
+ return &_exception{
+ value: newError(rt, "ReferenceError", 0, argumentList...),
+ }
+}
+
+func (rt *_runtime) panicURIError(argumentList ...interface{}) *_exception {
+ return &_exception{
+ value: newError(rt, "URIError", 0, argumentList...),
+ }
+}
+
+func (rt *_runtime) panicSyntaxError(argumentList ...interface{}) *_exception {
+ return &_exception{
+ value: newError(rt, "SyntaxError", 0, argumentList...),
+ }
+}
+
+func (rt *_runtime) panicRangeError(argumentList ...interface{}) *_exception {
+ return &_exception{
+ value: newError(rt, "RangeError", 0, argumentList...),
+ }
+}
+
+func catchPanic(function func()) (err error) {
+ defer func() {
+ if caught := recover(); caught != nil {
+ if exception, ok := caught.(*_exception); ok {
+ caught = exception.eject()
+ }
+ switch caught := caught.(type) {
+ case *Error:
+ err = caught
+ return
+ case _error:
+ err = &Error{caught}
+ return
+ case Value:
+ if vl := caught._object(); vl != nil {
+ switch vl := vl.value.(type) {
+ case _error:
+ err = &Error{vl}
+ return
+ }
+ }
+ err = errors.New(caught.string())
+ return
+ }
+ panic(caught)
+ }
+ }()
+ function()
+ return nil
+}
diff --git a/vendor/github.com/robertkrimen/otto/evaluate.go b/vendor/github.com/robertkrimen/otto/evaluate.go
new file mode 100644
index 000000000..093054cc3
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/evaluate.go
@@ -0,0 +1,318 @@
+package otto
+
+import (
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/robertkrimen/otto/token"
+)
+
+func (self *_runtime) evaluateMultiply(left float64, right float64) Value {
+ // TODO 11.5.1
+ return Value{}
+}
+
+func (self *_runtime) evaluateDivide(left float64, right float64) Value {
+ if math.IsNaN(left) || math.IsNaN(right) {
+ return NaNValue()
+ }
+ if math.IsInf(left, 0) && math.IsInf(right, 0) {
+ return NaNValue()
+ }
+ if left == 0 && right == 0 {
+ return NaNValue()
+ }
+ if math.IsInf(left, 0) {
+ if math.Signbit(left) == math.Signbit(right) {
+ return positiveInfinityValue()
+ } else {
+ return negativeInfinityValue()
+ }
+ }
+ if math.IsInf(right, 0) {
+ if math.Signbit(left) == math.Signbit(right) {
+ return positiveZeroValue()
+ } else {
+ return negativeZeroValue()
+ }
+ }
+ if right == 0 {
+ if math.Signbit(left) == math.Signbit(right) {
+ return positiveInfinityValue()
+ } else {
+ return negativeInfinityValue()
+ }
+ }
+ return toValue_float64(left / right)
+}
+
+func (self *_runtime) evaluateModulo(left float64, right float64) Value {
+ // TODO 11.5.3
+ return Value{}
+}
+
+func (self *_runtime) calculateBinaryExpression(operator token.Token, left Value, right Value) Value {
+
+ leftValue := left.resolve()
+
+ switch operator {
+
+ // Additive
+ case token.PLUS:
+ leftValue = toPrimitive(leftValue)
+ rightValue := right.resolve()
+ rightValue = toPrimitive(rightValue)
+
+ if leftValue.IsString() || rightValue.IsString() {
+ return toValue_string(strings.Join([]string{leftValue.string(), rightValue.string()}, ""))
+ } else {
+ return toValue_float64(leftValue.float64() + rightValue.float64())
+ }
+ case token.MINUS:
+ rightValue := right.resolve()
+ return toValue_float64(leftValue.float64() - rightValue.float64())
+
+ // Multiplicative
+ case token.MULTIPLY:
+ rightValue := right.resolve()
+ return toValue_float64(leftValue.float64() * rightValue.float64())
+ case token.SLASH:
+ rightValue := right.resolve()
+ return self.evaluateDivide(leftValue.float64(), rightValue.float64())
+ case token.REMAINDER:
+ rightValue := right.resolve()
+ return toValue_float64(math.Mod(leftValue.float64(), rightValue.float64()))
+
+ // Logical
+ case token.LOGICAL_AND:
+ left := leftValue.bool()
+ if !left {
+ return falseValue
+ }
+ return toValue_bool(right.resolve().bool())
+ case token.LOGICAL_OR:
+ left := leftValue.bool()
+ if left {
+ return trueValue
+ }
+ return toValue_bool(right.resolve().bool())
+
+ // Bitwise
+ case token.AND:
+ rightValue := right.resolve()
+ return toValue_int32(toInt32(leftValue) & toInt32(rightValue))
+ case token.OR:
+ rightValue := right.resolve()
+ return toValue_int32(toInt32(leftValue) | toInt32(rightValue))
+ case token.EXCLUSIVE_OR:
+ rightValue := right.resolve()
+ return toValue_int32(toInt32(leftValue) ^ toInt32(rightValue))
+
+ // Shift
+ // (Masking of 0x1f is to restrict the shift to a maximum of 31 places)
+ case token.SHIFT_LEFT:
+ rightValue := right.resolve()
+ return toValue_int32(toInt32(leftValue) << (toUint32(rightValue) & 0x1f))
+ case token.SHIFT_RIGHT:
+ rightValue := right.resolve()
+ return toValue_int32(toInt32(leftValue) >> (toUint32(rightValue) & 0x1f))
+ case token.UNSIGNED_SHIFT_RIGHT:
+ rightValue := right.resolve()
+ // Shifting an unsigned integer is a logical shift
+ return toValue_uint32(toUint32(leftValue) >> (toUint32(rightValue) & 0x1f))
+
+ case token.INSTANCEOF:
+ rightValue := right.resolve()
+ if !rightValue.IsObject() {
+ panic(self.panicTypeError("Expecting a function in instanceof check, but got: %v", rightValue))
+ }
+ return toValue_bool(rightValue._object().hasInstance(leftValue))
+
+ case token.IN:
+ rightValue := right.resolve()
+ if !rightValue.IsObject() {
+ panic(self.panicTypeError())
+ }
+ return toValue_bool(rightValue._object().hasProperty(leftValue.string()))
+ }
+
+ panic(hereBeDragons(operator))
+}
+
+func valueKindDispatchKey(left _valueKind, right _valueKind) int {
+ return (int(left) << 2) + int(right)
+}
+
+var equalDispatch map[int](func(Value, Value) bool) = makeEqualDispatch()
+
+func makeEqualDispatch() map[int](func(Value, Value) bool) {
+ key := valueKindDispatchKey
+ return map[int](func(Value, Value) bool){
+
+ key(valueNumber, valueObject): func(x Value, y Value) bool { return x.float64() == y.float64() },
+ key(valueString, valueObject): func(x Value, y Value) bool { return x.float64() == y.float64() },
+ key(valueObject, valueNumber): func(x Value, y Value) bool { return x.float64() == y.float64() },
+ key(valueObject, valueString): func(x Value, y Value) bool { return x.float64() == y.float64() },
+ }
+}
+
+type _lessThanResult int
+
+const (
+ lessThanFalse _lessThanResult = iota
+ lessThanTrue
+ lessThanUndefined
+)
+
+func calculateLessThan(left Value, right Value, leftFirst bool) _lessThanResult {
+
+ x := Value{}
+ y := x
+
+ if leftFirst {
+ x = toNumberPrimitive(left)
+ y = toNumberPrimitive(right)
+ } else {
+ y = toNumberPrimitive(right)
+ x = toNumberPrimitive(left)
+ }
+
+ result := false
+ if x.kind != valueString || y.kind != valueString {
+ x, y := x.float64(), y.float64()
+ if math.IsNaN(x) || math.IsNaN(y) {
+ return lessThanUndefined
+ }
+ result = x < y
+ } else {
+ x, y := x.string(), y.string()
+ result = x < y
+ }
+
+ if result {
+ return lessThanTrue
+ }
+
+ return lessThanFalse
+}
+
+// FIXME Probably a map is not the most efficient way to do this
+var lessThanTable [4](map[_lessThanResult]bool) = [4](map[_lessThanResult]bool){
+ // <
+ map[_lessThanResult]bool{
+ lessThanFalse: false,
+ lessThanTrue: true,
+ lessThanUndefined: false,
+ },
+
+ // >
+ map[_lessThanResult]bool{
+ lessThanFalse: false,
+ lessThanTrue: true,
+ lessThanUndefined: false,
+ },
+
+ // <=
+ map[_lessThanResult]bool{
+ lessThanFalse: true,
+ lessThanTrue: false,
+ lessThanUndefined: false,
+ },
+
+ // >=
+ map[_lessThanResult]bool{
+ lessThanFalse: true,
+ lessThanTrue: false,
+ lessThanUndefined: false,
+ },
+}
+
+func (self *_runtime) calculateComparison(comparator token.Token, left Value, right Value) bool {
+
+ // FIXME Use strictEqualityComparison?
+ // TODO This might be redundant now (with regards to evaluateComparison)
+ x := left.resolve()
+ y := right.resolve()
+
+ kindEqualKind := false
+ result := true
+ negate := false
+
+ switch comparator {
+ case token.LESS:
+ result = lessThanTable[0][calculateLessThan(x, y, true)]
+ case token.GREATER:
+ result = lessThanTable[1][calculateLessThan(y, x, false)]
+ case token.LESS_OR_EQUAL:
+ result = lessThanTable[2][calculateLessThan(y, x, false)]
+ case token.GREATER_OR_EQUAL:
+ result = lessThanTable[3][calculateLessThan(x, y, true)]
+ case token.STRICT_NOT_EQUAL:
+ negate = true
+ fallthrough
+ case token.STRICT_EQUAL:
+ if x.kind != y.kind {
+ result = false
+ } else {
+ kindEqualKind = true
+ }
+ case token.NOT_EQUAL:
+ negate = true
+ fallthrough
+ case token.EQUAL:
+ if x.kind == y.kind {
+ kindEqualKind = true
+ } else if x.kind <= valueNull && y.kind <= valueNull {
+ result = true
+ } else if x.kind <= valueNull || y.kind <= valueNull {
+ result = false
+ } else if x.kind <= valueString && y.kind <= valueString {
+ result = x.float64() == y.float64()
+ } else if x.kind == valueBoolean {
+ result = self.calculateComparison(token.EQUAL, toValue_float64(x.float64()), y)
+ } else if y.kind == valueBoolean {
+ result = self.calculateComparison(token.EQUAL, x, toValue_float64(y.float64()))
+ } else if x.kind == valueObject {
+ result = self.calculateComparison(token.EQUAL, toPrimitive(x), y)
+ } else if y.kind == valueObject {
+ result = self.calculateComparison(token.EQUAL, x, toPrimitive(y))
+ } else {
+ panic(hereBeDragons("Unable to test for equality: %v ==? %v", x, y))
+ }
+ default:
+ panic(fmt.Errorf("Unknown comparator %s", comparator.String()))
+ }
+
+ if kindEqualKind {
+ switch x.kind {
+ case valueUndefined, valueNull:
+ result = true
+ case valueNumber:
+ x := x.float64()
+ y := y.float64()
+ if math.IsNaN(x) || math.IsNaN(y) {
+ result = false
+ } else {
+ result = x == y
+ }
+ case valueString:
+ result = x.string() == y.string()
+ case valueBoolean:
+ result = x.bool() == y.bool()
+ case valueObject:
+ result = x._object() == y._object()
+ default:
+ goto ERROR
+ }
+ }
+
+ if negate {
+ result = !result
+ }
+
+ return result
+
+ERROR:
+ panic(hereBeDragons("%v (%v) %s %v (%v)", x, x.kind, comparator, y, y.kind))
+}
diff --git a/vendor/github.com/robertkrimen/otto/file/README.markdown b/vendor/github.com/robertkrimen/otto/file/README.markdown
new file mode 100644
index 000000000..79757baa8
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/file/README.markdown
@@ -0,0 +1,110 @@
+# file
+--
+ import "github.com/robertkrimen/otto/file"
+
+Package file encapsulates the file abstractions used by the ast & parser.
+
+## Usage
+
+#### type File
+
+```go
+type File struct {
+}
+```
+
+
+#### func NewFile
+
+```go
+func NewFile(filename, src string, base int) *File
+```
+
+#### func (*File) Base
+
+```go
+func (fl *File) Base() int
+```
+
+#### func (*File) Name
+
+```go
+func (fl *File) Name() string
+```
+
+#### func (*File) Source
+
+```go
+func (fl *File) Source() string
+```
+
+#### type FileSet
+
+```go
+type FileSet struct {
+}
+```
+
+A FileSet represents a set of source files.
+
+#### func (*FileSet) AddFile
+
+```go
+func (self *FileSet) AddFile(filename, src string) int
+```
+AddFile adds a new file with the given filename and src.
+
+This an internal method, but exported for cross-package use.
+
+#### func (*FileSet) File
+
+```go
+func (self *FileSet) File(idx Idx) *File
+```
+
+#### func (*FileSet) Position
+
+```go
+func (self *FileSet) Position(idx Idx) *Position
+```
+Position converts an Idx in the FileSet into a Position.
+
+#### type Idx
+
+```go
+type Idx int
+```
+
+Idx is a compact encoding of a source position within a file set. It can be
+converted into a Position for a more convenient, but much larger,
+representation.
+
+#### type Position
+
+```go
+type Position struct {
+ Filename string // The filename where the error occurred, if any
+ Offset int // The src offset
+ Line int // The line number, starting at 1
+ Column int // The column number, starting at 1 (The character count)
+
+}
+```
+
+Position describes an arbitrary source position including the filename, line,
+and column location.
+
+#### func (*Position) String
+
+```go
+func (self *Position) String() string
+```
+String returns a string in one of several forms:
+
+ file:line:column A valid position with filename
+ line:column A valid position without filename
+ file An invalid position with filename
+ - An invalid position without filename
+
+--
+**godocdown** http://github.com/robertkrimen/godocdown
diff --git a/vendor/github.com/robertkrimen/otto/file/file.go b/vendor/github.com/robertkrimen/otto/file/file.go
new file mode 100644
index 000000000..9093206e8
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/file/file.go
@@ -0,0 +1,164 @@
+// Package file encapsulates the file abstractions used by the ast & parser.
+//
+package file
+
+import (
+ "fmt"
+ "strings"
+
+ "gopkg.in/sourcemap.v1"
+)
+
+// Idx is a compact encoding of a source position within a file set.
+// It can be converted into a Position for a more convenient, but much
+// larger, representation.
+type Idx int
+
+// Position describes an arbitrary source position
+// including the filename, line, and column location.
+type Position struct {
+ Filename string // The filename where the error occurred, if any
+ Offset int // The src offset
+ Line int // The line number, starting at 1
+ Column int // The column number, starting at 1 (The character count)
+
+}
+
+// A Position is valid if the line number is > 0.
+
+func (self *Position) isValid() bool {
+ return self.Line > 0
+}
+
+// String returns a string in one of several forms:
+//
+// file:line:column A valid position with filename
+// line:column A valid position without filename
+// file An invalid position with filename
+// - An invalid position without filename
+//
+func (self *Position) String() string {
+ str := self.Filename
+ if self.isValid() {
+ if str != "" {
+ str += ":"
+ }
+ str += fmt.Sprintf("%d:%d", self.Line, self.Column)
+ }
+ if str == "" {
+ str = "-"
+ }
+ return str
+}
+
+// FileSet
+
+// A FileSet represents a set of source files.
+type FileSet struct {
+ files []*File
+ last *File
+}
+
+// AddFile adds a new file with the given filename and src.
+//
+// This an internal method, but exported for cross-package use.
+func (self *FileSet) AddFile(filename, src string) int {
+ base := self.nextBase()
+ file := &File{
+ name: filename,
+ src: src,
+ base: base,
+ }
+ self.files = append(self.files, file)
+ self.last = file
+ return base
+}
+
+func (self *FileSet) nextBase() int {
+ if self.last == nil {
+ return 1
+ }
+ return self.last.base + len(self.last.src) + 1
+}
+
+func (self *FileSet) File(idx Idx) *File {
+ for _, file := range self.files {
+ if idx <= Idx(file.base+len(file.src)) {
+ return file
+ }
+ }
+ return nil
+}
+
+// Position converts an Idx in the FileSet into a Position.
+func (self *FileSet) Position(idx Idx) *Position {
+ for _, file := range self.files {
+ if idx <= Idx(file.base+len(file.src)) {
+ return file.Position(idx - Idx(file.base))
+ }
+ }
+
+ return nil
+}
+
+type File struct {
+ name string
+ src string
+ base int // This will always be 1 or greater
+ sm *sourcemap.Consumer
+}
+
+func NewFile(filename, src string, base int) *File {
+ return &File{
+ name: filename,
+ src: src,
+ base: base,
+ }
+}
+
+func (fl *File) WithSourceMap(sm *sourcemap.Consumer) *File {
+ fl.sm = sm
+ return fl
+}
+
+func (fl *File) Name() string {
+ return fl.name
+}
+
+func (fl *File) Source() string {
+ return fl.src
+}
+
+func (fl *File) Base() int {
+ return fl.base
+}
+
+func (fl *File) Position(idx Idx) *Position {
+ position := &Position{}
+
+ offset := int(idx) - fl.base
+
+ if offset >= len(fl.src) || offset < 0 {
+ return nil
+ }
+
+ src := fl.src[:offset]
+
+ position.Filename = fl.name
+ position.Offset = offset
+ position.Line = strings.Count(src, "\n") + 1
+
+ if index := strings.LastIndex(src, "\n"); index >= 0 {
+ position.Column = offset - index
+ } else {
+ position.Column = len(src) + 1
+ }
+
+ if fl.sm != nil {
+ if f, _, l, c, ok := fl.sm.Source(position.Line, position.Column); ok {
+ position.Filename, position.Line, position.Column = f, l, c
+ }
+ }
+
+ return position
+}
diff --git a/vendor/github.com/robertkrimen/otto/global.go b/vendor/github.com/robertkrimen/otto/global.go
new file mode 100644
index 000000000..eb2a2a0fb
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/global.go
@@ -0,0 +1,221 @@
+package otto
+
+import (
+ "strconv"
+ "time"
+)
+
+var (
+ prototypeValueObject = interface{}(nil)
+ prototypeValueFunction = _nativeFunctionObject{
+ call: func(_ FunctionCall) Value {
+ return Value{}
+ },
+ }
+ prototypeValueString = _stringASCII("")
+ // TODO Make this just false?
+ prototypeValueBoolean = Value{
+ kind: valueBoolean,
+ value: false,
+ }
+ prototypeValueNumber = Value{
+ kind: valueNumber,
+ value: 0,
+ }
+ prototypeValueDate = _dateObject{
+ epoch: 0,
+ isNaN: false,
+ time: time.Unix(0, 0).UTC(),
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ }
+ prototypeValueRegExp = _regExpObject{
+ regularExpression: nil,
+ global: false,
+ ignoreCase: false,
+ multiline: false,
+ source: "",
+ flags: "",
+ }
+)
+
+func newContext() *_runtime {
+
+ self := &_runtime{}
+
+ self.globalStash = self.newObjectStash(nil, nil)
+ self.globalObject = self.globalStash.object
+
+ _newContext(self)
+
+ self.eval = self.globalObject.property["eval"].value.(Value).value.(*_object)
+ self.globalObject.prototype = self.global.ObjectPrototype
+
+ return self
+}
+
+func (runtime *_runtime) newBaseObject() *_object {
+ self := newObject(runtime, "")
+ return self
+}
+
+func (runtime *_runtime) newClassObject(class string) *_object {
+ return newObject(runtime, class)
+}
+
+func (runtime *_runtime) newPrimitiveObject(class string, value Value) *_object {
+ self := runtime.newClassObject(class)
+ self.value = value
+ return self
+}
+
+func (self *_object) primitiveValue() Value {
+ switch value := self.value.(type) {
+ case Value:
+ return value
+ case _stringObject:
+ return toValue_string(value.String())
+ }
+ return Value{}
+}
+
+func (self *_object) hasPrimitive() bool {
+ switch self.value.(type) {
+ case Value, _stringObject:
+ return true
+ }
+ return false
+}
+
+func (runtime *_runtime) newObject() *_object {
+ self := runtime.newClassObject("Object")
+ self.prototype = runtime.global.ObjectPrototype
+ return self
+}
+
+func (runtime *_runtime) newArray(length uint32) *_object {
+ self := runtime.newArrayObject(length)
+ self.prototype = runtime.global.ArrayPrototype
+ return self
+}
+
+func (runtime *_runtime) newArrayOf(valueArray []Value) *_object {
+ self := runtime.newArray(uint32(len(valueArray)))
+ for index, value := range valueArray {
+ if value.isEmpty() {
+ continue
+ }
+ self.defineProperty(strconv.FormatInt(int64(index), 10), value, 0111, false)
+ }
+ return self
+}
+
+func (runtime *_runtime) newString(value Value) *_object {
+ self := runtime.newStringObject(value)
+ self.prototype = runtime.global.StringPrototype
+ return self
+}
+
+func (runtime *_runtime) newBoolean(value Value) *_object {
+ self := runtime.newBooleanObject(value)
+ self.prototype = runtime.global.BooleanPrototype
+ return self
+}
+
+func (runtime *_runtime) newNumber(value Value) *_object {
+ self := runtime.newNumberObject(value)
+ self.prototype = runtime.global.NumberPrototype
+ return self
+}
+
+func (runtime *_runtime) newRegExp(patternValue Value, flagsValue Value) *_object {
+
+ pattern := ""
+ flags := ""
+ if object := patternValue._object(); object != nil && object.class == "RegExp" {
+ if flagsValue.IsDefined() {
+ panic(runtime.panicTypeError("Cannot supply flags when constructing one RegExp from another"))
+ }
+ regExp := object.regExpValue()
+ pattern = regExp.source
+ flags = regExp.flags
+ } else {
+ if patternValue.IsDefined() {
+ pattern = patternValue.string()
+ }
+ if flagsValue.IsDefined() {
+ flags = flagsValue.string()
+ }
+ }
+
+ return runtime._newRegExp(pattern, flags)
+}
+
+func (runtime *_runtime) _newRegExp(pattern string, flags string) *_object {
+ self := runtime.newRegExpObject(pattern, flags)
+ self.prototype = runtime.global.RegExpPrototype
+ return self
+}
+
+// TODO Should (probably) be one argument, right? This is redundant
+func (runtime *_runtime) newDate(epoch float64) *_object {
+ self := runtime.newDateObject(epoch)
+ self.prototype = runtime.global.DatePrototype
+ return self
+}
+
+func (runtime *_runtime) newError(name string, message Value, stackFramesToPop int) *_object {
+ var self *_object
+ switch name {
+ case "EvalError":
+ return runtime.newEvalError(message)
+ case "TypeError":
+ return runtime.newTypeError(message)
+ case "RangeError":
+ return runtime.newRangeError(message)
+ case "ReferenceError":
+ return runtime.newReferenceError(message)
+ case "SyntaxError":
+ return runtime.newSyntaxError(message)
+ case "URIError":
+ return runtime.newURIError(message)
+ }
+
+ self = runtime.newErrorObject(name, message, stackFramesToPop)
+ self.prototype = runtime.global.ErrorPrototype
+ if name != "" {
+ self.defineProperty("name", toValue_string(name), 0111, false)
+ }
+ return self
+}
+
+func (runtime *_runtime) newNativeFunction(name, file string, line int, _nativeFunction _nativeFunction) *_object {
+ self := runtime.newNativeFunctionObject(name, file, line, _nativeFunction, 0)
+ self.prototype = runtime.global.FunctionPrototype
+ prototype := runtime.newObject()
+ self.defineProperty("prototype", toValue_object(prototype), 0100, false)
+ prototype.defineProperty("constructor", toValue_object(self), 0100, false)
+ return self
+}
+
+func (runtime *_runtime) newNodeFunction(node *_nodeFunctionLiteral, scopeEnvironment _stash) *_object {
+ // TODO Implement 13.2 fully
+ self := runtime.newNodeFunctionObject(node, scopeEnvironment)
+ self.prototype = runtime.global.FunctionPrototype
+ prototype := runtime.newObject()
+ self.defineProperty("prototype", toValue_object(prototype), 0100, false)
+ prototype.defineProperty("constructor", toValue_object(self), 0101, false)
+ return self
+}
+
+// FIXME Only in one place...
+func (runtime *_runtime) newBoundFunction(target *_object, this Value, argumentList []Value) *_object {
+ self := runtime.newBoundFunctionObject(target, this, argumentList)
+ self.prototype = runtime.global.FunctionPrototype
+ prototype := runtime.newObject()
+ self.defineProperty("prototype", toValue_object(prototype), 0100, false)
+ prototype.defineProperty("constructor", toValue_object(self), 0100, false)
+ return self
+}
diff --git a/vendor/github.com/robertkrimen/otto/inline.go b/vendor/github.com/robertkrimen/otto/inline.go
new file mode 100644
index 000000000..6e5df8393
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/inline.go
@@ -0,0 +1,6649 @@
+package otto
+
+import (
+ "math"
+)
+
+func _newContext(runtime *_runtime) {
+ {
+ runtime.global.ObjectPrototype = &_object{
+ runtime: runtime,
+ class: "Object",
+ objectClass: _classObject,
+ prototype: nil,
+ extensible: true,
+ value: prototypeValueObject,
+ }
+ }
+ {
+ runtime.global.FunctionPrototype = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ value: prototypeValueFunction,
+ }
+ }
+ {
+ valueOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "valueOf",
+ call: builtinObject_valueOf,
+ },
+ }
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinObject_toString,
+ },
+ }
+ toLocaleString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLocaleString",
+ call: builtinObject_toLocaleString,
+ },
+ }
+ hasOwnProperty_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "hasOwnProperty",
+ call: builtinObject_hasOwnProperty,
+ },
+ }
+ isPrototypeOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "isPrototypeOf",
+ call: builtinObject_isPrototypeOf,
+ },
+ }
+ propertyIsEnumerable_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "propertyIsEnumerable",
+ call: builtinObject_propertyIsEnumerable,
+ },
+ }
+ runtime.global.ObjectPrototype.property = map[string]_property{
+ "valueOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: valueOf_function,
+ },
+ },
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "toLocaleString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLocaleString_function,
+ },
+ },
+ "hasOwnProperty": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: hasOwnProperty_function,
+ },
+ },
+ "isPrototypeOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: isPrototypeOf_function,
+ },
+ },
+ "propertyIsEnumerable": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: propertyIsEnumerable_function,
+ },
+ },
+ "constructor": _property{
+ mode: 0101,
+ value: Value{},
+ },
+ }
+ runtime.global.ObjectPrototype.propertyOrder = []string{
+ "valueOf",
+ "toString",
+ "toLocaleString",
+ "hasOwnProperty",
+ "isPrototypeOf",
+ "propertyIsEnumerable",
+ "constructor",
+ }
+ }
+ {
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinFunction_toString,
+ },
+ }
+ apply_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "apply",
+ call: builtinFunction_apply,
+ },
+ }
+ call_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "call",
+ call: builtinFunction_call,
+ },
+ }
+ bind_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "bind",
+ call: builtinFunction_bind,
+ },
+ }
+ runtime.global.FunctionPrototype.property = map[string]_property{
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "apply": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: apply_function,
+ },
+ },
+ "call": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: call_function,
+ },
+ },
+ "bind": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: bind_function,
+ },
+ },
+ "constructor": _property{
+ mode: 0101,
+ value: Value{},
+ },
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ }
+ runtime.global.FunctionPrototype.propertyOrder = []string{
+ "toString",
+ "apply",
+ "call",
+ "bind",
+ "constructor",
+ "length",
+ }
+ }
+ {
+ getPrototypeOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getPrototypeOf",
+ call: builtinObject_getPrototypeOf,
+ },
+ }
+ getOwnPropertyDescriptor_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getOwnPropertyDescriptor",
+ call: builtinObject_getOwnPropertyDescriptor,
+ },
+ }
+ defineProperty_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 3,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "defineProperty",
+ call: builtinObject_defineProperty,
+ },
+ }
+ defineProperties_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "defineProperties",
+ call: builtinObject_defineProperties,
+ },
+ }
+ create_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "create",
+ call: builtinObject_create,
+ },
+ }
+ isExtensible_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "isExtensible",
+ call: builtinObject_isExtensible,
+ },
+ }
+ preventExtensions_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "preventExtensions",
+ call: builtinObject_preventExtensions,
+ },
+ }
+ isSealed_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "isSealed",
+ call: builtinObject_isSealed,
+ },
+ }
+ seal_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "seal",
+ call: builtinObject_seal,
+ },
+ }
+ isFrozen_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "isFrozen",
+ call: builtinObject_isFrozen,
+ },
+ }
+ freeze_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "freeze",
+ call: builtinObject_freeze,
+ },
+ }
+ keys_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "keys",
+ call: builtinObject_keys,
+ },
+ }
+ getOwnPropertyNames_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getOwnPropertyNames",
+ call: builtinObject_getOwnPropertyNames,
+ },
+ }
+ runtime.global.Object = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "Object",
+ call: builtinObject,
+ construct: builtinNewObject,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.ObjectPrototype,
+ },
+ },
+ "getPrototypeOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getPrototypeOf_function,
+ },
+ },
+ "getOwnPropertyDescriptor": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getOwnPropertyDescriptor_function,
+ },
+ },
+ "defineProperty": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: defineProperty_function,
+ },
+ },
+ "defineProperties": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: defineProperties_function,
+ },
+ },
+ "create": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: create_function,
+ },
+ },
+ "isExtensible": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: isExtensible_function,
+ },
+ },
+ "preventExtensions": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: preventExtensions_function,
+ },
+ },
+ "isSealed": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: isSealed_function,
+ },
+ },
+ "seal": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: seal_function,
+ },
+ },
+ "isFrozen": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: isFrozen_function,
+ },
+ },
+ "freeze": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: freeze_function,
+ },
+ },
+ "keys": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: keys_function,
+ },
+ },
+ "getOwnPropertyNames": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getOwnPropertyNames_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ "getPrototypeOf",
+ "getOwnPropertyDescriptor",
+ "defineProperty",
+ "defineProperties",
+ "create",
+ "isExtensible",
+ "preventExtensions",
+ "isSealed",
+ "seal",
+ "isFrozen",
+ "freeze",
+ "keys",
+ "getOwnPropertyNames",
+ },
+ }
+ runtime.global.ObjectPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Object,
+ },
+ }
+ }
+ {
+ Function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "Function",
+ call: builtinFunction,
+ construct: builtinNewFunction,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.FunctionPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.Function = Function
+ runtime.global.FunctionPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Function,
+ },
+ }
+ }
+ {
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinArray_toString,
+ },
+ }
+ toLocaleString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLocaleString",
+ call: builtinArray_toLocaleString,
+ },
+ }
+ concat_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "concat",
+ call: builtinArray_concat,
+ },
+ }
+ join_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "join",
+ call: builtinArray_join,
+ },
+ }
+ splice_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "splice",
+ call: builtinArray_splice,
+ },
+ }
+ shift_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "shift",
+ call: builtinArray_shift,
+ },
+ }
+ pop_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "pop",
+ call: builtinArray_pop,
+ },
+ }
+ push_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "push",
+ call: builtinArray_push,
+ },
+ }
+ slice_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "slice",
+ call: builtinArray_slice,
+ },
+ }
+ unshift_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "unshift",
+ call: builtinArray_unshift,
+ },
+ }
+ reverse_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "reverse",
+ call: builtinArray_reverse,
+ },
+ }
+ sort_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "sort",
+ call: builtinArray_sort,
+ },
+ }
+ indexOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "indexOf",
+ call: builtinArray_indexOf,
+ },
+ }
+ lastIndexOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "lastIndexOf",
+ call: builtinArray_lastIndexOf,
+ },
+ }
+ every_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "every",
+ call: builtinArray_every,
+ },
+ }
+ some_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "some",
+ call: builtinArray_some,
+ },
+ }
+ forEach_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "forEach",
+ call: builtinArray_forEach,
+ },
+ }
+ map_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "map",
+ call: builtinArray_map,
+ },
+ }
+ filter_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "filter",
+ call: builtinArray_filter,
+ },
+ }
+ reduce_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "reduce",
+ call: builtinArray_reduce,
+ },
+ }
+ reduceRight_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "reduceRight",
+ call: builtinArray_reduceRight,
+ },
+ }
+ isArray_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "isArray",
+ call: builtinArray_isArray,
+ },
+ }
+ runtime.global.ArrayPrototype = &_object{
+ runtime: runtime,
+ class: "Array",
+ objectClass: _classArray,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ value: nil,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0100,
+ value: Value{
+ kind: valueNumber,
+ value: uint32(0),
+ },
+ },
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "toLocaleString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLocaleString_function,
+ },
+ },
+ "concat": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: concat_function,
+ },
+ },
+ "join": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: join_function,
+ },
+ },
+ "splice": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: splice_function,
+ },
+ },
+ "shift": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: shift_function,
+ },
+ },
+ "pop": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: pop_function,
+ },
+ },
+ "push": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: push_function,
+ },
+ },
+ "slice": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: slice_function,
+ },
+ },
+ "unshift": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: unshift_function,
+ },
+ },
+ "reverse": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: reverse_function,
+ },
+ },
+ "sort": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: sort_function,
+ },
+ },
+ "indexOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: indexOf_function,
+ },
+ },
+ "lastIndexOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: lastIndexOf_function,
+ },
+ },
+ "every": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: every_function,
+ },
+ },
+ "some": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: some_function,
+ },
+ },
+ "forEach": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: forEach_function,
+ },
+ },
+ "map": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: map_function,
+ },
+ },
+ "filter": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: filter_function,
+ },
+ },
+ "reduce": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: reduce_function,
+ },
+ },
+ "reduceRight": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: reduceRight_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "toString",
+ "toLocaleString",
+ "concat",
+ "join",
+ "splice",
+ "shift",
+ "pop",
+ "push",
+ "slice",
+ "unshift",
+ "reverse",
+ "sort",
+ "indexOf",
+ "lastIndexOf",
+ "every",
+ "some",
+ "forEach",
+ "map",
+ "filter",
+ "reduce",
+ "reduceRight",
+ },
+ }
+ runtime.global.Array = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "Array",
+ call: builtinArray,
+ construct: builtinNewArray,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.ArrayPrototype,
+ },
+ },
+ "isArray": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: isArray_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ "isArray",
+ },
+ }
+ runtime.global.ArrayPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Array,
+ },
+ }
+ }
+ {
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinString_toString,
+ },
+ }
+ valueOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "valueOf",
+ call: builtinString_valueOf,
+ },
+ }
+ charAt_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "charAt",
+ call: builtinString_charAt,
+ },
+ }
+ charCodeAt_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "charCodeAt",
+ call: builtinString_charCodeAt,
+ },
+ }
+ concat_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "concat",
+ call: builtinString_concat,
+ },
+ }
+ indexOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "indexOf",
+ call: builtinString_indexOf,
+ },
+ }
+ lastIndexOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "lastIndexOf",
+ call: builtinString_lastIndexOf,
+ },
+ }
+ match_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "match",
+ call: builtinString_match,
+ },
+ }
+ replace_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "replace",
+ call: builtinString_replace,
+ },
+ }
+ search_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "search",
+ call: builtinString_search,
+ },
+ }
+ split_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "split",
+ call: builtinString_split,
+ },
+ }
+ slice_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "slice",
+ call: builtinString_slice,
+ },
+ }
+ substring_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "substring",
+ call: builtinString_substring,
+ },
+ }
+ toLowerCase_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLowerCase",
+ call: builtinString_toLowerCase,
+ },
+ }
+ toUpperCase_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toUpperCase",
+ call: builtinString_toUpperCase,
+ },
+ }
+ substr_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "substr",
+ call: builtinString_substr,
+ },
+ }
+ trim_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "trim",
+ call: builtinString_trim,
+ },
+ }
+ trimLeft_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "trimLeft",
+ call: builtinString_trimLeft,
+ },
+ }
+ trimRight_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "trimRight",
+ call: builtinString_trimRight,
+ },
+ }
+ localeCompare_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "localeCompare",
+ call: builtinString_localeCompare,
+ },
+ }
+ toLocaleLowerCase_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLocaleLowerCase",
+ call: builtinString_toLocaleLowerCase,
+ },
+ }
+ toLocaleUpperCase_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLocaleUpperCase",
+ call: builtinString_toLocaleUpperCase,
+ },
+ }
+ fromCharCode_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "fromCharCode",
+ call: builtinString_fromCharCode,
+ },
+ }
+ runtime.global.StringPrototype = &_object{
+ runtime: runtime,
+ class: "String",
+ objectClass: _classString,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ value: prototypeValueString,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: int(0),
+ },
+ },
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "valueOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: valueOf_function,
+ },
+ },
+ "charAt": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: charAt_function,
+ },
+ },
+ "charCodeAt": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: charCodeAt_function,
+ },
+ },
+ "concat": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: concat_function,
+ },
+ },
+ "indexOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: indexOf_function,
+ },
+ },
+ "lastIndexOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: lastIndexOf_function,
+ },
+ },
+ "match": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: match_function,
+ },
+ },
+ "replace": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: replace_function,
+ },
+ },
+ "search": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: search_function,
+ },
+ },
+ "split": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: split_function,
+ },
+ },
+ "slice": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: slice_function,
+ },
+ },
+ "substring": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: substring_function,
+ },
+ },
+ "toLowerCase": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLowerCase_function,
+ },
+ },
+ "toUpperCase": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toUpperCase_function,
+ },
+ },
+ "substr": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: substr_function,
+ },
+ },
+ "trim": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: trim_function,
+ },
+ },
+ "trimLeft": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: trimLeft_function,
+ },
+ },
+ "trimRight": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: trimRight_function,
+ },
+ },
+ "localeCompare": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: localeCompare_function,
+ },
+ },
+ "toLocaleLowerCase": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLocaleLowerCase_function,
+ },
+ },
+ "toLocaleUpperCase": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLocaleUpperCase_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "toString",
+ "valueOf",
+ "charAt",
+ "charCodeAt",
+ "concat",
+ "indexOf",
+ "lastIndexOf",
+ "match",
+ "replace",
+ "search",
+ "split",
+ "slice",
+ "substring",
+ "toLowerCase",
+ "toUpperCase",
+ "substr",
+ "trim",
+ "trimLeft",
+ "trimRight",
+ "localeCompare",
+ "toLocaleLowerCase",
+ "toLocaleUpperCase",
+ },
+ }
+ runtime.global.String = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "String",
+ call: builtinString,
+ construct: builtinNewString,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.StringPrototype,
+ },
+ },
+ "fromCharCode": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: fromCharCode_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ "fromCharCode",
+ },
+ }
+ runtime.global.StringPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.String,
+ },
+ }
+ }
+ {
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinBoolean_toString,
+ },
+ }
+ valueOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "valueOf",
+ call: builtinBoolean_valueOf,
+ },
+ }
+ runtime.global.BooleanPrototype = &_object{
+ runtime: runtime,
+ class: "Boolean",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ value: prototypeValueBoolean,
+ property: map[string]_property{
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "valueOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: valueOf_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "toString",
+ "valueOf",
+ },
+ }
+ runtime.global.Boolean = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "Boolean",
+ call: builtinBoolean,
+ construct: builtinNewBoolean,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.BooleanPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.BooleanPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Boolean,
+ },
+ }
+ }
+ {
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinNumber_toString,
+ },
+ }
+ valueOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "valueOf",
+ call: builtinNumber_valueOf,
+ },
+ }
+ toFixed_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toFixed",
+ call: builtinNumber_toFixed,
+ },
+ }
+ toExponential_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toExponential",
+ call: builtinNumber_toExponential,
+ },
+ }
+ toPrecision_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toPrecision",
+ call: builtinNumber_toPrecision,
+ },
+ }
+ toLocaleString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLocaleString",
+ call: builtinNumber_toLocaleString,
+ },
+ }
+ runtime.global.NumberPrototype = &_object{
+ runtime: runtime,
+ class: "Number",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ value: prototypeValueNumber,
+ property: map[string]_property{
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "valueOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: valueOf_function,
+ },
+ },
+ "toFixed": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toFixed_function,
+ },
+ },
+ "toExponential": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toExponential_function,
+ },
+ },
+ "toPrecision": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toPrecision_function,
+ },
+ },
+ "toLocaleString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLocaleString_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "toString",
+ "valueOf",
+ "toFixed",
+ "toExponential",
+ "toPrecision",
+ "toLocaleString",
+ },
+ }
+ runtime.global.Number = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "Number",
+ call: builtinNumber,
+ construct: builtinNewNumber,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.NumberPrototype,
+ },
+ },
+ "MAX_VALUE": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.MaxFloat64,
+ },
+ },
+ "MIN_VALUE": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.SmallestNonzeroFloat64,
+ },
+ },
+ "NaN": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.NaN(),
+ },
+ },
+ "NEGATIVE_INFINITY": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Inf(-1),
+ },
+ },
+ "POSITIVE_INFINITY": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Inf(+1),
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ "MAX_VALUE",
+ "MIN_VALUE",
+ "NaN",
+ "NEGATIVE_INFINITY",
+ "POSITIVE_INFINITY",
+ },
+ }
+ runtime.global.NumberPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Number,
+ },
+ }
+ }
+ {
+ abs_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "abs",
+ call: builtinMath_abs,
+ },
+ }
+ acos_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "acos",
+ call: builtinMath_acos,
+ },
+ }
+ asin_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "asin",
+ call: builtinMath_asin,
+ },
+ }
+ atan_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "atan",
+ call: builtinMath_atan,
+ },
+ }
+ atan2_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "atan2",
+ call: builtinMath_atan2,
+ },
+ }
+ ceil_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "ceil",
+ call: builtinMath_ceil,
+ },
+ }
+ cos_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "cos",
+ call: builtinMath_cos,
+ },
+ }
+ exp_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "exp",
+ call: builtinMath_exp,
+ },
+ }
+ floor_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "floor",
+ call: builtinMath_floor,
+ },
+ }
+ log_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "log",
+ call: builtinMath_log,
+ },
+ }
+ max_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "max",
+ call: builtinMath_max,
+ },
+ }
+ min_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "min",
+ call: builtinMath_min,
+ },
+ }
+ pow_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "pow",
+ call: builtinMath_pow,
+ },
+ }
+ random_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "random",
+ call: builtinMath_random,
+ },
+ }
+ round_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "round",
+ call: builtinMath_round,
+ },
+ }
+ sin_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "sin",
+ call: builtinMath_sin,
+ },
+ }
+ sqrt_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "sqrt",
+ call: builtinMath_sqrt,
+ },
+ }
+ tan_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "tan",
+ call: builtinMath_tan,
+ },
+ }
+ runtime.global.Math = &_object{
+ runtime: runtime,
+ class: "Math",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "abs": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: abs_function,
+ },
+ },
+ "acos": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: acos_function,
+ },
+ },
+ "asin": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: asin_function,
+ },
+ },
+ "atan": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: atan_function,
+ },
+ },
+ "atan2": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: atan2_function,
+ },
+ },
+ "ceil": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: ceil_function,
+ },
+ },
+ "cos": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: cos_function,
+ },
+ },
+ "exp": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: exp_function,
+ },
+ },
+ "floor": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: floor_function,
+ },
+ },
+ "log": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: log_function,
+ },
+ },
+ "max": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: max_function,
+ },
+ },
+ "min": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: min_function,
+ },
+ },
+ "pow": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: pow_function,
+ },
+ },
+ "random": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: random_function,
+ },
+ },
+ "round": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: round_function,
+ },
+ },
+ "sin": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: sin_function,
+ },
+ },
+ "sqrt": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: sqrt_function,
+ },
+ },
+ "tan": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: tan_function,
+ },
+ },
+ "E": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.E,
+ },
+ },
+ "LN10": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Ln10,
+ },
+ },
+ "LN2": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Ln2,
+ },
+ },
+ "LOG2E": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Log2E,
+ },
+ },
+ "LOG10E": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Log10E,
+ },
+ },
+ "PI": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Pi,
+ },
+ },
+ "SQRT1_2": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: sqrt1_2,
+ },
+ },
+ "SQRT2": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Sqrt2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "abs",
+ "acos",
+ "asin",
+ "atan",
+ "atan2",
+ "ceil",
+ "cos",
+ "exp",
+ "floor",
+ "log",
+ "max",
+ "min",
+ "pow",
+ "random",
+ "round",
+ "sin",
+ "sqrt",
+ "tan",
+ "E",
+ "LN10",
+ "LN2",
+ "LOG2E",
+ "LOG10E",
+ "PI",
+ "SQRT1_2",
+ "SQRT2",
+ },
+ }
+ }
+ {
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinDate_toString,
+ },
+ }
+ toDateString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toDateString",
+ call: builtinDate_toDateString,
+ },
+ }
+ toTimeString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toTimeString",
+ call: builtinDate_toTimeString,
+ },
+ }
+ toUTCString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toUTCString",
+ call: builtinDate_toUTCString,
+ },
+ }
+ toISOString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toISOString",
+ call: builtinDate_toISOString,
+ },
+ }
+ toJSON_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toJSON",
+ call: builtinDate_toJSON,
+ },
+ }
+ toGMTString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toGMTString",
+ call: builtinDate_toGMTString,
+ },
+ }
+ toLocaleString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLocaleString",
+ call: builtinDate_toLocaleString,
+ },
+ }
+ toLocaleDateString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLocaleDateString",
+ call: builtinDate_toLocaleDateString,
+ },
+ }
+ toLocaleTimeString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toLocaleTimeString",
+ call: builtinDate_toLocaleTimeString,
+ },
+ }
+ valueOf_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "valueOf",
+ call: builtinDate_valueOf,
+ },
+ }
+ getTime_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getTime",
+ call: builtinDate_getTime,
+ },
+ }
+ getYear_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getYear",
+ call: builtinDate_getYear,
+ },
+ }
+ getFullYear_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getFullYear",
+ call: builtinDate_getFullYear,
+ },
+ }
+ getUTCFullYear_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getUTCFullYear",
+ call: builtinDate_getUTCFullYear,
+ },
+ }
+ getMonth_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getMonth",
+ call: builtinDate_getMonth,
+ },
+ }
+ getUTCMonth_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getUTCMonth",
+ call: builtinDate_getUTCMonth,
+ },
+ }
+ getDate_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getDate",
+ call: builtinDate_getDate,
+ },
+ }
+ getUTCDate_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getUTCDate",
+ call: builtinDate_getUTCDate,
+ },
+ }
+ getDay_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getDay",
+ call: builtinDate_getDay,
+ },
+ }
+ getUTCDay_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getUTCDay",
+ call: builtinDate_getUTCDay,
+ },
+ }
+ getHours_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getHours",
+ call: builtinDate_getHours,
+ },
+ }
+ getUTCHours_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getUTCHours",
+ call: builtinDate_getUTCHours,
+ },
+ }
+ getMinutes_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getMinutes",
+ call: builtinDate_getMinutes,
+ },
+ }
+ getUTCMinutes_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getUTCMinutes",
+ call: builtinDate_getUTCMinutes,
+ },
+ }
+ getSeconds_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getSeconds",
+ call: builtinDate_getSeconds,
+ },
+ }
+ getUTCSeconds_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getUTCSeconds",
+ call: builtinDate_getUTCSeconds,
+ },
+ }
+ getMilliseconds_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getMilliseconds",
+ call: builtinDate_getMilliseconds,
+ },
+ }
+ getUTCMilliseconds_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getUTCMilliseconds",
+ call: builtinDate_getUTCMilliseconds,
+ },
+ }
+ getTimezoneOffset_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "getTimezoneOffset",
+ call: builtinDate_getTimezoneOffset,
+ },
+ }
+ setTime_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setTime",
+ call: builtinDate_setTime,
+ },
+ }
+ setMilliseconds_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setMilliseconds",
+ call: builtinDate_setMilliseconds,
+ },
+ }
+ setUTCMilliseconds_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setUTCMilliseconds",
+ call: builtinDate_setUTCMilliseconds,
+ },
+ }
+ setSeconds_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setSeconds",
+ call: builtinDate_setSeconds,
+ },
+ }
+ setUTCSeconds_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setUTCSeconds",
+ call: builtinDate_setUTCSeconds,
+ },
+ }
+ setMinutes_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 3,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setMinutes",
+ call: builtinDate_setMinutes,
+ },
+ }
+ setUTCMinutes_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 3,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setUTCMinutes",
+ call: builtinDate_setUTCMinutes,
+ },
+ }
+ setHours_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 4,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setHours",
+ call: builtinDate_setHours,
+ },
+ }
+ setUTCHours_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 4,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setUTCHours",
+ call: builtinDate_setUTCHours,
+ },
+ }
+ setDate_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setDate",
+ call: builtinDate_setDate,
+ },
+ }
+ setUTCDate_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setUTCDate",
+ call: builtinDate_setUTCDate,
+ },
+ }
+ setMonth_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setMonth",
+ call: builtinDate_setMonth,
+ },
+ }
+ setUTCMonth_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setUTCMonth",
+ call: builtinDate_setUTCMonth,
+ },
+ }
+ setYear_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setYear",
+ call: builtinDate_setYear,
+ },
+ }
+ setFullYear_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 3,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setFullYear",
+ call: builtinDate_setFullYear,
+ },
+ }
+ setUTCFullYear_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 3,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "setUTCFullYear",
+ call: builtinDate_setUTCFullYear,
+ },
+ }
+ parse_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "parse",
+ call: builtinDate_parse,
+ },
+ }
+ UTC_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 7,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "UTC",
+ call: builtinDate_UTC,
+ },
+ }
+ now_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "now",
+ call: builtinDate_now,
+ },
+ }
+ runtime.global.DatePrototype = &_object{
+ runtime: runtime,
+ class: "Date",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ value: prototypeValueDate,
+ property: map[string]_property{
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "toDateString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toDateString_function,
+ },
+ },
+ "toTimeString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toTimeString_function,
+ },
+ },
+ "toUTCString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toUTCString_function,
+ },
+ },
+ "toISOString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toISOString_function,
+ },
+ },
+ "toJSON": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toJSON_function,
+ },
+ },
+ "toGMTString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toGMTString_function,
+ },
+ },
+ "toLocaleString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLocaleString_function,
+ },
+ },
+ "toLocaleDateString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLocaleDateString_function,
+ },
+ },
+ "toLocaleTimeString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toLocaleTimeString_function,
+ },
+ },
+ "valueOf": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: valueOf_function,
+ },
+ },
+ "getTime": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getTime_function,
+ },
+ },
+ "getYear": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getYear_function,
+ },
+ },
+ "getFullYear": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getFullYear_function,
+ },
+ },
+ "getUTCFullYear": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getUTCFullYear_function,
+ },
+ },
+ "getMonth": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getMonth_function,
+ },
+ },
+ "getUTCMonth": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getUTCMonth_function,
+ },
+ },
+ "getDate": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getDate_function,
+ },
+ },
+ "getUTCDate": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getUTCDate_function,
+ },
+ },
+ "getDay": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getDay_function,
+ },
+ },
+ "getUTCDay": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getUTCDay_function,
+ },
+ },
+ "getHours": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getHours_function,
+ },
+ },
+ "getUTCHours": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getUTCHours_function,
+ },
+ },
+ "getMinutes": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getMinutes_function,
+ },
+ },
+ "getUTCMinutes": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getUTCMinutes_function,
+ },
+ },
+ "getSeconds": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getSeconds_function,
+ },
+ },
+ "getUTCSeconds": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getUTCSeconds_function,
+ },
+ },
+ "getMilliseconds": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getMilliseconds_function,
+ },
+ },
+ "getUTCMilliseconds": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getUTCMilliseconds_function,
+ },
+ },
+ "getTimezoneOffset": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: getTimezoneOffset_function,
+ },
+ },
+ "setTime": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setTime_function,
+ },
+ },
+ "setMilliseconds": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setMilliseconds_function,
+ },
+ },
+ "setUTCMilliseconds": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setUTCMilliseconds_function,
+ },
+ },
+ "setSeconds": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setSeconds_function,
+ },
+ },
+ "setUTCSeconds": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setUTCSeconds_function,
+ },
+ },
+ "setMinutes": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setMinutes_function,
+ },
+ },
+ "setUTCMinutes": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setUTCMinutes_function,
+ },
+ },
+ "setHours": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setHours_function,
+ },
+ },
+ "setUTCHours": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setUTCHours_function,
+ },
+ },
+ "setDate": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setDate_function,
+ },
+ },
+ "setUTCDate": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setUTCDate_function,
+ },
+ },
+ "setMonth": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setMonth_function,
+ },
+ },
+ "setUTCMonth": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setUTCMonth_function,
+ },
+ },
+ "setYear": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setYear_function,
+ },
+ },
+ "setFullYear": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setFullYear_function,
+ },
+ },
+ "setUTCFullYear": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: setUTCFullYear_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "toString",
+ "toDateString",
+ "toTimeString",
+ "toUTCString",
+ "toISOString",
+ "toJSON",
+ "toGMTString",
+ "toLocaleString",
+ "toLocaleDateString",
+ "toLocaleTimeString",
+ "valueOf",
+ "getTime",
+ "getYear",
+ "getFullYear",
+ "getUTCFullYear",
+ "getMonth",
+ "getUTCMonth",
+ "getDate",
+ "getUTCDate",
+ "getDay",
+ "getUTCDay",
+ "getHours",
+ "getUTCHours",
+ "getMinutes",
+ "getUTCMinutes",
+ "getSeconds",
+ "getUTCSeconds",
+ "getMilliseconds",
+ "getUTCMilliseconds",
+ "getTimezoneOffset",
+ "setTime",
+ "setMilliseconds",
+ "setUTCMilliseconds",
+ "setSeconds",
+ "setUTCSeconds",
+ "setMinutes",
+ "setUTCMinutes",
+ "setHours",
+ "setUTCHours",
+ "setDate",
+ "setUTCDate",
+ "setMonth",
+ "setUTCMonth",
+ "setYear",
+ "setFullYear",
+ "setUTCFullYear",
+ },
+ }
+ runtime.global.Date = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "Date",
+ call: builtinDate,
+ construct: builtinNewDate,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 7,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.DatePrototype,
+ },
+ },
+ "parse": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: parse_function,
+ },
+ },
+ "UTC": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: UTC_function,
+ },
+ },
+ "now": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: now_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ "parse",
+ "UTC",
+ "now",
+ },
+ }
+ runtime.global.DatePrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Date,
+ },
+ }
+ }
+ {
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinRegExp_toString,
+ },
+ }
+ exec_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "exec",
+ call: builtinRegExp_exec,
+ },
+ }
+ test_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "test",
+ call: builtinRegExp_test,
+ },
+ }
+ compile_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "compile",
+ call: builtinRegExp_compile,
+ },
+ }
+ runtime.global.RegExpPrototype = &_object{
+ runtime: runtime,
+ class: "RegExp",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ value: prototypeValueRegExp,
+ property: map[string]_property{
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "exec": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: exec_function,
+ },
+ },
+ "test": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: test_function,
+ },
+ },
+ "compile": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: compile_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "toString",
+ "exec",
+ "test",
+ "compile",
+ },
+ }
+ runtime.global.RegExp = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "RegExp",
+ call: builtinRegExp,
+ construct: builtinNewRegExp,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.RegExpPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.RegExpPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.RegExp,
+ },
+ }
+ }
+ {
+ toString_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "toString",
+ call: builtinError_toString,
+ },
+ }
+ runtime.global.ErrorPrototype = &_object{
+ runtime: runtime,
+ class: "Error",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ value: nil,
+ property: map[string]_property{
+ "toString": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: toString_function,
+ },
+ },
+ "name": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueString,
+ value: "Error",
+ },
+ },
+ "message": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueString,
+ value: "",
+ },
+ },
+ },
+ propertyOrder: []string{
+ "toString",
+ "name",
+ "message",
+ },
+ }
+ runtime.global.Error = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "Error",
+ call: builtinError,
+ construct: builtinNewError,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.ErrorPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.ErrorPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Error,
+ },
+ }
+ }
+ {
+ runtime.global.EvalErrorPrototype = &_object{
+ runtime: runtime,
+ class: "EvalError",
+ objectClass: _classObject,
+ prototype: runtime.global.ErrorPrototype,
+ extensible: true,
+ value: nil,
+ property: map[string]_property{
+ "name": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueString,
+ value: "EvalError",
+ },
+ },
+ },
+ propertyOrder: []string{
+ "name",
+ },
+ }
+ runtime.global.EvalError = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "EvalError",
+ call: builtinEvalError,
+ construct: builtinNewEvalError,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.EvalErrorPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.EvalErrorPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.EvalError,
+ },
+ }
+ }
+ {
+ runtime.global.TypeErrorPrototype = &_object{
+ runtime: runtime,
+ class: "TypeError",
+ objectClass: _classObject,
+ prototype: runtime.global.ErrorPrototype,
+ extensible: true,
+ value: nil,
+ property: map[string]_property{
+ "name": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueString,
+ value: "TypeError",
+ },
+ },
+ },
+ propertyOrder: []string{
+ "name",
+ },
+ }
+ runtime.global.TypeError = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "TypeError",
+ call: builtinTypeError,
+ construct: builtinNewTypeError,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.TypeErrorPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.TypeErrorPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.TypeError,
+ },
+ }
+ }
+ {
+ runtime.global.RangeErrorPrototype = &_object{
+ runtime: runtime,
+ class: "RangeError",
+ objectClass: _classObject,
+ prototype: runtime.global.ErrorPrototype,
+ extensible: true,
+ value: nil,
+ property: map[string]_property{
+ "name": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueString,
+ value: "RangeError",
+ },
+ },
+ },
+ propertyOrder: []string{
+ "name",
+ },
+ }
+ runtime.global.RangeError = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "RangeError",
+ call: builtinRangeError,
+ construct: builtinNewRangeError,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.RangeErrorPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.RangeErrorPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.RangeError,
+ },
+ }
+ }
+ {
+ runtime.global.ReferenceErrorPrototype = &_object{
+ runtime: runtime,
+ class: "ReferenceError",
+ objectClass: _classObject,
+ prototype: runtime.global.ErrorPrototype,
+ extensible: true,
+ value: nil,
+ property: map[string]_property{
+ "name": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueString,
+ value: "ReferenceError",
+ },
+ },
+ },
+ propertyOrder: []string{
+ "name",
+ },
+ }
+ runtime.global.ReferenceError = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "ReferenceError",
+ call: builtinReferenceError,
+ construct: builtinNewReferenceError,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.ReferenceErrorPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.ReferenceErrorPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.ReferenceError,
+ },
+ }
+ }
+ {
+ runtime.global.SyntaxErrorPrototype = &_object{
+ runtime: runtime,
+ class: "SyntaxError",
+ objectClass: _classObject,
+ prototype: runtime.global.ErrorPrototype,
+ extensible: true,
+ value: nil,
+ property: map[string]_property{
+ "name": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueString,
+ value: "SyntaxError",
+ },
+ },
+ },
+ propertyOrder: []string{
+ "name",
+ },
+ }
+ runtime.global.SyntaxError = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "SyntaxError",
+ call: builtinSyntaxError,
+ construct: builtinNewSyntaxError,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.SyntaxErrorPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.SyntaxErrorPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.SyntaxError,
+ },
+ }
+ }
+ {
+ runtime.global.URIErrorPrototype = &_object{
+ runtime: runtime,
+ class: "URIError",
+ objectClass: _classObject,
+ prototype: runtime.global.ErrorPrototype,
+ extensible: true,
+ value: nil,
+ property: map[string]_property{
+ "name": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueString,
+ value: "URIError",
+ },
+ },
+ },
+ propertyOrder: []string{
+ "name",
+ },
+ }
+ runtime.global.URIError = &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: _nativeFunctionObject{
+ name: "URIError",
+ call: builtinURIError,
+ construct: builtinNewURIError,
+ },
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ "prototype": _property{
+ mode: 0,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.URIErrorPrototype,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ "prototype",
+ },
+ }
+ runtime.global.URIErrorPrototype.property["constructor"] =
+ _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.URIError,
+ },
+ }
+ }
+ {
+ parse_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "parse",
+ call: builtinJSON_parse,
+ },
+ }
+ stringify_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 3,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "stringify",
+ call: builtinJSON_stringify,
+ },
+ }
+ runtime.global.JSON = &_object{
+ runtime: runtime,
+ class: "JSON",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "parse": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: parse_function,
+ },
+ },
+ "stringify": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: stringify_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "parse",
+ "stringify",
+ },
+ }
+ }
+ {
+ eval_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "eval",
+ call: builtinGlobal_eval,
+ },
+ }
+ parseInt_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 2,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "parseInt",
+ call: builtinGlobal_parseInt,
+ },
+ }
+ parseFloat_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "parseFloat",
+ call: builtinGlobal_parseFloat,
+ },
+ }
+ isNaN_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "isNaN",
+ call: builtinGlobal_isNaN,
+ },
+ }
+ isFinite_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "isFinite",
+ call: builtinGlobal_isFinite,
+ },
+ }
+ decodeURI_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "decodeURI",
+ call: builtinGlobal_decodeURI,
+ },
+ }
+ decodeURIComponent_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "decodeURIComponent",
+ call: builtinGlobal_decodeURIComponent,
+ },
+ }
+ encodeURI_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "encodeURI",
+ call: builtinGlobal_encodeURI,
+ },
+ }
+ encodeURIComponent_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "encodeURIComponent",
+ call: builtinGlobal_encodeURIComponent,
+ },
+ }
+ escape_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "escape",
+ call: builtinGlobal_escape,
+ },
+ }
+ unescape_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 1,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "unescape",
+ call: builtinGlobal_unescape,
+ },
+ }
+ runtime.globalObject.property = map[string]_property{
+ "eval": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: eval_function,
+ },
+ },
+ "parseInt": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: parseInt_function,
+ },
+ },
+ "parseFloat": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: parseFloat_function,
+ },
+ },
+ "isNaN": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: isNaN_function,
+ },
+ },
+ "isFinite": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: isFinite_function,
+ },
+ },
+ "decodeURI": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: decodeURI_function,
+ },
+ },
+ "decodeURIComponent": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: decodeURIComponent_function,
+ },
+ },
+ "encodeURI": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: encodeURI_function,
+ },
+ },
+ "encodeURIComponent": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: encodeURIComponent_function,
+ },
+ },
+ "escape": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: escape_function,
+ },
+ },
+ "unescape": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: unescape_function,
+ },
+ },
+ "Object": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Object,
+ },
+ },
+ "Function": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Function,
+ },
+ },
+ "Array": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Array,
+ },
+ },
+ "String": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.String,
+ },
+ },
+ "Boolean": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Boolean,
+ },
+ },
+ "Number": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Number,
+ },
+ },
+ "Math": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Math,
+ },
+ },
+ "Date": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Date,
+ },
+ },
+ "RegExp": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.RegExp,
+ },
+ },
+ "Error": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.Error,
+ },
+ },
+ "EvalError": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.EvalError,
+ },
+ },
+ "TypeError": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.TypeError,
+ },
+ },
+ "RangeError": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.RangeError,
+ },
+ },
+ "ReferenceError": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.ReferenceError,
+ },
+ },
+ "SyntaxError": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.SyntaxError,
+ },
+ },
+ "URIError": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.URIError,
+ },
+ },
+ "JSON": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: runtime.global.JSON,
+ },
+ },
+ "undefined": _property{
+ mode: 0,
+ value: Value{
+ kind: valueUndefined,
+ },
+ },
+ "NaN": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.NaN(),
+ },
+ },
+ "Infinity": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: math.Inf(+1),
+ },
+ },
+ }
+ runtime.globalObject.propertyOrder = []string{
+ "eval",
+ "parseInt",
+ "parseFloat",
+ "isNaN",
+ "isFinite",
+ "decodeURI",
+ "decodeURIComponent",
+ "encodeURI",
+ "encodeURIComponent",
+ "escape",
+ "unescape",
+ "Object",
+ "Function",
+ "Array",
+ "String",
+ "Boolean",
+ "Number",
+ "Math",
+ "Date",
+ "RegExp",
+ "Error",
+ "EvalError",
+ "TypeError",
+ "RangeError",
+ "ReferenceError",
+ "SyntaxError",
+ "URIError",
+ "JSON",
+ "undefined",
+ "NaN",
+ "Infinity",
+ }
+ }
+}
+
+func newConsoleObject(runtime *_runtime) *_object {
+ {
+ log_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "log",
+ call: builtinConsole_log,
+ },
+ }
+ debug_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "debug",
+ call: builtinConsole_log,
+ },
+ }
+ info_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "info",
+ call: builtinConsole_log,
+ },
+ }
+ error_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "error",
+ call: builtinConsole_error,
+ },
+ }
+ warn_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "warn",
+ call: builtinConsole_error,
+ },
+ }
+ dir_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "dir",
+ call: builtinConsole_dir,
+ },
+ }
+ time_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "time",
+ call: builtinConsole_time,
+ },
+ }
+ timeEnd_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "timeEnd",
+ call: builtinConsole_timeEnd,
+ },
+ }
+ trace_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "trace",
+ call: builtinConsole_trace,
+ },
+ }
+ assert_function := &_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "length": _property{
+ mode: 0,
+ value: Value{
+ kind: valueNumber,
+ value: 0,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "length",
+ },
+ value: _nativeFunctionObject{
+ name: "assert",
+ call: builtinConsole_assert,
+ },
+ }
+ return &_object{
+ runtime: runtime,
+ class: "Object",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ property: map[string]_property{
+ "log": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: log_function,
+ },
+ },
+ "debug": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: debug_function,
+ },
+ },
+ "info": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: info_function,
+ },
+ },
+ "error": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: error_function,
+ },
+ },
+ "warn": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: warn_function,
+ },
+ },
+ "dir": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: dir_function,
+ },
+ },
+ "time": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: time_function,
+ },
+ },
+ "timeEnd": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: timeEnd_function,
+ },
+ },
+ "trace": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: trace_function,
+ },
+ },
+ "assert": _property{
+ mode: 0101,
+ value: Value{
+ kind: valueObject,
+ value: assert_function,
+ },
+ },
+ },
+ propertyOrder: []string{
+ "log",
+ "debug",
+ "info",
+ "error",
+ "warn",
+ "dir",
+ "time",
+ "timeEnd",
+ "trace",
+ "assert",
+ },
+ }
+ }
+}
+
+func toValue_int(value int) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_int8(value int8) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_int16(value int16) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_int32(value int32) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_int64(value int64) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_uint(value uint) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_uint8(value uint8) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_uint16(value uint16) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_uint32(value uint32) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_uint64(value uint64) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_float32(value float32) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_float64(value float64) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+
+func toValue_string(value string) Value {
+ return Value{
+ kind: valueString,
+ value: value,
+ }
+}
+
+func toValue_string16(value []uint16) Value {
+ return Value{
+ kind: valueString,
+ value: value,
+ }
+}
+
+func toValue_bool(value bool) Value {
+ return Value{
+ kind: valueBoolean,
+ value: value,
+ }
+}
+
+func toValue_object(value *_object) Value {
+ return Value{
+ kind: valueObject,
+ value: value,
+ }
+}
diff --git a/vendor/github.com/robertkrimen/otto/inline.pl b/vendor/github.com/robertkrimen/otto/inline.pl
new file mode 100755
index 000000000..c3620b4a2
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/inline.pl
@@ -0,0 +1,1086 @@
+#!/usr/bin/env perl
+
+my $_fmt;
+$_fmt = "gofmt";
+$_fmt = "cat -n" if "cat" eq ($ARGV[0] || "");
+
+use strict;
+use warnings;
+use IO::File;
+
+my $self = __PACKAGE__;
+
+sub functionLabel ($) {
+ return "$_[0]_function";
+}
+
+sub trim ($) {
+ local $_ = shift;
+ s/^\s*//, s/\s*$// for $_;
+ return $_;
+}
+
+open my $fmt, "|-", "$_fmt" or die $!;
+
+$fmt->print(<<_END_);
+package otto
+
+import (
+ "math"
+)
+
+func _newContext(runtime *_runtime) {
+@{[ join "\n", $self->newContext() ]}
+}
+
+func newConsoleObject(runtime *_runtime) *_object {
+@{[ join "\n", $self->newConsoleObject() ]}
+}
+_END_
+
+for (qw/int int8 int16 int32 int64 uint uint8 uint16 uint32 uint64 float32 float64/) {
+ $fmt->print(<<_END_);
+
+func toValue_$_(value $_) Value {
+ return Value{
+ kind: valueNumber,
+ value: value,
+ }
+}
+_END_
+}
+
+$fmt->print(<<_END_);
+
+func toValue_string(value string) Value {
+ return Value{
+ kind: valueString,
+ value: value,
+ }
+}
+
+func toValue_string16(value []uint16) Value {
+ return Value{
+ kind: valueString,
+ value: value,
+ }
+}
+
+func toValue_bool(value bool) Value {
+ return Value{
+ kind: valueBoolean,
+ value: value,
+ }
+}
+
+func toValue_object(value *_object) Value {
+ return Value{
+ kind: valueObject,
+ value: value,
+ }
+}
+_END_
+
+close $fmt;
+
+sub newConsoleObject {
+ my $self = shift;
+
+ return
+ $self->block(sub {
+ my $class = "Console";
+ my @got = $self->functionDeclare(
+ $class,
+ "log", 0,
+ "debug:log", 0,
+ "info:log", 0,
+ "error", 0,
+ "warn:error", 0,
+ "dir", 0,
+ "time", 0,
+ "timeEnd", 0,
+ "trace", 0,
+ "assert", 0,
+ );
+ return
+ "return @{[ $self->newObject(@got) ]}"
+ }),
+ ;
+}
+
+sub newContext {
+ my $self = shift;
+ return
+ # ObjectPrototype
+ $self->block(sub {
+ my $class = "Object";
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classObject",
+ undef,
+ "prototypeValueObject",
+ ),
+ }),
+
+ # FunctionPrototype
+ $self->block(sub {
+ my $class = "Function";
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classObject",
+ ".ObjectPrototype",
+ "prototypeValueFunction",
+ ),
+ }),
+
+ # ObjectPrototype
+ $self->block(sub {
+ my $class = "Object";
+ my @got = $self->functionDeclare(
+ $class,
+ "valueOf", 0,
+ "toString", 0,
+ "toLocaleString", 0,
+ "hasOwnProperty", 1,
+ "isPrototypeOf", 1,
+ "propertyIsEnumerable", 1,
+ );
+ my @propertyMap = $self->propertyMap(
+ @got,
+ $self->property("constructor", undef),
+ );
+ my $propertyOrder = $self->propertyOrder(@propertyMap);
+ $propertyOrder =~ s/^propertyOrder: //;
+ return
+ ".${class}Prototype.property =", @propertyMap,
+ ".${class}Prototype.propertyOrder =", $propertyOrder,
+ }),
+
+ # FunctionPrototype
+ $self->block(sub {
+ my $class = "Function";
+ my @got = $self->functionDeclare(
+ $class,
+ "toString", 0,
+ "apply", 2,
+ "call", 1,
+ "bind", 1,
+ );
+ my @propertyMap = $self->propertyMap(
+ @got,
+ $self->property("constructor", undef),
+ $self->property("length", $self->numberValue(0), "0"),
+ );
+ my $propertyOrder = $self->propertyOrder(@propertyMap);
+ $propertyOrder =~ s/^propertyOrder: //;
+ return
+ ".${class}Prototype.property =", @propertyMap,
+ ".${class}Prototype.propertyOrder =", $propertyOrder,
+ }),
+
+ # Object
+ $self->block(sub {
+ my $class = "Object";
+ return
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 1,
+ $self->functionDeclare(
+ $class,
+ "getPrototypeOf", 1,
+ "getOwnPropertyDescriptor", 2,
+ "defineProperty", 3,
+ "defineProperties", 2,
+ "create", 2,
+ "isExtensible", 1,
+ "preventExtensions", 1,
+ "isSealed", 1,
+ "seal", 1,
+ "isFrozen", 1,
+ "freeze", 1,
+ "keys", 1,
+ "getOwnPropertyNames", 1,
+ ),
+ ),
+ }),
+
+ # Function
+ $self->block(sub {
+ my $class = "Function";
+ return
+ "Function :=",
+ $self->globalFunction(
+ $class,
+ 1,
+ ),
+ ".$class = Function",
+ }),
+
+ # Array
+ $self->block(sub {
+ my $class = "Array";
+ my @got = $self->functionDeclare(
+ $class,
+ "toString", 0,
+ "toLocaleString", 0,
+ "concat", 1,
+ "join", 1,
+ "splice", 2,
+ "shift", 0,
+ "pop", 0,
+ "push", 1,
+ "slice", 2,
+ "unshift", 1,
+ "reverse", 0,
+ "sort", 1,
+ "indexOf", 1,
+ "lastIndexOf", 1,
+ "every", 1,
+ "some", 1,
+ "forEach", 1,
+ "map", 1,
+ "filter", 1,
+ "reduce", 1,
+ "reduceRight", 1,
+ );
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classArray",
+ ".ObjectPrototype",
+ undef,
+ $self->property("length", $self->numberValue("uint32(0)"), "0100"),
+ @got,
+ ),
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 1,
+ $self->functionDeclare(
+ $class,
+ "isArray", 1,
+ ),
+ ),
+ }),
+
+ # String
+ $self->block(sub {
+ my $class = "String";
+ my @got = $self->functionDeclare(
+ $class,
+ "toString", 0,
+ "valueOf", 0,
+ "charAt", 1,
+ "charCodeAt", 1,
+ "concat", 1,
+ "indexOf", 1,
+ "lastIndexOf", 1,
+ "match", 1,
+ "replace", 2,
+ "search", 1,
+ "split", 2,
+ "slice", 2,
+ "substring", 2,
+ "toLowerCase", 0,
+ "toUpperCase", 0,
+ "substr", 2,
+ "trim", 0,
+ "trimLeft", 0,
+ "trimRight", 0,
+ "localeCompare", 1,
+ "toLocaleLowerCase", 0,
+ "toLocaleUpperCase", 0,
+ );
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classString",
+ ".ObjectPrototype",
+ "prototypeValueString",
+ $self->property("length", $self->numberValue("int(0)"), "0"),
+ @got,
+ ),
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 1,
+ $self->functionDeclare(
+ $class,
+ "fromCharCode", 1,
+ ),
+ ),
+ }),
+
+ # Boolean
+ $self->block(sub {
+ my $class = "Boolean";
+ my @got = $self->functionDeclare(
+ $class,
+ "toString", 0,
+ "valueOf", 0,
+ );
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classObject",
+ ".ObjectPrototype",
+ "prototypeValueBoolean",
+ @got,
+ ),
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 1,
+ $self->functionDeclare(
+ $class,
+ ),
+ ),
+ }),
+
+ # Number
+ $self->block(sub {
+ my $class = "Number";
+ my @got = $self->functionDeclare(
+ $class,
+ "toString", 0,
+ "valueOf", 0,
+ "toFixed", 1,
+ "toExponential", 1,
+ "toPrecision", 1,
+ "toLocaleString", 1,
+ );
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classObject",
+ ".ObjectPrototype",
+ "prototypeValueNumber",
+ @got,
+ ),
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 1,
+ $self->functionDeclare(
+ $class,
+ ),
+ $self->numberConstantDeclare(
+ "MAX_VALUE", "math.MaxFloat64",
+ "MIN_VALUE", "math.SmallestNonzeroFloat64",
+ "NaN", "math.NaN()",
+ "NEGATIVE_INFINITY", "math.Inf(-1)",
+ "POSITIVE_INFINITY", "math.Inf(+1)",
+ ),
+ ),
+ }),
+
+ # Math
+ $self->block(sub {
+ my $class = "Math";
+ return
+ ".$class =",
+ $self->globalObject(
+ $class,
+ $self->functionDeclare(
+ $class,
+ "abs", 1,
+ "acos", 1,
+ "asin", 1,
+ "atan", 1,
+ "atan2", 1,
+ "ceil", 1,
+ "cos", 1,
+ "exp", 1,
+ "floor", 1,
+ "log", 1,
+ "max", 2,
+ "min", 2,
+ "pow", 2,
+ "random", 0,
+ "round", 1,
+ "sin", 1,
+ "sqrt", 1,
+ "tan", 1,
+ ),
+ $self->numberConstantDeclare(
+ "E", "math.E",
+ "LN10", "math.Ln10",
+ "LN2", "math.Ln2",
+ "LOG2E", "math.Log2E",
+ "LOG10E", "math.Log10E",
+ "PI", "math.Pi",
+ "SQRT1_2", "sqrt1_2",
+ "SQRT2", "math.Sqrt2",
+ )
+ ),
+ }),
+
+ # Date
+ $self->block(sub {
+ my $class = "Date";
+ my @got = $self->functionDeclare(
+ $class,
+ "toString", 0,
+ "toDateString", 0,
+ "toTimeString", 0,
+ "toUTCString", 0,
+ "toISOString", 0,
+ "toJSON", 1,
+ "toGMTString", 0,
+ "toLocaleString", 0,
+ "toLocaleDateString", 0,
+ "toLocaleTimeString", 0,
+ "valueOf", 0,
+ "getTime", 0,
+ "getYear", 0,
+ "getFullYear", 0,
+ "getUTCFullYear", 0,
+ "getMonth", 0,
+ "getUTCMonth", 0,
+ "getDate", 0,
+ "getUTCDate", 0,
+ "getDay", 0,
+ "getUTCDay", 0,
+ "getHours", 0,
+ "getUTCHours", 0,
+ "getMinutes", 0,
+ "getUTCMinutes", 0,
+ "getSeconds", 0,
+ "getUTCSeconds", 0,
+ "getMilliseconds", 0,
+ "getUTCMilliseconds", 0,
+ "getTimezoneOffset", 0,
+ "setTime", 1,
+ "setMilliseconds", 1,
+ "setUTCMilliseconds", 1,
+ "setSeconds", 2,
+ "setUTCSeconds", 2,
+ "setMinutes", 3,
+ "setUTCMinutes", 3,
+ "setHours", 4,
+ "setUTCHours", 4,
+ "setDate", 1,
+ "setUTCDate", 1,
+ "setMonth", 2,
+ "setUTCMonth", 2,
+ "setYear", 1,
+ "setFullYear", 3,
+ "setUTCFullYear", 3,
+ );
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classObject",
+ ".ObjectPrototype",
+ "prototypeValueDate",
+ @got,
+ ),
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 7,
+ $self->functionDeclare(
+ $class,
+ "parse", 1,
+ "UTC", 7,
+ "now", 0,
+ ),
+ ),
+ }),
+
+ # RegExp
+ $self->block(sub {
+ my $class = "RegExp";
+ my @got = $self->functionDeclare(
+ $class,
+ "toString", 0,
+ "exec", 1,
+ "test", 1,
+ "compile", 1,
+ );
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classObject",
+ ".ObjectPrototype",
+ "prototypeValueRegExp",
+ @got,
+ ),
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 2,
+ $self->functionDeclare(
+ $class,
+ ),
+ ),
+ }),
+
+ # Error
+ $self->block(sub {
+ my $class = "Error";
+ my @got = $self->functionDeclare(
+ $class,
+ "toString", 0,
+ );
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classObject",
+ ".ObjectPrototype",
+ undef,
+ @got,
+ $self->property("name", $self->stringValue("Error")),
+ $self->property("message", $self->stringValue("")),
+ ),
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 1,
+ $self->functionDeclare(
+ $class,
+ ),
+ ),
+ }),
+
+ (map {
+ my $class = "${_}Error";
+ $self->block(sub {
+ my @got = $self->functionDeclare(
+ $class,
+ );
+ return
+ ".${class}Prototype =",
+ $self->globalPrototype(
+ $class,
+ "_classObject",
+ ".ErrorPrototype",
+ undef,
+ @got,
+ $self->property("name", $self->stringValue($class)),
+ ),
+ ".$class =",
+ $self->globalFunction(
+ $class,
+ 1,
+ $self->functionDeclare(
+ $class,
+ ),
+ ),
+ });
+ } qw/Eval Type Range Reference Syntax URI/),
+
+ # JSON
+ $self->block(sub {
+ my $class = "JSON";
+ return
+ ".$class =",
+ $self->globalObject(
+ $class,
+ $self->functionDeclare(
+ $class,
+ "parse", 2,
+ "stringify", 3,
+ ),
+ ),
+ }),
+
+ # Global
+ $self->block(sub {
+ my $class = "Global";
+ my @got = $self->functionDeclare(
+ $class,
+ "eval", 1,
+ "parseInt", 2,
+ "parseFloat", 1,
+ "isNaN", 1,
+ "isFinite", 1,
+ "decodeURI", 1,
+ "decodeURIComponent", 1,
+ "encodeURI", 1,
+ "encodeURIComponent", 1,
+ "escape", 1,
+ "unescape", 1,
+ );
+ my @propertyMap = $self->propertyMap(
+ @got,
+ $self->globalDeclare(
+ "Object",
+ "Function",
+ "Array",
+ "String",
+ "Boolean",
+ "Number",
+ "Math",
+ "Date",
+ "RegExp",
+ "Error",
+ "EvalError",
+ "TypeError",
+ "RangeError",
+ "ReferenceError",
+ "SyntaxError",
+ "URIError",
+ "JSON",
+ ),
+ $self->property("undefined", $self->undefinedValue(), "0"),
+ $self->property("NaN", $self->numberValue("math.NaN()"), "0"),
+ $self->property("Infinity", $self->numberValue("math.Inf(+1)"), "0"),
+ );
+ my $propertyOrder = $self->propertyOrder(@propertyMap);
+ $propertyOrder =~ s/^propertyOrder: //;
+ return
+ "runtime.globalObject.property =",
+ @propertyMap,
+ "runtime.globalObject.propertyOrder =",
+ $propertyOrder,
+ ;
+ }),
+ ;
+}
+
+sub propertyMap {
+ my $self = shift;
+ return "map[string]_property{", (join ",\n", @_, ""), "}",
+}
+
+our (@preblock, @postblock);
+sub block {
+ my $self = shift;
+ local @preblock = ();
+ local @postblock = ();
+ my @input = $_[0]->();
+ my @output;
+ while (@input) {
+ local $_ = shift @input;
+ if (m/^\./) {
+ $_ = "runtime.global$_";
+ }
+ if (m/ :?=$/) {
+ $_ .= shift @input;
+ }
+ push @output, $_;
+ }
+ return
+ "{",
+ @preblock,
+ @output,
+ @postblock,
+ "}",
+ ;
+}
+
+sub numberConstantDeclare {
+ my $self = shift;
+ my @got;
+ while (@_) {
+ my $name = shift;
+ my $value = shift;
+ push @got, $self->property($name, $self->numberValue($value), "0"),
+ }
+ return @got;
+}
+
+sub functionDeclare {
+ my $self = shift;
+ my $class = shift;
+ my $builtin = "builtin${class}";
+ my @got;
+ while (@_) {
+ my $name = shift;
+ my $length = shift;
+ $name = $self->newFunction($name, "${builtin}_", $length);
+ push @got, $self->functionProperty($name),
+ }
+ return @got;
+}
+
+sub globalDeclare {
+ my $self = shift;
+ my @got;
+ while (@_) {
+ my $name = shift;
+ push @got, $self->property($name, $self->objectValue("runtime.global.$name"), "0101"),
+ }
+ return @got;
+}
+
+sub propertyOrder {
+ my $self = shift;
+ my $propertyMap = join "", @_;
+
+ my (@keys) = $propertyMap =~ m/("\w+"):/g;
+ my $propertyOrder =
+ join "\n", "propertyOrder: []string{", (join ",\n", @keys, ""), "}";
+ return $propertyOrder;
+}
+
+sub globalObject {
+ my $self = shift;
+ my $name = shift;
+
+ my $propertyMap = "";
+ if (@_) {
+ $propertyMap = join "\n", $self->propertyMap(@_);
+ my $propertyOrder = $self->propertyOrder($propertyMap);
+ $propertyMap = "property: $propertyMap,\n$propertyOrder,";
+ }
+
+ return trim <<_END_;
+&_object{
+ runtime: runtime,
+ class: "$name",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ $propertyMap
+}
+_END_
+}
+
+sub globalFunction {
+ my $self = shift;
+ my $name = shift;
+ my $length = shift;
+
+ my $builtin = "builtin${name}";
+ my $builtinNew = "builtinNew${name}";
+ my $prototype = "runtime.global.${name}Prototype";
+ my $propertyMap = "";
+ unshift @_,
+ $self->property("length", $self->numberValue($length), "0"),
+ $self->property("prototype", $self->objectValue($prototype), "0"),
+ ;
+
+ if (@_) {
+ $propertyMap = join "\n", $self->propertyMap(@_);
+ my $propertyOrder = $self->propertyOrder($propertyMap);
+ $propertyMap = "property: $propertyMap,\n$propertyOrder,";
+ }
+
+ push @postblock, $self->statement(
+ "$prototype.property[\"constructor\"] =",
+ $self->property(undef, $self->objectValue("runtime.global.${name}"), "0101"),
+ );
+
+ return trim <<_END_;
+&_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ value: @{[ $self->nativeFunctionOf($name, $builtin, $builtinNew) ]},
+ $propertyMap
+}
+_END_
+}
+
+sub nativeCallFunction {
+ my $self = shift;
+ my $name = shift;
+ my $func = shift;
+ return trim <<_END_;
+_nativeCallFunction{ "$name", $func }
+_END_
+}
+
+sub globalPrototype {
+ my $self = shift;
+ my $class = shift;
+ my $classObject = shift;
+ my $prototype = shift;
+ my $value = shift;
+
+ if (!defined $prototype) {
+ $prototype = "nil";
+ }
+
+ if (!defined $value) {
+ $value = "nil";
+ }
+
+ if ($prototype =~ m/^\./) {
+ $prototype = "runtime.global$prototype";
+ }
+
+ my $propertyMap = "";
+ if (@_) {
+ $propertyMap = join "\n", $self->propertyMap(@_);
+ my $propertyOrder = $self->propertyOrder($propertyMap);
+ $propertyMap = "property: $propertyMap,\n$propertyOrder,";
+ }
+
+ return trim <<_END_;
+&_object{
+ runtime: runtime,
+ class: "$class",
+ objectClass: $classObject,
+ prototype: $prototype,
+ extensible: true,
+ value: $value,
+ $propertyMap
+}
+_END_
+}
+
+sub newFunction {
+ my $self = shift;
+ my $name = shift;
+ my $func = shift;
+ my $length = shift;
+
+ my @name = ($name, $name);
+ if ($name =~ m/^(\w+):(\w+)$/) {
+ @name = ($1, $2);
+ $name = $name[0];
+ }
+
+ if ($func =~ m/^builtin\w+_$/) {
+ $func = "$func$name[1]";
+ }
+
+ my $propertyOrder = "";
+ my @propertyMap = (
+ $self->property("length", $self->numberValue($length), "0"),
+ );
+
+ if (@propertyMap) {
+ $propertyOrder = $self->propertyOrder(@propertyMap);
+ $propertyOrder = "$propertyOrder,";
+ }
+
+ my $label = functionLabel($name);
+ push @preblock, $self->statement(
+ "$label := @{[ trim <<_END_ ]}",
+&_object{
+ runtime: runtime,
+ class: "Function",
+ objectClass: _classObject,
+ prototype: runtime.global.FunctionPrototype,
+ extensible: true,
+ property: @{[ join "\n", $self->propertyMap(@propertyMap) ]},
+ $propertyOrder
+ value: @{[ $self->nativeFunctionOf($name, $func) ]},
+}
+_END_
+ );
+
+ return $name;
+}
+
+sub newObject {
+ my $self = shift;
+
+ my $propertyMap = join "\n", $self->propertyMap(@_);
+ my $propertyOrder = $self->propertyOrder($propertyMap);
+
+ return trim <<_END_;
+&_object{
+ runtime: runtime,
+ class: "Object",
+ objectClass: _classObject,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ property: $propertyMap,
+ $propertyOrder,
+}
+_END_
+}
+
+sub newPrototypeObject {
+ my $self = shift;
+ my $class = shift;
+ my $objectClass = shift;
+ my $value = shift;
+ if (defined $value) {
+ $value = "value: $value,";
+ }
+
+ my $propertyMap = join "\n", $self->propertyMap(@_);
+ my $propertyOrder = $self->propertyOrder($propertyMap);
+
+ return trim <<_END_;
+&_object{
+ runtime: runtime,
+ class: "$class",
+ objectClass: $objectClass,
+ prototype: runtime.global.ObjectPrototype,
+ extensible: true,
+ property: $propertyMap,
+ $propertyOrder,
+ $value
+}
+_END_
+}
+
+sub functionProperty {
+ my $self = shift;
+ my $name = shift;
+
+ return $self->property(
+ $name,
+ $self->objectValue(functionLabel($name))
+ );
+}
+
+sub statement {
+ my $self = shift;
+ return join "\n", @_;
+}
+
+sub functionOf {
+ my $self = shift;
+ my $call = shift;
+ my $construct = shift;
+ if ($construct) {
+ $construct = "construct: $construct,";
+ } else {
+ $construct = "";
+ }
+
+ return trim <<_END_
+_functionObject{
+ call: $call,
+ $construct
+}
+_END_
+}
+
+sub nativeFunctionOf {
+ my $self = shift;
+ my $name = shift;
+ my $call = shift;
+ my $construct = shift;
+ if ($construct) {
+ $construct = "construct: $construct,";
+ } else {
+ $construct = "";
+ }
+
+ return trim <<_END_
+_nativeFunctionObject{
+ name: "$name",
+ call: $call,
+ $construct
+}
+_END_
+}
+
+sub nameProperty {
+ my $self = shift;
+ my $name = shift;
+ my $value = shift;
+
+ return trim <<_END_;
+"$name": _property{
+ mode: 0101,
+ value: $value,
+}
+_END_
+}
+
+sub numberValue {
+ my $self = shift;
+ my $value = shift;
+ return trim <<_END_;
+Value{
+ kind: valueNumber,
+ value: $value,
+}
+_END_
+}
+
+sub property {
+ my $self = shift;
+ my $name = shift;
+ my $value = shift;
+ my $mode = shift;
+ $mode = "0101" unless defined $mode;
+ if (! defined $value) {
+ $value = "Value{}";
+ }
+ if (defined $name) {
+ return trim <<_END_;
+"$name": _property{
+ mode: $mode,
+ value: $value,
+}
+_END_
+ } else {
+ return trim <<_END_;
+_property{
+ mode: $mode,
+ value: $value,
+}
+_END_
+ }
+
+}
+
+sub objectProperty {
+ my $self = shift;
+ my $name = shift;
+ my $value = shift;
+
+ return trim <<_END_;
+"$name": _property{
+ mode: 0101,
+ value: @{[ $self->objectValue($value)]},
+}
+_END_
+}
+
+sub objectValue {
+ my $self = shift;
+ my $value = shift;
+ return trim <<_END_
+Value{
+ kind: valueObject,
+ value: $value,
+}
+_END_
+}
+
+sub stringValue {
+ my $self = shift;
+ my $value = shift;
+ return trim <<_END_
+Value{
+ kind: valueString,
+ value: "$value",
+}
+_END_
+}
+
+sub booleanValue {
+ my $self = shift;
+ my $value = shift;
+ return trim <<_END_
+Value{
+ kind: valueBoolean,
+ value: $value,
+}
+_END_
+}
+
+sub undefinedValue {
+ my $self = shift;
+ return trim <<_END_
+Value{
+ kind: valueUndefined,
+}
+_END_
+}
diff --git a/vendor/github.com/robertkrimen/otto/object.go b/vendor/github.com/robertkrimen/otto/object.go
new file mode 100644
index 000000000..849812c91
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/object.go
@@ -0,0 +1,156 @@
+package otto
+
+// _object is the runtime representation of a JavaScript object: its class
+// name, per-class behaviour table, optional native payload, prototype link,
+// extensibility flag, and named properties.
+type _object struct {
+ runtime *_runtime
+
+ class string
+ objectClass *_objectClass
+ value interface{}
+
+ prototype *_object
+ extensible bool
+
+ property map[string]_property
+ propertyOrder []string // records insertion order so enumeration is deterministic
+}
+
+// newObject allocates an _object of the given class wired to runtime, using
+// the generic _classObject behaviour table, with an empty property map and
+// extensible set to true.
+func newObject(runtime *_runtime, class string) *_object {
+ self := &_object{
+ runtime: runtime,
+ class: class,
+ objectClass: _classObject,
+ property: make(map[string]_property),
+ extensible: true,
+ }
+ return self
+}
+
+// 8.12
+
+// The methods below implement the ES5 §8.12 internal methods by delegating
+// to the per-class function table in self.objectClass.
+
+// 8.12.1
+func (self *_object) getOwnProperty(name string) *_property {
+ return self.objectClass.getOwnProperty(self, name)
+}
+
+// 8.12.2
+func (self *_object) getProperty(name string) *_property {
+ return self.objectClass.getProperty(self, name)
+}
+
+// 8.12.3
+func (self *_object) get(name string) Value {
+ return self.objectClass.get(self, name)
+}
+
+// 8.12.4
+func (self *_object) canPut(name string) bool {
+ return self.objectClass.canPut(self, name)
+}
+
+// 8.12.5
+func (self *_object) put(name string, value Value, throw bool) {
+ self.objectClass.put(self, name, value, throw)
+}
+
+// 8.12.6
+func (self *_object) hasProperty(name string) bool {
+ return self.objectClass.hasProperty(self, name)
+}
+
+func (self *_object) hasOwnProperty(name string) bool {
+ return self.objectClass.hasOwnProperty(self, name)
+}
+
+// _defaultValueHint selects the method order used by [[DefaultValue]]
+// (§8.12.8): a number hint tries valueOf first, a string hint tries
+// toString first.
+type _defaultValueHint int
+
+const (
+ defaultValueNoHint _defaultValueHint = iota
+ defaultValueHintString
+ defaultValueHintNumber
+)
+
+// 8.12.8
+// DefaultValue implements [[DefaultValue]]: with no hint, Date objects
+// default to the string hint and everything else to the number hint. It
+// then invokes valueOf/toString (ordered per the hint) and returns the
+// first primitive result, panicking with a TypeError if neither yields one.
+func (self *_object) DefaultValue(hint _defaultValueHint) Value {
+ if hint == defaultValueNoHint {
+ if self.class == "Date" {
+ // Date exception
+ hint = defaultValueHintString
+ } else {
+ hint = defaultValueHintNumber
+ }
+ }
+ methodSequence := []string{"valueOf", "toString"}
+ if hint == defaultValueHintString {
+ methodSequence = []string{"toString", "valueOf"}
+ }
+ for _, methodName := range methodSequence {
+ method := self.get(methodName)
+ // FIXME This is redundant...
+ if method.isCallable() {
+ result := method._object().call(toValue_object(self), nil, false, nativeFrame)
+ if result.IsPrimitive() {
+ return result
+ }
+ }
+ }
+
+ panic(self.runtime.panicTypeError())
+}
+
+// String returns the string-hinted [[DefaultValue]] coerced to a Go string.
+func (self *_object) String() string {
+ return self.DefaultValue(defaultValueHintString).string()
+}
+
+// defineProperty is a convenience wrapper around defineOwnProperty for a
+// plain data property with the given value and mode.
+func (self *_object) defineProperty(name string, value Value, mode _propertyMode, throw bool) bool {
+ return self.defineOwnProperty(name, _property{value, mode}, throw)
+}
+
+// 8.12.9
+func (self *_object) defineOwnProperty(name string, descriptor _property, throw bool) bool {
+ return self.objectClass.defineOwnProperty(self, name, descriptor, throw)
+}
+
+// delete delegates [[Delete]] to the class table.
+func (self *_object) delete(name string, throw bool) bool {
+ return self.objectClass.delete(self, name, throw)
+}
+
+// enumerate visits property names via the class table; the walk stops when
+// each returns false.
+func (self *_object) enumerate(all bool, each func(string) bool) {
+ self.objectClass.enumerate(self, all, each)
+}
+
+// _exists reports whether name is stored directly in the property map
+// (prototype chain is not consulted).
+func (self *_object) _exists(name string) bool {
+ _, exists := self.property[name]
+ return exists
+}
+
+// _read returns the stored property (by value) and whether it exists.
+func (self *_object) _read(name string) (_property, bool) {
+ property, exists := self.property[name]
+ return property, exists
+}
+
+// _write stores name -> {value, mode}, substituting the undefined Value for
+// a nil value, and appends first-time names to propertyOrder.
+func (self *_object) _write(name string, value interface{}, mode _propertyMode) {
+ if value == nil {
+ value = Value{}
+ }
+ _, exists := self.property[name]
+ self.property[name] = _property{value, mode}
+ if !exists {
+ self.propertyOrder = append(self.propertyOrder, name)
+ }
+}
+
+// _delete removes name from the property map and, if it was present, also
+// removes it from propertyOrder while keeping the remaining order intact.
+func (self *_object) _delete(name string) {
+ _, exists := self.property[name]
+ delete(self.property, name)
+ if exists {
+ for index, property := range self.propertyOrder {
+ if name == property {
+ if index == len(self.propertyOrder)-1 {
+ // Last element: simple truncation.
+ self.propertyOrder = self.propertyOrder[:index]
+ } else {
+ self.propertyOrder = append(self.propertyOrder[:index], self.propertyOrder[index+1:]...)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/robertkrimen/otto/object_class.go b/vendor/github.com/robertkrimen/otto/object_class.go
new file mode 100644
index 000000000..d18b9cede
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/object_class.go
@@ -0,0 +1,493 @@
+package otto
+
+import (
+ "encoding/json"
+)
+
+// _objectClass is a per-class table of the ES5 internal methods; each object
+// kind (plain object, array, string, arguments, Go-backed values, ...)
+// supplies its own implementations. clone and marshalJSON are otto
+// extensions; marshalJSON may be nil.
+type _objectClass struct {
+ getOwnProperty func(*_object, string) *_property
+ getProperty func(*_object, string) *_property
+ get func(*_object, string) Value
+ canPut func(*_object, string) bool
+ put func(*_object, string, Value, bool)
+ hasProperty func(*_object, string) bool
+ hasOwnProperty func(*_object, string) bool
+ defineOwnProperty func(*_object, string, _property, bool) bool
+ delete func(*_object, string, bool) bool
+ enumerate func(*_object, bool, func(string) bool)
+ clone func(*_object, *_object, *_clone) *_object
+ marshalJSON func(*_object) json.Marshaler
+}
+
+// objectEnumerate walks own properties in insertion order, visiting only
+// enumerable ones unless all is true; the walk stops when each returns false.
+func objectEnumerate(self *_object, all bool, each func(string) bool) {
+ for _, name := range self.propertyOrder {
+ if all || self.property[name].enumerable() {
+ if !each(name) {
+ return
+ }
+ }
+ }
+}
+
+// Behaviour tables for each object class, populated in init below. The
+// trailing blank identifier carries the shared *_objectClass type for the
+// whole comma-separated declaration list.
+var (
+ _classObject,
+ _classArray,
+ _classString,
+ _classArguments,
+ _classGoStruct,
+ _classGoMap,
+ _classGoArray,
+ _classGoSlice,
+ _ *_objectClass
+)
+
+// init fills in the class tables. The struct literals are positional and
+// must follow the field order of _objectClass above; most classes reuse the
+// generic object* implementations and override only a few slots (e.g. array
+// overrides defineOwnProperty for length handling, Go-backed classes
+// override access/enumeration).
+func init() {
+ _classObject = &_objectClass{
+ objectGetOwnProperty,
+ objectGetProperty,
+ objectGet,
+ objectCanPut,
+ objectPut,
+ objectHasProperty,
+ objectHasOwnProperty,
+ objectDefineOwnProperty,
+ objectDelete,
+ objectEnumerate,
+ objectClone,
+ nil,
+ }
+
+ _classArray = &_objectClass{
+ objectGetOwnProperty,
+ objectGetProperty,
+ objectGet,
+ objectCanPut,
+ objectPut,
+ objectHasProperty,
+ objectHasOwnProperty,
+ arrayDefineOwnProperty,
+ objectDelete,
+ objectEnumerate,
+ objectClone,
+ nil,
+ }
+
+ _classString = &_objectClass{
+ stringGetOwnProperty,
+ objectGetProperty,
+ objectGet,
+ objectCanPut,
+ objectPut,
+ objectHasProperty,
+ objectHasOwnProperty,
+ objectDefineOwnProperty,
+ objectDelete,
+ stringEnumerate,
+ objectClone,
+ nil,
+ }
+
+ _classArguments = &_objectClass{
+ argumentsGetOwnProperty,
+ objectGetProperty,
+ argumentsGet,
+ objectCanPut,
+ objectPut,
+ objectHasProperty,
+ objectHasOwnProperty,
+ argumentsDefineOwnProperty,
+ argumentsDelete,
+ objectEnumerate,
+ objectClone,
+ nil,
+ }
+
+ _classGoStruct = &_objectClass{
+ goStructGetOwnProperty,
+ objectGetProperty,
+ objectGet,
+ goStructCanPut,
+ goStructPut,
+ objectHasProperty,
+ objectHasOwnProperty,
+ objectDefineOwnProperty,
+ objectDelete,
+ goStructEnumerate,
+ objectClone,
+ goStructMarshalJSON,
+ }
+
+ _classGoMap = &_objectClass{
+ goMapGetOwnProperty,
+ objectGetProperty,
+ objectGet,
+ objectCanPut,
+ objectPut,
+ objectHasProperty,
+ objectHasOwnProperty,
+ goMapDefineOwnProperty,
+ goMapDelete,
+ goMapEnumerate,
+ objectClone,
+ nil,
+ }
+
+ _classGoArray = &_objectClass{
+ goArrayGetOwnProperty,
+ objectGetProperty,
+ objectGet,
+ objectCanPut,
+ objectPut,
+ objectHasProperty,
+ objectHasOwnProperty,
+ goArrayDefineOwnProperty,
+ goArrayDelete,
+ goArrayEnumerate,
+ objectClone,
+ nil,
+ }
+
+ _classGoSlice = &_objectClass{
+ goSliceGetOwnProperty,
+ objectGetProperty,
+ objectGet,
+ objectCanPut,
+ objectPut,
+ objectHasProperty,
+ objectHasOwnProperty,
+ goSliceDefineOwnProperty,
+ goSliceDelete,
+ goSliceEnumerate,
+ objectClone,
+ nil,
+ }
+}
+
+// Allons-y
+
+// 8.12.1
+// objectGetOwnProperty looks up name in the own property map only.
+func objectGetOwnProperty(self *_object, name string) *_property {
+ // Return a _copy_ of the property
+ property, exists := self._read(name)
+ if !exists {
+ return nil
+ }
+ return &property
+}
+
+// 8.12.2
+// objectGetProperty checks own properties first, then walks the prototype
+// chain recursively.
+func objectGetProperty(self *_object, name string) *_property {
+ property := self.getOwnProperty(name)
+ if property != nil {
+ return property
+ }
+ if self.prototype != nil {
+ return self.prototype.getProperty(name)
+ }
+ return nil
+}
+
+// 8.12.3
+// objectGet returns the property's value, or undefined (Value{}) if absent.
+func objectGet(self *_object, name string) Value {
+ property := self.getProperty(name)
+ if property != nil {
+ return property.get(self)
+ }
+ return Value{}
+}
+
+// 8.12.4
+func objectCanPut(self *_object, name string) bool {
+ canPut, _, _ := _objectCanPut(self, name)
+ return canPut
+}
+
+// _objectCanPut reports whether name may be assigned, also returning the own
+// data property to update (if any) and the setter to invoke (if the property
+// is an accessor). Data properties inherited through the prototype chain
+// additionally require the object itself to be extensible.
+func _objectCanPut(self *_object, name string) (canPut bool, property *_property, setter *_object) {
+ property = self.getOwnProperty(name)
+ if property != nil {
+ switch propertyValue := property.value.(type) {
+ case Value:
+ canPut = property.writable()
+ return
+ case _propertyGetSet:
+ // Accessor: assignable only when a setter is present.
+ setter = propertyValue[1]
+ canPut = setter != nil
+ return
+ default:
+ panic(self.runtime.panicTypeError())
+ }
+ }
+
+ if self.prototype == nil {
+ return self.extensible, nil, nil
+ }
+
+ property = self.prototype.getProperty(name)
+ if property == nil {
+ return self.extensible, nil, nil
+ }
+
+ switch propertyValue := property.value.(type) {
+ case Value:
+ if !self.extensible {
+ return false, nil, nil
+ }
+ return property.writable(), nil, nil
+ case _propertyGetSet:
+ setter = propertyValue[1]
+ canPut = setter != nil
+ return
+ default:
+ panic(self.runtime.panicTypeError())
+ }
+}
+
+// 8.12.5
+// objectPut implements [[Put]]: reject (TypeError when throw is true) if the
+// property cannot be assigned, call the setter for accessor properties,
+// update an existing own data property, or otherwise create a new property
+// with mode 0111.
+func objectPut(self *_object, name string, value Value, throw bool) {
+
+ if true {
+ // Shortcut...
+ //
+ // So, right now, every class is using objectCanPut and every class
+ // is using objectPut.
+ //
+ // If that were to no longer be the case, we would have to have
+ // something to detect that here, so that we do not use an
+ // incompatible canPut routine
+ canPut, property, setter := _objectCanPut(self, name)
+ if !canPut {
+ self.runtime.typeErrorResult(throw)
+ } else if setter != nil {
+ setter.call(toValue(self), []Value{value}, false, nativeFrame)
+ } else if property != nil {
+ property.value = value
+ self.defineOwnProperty(name, *property, throw)
+ } else {
+ self.defineProperty(name, value, 0111, throw)
+ }
+ return
+ }
+
+ // The long way...
+ //
+ // Right now, code should never get here, see above
+ if !self.canPut(name) {
+ self.runtime.typeErrorResult(throw)
+ return
+ }
+
+ property := self.getOwnProperty(name)
+ if property == nil {
+ property = self.getProperty(name)
+ if property != nil {
+ if getSet, isAccessor := property.value.(_propertyGetSet); isAccessor {
+ getSet[1].call(toValue(self), []Value{value}, false, nativeFrame)
+ return
+ }
+ }
+ self.defineProperty(name, value, 0111, throw)
+ } else {
+ switch propertyValue := property.value.(type) {
+ case Value:
+ property.value = value
+ self.defineOwnProperty(name, *property, throw)
+ case _propertyGetSet:
+ if propertyValue[1] != nil {
+ propertyValue[1].call(toValue(self), []Value{value}, false, nativeFrame)
+ return
+ }
+ if throw {
+ panic(self.runtime.panicTypeError())
+ }
+ default:
+ panic(self.runtime.panicTypeError())
+ }
+ }
+}
+
+// 8.12.6
+// objectHasProperty consults the whole prototype chain.
+func objectHasProperty(self *_object, name string) bool {
+ return self.getProperty(name) != nil
+}
+
+// objectHasOwnProperty checks only the object's own properties.
+func objectHasOwnProperty(self *_object, name string) bool {
+ return self.getOwnProperty(name) != nil
+}
+
+// 8.12.9
+// objectDefineOwnProperty validates descriptor against any existing property
+// per the spec algorithm and writes the result. On a violation it panics
+// with a TypeError when throw is true, otherwise returns false.
+// &_nilGetSetObject is the sentinel for "getter/setter present but nil".
+func objectDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool {
+ property, exists := self._read(name)
+ {
+ if !exists {
+ // New property: only allowed on an extensible object.
+ if !self.extensible {
+ goto Reject
+ }
+ if newGetSet, isAccessor := descriptor.value.(_propertyGetSet); isAccessor {
+ if newGetSet[0] == &_nilGetSetObject {
+ newGetSet[0] = nil
+ }
+ if newGetSet[1] == &_nilGetSetObject {
+ newGetSet[1] = nil
+ }
+ descriptor.value = newGetSet
+ }
+ self._write(name, descriptor.value, descriptor.mode)
+ return true
+ }
+ if descriptor.isEmpty() {
+ return true
+ }
+
+ // TODO Per 8.12.9.6 - We should shortcut here (returning true) if
+ // the current and new (define) properties are the same
+
+ configurable := property.configurable()
+ if !configurable {
+ if descriptor.configurable() {
+ goto Reject
+ }
+ // Test that, if enumerable is set on the property descriptor, then it should
+ // be the same as the existing property
+ if descriptor.enumerateSet() && descriptor.enumerable() != property.enumerable() {
+ goto Reject
+ }
+ }
+ value, isDataDescriptor := property.value.(Value)
+ getSet, _ := property.value.(_propertyGetSet)
+ if descriptor.isGenericDescriptor() {
+ // GenericDescriptor
+ } else if isDataDescriptor != descriptor.isDataDescriptor() {
+ // DataDescriptor <=> AccessorDescriptor
+ if !configurable {
+ goto Reject
+ }
+ } else if isDataDescriptor && descriptor.isDataDescriptor() {
+ // DataDescriptor <=> DataDescriptor
+ if !configurable {
+ if !property.writable() && descriptor.writable() {
+ goto Reject
+ }
+ if !property.writable() {
+ if descriptor.value != nil && !sameValue(value, descriptor.value.(Value)) {
+ goto Reject
+ }
+ }
+ }
+ } else {
+ // AccessorDescriptor <=> AccessorDescriptor
+ newGetSet, _ := descriptor.value.(_propertyGetSet)
+ presentGet, presentSet := true, true
+ if newGetSet[0] == &_nilGetSetObject {
+ // Present, but nil
+ newGetSet[0] = nil
+ } else if newGetSet[0] == nil {
+ // Missing, not even nil
+ newGetSet[0] = getSet[0]
+ presentGet = false
+ }
+ if newGetSet[1] == &_nilGetSetObject {
+ // Present, but nil
+ newGetSet[1] = nil
+ } else if newGetSet[1] == nil {
+ // Missing, not even nil
+ newGetSet[1] = getSet[1]
+ presentSet = false
+ }
+ if !configurable {
+ if (presentGet && (getSet[0] != newGetSet[0])) || (presentSet && (getSet[1] != newGetSet[1])) {
+ goto Reject
+ }
+ }
+ descriptor.value = newGetSet
+ }
+ {
+ // This section will preserve attributes of
+ // the original property, if necessary
+ value1 := descriptor.value
+ if value1 == nil {
+ value1 = property.value
+ } else if newGetSet, isAccessor := descriptor.value.(_propertyGetSet); isAccessor {
+ if newGetSet[0] == &_nilGetSetObject {
+ newGetSet[0] = nil
+ }
+ if newGetSet[1] == &_nilGetSetObject {
+ newGetSet[1] = nil
+ }
+ value1 = newGetSet
+ }
+ mode1 := descriptor.mode
+ // NOTE(review): the 0200/020/02 bits appear to flag "attribute not
+ // specified in the descriptor", in which case the corresponding
+ // 0100/010/01 bit is inherited from the old mode — confirm against
+ // the _propertyMode definition.
+ if mode1&0222 != 0 {
+ // TODO Factor this out into somewhere testable
+ // (Maybe put into switch ...)
+ mode0 := property.mode
+ if mode1&0200 != 0 {
+ if descriptor.isDataDescriptor() {
+ mode1 &= ^0200 // Turn off "writable" missing
+ mode1 |= (mode0 & 0100)
+ }
+ }
+ if mode1&020 != 0 {
+ mode1 |= (mode0 & 010)
+ }
+ if mode1&02 != 0 {
+ mode1 |= (mode0 & 01)
+ }
+ mode1 &= 0311 // 0311 to preserve the non-setting on "writable"
+ }
+ self._write(name, value1, mode1)
+ }
+ return true
+ }
+Reject:
+ if throw {
+ panic(self.runtime.panicTypeError())
+ }
+ return false
+}
+
+// objectDelete implements [[Delete]]: absent properties delete trivially,
+// configurable ones are removed, and non-configurable ones fail via
+// typeErrorResult (which panics when throw is true).
+func objectDelete(self *_object, name string, throw bool) bool {
+ property_ := self.getOwnProperty(name)
+ if property_ == nil {
+ return true
+ }
+ if property_.configurable() {
+ self._delete(name)
+ return true
+ }
+ return self.runtime.typeErrorResult(throw)
+}
+
+// objectClone deep-copies in into out for runtime cloning: it re-points the
+// runtime, clones the prototype, copies the property map and order, and
+// clones any function/arguments payload held in value.
+func objectClone(in *_object, out *_object, clone *_clone) *_object {
+ *out = *in
+
+ out.runtime = clone.runtime
+ if out.prototype != nil {
+ out.prototype = clone.object(in.prototype)
+ }
+ out.property = make(map[string]_property, len(in.property))
+ out.propertyOrder = make([]string, len(in.propertyOrder))
+ copy(out.propertyOrder, in.propertyOrder)
+ for index, property := range in.property {
+ out.property[index] = clone.property(property)
+ }
+
+ switch value := in.value.(type) {
+ case _nativeFunctionObject:
+ out.value = value
+ case _bindFunctionObject:
+ out.value = _bindFunctionObject{
+ target: clone.object(value.target),
+ this: clone.value(value.this),
+ argumentList: clone.valueArray(value.argumentList),
+ }
+ case _nodeFunctionObject:
+ out.value = _nodeFunctionObject{
+ node: value.node,
+ stash: clone.stash(value.stash),
+ }
+ case _argumentsObject:
+ out.value = value.clone(clone)
+ }
+
+ return out
+}
diff --git a/vendor/github.com/robertkrimen/otto/otto.go b/vendor/github.com/robertkrimen/otto/otto.go
new file mode 100644
index 000000000..b5b528d53
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/otto.go
@@ -0,0 +1,770 @@
+/*
+Package otto is a JavaScript parser and interpreter written natively in Go.
+
+http://godoc.org/github.com/robertkrimen/otto
+
+ import (
+ "github.com/robertkrimen/otto"
+ )
+
+Run something in the VM
+
+ vm := otto.New()
+ vm.Run(`
+ abc = 2 + 2;
+ console.log("The value of abc is " + abc); // 4
+ `)
+
+Get a value out of the VM
+
+ value, err := vm.Get("abc")
+ {
+ value, _ := value.ToInteger()
+ }
+
+Set a number
+
+ vm.Set("def", 11)
+ vm.Run(`
+ console.log("The value of def is " + def);
+ // The value of def is 11
+ `)
+
+Set a string
+
+ vm.Set("xyzzy", "Nothing happens.")
+ vm.Run(`
+ console.log(xyzzy.length); // 16
+ `)
+
+Get the value of an expression
+
+ value, _ = vm.Run("xyzzy.length")
+ {
+ // value is an int64 with a value of 16
+ value, _ := value.ToInteger()
+ }
+
+An error happens
+
+ value, err = vm.Run("abcdefghijlmnopqrstuvwxyz.length")
+ if err != nil {
+ // err = ReferenceError: abcdefghijlmnopqrstuvwxyz is not defined
+ // If there is an error, then value.IsUndefined() is true
+ ...
+ }
+
+Set a Go function
+
+ vm.Set("sayHello", func(call otto.FunctionCall) otto.Value {
+ fmt.Printf("Hello, %s.\n", call.Argument(0).String())
+ return otto.Value{}
+ })
+
+Set a Go function that returns something useful
+
+ vm.Set("twoPlus", func(call otto.FunctionCall) otto.Value {
+ right, _ := call.Argument(0).ToInteger()
+ result, _ := vm.ToValue(2 + right)
+ return result
+ })
+
+Use the functions in JavaScript
+
+ result, _ = vm.Run(`
+ sayHello("Xyzzy"); // Hello, Xyzzy.
+ sayHello(); // Hello, undefined
+
+ result = twoPlus(2.0); // 4
+ `)
+
+Parser
+
+A separate parser is available in the parser package if you're just interested in building an AST.
+
+http://godoc.org/github.com/robertkrimen/otto/parser
+
+Parse and return an AST
+
+ filename := "" // A filename is optional
+ src := `
+ // Sample xyzzy example
+ (function(){
+ if (3.14159 > 0) {
+ console.log("Hello, World.");
+ return;
+ }
+
+ var xyzzy = NaN;
+ console.log("Nothing happens.");
+ return xyzzy;
+ })();
+ `
+
+ // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList
+ program, err := parser.ParseFile(nil, filename, src, 0)
+
+otto
+
+You can run (Go) JavaScript from the commandline with: http://github.com/robertkrimen/otto/tree/master/otto
+
+ $ go get -v github.com/robertkrimen/otto/otto
+
+Run JavaScript by entering some source on stdin or by giving otto a filename:
+
+ $ otto example.js
+
+underscore
+
+Optionally include the JavaScript utility-belt library, underscore, with this import:
+
+ import (
+ "github.com/robertkrimen/otto"
+ _ "github.com/robertkrimen/otto/underscore"
+ )
+
+ // Now every otto runtime will come loaded with underscore
+
+For more information: http://github.com/robertkrimen/otto/tree/master/underscore
+
+Caveat Emptor
+
+The following are some limitations with otto:
+
+ * "use strict" will parse, but does nothing.
+ * The regular expression engine (re2/regexp) is not fully compatible with the ECMA5 specification.
+ * Otto targets ES5. ES6 features (eg: Typed Arrays) are not supported.
+
+Regular Expression Incompatibility
+
+Go translates JavaScript-style regular expressions into something that is "regexp" compatible via `parser.TransformRegExp`.
+Unfortunately, RegExp requires backtracking for some patterns, and backtracking is not supported by the standard Go engine: https://code.google.com/p/re2/wiki/Syntax
+
+Therefore, the following syntax is incompatible:
+
+ (?=) // Lookahead (positive), currently a parsing error
+ (?!) // Lookahead (negative), currently a parsing error
+ \1 // Backreference (\1, \2, \3, ...), currently a parsing error
+
+A brief discussion of these limitations: "Regexp (?!re)" https://groups.google.com/forum/?fromgroups=#%21topic/golang-nuts/7qgSDWPIh_E
+
+More information about re2: https://code.google.com/p/re2/
+
+In addition to the above, re2 (Go) has a different definition for \s: [\t\n\f\r ].
+The JavaScript definition, on the other hand, also includes \v, Unicode "Separator, Space", etc.
+
+Halting Problem
+
+If you want to stop long running executions (like third-party code), you can use the interrupt channel to do this:
+
+ package main
+
+ import (
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/robertkrimen/otto"
+ )
+
+ var halt = errors.New("Stahp")
+
+ func main() {
+ runUnsafe(`var abc = [];`)
+ runUnsafe(`
+ while (true) {
+ // Loop forever
+ }`)
+ }
+
+ func runUnsafe(unsafe string) {
+ start := time.Now()
+ defer func() {
+ duration := time.Since(start)
+ if caught := recover(); caught != nil {
+ if caught == halt {
+ fmt.Fprintf(os.Stderr, "Some code took too long! Stopping after: %v\n", duration)
+ return
+ }
+ panic(caught) // Something else happened, repanic!
+ }
+ fmt.Fprintf(os.Stderr, "Ran code successfully: %v\n", duration)
+ }()
+
+ vm := otto.New()
+ vm.Interrupt = make(chan func(), 1) // The buffer prevents blocking
+
+ go func() {
+ time.Sleep(2 * time.Second) // Stop after two seconds
+ vm.Interrupt <- func() {
+ panic(halt)
+ }
+ }()
+
+ vm.Run(unsafe) // Here be dragons (risky code)
+ }
+
+Where is setTimeout/setInterval?
+
+These timing functions are not actually part of the ECMA-262 specification. Typically, they belong to the `window` object (in the browser).
+It would not be difficult to provide something like these via Go, but you probably want to wrap otto in an event loop in that case.
+
+For an example of how this could be done in Go with otto, see natto:
+
+http://github.com/robertkrimen/natto
+
+Here is some more discussion of the issue:
+
+* http://book.mixu.net/node/ch2.html
+
+* http://en.wikipedia.org/wiki/Reentrancy_%28computing%29
+
+* http://aaroncrane.co.uk/2009/02/perl_safe_signals/
+
+*/
+package otto
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/robertkrimen/otto/file"
+ "github.com/robertkrimen/otto/registry"
+)
+
+// Otto is the representation of the JavaScript runtime. Each instance of Otto has a self-contained namespace.
+type Otto struct {
+ // Interrupt is a channel for interrupting the runtime. You can use this to halt a long running execution, for example.
+ // See "Halting Problem" for more information.
+ Interrupt chan func()
+ runtime *_runtime // interpreter state backing this instance
+}
+
+// New will allocate a new JavaScript runtime
+func New() *Otto {
+ self := &Otto{
+ runtime: newContext(),
+ }
+ self.runtime.otto = self
+ self.runtime.traceLimit = 10
+ self.Set("console", self.runtime.newConsole())
+
+ // Run any registered extension sources (e.g. the underscore package);
+ // errors from these sources are ignored here.
+ registry.Apply(func(entry registry.Entry) {
+ self.Run(entry.Source())
+ })
+
+ return self
+}
+
+// clone returns a deep copy of the runtime with the new runtime's otto
+// back-pointer fixed up to the copy.
+func (otto *Otto) clone() *Otto {
+ self := &Otto{
+ runtime: otto.runtime.clone(),
+ }
+ self.runtime.otto = self
+ return self
+}
+
+// Run will allocate a new JavaScript runtime, run the given source
+// on the allocated runtime, and return the runtime, resulting value, and
+// error (if any).
+//
+// src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST always be in UTF-8.
+//
+// src may also be a Script.
+//
+// src may also be a Program, but if the AST has been modified, then runtime behavior is undefined.
+//
+func Run(src interface{}) (*Otto, Value, error) {
+ otto := New()
+ value, err := otto.Run(src) // This already does safety checking
+ return otto, value, err
+}
+
+// Run will run the given source (parsing it first if necessary), returning the resulting value and error (if any)
+//
+// src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST always be in UTF-8.
+//
+// If the runtime is unable to parse source, then this function will return undefined and the parse error (nothing
+// will be evaluated in this case).
+//
+// src may also be a Script.
+//
+// src may also be a Program, but if the AST has been modified, then runtime behavior is undefined.
+//
+func (self Otto) Run(src interface{}) (Value, error) {
+ value, err := self.runtime.cmpl_run(src, nil)
+ if !value.safe() {
+ // Normalize unsafe results to undefined before returning to the caller.
+ value = Value{}
+ }
+ return value, err
+}
+
+// Eval will do the same thing as Run, except without leaving the current scope.
+//
+// By staying in the same scope, the code evaluated has access to everything
+// already defined in the current stack frame. This is most useful in, for
+// example, a debugger call.
+func (self Otto) Eval(src interface{}) (Value, error) {
+ if self.runtime.scope == nil {
+ // Not currently executing: bootstrap (and later tear down) the global scope.
+ self.runtime.enterGlobalScope()
+ defer self.runtime.leaveScope()
+ }
+
+ value, err := self.runtime.cmpl_eval(src, nil)
+ if !value.safe() {
+ value = Value{}
+ }
+ return value, err
+}
+
+// Get the value of the top-level binding of the given name.
+//
+// If there is an error (like the binding does not exist), then the value
+// will be undefined.
+func (self Otto) Get(name string) (Value, error) {
+ value := Value{}
+ err := catchPanic(func() {
+ value = self.getValue(name)
+ })
+ if !value.safe() {
+ value = Value{}
+ }
+ return value, err
+}
+
+// getValue reads name from the global stash (non-strict lookup).
+func (self Otto) getValue(name string) Value {
+ return self.runtime.globalStash.getBinding(name, false)
+}
+
+// Set the top-level binding of the given name to the given value.
+//
+// Set will automatically apply ToValue to the given value in order
+// to convert it to a JavaScript value (type Value).
+//
+// If there is an error (like the binding is read-only, or the ToValue conversion
+// fails), then an error is returned.
+//
+// If the top-level binding does not exist, it will be created.
+func (self Otto) Set(name string, value interface{}) error {
+ {
+ // Inner block deliberately shadows value with its converted Value form.
+ value, err := self.ToValue(value)
+ if err != nil {
+ return err
+ }
+ err = catchPanic(func() {
+ self.setValue(name, value)
+ })
+ return err
+ }
+}
+
+// setValue writes name into the global stash (non-strict assignment).
+func (self Otto) setValue(name string, value Value) {
+ self.runtime.globalStash.setValue(name, value, false)
+}
+
+// SetDebuggerHandler registers fn as the runtime's debugger callback
+// (presumably invoked when a JavaScript `debugger` statement is reached —
+// confirm against the runtime's debugger field usage).
+func (self Otto) SetDebuggerHandler(fn func(vm *Otto)) {
+ self.runtime.debugger = fn
+}
+
+// SetRandomSource sets the runtime's source of random float64 values
+// (presumably backing Math.random — confirm against the runtime's random
+// field usage).
+func (self Otto) SetRandomSource(fn func() float64) {
+ self.runtime.random = fn
+}
+
+// SetStackDepthLimit sets an upper limit to the depth of the JavaScript
+// stack. In simpler terms, this limits the number of "nested" function calls
+// you can make in a particular interpreter instance.
+//
+// Note that this doesn't take into account the Go stack depth. If your
+// JavaScript makes a call to a Go function, otto won't keep track of what
+// happens outside the interpreter. So if your Go function is infinitely
+// recursive, you're still in trouble.
+func (self Otto) SetStackDepthLimit(limit int) {
+ self.runtime.stackLimit = limit
+}
+
+// SetStackTraceLimit sets an upper limit to the number of stack frames that
+// otto will use when formatting an error's stack trace. By default, the limit
+// is 10. This is consistent with V8 and SpiderMonkey.
+//
+// TODO: expose via `Error.stackTraceLimit`
+func (self Otto) SetStackTraceLimit(limit int) {
+ self.runtime.traceLimit = limit
+}
+
+// Error-constructing helpers: each builds the corresponding JavaScript error
+// object in this runtime and wraps it as a Value.
+
+// MakeCustomError creates a new Error object with the given name and message,
+// returning it as a Value.
+func (self Otto) MakeCustomError(name, message string) Value {
+ return self.runtime.toValue(self.runtime.newError(name, self.runtime.toValue(message), 0))
+}
+
+// MakeRangeError creates a new RangeError object with the given message,
+// returning it as a Value.
+func (self Otto) MakeRangeError(message string) Value {
+ return self.runtime.toValue(self.runtime.newRangeError(self.runtime.toValue(message)))
+}
+
+// MakeSyntaxError creates a new SyntaxError object with the given message,
+// returning it as a Value.
+func (self Otto) MakeSyntaxError(message string) Value {
+ return self.runtime.toValue(self.runtime.newSyntaxError(self.runtime.toValue(message)))
+}
+
+// MakeTypeError creates a new TypeError object with the given message,
+// returning it as a Value.
+func (self Otto) MakeTypeError(message string) Value {
+ return self.runtime.toValue(self.runtime.newTypeError(self.runtime.toValue(message)))
+}
+
+// Context is a structure that contains information about the current execution
+// context.
+type Context struct {
+ Filename string // source filename, or "<unknown>"/"<anonymous>" when unavailable
+ Line int
+ Column int
+ Callee string // name of the function being executed, if any
+ Symbols map[string]Value // bindings visible from the traversed scopes
+ This Value
+ Stacktrace []string // frame locations, innermost first
+}
+
+// Context returns the current execution context of the vm, traversing up to
+// ten stack frames, and skipping any innermost native function stack frames.
+func (self Otto) Context() Context {
+ return self.ContextSkip(10, true)
+}
+
+// ContextLimit returns the current execution context of the vm, with a
+// specific limit on the number of stack frames to traverse, skipping any
+// innermost native function stack frames.
+func (self Otto) ContextLimit(limit int) Context {
+ return self.ContextSkip(limit, true)
+}
+
+// ContextSkip returns the current execution context of the vm, with a
+// specific limit on the number of stack frames to traverse, optionally
+// skipping any innermost native function stack frames.
+func (self Otto) ContextSkip(limit int, skipNative bool) (ctx Context) {
+ // Ensure we are operating in a scope
+ if self.runtime.scope == nil {
+ self.runtime.enterGlobalScope()
+ defer self.runtime.leaveScope()
+ }
+
+ scope := self.runtime.scope
+ frame := scope.frame
+
+ // Optionally walk outward past native frames to the first JS frame.
+ for skipNative && frame.native && scope.outer != nil {
+ scope = scope.outer
+ frame = scope.frame
+ }
+
+ // Get location information
+ ctx.Filename = "<unknown>"
+ ctx.Callee = frame.callee
+
+ switch {
+ case frame.native:
+ ctx.Filename = frame.nativeFile
+ ctx.Line = frame.nativeLine
+ ctx.Column = 0
+ case frame.file != nil:
+ ctx.Filename = "<anonymous>"
+
+ if p := frame.file.Position(file.Idx(frame.offset)); p != nil {
+ ctx.Line = p.Line
+ ctx.Column = p.Column
+
+ if p.Filename != "" {
+ ctx.Filename = p.Filename
+ }
+ }
+ }
+
+ // Get the current scope this Value
+ ctx.This = toValue_object(scope.this)
+
+ // Build stacktrace (up to 10 levels deep)
+ ctx.Symbols = make(map[string]Value)
+ ctx.Stacktrace = append(ctx.Stacktrace, frame.location())
+ for limit != 0 {
+ // Get variables
+ stash := scope.lexical
+ for {
+ // Inner bindings shadow outer ones: only record the first
+ // occurrence of each name.
+ for _, name := range getStashProperties(stash) {
+ if _, ok := ctx.Symbols[name]; !ok {
+ ctx.Symbols[name] = stash.getBinding(name, true)
+ }
+ }
+ stash = stash.outer()
+ if stash == nil || stash.outer() == nil {
+ break
+ }
+ }
+
+ scope = scope.outer
+ if scope == nil {
+ break
+ }
+ if scope.frame.offset >= 0 {
+ ctx.Stacktrace = append(ctx.Stacktrace, scope.frame.location())
+ }
+ limit--
+ }
+
+ return
+}
+
+// Call the given JavaScript with a given this and arguments.
+//
+// If this is nil, then some special handling takes place to determine the proper
+// this value, falling back to a "standard" invocation if necessary (where this is
+// undefined).
+//
+// If source begins with "new " (A lowercase new followed by a space), then
+// Call will invoke the function constructor rather than performing a function call.
+// In this case, the this argument has no effect.
+//
+// // value is a String object
+// value, _ := vm.Call("Object", nil, "Hello, World.")
+//
+// // Likewise...
+// value, _ := vm.Call("new Object", nil, "Hello, World.")
+//
+// // This will perform a concat on the given array and return the result
+// // value is [ 1, 2, 3, undefined, 4, 5, 6, 7, "abc" ]
+// value, _ := vm.Call(`[ 1, 2, 3, undefined, 4 ].concat`, nil, 5, 6, 7, "abc")
+//
+func (self Otto) Call(source string, this interface{}, argumentList ...interface{}) (Value, error) {
+
+ thisValue := Value{}
+
+ construct := false
+ if strings.HasPrefix(source, "new ") {
+ source = source[4:]
+ construct = true
+ }
+
+ // FIXME enterGlobalScope
+ self.runtime.enterGlobalScope()
+ defer func() {
+ self.runtime.leaveScope()
+ }()
+
+ if !construct && this == nil {
+ // Fast path: parse source as a call expression (source + "()") and
+ // evaluate it directly so the natural this binding applies.
+ program, err := self.runtime.cmpl_parse("", source+"()", nil)
+ if err == nil {
+ if node, ok := program.body[0].(*_nodeExpressionStatement); ok {
+ if node, ok := node.expression.(*_nodeCallExpression); ok {
+ var value Value
+ err := catchPanic(func() {
+ value = self.runtime.cmpl_evaluate_nodeCallExpression(node, argumentList)
+ })
+ if err != nil {
+ return Value{}, err
+ }
+ return value, nil
+ }
+ }
+ }
+ } else {
+ value, err := self.ToValue(this)
+ if err != nil {
+ return Value{}, err
+ }
+ thisValue = value
+ }
+
+ {
+ this := thisValue
+
+ // Slow path: evaluate source to obtain the callee, then invoke or
+ // construct it with the explicit this.
+ fn, err := self.Run(source)
+ if err != nil {
+ return Value{}, err
+ }
+
+ if construct {
+ result, err := fn.constructSafe(self.runtime, this, argumentList...)
+ if err != nil {
+ return Value{}, err
+ }
+ return result, nil
+ }
+
+ result, err := fn.Call(this, argumentList...)
+ if err != nil {
+ return Value{}, err
+ }
+ return result, nil
+ }
+}
+
+// Object will run the given source and return the result as an object.
+//
+// For example, accessing an existing object:
+//
+// object, _ := vm.Object(`Number`)
+//
+// Or, creating a new object:
+//
+// object, _ := vm.Object(`({ xyzzy: "Nothing happens." })`)
+//
+// Or, creating and assigning an object:
+//
+// object, _ := vm.Object(`xyzzy = {}`)
+// object.Set("volume", 11)
+//
+// If there is an error (like the source does not result in an object), then
+// nil and an error is returned.
func (self Otto) Object(source string) (*Object, error) {
	// Evaluate the source in the runtime; any non-object result is an error.
	value, err := self.runtime.cmpl_run(source, nil)
	if err != nil {
		return nil, err
	}
	if value.IsObject() {
		return value.Object(), nil
	}
	return nil, fmt.Errorf("value is not an object")
}
+
// ToValue will convert an interface{} value to a value digestible by otto/JavaScript.
//
// Conversion is delegated to the runtime's safeToValue, which returns an
// error (rather than panicking) when the value cannot be converted.
func (self Otto) ToValue(value interface{}) (Value, error) {
	return self.runtime.safeToValue(value)
}
+
// Copy will create a copy/clone of the runtime.
//
// Copy is useful for saving some time when creating many similar runtimes.
//
// This method works by walking the original runtime and cloning each object, scope, stash,
// etc. into a new runtime.
//
// Be on the lookout for memory leaks or inadvertent sharing of resources.
func (in *Otto) Copy() *Otto {
	out := &Otto{
		runtime: in.runtime.clone(),
	}
	// The cloned runtime must point back at its new owner, not the original.
	out.runtime.otto = out
	return out
}
+
// Object{}

// Object is the representation of a JavaScript object.
type Object struct {
	object *_object // the underlying runtime object
	value  Value    // the Value wrapping that same object
}

// _newObject wraps a runtime object and its Value in an Object.
func _newObject(object *_object, value Value) *Object {
	// value MUST contain object!
	return &Object{
		object: object,
		value:  value,
	}
}
+
// Call a method on the object.
//
// It is essentially equivalent to:
//
//	var method, _ := object.Get(name)
//	method.Call(object, argumentList...)
//
// An undefined value and an error will result if:
//
//	1. There is an error during conversion of the argument list
//	2. The property is not actually a function
//	3. An (uncaught) exception is thrown
//
func (self Object) Call(name string, argumentList ...interface{}) (Value, error) {
	// TODO: Insert an example using JavaScript below...
	// e.g., Object("JSON").Call("stringify", ...)

	function, err := self.Get(name)
	if err != nil {
		return Value{}, err
	}
	// Invoke with this object as the this value.
	return function.Call(self.Value(), argumentList...)
}

// Value will return self as a value.
func (self Object) Value() Value {
	return self.value
}

// Get the value of the property with the given name.
func (self Object) Get(name string) (Value, error) {
	value := Value{}
	err := catchPanic(func() {
		value = self.object.get(name)
	})
	// Normalize unsafe values to the zero Value before returning.
	if !value.safe() {
		value = Value{}
	}
	return value, err
}
+
// Set the property of the given name to the given value.
//
// An error will result if the setting the property triggers an exception (i.e. read-only),
// or there is an error during conversion of the given value.
func (self Object) Set(name string, value interface{}) error {
	{
		// Shadow value with its converted form; conversion errors abort.
		value, err := self.object.runtime.safeToValue(value)
		if err != nil {
			return err
		}
		// put may throw (e.g. read-only property); catchPanic converts
		// the throw into an error.
		err = catchPanic(func() {
			self.object.put(name, value, true)
		})
		return err
	}
}

// Keys gets the keys for the given object.
//
// Equivalent to calling Object.keys on the object.
func (self Object) Keys() []string {
	var keys []string
	self.object.enumerate(false, func(name string) bool {
		keys = append(keys, name)
		return true
	})
	return keys
}

// KeysByParent gets the keys (and those of the parents) for the given object,
// in order of "closest" to "furthest".
func (self Object) KeysByParent() [][]string {
	var a [][]string

	// Walk the prototype chain, collecting one key slice per level.
	for o := self.object; o != nil; o = o.prototype {
		var l []string

		o.enumerate(false, func(name string) bool {
			l = append(l, name)
			return true
		})

		a = append(a, l)
	}

	return a
}
+
// Class will return the class string of the object.
//
// The return value will (generally) be one of:
//
//	Object
//	Function
//	Array
//	String
//	Number
//	Boolean
//	Date
//	RegExp
//
func (self Object) Class() string {
	return self.object.class
}
diff --git a/vendor/github.com/robertkrimen/otto/otto_.go b/vendor/github.com/robertkrimen/otto/otto_.go
new file mode 100644
index 000000000..304a83150
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/otto_.go
@@ -0,0 +1,178 @@
+package otto
+
+import (
+ "fmt"
+ "regexp"
+ runtime_ "runtime"
+ "strconv"
+ "strings"
+)
+
// isIdentifier_Regexp matches a simple identifier: an ASCII letter or '$'
// followed by any number of ASCII letters, digits, or '$'.
var isIdentifier_Regexp = regexp.MustCompile(`^[a-zA-Z\$][a-zA-Z0-9\$]*$`)

// isIdentifier reports whether s is a simple identifier per the pattern above
// (note: underscores and non-ASCII identifier characters are not accepted).
func isIdentifier(s string) bool {
	return isIdentifier_Regexp.MatchString(s)
}
+
// toValueArray converts a variadic argument list into a []Value.
// As a special case, a single []Value argument is returned as-is
// (no per-element conversion).
func (self *_runtime) toValueArray(arguments ...interface{}) []Value {
	length := len(arguments)
	if length == 1 {
		if valueArray, ok := arguments[0].([]Value); ok {
			return valueArray
		}
		return []Value{self.toValue(arguments[0])}
	}

	valueArray := make([]Value, length)
	for index, value := range arguments {
		valueArray[index] = self.toValue(value)
	}

	return valueArray
}
+
// stringToArrayIndex parses name as an ECMAScript array index, returning -1
// if name is not a valid index (non-numeric, negative, or >= 2^32).
func stringToArrayIndex(name string) int64 {
	index, err := strconv.ParseInt(name, 10, 64)
	if err != nil {
		return -1
	}
	if index < 0 {
		return -1
	}
	if index >= maxUint32 {
		// The value 2^32 (or above) is not a valid index because
		// you cannot store a uint32 length for an index of uint32
		return -1
	}
	return index
}

// isUint32 reports whether value fits in the uint32 range [0, maxUint32].
func isUint32(value int64) bool {
	return value >= 0 && value <= maxUint32
}

// arrayIndexToString formats an array index as its decimal property name.
func arrayIndexToString(index int64) string {
	return strconv.FormatInt(index, 10)
}
+
// valueOfArrayIndex returns array[index], or the zero Value when the index is
// out of range or the slot is empty.
func valueOfArrayIndex(array []Value, index int) Value {
	value, _ := getValueOfArrayIndex(array, index)
	return value
}

// getValueOfArrayIndex returns array[index] and true when the index is in
// range and the slot holds a non-empty value; otherwise the zero Value and false.
func getValueOfArrayIndex(array []Value, index int) (Value, bool) {
	if index >= 0 && index < len(array) {
		value := array[index]
		if !value.isEmpty() {
			return value, true
		}
	}
	return Value{}, false
}
+
// A range index can be anything from 0 up to length. It is NOT safe to use as an index
// to an array, but is useful for slicing and in some ECMA algorithms.
//
// With negativeIsZero, negative indices clamp to 0; otherwise a negative index
// counts back from length (then clamps to 0). Either way the result is capped
// at length.
func valueToRangeIndex(indexValue Value, length int64, negativeIsZero bool) int64 {
	index := indexValue.number().int64
	if negativeIsZero {
		if index < 0 {
			index = 0
		}
		// minimum(index, length)
		if index >= length {
			index = length
		}
		return index
	}

	if index < 0 {
		// Negative index is relative to the end.
		index += length
		if index < 0 {
			index = 0
		}
	} else {
		if index > length {
			index = length
		}
	}
	return index
}
+
// rangeStartEnd interprets array as the (start, end) argument pair of a
// slice-like operation over a collection of the given size. A missing or
// undefined end argument defaults to size.
func rangeStartEnd(array []Value, size int64, negativeIsZero bool) (start, end int64) {
	start = valueToRangeIndex(valueOfArrayIndex(array, 0), size, negativeIsZero)
	if len(array) == 1 {
		// If there is only the start argument, then end = size
		end = size
		return
	}

	// Assuming the argument is undefined...
	end = size
	endValue := valueOfArrayIndex(array, 1)
	if !endValue.IsUndefined() {
		// Which it is not, so get the value as an array index
		end = valueToRangeIndex(endValue, size, negativeIsZero)
	}
	return
}
+
+func rangeStartLength(source []Value, size int64) (start, length int64) {
+ start = valueToRangeIndex(valueOfArrayIndex(source, 0), size, false)
+
+ // Assume the second argument is missing or undefined
+ length = int64(size)
+ if len(source) == 1 {
+ // If there is only the start argument, then length = size
+ return
+ }
+
+ lengthValue := valueOfArrayIndex(source, 1)
+ if !lengthValue.IsUndefined() {
+ // Which it is not, so get the value as an array index
+ length = lengthValue.number().int64
+ }
+ return
+}
+
// boolFields splits input on whitespace and returns the resulting words as a
// set, represented as a map from word to true.
func boolFields(input string) (result map[string]bool) {
	result = make(map[string]bool)
	for _, field := range strings.Fields(input) {
		result[field] = true
	}
	return result
}
+
// hereBeDragons builds a warning message identifying the calling function,
// optionally followed by extra detail.
//
// With no arguments the message ends with a period. With one argument, the
// argument is appended via %s. With more, the first argument is used as a
// format string applied to the remainder.
func hereBeDragons(arguments ...interface{}) string {
	pc, _, _, _ := runtime_.Caller(1)
	caller := runtime_.FuncForPC(pc).Name()
	message := fmt.Sprintf("Here be dragons -- %s", caller)
	if len(arguments) == 0 {
		return message + "."
	}
	head := fmt.Sprintf("%s", arguments[0])
	if len(arguments) == 1 {
		return message + ": " + head
	}
	return message + ": " + fmt.Sprintf(head, arguments[1:]...)
}

// throwHereBeDragons panics with the message produced by hereBeDragons.
func throwHereBeDragons(arguments ...interface{}) {
	panic(hereBeDragons(arguments...))
}
+
// eachPair walks list two elements at a time, invoking fn with each pair.
// If list has odd length, the final call receives nil as its second value.
func eachPair(list []interface{}, fn func(_0, _1 interface{})) {
	for index := 0; index < len(list); index += 2 {
		first := list[index]
		var second interface{}
		if index+1 < len(list) {
			second = list[index+1]
		}
		fn(first, second)
	}
}
diff --git a/vendor/github.com/robertkrimen/otto/parser/Makefile b/vendor/github.com/robertkrimen/otto/parser/Makefile
new file mode 100644
index 000000000..766fd4d0b
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/Makefile
@@ -0,0 +1,4 @@
+.PHONY: test
+
+test:
+ go test
diff --git a/vendor/github.com/robertkrimen/otto/parser/README.markdown b/vendor/github.com/robertkrimen/otto/parser/README.markdown
new file mode 100644
index 000000000..c3cae5b60
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/README.markdown
@@ -0,0 +1,190 @@
+# parser
+--
+ import "github.com/robertkrimen/otto/parser"
+
+Package parser implements a parser for JavaScript.
+
+ import (
+ "github.com/robertkrimen/otto/parser"
+ )
+
+Parse and return an AST
+
+ filename := "" // A filename is optional
+ src := `
+ // Sample xyzzy example
+ (function(){
+ if (3.14159 > 0) {
+ console.log("Hello, World.");
+ return;
+ }
+
+ var xyzzy = NaN;
+ console.log("Nothing happens.");
+ return xyzzy;
+ })();
+ `
+
+ // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList
+ program, err := parser.ParseFile(nil, filename, src, 0)
+
+
+### Warning
+
+The parser and AST interfaces are still a work in progress (particularly where
+node types are concerned) and may change in the future.
+
+## Usage
+
+#### func ParseFile
+
+```go
+func ParseFile(fileSet *file.FileSet, filename string, src interface{}, mode Mode) (*ast.Program, error)
+```
+ParseFile parses the source code of a single JavaScript/ECMAScript source file
+and returns the corresponding ast.Program node.
+
+If fileSet == nil, ParseFile parses source without a FileSet. If fileSet != nil,
+ParseFile first adds filename and src to fileSet.
+
+The filename argument is optional and is used for labelling errors, etc.
+
+src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST
+always be in UTF-8.
+
+ // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList
+ program, err := parser.ParseFile(nil, "", `if (abc > 1) {}`, 0)
+
+#### func ParseFunction
+
+```go
+func ParseFunction(parameterList, body string) (*ast.FunctionLiteral, error)
+```
+ParseFunction parses a given parameter list and body as a function and returns
+the corresponding ast.FunctionLiteral node.
+
+The parameter list, if any, should be a comma-separated list of identifiers.
+
+#### func ReadSource
+
+```go
+func ReadSource(filename string, src interface{}) ([]byte, error)
+```
+
+#### func TransformRegExp
+
+```go
+func TransformRegExp(pattern string) (string, error)
+```
+TransformRegExp transforms a JavaScript pattern into a Go "regexp" pattern.
+
+re2 (Go) cannot do backtracking, so the presence of a lookahead (?=) (?!) or
+backreference (\1, \2, ...) will cause an error.
+
+re2 (Go) has a different definition for \s: [\t\n\f\r ]. The JavaScript
+definition, on the other hand, also includes \v, Unicode "Separator, Space",
+etc.
+
+If the pattern is invalid (not valid even in JavaScript), then this function
+returns the empty string and an error.
+
+If the pattern is valid, but incompatible (contains a lookahead or
+backreference), then this function returns the transformation (a non-empty
+string) AND an error.
+
+#### type Error
+
+```go
+type Error struct {
+ Position file.Position
+ Message string
+}
+```
+
+An Error represents a parsing error. It includes the position where the error
+occurred and a message/description.
+
+#### func (Error) Error
+
+```go
+func (self Error) Error() string
+```
+
+#### type ErrorList
+
+```go
+type ErrorList []*Error
+```
+
+ErrorList is a list of *Errors.
+
+#### func (*ErrorList) Add
+
+```go
+func (self *ErrorList) Add(position file.Position, msg string)
+```
+Add adds an Error with given position and message to an ErrorList.
+
+#### func (ErrorList) Err
+
+```go
+func (self ErrorList) Err() error
+```
+Err returns an error equivalent to this ErrorList. If the list is empty, Err
+returns nil.
+
+#### func (ErrorList) Error
+
+```go
+func (self ErrorList) Error() string
+```
+Error implements the Error interface.
+
+#### func (ErrorList) Len
+
+```go
+func (self ErrorList) Len() int
+```
+
+#### func (ErrorList) Less
+
+```go
+func (self ErrorList) Less(i, j int) bool
+```
+
+#### func (*ErrorList) Reset
+
+```go
+func (self *ErrorList) Reset()
+```
+Reset resets an ErrorList to no errors.
+
+#### func (ErrorList) Sort
+
+```go
+func (self ErrorList) Sort()
+```
+
+#### func (ErrorList) Swap
+
+```go
+func (self ErrorList) Swap(i, j int)
+```
+
+#### type Mode
+
+```go
+type Mode uint
+```
+
+A Mode value is a set of flags (or 0). They control optional parser
+functionality.
+
+```go
+const (
+ IgnoreRegExpErrors Mode = 1 << iota // Ignore RegExp compatibility errors (allow backtracking)
+)
+```
+
+--
+**godocdown** http://github.com/robertkrimen/godocdown
diff --git a/vendor/github.com/robertkrimen/otto/parser/dbg.go b/vendor/github.com/robertkrimen/otto/parser/dbg.go
new file mode 100644
index 000000000..3c5f2f698
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/dbg.go
@@ -0,0 +1,9 @@
+// This file was AUTOMATICALLY GENERATED by dbg-import (smuggol) for github.com/robertkrimen/dbg
+
+package parser
+
+import (
+ Dbg "github.com/robertkrimen/otto/dbg"
+)
+
+var dbg, dbgf = Dbg.New()
diff --git a/vendor/github.com/robertkrimen/otto/parser/error.go b/vendor/github.com/robertkrimen/otto/parser/error.go
new file mode 100644
index 000000000..e0f74a5cf
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/error.go
@@ -0,0 +1,175 @@
+package parser
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/robertkrimen/otto/file"
+ "github.com/robertkrimen/otto/token"
+)
+
// Shared parser error message formats.
const (
	err_UnexpectedToken = "Unexpected token %v"
	err_UnexpectedEndOfInput = "Unexpected end of input"
	err_UnexpectedEscape = "Unexpected escape"
)
+
+// UnexpectedNumber: 'Unexpected number',
+// UnexpectedString: 'Unexpected string',
+// UnexpectedIdentifier: 'Unexpected identifier',
+// UnexpectedReserved: 'Unexpected reserved word',
+// NewlineAfterThrow: 'Illegal newline after throw',
+// InvalidRegExp: 'Invalid regular expression',
+// UnterminatedRegExp: 'Invalid regular expression: missing /',
+// InvalidLHSInAssignment: 'Invalid left-hand side in assignment',
+// InvalidLHSInForIn: 'Invalid left-hand side in for-in',
+// MultipleDefaultsInSwitch: 'More than one default clause in switch statement',
+// NoCatchOrFinally: 'Missing catch or finally after try',
+// UnknownLabel: 'Undefined label \'%0\'',
+// Redeclaration: '%0 \'%1\' has already been declared',
+// IllegalContinue: 'Illegal continue statement',
+// IllegalBreak: 'Illegal break statement',
+// IllegalReturn: 'Illegal return statement',
+// StrictModeWith: 'Strict mode code may not include a with statement',
+// StrictCatchVariable: 'Catch variable may not be eval or arguments in strict mode',
+// StrictVarName: 'Variable name may not be eval or arguments in strict mode',
+// StrictParamName: 'Parameter name eval or arguments is not allowed in strict mode',
+// StrictParamDupe: 'Strict mode function may not have duplicate parameter names',
+// StrictFunctionName: 'Function name may not be eval or arguments in strict mode',
+// StrictOctalLiteral: 'Octal literals are not allowed in strict mode.',
+// StrictDelete: 'Delete of an unqualified identifier in strict mode.',
+// StrictDuplicateProperty: 'Duplicate data property in object literal not allowed in strict mode',
+// AccessorDataProperty: 'Object literal may not have data and accessor property with the same name',
+// AccessorGetSet: 'Object literal may not have multiple get/set accessors with the same name',
+// StrictLHSAssignment: 'Assignment to eval or arguments is not allowed in strict mode',
+// StrictLHSPostfix: 'Postfix increment/decrement may not have eval or arguments operand in strict mode',
+// StrictLHSPrefix: 'Prefix increment/decrement may not have eval or arguments operand in strict mode',
+// StrictReservedWord: 'Use of future reserved word in strict mode'
+
// A SyntaxError is a description of an ECMAScript syntax error.

// An Error represents a parsing error. It includes the position where the error occurred and a message/description.
type Error struct {
	Position file.Position // where the error occurred
	Message  string        // human-readable description
}
+
// FIXME Should this be "SyntaxError"?

// Error formats the error as "filename: Line L:C message", substituting
// "(anonymous)" when no filename is recorded.
func (self Error) Error() string {
	filename := self.Position.Filename
	if filename == "" {
		filename = "(anonymous)"
	}
	return fmt.Sprintf("%s: Line %d:%d %s",
		filename,
		self.Position.Line,
		self.Position.Column,
		self.Message,
	)
}
+
// error records a parse error at place, which may be either a byte offset
// (int) or a file.Idx; a zero file.Idx means the parser's current offset.
// The formatted error is appended to self.errors and also returned.
func (self *_parser) error(place interface{}, msg string, msgValues ...interface{}) *Error {
	idx := file.Idx(0)
	switch place := place.(type) {
	case int:
		idx = self.idxOf(place)
	case file.Idx:
		if place == 0 {
			idx = self.idxOf(self.chrOffset)
		} else {
			idx = place
		}
	default:
		// Programmer error: unsupported place type.
		panic(fmt.Errorf("error(%T, ...)", place))
	}

	position := self.position(idx)
	msg = fmt.Sprintf(msg, msgValues...)
	self.errors.Add(position, msg)
	return self.errors[len(self.errors)-1]
}
+
// errorUnexpected records an "unexpected character" error at idx;
// chr == -1 is treated as end of input.
func (self *_parser) errorUnexpected(idx file.Idx, chr rune) error {
	if chr == -1 {
		return self.error(idx, err_UnexpectedEndOfInput)
	}
	return self.error(idx, err_UnexpectedToken, token.ILLEGAL)
}

// errorUnexpectedToken records an "unexpected token" error at the parser's
// current position, choosing a message specific to the token kind.
func (self *_parser) errorUnexpectedToken(tkn token.Token) error {
	switch tkn {
	case token.EOF:
		return self.error(file.Idx(0), err_UnexpectedEndOfInput)
	}
	value := tkn.String()
	switch tkn {
	case token.BOOLEAN, token.NULL:
		// Report the literal text (e.g. "true") rather than the token name.
		value = self.literal
	case token.IDENTIFIER:
		return self.error(self.idx, "Unexpected identifier")
	case token.KEYWORD:
		// TODO Might be a future reserved word
		return self.error(self.idx, "Unexpected reserved word")
	case token.NUMBER:
		return self.error(self.idx, "Unexpected number")
	case token.STRING:
		return self.error(self.idx, "Unexpected string")
	}
	return self.error(self.idx, err_UnexpectedToken, value)
}
+
// ErrorList is a list of *Errors.
//
type ErrorList []*Error

// Add adds an Error with given position and message to an ErrorList.
func (self *ErrorList) Add(position file.Position, msg string) {
	*self = append(*self, &Error{position, msg})
}

// Reset resets an ErrorList to no errors.
func (self *ErrorList) Reset() { *self = (*self)[0:0] }

// Len, Swap, and Less implement sort.Interface, ordering errors by
// filename, then line, then column.
func (self ErrorList) Len() int      { return len(self) }
func (self ErrorList) Swap(i, j int) { self[i], self[j] = self[j], self[i] }
func (self ErrorList) Less(i, j int) bool {
	x := &self[i].Position
	y := &self[j].Position
	if x.Filename < y.Filename {
		return true
	}
	if x.Filename == y.Filename {
		if x.Line < y.Line {
			return true
		}
		if x.Line == y.Line {
			return x.Column < y.Column
		}
	}
	return false
}

// Sort sorts the list by position (see Less).
func (self ErrorList) Sort() {
	sort.Sort(self)
}

// Error implements the Error interface.
func (self ErrorList) Error() string {
	switch len(self) {
	case 0:
		return "no errors"
	case 1:
		return self[0].Error()
	}
	return fmt.Sprintf("%s (and %d more errors)", self[0].Error(), len(self)-1)
}

// Err returns an error equivalent to this ErrorList.
// If the list is empty, Err returns nil.
func (self ErrorList) Err() error {
	if len(self) == 0 {
		return nil
	}
	return self
}
diff --git a/vendor/github.com/robertkrimen/otto/parser/expression.go b/vendor/github.com/robertkrimen/otto/parser/expression.go
new file mode 100644
index 000000000..63a169d40
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/expression.go
@@ -0,0 +1,1005 @@
+package parser
+
+import (
+ "regexp"
+
+ "github.com/robertkrimen/otto/ast"
+ "github.com/robertkrimen/otto/file"
+ "github.com/robertkrimen/otto/token"
+)
+
// parseIdentifier consumes the current token as an identifier and returns the
// corresponding ast.Identifier node, attaching pending comments when comment
// storage is enabled.
func (self *_parser) parseIdentifier() *ast.Identifier {
	literal := self.literal
	idx := self.idx
	if self.mode&StoreComments != 0 {
		self.comments.MarkComments(ast.LEADING)
	}
	self.next()
	exp := &ast.Identifier{
		Name: literal,
		Idx:  idx,
	}

	if self.mode&StoreComments != 0 {
		self.comments.SetExpression(exp)
	}

	return exp
}
+
// parsePrimaryExpression parses a primary expression — an identifier, literal
// (null/boolean/string/number/regexp/object/array), parenthesized expression,
// this, or function literal — dispatching on the current token. On an
// unexpected token it records an error, skips to the next statement, and
// returns an ast.BadExpression covering the skipped span.
func (self *_parser) parsePrimaryExpression() ast.Expression {
	literal := self.literal
	idx := self.idx
	switch self.token {
	case token.IDENTIFIER:
		self.next()
		if len(literal) > 1 {
			// Reject strict-mode-only keywords used as identifiers.
			tkn, strict := token.IsKeyword(literal)
			if tkn == token.KEYWORD {
				if !strict {
					self.error(idx, "Unexpected reserved word")
				}
			}
		}
		return &ast.Identifier{
			Name: literal,
			Idx:  idx,
		}
	case token.NULL:
		self.next()
		return &ast.NullLiteral{
			Idx:     idx,
			Literal: literal,
		}
	case token.BOOLEAN:
		self.next()
		value := false
		switch literal {
		case "true":
			value = true
		case "false":
			value = false
		default:
			self.error(idx, "Illegal boolean literal")
		}
		return &ast.BooleanLiteral{
			Idx:     idx,
			Literal: literal,
			Value:   value,
		}
	case token.STRING:
		self.next()
		// Strip the surrounding quotes before unescaping.
		value, err := parseStringLiteral(literal[1 : len(literal)-1])
		if err != nil {
			self.error(idx, err.Error())
		}
		return &ast.StringLiteral{
			Idx:     idx,
			Literal: literal,
			Value:   value,
		}
	case token.NUMBER:
		self.next()
		value, err := parseNumberLiteral(literal)
		if err != nil {
			self.error(idx, err.Error())
			value = 0
		}
		return &ast.NumberLiteral{
			Idx:     idx,
			Literal: literal,
			Value:   value,
		}
	case token.SLASH, token.QUOTIENT_ASSIGN:
		// A '/' or '/=' in expression position starts a regexp literal.
		return self.parseRegExpLiteral()
	case token.LEFT_BRACE:
		return self.parseObjectLiteral()
	case token.LEFT_BRACKET:
		return self.parseArrayLiteral()
	case token.LEFT_PARENTHESIS:
		self.expect(token.LEFT_PARENTHESIS)
		expression := self.parseExpression()
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.expect(token.RIGHT_PARENTHESIS)
		return expression
	case token.THIS:
		self.next()
		return &ast.ThisExpression{
			Idx: idx,
		}
	case token.FUNCTION:
		return self.parseFunction(false)
	}

	self.errorUnexpectedToken(self.token)
	self.nextStatement()
	return &ast.BadExpression{From: idx, To: self.idx}
}
+
// parseRegExpLiteral scans the remainder of a regexp literal (the opening
// slash has already been consumed) plus any trailing flags, validates the
// pattern by transforming it to Go regexp syntax, and returns the
// ast.RegExpLiteral node.
func (self *_parser) parseRegExpLiteral() *ast.RegExpLiteral {

	offset := self.chrOffset - 1 // Opening slash already gotten
	if self.token == token.QUOTIENT_ASSIGN {
		offset -= 1 // =
	}
	idx := self.idxOf(offset)

	pattern, err := self.scanString(offset)
	endOffset := self.chrOffset

	self.next()
	if err == nil {
		// Strip the enclosing slashes.
		pattern = pattern[1 : len(pattern)-1]
	}

	flags := ""
	if self.token == token.IDENTIFIER { // gim

		flags = self.literal
		self.next()
		endOffset = self.chrOffset - 1
	}

	var value string
	// TODO 15.10
	{
		// Test during parsing that this is a valid regular expression
		// Sorry, (?=) and (?!) are invalid (for now)
		pattern, err := TransformRegExp(pattern)
		if err != nil {
			// A non-empty transformed pattern with an error means
			// "valid but incompatible" — only an error unless
			// IgnoreRegExpErrors is set.
			if pattern == "" || self.mode&IgnoreRegExpErrors == 0 {
				self.error(idx, "Invalid regular expression: %s", err.Error())
			}
		} else {
			_, err = regexp.Compile(pattern)
			if err != nil {
				// We should not get here, ParseRegExp should catch any errors
				self.error(idx, "Invalid regular expression: %s", err.Error()[22:]) // Skip redundant "parse regexp error"
			} else {
				value = pattern
			}
		}
	}

	literal := self.str[offset:endOffset]

	return &ast.RegExpLiteral{
		Idx:     idx,
		Literal: literal,
		Pattern: pattern,
		Flags:   flags,
		Value:   value,
	}
}
+
// parseVariableDeclaration parses a single "name [= initializer]" declarator.
// When declarationList is non-nil the resulting node is also appended to it
// (used by parseVariableDeclarationList to collect well-formed declarators).
func (self *_parser) parseVariableDeclaration(declarationList *[]*ast.VariableExpression) ast.Expression {

	if self.token != token.IDENTIFIER {
		idx := self.expect(token.IDENTIFIER)
		self.nextStatement()
		return &ast.BadExpression{From: idx, To: self.idx}
	}

	literal := self.literal
	idx := self.idx
	self.next()
	node := &ast.VariableExpression{
		Name: literal,
		Idx:  idx,
	}
	if self.mode&StoreComments != 0 {
		self.comments.SetExpression(node)
	}

	if declarationList != nil {
		*declarationList = append(*declarationList, node)
	}

	if self.token == token.ASSIGN {
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()
		node.Initializer = self.parseAssignmentExpression()
	}

	return node
}

// parseVariableDeclarationList parses a comma-separated list of declarators
// following a "var" at var_, declares them in the current scope, and returns
// the declarator expressions.
func (self *_parser) parseVariableDeclarationList(var_ file.Idx) []ast.Expression {

	var declarationList []*ast.VariableExpression // Avoid bad expressions
	var list []ast.Expression

	for {
		if self.mode&StoreComments != 0 {
			self.comments.MarkComments(ast.LEADING)
		}
		decl := self.parseVariableDeclaration(&declarationList)
		list = append(list, decl)
		if self.token != token.COMMA {
			break
		}
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()
	}

	// Register the declaration with the enclosing scope; only well-formed
	// declarators (declarationList) are recorded there.
	self.scope.declare(&ast.VariableDeclaration{
		Var:  var_,
		List: declarationList,
	})

	return list
}
+
// parseObjectPropertyKey consumes an object-literal property key and returns
// (literal, value): the raw token text and the decoded key. value is left
// empty when the key fails to parse.
func (self *_parser) parseObjectPropertyKey() (string, string) {
	idx, tkn, literal := self.idx, self.token, self.literal
	value := ""
	if self.mode&StoreComments != 0 {
		self.comments.MarkComments(ast.KEY)
	}
	self.next()

	switch tkn {
	case token.IDENTIFIER:
		value = literal
	case token.NUMBER:
		var err error
		_, err = parseNumberLiteral(literal)
		if err != nil {
			self.error(idx, err.Error())
		} else {
			value = literal
		}
	case token.STRING:
		var err error
		value, err = parseStringLiteral(literal[1 : len(literal)-1])
		if err != nil {
			self.error(idx, err.Error())
		}
	default:
		// null, false, class, etc.
		if matchIdentifier.MatchString(literal) {
			value = literal
		}
	}
	return literal, value
}
+
// parseObjectProperty parses one property of an object literal: either a
// getter ("get name() {...}"), a setter ("set name() {...}"), or an ordinary
// "key: value" pair. A bare "get"/"set" followed by a colon is treated as a
// plain key.
func (self *_parser) parseObjectProperty() ast.Property {
	literal, value := self.parseObjectPropertyKey()
	if literal == "get" && self.token != token.COLON {
		idx := self.idx
		_, value := self.parseObjectPropertyKey()
		parameterList := self.parseFunctionParameterList()

		node := &ast.FunctionLiteral{
			Function:      idx,
			ParameterList: parameterList,
		}
		self.parseFunctionBlock(node)
		return ast.Property{
			Key:   value,
			Kind:  "get",
			Value: node,
		}
	} else if literal == "set" && self.token != token.COLON {
		idx := self.idx
		_, value := self.parseObjectPropertyKey()
		parameterList := self.parseFunctionParameterList()

		node := &ast.FunctionLiteral{
			Function:      idx,
			ParameterList: parameterList,
		}
		self.parseFunctionBlock(node)
		return ast.Property{
			Key:   value,
			Kind:  "set",
			Value: node,
		}
	}

	if self.mode&StoreComments != 0 {
		self.comments.MarkComments(ast.COLON)
	}
	self.expect(token.COLON)

	exp := ast.Property{
		Key:   value,
		Kind:  "value",
		Value: self.parseAssignmentExpression(),
	}

	if self.mode&StoreComments != 0 {
		self.comments.SetExpression(exp.Value)
	}
	return exp
}
+
// parseObjectLiteral parses "{ prop, ... }" and returns an ast.ObjectLiteral.
func (self *_parser) parseObjectLiteral() ast.Expression {
	var value []ast.Property
	idx0 := self.expect(token.LEFT_BRACE)
	for self.token != token.RIGHT_BRACE && self.token != token.EOF {
		value = append(value, self.parseObjectProperty())
		if self.token == token.COMMA {
			if self.mode&StoreComments != 0 {
				self.comments.Unset()
			}
			self.next()
			continue
		}
	}
	if self.mode&StoreComments != 0 {
		self.comments.MarkComments(ast.FINAL)
	}
	idx1 := self.expect(token.RIGHT_BRACE)

	return &ast.ObjectLiteral{
		LeftBrace:  idx0,
		RightBrace: idx1,
		Value:      value,
	}
}

// parseArrayLiteral parses "[ elem, , ... ]" and returns an ast.ArrayLiteral.
// Elisions (consecutive commas) become ast.EmptyExpression entries.
func (self *_parser) parseArrayLiteral() ast.Expression {
	idx0 := self.expect(token.LEFT_BRACKET)
	var value []ast.Expression
	for self.token != token.RIGHT_BRACKET && self.token != token.EOF {
		if self.token == token.COMMA {
			// This kind of comment requires a special empty expression node.
			empty := &ast.EmptyExpression{self.idx, self.idx}

			if self.mode&StoreComments != 0 {
				self.comments.SetExpression(empty)
				self.comments.Unset()
			}
			value = append(value, empty)
			self.next()
			continue
		}

		exp := self.parseAssignmentExpression()

		value = append(value, exp)
		if self.token != token.RIGHT_BRACKET {
			if self.mode&StoreComments != 0 {
				self.comments.Unset()
			}
			self.expect(token.COMMA)
		}
	}
	if self.mode&StoreComments != 0 {
		self.comments.MarkComments(ast.FINAL)
	}
	idx1 := self.expect(token.RIGHT_BRACKET)

	return &ast.ArrayLiteral{
		LeftBracket:  idx0,
		RightBracket: idx1,
		Value:        value,
	}
}
+
// parseArgumentList parses "( expr, ... )" and returns the argument
// expressions along with the indexes of the parentheses.
func (self *_parser) parseArgumentList() (argumentList []ast.Expression, idx0, idx1 file.Idx) {
	if self.mode&StoreComments != 0 {
		self.comments.Unset()
	}
	idx0 = self.expect(token.LEFT_PARENTHESIS)
	if self.token != token.RIGHT_PARENTHESIS {
		for {
			exp := self.parseAssignmentExpression()
			if self.mode&StoreComments != 0 {
				self.comments.SetExpression(exp)
			}
			argumentList = append(argumentList, exp)
			if self.token != token.COMMA {
				break
			}
			if self.mode&StoreComments != 0 {
				self.comments.Unset()
			}
			self.next()
		}
	}
	if self.mode&StoreComments != 0 {
		self.comments.Unset()
	}
	idx1 = self.expect(token.RIGHT_PARENTHESIS)
	return
}

// parseCallExpression parses the argument list of a call whose callee has
// already been parsed as left, and returns the ast.CallExpression.
func (self *_parser) parseCallExpression(left ast.Expression) ast.Expression {
	argumentList, idx0, idx1 := self.parseArgumentList()
	exp := &ast.CallExpression{
		Callee:           left,
		LeftParenthesis:  idx0,
		ArgumentList:     argumentList,
		RightParenthesis: idx1,
	}

	if self.mode&StoreComments != 0 {
		self.comments.SetExpression(exp)
	}
	return exp
}
+
// parseDotMember parses ".identifier" member access on left, returning an
// ast.DotExpression, or an ast.BadExpression if the name after the period is
// not a valid identifier.
func (self *_parser) parseDotMember(left ast.Expression) ast.Expression {
	period := self.expect(token.PERIOD)

	literal := self.literal
	idx := self.idx

	if !matchIdentifier.MatchString(literal) {
		self.expect(token.IDENTIFIER)
		self.nextStatement()
		return &ast.BadExpression{From: period, To: self.idx}
	}

	self.next()

	return &ast.DotExpression{
		Left: left,
		Identifier: &ast.Identifier{
			Idx:  idx,
			Name: literal,
		},
	}
}

// parseBracketMember parses "[expression]" member access on left, returning
// an ast.BracketExpression.
func (self *_parser) parseBracketMember(left ast.Expression) ast.Expression {
	idx0 := self.expect(token.LEFT_BRACKET)
	member := self.parseExpression()
	idx1 := self.expect(token.RIGHT_BRACKET)
	return &ast.BracketExpression{
		LeftBracket:  idx0,
		Left:         left,
		Member:       member,
		RightBracket: idx1,
	}
}
+
// parseNewExpression parses "new Callee [(args)]" and returns an
// ast.NewExpression; the argument list is optional.
func (self *_parser) parseNewExpression() ast.Expression {
	idx := self.expect(token.NEW)
	callee := self.parseLeftHandSideExpression()
	node := &ast.NewExpression{
		New:    idx,
		Callee: callee,
	}
	if self.token == token.LEFT_PARENTHESIS {
		argumentList, idx0, idx1 := self.parseArgumentList()
		node.ArgumentList = argumentList
		node.LeftParenthesis = idx0
		node.RightParenthesis = idx1
	}

	if self.mode&StoreComments != 0 {
		self.comments.SetExpression(node)
	}

	return node
}

// parseLeftHandSideExpression parses a left-hand-side expression WITHOUT
// allowing call expressions: a new/primary expression followed by any number
// of "." and "[...]" member accesses (compare
// parseLeftHandSideExpressionAllowCall, which also accepts "(...)" calls).
func (self *_parser) parseLeftHandSideExpression() ast.Expression {

	var left ast.Expression
	if self.token == token.NEW {
		left = self.parseNewExpression()
	} else {
		if self.mode&StoreComments != 0 {
			self.comments.MarkComments(ast.LEADING)
			self.comments.MarkPrimary()
		}
		left = self.parsePrimaryExpression()
	}

	if self.mode&StoreComments != 0 {
		self.comments.SetExpression(left)
	}

	for {
		if self.token == token.PERIOD {
			left = self.parseDotMember(left)
		} else if self.token == token.LEFT_BRACKET {
			left = self.parseBracketMember(left)
		} else {
			break
		}
	}

	return left
}
+
// parseLeftHandSideExpressionAllowCall parses a left-hand-side expression,
// additionally accepting call expressions "(...)" in the member-access loop.
// The "in" operator is unconditionally allowed while parsing it (scope.allowIn
// is saved and restored).
func (self *_parser) parseLeftHandSideExpressionAllowCall() ast.Expression {

	allowIn := self.scope.allowIn
	self.scope.allowIn = true
	defer func() {
		self.scope.allowIn = allowIn
	}()

	var left ast.Expression
	if self.token == token.NEW {
		var newComments []*ast.Comment
		if self.mode&StoreComments != 0 {
			// Comments before "new" must be re-attached to the whole
			// new-expression once it is parsed.
			newComments = self.comments.FetchAll()
			self.comments.MarkComments(ast.LEADING)
			self.comments.MarkPrimary()
		}
		left = self.parseNewExpression()
		if self.mode&StoreComments != 0 {
			self.comments.CommentMap.AddComments(left, newComments, ast.LEADING)
		}
	} else {
		if self.mode&StoreComments != 0 {
			self.comments.MarkComments(ast.LEADING)
			self.comments.MarkPrimary()
		}
		left = self.parsePrimaryExpression()
	}

	if self.mode&StoreComments != 0 {
		self.comments.SetExpression(left)
	}

	for {
		if self.token == token.PERIOD {
			left = self.parseDotMember(left)
		} else if self.token == token.LEFT_BRACKET {
			left = self.parseBracketMember(left)
		} else if self.token == token.LEFT_PARENTHESIS {
			left = self.parseCallExpression(left)
		} else {
			break
		}
	}

	return left
}
+
// parsePostfixExpression parses a left-hand-side expression optionally
// followed by a postfix ++ or -- operator.
func (self *_parser) parsePostfixExpression() ast.Expression {
	operand := self.parseLeftHandSideExpressionAllowCall()

	switch self.token {
	case token.INCREMENT, token.DECREMENT:
		// Make sure there is no line terminator here
		// (a newline before ++/-- triggers automatic semicolon insertion,
		// so the operator belongs to the next statement).
		if self.implicitSemicolon {
			break
		}
		tkn := self.token
		idx := self.idx
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()
		// Only references (identifiers and member expressions) may be
		// incremented or decremented.
		switch operand.(type) {
		case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression:
		default:
			self.error(idx, "Invalid left-hand side in assignment")
			self.nextStatement()
			return &ast.BadExpression{From: idx, To: self.idx}
		}
		exp := &ast.UnaryExpression{
			Operator: tkn,
			Idx:      idx,
			Operand:  operand,
			Postfix:  true,
		}

		if self.mode&StoreComments != 0 {
			self.comments.SetExpression(exp)
		}

		return exp
	}

	return operand
}
+
// parseUnaryExpression parses prefix unary operators (+ - ! ~ delete void
// typeof ++ --); anything else falls through to postfix parsing.
func (self *_parser) parseUnaryExpression() ast.Expression {

	switch self.token {
	case token.PLUS, token.MINUS, token.NOT, token.BITWISE_NOT:
		fallthrough
	case token.DELETE, token.VOID, token.TYPEOF:
		tkn := self.token
		idx := self.idx
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()

		// Unary operators are right-associative: recurse for the operand.
		return &ast.UnaryExpression{
			Operator: tkn,
			Idx:      idx,
			Operand:  self.parseUnaryExpression(),
		}
	case token.INCREMENT, token.DECREMENT:
		tkn := self.token
		idx := self.idx
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()
		operand := self.parseUnaryExpression()
		// Prefix ++/-- require an assignable (reference) operand.
		switch operand.(type) {
		case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression:
		default:
			self.error(idx, "Invalid left-hand side in assignment")
			self.nextStatement()
			return &ast.BadExpression{From: idx, To: self.idx}
		}
		return &ast.UnaryExpression{
			Operator: tkn,
			Idx:      idx,
			Operand:  operand,
		}
	}

	return self.parsePostfixExpression()
}
+
+func (self *_parser) parseMultiplicativeExpression() ast.Expression {
+ next := self.parseUnaryExpression
+ left := next()
+
+ for self.token == token.MULTIPLY || self.token == token.SLASH ||
+ self.token == token.REMAINDER {
+ tkn := self.token
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ }
+ }
+
+ return left
+}
+
+func (self *_parser) parseAdditiveExpression() ast.Expression {
+ next := self.parseMultiplicativeExpression
+ left := next()
+
+ for self.token == token.PLUS || self.token == token.MINUS {
+ tkn := self.token
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ }
+ }
+
+ return left
+}
+
+func (self *_parser) parseShiftExpression() ast.Expression {
+ next := self.parseAdditiveExpression
+ left := next()
+
+ for self.token == token.SHIFT_LEFT || self.token == token.SHIFT_RIGHT ||
+ self.token == token.UNSIGNED_SHIFT_RIGHT {
+ tkn := self.token
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ }
+ }
+
+ return left
+}
+
// parseRelationalExpression parses <, <=, >, >=, instanceof, and in.
// Unlike the other binary parsers in this file, the right operand is
// parsed right-recursively (Right: self.parseRelationalExpression()).
func (self *_parser) parseRelationalExpression() ast.Expression {
	next := self.parseShiftExpression
	left := next()

	// "in" is always permitted inside a relational expression; the
	// caller's setting (presumably disabled while parsing a for-in
	// header — confirm against the statement parser) is restored on return.
	allowIn := self.scope.allowIn
	self.scope.allowIn = true
	defer func() {
		self.scope.allowIn = allowIn
	}()

	switch self.token {
	case token.LESS, token.LESS_OR_EQUAL, token.GREATER, token.GREATER_OR_EQUAL:
		tkn := self.token
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()

		exp := &ast.BinaryExpression{
			Operator:   tkn,
			Left:       left,
			Right:      self.parseRelationalExpression(),
			Comparison: true,
		}
		return exp
	case token.INSTANCEOF:
		tkn := self.token
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()

		exp := &ast.BinaryExpression{
			Operator: tkn,
			Left:     left,
			Right:    self.parseRelationalExpression(),
		}
		return exp
	case token.IN:
		// Honor the flag captured BEFORE it was forced true above.
		if !allowIn {
			return left
		}
		tkn := self.token
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()

		exp := &ast.BinaryExpression{
			Operator: tkn,
			Left:     left,
			Right:    self.parseRelationalExpression(),
		}
		return exp
	}

	return left
}
+
+func (self *_parser) parseEqualityExpression() ast.Expression {
+ next := self.parseRelationalExpression
+ left := next()
+
+ for self.token == token.EQUAL || self.token == token.NOT_EQUAL ||
+ self.token == token.STRICT_EQUAL || self.token == token.STRICT_NOT_EQUAL {
+ tkn := self.token
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ Comparison: true,
+ }
+ }
+
+ return left
+}
+
+func (self *_parser) parseBitwiseAndExpression() ast.Expression {
+ next := self.parseEqualityExpression
+ left := next()
+
+ for self.token == token.AND {
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ tkn := self.token
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ }
+ }
+
+ return left
+}
+
+func (self *_parser) parseBitwiseExclusiveOrExpression() ast.Expression {
+ next := self.parseBitwiseAndExpression
+ left := next()
+
+ for self.token == token.EXCLUSIVE_OR {
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ tkn := self.token
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ }
+ }
+
+ return left
+}
+
+func (self *_parser) parseBitwiseOrExpression() ast.Expression {
+ next := self.parseBitwiseExclusiveOrExpression
+ left := next()
+
+ for self.token == token.OR {
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ tkn := self.token
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ }
+ }
+
+ return left
+}
+
+func (self *_parser) parseLogicalAndExpression() ast.Expression {
+ next := self.parseBitwiseOrExpression
+ left := next()
+
+ for self.token == token.LOGICAL_AND {
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ tkn := self.token
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ }
+ }
+
+ return left
+}
+
+func (self *_parser) parseLogicalOrExpression() ast.Expression {
+ next := self.parseLogicalAndExpression
+ left := next()
+
+ for self.token == token.LOGICAL_OR {
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ tkn := self.token
+ self.next()
+
+ left = &ast.BinaryExpression{
+ Operator: tkn,
+ Left: left,
+ Right: next(),
+ }
+ }
+
+ return left
+}
+
// parseConditionlExpression parses a logical-or expression optionally
// followed by a ternary "? consequent : alternate" tail.
// NOTE(review): the method name is missing an "a" ("Conditional"); it is
// kept unchanged because parseAssignmentExpression calls it by this name.
func (self *_parser) parseConditionlExpression() ast.Expression {
	left := self.parseLogicalOrExpression()

	if self.token == token.QUESTION_MARK {
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()

		consequent := self.parseAssignmentExpression()
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.expect(token.COLON)
		exp := &ast.ConditionalExpression{
			Test:       left,
			Consequent: consequent,
			Alternate:  self.parseAssignmentExpression(),
		}

		return exp
	}

	return left
}
+
// parseAssignmentExpression parses a conditional expression optionally
// followed by an assignment. Compound assignments (+=, -=, ...) store
// the UNDERLYING binary operator in the AssignExpression; a plain '='
// is stored as token.ASSIGN.
func (self *_parser) parseAssignmentExpression() ast.Expression {
	left := self.parseConditionlExpression()
	// The token.Token zero value (0) means "no assignment operator seen".
	var operator token.Token
	switch self.token {
	case token.ASSIGN:
		operator = self.token
	case token.ADD_ASSIGN:
		operator = token.PLUS
	case token.SUBTRACT_ASSIGN:
		operator = token.MINUS
	case token.MULTIPLY_ASSIGN:
		operator = token.MULTIPLY
	case token.QUOTIENT_ASSIGN:
		operator = token.SLASH
	case token.REMAINDER_ASSIGN:
		operator = token.REMAINDER
	case token.AND_ASSIGN:
		operator = token.AND
	case token.AND_NOT_ASSIGN:
		operator = token.AND_NOT
	case token.OR_ASSIGN:
		operator = token.OR
	case token.EXCLUSIVE_OR_ASSIGN:
		operator = token.EXCLUSIVE_OR
	case token.SHIFT_LEFT_ASSIGN:
		operator = token.SHIFT_LEFT
	case token.SHIFT_RIGHT_ASSIGN:
		operator = token.SHIFT_RIGHT
	case token.UNSIGNED_SHIFT_RIGHT_ASSIGN:
		operator = token.UNSIGNED_SHIFT_RIGHT
	}

	if operator != 0 {
		idx := self.idx
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()
		// Only references (identifiers and member expressions) may be
		// assigned to.
		switch left.(type) {
		case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression:
		default:
			self.error(left.Idx0(), "Invalid left-hand side in assignment")
			self.nextStatement()
			return &ast.BadExpression{From: idx, To: self.idx}
		}

		// Assignment is right-associative: recurse for the right side.
		exp := &ast.AssignExpression{
			Left:     left,
			Operator: operator,
			Right:    self.parseAssignmentExpression(),
		}

		if self.mode&StoreComments != 0 {
			self.comments.SetExpression(exp)
		}

		return exp
	}

	return left
}
+
+func (self *_parser) parseExpression() ast.Expression {
+ next := self.parseAssignmentExpression
+ left := next()
+
+ if self.token == token.COMMA {
+ sequence := []ast.Expression{left}
+ for {
+ if self.token != token.COMMA {
+ break
+ }
+ self.next()
+ sequence = append(sequence, next())
+ }
+ return &ast.SequenceExpression{
+ Sequence: sequence,
+ }
+ }
+
+ return left
+}
diff --git a/vendor/github.com/robertkrimen/otto/parser/lexer.go b/vendor/github.com/robertkrimen/otto/parser/lexer.go
new file mode 100644
index 000000000..d9d69e124
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/lexer.go
@@ -0,0 +1,866 @@
+package parser
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/robertkrimen/otto/ast"
+ "github.com/robertkrimen/otto/file"
+ "github.com/robertkrimen/otto/token"
+)
+
// _chr pairs a decoded rune with its encoded width in bytes.
type _chr struct {
	value rune // the decoded character
	width int  // number of bytes the character occupies in the source
}
+
// matchIdentifier matches a plain (non-escaped) identifier.
// NOTE(review): the trailing "}" inside the second character class looks
// accidental — as written it also admits a literal '}' in identifiers;
// confirm against upstream before changing.
var matchIdentifier = regexp.MustCompile(`^[$_\p{L}][$_\p{L}\d}]*$`)
+
// isDecimalDigit reports whether chr is an ASCII decimal digit.
func isDecimalDigit(chr rune) bool {
	return chr >= '0' && chr <= '9'
}
+
// digitValue returns the numeric value of a hexadecimal digit rune, or
// 16 (larger than any legal digit value) when chr is not a valid digit.
func digitValue(chr rune) int {
	if chr >= '0' && chr <= '9' {
		return int(chr - '0')
	}
	if chr >= 'a' && chr <= 'f' {
		return int(chr - 'a' + 10)
	}
	if chr >= 'A' && chr <= 'F' {
		return int(chr - 'A' + 10)
	}
	return 16
}
+
// isDigit reports whether chr is a valid digit in the given base
// (only meaningful for base <= 16, the range digitValue covers).
func isDigit(chr rune, base int) bool {
	return digitValue(chr) < base
}
+
// isIdentifierStart reports whether chr may begin a JavaScript
// identifier ('\\' is accepted so \uXXXX escapes can start one).
func isIdentifierStart(chr rune) bool {
	switch {
	case chr == '$', chr == '_', chr == '\\':
		return true
	case 'a' <= chr && chr <= 'z', 'A' <= chr && chr <= 'Z':
		return true
	default:
		return chr >= utf8.RuneSelf && unicode.IsLetter(chr)
	}
}
+
// isIdentifierPart reports whether chr may appear after the first
// character of a JavaScript identifier.
func isIdentifierPart(chr rune) bool {
	switch {
	case chr == '$', chr == '_', chr == '\\':
		return true
	case 'a' <= chr && chr <= 'z', 'A' <= chr && chr <= 'Z':
		return true
	case '0' <= chr && chr <= '9':
		return true
	default:
		return chr >= utf8.RuneSelf && (unicode.IsLetter(chr) || unicode.IsDigit(chr))
	}
}
+
// scanIdentifier reads an identifier starting at the current character,
// validating any \uXXXX escape sequences. It returns the identifier
// text, decoded when escapes were present.
func (self *_parser) scanIdentifier() (string, error) {
	offset := self.chrOffset
	parse := false // set once an escape is seen; forces a decode pass at the end
	for isIdentifierPart(self.chr) {
		if self.chr == '\\' {
			distance := self.chrOffset - offset // 0 => the escape is the first character
			self.read()
			if self.chr != 'u' {
				return "", fmt.Errorf("Invalid identifier escape character: %c (%s)", self.chr, string(self.chr))
			}
			parse = true
			var value rune
			for j := 0; j < 4; j++ {
				self.read()
				decimal, ok := hex2decimal(byte(self.chr))
				if !ok {
					return "", fmt.Errorf("Invalid identifier escape character: %c (%s)", self.chr, string(self.chr))
				}
				value = value<<4 | decimal
			}
			// An escape may not encode '\\' itself, and the decoded rune
			// must be valid in its position (start vs. continuation).
			if value == '\\' {
				return "", fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value))
			} else if distance == 0 {
				if !isIdentifierStart(value) {
					return "", fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value))
				}
			} else if distance > 0 {
				if !isIdentifierPart(value) {
					return "", fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value))
				}
			}
		}
		self.read()
	}
	literal := string(self.str[offset:self.chrOffset])
	if parse {
		// The raw text still contains \uXXXX sequences; decode them.
		return parseStringLiteral(literal)
	}
	return literal, nil
}
+
+// 7.2
// isLineWhiteSpace reports whether chr is whitespace that does NOT
// terminate a line (ECMAScript 5.1 section 7.2).
func isLineWhiteSpace(chr rune) bool {
	switch chr {
	case '\u000a', '\u000d', '\u2028', '\u2029', '\u0085':
		// Line terminators (and NEL) are explicitly excluded.
		return false
	case '\u0009', '\u000b', '\u000c', '\u0020', '\u00a0', '\ufeff':
		return true
	default:
		return unicode.IsSpace(chr)
	}
}
+
+// 7.3
// isLineTerminator reports whether chr ends a line
// (ECMAScript 5.1 section 7.3: LF, CR, LS, PS).
func isLineTerminator(chr rune) bool {
	return chr == '\u000a' || chr == '\u000d' || chr == '\u2028' || chr == '\u2029'
}
+
// scan returns the next token, its literal text (if any), and its source
// index. It also maintains self.insertSemicolon / self.implicitSemicolon,
// the state driving automatic semicolon insertion.
func (self *_parser) scan() (tkn token.Token, literal string, idx file.Idx) {

	self.implicitSemicolon = false

	for {
		self.skipWhiteSpace()

		idx = self.idxOf(self.chrOffset)
		insertSemicolon := false

		switch chr := self.chr; {
		case isIdentifierStart(chr):
			var err error
			literal, err = self.scanIdentifier()
			if err != nil {
				tkn = token.ILLEGAL
				break
			}
			if len(literal) > 1 {
				// Keywords are longer than 1 character, avoid lookup otherwise
				var strict bool
				tkn, strict = token.IsKeyword(literal)

				switch tkn {

				case 0: // Not a keyword
					if literal == "true" || literal == "false" {
						self.insertSemicolon = true
						tkn = token.BOOLEAN
						return
					} else if literal == "null" {
						self.insertSemicolon = true
						tkn = token.NULL
						return
					}

				case token.KEYWORD:
					tkn = token.KEYWORD
					if strict {
						// TODO If strict and in strict mode, then this is not a break
						break
					}
					return

				case
					token.THIS,
					token.BREAK,
					token.THROW, // A newline after a throw is not allowed, but we need to detect it
					token.RETURN,
					token.CONTINUE,
					token.DEBUGGER:
					self.insertSemicolon = true
					return

				default:
					return

				}
			}
			self.insertSemicolon = true
			tkn = token.IDENTIFIER
			return
		case '0' <= chr && chr <= '9':
			self.insertSemicolon = true
			tkn, literal = self.scanNumericLiteral(false)
			return
		default:
			// Punctuation / operators: chr has been read past already.
			self.read()
			switch chr {
			case -1:
				// EOF may itself trigger an implicit semicolon.
				if self.insertSemicolon {
					self.insertSemicolon = false
					self.implicitSemicolon = true
				}
				tkn = token.EOF
			case '\r', '\n', '\u2028', '\u2029':
				self.insertSemicolon = false
				self.implicitSemicolon = true
				self.comments.AtLineBreak()
				continue
			case ':':
				tkn = token.COLON
			case '.':
				// ".5" style numeric literal vs. member access.
				if digitValue(self.chr) < 10 {
					insertSemicolon = true
					tkn, literal = self.scanNumericLiteral(true)
				} else {
					tkn = token.PERIOD
				}
			case ',':
				tkn = token.COMMA
			case ';':
				tkn = token.SEMICOLON
			case '(':
				tkn = token.LEFT_PARENTHESIS
			case ')':
				tkn = token.RIGHT_PARENTHESIS
				insertSemicolon = true
			case '[':
				tkn = token.LEFT_BRACKET
			case ']':
				tkn = token.RIGHT_BRACKET
				insertSemicolon = true
			case '{':
				tkn = token.LEFT_BRACE
			case '}':
				tkn = token.RIGHT_BRACE
				insertSemicolon = true
			case '+':
				tkn = self.switch3(token.PLUS, token.ADD_ASSIGN, '+', token.INCREMENT)
				if tkn == token.INCREMENT {
					insertSemicolon = true
				}
			case '-':
				tkn = self.switch3(token.MINUS, token.SUBTRACT_ASSIGN, '-', token.DECREMENT)
				if tkn == token.DECREMENT {
					insertSemicolon = true
				}
			case '*':
				tkn = self.switch2(token.MULTIPLY, token.MULTIPLY_ASSIGN)
			case '/':
				if self.chr == '/' {
					if self.mode&StoreComments != 0 {
						literal := string(self.readSingleLineComment())
						self.comments.AddComment(ast.NewComment(literal, self.idx))
						continue
					}
					self.skipSingleLineComment()
					continue
				} else if self.chr == '*' {
					if self.mode&StoreComments != 0 {
						literal = string(self.readMultiLineComment())
						self.comments.AddComment(ast.NewComment(literal, self.idx))
						continue
					}
					self.skipMultiLineComment()
					continue
				} else {
					// Could be division, could be RegExp literal
					tkn = self.switch2(token.SLASH, token.QUOTIENT_ASSIGN)
					insertSemicolon = true
				}
			case '%':
				tkn = self.switch2(token.REMAINDER, token.REMAINDER_ASSIGN)
			case '^':
				tkn = self.switch2(token.EXCLUSIVE_OR, token.EXCLUSIVE_OR_ASSIGN)
			case '<':
				tkn = self.switch4(token.LESS, token.LESS_OR_EQUAL, '<', token.SHIFT_LEFT, token.SHIFT_LEFT_ASSIGN)
			case '>':
				tkn = self.switch6(token.GREATER, token.GREATER_OR_EQUAL, '>', token.SHIFT_RIGHT, token.SHIFT_RIGHT_ASSIGN, '>', token.UNSIGNED_SHIFT_RIGHT, token.UNSIGNED_SHIFT_RIGHT_ASSIGN)
			case '=':
				tkn = self.switch2(token.ASSIGN, token.EQUAL)
				if tkn == token.EQUAL && self.chr == '=' {
					self.read()
					tkn = token.STRICT_EQUAL
				}
			case '!':
				tkn = self.switch2(token.NOT, token.NOT_EQUAL)
				if tkn == token.NOT_EQUAL && self.chr == '=' {
					self.read()
					tkn = token.STRICT_NOT_EQUAL
				}
			case '&':
				if self.chr == '^' {
					self.read()
					tkn = self.switch2(token.AND_NOT, token.AND_NOT_ASSIGN)
				} else {
					tkn = self.switch3(token.AND, token.AND_ASSIGN, '&', token.LOGICAL_AND)
				}
			case '|':
				tkn = self.switch3(token.OR, token.OR_ASSIGN, '|', token.LOGICAL_OR)
			case '~':
				tkn = token.BITWISE_NOT
			case '?':
				tkn = token.QUESTION_MARK
			case '"', '\'':
				insertSemicolon = true
				tkn = token.STRING
				var err error
				// -1: include the opening quote already consumed above.
				literal, err = self.scanString(self.chrOffset - 1)
				if err != nil {
					tkn = token.ILLEGAL
				}
			default:
				self.errorUnexpected(idx, chr)
				tkn = token.ILLEGAL
			}
		}
		self.insertSemicolon = insertSemicolon
		return
	}
}
+
+func (self *_parser) switch2(tkn0, tkn1 token.Token) token.Token {
+ if self.chr == '=' {
+ self.read()
+ return tkn1
+ }
+ return tkn0
+}
+
+func (self *_parser) switch3(tkn0, tkn1 token.Token, chr2 rune, tkn2 token.Token) token.Token {
+ if self.chr == '=' {
+ self.read()
+ return tkn1
+ }
+ if self.chr == chr2 {
+ self.read()
+ return tkn2
+ }
+ return tkn0
+}
+
+func (self *_parser) switch4(tkn0, tkn1 token.Token, chr2 rune, tkn2, tkn3 token.Token) token.Token {
+ if self.chr == '=' {
+ self.read()
+ return tkn1
+ }
+ if self.chr == chr2 {
+ self.read()
+ if self.chr == '=' {
+ self.read()
+ return tkn3
+ }
+ return tkn2
+ }
+ return tkn0
+}
+
+func (self *_parser) switch6(tkn0, tkn1 token.Token, chr2 rune, tkn2, tkn3 token.Token, chr3 rune, tkn4, tkn5 token.Token) token.Token {
+ if self.chr == '=' {
+ self.read()
+ return tkn1
+ }
+ if self.chr == chr2 {
+ self.read()
+ if self.chr == '=' {
+ self.read()
+ return tkn3
+ }
+ if self.chr == chr3 {
+ self.read()
+ if self.chr == '=' {
+ self.read()
+ return tkn5
+ }
+ return tkn4
+ }
+ return tkn2
+ }
+ return tkn0
+}
+
// chrAt decodes the rune located at the given byte index of the source
// and reports its encoded width.
func (self *_parser) chrAt(index int) _chr {
	value, width := utf8.DecodeRuneInString(self.str[index:])
	return _chr{
		value: value,
		width: width,
	}
}
+
// _peek returns the byte at offset+1 as a rune, or -1 at end of input.
// Only a single byte is read, so it is meaningful for ASCII lookahead only.
// NOTE(review): self.offset already points past the current character, so
// str[self.offset+1] looks two characters ahead of self.chr rather than
// one — confirm this is intended (the only caller checks for '\n' after
// '\r' in skipWhiteSpace).
func (self *_parser) _peek() rune {
	if self.offset+1 < self.length {
		return rune(self.str[self.offset+1])
	}
	return -1
}
+
+func (self *_parser) read() {
+ if self.offset < self.length {
+ self.chrOffset = self.offset
+ chr, width := rune(self.str[self.offset]), 1
+ if chr >= utf8.RuneSelf { // !ASCII
+ chr, width = utf8.DecodeRuneInString(self.str[self.offset:])
+ if chr == utf8.RuneError && width == 1 {
+ self.error(self.chrOffset, "Invalid UTF-8 character")
+ }
+ }
+ self.offset += width
+ self.chr = chr
+ } else {
+ self.chrOffset = self.length
+ self.chr = -1 // EOF
+ }
+}
+
+// This is here since the functions are so similar
+func (self *_RegExp_parser) read() {
+ if self.offset < self.length {
+ self.chrOffset = self.offset
+ chr, width := rune(self.str[self.offset]), 1
+ if chr >= utf8.RuneSelf { // !ASCII
+ chr, width = utf8.DecodeRuneInString(self.str[self.offset:])
+ if chr == utf8.RuneError && width == 1 {
+ self.error(self.chrOffset, "Invalid UTF-8 character")
+ }
+ }
+ self.offset += width
+ self.chr = chr
+ } else {
+ self.chrOffset = self.length
+ self.chr = -1 // EOF
+ }
+}
+
// readSingleLineComment collects the text of a // comment (the leading
// slashes were already consumed by the caller) up to, but not including,
// the line terminator.
func (self *_parser) readSingleLineComment() (result []rune) {
	for self.chr != -1 {
		self.read()
		if isLineTerminator(self.chr) {
			return
		}
		result = append(result, self.chr)
	}

	// Get rid of the trailing -1
	// (appended by the final loop iteration when EOF is reached).
	result = result[:len(result)-1]

	return
}
+
// readMultiLineComment collects the body of a /* ... */ comment (the
// opening was already consumed). An unterminated comment reaching EOF
// is reported via errorUnexpected.
func (self *_parser) readMultiLineComment() (result []rune) {
	self.read()
	for self.chr >= 0 {
		chr := self.chr
		self.read()
		if chr == '*' && self.chr == '/' {
			self.read()
			return
		}

		result = append(result, chr)
	}

	self.errorUnexpected(0, self.chr)

	return
}
+
// skipSingleLineComment discards a // comment's text, stopping at the
// line terminator (which is left unconsumed for ASI handling).
func (self *_parser) skipSingleLineComment() {
	for self.chr != -1 {
		self.read()
		if isLineTerminator(self.chr) {
			return
		}
	}
}
+
// skipMultiLineComment discards a /* ... */ comment's text. An
// unterminated comment reaching EOF is reported via errorUnexpected.
func (self *_parser) skipMultiLineComment() {
	self.read()
	for self.chr >= 0 {
		chr := self.chr
		self.read()
		if chr == '*' && self.chr == '/' {
			self.read()
			return
		}
	}

	self.errorUnexpected(0, self.chr)
}
+
// skipWhiteSpace advances past whitespace. Line terminators stop the
// scan when insertSemicolon is set, so scan() can perform automatic
// semicolon insertion at the newline.
func (self *_parser) skipWhiteSpace() {
	for {
		switch self.chr {
		case ' ', '\t', '\f', '\v', '\u00a0', '\ufeff':
			self.read()
			continue
		case '\r':
			// Collapse \r\n into a single logical line break.
			if self._peek() == '\n' {
				self.comments.AtLineBreak()
				self.read()
			}
			fallthrough
		case '\u2028', '\u2029', '\n':
			if self.insertSemicolon {
				return
			}
			self.comments.AtLineBreak()
			self.read()
			continue
		}
		// Any other Unicode space character.
		if self.chr >= utf8.RuneSelf {
			if unicode.IsSpace(self.chr) {
				self.read()
				continue
			}
		}
		break
	}
}
+
+func (self *_parser) skipLineWhiteSpace() {
+ for isLineWhiteSpace(self.chr) {
+ self.read()
+ }
+}
+
+func (self *_parser) scanMantissa(base int) {
+ for digitValue(self.chr) < base {
+ self.read()
+ }
+}
+
// scanEscape advances past a backslash escape inside a string literal.
// The numeric value computed for \x / \u escapes is discarded here:
// scanString keeps the raw source text and parseStringLiteral performs
// the actual decoding later.
func (self *_parser) scanEscape(quote rune) {

	var length, base uint32
	switch self.chr {
	//case '0', '1', '2', '3', '4', '5', '6', '7':
	// Octal:
	// length, base, limit = 3, 8, 255
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"', '\'', '0':
		self.read()
		return
	case '\r', '\n', '\u2028', '\u2029':
		// Line continuation: a backslash before a newline.
		self.scanNewline()
		return
	case 'x':
		self.read()
		length, base = 2, 16
	case 'u':
		self.read()
		length, base = 4, 16
	default:
		self.read() // Always make progress
		return
	}

	var value uint32
	for ; length > 0 && self.chr != quote && self.chr >= 0; length-- {
		digit := uint32(digitValue(self.chr))
		if digit >= base {
			break
		}
		value = value*base + digit
		self.read()
	}
}
+
// scanString scans a string or RegExp literal whose opening quote
// (", ', or / for a RegExp) sits at the given byte offset. The raw
// source text, including both quotes, is returned undecoded.
func (self *_parser) scanString(offset int) (string, error) {
	// " ' /
	quote := rune(self.str[offset])

	for self.chr != quote {
		chr := self.chr
		if chr == '\n' || chr == '\r' || chr == '\u2028' || chr == '\u2029' || chr < 0 {
			goto newline
		}
		self.read()
		if chr == '\\' {
			if quote == '/' {
				// In a RegExp a backslash escapes exactly one character
				// and may not escape a line terminator.
				if self.chr == '\n' || self.chr == '\r' || self.chr == '\u2028' || self.chr == '\u2029' || self.chr < 0 {
					goto newline
				}
				self.read()
			} else {
				self.scanEscape(quote)
			}
		} else if chr == '[' && quote == '/' {
			// Allow a slash (/) in a bracket character class ([...])
			// TODO Fix this, this is hacky...
			quote = -1
		} else if chr == ']' && quote == -1 {
			quote = '/'
		}
	}

	// " ' /
	self.read()

	return string(self.str[offset:self.chrOffset]), nil

newline:
	self.scanNewline()
	err := "String not terminated"
	if quote == '/' {
		err = "Invalid regular expression: missing /"
		self.error(self.idxOf(offset), err)
	}
	return "", errors.New(err)
}
+
// scanNewline consumes a single line terminator, collapsing an \r\n
// pair into one logical newline.
func (self *_parser) scanNewline() {
	if self.chr == '\r' {
		self.read()
		if self.chr != '\n' {
			return
		}
	}
	self.read()
}
+
// hex2decimal converts a hexadecimal digit byte to its numeric value.
// ok is false when chr is not a valid hexadecimal digit.
func hex2decimal(chr byte) (value rune, ok bool) {
	r := rune(chr)
	switch {
	case r >= '0' && r <= '9':
		return r - '0', true
	case r >= 'a' && r <= 'f':
		return r - 'a' + 10, true
	case r >= 'A' && r <= 'F':
		return r - 'A' + 10, true
	default:
		return 0, false
	}
}
+
// parseNumberLiteral converts a numeric literal's source text into an
// int64 when it fits, otherwise a float64. Hexadecimal literals that
// overflow int64 are converted digit-by-digit into a float64.
func parseNumberLiteral(literal string) (value interface{}, err error) {
	// TODO Is Uint okay? What about -MAX_UINT
	value, err = strconv.ParseInt(literal, 0, 64)
	if err == nil {
		return
	}

	parseIntErr := err // Save this first error, just in case

	value, err = strconv.ParseFloat(literal, 64)
	if err == nil {
		return
	} else if err.(*strconv.NumError).Err == strconv.ErrRange {
		// Infinity, etc.
		return value, nil
	}

	err = parseIntErr

	if err.(*strconv.NumError).Err == strconv.ErrRange {
		if len(literal) > 2 && literal[0] == '0' && (literal[1] == 'X' || literal[1] == 'x') {
			// Could just be a very large number (e.g. 0x8000000000000000)
			var value float64
			literal = literal[2:]
			for _, chr := range literal {
				digit := digitValue(chr)
				if digit >= 16 {
					goto error
				}
				value = value*16 + float64(digit)
			}
			return value, nil
		}
	}

error:
	return nil, errors.New("Illegal numeric literal")
}
+
// parseStringLiteral decodes the backslash escape sequences in a string
// literal's body (quotes already stripped): \b \f \n \r \t \v, \xXX,
// \uXXXX, octal escapes, escaped quotes/backslash, and line
// continuations. Literals with no backslash are returned unchanged.
func parseStringLiteral(literal string) (string, error) {
	// Best case scenario...
	if literal == "" {
		return "", nil
	}

	// Slightly less-best case scenario...
	if !strings.ContainsRune(literal, '\\') {
		return literal, nil
	}

	str := literal
	// 3/2 of the input length is a guess at the decoded size.
	buffer := bytes.NewBuffer(make([]byte, 0, 3*len(literal)/2))

	for len(str) > 0 {
		switch chr := str[0]; {
		// We do not explicitly handle the case of the quote
		// value, which can be: " ' /
		// This assumes we're already passed a partially well-formed literal
		case chr >= utf8.RuneSelf:
			chr, size := utf8.DecodeRuneInString(str)
			buffer.WriteRune(chr)
			str = str[size:]
			continue
		case chr != '\\':
			buffer.WriteByte(chr)
			str = str[1:]
			continue
		}

		// str[0] is a backslash; a lone trailing backslash is a caller bug.
		if len(str) <= 1 {
			panic("len(str) <= 1")
		}
		chr := str[1]
		var value rune
		if chr >= utf8.RuneSelf {
			str = str[1:]
			var size int
			value, size = utf8.DecodeRuneInString(str)
			str = str[size:] // \ + <character>
		} else {
			str = str[2:] // \<character>
			switch chr {
			case 'b':
				value = '\b'
			case 'f':
				value = '\f'
			case 'n':
				value = '\n'
			case 'r':
				value = '\r'
			case 't':
				value = '\t'
			case 'v':
				value = '\v'
			case 'x', 'u':
				size := 0
				switch chr {
				case 'x':
					size = 2
				case 'u':
					size = 4
				}
				if len(str) < size {
					return "", fmt.Errorf("invalid escape: \\%s: len(%q) != %d", string(chr), str, size)
				}
				for j := 0; j < size; j++ {
					decimal, ok := hex2decimal(str[j])
					if !ok {
						return "", fmt.Errorf("invalid escape: \\%s: %q", string(chr), str[:size])
					}
					value = value<<4 | decimal
				}
				str = str[size:]
				if chr == 'x' {
					break
				}
				if value > utf8.MaxRune {
					panic("value > utf8.MaxRune")
				}
			case '0':
				// \0 alone is NUL; \0 followed by an octal digit starts
				// an octal escape (handled by the fallthrough).
				if len(str) == 0 || '0' > str[0] || str[0] > '7' {
					value = 0
					break
				}
				fallthrough
			case '1', '2', '3', '4', '5', '6', '7':
				// TODO strict
				value = rune(chr) - '0'
				j := 0
				for ; j < 2; j++ {
					if len(str) < j+1 {
						break
					}
					chr := str[j]
					if '0' > chr || chr > '7' {
						break
					}
					decimal := rune(str[j]) - '0'
					value = (value << 3) | decimal
				}
				str = str[j:]
			case '\\':
				value = '\\'
			case '\'', '"':
				value = rune(chr)
			case '\r':
				// Line continuation; swallow a following \n as well.
				if len(str) > 0 {
					if str[0] == '\n' {
						str = str[1:]
					}
				}
				fallthrough
			case '\n':
				continue
			default:
				value = rune(chr)
			}
		}
		buffer.WriteRune(value)
	}

	return buffer.String(), nil
}
+
// scanNumericLiteral scans a numeric literal starting at the current
// character. decimalPoint is true when the caller already consumed a
// leading '.' followed by a digit. Returns token.NUMBER (or
// token.ILLEGAL) and the raw source text of the literal.
func (self *_parser) scanNumericLiteral(decimalPoint bool) (token.Token, string) {

	offset := self.chrOffset
	tkn := token.NUMBER

	if decimalPoint {
		offset-- // include the already-consumed '.'
		self.scanMantissa(10)
		goto exponent
	}

	if self.chr == '0' {
		// Shadows the outer offset; both hold the same position here.
		offset := self.chrOffset
		self.read()
		if self.chr == 'x' || self.chr == 'X' {
			// Hexadecimal
			self.read()
			if isDigit(self.chr, 16) {
				self.read()
			} else {
				return token.ILLEGAL, self.str[offset:self.chrOffset]
			}
			self.scanMantissa(16)

			if self.chrOffset-offset <= 2 {
				// Only "0x" or "0X"
				self.error(0, "Illegal hexadecimal number")
			}

			goto hexadecimal
		} else if self.chr == '.' {
			// Float
			goto float
		} else {
			// Octal, Float
			if self.chr == 'e' || self.chr == 'E' {
				goto exponent
			}
			self.scanMantissa(8)
			// '8' and '9' are not octal digits.
			if self.chr == '8' || self.chr == '9' {
				return token.ILLEGAL, self.str[offset:self.chrOffset]
			}
			goto octal
		}
	}

	self.scanMantissa(10)

float:
	if self.chr == '.' {
		self.read()
		self.scanMantissa(10)
	}

exponent:
	if self.chr == 'e' || self.chr == 'E' {
		self.read()
		if self.chr == '-' || self.chr == '+' {
			self.read()
		}
		if isDecimalDigit(self.chr) {
			self.read()
			self.scanMantissa(10)
		} else {
			return token.ILLEGAL, self.str[offset:self.chrOffset]
		}
	}

hexadecimal:
octal:
	// A number must not run directly into an identifier or more digits.
	if isIdentifierStart(self.chr) || isDecimalDigit(self.chr) {
		return token.ILLEGAL, self.str[offset:self.chrOffset]
	}

	return tkn, self.str[offset:self.chrOffset]
}
diff --git a/vendor/github.com/robertkrimen/otto/parser/parser.go b/vendor/github.com/robertkrimen/otto/parser/parser.go
new file mode 100644
index 000000000..75b7c500c
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/parser.go
@@ -0,0 +1,344 @@
+/*
+Package parser implements a parser for JavaScript.
+
+ import (
+ "github.com/robertkrimen/otto/parser"
+ )
+
+Parse and return an AST
+
+ filename := "" // A filename is optional
+ src := `
+ // Sample xyzzy example
+ (function(){
+ if (3.14159 > 0) {
+ console.log("Hello, World.");
+ return;
+ }
+
+ var xyzzy = NaN;
+ console.log("Nothing happens.");
+ return xyzzy;
+ })();
+ `
+
+ // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList
+ program, err := parser.ParseFile(nil, filename, src, 0)
+
+Warning
+
+The parser and AST interfaces are still works-in-progress (particularly where
+node types are concerned) and may change in the future.
+
+*/
+package parser
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "io"
+ "io/ioutil"
+
+ "github.com/robertkrimen/otto/ast"
+ "github.com/robertkrimen/otto/file"
+ "github.com/robertkrimen/otto/token"
+ "gopkg.in/sourcemap.v1"
+)
+
// A Mode value is a set of flags (or 0). They control optional parser functionality.
type Mode uint

const (
	IgnoreRegExpErrors Mode = 1 << iota // Ignore RegExp compatibility errors (allow backtracking)
	StoreComments                       // Store the comments from source to the comments map
)
+
// _parser holds the complete lexer and parser state for a single source text.
type _parser struct {
	str    string // The source text being parsed
	length int    // len(str), cached
	base   int    // Offset of the first character within the owning FileSet

	chr       rune // The current character
	chrOffset int  // The offset of current character
	offset    int  // The offset after current character (may be greater than 1)

	idx     file.Idx    // The index of token
	token   token.Token // The token
	literal string      // The literal of the token, if any

	scope             *_scope // Innermost lexical scope (function/iteration/switch context)
	insertSemicolon   bool    // If we see a newline, then insert an implicit semicolon
	implicitSemicolon bool    // An implicit semicolon exists

	errors ErrorList // Accumulated parse errors

	recover struct {
		// Scratch when trying to seek to the next statement, etc.
		idx   file.Idx
		count int
	}

	mode Mode // Optional behavior flags (see Mode)

	file *file.File // File metadata (name, line offsets, source map)

	comments *ast.Comments // Comment collection state, used when StoreComments is set
}
+
// Parser is the exported scanning interface; Scan reads the next token from
// the source and reports its kind, literal text, and position.
type Parser interface {
	Scan() (tkn token.Token, literal string, idx file.Idx)
}
+
+func _newParser(filename, src string, base int, sm *sourcemap.Consumer) *_parser {
+ return &_parser{
+ chr: ' ', // This is set so we can start scanning by skipping whitespace
+ str: src,
+ length: len(src),
+ base: base,
+ file: file.NewFile(filename, src, base).WithSourceMap(sm),
+ comments: ast.NewComments(),
+ }
+}
+
// NewParser returns a new Parser for the given filename and source text.
func NewParser(filename, src string) Parser {
	return _newParser(filename, src, 1, nil)
}
+
// ReadSource resolves JavaScript source from src, which may be a string,
// []byte, *bytes.Buffer or io.Reader. When src is nil, the file named by
// filename is read instead. Any other src type yields an "invalid source"
// error.
func ReadSource(filename string, src interface{}) ([]byte, error) {
	if src == nil {
		return ioutil.ReadFile(filename)
	}
	switch source := src.(type) {
	case string:
		return []byte(source), nil
	case []byte:
		return source, nil
	case *bytes.Buffer:
		// A typed-nil *bytes.Buffer falls through to the error below.
		if source != nil {
			return source.Bytes(), nil
		}
	case io.Reader:
		var buffer bytes.Buffer
		if _, err := io.Copy(&buffer, source); err != nil {
			return nil, err
		}
		return buffer.Bytes(), nil
	}
	return nil, errors.New("invalid source")
}
+
+func ReadSourceMap(filename string, src interface{}) (*sourcemap.Consumer, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return sourcemap.Parse(filename, []byte(src))
+ case []byte:
+ return sourcemap.Parse(filename, src)
+ case *bytes.Buffer:
+ if src != nil {
+ return sourcemap.Parse(filename, src.Bytes())
+ }
+ case io.Reader:
+ var bfr bytes.Buffer
+ if _, err := io.Copy(&bfr, src); err != nil {
+ return nil, err
+ }
+ return sourcemap.Parse(filename, bfr.Bytes())
+ case *sourcemap.Consumer:
+ return src, nil
+ }
+
+ return nil, errors.New("invalid sourcemap type")
+}
+
// ParseFileWithSourceMap behaves like ParseFile but additionally accepts a
// source map (sourcemapSource follows the same type rules as ReadSourceMap).
// If no source map is supplied, the last line of the source is checked for an
// inline base64-encoded "sourceMappingURL" data URL.
func ParseFileWithSourceMap(fileSet *file.FileSet, filename string, javascriptSource, sourcemapSource interface{}, mode Mode) (*ast.Program, error) {
	src, err := ReadSource(filename, javascriptSource)
	if err != nil {
		return nil, err
	}

	if sourcemapSource == nil {
		// Look for an inline source map on the final line:
		// //# sourceMappingURL=data:application/json;base64,...
		lines := bytes.Split(src, []byte("\n"))
		lastLine := lines[len(lines)-1]
		if bytes.HasPrefix(lastLine, []byte("//# sourceMappingURL=data:application/json")) {
			bits := bytes.SplitN(lastLine, []byte(","), 2)
			if len(bits) == 2 {
				// Decoding failures are ignored; we simply parse without a map.
				if d, err := base64.StdEncoding.DecodeString(string(bits[1])); err == nil {
					sourcemapSource = d
				}
			}
		}
	}

	sm, err := ReadSourceMap(filename, sourcemapSource)
	if err != nil {
		return nil, err
	}

	base := 1
	if fileSet != nil {
		base = fileSet.AddFile(filename, string(src))
	}

	parser := _newParser(filename, string(src), base, sm)
	parser.mode = mode
	// Note: the comment map is attached even when parse returns an error.
	program, err := parser.parse()
	program.Comments = parser.comments.CommentMap

	return program, err
}
+
// ParseFile parses the source code of a single JavaScript/ECMAScript source file and returns
// the corresponding ast.Program node.
//
// If fileSet == nil, ParseFile parses source without a FileSet.
// If fileSet != nil, ParseFile first adds filename and src to fileSet.
//
// The filename argument is optional and is used for labelling errors, etc.
//
// src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST always be in UTF-8.
//
// mode controls optional parser functionality (see Mode).
//
//	// Parse some JavaScript, yielding a *ast.Program and/or an ErrorList
//	program, err := parser.ParseFile(nil, "", `if (abc > 1) {}`, 0)
func ParseFile(fileSet *file.FileSet, filename string, src interface{}, mode Mode) (*ast.Program, error) {
	return ParseFileWithSourceMap(fileSet, filename, src, nil, mode)
}
+
+// ParseFunction parses a given parameter list and body as a function and returns the
+// corresponding ast.FunctionLiteral node.
+//
+// The parameter list, if any, should be a comma-separated list of identifiers.
+//
+func ParseFunction(parameterList, body string) (*ast.FunctionLiteral, error) {
+
+ src := "(function(" + parameterList + ") {\n" + body + "\n})"
+
+ parser := _newParser("", src, 1, nil)
+ program, err := parser.parse()
+ if err != nil {
+ return nil, err
+ }
+
+ return program.Body[0].(*ast.ExpressionStatement).Expression.(*ast.FunctionLiteral), nil
+}
+
// Scan reads a single token from the source at the current offset, increments the offset and
// returns the token.Token token, a string literal representing the value of the token (if applicable)
// and its current file.Idx index.
func (self *_parser) Scan() (tkn token.Token, literal string, idx file.Idx) {
	return self.scan()
}
+
+func (self *_parser) slice(idx0, idx1 file.Idx) string {
+ from := int(idx0) - self.base
+ to := int(idx1) - self.base
+ if from >= 0 && to <= len(self.str) {
+ return self.str[from:to]
+ }
+
+ return ""
+}
+
+func (self *_parser) parse() (*ast.Program, error) {
+ self.next()
+ program := self.parseProgram()
+ if false {
+ self.errors.Sort()
+ }
+
+ if self.mode&StoreComments != 0 {
+ self.comments.CommentMap.AddComments(program, self.comments.FetchAll(), ast.TRAILING)
+ }
+
+ return program, self.errors.Err()
+}
+
// next advances to the following token, updating token, literal and idx.
func (self *_parser) next() {
	self.token, self.literal, self.idx = self.scan()
}
+
// optionalSemicolon consumes an explicit or implicit semicolon if one is
// present; otherwise it reports a missing-semicolon error unless the next
// token legitimately terminates the statement (EOF or '}').
func (self *_parser) optionalSemicolon() {
	if self.token == token.SEMICOLON {
		self.next()
		return
	}

	if self.implicitSemicolon {
		self.implicitSemicolon = false
		return
	}

	if self.token != token.EOF && self.token != token.RIGHT_BRACE {
		self.expect(token.SEMICOLON)
	}
}
+
// semicolon requires a statement terminator: either an implicit semicolon,
// an explicit one (enforced via expect), or a following ')' or '}'.
func (self *_parser) semicolon() {
	if self.token != token.RIGHT_PARENTHESIS && self.token != token.RIGHT_BRACE {
		if self.implicitSemicolon {
			self.implicitSemicolon = false
			return
		}

		self.expect(token.SEMICOLON)
	}
}
+
// idxOf converts a zero-based character offset into an absolute file.Idx.
func (self *_parser) idxOf(offset int) file.Idx {
	return file.Idx(self.base + offset)
}
+
// expect reports an error if the current token is not value, then advances
// to the next token regardless, returning the index of the (expected) token.
func (self *_parser) expect(value token.Token) file.Idx {
	idx := self.idx
	if self.token != value {
		self.errorUnexpectedToken(self.token)
	}
	self.next()
	return idx
}
+
// lineCount returns the number of line terminators in str and the byte index
// of the final byte of the last terminator (-1 if there are none). A "\r\n"
// pair counts as a single terminator; the three-byte U+2028/U+2029 separators
// report the index of their last byte.
func lineCount(str string) (int, int) {
	count, last := 0, -1
	afterCR := false
	for index, chr := range str {
		wasCR := afterCR
		afterCR = false
		switch chr {
		case '\r':
			count++
			last = index
			afterCR = true
		case '\n':
			// Part of a "\r\n" pair — already counted via the '\r'.
			if !wasCR {
				count++
			}
			last = index
		case '\u2028', '\u2029':
			count++
			last = index + 2 // final byte of the 3-byte UTF-8 encoding
		}
	}
	return count, last
}
+
// position converts an absolute index into a file.Position (filename,
// 1-based line, 1-based column) by counting line terminators in the source
// prefix that precedes idx.
func (self *_parser) position(idx file.Idx) file.Position {
	position := file.Position{}
	offset := int(idx) - self.base
	str := self.str[:offset]
	position.Filename = self.file.Name()
	line, last := lineCount(str)
	position.Line = 1 + line
	if last >= 0 {
		// Column is measured from the byte after the last line terminator.
		position.Column = offset - last
	} else {
		// No terminator before idx: the whole prefix is on line 1.
		position.Column = 1 + len(str)
	}

	return position
}
diff --git a/vendor/github.com/robertkrimen/otto/parser/regexp.go b/vendor/github.com/robertkrimen/otto/parser/regexp.go
new file mode 100644
index 000000000..f614dae74
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/regexp.go
@@ -0,0 +1,358 @@
+package parser
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
// _RegExp_parser holds the state for translating a JavaScript RegExp pattern
// into re2 (Go regexp) syntax.
type _RegExp_parser struct {
	str    string // The JavaScript pattern being translated
	length int    // len(str), cached

	chr       rune // The current character
	chrOffset int  // The offset of current character
	offset    int  // The offset after current character (may be greater than 1)

	errors  []error // Translation/compatibility errors, in order encountered
	invalid bool    // The input is an invalid JavaScript RegExp

	goRegexp *bytes.Buffer // The re2 pattern being built
}
+
// TransformRegExp transforms a JavaScript pattern into a Go "regexp" pattern.
//
// re2 (Go) cannot do backtracking, so the presence of a lookahead (?=) (?!) or
// backreference (\1, \2, ...) will cause an error.
//
// re2 (Go) has a different definition for \s: [\t\n\f\r ].
// The JavaScript definition, on the other hand, also includes \v, Unicode "Separator, Space", etc.
//
// If the pattern is invalid (not valid even in JavaScript), then this function
// returns the empty string and an error.
//
// If the pattern is valid, but incompatible (contains a lookahead or backreference),
// then this function returns the transformation (a non-empty string) AND an error.
func TransformRegExp(pattern string) (string, error) {

	// The empty pattern needs no translation.
	if pattern == "" {
		return "", nil
	}

	// TODO If without \, if without (?=, (?!, then another shortcut

	// The output is typically close to the input size; over-allocate by half.
	parser := _RegExp_parser{
		str:      pattern,
		length:   len(pattern),
		goRegexp: bytes.NewBuffer(make([]byte, 0, 3*len(pattern)/2)),
	}
	parser.read() // Pull in the first character
	parser.scan()
	var err error
	// Only the first recorded error is surfaced to the caller.
	if len(parser.errors) > 0 {
		err = parser.errors[0]
	}
	if parser.invalid {
		return "", err
	}

	// Might not be re2 compatible, but is still a valid JavaScript RegExp
	return parser.goRegexp.String(), err
}
+
// scan walks the entire pattern, copying ordinary characters through and
// dispatching to the escape, group and character-class scanners. A stray ')'
// marks the pattern invalid.
func (self *_RegExp_parser) scan() {
	for self.chr != -1 {
		switch self.chr {
		case '\\':
			self.read()
			self.scanEscape(false)
		case '(':
			self.pass()
			self.scanGroup()
		case '[':
			self.pass()
			self.scanBracket()
		case ')':
			self.error(-1, "Unmatched ')'")
			self.invalid = true
			self.pass()
		default:
			self.pass()
		}
	}
}
+
// (...)
// scanGroup copies a parenthesized group through to the output (the opening
// '(' has already been passed). Lookaheads (?= / (?! are valid JavaScript but
// unsupported by re2, so they are reported as errors without invalidating
// the pattern.
func (self *_RegExp_parser) scanGroup() {
	str := self.str[self.chrOffset:]
	if len(str) > 1 { // A possibility of (?= or (?!
		if str[0] == '?' {
			if str[1] == '=' || str[1] == '!' {
				self.error(-1, "re2: Invalid (%s) <lookahead>", self.str[self.chrOffset:self.chrOffset+2])
			}
		}
	}
	for self.chr != -1 && self.chr != ')' {
		switch self.chr {
		case '\\':
			self.read()
			self.scanEscape(false)
		case '(':
			self.pass()
			self.scanGroup()
		case '[':
			self.pass()
			self.scanBracket()
		default:
			self.pass()
			continue
		}
	}
	if self.chr != ')' {
		self.error(-1, "Unterminated group")
		self.invalid = true
		return
	}
	self.pass()
}
+
+// [...]
+func (self *_RegExp_parser) scanBracket() {
+ for self.chr != -1 {
+ if self.chr == ']' {
+ break
+ } else if self.chr == '\\' {
+ self.read()
+ self.scanEscape(true)
+ continue
+ }
+ self.pass()
+ }
+ if self.chr != ']' {
+ self.error(-1, "Unterminated character class")
+ self.invalid = true
+ return
+ }
+ self.pass()
+}
+
// \...
// scanEscape translates a single escape sequence (the leading '\' has already
// been consumed) into its re2 equivalent and writes it to goRegexp. inClass
// reports whether the escape occurs inside a character class, which changes
// the meaning of \b (backspace rather than word boundary).
func (self *_RegExp_parser) scanEscape(inClass bool) {
	offset := self.chrOffset

	var length, base uint32
	switch self.chr {

	case '0', '1', '2', '3', '4', '5', '6', '7':
		// One digit is an octal escape; more than one is treated below.
		var value int64
		size := 0
		for {
			digit := int64(digitValue(self.chr))
			if digit >= 8 {
				// Not a valid digit
				break
			}
			value = value*8 + digit
			self.read()
			size += 1
		}
		if size == 1 { // The number of characters read
			_, err := self.goRegexp.Write([]byte{'\\', byte(value) + '0'})
			if err != nil {
				self.errors = append(self.errors, err)
			}
			if value != 0 {
				// An invalid backreference
				self.error(-1, "re2: Invalid \\%d <backreference>", value)
			}
			return
		}
		// Emit the multi-digit octal value as a \x.. hexadecimal escape.
		tmp := []byte{'\\', 'x', '0', 0}
		if value >= 16 {
			tmp = tmp[0:2]
		} else {
			tmp = tmp[0:3]
		}
		tmp = strconv.AppendInt(tmp, value, 16)
		_, err := self.goRegexp.Write(tmp)
		if err != nil {
			self.errors = append(self.errors, err)
		}
		return

	case '8', '9':
		// \8 and \9 can only be (invalid) backreferences; copy the digits
		// through verbatim and report the incompatibility.
		size := 0
		for {
			digit := digitValue(self.chr)
			if digit >= 10 {
				// Not a valid digit
				break
			}
			self.read()
			size += 1
		}
		err := self.goRegexp.WriteByte('\\')
		if err != nil {
			self.errors = append(self.errors, err)
		}
		_, err = self.goRegexp.WriteString(self.str[offset:self.chrOffset])
		if err != nil {
			self.errors = append(self.errors, err)
		}
		self.error(-1, "re2: Invalid \\%s <backreference>", self.str[offset:self.chrOffset])
		return

	case 'x':
		// \xXX — two hexadecimal digits, handled after the switch.
		self.read()
		length, base = 2, 16

	case 'u':
		// \uXXXX — four hexadecimal digits, handled after the switch.
		self.read()
		length, base = 4, 16

	case 'b':
		if inClass {
			// Inside a character class \b is backspace (\x08).
			_, err := self.goRegexp.Write([]byte{'\\', 'x', '0', '8'})
			if err != nil {
				self.errors = append(self.errors, err)
			}
			self.read()
			return
		}
		fallthrough

	case 'B':
		fallthrough

	case 'd', 'D', 's', 'S', 'w', 'W':
		// This is slightly broken, because ECMAScript
		// includes \v in \s, \S, while re2 does not
		fallthrough

	case '\\':
		fallthrough

	case 'f', 'n', 'r', 't', 'v':
		// These escapes mean the same thing in re2; pass them through.
		err := self.goRegexp.WriteByte('\\')
		if err != nil {
			self.errors = append(self.errors, err)
		}
		self.pass()
		return

	case 'c':
		// Control escape \cA..\cZ / \ca..\cz → emit as a \x.. hex escape.
		self.read()
		var value int64
		if 'a' <= self.chr && self.chr <= 'z' {
			value = int64(self.chr) - 'a' + 1
		} else if 'A' <= self.chr && self.chr <= 'Z' {
			value = int64(self.chr) - 'A' + 1
		} else {
			// Not a control letter: emit a literal 'c' (the '\' is dropped).
			err := self.goRegexp.WriteByte('c')
			if err != nil {
				self.errors = append(self.errors, err)
			}
			return
		}
		tmp := []byte{'\\', 'x', '0', 0}
		if value >= 16 {
			tmp = tmp[0:2]
		} else {
			tmp = tmp[0:3]
		}
		tmp = strconv.AppendInt(tmp, value, 16)
		_, err := self.goRegexp.Write(tmp)
		if err != nil {
			self.errors = append(self.errors, err)
		}
		self.read()
		return

	default:
		// $ is an identifier character, so we have to have
		// a special case for it here
		if self.chr == '$' || !isIdentifierPart(self.chr) {
			// A non-identifier character needs escaping
			err := self.goRegexp.WriteByte('\\')
			if err != nil {
				self.errors = append(self.errors, err)
			}
		} else {
			// Unescape the character for re2
		}
		self.pass()
		return
	}

	// Otherwise, we're a \u.... or \x...
	valueOffset := self.chrOffset

	var value uint32
	{
		length := length
		for ; length > 0; length-- {
			digit := uint32(digitValue(self.chr))
			if digit >= base {
				// Not a valid digit
				goto skip
			}
			value = value*base + digit
			self.read()
		}
	}

	if length == 4 {
		// \uXXXX becomes re2's \x{XXXX}.
		_, err := self.goRegexp.Write([]byte{
			'\\',
			'x',
			'{',
			self.str[valueOffset+0],
			self.str[valueOffset+1],
			self.str[valueOffset+2],
			self.str[valueOffset+3],
			'}',
		})
		if err != nil {
			self.errors = append(self.errors, err)
		}
	} else if length == 2 {
		// \xXX passes through unchanged.
		_, err := self.goRegexp.Write([]byte{
			'\\',
			'x',
			self.str[valueOffset+0],
			self.str[valueOffset+1],
		})
		if err != nil {
			self.errors = append(self.errors, err)
		}
	} else {
		// Should never, ever get here...
		self.error(-1, "re2: Illegal branch in scanEscape")
		goto skip
	}

	return

skip:
	// The escape could not be converted; copy the original text verbatim.
	_, err := self.goRegexp.WriteString(self.str[offset:self.chrOffset])
	if err != nil {
		self.errors = append(self.errors, err)
	}
}
+
+func (self *_RegExp_parser) pass() {
+ if self.chr != -1 {
+ _, err := self.goRegexp.WriteRune(self.chr)
+ if err != nil {
+ self.errors = append(self.errors, err)
+ }
+ }
+ self.read()
+}
+
// TODO Better error reporting, use the offset, etc.
// error records a formatted error against the pattern and returns it.
// NOTE(review): the offset parameter is currently unused (see TODO above).
func (self *_RegExp_parser) error(offset int, msg string, msgValues ...interface{}) error {
	err := fmt.Errorf(msg, msgValues...)
	self.errors = append(self.errors, err)
	return err
}
diff --git a/vendor/github.com/robertkrimen/otto/parser/scope.go b/vendor/github.com/robertkrimen/otto/parser/scope.go
new file mode 100644
index 000000000..e1dbdda13
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/scope.go
@@ -0,0 +1,44 @@
+package parser
+
+import (
+ "github.com/robertkrimen/otto/ast"
+)
+
// _scope tracks the lexical context while parsing: whether break/continue/
// return are legal, which declarations belong to the enclosing function, and
// the active statement labels.
type _scope struct {
	outer           *_scope // Enclosing scope, nil at the top level
	allowIn         bool    // Whether the "in" operator is allowed (false in for-statement initializers)
	inIteration     bool    // Inside a loop (continue/break valid)
	inSwitch        bool    // Inside a switch (break valid)
	inFunction      bool    // Inside a function body (return valid)
	declarationList []ast.Declaration // var/function declarations hoisted to this scope

	labels []string // Labels currently in scope
}
+
// openScope pushes a new lexical scope; "in" is allowed by default.
func (self *_parser) openScope() {
	self.scope = &_scope{
		outer:   self.scope,
		allowIn: true,
	}
}
+
// closeScope pops the current lexical scope, restoring its enclosing scope.
func (self *_parser) closeScope() {
	self.scope = self.scope.outer
}
+
// declare records a var/function declaration against this scope.
func (self *_scope) declare(declaration ast.Declaration) {
	self.declarationList = append(self.declarationList, declaration)
}
+
+func (self *_scope) hasLabel(name string) bool {
+ for _, label := range self.labels {
+ if label == name {
+ return true
+ }
+ }
+ if self.outer != nil && !self.inFunction {
+ // Crossing a function boundary to look for a label is verboten
+ return self.outer.hasLabel(name)
+ }
+ return false
+}
diff --git a/vendor/github.com/robertkrimen/otto/parser/statement.go b/vendor/github.com/robertkrimen/otto/parser/statement.go
new file mode 100644
index 000000000..6ff19d975
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/parser/statement.go
@@ -0,0 +1,938 @@
+package parser
+
+import (
+ "github.com/robertkrimen/otto/ast"
+ "github.com/robertkrimen/otto/token"
+)
+
// parseBlockStatement parses a brace-delimited statement list, attaching
// leading, final and trailing comments when comment storage is enabled.
func (self *_parser) parseBlockStatement() *ast.BlockStatement {
	node := &ast.BlockStatement{}

	// Find comments before the leading brace
	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(node, self.comments.FetchAll(), ast.LEADING)
		self.comments.Unset()
	}

	node.LeftBrace = self.expect(token.LEFT_BRACE)
	node.List = self.parseStatementList()

	if self.mode&StoreComments != 0 {
		self.comments.Unset()
		self.comments.CommentMap.AddComments(node, self.comments.FetchAll(), ast.FINAL)
		self.comments.AfterBlock()
	}

	node.RightBrace = self.expect(token.RIGHT_BRACE)

	// Find comments after the trailing brace
	if self.mode&StoreComments != 0 {
		self.comments.ResetLineBreak()
		self.comments.CommentMap.AddComments(node, self.comments.Fetch(), ast.TRAILING)
	}

	return node
}
+
// parseEmptyStatement consumes a lone ';' and returns an EmptyStatement.
func (self *_parser) parseEmptyStatement() ast.Statement {
	idx := self.expect(token.SEMICOLON)
	return &ast.EmptyStatement{Semicolon: idx}
}
+
+func (self *_parser) parseStatementList() (list []ast.Statement) {
+ for self.token != token.RIGHT_BRACE && self.token != token.EOF {
+ statement := self.parseStatement()
+ list = append(list, statement)
+ }
+
+ return
+}
+
+func (self *_parser) parseStatement() ast.Statement {
+
+ if self.token == token.EOF {
+ self.errorUnexpectedToken(self.token)
+ return &ast.BadStatement{From: self.idx, To: self.idx + 1}
+ }
+
+ if self.mode&StoreComments != 0 {
+ self.comments.ResetLineBreak()
+ }
+
+ switch self.token {
+ case token.SEMICOLON:
+ return self.parseEmptyStatement()
+ case token.LEFT_BRACE:
+ return self.parseBlockStatement()
+ case token.IF:
+ return self.parseIfStatement()
+ case token.DO:
+ statement := self.parseDoWhileStatement()
+ self.comments.PostProcessNode(statement)
+ return statement
+ case token.WHILE:
+ return self.parseWhileStatement()
+ case token.FOR:
+ return self.parseForOrForInStatement()
+ case token.BREAK:
+ return self.parseBreakStatement()
+ case token.CONTINUE:
+ return self.parseContinueStatement()
+ case token.DEBUGGER:
+ return self.parseDebuggerStatement()
+ case token.WITH:
+ return self.parseWithStatement()
+ case token.VAR:
+ return self.parseVariableStatement()
+ case token.FUNCTION:
+ return self.parseFunctionStatement()
+ case token.SWITCH:
+ return self.parseSwitchStatement()
+ case token.RETURN:
+ return self.parseReturnStatement()
+ case token.THROW:
+ return self.parseThrowStatement()
+ case token.TRY:
+ return self.parseTryStatement()
+ }
+
+ var comments []*ast.Comment
+ if self.mode&StoreComments != 0 {
+ comments = self.comments.FetchAll()
+ }
+
+ expression := self.parseExpression()
+
+ if identifier, isIdentifier := expression.(*ast.Identifier); isIdentifier && self.token == token.COLON {
+ // LabelledStatement
+ colon := self.idx
+ if self.mode&StoreComments != 0 {
+ self.comments.Unset()
+ }
+ self.next() // :
+
+ label := identifier.Name
+ for _, value := range self.scope.labels {
+ if label == value {
+ self.error(identifier.Idx0(), "Label '%s' already exists", label)
+ }
+ }
+ var labelComments []*ast.Comment
+ if self.mode&StoreComments != 0 {
+ labelComments = self.comments.FetchAll()
+ }
+ self.scope.labels = append(self.scope.labels, label) // Push the label
+ statement := self.parseStatement()
+ self.scope.labels = self.scope.labels[:len(self.scope.labels)-1] // Pop the label
+ exp := &ast.LabelledStatement{
+ Label: identifier,
+ Colon: colon,
+ Statement: statement,
+ }
+ if self.mode&StoreComments != 0 {
+ self.comments.CommentMap.AddComments(exp, labelComments, ast.LEADING)
+ }
+
+ return exp
+ }
+
+ self.optionalSemicolon()
+
+ statement := &ast.ExpressionStatement{
+ Expression: expression,
+ }
+
+ if self.mode&StoreComments != 0 {
+ self.comments.CommentMap.AddComments(statement, comments, ast.LEADING)
+ }
+ return statement
+}
+
// parseTryStatement parses a try statement with its optional catch and
// finally clauses; at least one of the two must be present.
func (self *_parser) parseTryStatement() ast.Statement {
	var tryComments []*ast.Comment
	if self.mode&StoreComments != 0 {
		tryComments = self.comments.FetchAll()
	}
	node := &ast.TryStatement{
		Try:  self.expect(token.TRY),
		Body: self.parseBlockStatement(),
	}
	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(node, tryComments, ast.LEADING)
		self.comments.CommentMap.AddComments(node.Body, self.comments.FetchAll(), ast.TRAILING)
	}

	if self.token == token.CATCH {
		catch := self.idx
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()
		self.expect(token.LEFT_PARENTHESIS)
		if self.token != token.IDENTIFIER {
			// catch requires a single identifier parameter.
			self.expect(token.IDENTIFIER)
			self.nextStatement()
			return &ast.BadStatement{From: catch, To: self.idx}
		} else {
			identifier := self.parseIdentifier()
			self.expect(token.RIGHT_PARENTHESIS)
			node.Catch = &ast.CatchStatement{
				Catch:     catch,
				Parameter: identifier,
				Body:      self.parseBlockStatement(),
			}

			if self.mode&StoreComments != 0 {
				self.comments.CommentMap.AddComments(node.Catch.Body, self.comments.FetchAll(), ast.TRAILING)
			}
		}
	}

	if self.token == token.FINALLY {
		if self.mode&StoreComments != 0 {
			self.comments.Unset()
		}
		self.next()
		if self.mode&StoreComments != 0 {
			tryComments = self.comments.FetchAll()
		}

		node.Finally = self.parseBlockStatement()

		if self.mode&StoreComments != 0 {
			self.comments.CommentMap.AddComments(node.Finally, tryComments, ast.LEADING)
		}
	}

	if node.Catch == nil && node.Finally == nil {
		self.error(node.Try, "Missing catch or finally after try")
		return &ast.BadStatement{From: node.Try, To: node.Body.Idx1()}
	}

	return node
}
+
// parseFunctionParameterList parses a parenthesized, comma-separated list of
// identifiers, as found in a function declaration or expression.
func (self *_parser) parseFunctionParameterList() *ast.ParameterList {
	opening := self.expect(token.LEFT_PARENTHESIS)
	if self.mode&StoreComments != 0 {
		self.comments.Unset()
	}
	var list []*ast.Identifier
	for self.token != token.RIGHT_PARENTHESIS && self.token != token.EOF {
		if self.token != token.IDENTIFIER {
			// Use expect's error handling; the bad token is skipped.
			self.expect(token.IDENTIFIER)
		} else {
			identifier := self.parseIdentifier()
			list = append(list, identifier)
		}
		if self.token != token.RIGHT_PARENTHESIS {
			if self.mode&StoreComments != 0 {
				self.comments.Unset()
			}
			self.expect(token.COMMA)
		}
	}
	closing := self.expect(token.RIGHT_PARENTHESIS)

	return &ast.ParameterList{
		Opening: opening,
		List:    list,
		Closing: closing,
	}
}
+
// parseParameterList parses a bare comma-separated identifier list that runs
// to EOF (used when the parameter list is the entire source, e.g. via
// ParseFunction-style entry points).
func (self *_parser) parseParameterList() (list []string) {
	for self.token != token.EOF {
		if self.token != token.IDENTIFIER {
			self.expect(token.IDENTIFIER)
		}
		list = append(list, self.literal)
		self.next()
		if self.token != token.EOF {
			self.expect(token.COMMA)
		}
	}
	return
}
+
// parseFunctionStatement parses a function declaration statement, attaching
// any leading comments when comment storage is enabled.
func (self *_parser) parseFunctionStatement() *ast.FunctionStatement {
	var comments []*ast.Comment
	if self.mode&StoreComments != 0 {
		comments = self.comments.FetchAll()
	}
	function := &ast.FunctionStatement{
		Function: self.parseFunction(true),
	}
	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(function, comments, ast.LEADING)
	}

	return function
}
+
// parseFunction parses a function literal starting at the `function` keyword.
// declaration indicates a function declaration (name required, and the
// function is declared in the enclosing scope) as opposed to a function
// expression (name optional).
func (self *_parser) parseFunction(declaration bool) *ast.FunctionLiteral {

	node := &ast.FunctionLiteral{
		Function: self.expect(token.FUNCTION),
	}

	var name *ast.Identifier
	if self.token == token.IDENTIFIER {
		name = self.parseIdentifier()
		if declaration {
			self.scope.declare(&ast.FunctionDeclaration{
				Function: node,
			})
		}
	} else if declaration {
		// Use expect error handling
		self.expect(token.IDENTIFIER)
	}
	if self.mode&StoreComments != 0 {
		self.comments.Unset()
	}
	node.Name = name
	node.ParameterList = self.parseFunctionParameterList()
	self.parseFunctionBlock(node)
	// Retain the raw source text of the function (used by Function.toString).
	node.Source = self.slice(node.Idx0(), node.Idx1())

	return node
}
+
+func (self *_parser) parseFunctionBlock(node *ast.FunctionLiteral) {
+ {
+ self.openScope()
+ inFunction := self.scope.inFunction
+ self.scope.inFunction = true
+ defer func() {
+ self.scope.inFunction = inFunction
+ self.closeScope()
+ }()
+ node.Body = self.parseBlockStatement()
+ node.DeclarationList = self.scope.declarationList
+ }
+}
+
// parseDebuggerStatement parses a `debugger;` statement.
func (self *_parser) parseDebuggerStatement() ast.Statement {
	idx := self.expect(token.DEBUGGER)

	node := &ast.DebuggerStatement{
		Debugger: idx,
	}
	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(node, self.comments.FetchAll(), ast.TRAILING)
	}

	self.semicolon()
	return node
}
+
// parseReturnStatement parses a return statement with an optional argument.
// A return outside a function body is an error.
func (self *_parser) parseReturnStatement() ast.Statement {
	idx := self.expect(token.RETURN)
	var comments []*ast.Comment
	if self.mode&StoreComments != 0 {
		comments = self.comments.FetchAll()
	}

	if !self.scope.inFunction {
		self.error(idx, "Illegal return statement")
		self.nextStatement()
		return &ast.BadStatement{From: idx, To: self.idx}
	}

	node := &ast.ReturnStatement{
		Return: idx,
	}

	// ASI: a newline after `return` means no argument.
	if !self.implicitSemicolon && self.token != token.SEMICOLON && self.token != token.RIGHT_BRACE && self.token != token.EOF {
		node.Argument = self.parseExpression()
	}
	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(node, comments, ast.LEADING)
	}

	self.semicolon()

	return node
}
+
// parseThrowStatement parses a throw statement. A line terminator between
// `throw` and its expression is illegal (no ASI allowed here).
func (self *_parser) parseThrowStatement() ast.Statement {
	var comments []*ast.Comment
	if self.mode&StoreComments != 0 {
		comments = self.comments.FetchAll()
	}
	idx := self.expect(token.THROW)

	if self.implicitSemicolon {
		if self.chr == -1 { // Hackish
			self.error(idx, "Unexpected end of input")
		} else {
			self.error(idx, "Illegal newline after throw")
		}
		self.nextStatement()
		return &ast.BadStatement{From: idx, To: self.idx}
	}

	node := &ast.ThrowStatement{
		Argument: self.parseExpression(),
	}
	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(node, comments, ast.LEADING)
	}

	self.semicolon()

	return node
}
+
// parseSwitchStatement parses a switch statement and its case clauses.
// node.Default records the index of the default clause within Body (-1 if
// absent); a second default clause is an error.
func (self *_parser) parseSwitchStatement() ast.Statement {
	var comments []*ast.Comment
	if self.mode&StoreComments != 0 {
		comments = self.comments.FetchAll()
	}
	self.expect(token.SWITCH)
	if self.mode&StoreComments != 0 {
		comments = append(comments, self.comments.FetchAll()...)
	}
	self.expect(token.LEFT_PARENTHESIS)
	node := &ast.SwitchStatement{
		Discriminant: self.parseExpression(),
		Default:      -1,
	}
	self.expect(token.RIGHT_PARENTHESIS)
	if self.mode&StoreComments != 0 {
		comments = append(comments, self.comments.FetchAll()...)
	}

	self.expect(token.LEFT_BRACE)

	// break is legal inside the switch body; restore on exit.
	inSwitch := self.scope.inSwitch
	self.scope.inSwitch = true
	defer func() {
		self.scope.inSwitch = inSwitch
	}()

	for index := 0; self.token != token.EOF; index++ {
		if self.token == token.RIGHT_BRACE {
			self.next()
			break
		}

		clause := self.parseCaseStatement()
		if clause.Test == nil {
			// A nil Test identifies the default clause.
			if node.Default != -1 {
				self.error(clause.Case, "Already saw a default in switch")
			}
			node.Default = index
		}
		node.Body = append(node.Body, clause)
	}

	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(node, comments, ast.LEADING)
	}

	return node
}
+
// parseWithStatement parses a with statement: `with (object) statement`.
func (self *_parser) parseWithStatement() ast.Statement {
	var comments []*ast.Comment
	if self.mode&StoreComments != 0 {
		comments = self.comments.FetchAll()
	}
	self.expect(token.WITH)
	var withComments []*ast.Comment
	if self.mode&StoreComments != 0 {
		withComments = self.comments.FetchAll()
	}

	self.expect(token.LEFT_PARENTHESIS)

	node := &ast.WithStatement{
		Object: self.parseExpression(),
	}
	self.expect(token.RIGHT_PARENTHESIS)

	if self.mode&StoreComments != 0 {
		//comments = append(comments, self.comments.FetchAll()...)
		self.comments.CommentMap.AddComments(node, comments, ast.LEADING)
		self.comments.CommentMap.AddComments(node, withComments, ast.WITH)
	}

	node.Body = self.parseStatement()

	return node
}
+
// parseCaseStatement parses a single `case expr:` or `default:` clause and
// its consequent statements. For a default clause, node.Test is left nil.
func (self *_parser) parseCaseStatement() *ast.CaseStatement {
	node := &ast.CaseStatement{
		Case: self.idx,
	}

	var comments []*ast.Comment
	if self.mode&StoreComments != 0 {
		comments = self.comments.FetchAll()
		self.comments.Unset()
	}

	if self.token == token.DEFAULT {
		self.next()
	} else {
		self.expect(token.CASE)
		node.Test = self.parseExpression()
	}

	if self.mode&StoreComments != 0 {
		self.comments.Unset()
	}
	self.expect(token.COLON)

	// Consequent statements run until the next clause or the end of the switch.
	for {
		if self.token == token.EOF ||
			self.token == token.RIGHT_BRACE ||
			self.token == token.CASE ||
			self.token == token.DEFAULT {
			break
		}
		consequent := self.parseStatement()
		node.Consequent = append(node.Consequent, consequent)
	}

	// Link the comments to the case statement
	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(node, comments, ast.LEADING)
	}

	return node
}
+
// parseIterationStatement parses a loop body with inIteration set, so that
// break/continue inside it are legal; the flag is restored on exit.
func (self *_parser) parseIterationStatement() ast.Statement {
	inIteration := self.scope.inIteration
	self.scope.inIteration = true
	defer func() {
		self.scope.inIteration = inIteration
	}()
	return self.parseStatement()
}
+
+func (self *_parser) parseForIn(into ast.Expression) *ast.ForInStatement {
+
+ // Already have consumed "<into> in"
+
+ source := self.parseExpression()
+ self.expect(token.RIGHT_PARENTHESIS)
+ body := self.parseIterationStatement()
+
+ forin := &ast.ForInStatement{
+ Into: into,
+ Source: source,
+ Body: body,
+ }
+
+ return forin
+}
+
// parseFor parses the remainder of a classic three-clause for statement; the
// caller has already consumed "<initializer> ;". Both the test and the
// update clause may be empty.
func (self *_parser) parseFor(initializer ast.Expression) *ast.ForStatement {

	// Already have consumed "<initializer> ;"

	var test, update ast.Expression

	if self.token != token.SEMICOLON {
		test = self.parseExpression()
	}
	if self.mode&StoreComments != 0 {
		self.comments.Unset()
	}
	self.expect(token.SEMICOLON)

	if self.token != token.RIGHT_PARENTHESIS {
		update = self.parseExpression()
	}
	self.expect(token.RIGHT_PARENTHESIS)
	body := self.parseIterationStatement()

	forstatement := &ast.ForStatement{
		Initializer: initializer,
		Test:        test,
		Update:      update,
		Body:        body,
	}

	return forstatement
}
+
// parseForOrForInStatement parses the header of a for statement and
// disambiguates between a classic three-clause for and a for-in, then
// delegates to parseFor or parseForIn.
func (self *_parser) parseForOrForInStatement() ast.Statement {
	var comments []*ast.Comment
	if self.mode&StoreComments != 0 {
		comments = self.comments.FetchAll()
	}
	idx := self.expect(token.FOR)
	var forComments []*ast.Comment
	if self.mode&StoreComments != 0 {
		forComments = self.comments.FetchAll()
	}
	self.expect(token.LEFT_PARENTHESIS)

	var left []ast.Expression

	forIn := false
	if self.token != token.SEMICOLON {

		// The "in" operator is disallowed while parsing the initializer so
		// that `for (x in y)` is not parsed as the expression `x in y`.
		allowIn := self.scope.allowIn
		self.scope.allowIn = false
		if self.token == token.VAR {
			var_ := self.idx
			var varComments []*ast.Comment
			if self.mode&StoreComments != 0 {
				varComments = self.comments.FetchAll()
				self.comments.Unset()
			}
			self.next()
			list := self.parseVariableDeclarationList(var_)
			if len(list) == 1 && self.token == token.IN {
				if self.mode&StoreComments != 0 {
					self.comments.Unset()
				}
				self.next() // in
				forIn = true
				left = []ast.Expression{list[0]} // There is only one declaration
			} else {
				left = list
			}
			if self.mode&StoreComments != 0 {
				self.comments.CommentMap.AddComments(left[0], varComments, ast.LEADING)
			}
		} else {
			left = append(left, self.parseExpression())
			if self.token == token.IN {
				self.next()
				forIn = true
			}
		}
		self.scope.allowIn = allowIn
	}

	if forIn {
		switch left[0].(type) {
		case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression, *ast.VariableExpression:
			// These are all acceptable
		default:
			self.error(idx, "Invalid left-hand side in for-in")
			self.nextStatement()
			return &ast.BadStatement{From: idx, To: self.idx}
		}

		forin := self.parseForIn(left[0])
		if self.mode&StoreComments != 0 {
			self.comments.CommentMap.AddComments(forin, comments, ast.LEADING)
			self.comments.CommentMap.AddComments(forin, forComments, ast.FOR)
		}
		return forin
	}

	if self.mode&StoreComments != 0 {
		self.comments.Unset()
	}
	self.expect(token.SEMICOLON)
	// A classic for: the initializer expressions become a sequence expression.
	initializer := &ast.SequenceExpression{Sequence: left}
	forstatement := self.parseFor(initializer)
	if self.mode&StoreComments != 0 {
		self.comments.CommentMap.AddComments(forstatement, comments, ast.LEADING)
		self.comments.CommentMap.AddComments(forstatement, forComments, ast.FOR)
	}
	return forstatement
}
+
+// parseVariableStatement parses "var <declaration list>" followed by
+// an explicit or automatic semicolon.
+func (self *_parser) parseVariableStatement() *ast.VariableStatement {
+	var comments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		comments = self.comments.FetchAll()
+	}
+	idx := self.expect(token.VAR)
+
+	list := self.parseVariableDeclarationList(idx)
+
+	statement := &ast.VariableStatement{
+		Var:  idx,
+		List: list,
+	}
+	if self.mode&StoreComments != 0 {
+		self.comments.CommentMap.AddComments(statement, comments, ast.LEADING)
+		self.comments.Unset()
+	}
+	self.semicolon()
+
+	return statement
+}
+
+// parseDoWhileStatement parses "do <body> while ( <test> )". The
+// inIteration flag is managed directly here (rather than via
+// parseIterationStatement) so it covers the whole statement.
+func (self *_parser) parseDoWhileStatement() ast.Statement {
+	inIteration := self.scope.inIteration
+	self.scope.inIteration = true
+	defer func() {
+		self.scope.inIteration = inIteration
+	}()
+
+	var comments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		comments = self.comments.FetchAll()
+	}
+	self.expect(token.DO)
+	var doComments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		doComments = self.comments.FetchAll()
+	}
+
+	node := &ast.DoWhileStatement{}
+	if self.token == token.LEFT_BRACE {
+		node.Body = self.parseBlockStatement()
+	} else {
+		node.Body = self.parseStatement()
+	}
+
+	self.expect(token.WHILE)
+	var whileComments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		whileComments = self.comments.FetchAll()
+	}
+	self.expect(token.LEFT_PARENTHESIS)
+	node.Test = self.parseExpression()
+	self.expect(token.RIGHT_PARENTHESIS)
+
+	// Attach comments collected around "do" and "while" to the node.
+	if self.mode&StoreComments != 0 {
+		self.comments.CommentMap.AddComments(node, comments, ast.LEADING)
+		self.comments.CommentMap.AddComments(node, doComments, ast.DO)
+		self.comments.CommentMap.AddComments(node, whileComments, ast.WHILE)
+	}
+
+	return node
+}
+
+// parseWhileStatement parses "while ( <test> ) <body>".
+func (self *_parser) parseWhileStatement() ast.Statement {
+	var comments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		comments = self.comments.FetchAll()
+	}
+	self.expect(token.WHILE)
+
+	var whileComments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		whileComments = self.comments.FetchAll()
+	}
+
+	self.expect(token.LEFT_PARENTHESIS)
+	node := &ast.WhileStatement{
+		Test: self.parseExpression(),
+	}
+	self.expect(token.RIGHT_PARENTHESIS)
+	node.Body = self.parseIterationStatement()
+
+	if self.mode&StoreComments != 0 {
+		self.comments.CommentMap.AddComments(node, comments, ast.LEADING)
+		self.comments.CommentMap.AddComments(node, whileComments, ast.WHILE)
+	}
+
+	return node
+}
+
+// parseIfStatement parses "if ( <test> ) <consequent> [else <alternate>]".
+// A braced consequent is parsed as a block; otherwise as a single
+// statement. The else branch is optional.
+func (self *_parser) parseIfStatement() ast.Statement {
+	var comments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		comments = self.comments.FetchAll()
+	}
+	self.expect(token.IF)
+	var ifComments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		ifComments = self.comments.FetchAll()
+	}
+
+	self.expect(token.LEFT_PARENTHESIS)
+	node := &ast.IfStatement{
+		Test: self.parseExpression(),
+	}
+	self.expect(token.RIGHT_PARENTHESIS)
+	if self.token == token.LEFT_BRACE {
+		node.Consequent = self.parseBlockStatement()
+	} else {
+		node.Consequent = self.parseStatement()
+	}
+
+	if self.token == token.ELSE {
+		self.next()
+		node.Alternate = self.parseStatement()
+	}
+
+	if self.mode&StoreComments != 0 {
+		self.comments.CommentMap.AddComments(node, comments, ast.LEADING)
+		self.comments.CommentMap.AddComments(node, ifComments, ast.IF)
+	}
+
+	return node
+}
+
+// parseSourceElement parses a single top-level statement.
+func (self *_parser) parseSourceElement() ast.Statement {
+	statement := self.parseStatement()
+	//self.comments.Unset()
+	return statement
+}
+
+// parseSourceElements parses statements until EOF. The first loop
+// consumes leading string-literal statements separately — presumably
+// the directive prologue ("use strict" etc.); confirm against callers.
+func (self *_parser) parseSourceElements() []ast.Statement {
+	body := []ast.Statement(nil)
+
+	for {
+		if self.token != token.STRING {
+			break
+		}
+		body = append(body, self.parseSourceElement())
+	}
+
+	for self.token != token.EOF {
+		body = append(body, self.parseSourceElement())
+	}
+
+	return body
+}
+
+// parseProgram parses an entire program in a fresh scope and records
+// that scope's declaration list on the resulting node.
+func (self *_parser) parseProgram() *ast.Program {
+	self.openScope()
+	defer self.closeScope()
+	return &ast.Program{
+		Body:            self.parseSourceElements(),
+		DeclarationList: self.scope.declarationList,
+		File:            self.file,
+	}
+}
+
+// parseBreakStatement parses "break [label]". A bare break is legal
+// only inside an iteration statement or a switch; a labeled break must
+// name a label currently in scope.
+func (self *_parser) parseBreakStatement() ast.Statement {
+	var comments []*ast.Comment
+	if self.mode&StoreComments != 0 {
+		comments = self.comments.FetchAll()
+	}
+	idx := self.expect(token.BREAK)
+	semicolon := self.implicitSemicolon
+	if self.token == token.SEMICOLON {
+		semicolon = true
+		self.next()
+	}
+
+	// Statement ends here (semicolon or "}"): this is a bare break.
+	if semicolon || self.token == token.RIGHT_BRACE {
+		self.implicitSemicolon = false
+		if !self.scope.inIteration && !self.scope.inSwitch {
+			goto illegal
+		}
+		breakStatement := &ast.BranchStatement{
+			Idx:   idx,
+			Token: token.BREAK,
+		}
+
+		if self.mode&StoreComments != 0 {
+			self.comments.CommentMap.AddComments(breakStatement, comments, ast.LEADING)
+			self.comments.CommentMap.AddComments(breakStatement, self.comments.FetchAll(), ast.TRAILING)
+		}
+
+		return breakStatement
+	}
+
+	if self.token == token.IDENTIFIER {
+		identifier := self.parseIdentifier()
+		if !self.scope.hasLabel(identifier.Name) {
+			self.error(idx, "Undefined label '%s'", identifier.Name)
+			return &ast.BadStatement{From: idx, To: identifier.Idx1()}
+		}
+		self.semicolon()
+		breakStatement := &ast.BranchStatement{
+			Idx:   idx,
+			Token: token.BREAK,
+			Label: identifier,
+		}
+		if self.mode&StoreComments != 0 {
+			self.comments.CommentMap.AddComments(breakStatement, comments, ast.LEADING)
+		}
+
+		return breakStatement
+	}
+
+	// Neither end-of-statement nor a label: emit the expectation error.
+	self.expect(token.IDENTIFIER)
+
+illegal:
+	self.error(idx, "Illegal break statement")
+	self.nextStatement()
+	return &ast.BadStatement{From: idx, To: self.idx}
+}
+
+// parseContinueStatement parses "continue [label]"; continue is legal
+// only inside an iteration statement.
+// NOTE(review): unlike parseBreakStatement this does not collect
+// leading comments — confirm whether that is intentional.
+func (self *_parser) parseContinueStatement() ast.Statement {
+	idx := self.expect(token.CONTINUE)
+	semicolon := self.implicitSemicolon
+	if self.token == token.SEMICOLON {
+		semicolon = true
+		self.next()
+	}
+
+	if semicolon || self.token == token.RIGHT_BRACE {
+		self.implicitSemicolon = false
+		if !self.scope.inIteration {
+			goto illegal
+		}
+		return &ast.BranchStatement{
+			Idx:   idx,
+			Token: token.CONTINUE,
+		}
+	}
+
+	if self.token == token.IDENTIFIER {
+		identifier := self.parseIdentifier()
+		if !self.scope.hasLabel(identifier.Name) {
+			self.error(idx, "Undefined label '%s'", identifier.Name)
+			return &ast.BadStatement{From: idx, To: identifier.Idx1()}
+		}
+		if !self.scope.inIteration {
+			goto illegal
+		}
+		self.semicolon()
+		return &ast.BranchStatement{
+			Idx:   idx,
+			Token: token.CONTINUE,
+			Label: identifier,
+		}
+	}
+
+	self.expect(token.IDENTIFIER)
+
+illegal:
+	self.error(idx, "Illegal continue statement")
+	self.nextStatement()
+	return &ast.BadStatement{From: idx, To: self.idx}
+}
+
+// Find the next statement after an error (recover): skip tokens until
+// one is seen that can plausibly begin a statement, guarding against
+// making no progress.
+func (self *_parser) nextStatement() {
+	for {
+		switch self.token {
+		case token.BREAK, token.CONTINUE,
+			token.FOR, token.IF, token.RETURN, token.SWITCH,
+			token.VAR, token.DO, token.TRY, token.WITH,
+			token.WHILE, token.THROW, token.CATCH, token.FINALLY:
+			// Return only if parser made some progress since last
+			// sync or if it has not reached 10 next calls without
+			// progress. Otherwise consume at least one token to
+			// avoid an endless parser loop
+			if self.idx == self.recover.idx && self.recover.count < 10 {
+				self.recover.count++
+				return
+			}
+			if self.idx > self.recover.idx {
+				self.recover.idx = self.idx
+				self.recover.count = 0
+				return
+			}
+			// Reaching here indicates a parser bug, likely an
+			// incorrect token list in this function, but it only
+			// leads to skipping of possibly correct code if a
+			// previous error is present, and thus is preferred
+			// over a non-terminating parse.
+		case token.EOF:
+			return
+		}
+		self.next()
+	}
+}
diff --git a/vendor/github.com/robertkrimen/otto/property.go b/vendor/github.com/robertkrimen/otto/property.go
new file mode 100644
index 000000000..5445eccde
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/property.go
@@ -0,0 +1,220 @@
+package otto
+
+// property
+
+type _propertyMode int
+
+// Property attribute modes are packed as three octal digits —
+// write, enumerate, configure (high to low). Each digit is
+// 0 (off), 1 (on) or 2 (not set).
+const (
+	modeWriteMask     _propertyMode = 0700
+	modeEnumerateMask               = 0070
+	modeConfigureMask               = 0007
+	modeOnMask                      = 0111
+	modeOffMask                     = 0000
+	modeSetMask                     = 0222 // If value is 2, then mode is neither "On" nor "Off"
+)
+
+// _propertyGetSet holds an accessor pair: [0] is the getter, [1] the
+// setter (see _property.get and fromPropertyDescriptor).
+type _propertyGetSet [2]*_object
+
+// _nilGetSetObject is a sentinel recorded when a descriptor supplies
+// "get"/"set" with an undefined value (see toPropertyDescriptor).
+var _nilGetSetObject _object = _object{}
+
+// _property is one object property: its value (a Value, or a
+// _propertyGetSet for accessor properties) plus its attribute mode.
+type _property struct {
+	value interface{}
+	mode  _propertyMode
+}
+
+// writable reports whether the write digit is "on" (1).
+func (self _property) writable() bool {
+	return self.mode&modeWriteMask == modeWriteMask&modeOnMask
+}
+
+func (self *_property) writeOn() {
+	self.mode = (self.mode & ^modeWriteMask) | (modeWriteMask & modeOnMask)
+}
+
+func (self *_property) writeOff() {
+	self.mode &= ^modeWriteMask
+}
+
+// writeClear marks the write digit as "not set" (2).
+func (self *_property) writeClear() {
+	self.mode = (self.mode & ^modeWriteMask) | (modeWriteMask & modeSetMask)
+}
+
+// writeSet reports whether the write digit was explicitly set
+// (0 or 1, not the "not set" value 2).
+func (self _property) writeSet() bool {
+	return 0 == self.mode&modeWriteMask&modeSetMask
+}
+
+// enumerable reports whether the enumerate digit is "on".
+func (self _property) enumerable() bool {
+	return self.mode&modeEnumerateMask == modeEnumerateMask&modeOnMask
+}
+
+func (self *_property) enumerateOn() {
+	self.mode = (self.mode & ^modeEnumerateMask) | (modeEnumerateMask & modeOnMask)
+}
+
+func (self *_property) enumerateOff() {
+	self.mode &= ^modeEnumerateMask
+}
+
+// enumerateSet reports whether the enumerate digit was explicitly set.
+func (self _property) enumerateSet() bool {
+	return 0 == self.mode&modeEnumerateMask&modeSetMask
+}
+
+// configurable reports whether the configure digit is "on".
+func (self _property) configurable() bool {
+	return self.mode&modeConfigureMask == modeConfigureMask&modeOnMask
+}
+
+func (self *_property) configureOn() {
+	self.mode = (self.mode & ^modeConfigureMask) | (modeConfigureMask & modeOnMask)
+}
+
+func (self *_property) configureOff() {
+	self.mode &= ^modeConfigureMask
+}
+
+// configureSet reports whether the configure digit was explicitly set.
+func (self _property) configureSet() bool {
+	return 0 == self.mode&modeConfigureMask&modeSetMask
+}
+
+// copy returns a pointer to a shallow copy of the property.
+func (self _property) copy() *_property {
+	property := self
+	return &property
+}
+
+// get returns the property's value. For an accessor property it calls
+// the getter (element 0) with this as the receiver; a missing getter,
+// or any other stored type, yields the zero Value.
+func (self _property) get(this *_object) Value {
+	switch value := self.value.(type) {
+	case Value:
+		return value
+	case _propertyGetSet:
+		if value[0] != nil {
+			return value[0].call(toValue(this), nil, false, nativeFrame)
+		}
+	}
+	return Value{}
+}
+
+// isAccessorDescriptor reports whether the property stores a get/set
+// pair with at least one accessor present.
+func (self _property) isAccessorDescriptor() bool {
+	setGet, test := self.value.(_propertyGetSet)
+	return test && (setGet[0] != nil || setGet[1] != nil)
+}
+
+// isDataDescriptor reports whether the property looks like a data
+// property: the writable attribute was explicitly set, or a non-empty
+// Value is stored.
+func (self _property) isDataDescriptor() bool {
+	if self.writeSet() { // Either "On" or "Off"
+		return true
+	}
+	value, valid := self.value.(Value)
+	return valid && !value.isEmpty()
+}
+
+// isGenericDescriptor reports a descriptor that is neither a data nor
+// an accessor descriptor.
+func (self _property) isGenericDescriptor() bool {
+	return !(self.isDataDescriptor() || self.isAccessorDescriptor())
+}
+
+// isEmpty reports a descriptor with every attribute digit "not set"
+// (0222) and no value or accessors.
+func (self _property) isEmpty() bool {
+	return self.mode == 0222 && self.isGenericDescriptor()
+}
+
+// _enumerableValue, _enumerableTrue, _enumerableFalse?
+// .enumerableValue() .enumerableExists()
+
+// toPropertyDescriptor builds an internal _property from a JavaScript
+// property-descriptor object (cf. ES5 ToPropertyDescriptor): the
+// enumerable/configurable/writable flags plus either a value or a
+// get/set pair. It panics with a TypeError when value is not an
+// object, a supplied getter/setter is not callable, or accessors are
+// combined with value/writable.
+func toPropertyDescriptor(rt *_runtime, value Value) (descriptor _property) {
+	objectDescriptor := value._object()
+	if objectDescriptor == nil {
+		panic(rt.panicTypeError())
+	}
+
+	{
+		descriptor.mode = modeSetMask // Initially nothing is set
+		if objectDescriptor.hasProperty("enumerable") {
+			if objectDescriptor.get("enumerable").bool() {
+				descriptor.enumerateOn()
+			} else {
+				descriptor.enumerateOff()
+			}
+		}
+
+		if objectDescriptor.hasProperty("configurable") {
+			if objectDescriptor.get("configurable").bool() {
+				descriptor.configureOn()
+			} else {
+				descriptor.configureOff()
+			}
+		}
+
+		if objectDescriptor.hasProperty("writable") {
+			if objectDescriptor.get("writable").bool() {
+				descriptor.writeOn()
+			} else {
+				descriptor.writeOff()
+			}
+		}
+	}
+
+	var getter, setter *_object
+	getterSetter := false
+
+	if objectDescriptor.hasProperty("get") {
+		value := objectDescriptor.get("get")
+		if value.IsDefined() {
+			if !value.isCallable() {
+				panic(rt.panicTypeError())
+			}
+			getter = value._object()
+			getterSetter = true
+		} else {
+			// "get" present but undefined: record an explicit nil getter.
+			getter = &_nilGetSetObject
+			getterSetter = true
+		}
+	}
+
+	if objectDescriptor.hasProperty("set") {
+		value := objectDescriptor.get("set")
+		if value.IsDefined() {
+			if !value.isCallable() {
+				panic(rt.panicTypeError())
+			}
+			setter = value._object()
+			getterSetter = true
+		} else {
+			setter = &_nilGetSetObject
+			getterSetter = true
+		}
+	}
+
+	if getterSetter {
+		// Accessors are incompatible with an explicit writable flag.
+		if descriptor.writeSet() {
+			panic(rt.panicTypeError())
+		}
+		descriptor.value = _propertyGetSet{getter, setter}
+	}
+
+	if objectDescriptor.hasProperty("value") {
+		if getterSetter {
+			panic(rt.panicTypeError())
+		}
+		descriptor.value = objectDescriptor.get("value")
+	}
+
+	return
+}
+
+// fromPropertyDescriptor builds a plain JavaScript object describing
+// descriptor (cf. ES5 FromPropertyDescriptor). Every field is defined
+// with mode 0111: writable, enumerable and configurable all on.
+func (self *_runtime) fromPropertyDescriptor(descriptor _property) *_object {
+	object := self.newObject()
+	if descriptor.isDataDescriptor() {
+		object.defineProperty("value", descriptor.value.(Value), 0111, false)
+		object.defineProperty("writable", toValue_bool(descriptor.writable()), 0111, false)
+	} else if descriptor.isAccessorDescriptor() {
+		getSet := descriptor.value.(_propertyGetSet)
+		// A nil accessor is reported as undefined (zero Value).
+		get := Value{}
+		if getSet[0] != nil {
+			get = toValue_object(getSet[0])
+		}
+		set := Value{}
+		if getSet[1] != nil {
+			set = toValue_object(getSet[1])
+		}
+		object.defineProperty("get", get, 0111, false)
+		object.defineProperty("set", set, 0111, false)
+	}
+	object.defineProperty("enumerable", toValue_bool(descriptor.enumerable()), 0111, false)
+	object.defineProperty("configurable", toValue_bool(descriptor.configurable()), 0111, false)
+	return object
+}
diff --git a/vendor/github.com/robertkrimen/otto/registry/README.markdown b/vendor/github.com/robertkrimen/otto/registry/README.markdown
new file mode 100644
index 000000000..ba2d38909
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/registry/README.markdown
@@ -0,0 +1,51 @@
+# registry
+--
+ import "github.com/robertkrimen/otto/registry"
+
+Package registry is an experimental package to facilitate altering the otto
+runtime via import.
+
+This interface can change at any time.
+
+## Usage
+
+#### func Apply
+
+```go
+func Apply(callback func(Entry))
+```
+
+#### type Entry
+
+```go
+type Entry struct {
+}
+```
+
+
+#### func Register
+
+```go
+func Register(source func() string) *Entry
+```
+
+#### func (*Entry) Disable
+
+```go
+func (self *Entry) Disable()
+```
+
+#### func (*Entry) Enable
+
+```go
+func (self *Entry) Enable()
+```
+
+#### func (Entry) Source
+
+```go
+func (self Entry) Source() string
+```
+
+--
+**godocdown** http://github.com/robertkrimen/godocdown
diff --git a/vendor/github.com/robertkrimen/otto/registry/registry.go b/vendor/github.com/robertkrimen/otto/registry/registry.go
new file mode 100644
index 000000000..966638ac4
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/registry/registry.go
@@ -0,0 +1,47 @@
+/*
+Package registry is an experimental package to facilitate altering the otto runtime via import.
+
+This interface can change at any time.
+*/
+package registry
+
+// registry holds every registered entry, in registration order.
+var registry []*Entry = make([]*Entry, 0)
+
+// Entry is a registered source provider that can be toggled on or off.
+type Entry struct {
+	active bool
+	source func() string
+}
+
+// newEntry returns an Entry for source, enabled by default.
+func newEntry(source func() string) *Entry {
+	return &Entry{
+		active: true,
+		source: source,
+	}
+}
+
+// Enable marks the entry as active, so Apply will visit it.
+func (self *Entry) Enable() {
+	self.active = true
+}
+
+// Disable marks the entry as inactive, so Apply will skip it.
+func (self *Entry) Disable() {
+	self.active = false
+}
+
+// Source invokes the entry's source function and returns its result.
+func (self Entry) Source() string {
+	return self.source()
+}
+
+// Apply invokes callback with a copy of every active entry.
+func Apply(callback func(Entry)) {
+	for _, entry := range registry {
+		if !entry.active {
+			continue
+		}
+		callback(*entry)
+	}
+}
+
+// Register adds an enabled entry for source to the registry and
+// returns it so callers can later Enable/Disable it.
+func Register(source func() string) *Entry {
+	entry := newEntry(source)
+	registry = append(registry, entry)
+	return entry
+}
diff --git a/vendor/github.com/robertkrimen/otto/result.go b/vendor/github.com/robertkrimen/otto/result.go
new file mode 100644
index 000000000..63642e7d0
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/result.go
@@ -0,0 +1,30 @@
+package otto
+
+// NOTE(review): empty import block; gofmt/goimports would remove it.
+import ()
+
+// _resultKind classifies how a statement completed.
+type _resultKind int
+
+const (
+	resultNormal _resultKind = iota
+	resultReturn
+	resultBreak
+	resultContinue
+)
+
+// _result carries a completion: its kind, the associated value (for
+// return), and the target label (for break/continue).
+type _result struct {
+	kind   _resultKind
+	value  Value
+	target string
+}
+
+// newReturnResult wraps value in a return completion.
+func newReturnResult(value Value) _result {
+	return _result{resultReturn, value, ""}
+}
+
+// newContinueResult builds a continue completion aimed at target
+// (empty for an unlabeled continue).
+func newContinueResult(target string) _result {
+	return _result{resultContinue, emptyValue, target}
+}
+
+// newBreakResult builds a break completion aimed at target
+// (empty for an unlabeled break).
+func newBreakResult(target string) _result {
+	return _result{resultBreak, emptyValue, target}
+}
diff --git a/vendor/github.com/robertkrimen/otto/runtime.go b/vendor/github.com/robertkrimen/otto/runtime.go
new file mode 100644
index 000000000..5762bd0c3
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/runtime.go
@@ -0,0 +1,711 @@
+package otto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "path"
+ "reflect"
+ "runtime"
+ "strconv"
+ "sync"
+
+ "github.com/robertkrimen/otto/ast"
+ "github.com/robertkrimen/otto/parser"
+)
+
+// _global bundles the built-in constructor objects and their
+// prototypes for one runtime. The trailing comments note the
+// constructor arity (its "length" property).
+type _global struct {
+	Object         *_object // Object( ... ), new Object( ... ) - 1 (length)
+	Function       *_object // Function( ... ), new Function( ... ) - 1
+	Array          *_object // Array( ... ), new Array( ... ) - 1
+	String         *_object // String( ... ), new String( ... ) - 1
+	Boolean        *_object // Boolean( ... ), new Boolean( ... ) - 1
+	Number         *_object // Number( ... ), new Number( ... ) - 1
+	Math           *_object
+	Date           *_object // Date( ... ), new Date( ... ) - 7
+	RegExp         *_object // RegExp( ... ), new RegExp( ... ) - 2
+	Error          *_object // Error( ... ), new Error( ... ) - 1
+	EvalError      *_object
+	TypeError      *_object
+	RangeError     *_object
+	ReferenceError *_object
+	SyntaxError    *_object
+	URIError       *_object
+	JSON           *_object
+
+	ObjectPrototype         *_object // Object.prototype
+	FunctionPrototype       *_object // Function.prototype
+	ArrayPrototype          *_object // Array.prototype
+	StringPrototype         *_object // String.prototype
+	BooleanPrototype        *_object // Boolean.prototype
+	NumberPrototype         *_object // Number.prototype
+	DatePrototype           *_object // Date.prototype
+	RegExpPrototype         *_object // RegExp.prototype
+	ErrorPrototype          *_object // Error.prototype
+	EvalErrorPrototype      *_object
+	TypeErrorPrototype      *_object
+	RangeErrorPrototype     *_object
+	ReferenceErrorPrototype *_object
+	SyntaxErrorPrototype    *_object
+	URIErrorPrototype       *_object
+}
+
+// _runtime is the mutable execution state of a single interpreter:
+// globals, the current scope chain, limits, and RNG/debug hooks.
+type _runtime struct {
+	global       _global
+	globalObject *_object
+	globalStash  *_objectStash
+	scope        *_scope
+	otto         *Otto
+	eval         *_object // The builtin eval, for determine indirect versus direct invocation
+	debugger     func(*Otto)
+	random       func() float64
+	stackLimit   int
+	traceLimit   int
+
+	labels []string // FIXME
+	lck    sync.Mutex
+}
+
+// enterScope pushes scope onto the scope stack, tracking nesting
+// depth and enforcing stackLimit (0 means no limit).
+func (self *_runtime) enterScope(scope *_scope) {
+	scope.outer = self.scope
+	if self.scope != nil {
+		if self.stackLimit != 0 && self.scope.depth+1 >= self.stackLimit {
+			panic(self.panicRangeError("Maximum call stack size exceeded"))
+		}
+
+		scope.depth = self.scope.depth + 1
+	}
+
+	self.scope = scope
+}
+
+// leaveScope pops the current scope.
+func (self *_runtime) leaveScope() {
+	self.scope = self.scope.outer
+}
+
+// enterGlobalScope enters a scope backed by the global stash/object.
+// FIXME This is used in two places (cloning)
+func (self *_runtime) enterGlobalScope() {
+	self.enterScope(newScope(self.globalStash, self.globalStash, self.globalObject))
+}
+
+// enterFunctionScope creates a function stash (outer defaults to the
+// global stash) and enters a scope with the given this value; an
+// undefined or null this falls back to the global object.
+func (self *_runtime) enterFunctionScope(outer _stash, this Value) *_fnStash {
+	if outer == nil {
+		outer = self.globalStash
+	}
+	stash := self.newFunctionStash(outer)
+	var thisObject *_object
+	switch this.kind {
+	case valueUndefined, valueNull:
+		thisObject = self.globalObject
+	default:
+		thisObject = self.toObject(this)
+	}
+	self.enterScope(newScope(stash, stash, thisObject))
+	return stash
+}
+
+// putValue writes value through reference; when the reference reports
+// an unresolved name (a non-empty return), the value is instead
+// defined on the global object.
+func (self *_runtime) putValue(reference _reference, value Value) {
+	name := reference.putValue(value)
+	if name != "" {
+		// Why? -- If reference.base == nil
+		// strict = false
+		self.globalObject.defineProperty(name, value, 0111, false)
+	}
+}
+
+// tryCatchEvaluate runs inner, converting a thrown JavaScript error or
+// Value (delivered as a panic) into (tryValue, exception=true). Any
+// other panic is re-raised untouched.
+func (self *_runtime) tryCatchEvaluate(inner func() Value) (tryValue Value, exception bool) {
+	// resultValue = The value of the block (e.g. the last statement)
+	// throw = Something was thrown
+	// throwValue = The value of what was thrown
+	// other = Something that changes flow (return, break, continue) that is not a throw
+	// Otherwise, some sort of unknown panic happened, we'll just propagate it
+	defer func() {
+		if caught := recover(); caught != nil {
+			if exception, ok := caught.(*_exception); ok {
+				caught = exception.eject()
+			}
+			switch caught := caught.(type) {
+			case _error:
+				exception = true
+				tryValue = toValue_object(self.newError(caught.name, caught.messageValue(), 0))
+			case Value:
+				exception = true
+				tryValue = caught
+			default:
+				panic(caught)
+			}
+		}
+	}()
+
+	tryValue = inner()
+	return
+}
+
+// toObject
+
+// toObject coerces value to an object, wrapping primitives in their
+// wrapper objects; empty/undefined/null (and any unknown kind) panic
+// with a TypeError.
+func (self *_runtime) toObject(value Value) *_object {
+	switch value.kind {
+	case valueEmpty, valueUndefined, valueNull:
+		panic(self.panicTypeError())
+	case valueBoolean:
+		return self.newBoolean(value)
+	case valueString:
+		return self.newString(value)
+	case valueNumber:
+		return self.newNumber(value)
+	case valueObject:
+		return value._object()
+	}
+	panic(self.panicTypeError())
+}
+
+// objectCoerce is like toObject but reports undefined/null as plain
+// errors instead of panicking; any other non-coercible kind still
+// panics with a TypeError.
+func (self *_runtime) objectCoerce(value Value) (*_object, error) {
+	switch value.kind {
+	case valueUndefined:
+		return nil, errors.New("undefined")
+	case valueNull:
+		return nil, errors.New("null")
+	case valueBoolean:
+		return self.newBoolean(value), nil
+	case valueString:
+		return self.newString(value), nil
+	case valueNumber:
+		return self.newNumber(value), nil
+	case valueObject:
+		return value._object(), nil
+	}
+	panic(self.panicTypeError())
+}
+
+// checkObjectCoercible panics with a TypeError when value is neither
+// an object nor a primitive coercible to one.
+func checkObjectCoercible(rt *_runtime, value Value) {
+	isObject, mustCoerce := testObjectCoercible(value)
+	if !isObject && !mustCoerce {
+		panic(rt.panicTypeError())
+	}
+}
+
+// testObjectCoercible classifies value: already an object, a primitive
+// that must be coerced, or neither (reference/empty/null/undefined).
+func testObjectCoercible(value Value) (isObject bool, mustCoerce bool) {
+	switch value.kind {
+	case valueReference, valueEmpty, valueNull, valueUndefined:
+		return false, false
+	case valueNumber, valueString, valueBoolean:
+		return false, true
+	case valueObject:
+		return true, false
+	default:
+		panic("this should never happen")
+	}
+}
+
+// safeToValue converts a Go value to a Value, capturing any conversion
+// panic as an error instead of propagating it.
+func (self *_runtime) safeToValue(value interface{}) (Value, error) {
+	result := Value{}
+	err := catchPanic(func() {
+		result = self.toValue(value)
+	})
+	return result, err
+}
+
+// convertNumeric converts numeric parameter val from js to that of type t if it is safe to do so, otherwise it panics.
+// This allows literals (int64), bitwise values (int32) and the general form (float64) of javascript numerics to be passed as parameters to go functions easily.
+func (self *_runtime) convertNumeric(v Value, t reflect.Type) reflect.Value {
+	val := reflect.ValueOf(v.export())
+
+	if val.Kind() == t.Kind() {
+		return val
+	}
+
+	if val.Kind() == reflect.Interface {
+		val = reflect.ValueOf(val.Interface())
+	}
+
+	// Floats: allow exact conversion only — a float target must not
+	// overflow, and an integer target must represent the float exactly.
+	switch val.Kind() {
+	case reflect.Float32, reflect.Float64:
+		f64 := val.Float()
+		switch t.Kind() {
+		case reflect.Float64:
+			return reflect.ValueOf(f64)
+		case reflect.Float32:
+			if reflect.Zero(t).OverflowFloat(f64) {
+				panic(self.panicRangeError("converting float64 to float32 would overflow"))
+			}
+
+			return val.Convert(t)
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			i64 := int64(f64)
+			if float64(i64) != f64 {
+				panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would cause loss of precision", val.Type(), t)))
+			}
+
+			// The float represents an integer
+			val = reflect.ValueOf(i64)
+		default:
+			panic(self.panicTypeError(fmt.Sprintf("cannot convert %v to %v", val.Type(), t)))
+		}
+	}
+
+	// Integers: check overflow (and sign when converting to unsigned).
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		i64 := val.Int()
+		switch t.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			if reflect.Zero(t).OverflowInt(i64) {
+				panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t)))
+			}
+			return val.Convert(t)
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			if i64 < 0 {
+				panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would underflow", val.Type(), t)))
+			}
+			if reflect.Zero(t).OverflowUint(uint64(i64)) {
+				panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t)))
+			}
+			return val.Convert(t)
+		}
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		u64 := val.Uint()
+		switch t.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			if u64 > math.MaxInt64 || reflect.Zero(t).OverflowInt(int64(u64)) {
+				panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t)))
+			}
+			return val.Convert(t)
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			if reflect.Zero(t).OverflowUint(u64) {
+				panic(self.panicRangeError(fmt.Sprintf("converting %v to %v would overflow", val.Type(), t)))
+			}
+			return val.Convert(t)
+		}
+	}
+
+	panic(self.panicTypeError(fmt.Sprintf("unsupported type %v for numeric conversion", val.Type())))
+}
+
+var typeOfValue = reflect.TypeOf(Value{})
+
+// convertCallParameter converts request val to type t if possible.
+// If the conversion fails due to overflow or type miss-match then it panics.
+// If no conversion is known then the original value is returned.
+func (self *_runtime) convertCallParameter(v Value, t reflect.Type) reflect.Value {
+	// A Value parameter is passed through untouched.
+	if t == typeOfValue {
+		return reflect.ValueOf(v)
+	}
+
+	// A wrapped Go struct that is already assignable needs no work.
+	if v.kind == valueObject {
+		if gso, ok := v._object().value.(*_goStructObject); ok {
+			if gso.value.Type().AssignableTo(t) {
+				return gso.value
+			}
+		}
+	}
+
+	if t.Kind() == reflect.Interface {
+		iv := reflect.ValueOf(v.export())
+		if iv.Type().AssignableTo(t) {
+			return iv
+		}
+	}
+
+	tk := t.Kind()
+
+	// Pointer target: null-ish becomes a nil pointer; otherwise convert
+	// to the element type and take (or fabricate) its address.
+	if tk == reflect.Ptr {
+		switch v.kind {
+		case valueEmpty, valueNull, valueUndefined:
+			return reflect.Zero(t)
+		default:
+			var vv reflect.Value
+			if err := catchPanic(func() { vv = self.convertCallParameter(v, t.Elem()) }); err == nil {
+				if vv.CanAddr() {
+					return vv.Addr()
+				}
+
+				pv := reflect.New(vv.Type())
+				pv.Elem().Set(vv)
+				return pv
+			}
+		}
+	}
+
+	switch tk {
+	case reflect.Bool:
+		return reflect.ValueOf(v.bool())
+	case reflect.String:
+		switch v.kind {
+		case valueString:
+			return reflect.ValueOf(v.value)
+		case valueNumber:
+			return reflect.ValueOf(fmt.Sprintf("%v", v.value))
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64:
+		switch v.kind {
+		case valueNumber:
+			return self.convertNumeric(v, t)
+		}
+	case reflect.Slice:
+		// Anything with a numeric "length" is treated as array-like;
+		// JS arrays and wrapped Go slices/arrays are copied element
+		// by element, converting each to the slice's element type.
+		if o := v._object(); o != nil {
+			if lv := o.get("length"); lv.IsNumber() {
+				l := lv.number().int64
+
+				s := reflect.MakeSlice(t, int(l), int(l))
+
+				tt := t.Elem()
+
+				if o.class == "Array" {
+					for i := int64(0); i < l; i++ {
+						p, ok := o.property[strconv.FormatInt(i, 10)]
+						if !ok {
+							continue
+						}
+
+						e, ok := p.value.(Value)
+						if !ok {
+							continue
+						}
+
+						ev := self.convertCallParameter(e, tt)
+
+						s.Index(int(i)).Set(ev)
+					}
+				} else if o.class == "GoArray" {
+
+					var gslice bool
+					switch o.value.(type) {
+					case *_goSliceObject:
+						gslice = true
+					case *_goArrayObject:
+						gslice = false
+					}
+
+					for i := int64(0); i < l; i++ {
+						var p *_property
+						if gslice {
+							p = goSliceGetOwnProperty(o, strconv.FormatInt(i, 10))
+						} else {
+							p = goArrayGetOwnProperty(o, strconv.FormatInt(i, 10))
+						}
+						if p == nil {
+							continue
+						}
+
+						e, ok := p.value.(Value)
+						if !ok {
+							continue
+						}
+
+						ev := self.convertCallParameter(e, tt)
+
+						s.Index(int(i)).Set(ev)
+					}
+				}
+
+				return s
+			}
+		}
+	case reflect.Map:
+		// Only string-keyed maps are supported; every enumerable
+		// property becomes an entry.
+		if o := v._object(); o != nil && t.Key().Kind() == reflect.String {
+			m := reflect.MakeMap(t)
+
+			o.enumerate(false, func(k string) bool {
+				m.SetMapIndex(reflect.ValueOf(k), self.convertCallParameter(o.get(k), t.Elem()))
+				return true
+			})
+
+			return m
+		}
+	case reflect.Func:
+		if t.NumOut() > 1 {
+			panic(self.panicTypeError("converting JavaScript values to Go functions with more than one return value is currently not supported"))
+		}
+
+		// Wrap a JS function in a Go func that forwards arguments and
+		// converts the single return value (if any).
+		if o := v._object(); o != nil && o.class == "Function" {
+			return reflect.MakeFunc(t, func(args []reflect.Value) []reflect.Value {
+				l := make([]interface{}, len(args))
+				for i, a := range args {
+					if a.CanInterface() {
+						l[i] = a.Interface()
+					}
+				}
+
+				rv, err := v.Call(nullValue, l...)
+				if err != nil {
+					panic(err)
+				}
+
+				if t.NumOut() == 0 {
+					return nil
+				}
+
+				return []reflect.Value{self.convertCallParameter(rv, t.Out(0))}
+			})
+		}
+	}
+
+	// String target fallback: try the object's own toString first,
+	// then the default String() rendering.
+	if tk == reflect.String {
+		if o := v._object(); o != nil && o.hasProperty("toString") {
+			if fn := o.get("toString"); fn.IsFunction() {
+				sv, err := fn.Call(v)
+				if err != nil {
+					panic(err)
+				}
+
+				var r reflect.Value
+				if err := catchPanic(func() { r = self.convertCallParameter(sv, t) }); err == nil {
+					return r
+				}
+			}
+		}
+
+		return reflect.ValueOf(v.String())
+	}
+
+	s := "OTTO DOES NOT UNDERSTAND THIS TYPE"
+	switch v.kind {
+	case valueBoolean:
+		s = "boolean"
+	case valueNull:
+		s = "null"
+	case valueNumber:
+		s = "number"
+	case valueString:
+		s = "string"
+	case valueUndefined:
+		s = "undefined"
+	case valueObject:
+		s = v.Class()
+	}
+
+	panic(self.panicTypeError("can't convert from %q to %q", s, t.String()))
+}
+
+func (self *_runtime) toValue(value interface{}) Value {
+ switch value := value.(type) {
+ case Value:
+ return value
+ case func(FunctionCall) Value:
+ var name, file string
+ var line int
+ pc := reflect.ValueOf(value).Pointer()
+ fn := runtime.FuncForPC(pc)
+ if fn != nil {
+ name = fn.Name()
+ file, line = fn.FileLine(pc)
+ file = path.Base(file)
+ }
+ return toValue_object(self.newNativeFunction(name, file, line, value))
+ case _nativeFunction:
+ var name, file string
+ var line int
+ pc := reflect.ValueOf(value).Pointer()
+ fn := runtime.FuncForPC(pc)
+ if fn != nil {
+ name = fn.Name()
+ file, line = fn.FileLine(pc)
+ file = path.Base(file)
+ }
+ return toValue_object(self.newNativeFunction(name, file, line, value))
+ case Object, *Object, _object, *_object:
+ // Nothing happens.
+ // FIXME We should really figure out what can come here.
+ // This catch-all is ugly.
+ default:
+ {
+ value := reflect.ValueOf(value)
+
+ switch value.Kind() {
+ case reflect.Ptr:
+ switch reflect.Indirect(value).Kind() {
+ case reflect.Struct:
+ return toValue_object(self.newGoStructObject(value))
+ case reflect.Array:
+ return toValue_object(self.newGoArray(value))
+ }
+ case reflect.Struct:
+ return toValue_object(self.newGoStructObject(value))
+ case reflect.Map:
+ return toValue_object(self.newGoMapObject(value))
+ case reflect.Slice:
+ return toValue_object(self.newGoSlice(value))
+ case reflect.Array:
+ return toValue_object(self.newGoArray(value))
+ case reflect.Func:
+ var name, file string
+ var line int
+ if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr {
+ pc := v.Pointer()
+ fn := runtime.FuncForPC(pc)
+ if fn != nil {
+ name = fn.Name()
+ file, line = fn.FileLine(pc)
+ file = path.Base(file)
+ }
+ }
+
+ typ := value.Type()
+
+ return toValue_object(self.newNativeFunction(name, file, line, func(c FunctionCall) Value {
+ nargs := typ.NumIn()
+
+ if len(c.ArgumentList) != nargs {
+ if typ.IsVariadic() {
+ if len(c.ArgumentList) < nargs-1 {
+ panic(self.panicRangeError(fmt.Sprintf("expected at least %d arguments; got %d", nargs-1, len(c.ArgumentList))))
+ }
+ } else {
+ panic(self.panicRangeError(fmt.Sprintf("expected %d argument(s); got %d", nargs, len(c.ArgumentList))))
+ }
+ }
+
+ in := make([]reflect.Value, len(c.ArgumentList))
+
+ callSlice := false
+
+ for i, a := range c.ArgumentList {
+ var t reflect.Type
+
+ n := i
+ if n >= nargs-1 && typ.IsVariadic() {
+ if n > nargs-1 {
+ n = nargs - 1
+ }
+
+ t = typ.In(n).Elem()
+ } else {
+ t = typ.In(n)
+ }
+
+ // if this is a variadic Go function, and the caller has supplied
+ // exactly the number of JavaScript arguments required, and this
+ // is the last JavaScript argument, try treating the it as the
+ // actual set of variadic Go arguments. if that succeeds, break
+ // out of the loop.
+ if typ.IsVariadic() && len(c.ArgumentList) == nargs && i == nargs-1 {
+ var v reflect.Value
+ if err := catchPanic(func() { v = self.convertCallParameter(a, typ.In(n)) }); err == nil {
+ in[i] = v
+ callSlice = true
+ break
+ }
+ }
+
+ in[i] = self.convertCallParameter(a, t)
+ }
+
+ var out []reflect.Value
+ if callSlice {
+ out = value.CallSlice(in)
+ } else {
+ out = value.Call(in)
+ }
+
+ switch len(out) {
+ case 0:
+ return Value{}
+ case 1:
+ return self.toValue(out[0].Interface())
+ default:
+ s := make([]interface{}, len(out))
+ for i, v := range out {
+ s[i] = self.toValue(v.Interface())
+ }
+
+ return self.toValue(s)
+ }
+ }))
+ }
+ }
+ }
+
+ return toValue(value)
+}
+
+func (runtime *_runtime) newGoSlice(value reflect.Value) *_object {
+ self := runtime.newGoSliceObject(value)
+ self.prototype = runtime.global.ArrayPrototype
+ return self
+}
+
+func (runtime *_runtime) newGoArray(value reflect.Value) *_object {
+ self := runtime.newGoArrayObject(value)
+ self.prototype = runtime.global.ArrayPrototype
+ return self
+}
+
+func (runtime *_runtime) parse(filename string, src, sm interface{}) (*ast.Program, error) {
+ return parser.ParseFileWithSourceMap(nil, filename, src, sm, 0)
+}
+
+func (runtime *_runtime) cmpl_parse(filename string, src, sm interface{}) (*_nodeProgram, error) {
+ program, err := parser.ParseFileWithSourceMap(nil, filename, src, sm, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return cmpl_parse(program), nil
+}
+
+func (self *_runtime) parseSource(src, sm interface{}) (*_nodeProgram, *ast.Program, error) {
+ switch src := src.(type) {
+ case *ast.Program:
+ return nil, src, nil
+ case *Script:
+ return src.program, nil, nil
+ }
+
+ program, err := self.parse("", src, sm)
+
+ return nil, program, err
+}
+
+func (self *_runtime) cmpl_runOrEval(src, sm interface{}, eval bool) (Value, error) {
+ result := Value{}
+ cmpl_program, program, err := self.parseSource(src, sm)
+ if err != nil {
+ return result, err
+ }
+ if cmpl_program == nil {
+ cmpl_program = cmpl_parse(program)
+ }
+ err = catchPanic(func() {
+ result = self.cmpl_evaluate_nodeProgram(cmpl_program, eval)
+ })
+ switch result.kind {
+ case valueEmpty:
+ result = Value{}
+ case valueReference:
+ result = result.resolve()
+ }
+ return result, err
+}
+
+func (self *_runtime) cmpl_run(src, sm interface{}) (Value, error) {
+ return self.cmpl_runOrEval(src, sm, false)
+}
+
+func (self *_runtime) cmpl_eval(src, sm interface{}) (Value, error) {
+ return self.cmpl_runOrEval(src, sm, true)
+}
+
+func (self *_runtime) parseThrow(err error) {
+ if err == nil {
+ return
+ }
+ switch err := err.(type) {
+ case parser.ErrorList:
+ {
+ err := err[0]
+ if err.Message == "Invalid left-hand side in assignment" {
+ panic(self.panicReferenceError(err.Message))
+ }
+ panic(self.panicSyntaxError(err.Message))
+ }
+ }
+ panic(self.panicSyntaxError(err.Error()))
+}
+
+func (self *_runtime) cmpl_parseOrThrow(src, sm interface{}) *_nodeProgram {
+ program, err := self.cmpl_parse("", src, sm)
+ self.parseThrow(err) // Will panic/throw appropriately
+ return program
+}
diff --git a/vendor/github.com/robertkrimen/otto/scope.go b/vendor/github.com/robertkrimen/otto/scope.go
new file mode 100644
index 000000000..465e6b98c
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/scope.go
@@ -0,0 +1,35 @@
+package otto
+
+// _scope:
+// entryFile
+// entryIdx
+// top?
+// outer => nil
+
+// _stash:
+// lexical
+// variable
+//
+// _thisStash (ObjectEnvironment)
+// _fnStash
+// _dclStash
+
+// An ECMA-262 ExecutionContext
+type _scope struct {
+ lexical _stash
+ variable _stash
+ this *_object
+ eval bool // Replace this with kind?
+ outer *_scope
+ depth int
+
+ frame _frame
+}
+
+func newScope(lexical _stash, variable _stash, this *_object) *_scope {
+ return &_scope{
+ lexical: lexical,
+ variable: variable,
+ this: this,
+ }
+}
diff --git a/vendor/github.com/robertkrimen/otto/script.go b/vendor/github.com/robertkrimen/otto/script.go
new file mode 100644
index 000000000..2ae890ecd
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/script.go
@@ -0,0 +1,119 @@
+package otto
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+)
+
+var ErrVersion = errors.New("version mismatch")
+
+var scriptVersion = "2014-04-13/1"
+
+// Script is a handle for some (reusable) JavaScript.
+// Passing a Script value to a run method will evaluate the JavaScript.
+//
+type Script struct {
+ version string
+ program *_nodeProgram
+ filename string
+ src string
+}
+
+// Compile will parse the given source and return a Script value or nil and
+// an error if there was a problem during compilation.
+//
+// script, err := vm.Compile("", `var abc; if (!abc) abc = 0; abc += 2; abc;`)
+// vm.Run(script)
+//
+func (self *Otto) Compile(filename string, src interface{}) (*Script, error) {
+ return self.CompileWithSourceMap(filename, src, nil)
+}
+
+// CompileWithSourceMap does the same thing as Compile, but with the obvious
+// difference of applying a source map.
+func (self *Otto) CompileWithSourceMap(filename string, src, sm interface{}) (*Script, error) {
+ program, err := self.runtime.parse(filename, src, sm)
+ if err != nil {
+ return nil, err
+ }
+
+ cmpl_program := cmpl_parse(program)
+
+ script := &Script{
+ version: scriptVersion,
+ program: cmpl_program,
+ filename: filename,
+ src: program.File.Source(),
+ }
+
+ return script, nil
+}
+
+func (self *Script) String() string {
+ return "// " + self.filename + "\n" + self.src
+}
+
+// MarshalBinary will marshal a script into a binary form. A marshalled script
+// that is later unmarshalled can be executed on the same version of the otto runtime.
+//
+// The binary format can change at any time and should be considered unspecified and opaque.
+//
+func (self *Script) marshalBinary() ([]byte, error) {
+ var bfr bytes.Buffer
+ encoder := gob.NewEncoder(&bfr)
+ err := encoder.Encode(self.version)
+ if err != nil {
+ return nil, err
+ }
+ err = encoder.Encode(self.program)
+ if err != nil {
+ return nil, err
+ }
+ err = encoder.Encode(self.filename)
+ if err != nil {
+ return nil, err
+ }
+ err = encoder.Encode(self.src)
+ if err != nil {
+ return nil, err
+ }
+ return bfr.Bytes(), nil
+}
+
+// UnmarshalBinary will vivify a marshalled script into something usable. If the script was
+// originally marshalled on a different version of the otto runtime, then this method
+// will return an error.
+//
+// The binary format can change at any time and should be considered unspecified and opaque.
+//
+func (self *Script) unmarshalBinary(data []byte) error {
+ decoder := gob.NewDecoder(bytes.NewReader(data))
+ err := decoder.Decode(&self.version)
+ if err != nil {
+ goto error
+ }
+ if self.version != scriptVersion {
+ err = ErrVersion
+ goto error
+ }
+ err = decoder.Decode(&self.program)
+ if err != nil {
+ goto error
+ }
+ err = decoder.Decode(&self.filename)
+ if err != nil {
+ goto error
+ }
+ err = decoder.Decode(&self.src)
+ if err != nil {
+ goto error
+ }
+ return nil
+error:
+ self.version = ""
+ self.program = nil
+ self.filename = ""
+ self.src = ""
+ return err
+}
diff --git a/vendor/github.com/robertkrimen/otto/stash.go b/vendor/github.com/robertkrimen/otto/stash.go
new file mode 100644
index 000000000..0d3ffa511
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/stash.go
@@ -0,0 +1,296 @@
+package otto
+
+import (
+ "fmt"
+)
+
+// ======
+// _stash
+// ======
+
+type _stash interface {
+ hasBinding(string) bool //
+ createBinding(string, bool, Value) // CreateMutableBinding
+ setBinding(string, Value, bool) // SetMutableBinding
+ getBinding(string, bool) Value // GetBindingValue
+ deleteBinding(string) bool //
+ setValue(string, Value, bool) // createBinding + setBinding
+
+ outer() _stash
+ runtime() *_runtime
+
+ newReference(string, bool, _at) _reference
+
+ clone(clone *_clone) _stash
+}
+
+// ==========
+// _objectStash
+// ==========
+
+type _objectStash struct {
+ _runtime *_runtime
+ _outer _stash
+ object *_object
+}
+
+func (self *_objectStash) runtime() *_runtime {
+ return self._runtime
+}
+
+func (runtime *_runtime) newObjectStash(object *_object, outer _stash) *_objectStash {
+ if object == nil {
+ object = runtime.newBaseObject()
+ object.class = "environment"
+ }
+ return &_objectStash{
+ _runtime: runtime,
+ _outer: outer,
+ object: object,
+ }
+}
+
+func (in *_objectStash) clone(clone *_clone) _stash {
+ out, exists := clone.objectStash(in)
+ if exists {
+ return out
+ }
+ *out = _objectStash{
+ clone.runtime,
+ clone.stash(in._outer),
+ clone.object(in.object),
+ }
+ return out
+}
+
+func (self *_objectStash) hasBinding(name string) bool {
+ return self.object.hasProperty(name)
+}
+
+func (self *_objectStash) createBinding(name string, deletable bool, value Value) {
+ if self.object.hasProperty(name) {
+ panic(hereBeDragons())
+ }
+ mode := _propertyMode(0111)
+ if !deletable {
+ mode = _propertyMode(0110)
+ }
+ // TODO False?
+ self.object.defineProperty(name, value, mode, false)
+}
+
+func (self *_objectStash) setBinding(name string, value Value, strict bool) {
+ self.object.put(name, value, strict)
+}
+
+func (self *_objectStash) setValue(name string, value Value, throw bool) {
+ if !self.hasBinding(name) {
+ self.createBinding(name, true, value) // Configurable by default
+ } else {
+ self.setBinding(name, value, throw)
+ }
+}
+
+func (self *_objectStash) getBinding(name string, throw bool) Value {
+ if self.object.hasProperty(name) {
+ return self.object.get(name)
+ }
+ if throw { // strict?
+ panic(self._runtime.panicReferenceError("Not Defined", name))
+ }
+ return Value{}
+}
+
+func (self *_objectStash) deleteBinding(name string) bool {
+ return self.object.delete(name, false)
+}
+
+func (self *_objectStash) outer() _stash {
+ return self._outer
+}
+
+func (self *_objectStash) newReference(name string, strict bool, at _at) _reference {
+ return newPropertyReference(self._runtime, self.object, name, strict, at)
+}
+
+// =========
+// _dclStash
+// =========
+
+type _dclStash struct {
+ _runtime *_runtime
+ _outer _stash
+ property map[string]_dclProperty
+}
+
+type _dclProperty struct {
+ value Value
+ mutable bool
+ deletable bool
+ readable bool
+}
+
+func (runtime *_runtime) newDeclarationStash(outer _stash) *_dclStash {
+ return &_dclStash{
+ _runtime: runtime,
+ _outer: outer,
+ property: map[string]_dclProperty{},
+ }
+}
+
+func (in *_dclStash) clone(clone *_clone) _stash {
+ out, exists := clone.dclStash(in)
+ if exists {
+ return out
+ }
+ property := make(map[string]_dclProperty, len(in.property))
+ for index, value := range in.property {
+ property[index] = clone.dclProperty(value)
+ }
+ *out = _dclStash{
+ clone.runtime,
+ clone.stash(in._outer),
+ property,
+ }
+ return out
+}
+
+func (self *_dclStash) hasBinding(name string) bool {
+ _, exists := self.property[name]
+ return exists
+}
+
+func (self *_dclStash) runtime() *_runtime {
+ return self._runtime
+}
+
+func (self *_dclStash) createBinding(name string, deletable bool, value Value) {
+ _, exists := self.property[name]
+ if exists {
+ panic(fmt.Errorf("createBinding: %s: already exists", name))
+ }
+ self.property[name] = _dclProperty{
+ value: value,
+ mutable: true,
+ deletable: deletable,
+ readable: false,
+ }
+}
+
+func (self *_dclStash) setBinding(name string, value Value, strict bool) {
+ property, exists := self.property[name]
+ if !exists {
+ panic(fmt.Errorf("setBinding: %s: missing", name))
+ }
+ if property.mutable {
+ property.value = value
+ self.property[name] = property
+ } else {
+ self._runtime.typeErrorResult(strict)
+ }
+}
+
+func (self *_dclStash) setValue(name string, value Value, throw bool) {
+ if !self.hasBinding(name) {
+ self.createBinding(name, false, value) // NOT deletable by default
+ } else {
+ self.setBinding(name, value, throw)
+ }
+}
+
+// FIXME This is called a __lot__
+func (self *_dclStash) getBinding(name string, throw bool) Value {
+ property, exists := self.property[name]
+ if !exists {
+ panic(fmt.Errorf("getBinding: %s: missing", name))
+ }
+ if !property.mutable && !property.readable {
+ if throw { // strict?
+ panic(self._runtime.panicTypeError())
+ }
+ return Value{}
+ }
+ return property.value
+}
+
+func (self *_dclStash) deleteBinding(name string) bool {
+ property, exists := self.property[name]
+ if !exists {
+ return true
+ }
+ if !property.deletable {
+ return false
+ }
+ delete(self.property, name)
+ return true
+}
+
+func (self *_dclStash) outer() _stash {
+ return self._outer
+}
+
+func (self *_dclStash) newReference(name string, strict bool, _ _at) _reference {
+ return &_stashReference{
+ name: name,
+ base: self,
+ }
+}
+
+// ========
+// _fnStash
+// ========
+
+type _fnStash struct {
+ _dclStash
+ arguments *_object
+ indexOfArgumentName map[string]string
+}
+
+func (runtime *_runtime) newFunctionStash(outer _stash) *_fnStash {
+ return &_fnStash{
+ _dclStash: _dclStash{
+ _runtime: runtime,
+ _outer: outer,
+ property: map[string]_dclProperty{},
+ },
+ }
+}
+
+func (in *_fnStash) clone(clone *_clone) _stash {
+ out, exists := clone.fnStash(in)
+ if exists {
+ return out
+ }
+ dclStash := in._dclStash.clone(clone).(*_dclStash)
+ index := make(map[string]string, len(in.indexOfArgumentName))
+ for name, value := range in.indexOfArgumentName {
+ index[name] = value
+ }
+ *out = _fnStash{
+ _dclStash: *dclStash,
+ arguments: clone.object(in.arguments),
+ indexOfArgumentName: index,
+ }
+ return out
+}
+
+func getStashProperties(stash _stash) (keys []string) {
+ switch vars := stash.(type) {
+ case *_dclStash:
+ for k := range vars.property {
+ keys = append(keys, k)
+ }
+ case *_fnStash:
+ for k := range vars.property {
+ keys = append(keys, k)
+ }
+ case *_objectStash:
+ for k := range vars.object.property {
+ keys = append(keys, k)
+ }
+ default:
+ panic("unknown stash type")
+ }
+
+ return
+}
diff --git a/vendor/github.com/robertkrimen/otto/token/Makefile b/vendor/github.com/robertkrimen/otto/token/Makefile
new file mode 100644
index 000000000..1e85c7348
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/token/Makefile
@@ -0,0 +1,2 @@
+token_const.go: tokenfmt
+ ./$^ | gofmt > $@
diff --git a/vendor/github.com/robertkrimen/otto/token/README.markdown b/vendor/github.com/robertkrimen/otto/token/README.markdown
new file mode 100644
index 000000000..ff3b16104
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/token/README.markdown
@@ -0,0 +1,171 @@
+# token
+--
+ import "github.com/robertkrimen/otto/token"
+
+Package token defines constants representing the lexical tokens of JavaScript
+(ECMA5).
+
+## Usage
+
+```go
+const (
+ ILLEGAL
+ EOF
+ COMMENT
+ KEYWORD
+
+ STRING
+ BOOLEAN
+ NULL
+ NUMBER
+ IDENTIFIER
+
+ PLUS // +
+ MINUS // -
+ MULTIPLY // *
+ SLASH // /
+ REMAINDER // %
+
+ AND // &
+ OR // |
+ EXCLUSIVE_OR // ^
+ SHIFT_LEFT // <<
+ SHIFT_RIGHT // >>
+ UNSIGNED_SHIFT_RIGHT // >>>
+ AND_NOT // &^
+
+ ADD_ASSIGN // +=
+ SUBTRACT_ASSIGN // -=
+ MULTIPLY_ASSIGN // *=
+ QUOTIENT_ASSIGN // /=
+ REMAINDER_ASSIGN // %=
+
+ AND_ASSIGN // &=
+ OR_ASSIGN // |=
+ EXCLUSIVE_OR_ASSIGN // ^=
+ SHIFT_LEFT_ASSIGN // <<=
+ SHIFT_RIGHT_ASSIGN // >>=
+ UNSIGNED_SHIFT_RIGHT_ASSIGN // >>>=
+ AND_NOT_ASSIGN // &^=
+
+ LOGICAL_AND // &&
+ LOGICAL_OR // ||
+ INCREMENT // ++
+ DECREMENT // --
+
+ EQUAL // ==
+ STRICT_EQUAL // ===
+ LESS // <
+ GREATER // >
+ ASSIGN // =
+ NOT // !
+
+ BITWISE_NOT // ~
+
+ NOT_EQUAL // !=
+ STRICT_NOT_EQUAL // !==
+ LESS_OR_EQUAL // <=
+ GREATER_OR_EQUAL // >=
+
+ LEFT_PARENTHESIS // (
+ LEFT_BRACKET // [
+ LEFT_BRACE // {
+ COMMA // ,
+ PERIOD // .
+
+ RIGHT_PARENTHESIS // )
+ RIGHT_BRACKET // ]
+ RIGHT_BRACE // }
+ SEMICOLON // ;
+ COLON // :
+ QUESTION_MARK // ?
+
+ IF
+ IN
+ DO
+
+ VAR
+ FOR
+ NEW
+ TRY
+
+ THIS
+ ELSE
+ CASE
+ VOID
+ WITH
+
+ WHILE
+ BREAK
+ CATCH
+ THROW
+
+ RETURN
+ TYPEOF
+ DELETE
+ SWITCH
+
+ DEFAULT
+ FINALLY
+
+ FUNCTION
+ CONTINUE
+ DEBUGGER
+
+ INSTANCEOF
+)
+```
+
+#### type Token
+
+```go
+type Token int
+```
+
+Token is the set of lexical tokens in JavaScript (ECMA5).
+
+#### func IsKeyword
+
+```go
+func IsKeyword(literal string) (Token, bool)
+```
+IsKeyword returns the keyword token if literal is a keyword, a KEYWORD token if
+the literal is a future keyword (const, let, class, super, ...), or 0 if the
+literal is not a keyword.
+
+If the literal is a keyword, IsKeyword returns a second value indicating if the
+literal is considered a future keyword in strict-mode only.
+
+7.6.1.2 Future Reserved Words:
+
+ const
+ class
+ enum
+ export
+ extends
+ import
+ super
+
+7.6.1.2 Future Reserved Words (strict):
+
+ implements
+ interface
+ let
+ package
+ private
+ protected
+ public
+ static
+
+#### func (Token) String
+
+```go
+func (tkn Token) String() string
+```
+String returns the string corresponding to the token. For operators, delimiters,
+and keywords the string is the actual token string (e.g., for the token PLUS,
+the String() is "+"). For all other tokens the string corresponds to the token
+name (e.g. for the token IDENTIFIER, the string is "IDENTIFIER").
+
+--
+**godocdown** http://github.com/robertkrimen/godocdown
diff --git a/vendor/github.com/robertkrimen/otto/token/token.go b/vendor/github.com/robertkrimen/otto/token/token.go
new file mode 100644
index 000000000..0e941ac96
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/token/token.go
@@ -0,0 +1,116 @@
+// Package token defines constants representing the lexical tokens of JavaScript (ECMA5).
+package token
+
+import (
+ "strconv"
+)
+
+// Token is the set of lexical tokens in JavaScript (ECMA5).
+type Token int
+
+// String returns the string corresponding to the token.
+// For operators, delimiters, and keywords the string is the actual
+// token string (e.g., for the token PLUS, the String() is
+// "+"). For all other tokens the string corresponds to the token
+// name (e.g. for the token IDENTIFIER, the string is "IDENTIFIER").
+//
+func (tkn Token) String() string {
+ if 0 == tkn {
+ return "UNKNOWN"
+ }
+ if tkn < Token(len(token2string)) {
+ return token2string[tkn]
+ }
+ return "token(" + strconv.Itoa(int(tkn)) + ")"
+}
+
+// This is not used for anything
+func (tkn Token) precedence(in bool) int {
+
+ switch tkn {
+ case LOGICAL_OR:
+ return 1
+
+ case LOGICAL_AND:
+ return 2
+
+ case OR, OR_ASSIGN:
+ return 3
+
+ case EXCLUSIVE_OR:
+ return 4
+
+ case AND, AND_ASSIGN, AND_NOT, AND_NOT_ASSIGN:
+ return 5
+
+ case EQUAL,
+ NOT_EQUAL,
+ STRICT_EQUAL,
+ STRICT_NOT_EQUAL:
+ return 6
+
+ case LESS, GREATER, LESS_OR_EQUAL, GREATER_OR_EQUAL, INSTANCEOF:
+ return 7
+
+ case IN:
+ if in {
+ return 7
+ }
+ return 0
+
+ case SHIFT_LEFT, SHIFT_RIGHT, UNSIGNED_SHIFT_RIGHT:
+ fallthrough
+ case SHIFT_LEFT_ASSIGN, SHIFT_RIGHT_ASSIGN, UNSIGNED_SHIFT_RIGHT_ASSIGN:
+ return 8
+
+ case PLUS, MINUS, ADD_ASSIGN, SUBTRACT_ASSIGN:
+ return 9
+
+ case MULTIPLY, SLASH, REMAINDER, MULTIPLY_ASSIGN, QUOTIENT_ASSIGN, REMAINDER_ASSIGN:
+ return 11
+ }
+ return 0
+}
+
+type _keyword struct {
+ token Token
+ futureKeyword bool
+ strict bool
+}
+
+// IsKeyword returns the keyword token if literal is a keyword, a KEYWORD token
+// if the literal is a future keyword (const, let, class, super, ...), or 0 if the literal is not a keyword.
+//
+// If the literal is a keyword, IsKeyword returns a second value indicating if the literal
+// is considered a future keyword in strict-mode only.
+//
+// 7.6.1.2 Future Reserved Words:
+//
+// const
+// class
+// enum
+// export
+// extends
+// import
+// super
+//
+// 7.6.1.2 Future Reserved Words (strict):
+//
+// implements
+// interface
+// let
+// package
+// private
+// protected
+// public
+// static
+//
+func IsKeyword(literal string) (Token, bool) {
+ if keyword, exists := keywordTable[literal]; exists {
+ if keyword.futureKeyword {
+ return KEYWORD, keyword.strict
+ }
+ return keyword.token, false
+ }
+ return 0, false
+}
diff --git a/vendor/github.com/robertkrimen/otto/token/token_const.go b/vendor/github.com/robertkrimen/otto/token/token_const.go
new file mode 100644
index 000000000..b1d83c6de
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/token/token_const.go
@@ -0,0 +1,349 @@
+package token
+
+const (
+ _ Token = iota
+
+ ILLEGAL
+ EOF
+ COMMENT
+ KEYWORD
+
+ STRING
+ BOOLEAN
+ NULL
+ NUMBER
+ IDENTIFIER
+
+ PLUS // +
+ MINUS // -
+ MULTIPLY // *
+ SLASH // /
+ REMAINDER // %
+
+ AND // &
+ OR // |
+ EXCLUSIVE_OR // ^
+ SHIFT_LEFT // <<
+ SHIFT_RIGHT // >>
+ UNSIGNED_SHIFT_RIGHT // >>>
+ AND_NOT // &^
+
+ ADD_ASSIGN // +=
+ SUBTRACT_ASSIGN // -=
+ MULTIPLY_ASSIGN // *=
+ QUOTIENT_ASSIGN // /=
+ REMAINDER_ASSIGN // %=
+
+ AND_ASSIGN // &=
+ OR_ASSIGN // |=
+ EXCLUSIVE_OR_ASSIGN // ^=
+ SHIFT_LEFT_ASSIGN // <<=
+ SHIFT_RIGHT_ASSIGN // >>=
+ UNSIGNED_SHIFT_RIGHT_ASSIGN // >>>=
+ AND_NOT_ASSIGN // &^=
+
+ LOGICAL_AND // &&
+ LOGICAL_OR // ||
+ INCREMENT // ++
+ DECREMENT // --
+
+ EQUAL // ==
+ STRICT_EQUAL // ===
+ LESS // <
+ GREATER // >
+ ASSIGN // =
+ NOT // !
+
+ BITWISE_NOT // ~
+
+ NOT_EQUAL // !=
+ STRICT_NOT_EQUAL // !==
+ LESS_OR_EQUAL // <=
+ GREATER_OR_EQUAL // >=
+
+ LEFT_PARENTHESIS // (
+ LEFT_BRACKET // [
+ LEFT_BRACE // {
+ COMMA // ,
+ PERIOD // .
+
+ RIGHT_PARENTHESIS // )
+ RIGHT_BRACKET // ]
+ RIGHT_BRACE // }
+ SEMICOLON // ;
+ COLON // :
+ QUESTION_MARK // ?
+
+ firstKeyword
+ IF
+ IN
+ DO
+
+ VAR
+ FOR
+ NEW
+ TRY
+
+ THIS
+ ELSE
+ CASE
+ VOID
+ WITH
+
+ WHILE
+ BREAK
+ CATCH
+ THROW
+
+ RETURN
+ TYPEOF
+ DELETE
+ SWITCH
+
+ DEFAULT
+ FINALLY
+
+ FUNCTION
+ CONTINUE
+ DEBUGGER
+
+ INSTANCEOF
+ lastKeyword
+)
+
+var token2string = [...]string{
+ ILLEGAL: "ILLEGAL",
+ EOF: "EOF",
+ COMMENT: "COMMENT",
+ KEYWORD: "KEYWORD",
+ STRING: "STRING",
+ BOOLEAN: "BOOLEAN",
+ NULL: "NULL",
+ NUMBER: "NUMBER",
+ IDENTIFIER: "IDENTIFIER",
+ PLUS: "+",
+ MINUS: "-",
+ MULTIPLY: "*",
+ SLASH: "/",
+ REMAINDER: "%",
+ AND: "&",
+ OR: "|",
+ EXCLUSIVE_OR: "^",
+ SHIFT_LEFT: "<<",
+ SHIFT_RIGHT: ">>",
+ UNSIGNED_SHIFT_RIGHT: ">>>",
+ AND_NOT: "&^",
+ ADD_ASSIGN: "+=",
+ SUBTRACT_ASSIGN: "-=",
+ MULTIPLY_ASSIGN: "*=",
+ QUOTIENT_ASSIGN: "/=",
+ REMAINDER_ASSIGN: "%=",
+ AND_ASSIGN: "&=",
+ OR_ASSIGN: "|=",
+ EXCLUSIVE_OR_ASSIGN: "^=",
+ SHIFT_LEFT_ASSIGN: "<<=",
+ SHIFT_RIGHT_ASSIGN: ">>=",
+ UNSIGNED_SHIFT_RIGHT_ASSIGN: ">>>=",
+ AND_NOT_ASSIGN: "&^=",
+ LOGICAL_AND: "&&",
+ LOGICAL_OR: "||",
+ INCREMENT: "++",
+ DECREMENT: "--",
+ EQUAL: "==",
+ STRICT_EQUAL: "===",
+ LESS: "<",
+ GREATER: ">",
+ ASSIGN: "=",
+ NOT: "!",
+ BITWISE_NOT: "~",
+ NOT_EQUAL: "!=",
+ STRICT_NOT_EQUAL: "!==",
+ LESS_OR_EQUAL: "<=",
+ GREATER_OR_EQUAL: ">=",
+ LEFT_PARENTHESIS: "(",
+ LEFT_BRACKET: "[",
+ LEFT_BRACE: "{",
+ COMMA: ",",
+ PERIOD: ".",
+ RIGHT_PARENTHESIS: ")",
+ RIGHT_BRACKET: "]",
+ RIGHT_BRACE: "}",
+ SEMICOLON: ";",
+ COLON: ":",
+ QUESTION_MARK: "?",
+ IF: "if",
+ IN: "in",
+ DO: "do",
+ VAR: "var",
+ FOR: "for",
+ NEW: "new",
+ TRY: "try",
+ THIS: "this",
+ ELSE: "else",
+ CASE: "case",
+ VOID: "void",
+ WITH: "with",
+ WHILE: "while",
+ BREAK: "break",
+ CATCH: "catch",
+ THROW: "throw",
+ RETURN: "return",
+ TYPEOF: "typeof",
+ DELETE: "delete",
+ SWITCH: "switch",
+ DEFAULT: "default",
+ FINALLY: "finally",
+ FUNCTION: "function",
+ CONTINUE: "continue",
+ DEBUGGER: "debugger",
+ INSTANCEOF: "instanceof",
+}
+
+var keywordTable = map[string]_keyword{
+ "if": _keyword{
+ token: IF,
+ },
+ "in": _keyword{
+ token: IN,
+ },
+ "do": _keyword{
+ token: DO,
+ },
+ "var": _keyword{
+ token: VAR,
+ },
+ "for": _keyword{
+ token: FOR,
+ },
+ "new": _keyword{
+ token: NEW,
+ },
+ "try": _keyword{
+ token: TRY,
+ },
+ "this": _keyword{
+ token: THIS,
+ },
+ "else": _keyword{
+ token: ELSE,
+ },
+ "case": _keyword{
+ token: CASE,
+ },
+ "void": _keyword{
+ token: VOID,
+ },
+ "with": _keyword{
+ token: WITH,
+ },
+ "while": _keyword{
+ token: WHILE,
+ },
+ "break": _keyword{
+ token: BREAK,
+ },
+ "catch": _keyword{
+ token: CATCH,
+ },
+ "throw": _keyword{
+ token: THROW,
+ },
+ "return": _keyword{
+ token: RETURN,
+ },
+ "typeof": _keyword{
+ token: TYPEOF,
+ },
+ "delete": _keyword{
+ token: DELETE,
+ },
+ "switch": _keyword{
+ token: SWITCH,
+ },
+ "default": _keyword{
+ token: DEFAULT,
+ },
+ "finally": _keyword{
+ token: FINALLY,
+ },
+ "function": _keyword{
+ token: FUNCTION,
+ },
+ "continue": _keyword{
+ token: CONTINUE,
+ },
+ "debugger": _keyword{
+ token: DEBUGGER,
+ },
+ "instanceof": _keyword{
+ token: INSTANCEOF,
+ },
+ "const": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ },
+ "class": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ },
+ "enum": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ },
+ "export": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ },
+ "extends": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ },
+ "import": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ },
+ "super": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ },
+ "implements": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+ "interface": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+ "let": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+ "package": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+ "private": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+ "protected": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+ "public": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+ "static": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+}
diff --git a/vendor/github.com/robertkrimen/otto/token/tokenfmt b/vendor/github.com/robertkrimen/otto/token/tokenfmt
new file mode 100755
index 000000000..63dd5d9e6
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/token/tokenfmt
@@ -0,0 +1,222 @@
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+
+my (%token, @order, @keywords);
+
+{
+ my $keywords;
+ my @const;
+ push @const, <<_END_;
+package token
+
+const(
+ _ Token = iota
+_END_
+
+ for (split m/\n/, <<_END_) {
+ILLEGAL
+EOF
+COMMENT
+KEYWORD
+
+STRING
+BOOLEAN
+NULL
+NUMBER
+IDENTIFIER
+
+PLUS +
+MINUS -
+MULTIPLY *
+SLASH /
+REMAINDER %
+
+AND &
+OR |
+EXCLUSIVE_OR ^
+SHIFT_LEFT <<
+SHIFT_RIGHT >>
+UNSIGNED_SHIFT_RIGHT >>>
+AND_NOT &^
+
+ADD_ASSIGN +=
+SUBTRACT_ASSIGN -=
+MULTIPLY_ASSIGN *=
+QUOTIENT_ASSIGN /=
+REMAINDER_ASSIGN %=
+
+AND_ASSIGN &=
+OR_ASSIGN |=
+EXCLUSIVE_OR_ASSIGN ^=
+SHIFT_LEFT_ASSIGN <<=
+SHIFT_RIGHT_ASSIGN >>=
+UNSIGNED_SHIFT_RIGHT_ASSIGN >>>=
+AND_NOT_ASSIGN &^=
+
+LOGICAL_AND &&
+LOGICAL_OR ||
+INCREMENT ++
+DECREMENT --
+
+EQUAL ==
+STRICT_EQUAL ===
+LESS <
+GREATER >
+ASSIGN =
+NOT !
+
+BITWISE_NOT ~
+
+NOT_EQUAL !=
+STRICT_NOT_EQUAL !==
+LESS_OR_EQUAL <=
+GREATER_OR_EQUAL >=
+
+LEFT_PARENTHESIS (
+LEFT_BRACKET [
+LEFT_BRACE {
+COMMA ,
+PERIOD .
+
+RIGHT_PARENTHESIS )
+RIGHT_BRACKET ]
+RIGHT_BRACE }
+SEMICOLON ;
+COLON :
+QUESTION_MARK ?
+
+firstKeyword
+IF
+IN
+DO
+
+VAR
+FOR
+NEW
+TRY
+
+THIS
+ELSE
+CASE
+VOID
+WITH
+
+WHILE
+BREAK
+CATCH
+THROW
+
+RETURN
+TYPEOF
+DELETE
+SWITCH
+
+DEFAULT
+FINALLY
+
+FUNCTION
+CONTINUE
+DEBUGGER
+
+INSTANCEOF
+lastKeyword
+_END_
+ chomp;
+
+ next if m/^\s*#/;
+
+ my ($name, $symbol) = m/(\w+)\s*(\S+)?/;
+
+ if (defined $symbol) {
+ push @order, $name;
+ push @const, "$name // $symbol";
+ $token{$name} = $symbol;
+ } elsif (defined $name) {
+ $keywords ||= $name eq 'firstKeyword';
+
+ push @const, $name;
+ #$const[-1] .= " Token = iota" if 2 == @const;
+ if ($name =~ m/^([A-Z]+)/) {
+ push @keywords, $name if $keywords;
+ push @order, $name;
+ if ($token{SEMICOLON}) {
+ $token{$name} = lc $1;
+ } else {
+ $token{$name} = $name;
+ }
+ }
+ } else {
+ push @const, "";
+ }
+
+ }
+ push @const, ")";
+ print join "\n", @const, "";
+}
+
+{
+ print <<_END_;
+
+var token2string = [...]string{
+_END_
+ for my $name (@order) {
+ print "$name: \"$token{$name}\",\n";
+ }
+ print <<_END_;
+}
+_END_
+
+ print <<_END_;
+
+var keywordTable = map[string]_keyword{
+_END_
+ for my $name (@keywords) {
+ print <<_END_
+ "@{[ lc $name ]}": _keyword{
+ token: $name,
+ },
+_END_
+ }
+
+ for my $name (qw/
+ const
+ class
+ enum
+ export
+ extends
+ import
+ super
+ /) {
+ print <<_END_
+ "$name": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ },
+_END_
+ }
+
+ for my $name (qw/
+ implements
+ interface
+ let
+ package
+ private
+ protected
+ public
+ static
+ /) {
+ print <<_END_
+ "$name": _keyword{
+ token: KEYWORD,
+ futureKeyword: true,
+ strict: true,
+ },
+_END_
+ }
+
+ print <<_END_;
+}
+_END_
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_arguments.go b/vendor/github.com/robertkrimen/otto/type_arguments.go
new file mode 100644
index 000000000..841d75855
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_arguments.go
@@ -0,0 +1,106 @@
+package otto
+
+import (
+ "strconv"
+)
+
+func (runtime *_runtime) newArgumentsObject(indexOfParameterName []string, stash _stash, length int) *_object {
+ self := runtime.newClassObject("Arguments")
+
+ for index, _ := range indexOfParameterName {
+ name := strconv.FormatInt(int64(index), 10)
+ objectDefineOwnProperty(self, name, _property{Value{}, 0111}, false)
+ }
+
+ self.objectClass = _classArguments
+ self.value = _argumentsObject{
+ indexOfParameterName: indexOfParameterName,
+ stash: stash,
+ }
+
+ self.prototype = runtime.global.ObjectPrototype
+
+ self.defineProperty("length", toValue_int(length), 0101, false)
+
+ return self
+}
+
+type _argumentsObject struct {
+ indexOfParameterName []string
+ // function(abc, def, ghi)
+ // indexOfParameterName[0] = "abc"
+ // indexOfParameterName[1] = "def"
+ // indexOfParameterName[2] = "ghi"
+ // ...
+ stash _stash
+}
+
+func (in _argumentsObject) clone(clone *_clone) _argumentsObject {
+ indexOfParameterName := make([]string, len(in.indexOfParameterName))
+ copy(indexOfParameterName, in.indexOfParameterName)
+ return _argumentsObject{
+ indexOfParameterName,
+ clone.stash(in.stash),
+ }
+}
+
+func (self _argumentsObject) get(name string) (Value, bool) {
+ index := stringToArrayIndex(name)
+ if index >= 0 && index < int64(len(self.indexOfParameterName)) {
+ name := self.indexOfParameterName[index]
+ if name == "" {
+ return Value{}, false
+ }
+ return self.stash.getBinding(name, false), true
+ }
+ return Value{}, false
+}
+
+func (self _argumentsObject) put(name string, value Value) {
+ index := stringToArrayIndex(name)
+ name = self.indexOfParameterName[index]
+ self.stash.setBinding(name, value, false)
+}
+
+func (self _argumentsObject) delete(name string) {
+ index := stringToArrayIndex(name)
+ self.indexOfParameterName[index] = ""
+}
+
+func argumentsGet(self *_object, name string) Value {
+ if value, exists := self.value.(_argumentsObject).get(name); exists {
+ return value
+ }
+ return objectGet(self, name)
+}
+
+func argumentsGetOwnProperty(self *_object, name string) *_property {
+ property := objectGetOwnProperty(self, name)
+ if value, exists := self.value.(_argumentsObject).get(name); exists {
+ property.value = value
+ }
+ return property
+}
+
+func argumentsDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool {
+ if _, exists := self.value.(_argumentsObject).get(name); exists {
+ if !objectDefineOwnProperty(self, name, descriptor, false) {
+ return self.runtime.typeErrorResult(throw)
+ }
+ if value, valid := descriptor.value.(Value); valid {
+ self.value.(_argumentsObject).put(name, value)
+ }
+ return true
+ }
+ return objectDefineOwnProperty(self, name, descriptor, throw)
+}
+
+func argumentsDelete(self *_object, name string, throw bool) bool {
+ if !objectDelete(self, name, throw) {
+ return false
+ }
+ if _, exists := self.value.(_argumentsObject).get(name); exists {
+ self.value.(_argumentsObject).delete(name)
+ }
+ return true
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_array.go b/vendor/github.com/robertkrimen/otto/type_array.go
new file mode 100644
index 000000000..c8a974b02
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_array.go
@@ -0,0 +1,109 @@
+package otto
+
+import (
+	"strconv"
+)
+
+// newArrayObject creates a JavaScript Array instance: class "Array", a
+// writable-only (mode 0100) "length" property, and the Array object-class
+// hooks installed via _classArray.
+func (runtime *_runtime) newArrayObject(length uint32) *_object {
+	self := runtime.newObject()
+	self.class = "Array"
+	self.defineProperty("length", toValue_uint32(length), 0100, false)
+	self.objectClass = _classArray
+	return self
+}
+
+// isArray reports whether the object is a JavaScript Array or a wrapped Go
+// array/slice (class "GoArray").
+func isArray(object *_object) bool {
+	return object != nil && (object.class == "Array" || object.class == "GoArray")
+}
+
+// objectLength returns the length of an array-like object, or 0 for nil or
+// unrecognized classes. Note the stored representations differ by class:
+// "Array" keeps length as uint32, "String" and "GoArray" keep it as int.
+func objectLength(object *_object) uint32 {
+	if object == nil {
+		return 0
+	}
+	switch object.class {
+	case "Array":
+		return object.get("length").value.(uint32)
+	case "String":
+		return uint32(object.get("length").value.(int))
+	case "GoArray":
+		return uint32(object.get("length").value.(int))
+	}
+	return 0
+}
+
+// arrayUint32 converts a JavaScript value to a uint32 array length,
+// panicking with a RangeError when the value is not an integral number in
+// uint32 range (the ES5 array-length validity check).
+func arrayUint32(rt *_runtime, value Value) uint32 {
+	nm := value.number()
+	if nm.kind != numberInteger || !isUint32(nm.int64) {
+		// FIXME
+		panic(rt.panicRangeError())
+	}
+	return uint32(nm.int64)
+}
+
+// arrayDefineOwnProperty implements [[DefineOwnProperty]] for Array exotic
+// objects (appears to follow ES5 15.4.5.1): redefining "length" with a
+// smaller value deletes trailing elements, and defining an index property
+// at or beyond the current length grows "length" to index+1.
+// The property mode bits: 0100 looks like the writable bit (&0700 tests
+// writability, &=0077 clears it) — matches usage elsewhere in this file.
+func arrayDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool {
+	lengthProperty := self.getOwnProperty("length")
+	lengthValue, valid := lengthProperty.value.(Value)
+	if !valid {
+		panic("Array.length != Value{}")
+	}
+	length := lengthValue.value.(uint32)
+	if name == "length" {
+		if descriptor.value == nil {
+			return objectDefineOwnProperty(self, name, descriptor, throw)
+		}
+		newLengthValue, isValue := descriptor.value.(Value)
+		if !isValue {
+			panic(self.runtime.panicTypeError())
+		}
+		newLength := arrayUint32(self.runtime, newLengthValue)
+		descriptor.value = toValue_uint32(newLength)
+		if newLength > length {
+			return objectDefineOwnProperty(self, name, descriptor, throw)
+		}
+		if !lengthProperty.writable() {
+			goto Reject
+		}
+		newWritable := true
+		if descriptor.mode&0700 == 0 {
+			// If writable is off
+			newWritable = false
+			descriptor.mode |= 0100
+		}
+		if !objectDefineOwnProperty(self, name, descriptor, throw) {
+			return false
+		}
+		// Truncation: delete elements from the tail down to the new length;
+		// an undeletable element stops the loop and restores length to just
+		// past it, then rejects.
+		for newLength < length {
+			length--
+			if !self.delete(strconv.FormatInt(int64(length), 10), false) {
+				descriptor.value = toValue_uint32(length + 1)
+				if !newWritable {
+					descriptor.mode &= 0077
+				}
+				objectDefineOwnProperty(self, name, descriptor, false)
+				goto Reject
+			}
+		}
+		if !newWritable {
+			descriptor.mode &= 0077
+			objectDefineOwnProperty(self, name, descriptor, false)
+		}
+	} else if index := stringToArrayIndex(name); index >= 0 {
+		if index >= int64(length) && !lengthProperty.writable() {
+			goto Reject
+		}
+		if !objectDefineOwnProperty(self, strconv.FormatInt(index, 10), descriptor, false) {
+			goto Reject
+		}
+		if index >= int64(length) {
+			lengthProperty.value = toValue_uint32(uint32(index + 1))
+			objectDefineOwnProperty(self, "length", *lengthProperty, false)
+			return true
+		}
+	}
+	return objectDefineOwnProperty(self, name, descriptor, throw)
+Reject:
+	if throw {
+		panic(self.runtime.panicTypeError())
+	}
+	return false
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_boolean.go b/vendor/github.com/robertkrimen/otto/type_boolean.go
new file mode 100644
index 000000000..afc45c69b
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_boolean.go
@@ -0,0 +1,13 @@
+package otto
+
+import (
+	"strconv"
+)
+
+// newBooleanObject creates a Boolean wrapper object holding the coerced
+// boolean of value.
+func (runtime *_runtime) newBooleanObject(value Value) *_object {
+	return runtime.newPrimitiveObject("Boolean", toValue_bool(value.bool()))
+}
+
+// booleanToString renders a bool as "true"/"false".
+func booleanToString(value bool) string {
+	return strconv.FormatBool(value)
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_date.go b/vendor/github.com/robertkrimen/otto/type_date.go
new file mode 100644
index 000000000..7079e649c
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_date.go
@@ -0,0 +1,299 @@
+package otto
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	Time "time"
+)
+
+// _dateObject is the internal value of a JavaScript Date: the epoch in
+// milliseconds plus a cached Go time.Time and the JS-visible Value.
+type _dateObject struct {
+	time Time.Time // Time from the "time" package, a cached version of time
+	epoch int64
+	value Value
+	isNaN bool
+}
+
+var (
+	// invalidDateObject is the canonical "Invalid Date" state (NaN value,
+	// epoch -1, zero time).
+	invalidDateObject = _dateObject{
+		time: Time.Time{},
+		epoch: -1,
+		value: NaNValue(),
+		isNaN: true,
+	}
+)
+
+// _ecmaTime is a broken-down calendar time using ECMAScript conventions
+// (0-based month, millisecond precision).
+type _ecmaTime struct {
+	year int
+	month int
+	day int
+	hour int
+	minute int
+	second int
+	millisecond int
+	location *Time.Location // Basically, either local or UTC
+}
+
+// ecmaTime breaks a Go time into ECMAScript calendar components.
+// 100*100*100 == 1,000,000 nanoseconds per millisecond.
+func ecmaTime(goTime Time.Time) _ecmaTime {
+	return _ecmaTime{
+		goTime.Year(),
+		dateFromGoMonth(goTime.Month()),
+		goTime.Day(),
+		goTime.Hour(),
+		goTime.Minute(),
+		goTime.Second(),
+		goTime.Nanosecond() / (100 * 100 * 100),
+		goTime.Location(),
+	}
+}
+
+// goTime reassembles the components into a Go time.Time.
+func (self *_ecmaTime) goTime() Time.Time {
+	return Time.Date(
+		self.year,
+		dateToGoMonth(self.month),
+		self.day,
+		self.hour,
+		self.minute,
+		self.second,
+		self.millisecond*(100*100*100),
+		self.location,
+	)
+}
+
+// Time returns the cached Go time.
+func (self *_dateObject) Time() Time.Time {
+	return self.time
+}
+
+// Epoch returns the integral epoch milliseconds (-1 when NaN).
+func (self *_dateObject) Epoch() int64 {
+	return self.epoch
+}
+
+// Value returns the JS-visible numeric value (NaN for an invalid date).
+func (self *_dateObject) Value() Value {
+	return self.value
+}
+
+// FIXME A date should only be in the range of -100,000,000 to +100,000,000 (1970): 15.9.1.1
+// SetNaN puts the date into the "Invalid Date" state.
+func (self *_dateObject) SetNaN() {
+	self.time = Time.Time{}
+	self.epoch = -1
+	self.value = NaNValue()
+	self.isNaN = true
+}
+
+// SetTime sets the date from a Go time via its epoch milliseconds.
+func (self *_dateObject) SetTime(time Time.Time) {
+	self.Set(timeToEpoch(time))
+}
+
+// epoch2dateObject builds a _dateObject directly from an epoch value.
+func epoch2dateObject(epoch float64) _dateObject {
+	date := _dateObject{}
+	date.Set(epoch)
+	return date
+}
+
+// Set updates every cached field from an epoch-milliseconds value, falling
+// into the NaN state when the epoch is not a finite number.
+func (self *_dateObject) Set(epoch float64) {
+	// epoch
+	self.epoch = epochToInteger(epoch)
+
+	// time
+	time, err := epochToTime(epoch)
+	self.time = time // Is either a valid time, or the zero-value for time.Time
+
+	// value & isNaN
+	if err != nil {
+		self.isNaN = true
+		self.epoch = -1
+		self.value = NaNValue()
+	} else {
+		self.value = toValue_int64(self.epoch)
+	}
+}
+
+// epochToInteger truncates toward zero (floor for positive, ceil for
+// negative), per the ECMAScript ToInteger behavior for time values.
+func epochToInteger(value float64) int64 {
+	if value > 0 {
+		return int64(math.Floor(value))
+	}
+	return int64(math.Ceil(value))
+}
+
+// epochToTime converts epoch milliseconds to a UTC time.Time, returning an
+// error for NaN/Inf input.
+func epochToTime(value float64) (time Time.Time, err error) {
+	epochWithMilli := value
+	if math.IsNaN(epochWithMilli) || math.IsInf(epochWithMilli, 0) {
+		err = fmt.Errorf("Invalid time %v", value)
+		return
+	}
+
+	epoch := int64(epochWithMilli / 1000)
+	milli := int64(epochWithMilli) % 1000
+
+	time = Time.Unix(int64(epoch), milli*1000000).UTC()
+	return
+}
+
+// timeToEpoch converts a Go time to epoch milliseconds.
+func timeToEpoch(time Time.Time) float64 {
+	return float64(time.UnixNano() / (1000 * 1000))
+}
+
+// newDateObject creates a JS Date object whose internal value is the given
+// epoch (milliseconds).
+func (runtime *_runtime) newDateObject(epoch float64) *_object {
+	self := runtime.newObject()
+	self.class = "Date"
+
+	// FIXME This is ugly...
+	date := _dateObject{}
+	date.Set(epoch)
+	self.value = date
+	return self
+}
+
+// dateValue extracts the _dateObject (zero value if the object holds
+// something else).
+func (self *_object) dateValue() _dateObject {
+	value, _ := self.value.(_dateObject)
+	return value
+}
+
+// dateObjectOf asserts that the object is a Date and returns its value,
+// raising a TypeError otherwise.
+func dateObjectOf(rt *_runtime, _dateObject *_object) _dateObject {
+	if _dateObject == nil || _dateObject.class != "Date" {
+		panic(rt.panicTypeError())
+	}
+	return _dateObject.dateValue()
+}
+
+// JavaScript is 0-based, Go is 1-based (15.9.1.4)
+func dateToGoMonth(month int) Time.Month {
+	return Time.Month(month + 1)
+}
+
+func dateFromGoMonth(month Time.Month) int {
+	return int(month) - 1
+}
+
+// Both JavaScript & Go are 0-based (Sunday == 0)
+func dateToGoDay(day int) Time.Weekday {
+	return Time.Weekday(day)
+}
+
+func dateFromGoDay(day Time.Weekday) int {
+	return int(day)
+}
+
+// newDateTime implements the Date constructor's argument handling:
+// 0 args -> now; 1 arg -> parse string or treat as epoch; 2+ args ->
+// year/month/day/... components (years 0-99 map to 1900-1999). Any NaN/Inf
+// component yields NaN.
+func newDateTime(argumentList []Value, location *Time.Location) (epoch float64) {
+
+	pick := func(index int, default_ float64) (float64, bool) {
+		if index >= len(argumentList) {
+			return default_, false
+		}
+		value := argumentList[index].float64()
+		if math.IsNaN(value) || math.IsInf(value, 0) {
+			return 0, true
+		}
+		return value, false
+	}
+
+	if len(argumentList) >= 2 { // 2-argument, 3-argument, ...
+		var year, month, day, hour, minute, second, millisecond float64
+		var invalid bool
+		if year, invalid = pick(0, 1900.0); invalid {
+			goto INVALID
+		}
+		if month, invalid = pick(1, 0.0); invalid {
+			goto INVALID
+		}
+		if day, invalid = pick(2, 1.0); invalid {
+			goto INVALID
+		}
+		if hour, invalid = pick(3, 0.0); invalid {
+			goto INVALID
+		}
+		if minute, invalid = pick(4, 0.0); invalid {
+			goto INVALID
+		}
+		if second, invalid = pick(5, 0.0); invalid {
+			goto INVALID
+		}
+		if millisecond, invalid = pick(6, 0.0); invalid {
+			goto INVALID
+		}
+
+		if year >= 0 && year <= 99 {
+			year += 1900
+		}
+
+		time := Time.Date(int(year), dateToGoMonth(int(month)), int(day), int(hour), int(minute), int(second), int(millisecond)*1000*1000, location)
+		return timeToEpoch(time)
+
+	} else if len(argumentList) == 0 { // 0-argument
+		time := Time.Now().UTC()
+		return timeToEpoch(time)
+	} else { // 1-argument
+		value := valueOfArrayIndex(argumentList, 0)
+		value = toPrimitive(value)
+		if value.IsString() {
+			return dateParse(value.string())
+		}
+
+		return value.float64()
+	}
+
+INVALID:
+	epoch = math.NaN()
+	return
+}
+
+var (
+	// dateLayoutList holds the layouts tried in order by dateParse —
+	// ISO-8601-ish forms of increasing precision, with and without a
+	// numeric zone offset, plus RFC 1123.
+	dateLayoutList = []string{
+		"2006",
+		"2006-01",
+		"2006-01-02",
+
+		"2006T15:04",
+		"2006-01T15:04",
+		"2006-01-02T15:04",
+
+		"2006T15:04:05",
+		"2006-01T15:04:05",
+		"2006-01-02T15:04:05",
+
+		"2006T15:04:05.000",
+		"2006-01T15:04:05.000",
+		"2006-01-02T15:04:05.000",
+
+		"2006T15:04-0700",
+		"2006-01T15:04-0700",
+		"2006-01-02T15:04-0700",
+
+		"2006T15:04:05-0700",
+		"2006-01T15:04:05-0700",
+		"2006-01-02T15:04:05-0700",
+
+		"2006T15:04:05.000-0700",
+		"2006-01T15:04:05.000-0700",
+		"2006-01-02T15:04:05.000-0700",
+
+		Time.RFC1123,
+	}
+	// matchDateTimeZone splits a trailing "Z" or "+hh:mm"/"-hh:mm" zone
+	// suffix so it can be normalized to the -0700 layout form.
+	matchDateTimeZone = regexp.MustCompile(`^(.*)(?:(Z)|([\+\-]\d{2}):(\d{2}))$`)
+)
+
+// dateParse parses a date string against the layout list, returning epoch
+// milliseconds or NaN when nothing matches.
+func dateParse(date string) (epoch float64) {
+	// YYYY-MM-DDTHH:mm:ss.sssZ
+	var time Time.Time
+	var err error
+	{
+		date := date
+		if match := matchDateTimeZone.FindStringSubmatch(date); match != nil {
+			if match[2] == "Z" {
+				date = match[1] + "+0000"
+			} else {
+				date = match[1] + match[3] + match[4]
+			}
+		}
+		for _, layout := range dateLayoutList {
+			time, err = Time.Parse(layout, date)
+			if err == nil {
+				break
+			}
+		}
+	}
+	if err != nil {
+		return math.NaN()
+	}
+	return float64(time.UnixNano()) / (1000 * 1000) // UnixMilli()
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_error.go b/vendor/github.com/robertkrimen/otto/type_error.go
new file mode 100644
index 000000000..84e5d79b1
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_error.go
@@ -0,0 +1,24 @@
+package otto
+
+// newErrorObject creates a JS Error object of the given name. When a
+// message is supplied it becomes a hidden (mode 0111) "message" property and
+// is embedded in the internal _error value. A lazily-computed "stack"
+// accessor renders the captured stack on demand; stackFramesToPop trims the
+// internal frames recorded at construction time.
+func (rt *_runtime) newErrorObject(name string, message Value, stackFramesToPop int) *_object {
+	self := rt.newClassObject("Error")
+	if message.IsDefined() {
+		msg := message.string()
+		self.defineProperty("message", toValue_string(msg), 0111, false)
+		self.value = newError(rt, name, stackFramesToPop, msg)
+	} else {
+		self.value = newError(rt, name, stackFramesToPop)
+	}
+
+	self.defineOwnProperty("stack", _property{
+		value: _propertyGetSet{
+			rt.newNativeFunction("get", "internal", 0, func(FunctionCall) Value {
+				return toValue_string(self.value.(_error).formatWithStack())
+			}),
+			&_nilGetSetObject,
+		},
+		mode: modeConfigureMask & modeOnMask,
+	}, false)
+
+	return self
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_function.go b/vendor/github.com/robertkrimen/otto/type_function.go
new file mode 100644
index 000000000..5637dc605
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_function.go
@@ -0,0 +1,292 @@
+package otto
+
+// _constructFunction
+type _constructFunction func(*_object, []Value) Value
+
+// 13.2.2 [[Construct]]
+// defaultConstruct allocates a fresh Object whose prototype is the
+// function's "prototype" property (or Object.prototype if that is not an
+// object), invokes the function with it as `this`, and returns the call's
+// result when it is an object, otherwise the new object itself.
+func defaultConstruct(fn *_object, argumentList []Value) Value {
+	object := fn.runtime.newObject()
+	object.class = "Object"
+
+	prototype := fn.get("prototype")
+	if prototype.kind != valueObject {
+		prototype = toValue_object(fn.runtime.global.ObjectPrototype)
+	}
+	object.prototype = prototype._object()
+
+	this := toValue_object(object)
+	value := fn.call(this, argumentList, false, nativeFrame)
+	if value.kind == valueObject {
+		return value
+	}
+	return this
+}
+
+// _nativeFunction
+type _nativeFunction func(FunctionCall) Value
+
+// ===================== //
+// _nativeFunctionObject //
+// ===================== //
+
+// _nativeFunctionObject backs functions implemented in Go, recording the
+// Go-side file/line for stack traces.
+type _nativeFunctionObject struct {
+	name string
+	file string
+	line int
+	call _nativeFunction // [[Call]]
+	construct _constructFunction // [[Construct]]
+}
+
+// newNativeFunctionObject wraps a Go function as a JS Function object with
+// the default [[Construct]] behavior and the given "length".
+func (runtime *_runtime) newNativeFunctionObject(name, file string, line int, native _nativeFunction, length int) *_object {
+	self := runtime.newClassObject("Function")
+	self.value = _nativeFunctionObject{
+		name: name,
+		file: file,
+		line: line,
+		call: native,
+		construct: defaultConstruct,
+	}
+	self.defineProperty("length", toValue_int(length), 0000, false)
+	return self
+}
+
+// =================== //
+// _bindFunctionObject //
+// =================== //
+
+// _bindFunctionObject backs Function.prototype.bind results: a target
+// function plus the bound `this` and leading arguments.
+type _bindFunctionObject struct {
+	target *_object
+	this Value
+	argumentList []Value
+}
+
+// newBoundFunctionObject creates a bound function whose "length" is the
+// target's length minus the number of pre-bound arguments (floored at 0).
+func (runtime *_runtime) newBoundFunctionObject(target *_object, this Value, argumentList []Value) *_object {
+	self := runtime.newClassObject("Function")
+	self.value = _bindFunctionObject{
+		target: target,
+		this: this,
+		argumentList: argumentList,
+	}
+	length := int(toInt32(target.get("length")))
+	length -= len(argumentList)
+	if length < 0 {
+		length = 0
+	}
+	self.defineProperty("length", toValue_int(length), 0000, false)
+	self.defineProperty("caller", Value{}, 0000, false) // TODO Should throw a TypeError
+	self.defineProperty("arguments", Value{}, 0000, false) // TODO Should throw a TypeError
+	return self
+}
+
+// [[Construct]]
+// construct forwards construction to the bound target, prepending the bound
+// arguments for node functions. NOTE(review): the native-function branch
+// passes only fn.argumentList and drops the call-site argumentList —
+// presumably intentional for the vendored version; verify against upstream.
+func (fn _bindFunctionObject) construct(argumentList []Value) Value {
+	object := fn.target
+	switch value := object.value.(type) {
+	case _nativeFunctionObject:
+		return value.construct(object, fn.argumentList)
+	case _nodeFunctionObject:
+		argumentList = append(fn.argumentList, argumentList...)
+		return object.construct(argumentList)
+	}
+	panic(fn.target.runtime.panicTypeError())
+}
+
+// =================== //
+// _nodeFunctionObject //
+// =================== //
+
+// _nodeFunctionObject backs functions defined in JavaScript source: the
+// parsed function literal plus the closure's stash (scope chain).
+type _nodeFunctionObject struct {
+	node *_nodeFunctionLiteral
+	stash _stash
+}
+
+// newNodeFunctionObject wraps a parsed function literal as a JS Function
+// object; "length" is the declared parameter count.
+func (runtime *_runtime) newNodeFunctionObject(node *_nodeFunctionLiteral, stash _stash) *_object {
+	self := runtime.newClassObject("Function")
+	self.value = _nodeFunctionObject{
+		node: node,
+		stash: stash,
+	}
+	self.defineProperty("length", toValue_int(len(node.parameterList)), 0000, false)
+	return self
+}
+
+// ======= //
+// _object //
+// ======= //
+
+// isCall reports whether the object is callable ([[Call]] is present).
+func (self *_object) isCall() bool {
+	switch fn := self.value.(type) {
+	case _nativeFunctionObject:
+		return fn.call != nil
+	case _bindFunctionObject:
+		return true
+	case _nodeFunctionObject:
+		return true
+	}
+	return false
+}
+
+// call implements [[Call]] for all three function kinds. `eval` marks a
+// potential direct eval call; for native functions a scope frame is pushed
+// so stack traces include the Go-side location.
+func (self *_object) call(this Value, argumentList []Value, eval bool, frame _frame) Value {
+	switch fn := self.value.(type) {
+
+	case _nativeFunctionObject:
+		// Since eval is a native function, we only have to check for it here
+		if eval {
+			eval = self == self.runtime.eval // If eval is true, then it IS a direct eval
+		}
+
+		// Enter a scope, name from the native object...
+		rt := self.runtime
+		if rt.scope != nil && !eval {
+			rt.enterFunctionScope(rt.scope.lexical, this)
+			rt.scope.frame = _frame{
+				native: true,
+				nativeFile: fn.file,
+				nativeLine: fn.line,
+				callee: fn.name,
+				file: nil,
+			}
+			defer func() {
+				rt.leaveScope()
+			}()
+		}
+
+		return fn.call(FunctionCall{
+			runtime: self.runtime,
+			eval: eval,
+
+			This: this,
+			ArgumentList: argumentList,
+			Otto: self.runtime.otto,
+		})
+
+	case _bindFunctionObject:
+		// TODO Passthrough site, do not enter a scope
+		argumentList = append(fn.argumentList, argumentList...)
+		return fn.target.call(fn.this, argumentList, false, frame)
+
+	case _nodeFunctionObject:
+		rt := self.runtime
+		stash := rt.enterFunctionScope(fn.stash, this)
+		rt.scope.frame = _frame{
+			callee: fn.node.name,
+			file: fn.node.file,
+		}
+		defer func() {
+			rt.leaveScope()
+		}()
+		callValue := rt.cmpl_call_nodeFunction(self, stash, fn.node, this, argumentList)
+		if value, valid := callValue.value.(_result); valid {
+			return value.value
+		}
+		return callValue
+	}
+
+	panic(self.runtime.panicTypeError("%v is not a function", toValue_object(self)))
+}
+
+// construct implements [[Construct]] for all three function kinds,
+// raising a TypeError for non-constructible values.
+func (self *_object) construct(argumentList []Value) Value {
+	switch fn := self.value.(type) {
+
+	case _nativeFunctionObject:
+		if fn.call == nil {
+			panic(self.runtime.panicTypeError("%v is not a function", toValue_object(self)))
+		}
+		if fn.construct == nil {
+			panic(self.runtime.panicTypeError("%v is not a constructor", toValue_object(self)))
+		}
+		return fn.construct(self, argumentList)
+
+	case _bindFunctionObject:
+		return fn.construct(argumentList)
+
+	case _nodeFunctionObject:
+		return defaultConstruct(self, argumentList)
+	}
+
+	panic(self.runtime.panicTypeError("%v is not a function", toValue_object(self)))
+}
+
+// 15.3.5.3
+// hasInstance implements `instanceof`: walk the candidate's prototype chain
+// looking for this function's "prototype" object.
+func (self *_object) hasInstance(of Value) bool {
+	if !self.isCall() {
+		// We should not have a hasInstance method
+		panic(self.runtime.panicTypeError())
+	}
+	if !of.IsObject() {
+		return false
+	}
+	prototype := self.get("prototype")
+	if !prototype.IsObject() {
+		panic(self.runtime.panicTypeError())
+	}
+	prototypeObject := prototype._object()
+
+	value := of._object().prototype
+	for value != nil {
+		if value == prototypeObject {
+			return true
+		}
+		value = value.prototype
+	}
+	return false
+}
+
+// ============ //
+// FunctionCall //
+// ============ //
+
+// FunctionCall is an encapsulation of a JavaScript function call.
+type FunctionCall struct {
+	runtime *_runtime
+	_thisObject *_object
+	eval bool // This call is a direct call to eval
+
+	This Value
+	ArgumentList []Value
+	Otto *Otto
+}
+
+// Argument will return the value of the argument at the given index.
+//
+// If no such argument exists, undefined is returned.
+func (self FunctionCall) Argument(index int) Value {
+	return valueOfArrayIndex(self.ArgumentList, index)
+}
+
+// getArgument returns the argument and whether it was actually passed.
+func (self FunctionCall) getArgument(index int) (Value, bool) {
+	return getValueOfArrayIndex(self.ArgumentList, index)
+}
+
+// slice returns the arguments from index onward (empty slice if none).
+func (self FunctionCall) slice(index int) []Value {
+	if index < len(self.ArgumentList) {
+		return self.ArgumentList[index:]
+	}
+	return []Value{}
+}
+
+// thisObject lazily coerces and caches `this` as an object.
+func (self *FunctionCall) thisObject() *_object {
+	if self._thisObject == nil {
+		this := self.This.resolve() // FIXME Is this right?
+		self._thisObject = self.runtime.toObject(this)
+	}
+	return self._thisObject
+}
+
+// thisClassObject returns thisObject after asserting its class, raising a
+// TypeError on mismatch (used by class-specific built-ins).
+func (self *FunctionCall) thisClassObject(class string) *_object {
+	thisObject := self.thisObject()
+	if thisObject.class != class {
+		panic(self.runtime.panicTypeError())
+	}
+	return self._thisObject
+}
+
+// toObject coerces an arbitrary value to an object via the runtime.
+func (self FunctionCall) toObject(value Value) *_object {
+	return self.runtime.toObject(value)
+}
+
+// CallerLocation will return file location information (file:line:pos) where this function is being called.
+func (self FunctionCall) CallerLocation() string {
+	// see error.go for location()
+	return self.runtime.scope.outer.frame.location()
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_go_array.go b/vendor/github.com/robertkrimen/otto/type_go_array.go
new file mode 100644
index 000000000..13a0b10f2
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_go_array.go
@@ -0,0 +1,134 @@
+package otto
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// newGoArrayObject wraps a reflected Go array (or pointer-to-array) as a
+// JS object of class "GoArray" with the GoArray object-class hooks.
+func (runtime *_runtime) newGoArrayObject(value reflect.Value) *_object {
+	self := runtime.newObject()
+	self.class = "GoArray"
+	self.objectClass = _classGoArray
+	self.value = _newGoArrayObject(value)
+	return self
+}
+
+// _goArrayObject holds the reflected array. Elements are writable only when
+// the value is a pointer (addressable); propertyMode encodes that (0110
+// writable, 0010 read-only).
+type _goArrayObject struct {
+	value reflect.Value
+	writable bool
+	propertyMode _propertyMode
+}
+
+func _newGoArrayObject(value reflect.Value) *_goArrayObject {
+	writable := value.Kind() == reflect.Ptr // The Array is addressable (like a Slice)
+	mode := _propertyMode(0010)
+	if writable {
+		mode = 0110
+	}
+	self := &_goArrayObject{
+		value: value,
+		writable: writable,
+		propertyMode: mode,
+	}
+	return self
+}
+
+// getValue returns the element at index (dereferencing a pointer value
+// first), or false when the index is out of range.
+func (self _goArrayObject) getValue(index int64) (reflect.Value, bool) {
+	value := reflect.Indirect(self.value)
+	if index < int64(value.Len()) {
+		return value.Index(int(index)), true
+	}
+	return reflect.Value{}, false
+}
+
+// setValue converts the JS value to the element type and stores it at
+// index; false when the index is out of range. Conversion failures panic.
+func (self _goArrayObject) setValue(index int64, value Value) bool {
+	indexValue, exists := self.getValue(index)
+	if !exists {
+		return false
+	}
+	reflectValue, err := value.toReflectValue(reflect.Indirect(self.value).Type().Elem().Kind())
+	if err != nil {
+		panic(err)
+	}
+	indexValue.Set(reflectValue)
+	return true
+}
+
+// goArrayGetOwnProperty resolves "length" and numeric index properties
+// against the underlying Go array; everything else falls back to the
+// ordinary object implementation.
+func goArrayGetOwnProperty(self *_object, name string) *_property {
+	// length
+	if name == "length" {
+		return &_property{
+			value: toValue(reflect.Indirect(self.value.(*_goArrayObject).value).Len()),
+			mode: 0,
+		}
+	}
+
+	// .0, .1, .2, ...
+	index := stringToArrayIndex(name)
+	if index >= 0 {
+		object := self.value.(*_goArrayObject)
+		value := Value{}
+		reflectValue, exists := object.getValue(index)
+		if exists {
+			value = self.runtime.toValue(reflectValue.Interface())
+		}
+		return &_property{
+			value: value,
+			mode: object.propertyMode,
+		}
+	}
+
+	return objectGetOwnProperty(self, name)
+}
+
+// goArrayEnumerate yields each index name in order, then delegates to the
+// ordinary object enumeration for any extra properties.
+func goArrayEnumerate(self *_object, all bool, each func(string) bool) {
+	object := self.value.(*_goArrayObject)
+	// .0, .1, .2, ...
+
+	for index, length := 0, object.value.Len(); index < length; index++ {
+		name := strconv.FormatInt(int64(index), 10)
+		if !each(name) {
+			return
+		}
+	}
+
+	objectEnumerate(self, all, each)
+}
+
+// goArrayDefineOwnProperty writes numeric index properties through to the
+// Go array when writable; "length" is immutable.
+func goArrayDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool {
+	if name == "length" {
+		return self.runtime.typeErrorResult(throw)
+	} else if index := stringToArrayIndex(name); index >= 0 {
+		object := self.value.(*_goArrayObject)
+		if object.writable {
+			if self.value.(*_goArrayObject).setValue(index, descriptor.value.(Value)) {
+				return true
+			}
+		}
+		return self.runtime.typeErrorResult(throw)
+	}
+	return objectDefineOwnProperty(self, name, descriptor, throw)
+}
+
+// goArrayDelete zeroes an in-range element (delete cannot shrink a Go
+// array); "length" is undeletable.
+// NOTE(review): the final fall-through calls self.delete, which dispatches
+// through the object class — this looks like it could re-enter
+// goArrayDelete for non-index names; verify against the dispatch table.
+func goArrayDelete(self *_object, name string, throw bool) bool {
+	// length
+	if name == "length" {
+		return self.runtime.typeErrorResult(throw)
+	}
+
+	// .0, .1, .2, ...
+	index := stringToArrayIndex(name)
+	if index >= 0 {
+		object := self.value.(*_goArrayObject)
+		if object.writable {
+			indexValue, exists := object.getValue(index)
+			if exists {
+				indexValue.Set(reflect.Zero(reflect.Indirect(object.value).Type().Elem()))
+				return true
+			}
+		}
+		return self.runtime.typeErrorResult(throw)
+	}
+
+	return self.delete(name, throw)
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_go_map.go b/vendor/github.com/robertkrimen/otto/type_go_map.go
new file mode 100644
index 000000000..3e204a028
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_go_map.go
@@ -0,0 +1,87 @@
+package otto
+
+import (
+	"reflect"
+)
+
+// newGoMapObject wraps a reflected Go map as a JS object (class "Object")
+// with the GoMap object-class hooks.
+func (runtime *_runtime) newGoMapObject(value reflect.Value) *_object {
+	self := runtime.newObject()
+	self.class = "Object" // TODO Should this be something else?
+	self.objectClass = _classGoMap
+	self.value = _newGoMapObject(value)
+	return self
+}
+
+// _goMapObject holds the reflected map plus its cached key/value kinds for
+// JS<->Go conversion.
+type _goMapObject struct {
+	value reflect.Value
+	keyKind reflect.Kind
+	valueKind reflect.Kind
+}
+
+func _newGoMapObject(value reflect.Value) *_goMapObject {
+	if value.Kind() != reflect.Map {
+		dbgf("%/panic//%@: %v != reflect.Map", value.Kind())
+	}
+	self := &_goMapObject{
+		value: value,
+		keyKind: value.Type().Key().Kind(),
+		valueKind: value.Type().Elem().Kind(),
+	}
+	return self
+}
+
+// toKey converts a JS property name to a reflect.Value of the map's key
+// kind; conversion failures panic.
+func (self _goMapObject) toKey(name string) reflect.Value {
+	reflectValue, err := stringToReflectValue(name, self.keyKind)
+	if err != nil {
+		panic(err)
+	}
+	return reflectValue
+}
+
+// toValue converts a JS value to a reflect.Value of the map's element kind;
+// conversion failures panic.
+func (self _goMapObject) toValue(value Value) reflect.Value {
+	reflectValue, err := value.toReflectValue(self.valueKind)
+	if err != nil {
+		panic(err)
+	}
+	return reflectValue
+}
+
+// goMapGetOwnProperty looks the name up as a map key, reporting a writable
+// hidden (0111) data property, or nil when the key is absent.
+func goMapGetOwnProperty(self *_object, name string) *_property {
+	object := self.value.(*_goMapObject)
+	value := object.value.MapIndex(object.toKey(name))
+	if value.IsValid() {
+		return &_property{self.runtime.toValue(value.Interface()), 0111}
+	}
+
+	return nil
+}
+
+// goMapEnumerate yields each map key (in Go's unspecified map order).
+func goMapEnumerate(self *_object, all bool, each func(string) bool) {
+	object := self.value.(*_goMapObject)
+	keys := object.value.MapKeys()
+	for _, key := range keys {
+		if !each(toValue(key).String()) {
+			return
+		}
+	}
+}
+
+// goMapDefineOwnProperty stores a plain data descriptor as a map entry;
+// only the default mode 0111 is accepted.
+func goMapDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool {
+	object := self.value.(*_goMapObject)
+	// TODO ...or 0222
+	if descriptor.mode != 0111 {
+		return self.runtime.typeErrorResult(throw)
+	}
+	if !descriptor.isDataDescriptor() {
+		return self.runtime.typeErrorResult(throw)
+	}
+	object.value.SetMapIndex(object.toKey(name), object.toValue(descriptor.value.(Value)))
+	return true
+}
+
+// goMapDelete removes the map entry (SetMapIndex with a zero Value deletes
+// the key); always reports success.
+func goMapDelete(self *_object, name string, throw bool) bool {
+	object := self.value.(*_goMapObject)
+	object.value.SetMapIndex(object.toKey(name), reflect.Value{})
+	// FIXME
+	return true
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_go_slice.go b/vendor/github.com/robertkrimen/otto/type_go_slice.go
new file mode 100644
index 000000000..78c69e684
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_go_slice.go
@@ -0,0 +1,126 @@
+package otto
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// newGoSliceObject wraps a reflected Go slice as a JS object of class
+// "GoArray" with the GoSlice object-class hooks.
+func (runtime *_runtime) newGoSliceObject(value reflect.Value) *_object {
+	self := runtime.newObject()
+	self.class = "GoArray" // TODO GoSlice?
+	self.objectClass = _classGoSlice
+	self.value = _newGoSliceObject(value)
+	return self
+}
+
+// _goSliceObject holds the reflected slice; slices are always writable.
+type _goSliceObject struct {
+	value reflect.Value
+}
+
+func _newGoSliceObject(value reflect.Value) *_goSliceObject {
+	self := &_goSliceObject{
+		value: value,
+	}
+	return self
+}
+
+// getValue returns the element at index, or false when out of range.
+func (self _goSliceObject) getValue(index int64) (reflect.Value, bool) {
+	if index < int64(self.value.Len()) {
+		return self.value.Index(int(index)), true
+	}
+	return reflect.Value{}, false
+}
+
+// setValue converts the JS value to the element type and stores it at
+// index; false when the index is out of range. Conversion failures panic.
+func (self _goSliceObject) setValue(index int64, value Value) bool {
+	indexValue, exists := self.getValue(index)
+	if !exists {
+		return false
+	}
+	reflectValue, err := value.toReflectValue(self.value.Type().Elem().Kind())
+	if err != nil {
+		panic(err)
+	}
+	indexValue.Set(reflectValue)
+	return true
+}
+
+// goSliceGetOwnProperty resolves "length", numeric indices, and named Go
+// methods of the slice value; everything else falls back to the ordinary
+// object implementation.
+func goSliceGetOwnProperty(self *_object, name string) *_property {
+	// length
+	if name == "length" {
+		return &_property{
+			value: toValue(self.value.(*_goSliceObject).value.Len()),
+			mode: 0,
+		}
+	}
+
+	// .0, .1, .2, ...
+	index := stringToArrayIndex(name)
+	if index >= 0 {
+		value := Value{}
+		reflectValue, exists := self.value.(*_goSliceObject).getValue(index)
+		if exists {
+			value = self.runtime.toValue(reflectValue.Interface())
+		}
+		return &_property{
+			value: value,
+			mode: 0110,
+		}
+	}
+
+	// Other methods
+	if method := self.value.(*_goSliceObject).value.MethodByName(name); (method != reflect.Value{}) {
+		return &_property{
+			value: self.runtime.toValue(method.Interface()),
+			mode: 0110,
+		}
+	}
+
+	return objectGetOwnProperty(self, name)
+}
+
+// goSliceEnumerate yields each index name in order, then delegates to the
+// ordinary object enumeration for any extra properties.
+func goSliceEnumerate(self *_object, all bool, each func(string) bool) {
+	object := self.value.(*_goSliceObject)
+	// .0, .1, .2, ...
+
+	for index, length := 0, object.value.Len(); index < length; index++ {
+		name := strconv.FormatInt(int64(index), 10)
+		if !each(name) {
+			return
+		}
+	}
+
+	objectEnumerate(self, all, each)
+}
+
+// goSliceDefineOwnProperty writes numeric index properties through to the
+// Go slice; "length" is immutable.
+func goSliceDefineOwnProperty(self *_object, name string, descriptor _property, throw bool) bool {
+	if name == "length" {
+		return self.runtime.typeErrorResult(throw)
+	} else if index := stringToArrayIndex(name); index >= 0 {
+		if self.value.(*_goSliceObject).setValue(index, descriptor.value.(Value)) {
+			return true
+		}
+		return self.runtime.typeErrorResult(throw)
+	}
+	return objectDefineOwnProperty(self, name, descriptor, throw)
+}
+
+// goSliceDelete zeroes an in-range element (delete cannot shrink a Go
+// slice); "length" is undeletable.
+// NOTE(review): the final fall-through calls self.delete, which dispatches
+// through the object class — this looks like it could re-enter
+// goSliceDelete for non-index names; verify against the dispatch table.
+func goSliceDelete(self *_object, name string, throw bool) bool {
+	// length
+	if name == "length" {
+		return self.runtime.typeErrorResult(throw)
+	}
+
+	// .0, .1, .2, ...
+	index := stringToArrayIndex(name)
+	if index >= 0 {
+		object := self.value.(*_goSliceObject)
+		indexValue, exists := object.getValue(index)
+		if exists {
+			indexValue.Set(reflect.Zero(object.value.Type().Elem()))
+			return true
+		}
+		return self.runtime.typeErrorResult(throw)
+	}
+
+	return self.delete(name, throw)
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_go_struct.go b/vendor/github.com/robertkrimen/otto/type_go_struct.go
new file mode 100644
index 000000000..608ac6660
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_go_struct.go
@@ -0,0 +1,146 @@
+package otto
+
+import (
+	"encoding/json"
+	"reflect"
+)
+
+// FIXME Make a note about not being able to modify a struct unless it was
+// passed as a pointer-to: &struct{ ... }
+// This seems to be a limitation of the reflect package.
+// This goes for the other Go constructs too.
+// I guess we could get around it by either:
+// 1. Creating a new struct every time
+// 2. Creating an addressable? struct in the constructor
+
+// newGoStructObject wraps a reflected Go struct (or pointer-to-struct) as
+// a JS object (class "Object") with the GoStruct object-class hooks.
+func (runtime *_runtime) newGoStructObject(value reflect.Value) *_object {
+	self := runtime.newObject()
+	self.class = "Object" // TODO Should this be something else?
+	self.objectClass = _classGoStruct
+	self.value = _newGoStructObject(value)
+	return self
+}
+
+// _goStructObject holds the reflected struct value.
+type _goStructObject struct {
+	value reflect.Value
+}
+
+func _newGoStructObject(value reflect.Value) *_goStructObject {
+	if reflect.Indirect(value).Kind() != reflect.Struct {
+		dbgf("%/panic//%@: %v != reflect.Struct", value.Kind())
+	}
+	self := &_goStructObject{
+		value: value,
+	}
+	return self
+}
+
+// getValue resolves an exported field first, then a method, returning the
+// zero reflect.Value when neither exists or the name is unexported.
+func (self _goStructObject) getValue(name string) reflect.Value {
+	if validGoStructName(name) {
+		// Do not reveal hidden or unexported fields
+		if field := reflect.Indirect(self.value).FieldByName(name); (field != reflect.Value{}) {
+			return field
+		}
+
+		if method := self.value.MethodByName(name); (method != reflect.Value{}) {
+			return method
+		}
+	}
+
+	return reflect.Value{}
+}
+
+// field looks up the struct field descriptor by name.
+func (self _goStructObject) field(name string) (reflect.StructField, bool) {
+	return reflect.Indirect(self.value).Type().FieldByName(name)
+}
+
+// method looks up the method descriptor by name.
+func (self _goStructObject) method(name string) (reflect.Method, bool) {
+	return reflect.Indirect(self.value).Type().MethodByName(name)
+}
+
+// setValue converts the JS value to the field's kind and assigns it;
+// false when no such field exists. Conversion failures panic.
+func (self _goStructObject) setValue(name string, value Value) bool {
+	field, exists := self.field(name)
+	if !exists {
+		return false
+	}
+	fieldValue := self.getValue(name)
+	reflectValue, err := value.toReflectValue(field.Type.Kind())
+	if err != nil {
+		panic(err)
+	}
+	fieldValue.Set(reflectValue)
+
+	return true
+}
+
+// goStructGetOwnProperty reports an exported field or method as a writable
+// (0110) property, falling back to the ordinary object implementation.
+func goStructGetOwnProperty(self *_object, name string) *_property {
+	object := self.value.(*_goStructObject)
+	value := object.getValue(name)
+	if value.IsValid() {
+		return &_property{self.runtime.toValue(value.Interface()), 0110}
+	}
+
+	return objectGetOwnProperty(self, name)
+}
+
+// validGoStructName reports whether the name denotes an exported Go
+// identifier (leading ASCII uppercase letter).
+func validGoStructName(name string) bool {
+	if name == "" {
+		return false
+	}
+	return 'A' <= name[0] && name[0] <= 'Z' // TODO What about Unicode?
+}
+
+// goStructEnumerate yields exported field names, then exported method
+// names, then any extra ordinary properties.
+func goStructEnumerate(self *_object, all bool, each func(string) bool) {
+	object := self.value.(*_goStructObject)
+
+	// Enumerate fields
+	for index := 0; index < reflect.Indirect(object.value).NumField(); index++ {
+		name := reflect.Indirect(object.value).Type().Field(index).Name
+		if validGoStructName(name) {
+			if !each(name) {
+				return
+			}
+		}
+	}
+
+	// Enumerate methods
+	for index := 0; index < object.value.NumMethod(); index++ {
+		name := object.value.Type().Method(index).Name
+		if validGoStructName(name) {
+			if !each(name) {
+				return
+			}
+		}
+	}
+
+	objectEnumerate(self, all, each)
+}
+
+// goStructCanPut allows writes to any resolvable field/method name,
+// otherwise defers to the ordinary object check.
+func goStructCanPut(self *_object, name string) bool {
+	object := self.value.(*_goStructObject)
+	value := object.getValue(name)
+	if value.IsValid() {
+		return true
+	}
+
+	return objectCanPut(self, name)
+}
+
+// goStructPut writes through to a struct field when possible, otherwise
+// falls back to the ordinary object put.
+func goStructPut(self *_object, name string, value Value, throw bool) {
+	object := self.value.(*_goStructObject)
+	if object.setValue(name, value) {
+		return
+	}
+
+	objectPut(self, name, value, throw)
+}
+
+// goStructMarshalJSON exposes the struct's own json.Marshaler
+// implementation, if any, so JSON.stringify can use it.
+func goStructMarshalJSON(self *_object) json.Marshaler {
+	object := self.value.(*_goStructObject)
+	goValue := reflect.Indirect(object.value).Interface()
+	switch marshaler := goValue.(type) {
+	case json.Marshaler:
+		return marshaler
+	}
+	return nil
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_number.go b/vendor/github.com/robertkrimen/otto/type_number.go
new file mode 100644
index 000000000..28de4444c
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_number.go
@@ -0,0 +1,5 @@
+package otto
+
// newNumberObject creates a Number wrapper object whose primitive value is
// the numeric conversion of value.
func (runtime *_runtime) newNumberObject(value Value) *_object {
	return runtime.newPrimitiveObject("Number", value.numberValue())
}
diff --git a/vendor/github.com/robertkrimen/otto/type_reference.go b/vendor/github.com/robertkrimen/otto/type_reference.go
new file mode 100644
index 000000000..fd770c6f4
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_reference.go
@@ -0,0 +1,103 @@
+package otto
+
// _reference models an ECMAScript Reference (ES5 8.7): a resolvable
// binding that can be read, written, and deleted. putValue returns the
// name of an unresolvable reference, or "" on success.
type _reference interface {
	invalid() bool // IsUnresolvableReference
	getValue() Value // getValue
	putValue(Value) string // PutValue
	delete() bool
}
+
+// PropertyReference
+
// _propertyReference is a reference whose base is an object (nil base
// means the reference is unresolvable); reads and writes go through the
// object's property machinery. at records the source position for error
// reporting.
type _propertyReference struct {
	name string
	strict bool
	base *_object
	runtime *_runtime
	at _at
}
+
+func newPropertyReference(rt *_runtime, base *_object, name string, strict bool, at _at) *_propertyReference {
+ return &_propertyReference{
+ runtime: rt,
+ name: name,
+ strict: strict,
+ base: base,
+ at: at,
+ }
+}
+
// invalid reports whether this is an unresolvable reference (no base
// object).
func (self *_propertyReference) invalid() bool {
	return self.base == nil
}
+
+func (self *_propertyReference) getValue() Value {
+ if self.base == nil {
+ panic(self.runtime.panicReferenceError("'%s' is not defined", self.name, self.at))
+ }
+ return self.base.get(self.name)
+}
+
+func (self *_propertyReference) putValue(value Value) string {
+ if self.base == nil {
+ return self.name
+ }
+ self.base.put(self.name, value, self.strict)
+ return ""
+}
+
+func (self *_propertyReference) delete() bool {
+ if self.base == nil {
+ // TODO Throw an error if strict
+ return true
+ }
+ return self.base.delete(self.name, self.strict)
+}
+
+// ArgumentReference
+
+func newArgumentReference(runtime *_runtime, base *_object, name string, strict bool, at _at) *_propertyReference {
+ if base == nil {
+ panic(hereBeDragons())
+ }
+ return newPropertyReference(runtime, base, name, strict, at)
+}
+
// _stashReference is a reference into a lexical environment record
// (a stash) rather than an object.
type _stashReference struct {
	name string
	strict bool
	base _stash
}
+
// invalid reports whether the reference is unresolvable; a stash reference
// always resolves.
func (self *_stashReference) invalid() bool {
	return false // The base (an environment) will never be nil
}
+
// getValue reads the binding from the underlying environment record.
func (self *_stashReference) getValue() Value {
	return self.base.getBinding(self.name, self.strict)
}
+
// putValue writes the binding; the empty return means the reference was
// resolvable (it always is for a stash reference).
func (self *_stashReference) putValue(value Value) string {
	self.base.setValue(self.name, value, self.strict)
	return ""
}
+
// delete removes the binding from the stash, reporting success.
func (self *_stashReference) delete() bool {
	if self.base == nil {
		// This should never be reached, but just in case
		return false
	}
	return self.base.deleteBinding(self.name)
}
+
+// getIdentifierReference
+
+func getIdentifierReference(runtime *_runtime, stash _stash, name string, strict bool, at _at) _reference {
+ if stash == nil {
+ return newPropertyReference(runtime, nil, name, strict, at)
+ }
+ if stash.hasBinding(name) {
+ return stash.newReference(name, strict, at)
+ }
+ return getIdentifierReference(runtime, stash.outer(), name, strict, at)
+}
diff --git a/vendor/github.com/robertkrimen/otto/type_regexp.go b/vendor/github.com/robertkrimen/otto/type_regexp.go
new file mode 100644
index 000000000..57fe31640
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_regexp.go
@@ -0,0 +1,146 @@
+package otto
+
import (
	"fmt"
	"regexp"
	"strings"
	"unicode/utf8"

	"github.com/robertkrimen/otto/parser"
)
+
// _regExpObject is the internal state of a RegExp object: the compiled
// Go regexp plus the original JavaScript pattern/flags it was built from.
type _regExpObject struct {
	regularExpression *regexp.Regexp
	global bool
	ignoreCase bool
	multiline bool
	source string // original JavaScript pattern (pre-RE2 translation)
	flags string
}
+
+func (runtime *_runtime) newRegExpObject(pattern string, flags string) *_object {
+ self := runtime.newObject()
+ self.class = "RegExp"
+
+ global := false
+ ignoreCase := false
+ multiline := false
+ re2flags := ""
+
+ // TODO Maybe clean up the panicking here... TypeError, SyntaxError, ?
+
+ for _, chr := range flags {
+ switch chr {
+ case 'g':
+ if global {
+ panic(runtime.panicSyntaxError("newRegExpObject: %s %s", pattern, flags))
+ }
+ global = true
+ case 'm':
+ if multiline {
+ panic(runtime.panicSyntaxError("newRegExpObject: %s %s", pattern, flags))
+ }
+ multiline = true
+ re2flags += "m"
+ case 'i':
+ if ignoreCase {
+ panic(runtime.panicSyntaxError("newRegExpObject: %s %s", pattern, flags))
+ }
+ ignoreCase = true
+ re2flags += "i"
+ }
+ }
+
+ re2pattern, err := parser.TransformRegExp(pattern)
+ if err != nil {
+ panic(runtime.panicTypeError("Invalid regular expression: %s", err.Error()))
+ }
+ if len(re2flags) > 0 {
+ re2pattern = fmt.Sprintf("(?%s:%s)", re2flags, re2pattern)
+ }
+
+ regularExpression, err := regexp.Compile(re2pattern)
+ if err != nil {
+ panic(runtime.panicSyntaxError("Invalid regular expression: %s", err.Error()[22:]))
+ }
+
+ self.value = _regExpObject{
+ regularExpression: regularExpression,
+ global: global,
+ ignoreCase: ignoreCase,
+ multiline: multiline,
+ source: pattern,
+ flags: flags,
+ }
+ self.defineProperty("global", toValue_bool(global), 0, false)
+ self.defineProperty("ignoreCase", toValue_bool(ignoreCase), 0, false)
+ self.defineProperty("multiline", toValue_bool(multiline), 0, false)
+ self.defineProperty("lastIndex", toValue_int(0), 0100, false)
+ self.defineProperty("source", toValue_string(pattern), 0, false)
+ return self
+}
+
// regExpValue returns the object's _regExpObject state, or the zero value
// when the object is not a RegExp.
func (self *_object) regExpValue() _regExpObject {
	value, _ := self.value.(_regExpObject)
	return value
}
+
// execRegExp runs the RegExp object this against target, returning whether
// a match was found and the raw submatch index pairs as absolute byte
// offsets into target. For global regexps, matching starts at the
// "lastIndex" property, which is advanced past the match (or reset to 0 on
// failure), mirroring JavaScript RegExp.prototype.exec semantics.
func execRegExp(this *_object, target string) (match bool, result []int) {
	if this.class != "RegExp" {
		panic(this.runtime.panicTypeError("Calling RegExp.exec on a non-RegExp object"))
	}
	lastIndex := this.get("lastIndex").number().int64
	index := lastIndex
	global := this.get("global").bool()
	if !global {
		index = 0
	}
	// Only search when the start offset lies within the target string;
	// otherwise result stays nil and we take the no-match path below.
	if 0 > index || index > int64(len(target)) {
	} else {
		result = this.regExpValue().regularExpression.FindStringSubmatchIndex(target[index:])
	}
	if result == nil {
		//this.defineProperty("lastIndex", toValue_(0), 0111, true)
		this.put("lastIndex", toValue_int(0), true)
		return // !match
	}
	match = true
	startIndex := index
	// NOTE(review): endIndex mixes lastIndex with a slice-relative offset;
	// it is only consumed in the global case, where index == lastIndex, so
	// the value is correct there.
	endIndex := int(lastIndex) + result[1]
	// We do this shift here because the .FindStringSubmatchIndex above
	// was done on a local subordinate slice of the string, not the whole string
	for index, _ := range result {
		result[index] += int(startIndex)
	}
	if global {
		//this.defineProperty("lastIndex", toValue_(endIndex), 0111, true)
		this.put("lastIndex", toValue_int(endIndex), true)
	}
	return // match
}
+
// execResultToArray converts execRegExp output into a JavaScript match
// array: element 0 is the full match, subsequent elements the capture
// groups (undefined for non-participating groups), plus the
// non-enumerable "input" and "index" properties. "index" is measured in
// runes, not bytes.
func execResultToArray(runtime *_runtime, target string, result []int) *_object {
	captureCount := len(result) / 2
	valueArray := make([]Value, captureCount)
	for index := 0; index < captureCount; index++ {
		offset := 2 * index
		if result[offset] != -1 {
			valueArray[index] = toValue_string(target[result[offset]:result[offset+1]])
		} else {
			// Group did not participate in the match: undefined.
			valueArray[index] = Value{}
		}
	}
	matchIndex := result[0]
	if matchIndex != 0 {
		matchIndex = 0
		// Find the rune index in the string, not the byte index
		for index := 0; index < result[0]; {
			_, size := utf8.DecodeRuneInString(target[index:])
			matchIndex += 1
			index += size
		}
	}
	match := runtime.newArrayOf(valueArray)
	match.defineProperty("input", toValue_string(target), 0111, false)
	match.defineProperty("index", toValue_int(matchIndex), 0111, false)
	return match
}
diff --git a/vendor/github.com/robertkrimen/otto/type_string.go b/vendor/github.com/robertkrimen/otto/type_string.go
new file mode 100644
index 000000000..ef3afa42b
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/type_string.go
@@ -0,0 +1,112 @@
+package otto
+
+import (
+ "strconv"
+ "unicode/utf8"
+)
+
// _stringObject abstracts character-indexed access to a string. Length and
// At operate in characters (runes), not bytes; implementations pick the
// cheapest representation (see _stringASCII and _stringWide).
type _stringObject interface {
	Length() int
	At(int) rune
	String() string
}
+
// _stringASCII is the _stringObject implementation for pure-ASCII strings,
// where byte index and rune index coincide.
type _stringASCII string

// Length returns the number of characters (== bytes) in the string.
func (s _stringASCII) Length() int {
	return len(s)
}

// At returns the character at the given index.
func (s _stringASCII) At(at int) rune {
	return rune(s[at])
}

// String returns the plain string form.
func (s _stringASCII) String() string {
	return string(s)
}
+
// _stringWide is the _stringObject implementation for strings containing
// at least one non-ASCII byte. Character access is by rune index, with the
// decoded []rune slice built lazily on first use.
//
// The methods use a pointer receiver so the lazily decoded rune cache is
// actually retained: with the original value receivers, the assignment in
// At mutated a copy and the cache was rebuilt (and discarded) on every
// call. The type is only ever constructed as &_stringWide{...} (see
// _newStringObject), so the pointer method set satisfies _stringObject.
type _stringWide struct {
	string string
	length int    // length in runes, not bytes
	runes  []rune // lazily populated rune cache; nil until first At
}

// Length returns the length of the string in runes.
func (str *_stringWide) Length() int {
	return str.length
}

// At returns the rune at the given rune index, decoding (and caching) the
// rune slice on first use.
func (str *_stringWide) At(at int) rune {
	if str.runes == nil {
		str.runes = []rune(str.string)
	}
	return str.runes[at]
}

// String returns the underlying string.
func (str *_stringWide) String() string {
	return str.string
}
+
+func _newStringObject(str string) _stringObject {
+ for i := 0; i < len(str); i++ {
+ if str[i] >= utf8.RuneSelf {
+ goto wide
+ }
+ }
+
+ return _stringASCII(str)
+
+wide:
+ return &_stringWide{
+ string: str,
+ length: utf8.RuneCountInString(str),
+ }
+}
+
+func stringAt(str _stringObject, index int) rune {
+ if 0 <= index && index < str.Length() {
+ return str.At(index)
+ }
+ return utf8.RuneError
+}
+
// newStringObject builds a String wrapper object for value, with a
// read-only "length" property measured in characters (runes).
func (runtime *_runtime) newStringObject(value Value) *_object {
	str := _newStringObject(value.string())

	self := runtime.newClassObject("String")
	self.defineProperty("length", toValue_int(str.Length()), 0, false)
	self.objectClass = _classString
	self.value = str
	return self
}
+
// stringValue returns the object's _stringObject payload, or nil when the
// object is not a String wrapper.
func (self *_object) stringValue() _stringObject {
	if str, ok := self.value.(_stringObject); ok {
		return str
	}
	return nil
}
+
+func stringEnumerate(self *_object, all bool, each func(string) bool) {
+ if str := self.stringValue(); str != nil {
+ length := str.Length()
+ for index := 0; index < length; index++ {
+ if !each(strconv.FormatInt(int64(index), 10)) {
+ return
+ }
+ }
+ }
+ objectEnumerate(self, all, each)
+}
+
// stringGetOwnProperty looks up a property on a String object. Ordinary
// properties win; otherwise, when name parses as an array index within the
// string, a read-only (mode 0) single-character property is synthesized.
func stringGetOwnProperty(self *_object, name string) *_property {
	if property := objectGetOwnProperty(self, name); property != nil {
		return property
	}
	// TODO Test a string of length >= +int32 + 1?
	if index := stringToArrayIndex(name); index >= 0 {
		if chr := stringAt(self.stringValue(), int(index)); chr != utf8.RuneError {
			return &_property{toValue_string(string(chr)), 0}
		}
	}
	return nil
}
diff --git a/vendor/github.com/robertkrimen/otto/value.go b/vendor/github.com/robertkrimen/otto/value.go
new file mode 100644
index 000000000..90c140604
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/value.go
@@ -0,0 +1,1033 @@
+package otto
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "unicode/utf16"
+)
+
// _valueKind discriminates the representation stored in Value.value.
type _valueKind int
+
// The value kinds. Order matters: Value.safe treats every kind from
// valueEmpty onward as runtime-internal.
const (
	valueUndefined _valueKind = iota
	valueNull
	valueNumber
	valueString
	valueBoolean
	valueObject

	// These are invalid outside of the runtime
	valueEmpty
	valueResult
	valueReference
)
+
// Value is the representation of a JavaScript value.
type Value struct {
	kind  _valueKind
	value interface{} // payload; the concrete Go type depends on kind
}
+
// safe reports whether the value may be exposed outside the runtime, i.e.
// it is not one of the internal empty/result/reference kinds.
func (value Value) safe() bool {
	return value.kind < valueEmpty
}
+
// Shared singletons for the common constant values.
var (
	emptyValue = Value{kind: valueEmpty}
	nullValue  = Value{kind: valueNull}
	falseValue = Value{kind: valueBoolean, value: false}
	trueValue  = Value{kind: valueBoolean, value: true}
)
+
+// ToValue will convert an interface{} value to a value digestible by otto/JavaScript
+//
+// This function will not work for advanced types (struct, map, slice/array, etc.) and
+// you should use Otto.ToValue instead.
+func ToValue(value interface{}) (Value, error) {
+ result := Value{}
+ err := catchPanic(func() {
+ result = toValue(value)
+ })
+ return result, err
+}
+
// isEmpty reports whether this is the runtime-internal "empty" value.
func (value Value) isEmpty() bool {
	return value.kind == valueEmpty
}
+
+// Undefined
+
// UndefinedValue will return a Value representing undefined.
//
// The zero Value is also undefined.
func UndefinedValue() Value {
	return Value{}
}
+
// IsDefined will return false if the value is undefined, and true otherwise.
// Note that null counts as defined.
func (value Value) IsDefined() bool {
	return value.kind != valueUndefined
}
+
// IsUndefined will return true if the value is undefined, and false otherwise.
func (value Value) IsUndefined() bool {
	return value.kind == valueUndefined
}
+
// NullValue will return a Value representing null.
func NullValue() Value {
	return Value{kind: valueNull}
}
+
// IsNull will return true if the value is null, and false otherwise.
func (value Value) IsNull() bool {
	return value.kind == valueNull
}
+
+// ---
+
+func (value Value) isCallable() bool {
+ switch value := value.value.(type) {
+ case *_object:
+ return value.isCall()
+ }
+ return false
+}
+
+// Call the value as a function with the given this value and argument list and
+// return the result of invocation. It is essentially equivalent to:
+//
+// value.apply(thisValue, argumentList)
+//
+// An undefined value and an error will result if:
+//
+// 1. There is an error during conversion of the argument list
+// 2. The value is not actually a function
+// 3. An (uncaught) exception is thrown
+//
+func (value Value) Call(this Value, argumentList ...interface{}) (Value, error) {
+ result := Value{}
+ err := catchPanic(func() {
+ // FIXME
+ result = value.call(nil, this, argumentList...)
+ })
+ if !value.safe() {
+ value = Value{}
+ }
+ return result, err
+}
+
+func (value Value) call(rt *_runtime, this Value, argumentList ...interface{}) Value {
+ switch function := value.value.(type) {
+ case *_object:
+ return function.call(this, function.runtime.toValueArray(argumentList...), false, nativeFrame)
+ }
+ if rt == nil {
+ panic("FIXME TypeError")
+ }
+ panic(rt.panicTypeError())
+}
+
+func (value Value) constructSafe(rt *_runtime, this Value, argumentList ...interface{}) (Value, error) {
+ result := Value{}
+ err := catchPanic(func() {
+ result = value.construct(rt, this, argumentList...)
+ })
+ return result, err
+}
+
+func (value Value) construct(rt *_runtime, this Value, argumentList ...interface{}) Value {
+ switch fn := value.value.(type) {
+ case *_object:
+ return fn.construct(fn.runtime.toValueArray(argumentList...))
+ }
+ if rt == nil {
+ panic("FIXME TypeError")
+ }
+ panic(rt.panicTypeError())
+}
+
// IsPrimitive will return true if value is a primitive (any kind of primitive).
func (value Value) IsPrimitive() bool {
	return !value.IsObject()
}
+
// IsBoolean will return true if value is a boolean (primitive).
func (value Value) IsBoolean() bool {
	return value.kind == valueBoolean
}
+
// IsNumber will return true if value is a number (primitive).
func (value Value) IsNumber() bool {
	return value.kind == valueNumber
}
+
+// IsNaN will return true if value is NaN (or would convert to NaN).
+func (value Value) IsNaN() bool {
+ switch value := value.value.(type) {
+ case float64:
+ return math.IsNaN(value)
+ case float32:
+ return math.IsNaN(float64(value))
+ case int, int8, int32, int64:
+ return false
+ case uint, uint8, uint32, uint64:
+ return false
+ }
+
+ return math.IsNaN(value.float64())
+}
+
// IsString will return true if value is a string (primitive).
func (value Value) IsString() bool {
	return value.kind == valueString
}
+
// IsObject will return true if value is an object.
func (value Value) IsObject() bool {
	return value.kind == valueObject
}
+
+// IsFunction will return true if value is a function.
+func (value Value) IsFunction() bool {
+ if value.kind != valueObject {
+ return false
+ }
+ return value.value.(*_object).class == "Function"
+}
+
// Class will return the class string of the value or the empty string if value is not an object.
//
// The return value will (generally) be one of:
//
//	Object
//	Function
//	Array
//	String
//	Number
//	Boolean
//	Date
//	RegExp
//
func (value Value) Class() string {
	if value.kind != valueObject {
		return ""
	}
	return value.value.(*_object).class
}
+
+func (value Value) isArray() bool {
+ if value.kind != valueObject {
+ return false
+ }
+ return isArray(value.value.(*_object))
+}
+
+func (value Value) isStringObject() bool {
+ if value.kind != valueObject {
+ return false
+ }
+ return value.value.(*_object).class == "String"
+}
+
+func (value Value) isBooleanObject() bool {
+ if value.kind != valueObject {
+ return false
+ }
+ return value.value.(*_object).class == "Boolean"
+}
+
+func (value Value) isNumberObject() bool {
+ if value.kind != valueObject {
+ return false
+ }
+ return value.value.(*_object).class == "Number"
+}
+
+func (value Value) isDate() bool {
+ if value.kind != valueObject {
+ return false
+ }
+ return value.value.(*_object).class == "Date"
+}
+
+func (value Value) isRegExp() bool {
+ if value.kind != valueObject {
+ return false
+ }
+ return value.value.(*_object).class == "RegExp"
+}
+
+func (value Value) isError() bool {
+ if value.kind != valueObject {
+ return false
+ }
+ return value.value.(*_object).class == "Error"
+}
+
+// ---
+
// toValue_reflectValuePanic raises a TypeError for kinds (struct, map,
// slice) that cannot be converted without a runtime; any other kind falls
// through so the caller can raise its own generic error.
func toValue_reflectValuePanic(value interface{}, kind reflect.Kind) {
	// FIXME?
	switch kind {
	case reflect.Struct:
		panic(newError(nil, "TypeError", 0, "invalid value (struct): missing runtime: %v (%T)", value, value))
	case reflect.Map:
		panic(newError(nil, "TypeError", 0, "invalid value (map): missing runtime: %v (%T)", value, value))
	case reflect.Slice:
		panic(newError(nil, "TypeError", 0, "invalid value (slice): missing runtime: %v (%T)", value, value))
	}
}
+
// toValue converts a Go (or otto) value into a Value, panicking with a
// TypeError when no conversion exists. Pointers (arriving as a
// reflect.Value) are dereferenced until a non-pointer is reached; a nil
// pointer becomes undefined.
func toValue(value interface{}) Value {
	switch value := value.(type) {
	case Value:
		return value
	case bool:
		return Value{valueBoolean, value}
	case int:
		return Value{valueNumber, value}
	case int8:
		return Value{valueNumber, value}
	case int16:
		return Value{valueNumber, value}
	case int32:
		return Value{valueNumber, value}
	case int64:
		return Value{valueNumber, value}
	case uint:
		return Value{valueNumber, value}
	case uint8:
		return Value{valueNumber, value}
	case uint16:
		return Value{valueNumber, value}
	case uint32:
		return Value{valueNumber, value}
	case uint64:
		return Value{valueNumber, value}
	case float32:
		// NOTE(review): a direct float32 is widened to float64 here, but
		// the reflect.Float32 case below stores float32 — confirm whether
		// that asymmetry is intentional.
		return Value{valueNumber, float64(value)}
	case float64:
		return Value{valueNumber, value}
	case []uint16:
		// A string represented as UTF-16 code units.
		return Value{valueString, value}
	case string:
		return Value{valueString, value}
	// A rune is actually an int32, which is handled above
	case *_object:
		return Value{valueObject, value}
	case *Object:
		return Value{valueObject, value.object}
	case Object:
		return Value{valueObject, value.object}
	case _reference: // reference is an interface (already a pointer)
		return Value{valueReference, value}
	case _result:
		return Value{valueResult, value}
	case nil:
		// TODO Ugh.
		return Value{}
	case reflect.Value:
		for value.Kind() == reflect.Ptr {
			// We were given a pointer, so we'll drill down until we get a non-pointer
			//
			// These semantics might change if we want to start supporting pointers to values transparently
			// (It would be best not to depend on this behavior)
			// FIXME: UNDEFINED
			if value.IsNil() {
				return Value{}
			}
			value = value.Elem()
		}
		switch value.Kind() {
		case reflect.Bool:
			return Value{valueBoolean, bool(value.Bool())}
		case reflect.Int:
			return Value{valueNumber, int(value.Int())}
		case reflect.Int8:
			return Value{valueNumber, int8(value.Int())}
		case reflect.Int16:
			return Value{valueNumber, int16(value.Int())}
		case reflect.Int32:
			return Value{valueNumber, int32(value.Int())}
		case reflect.Int64:
			return Value{valueNumber, int64(value.Int())}
		case reflect.Uint:
			return Value{valueNumber, uint(value.Uint())}
		case reflect.Uint8:
			return Value{valueNumber, uint8(value.Uint())}
		case reflect.Uint16:
			return Value{valueNumber, uint16(value.Uint())}
		case reflect.Uint32:
			return Value{valueNumber, uint32(value.Uint())}
		case reflect.Uint64:
			return Value{valueNumber, uint64(value.Uint())}
		case reflect.Float32:
			return Value{valueNumber, float32(value.Float())}
		case reflect.Float64:
			return Value{valueNumber, float64(value.Float())}
		case reflect.String:
			return Value{valueString, string(value.String())}
		default:
			// Struct/map/slice need a runtime; panic with a TypeError.
			toValue_reflectValuePanic(value.Interface(), value.Kind())
		}
	default:
		// Everything else is retried through reflection.
		return toValue(reflect.ValueOf(value))
	}
	// FIXME?
	panic(newError(nil, "TypeError", 0, "invalid value: %v (%T)", value, value))
}
+
// String will return the value as a string.
//
// This method will return the empty string if there is an error.
func (value Value) String() string {
	result := ""
	// The conversion error (if any) is deliberately discarded.
	catchPanic(func() {
		result = value.string()
	})
	return result
}
+
// ToBoolean will convert the value to a boolean (bool).
//
//	ToValue(0).ToBoolean() => false
//	ToValue("").ToBoolean() => false
//	ToValue(true).ToBoolean() => true
//	ToValue(1).ToBoolean() => true
//	ToValue("Nothing happens").ToBoolean() => true
//
// If there is an error during the conversion process (like an uncaught exception), then the result will be false and an error.
func (value Value) ToBoolean() (bool, error) {
	result := false
	err := catchPanic(func() {
		result = value.bool()
	})
	return result, err
}
+
// numberValue returns the value unchanged when it is already a number;
// otherwise a new number Value holding the float64 conversion.
func (value Value) numberValue() Value {
	if value.kind == valueNumber {
		return value
	}
	return Value{valueNumber, value.float64()}
}
+
// ToFloat will convert the value to a number (float64).
//
//	ToValue(0).ToFloat() => 0.
//	ToValue(1.1).ToFloat() => 1.1
//	ToValue("11").ToFloat() => 11.
//
// If there is an error during the conversion process (like an uncaught exception), then the result will be 0 and an error.
func (value Value) ToFloat() (float64, error) {
	result := float64(0)
	err := catchPanic(func() {
		result = value.float64()
	})
	return result, err
}
+
// ToInteger will convert the value to a number (int64).
//
//	ToValue(0).ToInteger() => 0
//	ToValue(1.1).ToInteger() => 1
//	ToValue("11").ToInteger() => 11
//
// If there is an error during the conversion process (like an uncaught exception), then the result will be 0 and an error.
func (value Value) ToInteger() (int64, error) {
	result := int64(0)
	err := catchPanic(func() {
		result = value.number().int64
	})
	return result, err
}
+
// ToString will convert the value to a string (string).
//
//	ToValue(0).ToString() => "0"
//	ToValue(false).ToString() => "false"
//	ToValue(1.1).ToString() => "1.1"
//	ToValue("11").ToString() => "11"
//	ToValue('Nothing happens.').ToString() => "Nothing happens."
//
// If there is an error during the conversion process (like an uncaught exception), then the result will be the empty string ("") and an error.
func (value Value) ToString() (string, error) {
	result := ""
	err := catchPanic(func() {
		result = value.string()
	})
	return result, err
}
+
+func (value Value) _object() *_object {
+ switch value := value.value.(type) {
+ case *_object:
+ return value
+ }
+ return nil
+}
+
// Object will return the object of the value, or nil if value is not an object.
//
// This method will not do any implicit conversion. For example, calling this method on a string primitive value will not return a String object.
func (value Value) Object() *Object {
	switch object := value.value.(type) {
	case *_object:
		return _newObject(object, value)
	}
	return nil
}
+
+func (value Value) reference() _reference {
+ switch value := value.value.(type) {
+ case _reference:
+ return value
+ }
+ return nil
+}
+
// resolve dereferences a reference value to the value it points at;
// non-reference values are returned unchanged.
func (value Value) resolve() Value {
	switch value := value.value.(type) {
	case _reference:
		return value.getValue()
	}
	return value
}
+
// IEEE-754 special values used throughout the numeric conversions.
// __NegativeZero__ is built from raw bits: sign bit set, all else clear.
var (
	__NaN__              float64 = math.NaN()
	__PositiveInfinity__ float64 = math.Inf(+1)
	__NegativeInfinity__ float64 = math.Inf(-1)
	__PositiveZero__     float64 = 0
	__NegativeZero__     float64 = math.Float64frombits(0 | (1 << 63))
)
+
// positiveInfinity returns the +Inf constant.
func positiveInfinity() float64 {
	return __PositiveInfinity__
}
+
// negativeInfinity returns the -Inf constant.
func negativeInfinity() float64 {
	return __NegativeInfinity__
}
+
// positiveZero returns the +0 constant.
func positiveZero() float64 {
	return __PositiveZero__
}
+
// negativeZero returns the -0 constant.
func negativeZero() float64 {
	return __NegativeZero__
}
+
// NaNValue will return a value representing NaN.
//
// It is equivalent to:
//
//	ToValue(math.NaN())
//
func NaNValue() Value {
	return Value{valueNumber, __NaN__}
}
+
// positiveInfinityValue returns a number Value holding +Inf.
func positiveInfinityValue() Value {
	return Value{valueNumber, __PositiveInfinity__}
}
+
// negativeInfinityValue returns a number Value holding -Inf.
func negativeInfinityValue() Value {
	return Value{valueNumber, __NegativeInfinity__}
}
+
// positiveZeroValue returns a number Value holding +0.
func positiveZeroValue() Value {
	return Value{valueNumber, __PositiveZero__}
}
+
// negativeZeroValue returns a number Value holding -0.
func negativeZeroValue() Value {
	return Value{valueNumber, __NegativeZero__}
}
+
// TrueValue will return a value representing true.
//
// It is equivalent to:
//
//	ToValue(true)
//
func TrueValue() Value {
	return Value{valueBoolean, true}
}
+
// FalseValue will return a value representing false.
//
// It is equivalent to:
//
//	ToValue(false)
//
func FalseValue() Value {
	return Value{valueBoolean, false}
}
+
+func sameValue(x Value, y Value) bool {
+ if x.kind != y.kind {
+ return false
+ }
+ result := false
+ switch x.kind {
+ case valueUndefined, valueNull:
+ result = true
+ case valueNumber:
+ x := x.float64()
+ y := y.float64()
+ if math.IsNaN(x) && math.IsNaN(y) {
+ result = true
+ } else {
+ result = x == y
+ if result && x == 0 {
+ // Since +0 != -0
+ result = math.Signbit(x) == math.Signbit(y)
+ }
+ }
+ case valueString:
+ result = x.string() == y.string()
+ case valueBoolean:
+ result = x.bool() == y.bool()
+ case valueObject:
+ result = x._object() == y._object()
+ default:
+ panic(hereBeDragons())
+ }
+
+ return result
+}
+
+func strictEqualityComparison(x Value, y Value) bool {
+ if x.kind != y.kind {
+ return false
+ }
+ result := false
+ switch x.kind {
+ case valueUndefined, valueNull:
+ result = true
+ case valueNumber:
+ x := x.float64()
+ y := y.float64()
+ if math.IsNaN(x) && math.IsNaN(y) {
+ result = false
+ } else {
+ result = x == y
+ }
+ case valueString:
+ result = x.string() == y.string()
+ case valueBoolean:
+ result = x.bool() == y.bool()
+ case valueObject:
+ result = x._object() == y._object()
+ default:
+ panic(hereBeDragons())
+ }
+
+ return result
+}
+
// Export will attempt to convert the value to a Go representation
// and return it via an interface{} kind.
//
// Export returns an error, but it will always be nil. It is present
// for backwards compatibility.
//
// If a reasonable conversion is not possible, then the original
// value is returned.
//
//	undefined   -> nil (FIXME?: Should be Value{})
//	null        -> nil
//	boolean     -> bool
//	number      -> A number type (int, float32, uint64, ...)
//	string      -> string
//	Array       -> []interface{}
//	Object      -> map[string]interface{}
//
func (self Value) Export() (interface{}, error) {
	// export does the real work and never fails; the error slot exists
	// purely for API compatibility.
	return self.export(), nil
}
+
// export converts the value to a native Go representation: primitives to
// their Go counterparts, go-wrapped objects to their original Go values,
// Arrays to []interface{} (or a typed slice when every element shares one
// concrete type), and any other object to map[string]interface{}.
// Unexportable (unsafe) values come back as the zero Value.
func (self Value) export() interface{} {

	switch self.kind {
	case valueUndefined:
		return nil
	case valueNull:
		return nil
	case valueNumber, valueBoolean:
		return self.value
	case valueString:
		switch value := self.value.(type) {
		case string:
			return value
		case []uint16:
			// UTF-16 representation; decode back to a Go string.
			return string(utf16.Decode(value))
		}
	case valueObject:
		object := self._object()
		// Objects wrapping native Go values export the original value.
		switch value := object.value.(type) {
		case *_goStructObject:
			return value.value.Interface()
		case *_goMapObject:
			return value.value.Interface()
		case *_goArrayObject:
			return value.value.Interface()
		case *_goSliceObject:
			return value.value.Interface()
		}
		if object.class == "Array" {
			result := make([]interface{}, 0)
			lengthValue := object.get("length")
			// NOTE(review): assumes the Array "length" property is always
			// stored as a uint32; any other representation panics here.
			length := lengthValue.value.(uint32)
			// state: 0 = no elements yet, 1 = single common kind so far,
			// 2 = mixed kinds (no common element type).
			kind := reflect.Invalid
			state := 0
			var t reflect.Type
			for index := uint32(0); index < length; index += 1 {
				name := strconv.FormatInt(int64(index), 10)
				if !object.hasProperty(name) {
					continue
				}
				value := object.get(name).export()

				t = reflect.TypeOf(value)

				var k reflect.Kind
				if t != nil {
					k = t.Kind()
				}

				if state == 0 {
					kind = k
					state = 1
				} else if state == 1 && kind != k {
					state = 2
				}

				result = append(result, value)
			}

			if state != 1 || kind == reflect.Interface || t == nil {
				// No common type
				return result
			}

			// Convert to the common type
			val := reflect.MakeSlice(reflect.SliceOf(t), len(result), len(result))
			for i, v := range result {
				val.Index(i).Set(reflect.ValueOf(v))
			}
			return val.Interface()
		} else {
			result := make(map[string]interface{})
			// TODO Should we export everything? Or just what is enumerable?
			object.enumerate(false, func(name string) bool {
				value := object.get(name)
				if value.IsDefined() {
					result[name] = value.export()
				}
				return true
			})
			return result
		}
	}

	if self.safe() {
		return self
	}

	return Value{}
}
+
// evaluateBreakContinue matches a break/continue result against the given
// label set: the result kind is returned when it targets one of labels;
// otherwise resultReturn, meaning the result should propagate further out.
func (self Value) evaluateBreakContinue(labels []string) _resultKind {
	result := self.value.(_result)
	if result.kind == resultBreak || result.kind == resultContinue {
		for _, label := range labels {
			if label == result.target {
				return result.kind
			}
		}
	}
	return resultReturn
}
+
// evaluateBreak is evaluateBreakContinue restricted to break results:
// resultBreak is returned when it targets one of labels; anything else
// (including continue) yields resultReturn and propagates outward.
func (self Value) evaluateBreak(labels []string) _resultKind {
	result := self.value.(_result)
	if result.kind == resultBreak {
		for _, label := range labels {
			if label == result.target {
				return result.kind
			}
		}
	}
	return resultReturn
}
+
// exportNative is a shallow variant of export: primitives and go-wrapped
// objects convert to Go values, but plain JavaScript objects/arrays — and,
// unlike export, undefined — are returned as the Value itself.
func (self Value) exportNative() interface{} {

	switch self.kind {
	case valueUndefined:
		return self
	case valueNull:
		return nil
	case valueNumber, valueBoolean:
		return self.value
	case valueString:
		switch value := self.value.(type) {
		case string:
			return value
		case []uint16:
			// UTF-16 representation; decode back to a Go string.
			return string(utf16.Decode(value))
		}
	case valueObject:
		object := self._object()
		switch value := object.value.(type) {
		case *_goStructObject:
			return value.value.Interface()
		case *_goMapObject:
			return value.value.Interface()
		case *_goArrayObject:
			return value.value.Interface()
		case *_goSliceObject:
			return value.value.Interface()
		}
	}

	return self
}
+
+// Make a best effort to return a reflect.Value corresponding to reflect.Kind, but
+// fallback to just returning the Go value we have handy.
+func (value Value) toReflectValue(kind reflect.Kind) (reflect.Value, error) {
+ if kind != reflect.Float32 && kind != reflect.Float64 && kind != reflect.Interface {
+ switch value := value.value.(type) {
+ case float32:
+ _, frac := math.Modf(float64(value))
+ if frac > 0 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %v to reflect.Kind: %v", value, kind)
+ }
+ case float64:
+ _, frac := math.Modf(value)
+ if frac > 0 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %v to reflect.Kind: %v", value, kind)
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Bool: // Bool
+ return reflect.ValueOf(value.bool()), nil
+ case reflect.Int: // Int
+ // We convert to float64 here because converting to int64 will not tell us
+ // if a value is outside the range of int64
+ tmp := toIntegerFloat(value)
+ if tmp < float_minInt || tmp > float_maxInt {
+ return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to int", tmp, value)
+ } else {
+ return reflect.ValueOf(int(tmp)), nil
+ }
+ case reflect.Int8: // Int8
+ tmp := value.number().int64
+ if tmp < int64_minInt8 || tmp > int64_maxInt8 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to int8", tmp, value)
+ } else {
+ return reflect.ValueOf(int8(tmp)), nil
+ }
+ case reflect.Int16: // Int16
+ tmp := value.number().int64
+ if tmp < int64_minInt16 || tmp > int64_maxInt16 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to int16", tmp, value)
+ } else {
+ return reflect.ValueOf(int16(tmp)), nil
+ }
+ case reflect.Int32: // Int32
+ tmp := value.number().int64
+ if tmp < int64_minInt32 || tmp > int64_maxInt32 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to int32", tmp, value)
+ } else {
+ return reflect.ValueOf(int32(tmp)), nil
+ }
+ case reflect.Int64: // Int64
+ // We convert to float64 here because converting to int64 will not tell us
+ // if a value is outside the range of int64
+ tmp := toIntegerFloat(value)
+ if tmp < float_minInt64 || tmp > float_maxInt64 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to int", tmp, value)
+ } else {
+ return reflect.ValueOf(int64(tmp)), nil
+ }
+ case reflect.Uint: // Uint
+ // We convert to float64 here because converting to int64 will not tell us
+ // if a value is outside the range of uint
+ tmp := toIntegerFloat(value)
+ if tmp < 0 || tmp > float_maxUint {
+ return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to uint", tmp, value)
+ } else {
+ return reflect.ValueOf(uint(tmp)), nil
+ }
+ case reflect.Uint8: // Uint8
+ tmp := value.number().int64
+ if tmp < 0 || tmp > int64_maxUint8 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to uint8", tmp, value)
+ } else {
+ return reflect.ValueOf(uint8(tmp)), nil
+ }
+ case reflect.Uint16: // Uint16
+ tmp := value.number().int64
+ if tmp < 0 || tmp > int64_maxUint16 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to uint16", tmp, value)
+ } else {
+ return reflect.ValueOf(uint16(tmp)), nil
+ }
+ case reflect.Uint32: // Uint32
+ tmp := value.number().int64
+ if tmp < 0 || tmp > int64_maxUint32 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %d (%v) to uint32", tmp, value)
+ } else {
+ return reflect.ValueOf(uint32(tmp)), nil
+ }
+ case reflect.Uint64: // Uint64
+ // We convert to float64 here because converting to int64 will not tell us
+ // if a value is outside the range of uint64
+ tmp := toIntegerFloat(value)
+ if tmp < 0 || tmp > float_maxUint64 {
+ return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to uint64", tmp, value)
+ } else {
+ return reflect.ValueOf(uint64(tmp)), nil
+ }
+ case reflect.Float32: // Float32
+ tmp := value.float64()
+ tmp1 := tmp
+ if 0 > tmp1 {
+ tmp1 = -tmp1
+ }
+ if tmp1 > 0 && (tmp1 < math.SmallestNonzeroFloat32 || tmp1 > math.MaxFloat32) {
+ return reflect.Value{}, fmt.Errorf("RangeError: %f (%v) to float32", tmp, value)
+ } else {
+ return reflect.ValueOf(float32(tmp)), nil
+ }
+ case reflect.Float64: // Float64
+ value := value.float64()
+ return reflect.ValueOf(float64(value)), nil
+ case reflect.String: // String
+ return reflect.ValueOf(value.string()), nil
+ case reflect.Invalid: // Invalid
+ case reflect.Complex64: // FIXME? Complex64
+ case reflect.Complex128: // FIXME? Complex128
+ case reflect.Chan: // FIXME? Chan
+ case reflect.Func: // FIXME? Func
+ case reflect.Ptr: // FIXME? Ptr
+ case reflect.UnsafePointer: // FIXME? UnsafePointer
+ default:
+ switch value.kind {
+ case valueObject:
+ object := value._object()
+ switch vl := object.value.(type) {
+ case *_goStructObject: // Struct
+ return reflect.ValueOf(vl.value.Interface()), nil
+ case *_goMapObject: // Map
+ return reflect.ValueOf(vl.value.Interface()), nil
+ case *_goArrayObject: // Array
+ return reflect.ValueOf(vl.value.Interface()), nil
+ case *_goSliceObject: // Slice
+ return reflect.ValueOf(vl.value.Interface()), nil
+ }
+ return reflect.ValueOf(value.exportNative()), nil
+ case valueEmpty, valueResult, valueReference:
+ // These are invalid, and should panic
+ default:
+ return reflect.ValueOf(value.value), nil
+ }
+ }
+
+ // FIXME Should this end up as a TypeError?
+ panic(fmt.Errorf("invalid conversion of %v (%v) to reflect.Kind: %v", value.kind, value, kind))
+}
+
+func stringToReflectValue(value string, kind reflect.Kind) (reflect.Value, error) {
+ switch kind {
+ case reflect.Bool:
+ value, err := strconv.ParseBool(value)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(value), nil
+ case reflect.Int:
+ value, err := strconv.ParseInt(value, 0, 0)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(int(value)), nil
+ case reflect.Int8:
+ value, err := strconv.ParseInt(value, 0, 8)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(int8(value)), nil
+ case reflect.Int16:
+ value, err := strconv.ParseInt(value, 0, 16)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(int16(value)), nil
+ case reflect.Int32:
+ value, err := strconv.ParseInt(value, 0, 32)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(int32(value)), nil
+ case reflect.Int64:
+ value, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(int64(value)), nil
+ case reflect.Uint:
+ value, err := strconv.ParseUint(value, 0, 0)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(uint(value)), nil
+ case reflect.Uint8:
+ value, err := strconv.ParseUint(value, 0, 8)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(uint8(value)), nil
+ case reflect.Uint16:
+ value, err := strconv.ParseUint(value, 0, 16)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(uint16(value)), nil
+ case reflect.Uint32:
+ value, err := strconv.ParseUint(value, 0, 32)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(uint32(value)), nil
+ case reflect.Uint64:
+ value, err := strconv.ParseUint(value, 0, 64)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(uint64(value)), nil
+ case reflect.Float32:
+ value, err := strconv.ParseFloat(value, 32)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(float32(value)), nil
+ case reflect.Float64:
+ value, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ return reflect.ValueOf(float64(value)), nil
+ case reflect.String:
+ return reflect.ValueOf(value), nil
+ }
+
+ // FIXME This should end up as a TypeError?
+ panic(fmt.Errorf("invalid conversion of %q to reflect.Kind: %v", value, kind))
+}
diff --git a/vendor/github.com/robertkrimen/otto/value_boolean.go b/vendor/github.com/robertkrimen/otto/value_boolean.go
new file mode 100644
index 000000000..3040f4163
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/value_boolean.go
@@ -0,0 +1,40 @@
+package otto
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+)
+
+func (value Value) bool() bool {
+ if value.kind == valueBoolean {
+ return value.value.(bool)
+ }
+ if value.IsUndefined() {
+ return false
+ }
+ if value.IsNull() {
+ return false
+ }
+ switch value := value.value.(type) {
+ case bool:
+ return value
+ case int, int8, int16, int32, int64:
+ return 0 != reflect.ValueOf(value).Int()
+ case uint, uint8, uint16, uint32, uint64:
+ return 0 != reflect.ValueOf(value).Uint()
+ case float32:
+ return 0 != value
+ case float64:
+ if math.IsNaN(value) || value == 0 {
+ return false
+ }
+ return true
+ case string:
+ return 0 != len(value)
+ }
+ if value.IsObject() {
+ return true
+ }
+ panic(fmt.Errorf("toBoolean(%T)", value.value))
+}
diff --git a/vendor/github.com/robertkrimen/otto/value_number.go b/vendor/github.com/robertkrimen/otto/value_number.go
new file mode 100644
index 000000000..870bf115b
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/value_number.go
@@ -0,0 +1,324 @@
+package otto
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var stringToNumberParseInteger = regexp.MustCompile(`^(?:0[xX])`)
+
+func parseNumber(value string) float64 {
+ value = strings.TrimSpace(value)
+
+ if value == "" {
+ return 0
+ }
+
+ parseFloat := false
+ if strings.IndexRune(value, '.') != -1 {
+ parseFloat = true
+ } else if stringToNumberParseInteger.MatchString(value) {
+ parseFloat = false
+ } else {
+ parseFloat = true
+ }
+
+ if parseFloat {
+ number, err := strconv.ParseFloat(value, 64)
+ if err != nil && err.(*strconv.NumError).Err != strconv.ErrRange {
+ return math.NaN()
+ }
+ return number
+ }
+
+ number, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ return math.NaN()
+ }
+ return float64(number)
+}
+
+func (value Value) float64() float64 {
+ switch value.kind {
+ case valueUndefined:
+ return math.NaN()
+ case valueNull:
+ return 0
+ }
+ switch value := value.value.(type) {
+ case bool:
+ if value {
+ return 1
+ }
+ return 0
+ case int:
+ return float64(value)
+ case int8:
+ return float64(value)
+ case int16:
+ return float64(value)
+ case int32:
+ return float64(value)
+ case int64:
+ return float64(value)
+ case uint:
+ return float64(value)
+ case uint8:
+ return float64(value)
+ case uint16:
+ return float64(value)
+ case uint32:
+ return float64(value)
+ case uint64:
+ return float64(value)
+ case float64:
+ return value
+ case string:
+ return parseNumber(value)
+ case *_object:
+ return value.DefaultValue(defaultValueHintNumber).float64()
+ }
+ panic(fmt.Errorf("toFloat(%T)", value.value))
+}
+
// Powers of two used by the integer conversions (ToInt32 / ToUint32 /
// ToUint16 and the int64 clamping in (Value).number), in both float64
// and int64 form.
const (
	float_2_64   float64 = 18446744073709551616.0 // 2**64
	float_2_63   float64 = 9223372036854775808.0  // 2**63
	float_2_32   float64 = 4294967296.0           // 2**32
	float_2_31   float64 = 2147483648.0           // 2**31
	float_2_16   float64 = 65536.0                // 2**16
	integer_2_32 int64   = 4294967296             // 2**32
	integer_2_31 int64   = 2147483648             // 2**31 (was mistyped as 2146483648)
	sqrt1_2      float64 = math.Sqrt2 / 2
)
+
const (
	// Fixed-width integer limits, untyped so they can be used in any
	// integer or float context.
	maxInt8   = math.MaxInt8
	minInt8   = math.MinInt8
	maxInt16  = math.MaxInt16
	minInt16  = math.MinInt16
	maxInt32  = math.MaxInt32
	minInt32  = math.MinInt32
	maxInt64  = math.MaxInt64
	minInt64  = math.MinInt64
	maxUint8  = math.MaxUint8
	maxUint16 = math.MaxUint16
	maxUint32 = math.MaxUint32
	maxUint64 = math.MaxUint64
	// Platform-sized limits: the width of int/uint depends on the
	// target architecture.
	maxUint = ^uint(0)
	minUint = 0
	maxInt  = int(^uint(0) >> 1)
	minInt  = -maxInt - 1

	// int64-typed copies of the limits above, for direct comparison
	// against (Value).number().int64 in the reflect conversions.
	int64_maxInt   int64 = int64(maxInt)
	int64_minInt   int64 = int64(minInt)
	int64_maxInt8  int64 = math.MaxInt8
	int64_minInt8  int64 = math.MinInt8
	int64_maxInt16 int64 = math.MaxInt16
	int64_minInt16 int64 = math.MinInt16
	int64_maxInt32 int64 = math.MaxInt32
	int64_minInt32 int64 = math.MinInt32
	int64_maxUint8  int64 = math.MaxUint8
	int64_maxUint16 int64 = math.MaxUint16
	int64_maxUint32 int64 = math.MaxUint32

	// float64-typed copies, for range checks on values produced by
	// toIntegerFloat (which can exceed any int64).
	float_maxInt    float64 = float64(int(^uint(0) >> 1))
	float_minInt    float64 = float64(int(-maxInt - 1))
	float_minUint   float64 = float64(0)
	float_maxUint   float64 = float64(uint(^uint(0)))
	float_minUint64 float64 = float64(0)
	float_maxUint64 float64 = math.MaxUint64
	float_maxInt64  float64 = math.MaxInt64
	float_minInt64  float64 = math.MinInt64
)
+
+func toIntegerFloat(value Value) float64 {
+ float := value.float64()
+ if math.IsInf(float, 0) {
+ } else if math.IsNaN(float) {
+ float = 0
+ } else if float > 0 {
+ float = math.Floor(float)
+ } else {
+ float = math.Ceil(float)
+ }
+ return float
+}
+
// _numberKind records which representation inside _number is
// authoritative for the original value.
type _numberKind int

const (
	numberInteger  _numberKind = iota // 3.0 => 3.0
	numberFloat                       // 3.14159 => 3.0, 1+2**63 > 2**63-1
	numberInfinity                    // Infinity => 2**63-1
	numberNaN                         // NaN => 0
)

// _number carries both an integer and a floating-point view of a
// value; see (Value).number for how the fields are populated. The
// zero value is an integer 0 (kind numberInteger).
type _number struct {
	kind    _numberKind
	int64   int64
	float64 float64
}
+
// FIXME
// http://www.goinggo.net/2013/08/gustavos-ieee-754-brain-teaser.html
// http://bazaar.launchpad.net/~niemeyer/strepr/trunk/view/6/strepr.go#L160

// number classifies the value as a _number, filling in the int64 and
// (for non-integers) the float64 field. The zero _number already has
// kind numberInteger, so the small-integer fast paths below only need
// to set int64.
func (value Value) number() (number _number) {
	switch value := value.value.(type) {
	case int8:
		number.int64 = int64(value)
		return
	case int16:
		number.int64 = int64(value)
		return
	case uint8:
		number.int64 = int64(value)
		return
	case uint16:
		number.int64 = int64(value)
		return
	case uint32:
		number.int64 = int64(value)
		return
	case int:
		number.int64 = int64(value)
		return
	case int64:
		number.int64 = value
		return
	}

	// Everything else (bool, floats, uint/uint64, strings, objects, ...)
	// goes through the generic float64 conversion.
	float := value.float64()
	if float == 0 {
		// ±0 keeps the zero _number: kind numberInteger, int64 == 0.
		return
	}

	number.kind = numberFloat
	number.float64 = float

	if math.IsNaN(float) {
		number.kind = numberNaN
		return
	}

	if math.IsInf(float, 0) {
		// Deliberately no early return: infinities fall through so the
		// int64 field is clamped by the range checks below.
		number.kind = numberInfinity
	}

	if float >= float_maxInt64 {
		number.int64 = math.MaxInt64
		return
	}

	if float <= float_minInt64 {
		number.int64 = math.MinInt64
		return
	}

	// Truncate toward zero: Floor for positives, Ceil for negatives.
	integer := float64(0)
	if float > 0 {
		integer = math.Floor(float)
	} else {
		integer = math.Ceil(float)
	}

	if float == integer {
		number.kind = numberInteger
	}
	// int64(float) also truncates toward zero, matching integer; the
	// range checks above keep the conversion in-range.
	number.int64 = int64(float)
	return
}
+
+// ECMA 262: 9.5
+func toInt32(value Value) int32 {
+ {
+ switch value := value.value.(type) {
+ case int8:
+ return int32(value)
+ case int16:
+ return int32(value)
+ case int32:
+ return value
+ }
+ }
+ floatValue := value.float64()
+ if math.IsNaN(floatValue) || math.IsInf(floatValue, 0) {
+ return 0
+ }
+ if floatValue == 0 { // This will work for +0 & -0
+ return 0
+ }
+ remainder := math.Mod(floatValue, float_2_32)
+ if remainder > 0 {
+ remainder = math.Floor(remainder)
+ } else {
+ remainder = math.Ceil(remainder) + float_2_32
+ }
+ if remainder > float_2_31 {
+ return int32(remainder - float_2_32)
+ }
+ return int32(remainder)
+}
+
+func toUint32(value Value) uint32 {
+ {
+ switch value := value.value.(type) {
+ case int8:
+ return uint32(value)
+ case int16:
+ return uint32(value)
+ case uint8:
+ return uint32(value)
+ case uint16:
+ return uint32(value)
+ case uint32:
+ return value
+ }
+ }
+ floatValue := value.float64()
+ if math.IsNaN(floatValue) || math.IsInf(floatValue, 0) {
+ return 0
+ }
+ if floatValue == 0 {
+ return 0
+ }
+ remainder := math.Mod(floatValue, float_2_32)
+ if remainder > 0 {
+ remainder = math.Floor(remainder)
+ } else {
+ remainder = math.Ceil(remainder) + float_2_32
+ }
+ return uint32(remainder)
+}
+
+func toUint16(value Value) uint16 {
+ {
+ switch value := value.value.(type) {
+ case int8:
+ return uint16(value)
+ case uint8:
+ return uint16(value)
+ case uint16:
+ return value
+ }
+ }
+ floatValue := value.float64()
+ if math.IsNaN(floatValue) || math.IsInf(floatValue, 0) {
+ return 0
+ }
+ if floatValue == 0 {
+ return 0
+ }
+ remainder := math.Mod(floatValue, float_2_16)
+ if remainder > 0 {
+ remainder = math.Floor(remainder)
+ } else {
+ remainder = math.Ceil(remainder) + float_2_16
+ }
+ return uint16(remainder)
+}
diff --git a/vendor/github.com/robertkrimen/otto/value_primitive.go b/vendor/github.com/robertkrimen/otto/value_primitive.go
new file mode 100644
index 000000000..11ed329d1
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/value_primitive.go
@@ -0,0 +1,23 @@
+package otto
+
+func toStringPrimitive(value Value) Value {
+ return _toPrimitive(value, defaultValueHintString)
+}
+
+func toNumberPrimitive(value Value) Value {
+ return _toPrimitive(value, defaultValueHintNumber)
+}
+
+func toPrimitive(value Value) Value {
+ return _toPrimitive(value, defaultValueNoHint)
+}
+
+func _toPrimitive(value Value, hint _defaultValueHint) Value {
+ switch value.kind {
+ case valueNull, valueUndefined, valueNumber, valueString, valueBoolean:
+ return value
+ case valueObject:
+ return value._object().DefaultValue(hint)
+ }
+ panic(hereBeDragons(value.kind, value))
+}
diff --git a/vendor/github.com/robertkrimen/otto/value_string.go b/vendor/github.com/robertkrimen/otto/value_string.go
new file mode 100644
index 000000000..0fbfd6b25
--- /dev/null
+++ b/vendor/github.com/robertkrimen/otto/value_string.go
@@ -0,0 +1,103 @@
+package otto
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "unicode/utf16"
+)
+
+var matchLeading0Exponent = regexp.MustCompile(`([eE][\+\-])0+([1-9])`) // 1e-07 => 1e-7
+
+// FIXME
+// https://code.google.com/p/v8/source/browse/branches/bleeding_edge/src/conversions.cc?spec=svn18082&r=18082
+func floatToString(value float64, bitsize int) string {
+ // TODO Fit to ECMA-262 9.8.1 specification
+ if math.IsNaN(value) {
+ return "NaN"
+ } else if math.IsInf(value, 0) {
+ if math.Signbit(value) {
+ return "-Infinity"
+ }
+ return "Infinity"
+ }
+ exponent := math.Log10(math.Abs(value))
+ if exponent >= 21 || exponent < -6 {
+ return matchLeading0Exponent.ReplaceAllString(strconv.FormatFloat(value, 'g', -1, bitsize), "$1$2")
+ }
+ return strconv.FormatFloat(value, 'f', -1, bitsize)
+}
+
+func numberToStringRadix(value Value, radix int) string {
+ float := value.float64()
+ if math.IsNaN(float) {
+ return "NaN"
+ } else if math.IsInf(float, 1) {
+ return "Infinity"
+ } else if math.IsInf(float, -1) {
+ return "-Infinity"
+ }
+ // FIXME This is very broken
+ // Need to do proper radix conversion for floats, ...
+ // This truncates large floats (so bad).
+ return strconv.FormatInt(int64(float), radix)
+}
+
// string implements the ECMAScript ToString conversion for a Value.
//
// Stored strings (Go string or UTF-16 []uint16) are returned directly;
// undefined and null yield their literal names; booleans and integers
// go through strconv; floats go through floatToString so -0 and the
// special values render the JavaScript way; objects are converted via
// [[DefaultValue]] with a string hint.
func (value Value) string() string {
	if value.kind == valueString {
		switch value := value.value.(type) {
		case string:
			return value
		case []uint16:
			// JavaScript strings are UTF-16; decode to a Go UTF-8 string.
			return string(utf16.Decode(value))
		}
	}
	if value.IsUndefined() {
		return "undefined"
	}
	if value.IsNull() {
		return "null"
	}
	switch value := value.value.(type) {
	case bool:
		return strconv.FormatBool(value)
	case int:
		return strconv.FormatInt(int64(value), 10)
	case int8:
		return strconv.FormatInt(int64(value), 10)
	case int16:
		return strconv.FormatInt(int64(value), 10)
	case int32:
		return strconv.FormatInt(int64(value), 10)
	case int64:
		return strconv.FormatInt(value, 10)
	case uint:
		return strconv.FormatUint(uint64(value), 10)
	case uint8:
		return strconv.FormatUint(uint64(value), 10)
	case uint16:
		return strconv.FormatUint(uint64(value), 10)
	case uint32:
		return strconv.FormatUint(uint64(value), 10)
	case uint64:
		return strconv.FormatUint(value, 10)
	case float32:
		if value == 0 {
			return "0" // Take care not to return -0
		}
		return floatToString(float64(value), 32)
	case float64:
		if value == 0 {
			return "0" // Take care not to return -0
		}
		return floatToString(value, 64)
	case []uint16:
		return string(utf16.Decode(value))
	case string:
		return value
	case *_object:
		return value.DefaultValue(defaultValueHintString).string()
	}
	// No representation for this backing type.
	panic(fmt.Errorf("%v.string( %T)", value.value, value.value))
}
diff --git a/vendor/github.com/rs/cors/.travis.yml b/vendor/github.com/rs/cors/.travis.yml
new file mode 100644
index 000000000..bbb5185a2
--- /dev/null
+++ b/vendor/github.com/rs/cors/.travis.yml
@@ -0,0 +1,4 @@
+language: go
+go:
+- 1.3
+- 1.4
diff --git a/vendor/github.com/rs/cors/LICENSE b/vendor/github.com/rs/cors/LICENSE
new file mode 100644
index 000000000..d8e2df5a4
--- /dev/null
+++ b/vendor/github.com/rs/cors/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2014 Olivier Poitrey <rs@dailymotion.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/rs/cors/README.md b/vendor/github.com/rs/cors/README.md
new file mode 100644
index 000000000..4bf56724e
--- /dev/null
+++ b/vendor/github.com/rs/cors/README.md
@@ -0,0 +1,99 @@
+# Go CORS handler [![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/cors) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/cors/master/LICENSE) [![build](https://img.shields.io/travis/rs/cors.svg?style=flat)](https://travis-ci.org/rs/cors) [![Coverage](http://gocover.io/_badge/github.com/rs/cors)](http://gocover.io/github.com/rs/cors)
+
+CORS is a `net/http` handler implementing [Cross Origin Resource Sharing W3 specification](http://www.w3.org/TR/cors/) in Golang.
+
+## Getting Started
+
+After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`.
+
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/rs/cors"
+)
+
+func main() {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte("{\"hello\": \"world\"}"))
+ })
+
+ // cors.Default() setup the middleware with default options being
+ // all origins accepted with simple methods (GET, POST). See
+ // documentation below for more options.
+ handler := cors.Default().Handler(mux)
+ http.ListenAndServe(":8080", handler)
+}
+```
+
+Install `cors`:
+
+ go get github.com/rs/cors
+
+Then run your server:
+
+ go run server.go
+
+The server now runs on `localhost:8080`:
+
+ $ curl -D - -H 'Origin: http://foo.com' http://localhost:8080/
+ HTTP/1.1 200 OK
+ Access-Control-Allow-Origin: foo.com
+ Content-Type: application/json
+ Date: Sat, 25 Oct 2014 03:43:57 GMT
+ Content-Length: 18
+
+ {"hello": "world"}
+
+### More Examples
+
+* `net/http`: [examples/nethttp/server.go](https://github.com/rs/cors/blob/master/examples/nethttp/server.go)
+* [Goji](https://goji.io): [examples/goji/server.go](https://github.com/rs/cors/blob/master/examples/goji/server.go)
+* [Martini](http://martini.codegangsta.io): [examples/martini/server.go](https://github.com/rs/cors/blob/master/examples/martini/server.go)
+* [Negroni](https://github.com/codegangsta/negroni): [examples/negroni/server.go](https://github.com/rs/cors/blob/master/examples/negroni/server.go)
+* [Alice](https://github.com/justinas/alice): [examples/alice/server.go](https://github.com/rs/cors/blob/master/examples/alice/server.go)
+
+## Parameters
+
+Parameters are passed to the middleware through the `cors.New` method as follows:
+
+```go
+c := cors.New(cors.Options{
+ AllowedOrigins: []string{"http://foo.com"},
+ AllowCredentials: true,
+})
+
+// Insert the middleware
+handler = c.Handler(handler)
+```
+
+* **AllowedOrigins** `[]string`: A list of origins a cross-domain request can be executed from. If the special `*` value is present in the list, all origins will be allowed. An origin may contain a wildcard (`*`) to replace 0 or more characters (i.e.: `http://*.domain.com`). Usage of wildcards implies a small performance penalty. Only one wildcard can be used per origin. The default value is `*`.
+* **AllowOriginFunc** `func (origin string) bool`: A custom function to validate the origin. It takes the origin as an argument and returns true if allowed or false otherwise. If this option is set, the content of `AllowedOrigins` is ignored.
+* **AllowedMethods** `[]string`: A list of methods the client is allowed to use with cross-domain requests. Default value is simple methods (`GET` and `POST`).
+* **AllowedHeaders** `[]string`: A list of non simple headers the client is allowed to use with cross-domain requests.
+* **ExposedHeaders** `[]string`: Indicates which headers are safe to expose to the API of a CORS API specification
+* **AllowCredentials** `bool`: Indicates whether the request can include user credentials like cookies, HTTP authentication or client side SSL certificates. The default is `false`.
+* **MaxAge** `int`: Indicates how long (in seconds) the results of a preflight request can be cached. The default is `0` which stands for no max age.
+* **OptionsPassthrough** `bool`: Instructs preflight to let other potential next handlers to process the `OPTIONS` method. Turn this on if your application handles `OPTIONS`.
+* **Debug** `bool`: Debugging flag adds additional output to debug server side CORS issues.
+
+See [API documentation](http://godoc.org/github.com/rs/cors) for more info.
+
+## Benchmarks
+
+ BenchmarkWithout 20000000 64.6 ns/op 8 B/op 1 allocs/op
+ BenchmarkDefault 3000000 469 ns/op 114 B/op 2 allocs/op
+ BenchmarkAllowedOrigin 3000000 608 ns/op 114 B/op 2 allocs/op
+ BenchmarkPreflight 20000000 73.2 ns/op 0 B/op 0 allocs/op
+ BenchmarkPreflightHeader 20000000 73.6 ns/op 0 B/op 0 allocs/op
+ BenchmarkParseHeaderList 2000000 847 ns/op 184 B/op 6 allocs/op
+ BenchmarkParse…Single 5000000 290 ns/op 32 B/op 3 allocs/op
+ BenchmarkParse…Normalized 2000000 776 ns/op 160 B/op 6 allocs/op
+
+## Licenses
+
+All source code is licensed under the [MIT License](https://raw.github.com/rs/cors/master/LICENSE).
diff --git a/vendor/github.com/rs/cors/cors.go b/vendor/github.com/rs/cors/cors.go
new file mode 100644
index 000000000..4bb22d8fc
--- /dev/null
+++ b/vendor/github.com/rs/cors/cors.go
@@ -0,0 +1,412 @@
+/*
+Package cors is a net/http handler that handles CORS-related requests
+as defined by http://www.w3.org/TR/cors/
+
+You can configure it by passing an option struct to cors.New:
+
+ c := cors.New(cors.Options{
+ AllowedOrigins: []string{"foo.com"},
+ AllowedMethods: []string{"GET", "POST", "DELETE"},
+ AllowCredentials: true,
+ })
+
+Then insert the handler in the chain:
+
+ handler = c.Handler(handler)
+
+See Options documentation for more options.
+
+The resulting handler is a standard net/http handler.
+*/
+package cors
+
+import (
+ "log"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/rs/xhandler"
+ "golang.org/x/net/context"
+)
+
// Options is a configuration container to setup the CORS middleware.
type Options struct {
	// AllowedOrigins is a list of origins a cross-domain request can be executed from.
	// If the special "*" value is present in the list, all origins will be allowed.
	// An origin may contain a wildcard (*) to replace 0 or more characters
	// (i.e.: http://*.domain.com). Usage of wildcards implies a small performance penalty.
	// Only one wildcard can be used per origin.
	// Default value is ["*"]
	AllowedOrigins []string
	// AllowOriginFunc is a custom function to validate the origin. It takes the origin
	// as an argument and returns true if allowed or false otherwise. If this option is
	// set, the content of AllowedOrigins is ignored.
	AllowOriginFunc func(origin string) bool
	// AllowedMethods is a list of methods the client is allowed to use with
	// cross-domain requests. Default value is simple methods (GET and POST)
	AllowedMethods []string
	// AllowedHeaders is a list of non-simple headers the client is allowed to use with
	// cross-domain requests.
	// If the special "*" value is present in the list, all headers will be allowed.
	// Default value is [] but "Origin" is always appended to the list.
	AllowedHeaders []string
	// ExposedHeaders indicates which headers are safe to expose to the API of a CORS
	// API specification
	ExposedHeaders []string
	// AllowCredentials indicates whether the request can include user credentials like
	// cookies, HTTP authentication or client side SSL certificates.
	AllowCredentials bool
	// MaxAge indicates how long (in seconds) the results of a preflight request
	// can be cached
	MaxAge int
	// OptionsPassthrough instructs preflight to let other potential next handlers to
	// process the OPTIONS method. Turn this on if your application handles OPTIONS.
	OptionsPassthrough bool
	// Debug turns on logging of server-side CORS decisions (to stdout).
	Debug bool
}
+
// Cors is the CORS http handler. Build one with New or Default so the
// option normalization is applied.
type Cors struct {
	// Debug logger; nil unless Options.Debug was set
	Log *log.Logger
	// Set to true when allowed origins contains a "*"
	allowedOriginsAll bool
	// Normalized list of plain allowed origins
	allowedOrigins []string
	// List of allowed origins containing wildcards
	allowedWOrigins []wildcard
	// Optional origin validator function
	allowOriginFunc func(origin string) bool
	// Set to true when allowed headers contains a "*"
	allowedHeadersAll bool
	// Normalized list of allowed headers
	allowedHeaders []string
	// Normalized list of allowed methods
	allowedMethods []string
	// Normalized list of exposed headers
	exposedHeaders []string
	// Whether Access-Control-Allow-Credentials is sent
	allowCredentials bool
	// Value for Access-Control-Max-Age (seconds); 0 omits the header
	maxAge int
	// Whether OPTIONS requests are passed to the next handler after preflight
	optionPassthrough bool
}
+
+// New creates a new Cors handler with the provided options.
+func New(options Options) *Cors {
+ c := &Cors{
+ exposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey),
+ allowOriginFunc: options.AllowOriginFunc,
+ allowCredentials: options.AllowCredentials,
+ maxAge: options.MaxAge,
+ optionPassthrough: options.OptionsPassthrough,
+ }
+ if options.Debug {
+ c.Log = log.New(os.Stdout, "[cors] ", log.LstdFlags)
+ }
+
+ // Normalize options
+ // Note: for origins and methods matching, the spec requires a case-sensitive matching.
+ // As it may error prone, we chose to ignore the spec here.
+
+ // Allowed Origins
+ if len(options.AllowedOrigins) == 0 {
+ // Default is all origins
+ c.allowedOriginsAll = true
+ } else {
+ c.allowedOrigins = []string{}
+ c.allowedWOrigins = []wildcard{}
+ for _, origin := range options.AllowedOrigins {
+ // Normalize
+ origin = strings.ToLower(origin)
+ if origin == "*" {
+ // If "*" is present in the list, turn the whole list into a match all
+ c.allowedOriginsAll = true
+ c.allowedOrigins = nil
+ c.allowedWOrigins = nil
+ break
+ } else if i := strings.IndexByte(origin, '*'); i >= 0 {
+ // Split the origin in two: start and end string without the *
+ w := wildcard{origin[0:i], origin[i+1 : len(origin)]}
+ c.allowedWOrigins = append(c.allowedWOrigins, w)
+ } else {
+ c.allowedOrigins = append(c.allowedOrigins, origin)
+ }
+ }
+ }
+
+ // Allowed Headers
+ if len(options.AllowedHeaders) == 0 {
+ // Use sensible defaults
+ c.allowedHeaders = []string{"Origin", "Accept", "Content-Type"}
+ } else {
+ // Origin is always appended as some browsers will always request for this header at preflight
+ c.allowedHeaders = convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey)
+ for _, h := range options.AllowedHeaders {
+ if h == "*" {
+ c.allowedHeadersAll = true
+ c.allowedHeaders = nil
+ break
+ }
+ }
+ }
+
+ // Allowed Methods
+ if len(options.AllowedMethods) == 0 {
+ // Default is spec's "simple" methods
+ c.allowedMethods = []string{"GET", "POST"}
+ } else {
+ c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper)
+ }
+
+ return c
+}
+
// Default creates a new Cors handler with default options: all origins
// allowed with the simple methods (GET, POST).
func Default() *Cors {
	return New(Options{})
}
+
+// Handler apply the CORS specification on the request, and add relevant CORS headers
+// as necessary.
+func (c *Cors) Handler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "OPTIONS" {
+ c.logf("Handler: Preflight request")
+ c.handlePreflight(w, r)
+ // Preflight requests are standalone and should stop the chain as some other
+ // middleware may not handle OPTIONS requests correctly. One typical example
+ // is authentication middleware ; OPTIONS requests won't carry authentication
+ // headers (see #1)
+ if c.optionPassthrough {
+ h.ServeHTTP(w, r)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+ } else {
+ c.logf("Handler: Actual request")
+ c.handleActualRequest(w, r)
+ h.ServeHTTP(w, r)
+ }
+ })
+}
+
+// HandlerC is net/context aware handler
+func (c *Cors) HandlerC(h xhandler.HandlerC) xhandler.HandlerC {
+ return xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ if r.Method == "OPTIONS" {
+ c.logf("Handler: Preflight request")
+ c.handlePreflight(w, r)
+ // Preflight requests are standalone and should stop the chain as some other
+ // middleware may not handle OPTIONS requests correctly. One typical example
+ // is authentication middleware ; OPTIONS requests won't carry authentication
+ // headers (see #1)
+ if c.optionPassthrough {
+ h.ServeHTTPC(ctx, w, r)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+ } else {
+ c.logf("Handler: Actual request")
+ c.handleActualRequest(w, r)
+ h.ServeHTTPC(ctx, w, r)
+ }
+ })
+}
+
+// HandlerFunc provides Martini compatible handler
+func (c *Cors) HandlerFunc(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "OPTIONS" {
+ c.logf("HandlerFunc: Preflight request")
+ c.handlePreflight(w, r)
+ } else {
+ c.logf("HandlerFunc: Actual request")
+ c.handleActualRequest(w, r)
+ }
+}
+
+// Negroni compatible interface
+func (c *Cors) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
+ if r.Method == "OPTIONS" {
+ c.logf("ServeHTTP: Preflight request")
+ c.handlePreflight(w, r)
+ // Preflight requests are standalone and should stop the chain as some other
+ // middleware may not handle OPTIONS requests correctly. One typical example
+ // is authentication middleware ; OPTIONS requests won't carry authentication
+ // headers (see #1)
+ if c.optionPassthrough {
+ next(w, r)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+ } else {
+ c.logf("ServeHTTP: Actual request")
+ c.handleActualRequest(w, r)
+ next(w, r)
+ }
+}
+
// handlePreflight handles pre-flight CORS requests: it validates the
// origin, requested method, and requested headers, and writes the
// Access-Control-Allow-* response headers on success. Any failed check
// aborts silently (no CORS headers beyond Vary), which the browser
// treats as a denial.
func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) {
	headers := w.Header()
	origin := r.Header.Get("Origin")

	if r.Method != "OPTIONS" {
		c.logf("  Preflight aborted: %s!=OPTIONS", r.Method)
		return
	}
	// Always set Vary headers
	// see https://github.com/rs/cors/issues/10,
	// https://github.com/rs/cors/commit/dbdca4d95feaa7511a46e6f1efb3b3aa505bc43f#commitcomment-12352001
	headers.Add("Vary", "Origin")
	headers.Add("Vary", "Access-Control-Request-Method")
	headers.Add("Vary", "Access-Control-Request-Headers")

	if origin == "" {
		c.logf("  Preflight aborted: empty origin")
		return
	}
	if !c.isOriginAllowed(origin) {
		c.logf("  Preflight aborted: origin '%s' not allowed", origin)
		return
	}

	reqMethod := r.Header.Get("Access-Control-Request-Method")
	if !c.isMethodAllowed(reqMethod) {
		c.logf("  Preflight aborted: method '%s' not allowed", reqMethod)
		return
	}
	reqHeaders := parseHeaderList(r.Header.Get("Access-Control-Request-Headers"))
	if !c.areHeadersAllowed(reqHeaders) {
		c.logf("  Preflight aborted: headers '%v' not allowed", reqHeaders)
		return
	}
	headers.Set("Access-Control-Allow-Origin", origin)
	// Spec says: Since the list of methods can be unbounded, simply returning the method indicated
	// by Access-Control-Request-Method (if supported) can be enough
	headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod))
	if len(reqHeaders) > 0 {

		// Spec says: Since the list of headers can be unbounded, simply returning supported headers
		// from Access-Control-Request-Headers can be enough
		headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", "))
	}
	if c.allowCredentials {
		headers.Set("Access-Control-Allow-Credentials", "true")
	}
	if c.maxAge > 0 {
		headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge))
	}
	c.logf("  Preflight response headers: %v", headers)
}
+
+// handleActualRequest handles simple cross-origin requests, actual request or redirects
+func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) {
+ headers := w.Header()
+ origin := r.Header.Get("Origin")
+
+ if r.Method == "OPTIONS" {
+ c.logf(" Actual request no headers added: method == %s", r.Method)
+ return
+ }
+ // Always set Vary, see https://github.com/rs/cors/issues/10
+ headers.Add("Vary", "Origin")
+ if origin == "" {
+ c.logf(" Actual request no headers added: missing origin")
+ return
+ }
+ if !c.isOriginAllowed(origin) {
+ c.logf(" Actual request no headers added: origin '%s' not allowed", origin)
+ return
+ }
+
+ // Note that spec does define a way to specifically disallow a simple method like GET or
+ // POST. Access-Control-Allow-Methods is only used for pre-flight requests and the
+ // spec doesn't instruct to check the allowed methods for simple cross-origin requests.
+ // We think it's a nice feature to be able to have control on those methods though.
+ if !c.isMethodAllowed(r.Method) {
+ c.logf(" Actual request no headers added: method '%s' not allowed", r.Method)
+
+ return
+ }
+ headers.Set("Access-Control-Allow-Origin", origin)
+ if len(c.exposedHeaders) > 0 {
+ headers.Set("Access-Control-Expose-Headers", strings.Join(c.exposedHeaders, ", "))
+ }
+ if c.allowCredentials {
+ headers.Set("Access-Control-Allow-Credentials", "true")
+ }
+ c.logf(" Actual response added headers: %v", headers)
+}
+
+// convenience method. checks if debugging is turned on before printing
+func (c *Cors) logf(format string, a ...interface{}) {
+ if c.Log != nil {
+ c.Log.Printf(format, a...)
+ }
+}
+
+// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests
+// on the endpoint
+func (c *Cors) isOriginAllowed(origin string) bool {
+ if c.allowOriginFunc != nil {
+ return c.allowOriginFunc(origin)
+ }
+ if c.allowedOriginsAll {
+ return true
+ }
+ origin = strings.ToLower(origin)
+ for _, o := range c.allowedOrigins {
+ if o == origin {
+ return true
+ }
+ }
+ for _, w := range c.allowedWOrigins {
+ if w.match(origin) {
+ return true
+ }
+ }
+ return false
+}
+
+// isMethodAllowed checks if a given method can be used as part of a cross-domain request
+// on the endpoint
+func (c *Cors) isMethodAllowed(method string) bool {
+ if len(c.allowedMethods) == 0 {
+ // If no method allowed, always return false, even for preflight request
+ return false
+ }
+ method = strings.ToUpper(method)
+ if method == "OPTIONS" {
+ // Always allow preflight requests
+ return true
+ }
+ for _, m := range c.allowedMethods {
+ if m == method {
+ return true
+ }
+ }
+ return false
+}
+
+// areHeadersAllowed checks if a given list of headers is allowed to be used within
+// a cross-domain request.
+func (c *Cors) areHeadersAllowed(requestedHeaders []string) bool {
+ if c.allowedHeadersAll || len(requestedHeaders) == 0 {
+ return true
+ }
+ for _, header := range requestedHeaders {
+ header = http.CanonicalHeaderKey(header)
+ found := false
+ for _, h := range c.allowedHeaders {
+ if h == header {
+ found = true
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/rs/cors/utils.go b/vendor/github.com/rs/cors/utils.go
new file mode 100644
index 000000000..c7a0aa060
--- /dev/null
+++ b/vendor/github.com/rs/cors/utils.go
@@ -0,0 +1,70 @@
+package cors
+
+import "strings"
+
+const toLower = 'a' - 'A'
+
+type converter func(string) string
+
+type wildcard struct {
+ prefix string
+ suffix string
+}
+
+func (w wildcard) match(s string) bool {
+ return len(s) >= len(w.prefix+w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix)
+}
+
+// convert converts a list of string using the passed converter function
+func convert(s []string, c converter) []string {
+ out := []string{}
+ for _, i := range s {
+ out = append(out, c(i))
+ }
+ return out
+}
+
+// parseHeaderList tokenize + normalize a string containing a list of headers
+func parseHeaderList(headerList string) []string {
+ l := len(headerList)
+ h := make([]byte, 0, l)
+ upper := true
+ // Estimate the number of headers in order to allocate the right slice size
+ t := 0
+ for i := 0; i < l; i++ {
+ if headerList[i] == ',' {
+ t++
+ }
+ }
+ headers := make([]string, 0, t)
+ for i := 0; i < l; i++ {
+ b := headerList[i]
+ if b >= 'a' && b <= 'z' {
+ if upper {
+ h = append(h, b-toLower)
+ } else {
+ h = append(h, b)
+ }
+ } else if b >= 'A' && b <= 'Z' {
+ if !upper {
+ h = append(h, b+toLower)
+ } else {
+ h = append(h, b)
+ }
+ } else if b == '-' || b == '_' || (b >= '0' && b <= '9') {
+ h = append(h, b)
+ }
+
+ if b == ' ' || b == ',' || i == l-1 {
+ if len(h) > 0 {
+ // Flush the found header
+ headers = append(headers, string(h))
+ h = h[:0]
+ upper = true
+ }
+ } else {
+ upper = b == '-' || b == '_'
+ }
+ }
+ return headers
+}
diff --git a/vendor/github.com/rs/xhandler/.travis.yml b/vendor/github.com/rs/xhandler/.travis.yml
new file mode 100644
index 000000000..b65c7a9f1
--- /dev/null
+++ b/vendor/github.com/rs/xhandler/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+- 1.5
+- tip
+matrix:
+ allow_failures:
+ - go: tip
diff --git a/vendor/github.com/rs/xhandler/LICENSE b/vendor/github.com/rs/xhandler/LICENSE
new file mode 100644
index 000000000..47c5e9d2d
--- /dev/null
+++ b/vendor/github.com/rs/xhandler/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Olivier Poitrey <rs@dailymotion.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is furnished
+to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/rs/xhandler/README.md b/vendor/github.com/rs/xhandler/README.md
new file mode 100644
index 000000000..91c594bd2
--- /dev/null
+++ b/vendor/github.com/rs/xhandler/README.md
@@ -0,0 +1,134 @@
+# XHandler
+
+[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xhandler) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xhandler/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xhandler.svg?branch=master)](https://travis-ci.org/rs/xhandler) [![Coverage](http://gocover.io/_badge/github.com/rs/xhandler)](http://gocover.io/github.com/rs/xhandler)
+
+XHandler is a bridge between [net/context](https://godoc.org/golang.org/x/net/context) and `http.Handler`.
+
+It lets you enforce `net/context` in your handlers without sacrificing compatibility with existing `http.Handlers` nor imposing a specific router.
+
+Thanks to `net/context` deadline management, `xhandler` is able to enforce a per request deadline and will cancel the context when the client closes the connection unexpectedly.
+
+You may create your own `net/context` aware handler pretty much the same way as you would do with http.Handler.
+
+Read more about xhandler on [Dailymotion engineering blog](http://engineering.dailymotion.com/our-way-to-go/).
+
+## Installing
+
+ go get -u github.com/rs/xhandler
+
+## Usage
+
+```go
+package main
+
+import (
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/rs/cors"
+ "github.com/rs/xhandler"
+ "golang.org/x/net/context"
+)
+
+type myMiddleware struct {
+ next xhandler.HandlerC
+}
+
+func (h myMiddleware) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ ctx = context.WithValue(ctx, "test", "World")
+ h.next.ServeHTTPC(ctx, w, r)
+}
+
+func main() {
+ c := xhandler.Chain{}
+
+ // Add close notifier handler so context is cancelled when the client closes
+ // the connection
+ c.UseC(xhandler.CloseHandler)
+
+ // Add timeout handler
+ c.UseC(xhandler.TimeoutHandler(2 * time.Second))
+
+ // Middleware putting something in the context
+ c.UseC(func(next xhandler.HandlerC) xhandler.HandlerC {
+ return myMiddleware{next: next}
+ })
+
+ // Mix it with a non-context-aware middleware handler
+ c.Use(cors.Default().Handler)
+
+ // Final handler (using handlerFuncC), reading from the context
+ xh := xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ value := ctx.Value("test").(string)
+ w.Write([]byte("Hello " + value))
+ })
+
+ // Bridge context aware handlers with http.Handler using xhandler.Handle()
+ http.Handle("/test", c.Handler(xh))
+
+ if err := http.ListenAndServe(":8080", nil); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+### Using xmux
+
+Xhandler comes with an optional context aware [muxer](https://github.com/rs/xmux) forked from [httprouter](https://github.com/julienschmidt/httprouter):
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/rs/xhandler"
+ "github.com/rs/xmux"
+ "golang.org/x/net/context"
+)
+
+func main() {
+ c := xhandler.Chain{}
+
+ // Append a context-aware middleware handler
+ c.UseC(xhandler.CloseHandler)
+
+ // Another context-aware middleware handler
+ c.UseC(xhandler.TimeoutHandler(2 * time.Second))
+
+ mux := xmux.New()
+
+ // Use c.Handler to terminate the chain with your final handler
+ mux.GET("/welcome/:name", xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) {
+ fmt.Fprintf(w, "Welcome %s!", xmux.Params(ctx).Get("name"))
+ }))
+
+ if err := http.ListenAndServe(":8080", c.Handler(mux)); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+See [xmux](https://github.com/rs/xmux) for more examples.
+
+## Context Aware Middleware
+
+Here is a list of `net/context` aware middleware handlers implementing `xhandler.HandlerC` interface.
+
+Feel free to put up a PR linking your middleware if you have built one:
+
+| Middleware | Author | Description |
+| ---------- | ------ | ----------- |
+| [xmux](https://github.com/rs/xmux) | [Olivier Poitrey](https://github.com/rs) | HTTP request muxer |
+| [xlog](https://github.com/rs/xlog) | [Olivier Poitrey](https://github.com/rs) | HTTP handler logger |
+| [xstats](https://github.com/rs/xstats) | [Olivier Poitrey](https://github.com/rs) | A generic client for service instrumentation |
+| [xaccess](https://github.com/rs/xaccess) | [Olivier Poitrey](https://github.com/rs) | HTTP handler access logger with [xlog](https://github.com/rs/xlog) and [xstats](https://github.com/rs/xstats) |
+| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support |
+
+## Licenses
+
+All source code is licensed under the [MIT License](https://raw.github.com/rs/xhandler/master/LICENSE).
diff --git a/vendor/github.com/rs/xhandler/chain.go b/vendor/github.com/rs/xhandler/chain.go
new file mode 100644
index 000000000..3e4bd359c
--- /dev/null
+++ b/vendor/github.com/rs/xhandler/chain.go
@@ -0,0 +1,121 @@
+package xhandler
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// Chain is a helper for chaining middleware handlers together for easier
+// management.
+type Chain []func(next HandlerC) HandlerC
+
+// Add appends a variable number of additional middleware handlers
+// to the middleware chain. Middleware handlers can either be
+// context-aware or non-context aware handlers with the appropriate
+// function signatures.
+func (c *Chain) Add(f ...interface{}) {
+ for _, h := range f {
+ switch v := h.(type) {
+ case func(http.Handler) http.Handler:
+ c.Use(v)
+ case func(HandlerC) HandlerC:
+ c.UseC(v)
+ default:
+ panic("Adding invalid handler to the middleware chain")
+ }
+ }
+}
+
+// With creates a new middleware chain from an existing chain,
+// extending it with additional middleware. Middleware handlers
+// can either be context-aware or non-context aware handlers
+// with the appropriate function signatures.
+func (c *Chain) With(f ...interface{}) *Chain {
+ n := make(Chain, len(*c))
+ copy(n, *c)
+ n.Add(f...)
+ return &n
+}
+
+// UseC appends a context-aware handler to the middleware chain.
+func (c *Chain) UseC(f func(next HandlerC) HandlerC) {
+ *c = append(*c, f)
+}
+
+// Use appends a standard http.Handler to the middleware chain without
+// losing track of the context when inserted between two context aware handlers.
+//
+// Caveat: the f function will be called on each request so you are better off putting
+// any initialization sequence outside of this function.
+func (c *Chain) Use(f func(next http.Handler) http.Handler) {
+ xf := func(next HandlerC) HandlerC {
+ return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ n := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTPC(ctx, w, r)
+ })
+ f(n).ServeHTTP(w, r)
+ })
+ }
+ *c = append(*c, xf)
+}
+
+// Handler wraps the provided final handler with all the middleware appended to
+// the chain and returns a new standard http.Handler instance.
+// The context.Background() context is injected automatically.
+func (c Chain) Handler(xh HandlerC) http.Handler {
+ ctx := context.Background()
+ return c.HandlerCtx(ctx, xh)
+}
+
+// HandlerFC is a helper to provide a function (HandlerFuncC) to Handler().
+//
+// HandlerFC is equivalent to:
+// c.Handler(xhandler.HandlerFuncC(xhc))
+func (c Chain) HandlerFC(xhf HandlerFuncC) http.Handler {
+ ctx := context.Background()
+ return c.HandlerCtx(ctx, HandlerFuncC(xhf))
+}
+
+// HandlerH is a helper to provide a standard http handler (http.HandlerFunc)
+// to Handler(). Your final handler won't have access to the context though.
+func (c Chain) HandlerH(h http.Handler) http.Handler {
+ ctx := context.Background()
+ return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ h.ServeHTTP(w, r)
+ }))
+}
+
+// HandlerF is a helper to provide a standard http handler function
+// (http.HandlerFunc) to Handler(). Your final handler won't have access
+// to the context though.
+func (c Chain) HandlerF(hf http.HandlerFunc) http.Handler {
+ ctx := context.Background()
+ return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ hf(w, r)
+ }))
+}
+
+// HandlerCtx wraps the provided final handler with all the middleware appended to
+// the chain and returns a new standard http.Handler instance.
+func (c Chain) HandlerCtx(ctx context.Context, xh HandlerC) http.Handler {
+ return New(ctx, c.HandlerC(xh))
+}
+
+// HandlerC wraps the provided final handler with all the middleware appended to
+// the chain and returns a HandlerC instance.
+func (c Chain) HandlerC(xh HandlerC) HandlerC {
+ for i := len(c) - 1; i >= 0; i-- {
+ xh = c[i](xh)
+ }
+ return xh
+}
+
+// HandlerCF wraps the provided final handler func with all the middleware appended to
+// the chain and returns a HandlerC instance.
+//
+// HandlerCF is equivalent to:
+// c.HandlerC(xhandler.HandlerFuncC(xhc))
+func (c Chain) HandlerCF(xhc HandlerFuncC) HandlerC {
+ return c.HandlerC(HandlerFuncC(xhc))
+}
diff --git a/vendor/github.com/rs/xhandler/middleware.go b/vendor/github.com/rs/xhandler/middleware.go
new file mode 100644
index 000000000..7ad8fba62
--- /dev/null
+++ b/vendor/github.com/rs/xhandler/middleware.go
@@ -0,0 +1,59 @@
+package xhandler
+
+import (
+ "net/http"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// CloseHandler returns a Handler, cancelling the context when the client
+// connection closes unexpectedly.
+func CloseHandler(next HandlerC) HandlerC {
+ return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ // Cancel the context if the client closes the connection
+ if wcn, ok := w.(http.CloseNotifier); ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+
+ notify := wcn.CloseNotify()
+ go func() {
+ select {
+ case <-notify:
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+ }
+
+ next.ServeHTTPC(ctx, w, r)
+ })
+}
+
+// TimeoutHandler returns a Handler which adds a timeout to the context.
+//
+// Child handlers have the responsibility of obeying the context deadline and returning
+// an appropriate error (or not) response in case of timeout.
+func TimeoutHandler(timeout time.Duration) func(next HandlerC) HandlerC {
+ return func(next HandlerC) HandlerC {
+ return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ ctx, _ = context.WithTimeout(ctx, timeout)
+ next.ServeHTTPC(ctx, w, r)
+ })
+ }
+}
+
+// If is a special handler that will insert the condNext handler only if a condition
+// applies at runtime.
+func If(cond func(ctx context.Context, w http.ResponseWriter, r *http.Request) bool, condNext func(next HandlerC) HandlerC) func(next HandlerC) HandlerC {
+ return func(next HandlerC) HandlerC {
+ return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ if cond(ctx, w, r) {
+ condNext(next).ServeHTTPC(ctx, w, r)
+ } else {
+ next.ServeHTTPC(ctx, w, r)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/rs/xhandler/xhandler.go b/vendor/github.com/rs/xhandler/xhandler.go
new file mode 100644
index 000000000..bc832cb1f
--- /dev/null
+++ b/vendor/github.com/rs/xhandler/xhandler.go
@@ -0,0 +1,42 @@
+// Package xhandler provides a bridge between http.Handler and net/context.
+//
+// xhandler enforces net/context in your handlers without sacrificing
+// compatibility with existing http.Handlers nor imposing a specific router.
+//
+// Thanks to net/context deadline management, xhandler is able to enforce
+// a per request deadline and will cancel the context when the client closes
+// the connection unexpectedly.
+//
+// You may create net/context aware middlewares pretty much the same way as
+// you would with http.Handler.
+package xhandler // import "github.com/rs/xhandler"
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// HandlerC is a net/context aware http.Handler
+type HandlerC interface {
+ ServeHTTPC(context.Context, http.ResponseWriter, *http.Request)
+}
+
+// HandlerFuncC type is an adapter to allow the use of ordinary functions
+// as an xhandler.Handler. If f is a function with the appropriate signature,
+// xhandler.HandlerFuncC(f) is a xhandler.Handler object that calls f.
+type HandlerFuncC func(context.Context, http.ResponseWriter, *http.Request)
+
+// ServeHTTPC calls f(ctx, w, r).
+func (f HandlerFuncC) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ f(ctx, w, r)
+}
+
+// New creates a conventional http.Handler injecting the provided root
+// context to sub handlers. This handler is used as a bridge between conventional
+// http.Handler and context aware handlers.
+func New(ctx context.Context, h HandlerC) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.ServeHTTPC(ctx, w, r)
+ })
+}
diff --git a/vendor/github.com/syndtr/goleveldb/.travis.yml b/vendor/github.com/syndtr/goleveldb/.travis.yml
new file mode 100644
index 000000000..82de37735
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
+ - tip
+
+script:
+ - go test -timeout 1h ./...
+ - go test -timeout 30m -race -run "TestDB_(Concurrent|GoleveldbIssue74)" ./leveldb
diff --git a/vendor/github.com/syndtr/goleveldb/LICENSE b/vendor/github.com/syndtr/goleveldb/LICENSE
new file mode 100644
index 000000000..4a772d1ab
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2012 Suryandaru Triandana <syndtr@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/syndtr/goleveldb/README.md b/vendor/github.com/syndtr/goleveldb/README.md
new file mode 100644
index 000000000..259286f55
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/README.md
@@ -0,0 +1,105 @@
+This is an implementation of the [LevelDB key/value database](http://code.google.com/p/leveldb) in the [Go programming language](http://golang.org).
+
+[![Build Status](https://travis-ci.org/syndtr/goleveldb.png?branch=master)](https://travis-ci.org/syndtr/goleveldb)
+
+Installation
+-----------
+
+ go get github.com/syndtr/goleveldb/leveldb
+
+Requirements
+-----------
+
+* Need at least `go1.4` or newer.
+
+Usage
+-----------
+
+Create or open a database:
+```go
+db, err := leveldb.OpenFile("path/to/db", nil)
+...
+defer db.Close()
+...
+```
+Read or modify the database content:
+```go
+// Remember that the contents of the returned slice should not be modified.
+data, err := db.Get([]byte("key"), nil)
+...
+err = db.Put([]byte("key"), []byte("value"), nil)
+...
+err = db.Delete([]byte("key"), nil)
+...
+```
+
+Iterate over database content:
+```go
+iter := db.NewIterator(nil, nil)
+for iter.Next() {
+ // Remember that the contents of the returned slice should not be modified, and
+ // only valid until the next call to Next.
+ key := iter.Key()
+ value := iter.Value()
+ ...
+}
+iter.Release()
+err = iter.Error()
+...
+```
+Seek-then-Iterate:
+```go
+iter := db.NewIterator(nil, nil)
+for ok := iter.Seek(key); ok; ok = iter.Next() {
+ // Use key/value.
+ ...
+}
+iter.Release()
+err = iter.Error()
+...
+```
+Iterate over subset of database content:
+```go
+iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
+for iter.Next() {
+ // Use key/value.
+ ...
+}
+iter.Release()
+err = iter.Error()
+...
+```
+Iterate over subset of database content with a particular prefix:
+```go
+iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+for iter.Next() {
+ // Use key/value.
+ ...
+}
+iter.Release()
+err = iter.Error()
+...
+```
+Batch writes:
+```go
+batch := new(leveldb.Batch)
+batch.Put([]byte("foo"), []byte("value"))
+batch.Put([]byte("bar"), []byte("another value"))
+batch.Delete([]byte("baz"))
+err = db.Write(batch, nil)
+...
+```
+Use bloom filter:
+```go
+o := &opt.Options{
+ Filter: filter.NewBloomFilter(10),
+}
+db, err := leveldb.OpenFile("path/to/db", o)
+...
+defer db.Close()
+...
+```
+Documentation
+-----------
+
+You can read package documentation [here](http://godoc.org/github.com/syndtr/goleveldb).
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go
new file mode 100644
index 000000000..225920002
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go
@@ -0,0 +1,349 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrBatchCorrupted records reason of batch corruption. This error will be
+// wrapped with errors.ErrCorrupted.
+type ErrBatchCorrupted struct {
+ Reason string
+}
+
+func (e *ErrBatchCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
+}
+
+func newErrBatchCorrupted(reason string) error {
+ return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
+}
+
+const (
+ batchHeaderLen = 8 + 4
+ batchGrowRec = 3000
+ batchBufioSize = 16
+)
+
+// BatchReplay wraps basic batch operations.
+type BatchReplay interface {
+ Put(key, value []byte)
+ Delete(key []byte)
+}
+
+type batchIndex struct {
+ keyType keyType
+ keyPos, keyLen int
+ valuePos, valueLen int
+}
+
+func (index batchIndex) k(data []byte) []byte {
+ return data[index.keyPos : index.keyPos+index.keyLen]
+}
+
+func (index batchIndex) v(data []byte) []byte {
+ if index.valueLen != 0 {
+ return data[index.valuePos : index.valuePos+index.valueLen]
+ }
+ return nil
+}
+
+func (index batchIndex) kv(data []byte) (key, value []byte) {
+ return index.k(data), index.v(data)
+}
+
+// Batch is a write batch.
+type Batch struct {
+ data []byte
+ index []batchIndex
+
+ // internalLen is sums of key/value pair length plus 8-bytes internal key.
+ internalLen int
+}
+
+func (b *Batch) grow(n int) {
+ o := len(b.data)
+ if cap(b.data)-o < n {
+ div := 1
+ if len(b.index) > batchGrowRec {
+ div = len(b.index) / batchGrowRec
+ }
+ ndata := make([]byte, o, o+n+o/div)
+ copy(ndata, b.data)
+ b.data = ndata
+ }
+}
+
+func (b *Batch) appendRec(kt keyType, key, value []byte) {
+ n := 1 + binary.MaxVarintLen32 + len(key)
+ if kt == keyTypeVal {
+ n += binary.MaxVarintLen32 + len(value)
+ }
+ b.grow(n)
+ index := batchIndex{keyType: kt}
+ o := len(b.data)
+ data := b.data[:o+n]
+ data[o] = byte(kt)
+ o++
+ o += binary.PutUvarint(data[o:], uint64(len(key)))
+ index.keyPos = o
+ index.keyLen = len(key)
+ o += copy(data[o:], key)
+ if kt == keyTypeVal {
+ o += binary.PutUvarint(data[o:], uint64(len(value)))
+ index.valuePos = o
+ index.valueLen = len(value)
+ o += copy(data[o:], value)
+ }
+ b.data = data[:o]
+ b.index = append(b.index, index)
+ b.internalLen += index.keyLen + index.valueLen + 8
+}
+
+// Put appends 'put operation' of the given key/value pair to the batch.
+// It is safe to modify the contents of the argument after Put returns but not
+// before.
+func (b *Batch) Put(key, value []byte) {
+ b.appendRec(keyTypeVal, key, value)
+}
+
+// Delete appends 'delete operation' of the given key to the batch.
+// It is safe to modify the contents of the argument after Delete returns but
+// not before.
+func (b *Batch) Delete(key []byte) {
+ b.appendRec(keyTypeDel, key, nil)
+}
+
+// Dump dumps batch contents. The returned slice can be loaded into the
+// batch using Load method.
+// The returned slice is not its own copy, so the contents should not be
+// modified.
+func (b *Batch) Dump() []byte {
+ return b.data
+}
+
+// Load loads given slice into the batch. Previous contents of the batch
+// will be discarded.
+// The given slice will not be copied and will be used as batch buffer, so
+// it is not safe to modify the contents of the slice.
+func (b *Batch) Load(data []byte) error {
+ return b.decode(data, -1)
+}
+
+// Replay replays batch contents.
+func (b *Batch) Replay(r BatchReplay) error {
+ for _, index := range b.index {
+ switch index.keyType {
+ case keyTypeVal:
+ r.Put(index.k(b.data), index.v(b.data))
+ case keyTypeDel:
+ r.Delete(index.k(b.data))
+ }
+ }
+ return nil
+}
+
+// Len returns number of records in the batch.
+func (b *Batch) Len() int {
+ return len(b.index)
+}
+
+// Reset resets the batch.
+func (b *Batch) Reset() {
+ b.data = b.data[:0]
+ b.index = b.index[:0]
+ b.internalLen = 0
+}
+
+func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error {
+ for i, index := range b.index {
+ if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Batch) append(p *Batch) {
+ ob := len(b.data)
+ oi := len(b.index)
+ b.data = append(b.data, p.data...)
+ b.index = append(b.index, p.index...)
+ b.internalLen += p.internalLen
+
+ // Updating index offset.
+ if ob != 0 {
+ for ; oi < len(b.index); oi++ {
+ index := &b.index[oi]
+ index.keyPos += ob
+ if index.valueLen != 0 {
+ index.valuePos += ob
+ }
+ }
+ }
+}
+
+func (b *Batch) decode(data []byte, expectedLen int) error {
+ b.data = data
+ b.index = b.index[:0]
+ b.internalLen = 0
+ err := decodeBatch(data, func(i int, index batchIndex) error {
+ b.index = append(b.index, index)
+ b.internalLen += index.keyLen + index.valueLen + 8
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if expectedLen >= 0 && len(b.index) != expectedLen {
+ return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index)))
+ }
+ return nil
+}
+
+func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error {
+ var ik []byte
+ for i, index := range b.index {
+ ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
+ if err := mdb.Put(ik, index.v(b.data)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error {
+ var ik []byte
+ for i, index := range b.index {
+ ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
+ if err := mdb.Delete(ik); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func newBatch() interface{} {
+ return &Batch{}
+}
+
+func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
+ var index batchIndex
+ for i, o := 0, 0; o < len(data); i++ {
+ // Key type.
+ index.keyType = keyType(data[o])
+ if index.keyType > keyTypeVal {
+ return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType)))
+ }
+ o++
+
+ // Key.
+ x, n := binary.Uvarint(data[o:])
+ o += n
+ if n <= 0 || o+int(x) > len(data) {
+ return newErrBatchCorrupted("bad record: invalid key length")
+ }
+ index.keyPos = o
+ index.keyLen = int(x)
+ o += index.keyLen
+
+ // Value.
+ if index.keyType == keyTypeVal {
+ x, n = binary.Uvarint(data[o:])
+ o += n
+ if n <= 0 || o+int(x) > len(data) {
+ return newErrBatchCorrupted("bad record: invalid value length")
+ }
+ index.valuePos = o
+ index.valueLen = int(x)
+ o += index.valueLen
+ } else {
+ index.valuePos = 0
+ index.valueLen = 0
+ }
+
+ if err := fn(i, index); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
+ seq, batchLen, err = decodeBatchHeader(data)
+ if err != nil {
+ return 0, 0, err
+ }
+ if seq < expectSeq {
+ return 0, 0, newErrBatchCorrupted("invalid sequence number")
+ }
+ data = data[batchHeaderLen:]
+ var ik []byte
+ var decodedLen int
+ err = decodeBatch(data, func(i int, index batchIndex) error {
+ if i >= batchLen {
+ return newErrBatchCorrupted("invalid records length")
+ }
+ ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
+ if err := mdb.Put(ik, index.v(data)); err != nil {
+ return err
+ }
+ decodedLen++
+ return nil
+ })
+ if err == nil && decodedLen != batchLen {
+ err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
+ }
+ return
+}
+
+func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
+ dst = ensureBuffer(dst, batchHeaderLen)
+ binary.LittleEndian.PutUint64(dst, seq)
+ binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
+ return dst
+}
+
+func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
+ if len(data) < batchHeaderLen {
+ return 0, 0, newErrBatchCorrupted("too short")
+ }
+
+ seq = binary.LittleEndian.Uint64(data)
+ batchLen = int(binary.LittleEndian.Uint32(data[8:]))
+ if batchLen < 0 {
+ return 0, 0, newErrBatchCorrupted("invalid records length")
+ }
+ return
+}
+
+func batchesLen(batches []*Batch) int {
+ batchLen := 0
+ for _, batch := range batches {
+ batchLen += batch.Len()
+ }
+ return batchLen
+}
+
+func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
+ if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
+ return err
+ }
+ for _, batch := range batches {
+ if _, err := wr.Write(batch.data); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
new file mode 100644
index 000000000..c5940b232
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -0,0 +1,705 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package cache provides interface and implementation of a cache algorithms.
+package cache
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
// Cacher provides an interface for pluggable cache eviction policies
// (e.g. LRU). An implementation must be safe for concurrent use.
type Cacher interface {
	// Capacity returns cache capacity.
	Capacity() int

	// SetCapacity sets cache capacity.
	SetCapacity(capacity int)

	// Promote promotes the 'cache node'.
	Promote(n *Node)

	// Ban evicts the 'cache node' and prevents subsequent 'promote'.
	Ban(n *Node)

	// Evict evicts the 'cache node'.
	Evict(n *Node)

	// EvictNS evicts every 'cache node' with the given namespace.
	EvictNS(ns uint64)

	// EvictAll evicts all 'cache node'.
	EvictAll()

	// Close closes the 'cache tree'.
	Close() error
}

// Value is a 'cacheable object'. It may implement util.Releaser; if so,
// the Release method will be called once the object is released.
type Value interface{}

// NamespaceGetter is a convenience wrapper binding a Cache to a fixed
// namespace.
type NamespaceGetter struct {
	Cache *Cache
	NS    uint64
}

// Get calls Cache.Get with the wrapper's fixed namespace.
func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
	return g.Cache.Get(g.NS, key, setFunc)
}
+
+// The hash tables implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
+// Kunlong Zhang, and Michael Spear.
+// ACM Symposium on Principles of Distributed Computing, Jul 2014.
+
const (
	// mInitialSize is the initial (and minimum) number of buckets.
	mInitialSize = 1 << 4
	// mOverflowThreshold is the bucket length beyond which a bucket is
	// counted as overflowing.
	mOverflowThreshold = 1 << 5
	// mOverflowGrowThreshold is the number of overflowing buckets that
	// triggers a table grow.
	mOverflowGrowThreshold = 1 << 7
)

// mBucket is a single hash-table bucket: a mutex-guarded slice of nodes.
// A frozen bucket is immutable; mutating callers must retry against the
// successor table.
type mBucket struct {
	mu     sync.Mutex
	node   []*Node
	frozen bool
}
+
+func (b *mBucket) freeze() []*Node {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if !b.frozen {
+ b.frozen = true
+ }
+ return b.node
+}
+
// get looks up the node with (ns, key) in this bucket, creating it when
// absent and noset is false. It returns done=false when the bucket is
// frozen (the caller must reload the table and retry), added=true when a
// new node was created, and the node — with its reference count already
// incremented — when found or created. Creating a node may trigger an
// asynchronous table grow.
func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
	b.mu.Lock()

	// Frozen bucket: a resize is migrating it; caller must retry.
	if b.frozen {
		b.mu.Unlock()
		return
	}

	// Scan the node.
	for _, n := range b.node {
		if n.hash == hash && n.ns == ns && n.key == key {
			atomic.AddInt32(&n.ref, 1)
			b.mu.Unlock()
			return true, false, n
		}
	}

	// Get only.
	if noset {
		b.mu.Unlock()
		return true, false, nil
	}

	// Create node with an initial reference for the caller.
	n = &Node{
		r:    r,
		hash: hash,
		ns:   ns,
		key:  key,
		ref:  1,
	}
	// Add node to bucket.
	b.node = append(b.node, n)
	bLen := len(b.node)
	b.mu.Unlock()

	// Update counters outside the bucket lock.
	grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
	if bLen > mOverflowThreshold {
		grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
	}

	// Grow the table when node count or overflow count crosses its
	// threshold; the CAS guarantees at most one resize in flight.
	if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
		nhLen := len(h.buckets) << 1
		nh := &mNode{
			buckets:         make([]unsafe.Pointer, nhLen),
			mask:            uint32(nhLen) - 1,
			pred:            unsafe.Pointer(h),
			growThreshold:   int32(nhLen * mOverflowThreshold),
			shrinkThreshold: int32(nhLen >> 1),
		}
		ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
		if !ok {
			panic("BUG: failed swapping head")
		}
		// Buckets migrate lazily via initBucket; also migrate eagerly
		// in the background so pred can be dropped.
		go nh.initBuckets()
	}

	return true, true, n
}
+
// delete removes the node with (ns, key) from this bucket, but only when
// its reference count has already dropped to zero. It returns done=false
// when the bucket is frozen (caller must retry on the successor table)
// and deleted=true when the node was actually removed. Removal releases
// the value, runs onDel callbacks, and may trigger a table shrink.
func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
	b.mu.Lock()

	// Frozen bucket: a resize is migrating it; caller must retry.
	if b.frozen {
		b.mu.Unlock()
		return
	}

	// Scan the node.
	var (
		n    *Node
		bLen int
	)
	for i := range b.node {
		n = b.node[i]
		if n.ns == ns && n.key == key {
			// Only delete nodes that are no longer referenced.
			if atomic.LoadInt32(&n.ref) == 0 {
				deleted = true

				// Call releaser.
				if n.value != nil {
					if r, ok := n.value.(util.Releaser); ok {
						r.Release()
					}
					n.value = nil
				}

				// Remove node from bucket.
				b.node = append(b.node[:i], b.node[i+1:]...)
				bLen = len(b.node)
			}
			break
		}
	}
	b.mu.Unlock()

	if deleted {
		// Run OnDel callbacks outside the bucket lock.
		for _, f := range n.onDel {
			f()
		}

		// Update counters.
		atomic.AddInt32(&r.size, int32(n.size)*-1)
		shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
		if bLen >= mOverflowThreshold {
			atomic.AddInt32(&h.overflow, -1)
		}

		// Shrink the table when the node count drops below the threshold,
		// never below the initial size; at most one resize in flight.
		if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
			nhLen := len(h.buckets) >> 1
			nh := &mNode{
				buckets:         make([]unsafe.Pointer, nhLen),
				mask:            uint32(nhLen) - 1,
				pred:            unsafe.Pointer(h),
				growThreshold:   int32(nhLen * mOverflowThreshold),
				shrinkThreshold: int32(nhLen >> 1),
			}
			ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
			if !ok {
				panic("BUG: failed swapping head")
			}
			go nh.initBuckets()
		}
	}

	return true, deleted
}
+
// mNode is one version of the hash table. A resize installs a new mNode
// whose pred points at the previous version; buckets migrate lazily from
// pred via initBucket.
type mNode struct {
	buckets         []unsafe.Pointer // []*mBucket; entries nil until initialized
	mask            uint32           // len(buckets) - 1; length is a power of two
	pred            unsafe.Pointer   // *mNode; predecessor table, nil once fully migrated
	resizeInProgess int32            // 1 while a grow/shrink of this table is pending

	overflow        int32 // number of overflowing buckets (atomic)
	growThreshold   int32 // node count that triggers a grow
	shrinkThreshold int32 // node count that triggers a shrink
}
+
// initBucket lazily initializes bucket i of this table by migrating
// nodes from the predecessor table: splitting one predecessor bucket on
// grow, or merging two on shrink. Safe for concurrent use; the first
// CAS wins and later callers observe the installed bucket.
func (n *mNode) initBucket(i uint32) *mBucket {
	// Fast path: already initialized by a concurrent caller.
	if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
		return b
	}

	p := (*mNode)(atomic.LoadPointer(&n.pred))
	if p != nil {
		var node []*Node
		if n.mask > p.mask {
			// Grow: this bucket receives a subset of one predecessor bucket.
			pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
			if pb == nil {
				pb = p.initBucket(i & p.mask)
			}
			m := pb.freeze()
			// Split nodes: keep only those hashing to this bucket.
			for _, x := range m {
				if x.hash&n.mask == i {
					node = append(node, x)
				}
			}
		} else {
			// Shrink: this bucket receives the union of two predecessor buckets.
			pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
			if pb0 == nil {
				pb0 = p.initBucket(i)
			}
			pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
			if pb1 == nil {
				pb1 = p.initBucket(i + uint32(len(n.buckets)))
			}
			m0 := pb0.freeze()
			m1 := pb1.freeze()
			// Merge nodes.
			node = make([]*Node, 0, len(m0)+len(m1))
			node = append(node, m0...)
			node = append(node, m1...)
		}
		b := &mBucket{node: node}
		if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
			if len(node) > mOverflowThreshold {
				atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
			}
			return b
		}
	}

	// Lost the CAS race (or no predecessor): return whatever is installed.
	return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
}

// initBuckets eagerly initializes every bucket, then detaches the
// predecessor table so it can be garbage collected.
func (n *mNode) initBuckets() {
	for i := range n.buckets {
		n.initBucket(uint32(i))
	}
	atomic.StorePointer(&n.pred, nil)
}
+
// Cache is a 'cache map': a dynamically-sized non-blocking hash table of
// reference-counted nodes, optionally backed by a Cacher eviction policy.
type Cache struct {
	mu     sync.RWMutex   // guards closed; held read-side by every operation
	mHead  unsafe.Pointer // *mNode; current hash-table version
	nodes  int32          // number of nodes (atomic)
	size   int32          // sum of node sizes (atomic)
	cacher Cacher         // optional eviction policy; may be nil
	closed bool
}
+
+// NewCache creates a new 'cache map'. The cacher is optional and
+// may be nil.
+func NewCache(cacher Cacher) *Cache {
+ h := &mNode{
+ buckets: make([]unsafe.Pointer, mInitialSize),
+ mask: mInitialSize - 1,
+ growThreshold: int32(mInitialSize * mOverflowThreshold),
+ shrinkThreshold: 0,
+ }
+ for i := range h.buckets {
+ h.buckets[i] = unsafe.Pointer(&mBucket{})
+ }
+ r := &Cache{
+ mHead: unsafe.Pointer(h),
+ cacher: cacher,
+ }
+ return r
+}
+
+func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
+ h := (*mNode)(atomic.LoadPointer(&r.mHead))
+ i := hash & h.mask
+ b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
+ if b == nil {
+ b = h.initBucket(i)
+ }
+ return h, b
+}
+
+func (r *Cache) delete(n *Node) bool {
+ for {
+ h, b := r.getBucket(n.hash)
+ done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
+ if done {
+ return deleted
+ }
+ }
+ return false
+}
+
// Nodes returns the number of 'cache node' in the map.
func (r *Cache) Nodes() int {
	return int(atomic.LoadInt32(&r.nodes))
}

// Size returns the sum of 'cache node' sizes in the map.
func (r *Cache) Size() int {
	return int(atomic.LoadInt32(&r.size))
}

// Capacity returns cache capacity, or zero when no Cacher is attached.
func (r *Cache) Capacity() int {
	if r.cacher == nil {
		return 0
	}
	return r.cacher.Capacity()
}

// SetCapacity sets cache capacity; a no-op when no Cacher is attached.
func (r *Cache) SetCapacity(capacity int) {
	if r.cacher != nil {
		r.cacher.SetCapacity(capacity)
	}
}
+
// Get gets the 'cache node' with the given namespace and key.
// If the cache node is not found and setFunc is not nil, Get atomically
// creates the 'cache node' by calling setFunc. Otherwise Get returns nil.
//
// The returned 'cache handle' should be released after use by calling
// its Release method.
func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if r.closed {
		return nil
	}

	hash := murmur32(ns, key, 0xf00)
	// Retry until the lookup hits a non-frozen bucket.
	for {
		h, b := r.getBucket(hash)
		done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
		if done {
			if n != nil {
				n.mu.Lock()
				if n.value == nil {
					// Node exists but holds no value yet.
					if setFunc == nil {
						n.mu.Unlock()
						n.unref()
						return nil
					}

					n.size, n.value = setFunc()
					if n.value == nil {
						// setFunc declined to provide a value.
						n.size = 0
						n.mu.Unlock()
						n.unref()
						return nil
					}
					atomic.AddInt32(&r.size, int32(n.size))
				}
				n.mu.Unlock()
				if r.cacher != nil {
					r.cacher.Promote(n)
				}
				// The handle owns the reference taken by b.get.
				return &Handle{unsafe.Pointer(n)}
			}

			break
		}
	}
	return nil
}

// Delete removes and bans the 'cache node' with the given namespace and
// key. A banned 'cache node' will never be inserted into the 'cache
// tree'. Ban only applies to that particular 'cache node'; a recreated
// node will not be banned.
//
// If onDel is not nil, it is executed immediately if no such 'cache
// node' exists, or once the 'cache node' is released.
//
// Delete returns true if such a 'cache node' exists.
func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if r.closed {
		return false
	}

	hash := murmur32(ns, key, 0xf00)
	for {
		h, b := r.getBucket(hash)
		done, _, n := b.get(r, h, hash, ns, key, true)
		if done {
			if n != nil {
				if onDel != nil {
					// Defer the callback until the node is fully released.
					n.mu.Lock()
					n.onDel = append(n.onDel, onDel)
					n.mu.Unlock()
				}
				if r.cacher != nil {
					r.cacher.Ban(n)
				}
				n.unref()
				return true
			}

			break
		}
	}

	// Node doesn't exist; run the callback immediately.
	if onDel != nil {
		onDel()
	}

	return false
}

// Evict evicts the 'cache node' with the given namespace and key by
// simply calling Cacher.Evict.
//
// Evict returns true if such a 'cache node' exists.
func (r *Cache) Evict(ns, key uint64) bool {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if r.closed {
		return false
	}

	hash := murmur32(ns, key, 0xf00)
	for {
		h, b := r.getBucket(hash)
		done, _, n := b.get(r, h, hash, ns, key, true)
		if done {
			if n != nil {
				if r.cacher != nil {
					r.cacher.Evict(n)
				}
				n.unref()
				return true
			}

			break
		}
	}

	return false
}
+
+// EvictNS evicts 'cache node' with the given namespace. This will
+// simply call Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictNS(ns)
+ }
+}
+
+// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ }
+}
+
// Close closes the 'cache map' and forcefully releases all 'cache node':
// values are released and onDel callbacks run regardless of outstanding
// references. The underlying Cacher, if any, is closed afterwards.
func (r *Cache) Close() error {
	r.mu.Lock()
	if !r.closed {
		r.closed = true

		// Finish any pending bucket migration so every node is visible
		// in the head table.
		h := (*mNode)(r.mHead)
		h.initBuckets()

		for i := range h.buckets {
			b := (*mBucket)(h.buckets[i])
			for _, n := range b.node {
				// Call releaser.
				if n.value != nil {
					if r, ok := n.value.(util.Releaser); ok {
						r.Release()
					}
					n.value = nil
				}

				// Call OnDel.
				for _, f := range n.onDel {
					f()
				}
				n.onDel = nil
			}
		}
	}
	r.mu.Unlock()

	// Close the cacher outside r.mu to avoid deadlock with cacher
	// callbacks that re-enter the cache.
	if r.cacher != nil {
		if err := r.cacher.Close(); err != nil {
			return err
		}
	}
	return nil
}
+
+// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but
+// unlike Close it doesn't forcefully releases 'cache node'.
+func (r *Cache) CloseWeak() error {
+ r.mu.Lock()
+ if !r.closed {
+ r.closed = true
+ }
+ r.mu.Unlock()
+
+ // Avoid deadlock.
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ if err := r.cacher.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// Node is a 'cache node': a single reference-counted cache entry.
type Node struct {
	r *Cache // owning cache map

	hash    uint32 // memoized hash of (ns, key)
	ns, key uint64

	mu    sync.Mutex // guards size, value and onDel
	size  int
	value Value

	ref   int32 // reference count (atomic); deletable once it reaches zero
	onDel []func()

	// CacheData is scratch space reserved for the Cacher implementation
	// (the LRU stores its list element here).
	CacheData unsafe.Pointer
}
+
// NS returns this 'cache node' namespace.
func (n *Node) NS() uint64 {
	return n.ns
}

// Key returns this 'cache node' key.
func (n *Node) Key() uint64 {
	return n.key
}

// Size returns this 'cache node' size.
func (n *Node) Size() int {
	return n.size
}

// Value returns this 'cache node' value.
func (n *Node) Value() Value {
	return n.value
}

// Ref returns this 'cache node' ref counter.
func (n *Node) Ref() int32 {
	return atomic.LoadInt32(&n.ref)
}

// GetHandle returns a handle for this 'cache node'. The caller must
// already hold a reference; taking a handle on a zero-ref node is a bug.
func (n *Node) GetHandle() *Handle {
	if atomic.AddInt32(&n.ref, 1) <= 1 {
		panic("BUG: Node.GetHandle on zero ref")
	}
	return &Handle{unsafe.Pointer(n)}
}
+
// unref drops one reference and deletes the node once unreferenced.
func (n *Node) unref() {
	if atomic.AddInt32(&n.ref, -1) == 0 {
		n.r.delete(n)
	}
}

// unrefLocked is like unref but re-checks, under the cache's read lock,
// that the cache is still open before deleting; used from paths that do
// not already hold r.mu (e.g. Handle.Release).
func (n *Node) unrefLocked() {
	if atomic.AddInt32(&n.ref, -1) == 0 {
		n.r.mu.RLock()
		if !n.r.closed {
			n.r.delete(n)
		}
		n.r.mu.RUnlock()
	}
}
+
// Handle is a 'cache handle' of a 'cache node'; it pins one reference to
// the node until released.
type Handle struct {
	n unsafe.Pointer // *Node; nil once released
}

// Value returns the value of the 'cache node', or nil after Release.
func (h *Handle) Value() Value {
	n := (*Node)(atomic.LoadPointer(&h.n))
	if n != nil {
		return n.value
	}
	return nil
}

// Release releases this 'cache handle'.
// It is safe to call Release multiple times; the CAS guarantees the
// underlying reference is dropped exactly once.
func (h *Handle) Release() {
	nPtr := atomic.LoadPointer(&h.n)
	if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
		n := (*Node)(nPtr)
		n.unrefLocked()
	}
}
+
// murmur32 hashes the (ns, key) pair into a 32-bit value with a
// MurmurHash2-style multiply/shift mix, seeded with seed.
func murmur32(ns, key uint64, seed uint32) uint32 {
	const (
		m = uint32(0x5bd1e995)
		r = 24
	)

	mix := func(k uint32) uint32 {
		k *= m
		k ^= k >> r
		k *= m
		return k
	}

	h := seed
	for _, k := range [4]uint32{
		uint32(ns >> 32), uint32(ns),
		uint32(key >> 32), uint32(key),
	} {
		h = h*m ^ mix(k)
	}

	// Final avalanche.
	h ^= h >> 13
	h *= m
	h ^= h >> 15

	return h
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
new file mode 100644
index 000000000..d9a84cde1
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
@@ -0,0 +1,195 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+ "sync"
+ "unsafe"
+)
+
+type lruNode struct {
+ n *Node
+ h *Handle
+ ban bool
+
+ next, prev *lruNode
+}
+
+func (n *lruNode) insert(at *lruNode) {
+ x := at.next
+ at.next = n
+ n.prev = at
+ n.next = x
+ x.prev = n
+}
+
+func (n *lruNode) remove() {
+ if n.prev != nil {
+ n.prev.next = n.next
+ n.next.prev = n.prev
+ n.prev = nil
+ n.next = nil
+ } else {
+ panic("BUG: removing removed node")
+ }
+}
+
// lru implements Cacher with a least-recently-used eviction policy. The
// recent field is the sentinel of a circular doubly-linked list ordered
// from most (recent.next) to least (recent.prev) recently used.
type lru struct {
	mu       sync.Mutex
	capacity int // maximum total size of cached nodes
	used     int // current total size of cached nodes
	recent   lruNode
}

// reset empties the LRU list and zeroes the usage counter.
func (r *lru) reset() {
	r.recent.next = &r.recent
	r.recent.prev = &r.recent
	r.used = 0
}
+
+func (r *lru) Capacity() int {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return r.capacity
+}
+
// SetCapacity updates the capacity and immediately evicts entries from
// the cold end until usage fits. Evicted handles are released outside
// the mutex to avoid re-entrant deadlocks.
func (r *lru) SetCapacity(capacity int) {
	var evicted []*lruNode

	r.mu.Lock()
	r.capacity = capacity
	for r.used > r.capacity {
		rn := r.recent.prev
		if rn == nil {
			panic("BUG: invalid LRU used or capacity counter")
		}
		rn.remove()
		rn.n.CacheData = nil
		r.used -= rn.n.Size()
		evicted = append(evicted, rn)
	}
	r.mu.Unlock()

	// Release outside the mutex; Release may re-enter the cache.
	for _, rn := range evicted {
		rn.h.Release()
	}
}

// Promote moves n to the hot end of the list, inserting it on first use
// (when it can ever fit) and evicting cold entries to make room. Banned
// nodes are left untouched.
func (r *lru) Promote(n *Node) {
	var evicted []*lruNode

	r.mu.Lock()
	if n.CacheData == nil {
		// Not in the list yet; insert at the hot end if it can fit.
		if n.Size() <= r.capacity {
			rn := &lruNode{n: n, h: n.GetHandle()}
			rn.insert(&r.recent)
			n.CacheData = unsafe.Pointer(rn)
			r.used += n.Size()

			// Evict from the cold end until usage fits again.
			for r.used > r.capacity {
				rn := r.recent.prev
				if rn == nil {
					panic("BUG: invalid LRU used or capacity counter")
				}
				rn.remove()
				rn.n.CacheData = nil
				r.used -= rn.n.Size()
				evicted = append(evicted, rn)
			}
		}
	} else {
		// Already listed; move to the hot end unless banned.
		rn := (*lruNode)(n.CacheData)
		if !rn.ban {
			rn.remove()
			rn.insert(&r.recent)
		}
	}
	r.mu.Unlock()

	// Release evicted handles outside the mutex.
	for _, rn := range evicted {
		rn.h.Release()
	}
}

// Ban removes n from the list (if present) and marks it so Promote will
// never re-insert it.
func (r *lru) Ban(n *Node) {
	r.mu.Lock()
	if n.CacheData == nil {
		// Not cached yet; leave a banned placeholder.
		n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
	} else {
		rn := (*lruNode)(n.CacheData)
		if !rn.ban {
			rn.remove()
			rn.ban = true
			r.used -= rn.n.Size()
			r.mu.Unlock()

			// Release the pinning handle outside the mutex.
			rn.h.Release()
			rn.h = nil
			return
		}
	}
	r.mu.Unlock()
}

// Evict detaches n from its list element and releases the pinning
// handle; banned or unlisted nodes are ignored.
// NOTE(review): the list element itself is left linked; it is reclaimed
// (and r.used adjusted) only when it ages out of the cold end — confirm
// this is intended upstream behavior.
func (r *lru) Evict(n *Node) {
	r.mu.Lock()
	rn := (*lruNode)(n.CacheData)
	if rn == nil || rn.ban {
		r.mu.Unlock()
		return
	}
	n.CacheData = nil
	r.mu.Unlock()

	rn.h.Release()
}

// EvictNS drops every listed node belonging to namespace ns.
func (r *lru) EvictNS(ns uint64) {
	var evicted []*lruNode

	r.mu.Lock()
	// Walk from the cold end; capture prev before unlinking.
	for e := r.recent.prev; e != &r.recent; {
		rn := e
		e = e.prev
		if rn.n.NS() == ns {
			rn.remove()
			rn.n.CacheData = nil
			r.used -= rn.n.Size()
			evicted = append(evicted, rn)
		}
	}
	r.mu.Unlock()

	for _, rn := range evicted {
		rn.h.Release()
	}
}

// EvictAll empties the list, then releases every handle outside the
// mutex; the detached nodes stay chained together so the second walk
// remains valid.
func (r *lru) EvictAll() {
	r.mu.Lock()
	back := r.recent.prev
	for rn := back; rn != &r.recent; rn = rn.prev {
		rn.n.CacheData = nil
	}
	r.reset()
	r.mu.Unlock()

	for rn := back; rn != &r.recent; rn = rn.prev {
		rn.h.Release()
	}
}
+
+func (r *lru) Close() error {
+ return nil
+}
+
+// NewLRU create a new LRU-cache.
+func NewLRU(capacity int) Cacher {
+ r := &lru{capacity: capacity}
+ r.reset()
+ return r
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
new file mode 100644
index 000000000..448402b82
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+)
+
// iComparer adapts a user-supplied comparer.Comparer to operate on
// internal keys (user key plus 8-byte sequence/type trailer).
type iComparer struct {
	ucmp comparer.Comparer
}

// uName returns the user comparer's name.
func (icmp *iComparer) uName() string {
	return icmp.ucmp.Name()
}

// uCompare compares two user keys with the user comparer.
func (icmp *iComparer) uCompare(a, b []byte) int {
	return icmp.ucmp.Compare(a, b)
}

// uSeparator delegates Separator on user keys to the user comparer.
func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
	return icmp.ucmp.Separator(dst, a, b)
}

// uSuccessor delegates Successor on a user key to the user comparer.
func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
	return icmp.ucmp.Successor(dst, b)
}

// Name returns the user comparer's name.
func (icmp *iComparer) Name() string {
	return icmp.uName()
}

// Compare orders internal keys by user key ascending, then by
// sequence/type number descending, so newer entries sort first.
func (icmp *iComparer) Compare(a, b []byte) int {
	x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey())
	if x == 0 {
		if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
			return -1
		} else if m < n {
			return 1
		}
	}
	return x
}
+
// Separator appends to dst a short internal key x with a <= x < b, or
// returns nil when the user comparer could not shorten the user key.
// The shortened key carries keyMaxNumBytes (the maximum sequence
// number) so it sorts before any real entry with the same user key.
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
	ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
	dst = icmp.uSeparator(dst, ua, ub)
	if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
		// Append earliest possible number.
		return append(dst, keyMaxNumBytes...)
	}
	return nil
}

// Successor appends to dst a short internal key x with x >= b, or
// returns nil when the user comparer could not shorten the user key.
func (icmp *iComparer) Successor(dst, b []byte) []byte {
	ub := internalKey(b).ukey()
	dst = icmp.uSuccessor(dst, ub)
	if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
		// Append earliest possible number.
		return append(dst, keyMaxNumBytes...)
	}
	return nil
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
new file mode 100644
index 000000000..14dddf88d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package comparer
+
+import "bytes"
+
// bytesComparer implements Comparer using the natural byte-wise ordering
// of bytes.Compare.
type bytesComparer struct{}

// Compare returns -1, 0 or +1 per bytes.Compare.
func (bytesComparer) Compare(a, b []byte) int {
	return bytes.Compare(a, b)
}

// Name returns the canonical LevelDB comparator name; it must match the
// name stored in existing databases.
func (bytesComparer) Name() string {
	return "leveldb.BytewiseComparator"
}

// Separator appends to dst a key x with a <= x < b that is shorter than
// a, or returns nil when no shortening is possible (e.g. when a is a
// prefix of b). dst is assumed to be empty on entry.
func (bytesComparer) Separator(dst, a, b []byte) []byte {
	limit := len(a)
	if len(b) < limit {
		limit = len(b)
	}
	shared := 0
	for shared < limit && a[shared] == b[shared] {
		shared++
	}
	if shared >= limit {
		// Do not shorten if one string is a prefix of the other.
		return nil
	}
	if c := a[shared]; c < 0xff && c+1 < b[shared] {
		// Bump the first differing byte and truncate.
		dst = append(dst, a[:shared+1]...)
		dst[shared]++
		return dst
	}
	return nil
}

// Successor appends to dst a key x with x >= b that is no longer than b,
// or returns nil when b consists solely of 0xff bytes. dst is assumed to
// be empty on entry.
func (bytesComparer) Successor(dst, b []byte) []byte {
	for i := 0; i < len(b); i++ {
		if b[i] != 0xff {
			dst = append(dst, b[:i+1]...)
			dst[i]++
			return dst
		}
	}
	return nil
}

// DefaultComparer is the default implementation of the Comparer
// interface. It uses the natural ordering, consistent with bytes.Compare.
var DefaultComparer = bytesComparer{}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
new file mode 100644
index 000000000..14a28f16f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package comparer provides interface and implementation for ordering
+// sets of data.
+package comparer
+
// BasicComparer is the interface that wraps the basic Compare method.
type BasicComparer interface {
	// Compare returns -1, 0, or +1 depending on whether a is 'less than',
	// 'equal to' or 'greater than' b. The two arguments can only be 'equal'
	// if their contents are exactly equal. Furthermore, the empty slice
	// must be 'less than' any non-empty slice.
	Compare(a, b []byte) int
}

// Comparer defines a total ordering over the space of []byte keys: a 'less
// than' relationship.
type Comparer interface {
	BasicComparer

	// Name returns the name of the comparer.
	//
	// The Level-DB on-disk format stores the comparer name, and opening a
	// database with a different comparer from the one it was created with
	// will result in an error.
	//
	// An implementation should switch to a new name whenever the comparer
	// implementation changes in a way that would cause the relative
	// ordering of any two keys to change.
	//
	// Names starting with "leveldb." are reserved and should not be used
	// by any users of this package.
	Name() string

	// Below are advanced functions used to reduce the space requirements
	// for internal data structures such as index blocks.

	// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
	// where 'less than' is consistent with Compare. An implementation should
	// return nil if x equal to a.
	//
	// The contents of a and b must not be modified by any means, as doing
	// so may corrupt internal state.
	Separator(dst, a, b []byte) []byte

	// Successor appends a sequence of bytes x to dst such that x >= b, where
	// 'less than' is consistent with Compare. An implementation should return
	// nil if x equal to b.
	//
	// The contents of b must not be modified by any means, as doing so may
	// corrupt internal state.
	Successor(dst, b []byte) []byte
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
new file mode 100644
index 000000000..a02cb2c50
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -0,0 +1,1091 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
// DB is a LevelDB database. It is safe for concurrent use.
type DB struct {
	// Need 64-bit alignment: seq is accessed atomically and must stay
	// the first field so it is 8-byte aligned on 32-bit platforms.
	seq uint64

	// Session.
	s *session

	// MemDB.
	memMu           sync.RWMutex
	memPool         chan *memdb.DB
	mem, frozenMem  *memDB
	journal         *journal.Writer
	journalWriter   storage.Writer
	journalFd       storage.FileDesc
	frozenJournalFd storage.FileDesc
	frozenSeq       uint64

	// Snapshot.
	snapsMu   sync.Mutex
	snapsList *list.List

	// Stats.
	aliveSnaps, aliveIters int32

	// Write.
	batchPool    sync.Pool
	writeMergeC  chan writeMerge
	writeMergedC chan bool
	writeLockC   chan struct{}
	writeAckC    chan error
	writeDelay   time.Duration
	writeDelayN  int
	tr           *Transaction

	// Compaction.
	compCommitLk     sync.Mutex
	tcompCmdC        chan cCmd
	tcompPauseC      chan chan<- struct{}
	mcompCmdC        chan cCmd
	compErrC         chan error
	compPerErrC      chan error
	compErrSetC      chan error
	compWriteLocking bool
	compStats        cStats
	memdbMaxLevel    int // For testing.

	// Close.
	closeW sync.WaitGroup
	closeC chan struct{}
	closed uint32
	closer io.Closer
}
+
// openDB finishes opening a DB on an already-recovered session: it
// recovers journals, cleans obsolete files (read-write mode only) and
// starts the background compaction goroutines.
func openDB(s *session) (*DB, error) {
	s.log("db@open opening")
	start := time.Now()
	db := &DB{
		s: s,
		// Initial sequence
		seq: s.stSeqNum,
		// MemDB
		memPool: make(chan *memdb.DB, 1),
		// Snapshot
		snapsList: list.New(),
		// Write
		batchPool:    sync.Pool{New: newBatch},
		writeMergeC:  make(chan writeMerge),
		writeMergedC: make(chan bool),
		writeLockC:   make(chan struct{}, 1),
		writeAckC:    make(chan error),
		// Compaction
		tcompCmdC:   make(chan cCmd),
		tcompPauseC: make(chan chan<- struct{}),
		mcompCmdC:   make(chan cCmd),
		compErrC:    make(chan error),
		compPerErrC: make(chan error),
		compErrSetC: make(chan error),
		// Close
		closeC: make(chan struct{}),
	}

	// Read-only mode.
	readOnly := s.o.GetReadOnly()

	if readOnly {
		// Recover journals (read-only mode).
		if err := db.recoverJournalRO(); err != nil {
			return nil, err
		}
	} else {
		// Recover journals.
		if err := db.recoverJournal(); err != nil {
			return nil, err
		}

		// Remove any obsolete files.
		if err := db.checkAndCleanFiles(); err != nil {
			// Close journal before bailing out.
			if db.journal != nil {
				db.journal.Close()
				db.journalWriter.Close()
			}
			return nil, err
		}

	}

	// These run for the DB's whole lifetime and don't need to be
	// included in the wait group.
	go db.compactionError()
	go db.mpoolDrain()

	if readOnly {
		db.SetReadOnly()
	} else {
		// Table and memdb compaction workers, tracked by closeW so
		// Close can wait for them.
		db.closeW.Add(2)
		go db.tCompaction()
		go db.mCompaction()
		// go db.jWriter()
	}

	s.logf("db@open done T·%v", time.Since(start))

	// Reclaim resources even if the caller forgets to Close.
	runtime.SetFinalizer(db, (*DB).Close)
	return db, nil
}
+
// Open opens or creates a DB for the given storage.
// The DB will be created if it does not exist, unless ErrorIfMissing is
// true. Also, if ErrorIfExist is true and the DB exists, Open returns
// os.ErrExist.
//
// Open will return an error with type of ErrCorrupted if corruption
// detected in the DB. Use errors.IsCorrupted to test whether an error is
// due to corruption. Corrupted DB can be recovered with Recover function.
//
// The returned DB instance is safe for concurrent use.
// The DB must be closed after use, by calling Close method.
func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
	s, err := newSession(stor, o)
	if err != nil {
		return
	}
	// Release the session if anything below fails.
	defer func() {
		if err != nil {
			s.close()
			s.release()
		}
	}()

	err = s.recover()
	if err != nil {
		// Fall through to creation only when the DB is simply missing
		// and the options allow creating it.
		if !os.IsNotExist(err) || s.o.GetErrorIfMissing() {
			return
		}
		err = s.create()
		if err != nil {
			return
		}
	} else if s.o.GetErrorIfExist() {
		err = os.ErrExist
		return
	}

	return openDB(s)
}
+
+// OpenFile opens or creates a DB for the given path.
+// The DB will be created if not exist, unless ErrorIfMissing is true.
+// Also, if ErrorIfExist is true and the DB exist OpenFile will returns
+// os.ErrExist error.
+//
+// OpenFile uses standard file-system backed storage implementation as
+// described in the leveldb/storage package.
+//
+// OpenFile will return an error with type of ErrCorrupted if corruption
+// detected in the DB. Use errors.IsCorrupted to test whether an error is
+// due to corruption. Corrupted DB can be recovered with Recover function.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func OpenFile(path string, o *opt.Options) (db *DB, err error) {
+ stor, err := storage.OpenFile(path, o.GetReadOnly())
+ if err != nil {
+ return
+ }
+ db, err = Open(stor, o)
+ if err != nil {
+ stor.Close()
+ } else {
+ db.closer = stor
+ }
+ return
+}
+
// Recover recovers and opens a DB with missing or corrupted manifest
// files for the given storage. It will ignore any manifest files, valid
// or not. The DB must already exist or an error is returned. Also,
// Recover ignores the ErrorIfMissing and ErrorIfExist options.
//
// The returned DB instance is safe for concurrent use.
// The DB must be closed after use, by calling Close method.
func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
	s, err := newSession(stor, o)
	if err != nil {
		return
	}
	// Release the session if anything below fails.
	defer func() {
		if err != nil {
			s.close()
			s.release()
		}
	}()

	// Rebuild the manifest from the table files themselves.
	err = recoverTable(s, o)
	if err != nil {
		return
	}
	return openDB(s)
}
+
+// RecoverFile recovers and opens a DB with missing or corrupted manifest files
+// for the given path. It will ignore any manifest files, valid or not.
+// The DB must already exist or it will returns an error.
+// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
+//
+// RecoverFile uses standard file-system backed storage implementation as described
+// in the leveldb/storage package.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
+ stor, err := storage.OpenFile(path, false)
+ if err != nil {
+ return
+ }
+ db, err = Recover(stor, o)
+ if err != nil {
+ stor.Close()
+ } else {
+ db.closer = stor
+ }
+ return
+}
+
// recoverTable rebuilds a fresh manifest from the table files found in
// storage. Every readable table is re-registered at level 0; partially
// corrupted tables are rebuilt from their surviving entries (or dropped
// entirely when StrictRecovery is set), and the session sequence number is
// restored from the largest sequence number seen in any recovered key.
func recoverTable(s *session, o *opt.Options) error {
	o = dupOptions(o)
	// Mask StrictReader, lets StrictRecovery doing its job.
	o.Strict &= ^opt.StrictReader

	// Get all tables and sort it by file number.
	fds, err := s.stor.List(storage.TypeTable)
	if err != nil {
		return err
	}
	sortFds(fds)

	var (
		maxSeq uint64
		recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int

		// We will drop corrupted table.
		strict = o.GetStrict(opt.StrictRecovery)
		noSync = o.GetNoSync()

		rec = &sessionRecord{}
		bpool = util.NewBufferPool(o.GetBlockSize() + 5)
	)
	// buildTable copies all valid internal keys produced by iter into a brand
	// new temporary table file, returning its descriptor and size. On any
	// error the temporary file is removed and a zero FileDesc is returned.
	buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) {
		tmpFd = s.newTemp()
		writer, err := s.stor.Create(tmpFd)
		if err != nil {
			return
		}
		defer func() {
			writer.Close()
			if err != nil {
				s.stor.Remove(tmpFd)
				tmpFd = storage.FileDesc{}
			}
		}()

		// Copy entries.
		tw := table.NewWriter(writer, o)
		for iter.Next() {
			key := iter.Key()
			if validInternalKey(key) {
				err = tw.Append(key, iter.Value())
				if err != nil {
					return
				}
			}
		}
		err = iter.Error()
		if err != nil {
			return
		}
		err = tw.Close()
		if err != nil {
			return
		}
		if !noSync {
			err = writer.Sync()
			if err != nil {
				return
			}
		}
		size = int64(tw.BytesLen())
		return
	}
	// recoverTable scans one table file, tallying good and corrupted keys and
	// blocks, rebuilding the table in place when it is partially corrupted,
	// and adding the surviving table to level 0 of the new manifest.
	recoverTable := func(fd storage.FileDesc) error {
		s.logf("table@recovery recovering @%d", fd.Num)
		reader, err := s.stor.Open(fd)
		if err != nil {
			return err
		}
		var closed bool
		defer func() {
			if !closed {
				reader.Close()
			}
		}()

		// Get file size.
		size, err := reader.Seek(0, 2)
		if err != nil {
			return err
		}

		var (
			tSeq uint64
			tgoodKey, tcorruptedKey, tcorruptedBlock int
			imin, imax []byte
		)
		tr, err := table.NewReader(reader, size, fd, nil, bpool, o)
		if err != nil {
			return err
		}
		iter := tr.NewIterator(nil, nil)
		if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
			// Count corrupted blocks instead of aborting the scan; the
			// iterator keeps going past them when StrictReader is masked.
			itererr.SetErrorCallback(func(err error) {
				if errors.IsCorrupted(err) {
					s.logf("table@recovery block corruption @%d %q", fd.Num, err)
					tcorruptedBlock++
				}
			})
		}

		// Scan the table.
		for iter.Next() {
			key := iter.Key()
			_, seq, _, kerr := parseInternalKey(key)
			if kerr != nil {
				tcorruptedKey++
				continue
			}
			tgoodKey++
			if seq > tSeq {
				tSeq = seq
			}
			// Track the smallest and largest internal keys so the table can be
			// registered with correct bounds.
			if imin == nil {
				imin = append([]byte{}, key...)
			}
			imax = append(imax[:0], key...)
		}
		if err := iter.Error(); err != nil {
			iter.Release()
			return err
		}
		iter.Release()

		goodKey += tgoodKey
		corruptedKey += tcorruptedKey
		corruptedBlock += tcorruptedBlock

		// Under strict recovery any corruption causes the whole table to be
		// dropped rather than rebuilt.
		if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
			droppedTable++
			s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
			return nil
		}

		if tgoodKey > 0 {
			if tcorruptedKey > 0 || tcorruptedBlock > 0 {
				// Rebuild the table.
				s.logf("table@recovery rebuilding @%d", fd.Num)
				iter := tr.NewIterator(nil, nil)
				tmpFd, newSize, err := buildTable(iter)
				iter.Release()
				if err != nil {
					return err
				}
				// Replace the corrupted file with the rebuilt one under the
				// same file number.
				closed = true
				reader.Close()
				if err := s.stor.Rename(tmpFd, fd); err != nil {
					return err
				}
				size = newSize
			}
			if tSeq > maxSeq {
				maxSeq = tSeq
			}
			recoveredKey += tgoodKey
			// Add table to level 0.
			rec.addTable(0, fd.Num, size, imin, imax)
			s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
		} else {
			droppedTable++
			s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size)
		}

		return nil
	}

	// Recover all tables.
	if len(fds) > 0 {
		s.logf("table@recovery F·%d", len(fds))

		// Mark file number as used.
		s.markFileNum(fds[len(fds)-1].Num)

		for _, fd := range fds {
			if err := recoverTable(fd); err != nil {
				return err
			}
		}

		s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq)
	}

	// Set sequence number.
	rec.setSeqNum(maxSeq)

	// Create new manifest.
	if err := s.create(); err != nil {
		return err
	}

	// Commit.
	return s.commit(rec)
}
+
// recoverJournal replays every journal file written at or after the current
// manifest's journal number into a fresh memdb, flushing to tables as the
// write buffer fills, then commits a new manifest pointing at a brand new
// journal and removes the obsolete journal files.
func (db *DB) recoverJournal() error {
	// Get all journals and sort it by file number.
	rawFds, err := db.s.stor.List(storage.TypeJournal)
	if err != nil {
		return err
	}
	sortFds(rawFds)

	// Journals that will be recovered.
	var fds []storage.FileDesc
	for _, fd := range rawFds {
		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
			fds = append(fds, fd)
		}
	}

	var (
		ofd storage.FileDesc // Obsolete file.
		rec = &sessionRecord{}
	)

	// Recover journals.
	if len(fds) > 0 {
		db.logf("journal@recovery F·%d", len(fds))

		// Mark file number as used.
		db.s.markFileNum(fds[len(fds)-1].Num)

		var (
			// Options.
			strict = db.s.o.GetStrict(opt.StrictJournal)
			checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
			writeBuffer = db.s.o.GetWriteBuffer()

			jr *journal.Reader
			mdb = memdb.New(db.s.icmp, writeBuffer)
			buf = &util.Buffer{}
			batchSeq uint64
			batchLen int
		)

		for _, fd := range fds {
			db.logf("journal@recovery recovering @%d", fd.Num)

			fr, err := db.s.stor.Open(fd)
			if err != nil {
				return err
			}

			// Create or reset journal reader instance.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
			}

			// Flush memdb and remove obsolete journal file.
			// A previous journal being fully replayed means its contents are
			// durable once flushed and committed, so the file can go.
			if !ofd.Zero() {
				if mdb.Len() > 0 {
					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
						fr.Close()
						return err
					}
				}

				rec.setJournalNum(fd.Num)
				rec.setSeqNum(db.seq)
				if err := db.s.commit(rec); err != nil {
					fr.Close()
					return err
				}
				rec.resetAddedTables()

				db.s.stor.Remove(ofd)
				ofd = storage.FileDesc{}
			}

			// Replay journal to memdb.
			mdb.Reset()
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						break
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This is error returned due to corruption, with strict == false.
						continue
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}
				batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
				if err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply sequence number as it might be corrupted.
						continue
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}

				// Save sequence number.
				db.seq = batchSeq + uint64(batchLen)

				// Flush it if large enough.
				if mdb.Size() >= writeBuffer {
					if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
						fr.Close()
						return err
					}

					mdb.Reset()
				}
			}

			fr.Close()
			ofd = fd
		}

		// Flush the last memdb.
		if mdb.Len() > 0 {
			if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
				return err
			}
		}
	}

	// Create a new journal.
	if _, err := db.newMem(0); err != nil {
		return err
	}

	// Commit.
	rec.setJournalNum(db.journalFd.Num)
	rec.setSeqNum(db.seq)
	if err := db.s.commit(rec); err != nil {
		// Close journal on error.
		if db.journal != nil {
			db.journal.Close()
			db.journalWriter.Close()
		}
		return err
	}

	// Remove the last obsolete journal file.
	if !ofd.Zero() {
		db.s.stor.Remove(ofd)
	}

	return nil
}
+
// recoverJournalRO replays the pending journal files into a single in-memory
// memdb without writing anything back to storage: no flushes, no manifest
// commit, no journal removal. It is the read-only counterpart of
// recoverJournal.
func (db *DB) recoverJournalRO() error {
	// Get all journals and sort it by file number.
	rawFds, err := db.s.stor.List(storage.TypeJournal)
	if err != nil {
		return err
	}
	sortFds(rawFds)

	// Journals that will be recovered.
	var fds []storage.FileDesc
	for _, fd := range rawFds {
		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
			fds = append(fds, fd)
		}
	}

	var (
		// Options.
		strict = db.s.o.GetStrict(opt.StrictJournal)
		checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
		writeBuffer = db.s.o.GetWriteBuffer()

		mdb = memdb.New(db.s.icmp, writeBuffer)
	)

	// Recover journals.
	if len(fds) > 0 {
		db.logf("journal@recovery RO·Mode F·%d", len(fds))

		var (
			jr *journal.Reader
			buf = &util.Buffer{}
			batchSeq uint64
			batchLen int
		)

		for _, fd := range fds {
			db.logf("journal@recovery recovering @%d", fd.Num)

			fr, err := db.s.stor.Open(fd)
			if err != nil {
				return err
			}

			// Create or reset journal reader instance.
			if jr == nil {
				jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
			} else {
				jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
			}

			// Replay journal to memdb.
			for {
				r, err := jr.Next()
				if err != nil {
					if err == io.EOF {
						break
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}

				buf.Reset()
				if _, err := buf.ReadFrom(r); err != nil {
					if err == io.ErrUnexpectedEOF {
						// This is error returned due to corruption, with strict == false.
						continue
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}
				batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
				if err != nil {
					if !strict && errors.IsCorrupted(err) {
						db.s.logf("journal error: %v (skipped)", err)
						// We won't apply sequence number as it might be corrupted.
						continue
					}

					fr.Close()
					return errors.SetFd(err, fd)
				}

				// Save sequence number.
				db.seq = batchSeq + uint64(batchLen)
			}

			fr.Close()
		}
	}

	// Set memDB.
	// The replayed entries live only in memory for the lifetime of this
	// read-only session.
	db.mem = &memDB{db: db, DB: mdb, ref: 1}

	return nil
}
+
+func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
+ mk, mv, err := mdb.Find(ikey)
+ if err == nil {
+ ukey, _, kt, kerr := parseInternalKey(mk)
+ if kerr != nil {
+ // Shouldn't have had happen.
+ panic(kerr)
+ }
+ if icmp.uCompare(ukey, ikey.ukey()) == 0 {
+ if kt == keyTypeDel {
+ return true, nil, ErrNotFound
+ }
+ return true, mv, nil
+
+ }
+ } else if err != ErrNotFound {
+ return true, nil, err
+ }
+ return
+}
+
// get looks key up as of snapshot sequence seq, searching newest state
// first: the auxiliary memdb (transaction buffer), the effective and frozen
// memdbs, then the auxiliary tables and the current version's tables.
// The returned value is a fresh copy, safe for the caller to modify.
func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
	ikey := makeInternalKey(nil, key, seq, keyTypeSeek)

	if auxm != nil {
		if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
			return append([]byte{}, mv...), me
		}
	}

	em, fm := db.getMems()
	for _, m := range [...]*memDB{em, fm} {
		if m == nil {
			continue
		}
		defer m.decref()

		if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok {
			return append([]byte{}, mv...), me
		}
	}

	v := db.s.version()
	value, cSched, err := v.get(auxt, ikey, ro, false)
	v.release()
	if cSched {
		// Trigger table compaction.
		db.compTrigger(db.tcompCmdC)
	}
	return
}
+
+func nilIfNotFound(err error) error {
+ if err == ErrNotFound {
+ return nil
+ }
+ return err
+}
+
// has reports whether key exists as of snapshot sequence seq, searching the
// same sources in the same order as get but without materialising the value
// (the version lookup is performed with noValue set).
func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
	ikey := makeInternalKey(nil, key, seq, keyTypeSeek)

	if auxm != nil {
		if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
			return me == nil, nilIfNotFound(me)
		}
	}

	em, fm := db.getMems()
	for _, m := range [...]*memDB{em, fm} {
		if m == nil {
			continue
		}
		defer m.decref()

		if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
			return me == nil, nilIfNotFound(me)
		}
	}

	v := db.s.version()
	_, cSched, err := v.get(auxt, ikey, ro, true)
	v.release()
	if cSched {
		// Trigger table compaction.
		db.compTrigger(db.tcompCmdC)
	}
	// ErrNotFound is a negative answer, not an error.
	if err == nil {
		ret = true
	} else if err == ErrNotFound {
		err = nil
	}
	return
}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contains the key.
+//
+// The returned slice is its own copy, it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.get(nil, nil, key, se.seq, ro)
+}
+
+// Has returns true if the DB does contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.has(nil, nil, key, se.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the
+// underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contains keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := db.ok(); err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ // Iterator holds 'version' lock, 'version' is immutable so snapshot
+ // can be released after iterator created.
+ return db.newIterator(nil, nil, se.seq, slice, ro)
+}
+
+// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
+// is a frozen snapshot of a DB state at a particular point in time. The
+// content of snapshot are guaranteed to be consistent.
+//
+// The snapshot must be released after use, by calling Release method.
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ return db.newSnapshot(), nil
+}
+
+// GetProperty returns value of the given property name.
+//
+// Property names:
+// leveldb.num-files-at-level{n}
+// Returns the number of files at level 'n'.
+// leveldb.stats
+// Returns statistics of the underlying DB.
+// leveldb.sstables
+// Returns sstables list for each level.
+// leveldb.blockpool
+// Returns block pool stats.
+// leveldb.cachedblock
+// Returns size of cached block.
+// leveldb.openedtables
+// Returns number of opened tables.
+// leveldb.alivesnaps
+// Returns number of alive snapshots.
+// leveldb.aliveiters
+// Returns number of alive iterators.
func (db *DB) GetProperty(name string) (value string, err error) {
	err = db.ok()
	if err != nil {
		return
	}

	// Every property name must carry the "leveldb." namespace prefix.
	const prefix = "leveldb."
	if !strings.HasPrefix(name, prefix) {
		return "", ErrNotFound
	}
	p := name[len(prefix):]

	v := db.s.version()
	defer v.release()

	numFilesPrefix := "num-files-at-level"
	switch {
	case strings.HasPrefix(p, numFilesPrefix):
		// Parse the trailing level number; any extra suffix makes n != 1
		// and the property is rejected.
		var level uint
		var rest string
		n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
		if n != 1 {
			err = ErrNotFound
		} else {
			value = fmt.Sprint(v.tLen(int(level)))
		}
	case p == "stats":
		value = "Compactions\n" +
			" Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)\n" +
			"-------+------------+---------------+---------------+---------------+---------------\n"
		for level, tables := range v.levels {
			duration, read, write := db.compStats.getStat(level)
			// Skip levels that have never held tables nor compactions.
			if len(tables) == 0 && duration == 0 {
				continue
			}
			value += fmt.Sprintf(" %3d   | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
				level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
				float64(read)/1048576.0, float64(write)/1048576.0)
		}
	case p == "sstables":
		for level, tables := range v.levels {
			value += fmt.Sprintf("--- level %d ---\n", level)
			for _, t := range tables {
				value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax)
			}
		}
	case p == "blockpool":
		value = fmt.Sprintf("%v", db.s.tops.bpool)
	case p == "cachedblock":
		if db.s.tops.bcache != nil {
			value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
		} else {
			value = "<nil>"
		}
	case p == "openedtables":
		value = fmt.Sprintf("%d", db.s.tops.cache.Size())
	case p == "alivesnaps":
		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
	case p == "aliveiters":
		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
	default:
		err = ErrNotFound
	}

	return
}
+
+// SizeOf calculates approximate sizes of the given key ranges.
+// The length of the returned sizes are equal with the length of the given
+// ranges. The returned sizes measure storage space usage, so if the user
+// data compresses by a factor of ten, the returned sizes will be one-tenth
+// the size of the corresponding user data size.
+// The results may not include the sizes of recently written data.
func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
	if err := db.ok(); err != nil {
		return nil, err
	}

	v := db.s.version()
	defer v.release()

	sizes := make(Sizes, 0, len(ranges))
	for _, r := range ranges {
		// Seek keys at keyMaxSeq sort before every real entry for the same
		// user key, so the offsets bound the whole range.
		imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
		imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
		start, err := v.offsetOf(imin)
		if err != nil {
			return nil, err
		}
		limit, err := v.offsetOf(imax)
		if err != nil {
			return nil, err
		}
		// An inverted range contributes zero rather than a negative size.
		var size int64
		if limit >= start {
			size = limit - start
		}
		sizes = append(sizes, size)
	}

	return sizes, nil
}
+
+// Close closes the DB. This will also release any outstanding snapshot,
+// abort any in-flight compaction and discard open transaction.
+//
+// It is not safe to close a DB until all outstanding iterators are released.
+// It is valid to call Close multiple times. Other methods should not be
+// called after the DB has been closed.
func (db *DB) Close() error {
	// setClosed flips the closed flag exactly once; later calls get ErrClosed.
	if !db.setClosed() {
		return ErrClosed
	}

	start := time.Now()
	db.log("db@close closing")

	// Clear the finalizer.
	runtime.SetFinalizer(db, nil)

	// Get compaction error.
	// ErrReadOnly is expected in read-only mode and not reported to callers.
	var err error
	select {
	case err = <-db.compErrC:
		if err == ErrReadOnly {
			err = nil
		}
	default:
	}

	// Signal all goroutines.
	close(db.closeC)

	// Discard open transaction.
	if db.tr != nil {
		db.tr.Discard()
	}

	// Acquire writer lock.
	db.writeLockC <- struct{}{}

	// Wait for all goroutines to exit.
	db.closeW.Wait()

	// Closes journal.
	if db.journal != nil {
		db.journal.Close()
		db.journalWriter.Close()
		db.journal = nil
		db.journalWriter = nil
	}

	if db.writeDelayN > 0 {
		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
	}

	// Close session.
	db.s.close()
	db.logf("db@close done T·%v", time.Since(start))
	db.s.release()

	// Close the user-supplied closer (e.g. the storage opened by OpenFile),
	// but never mask an earlier error with its result.
	if db.closer != nil {
		if err1 := db.closer.Close(); err == nil {
			err = err1
		}
		db.closer = nil
	}

	// Clear memdbs.
	db.clearMems()

	return err
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
new file mode 100644
index 000000000..2d0ad0753
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -0,0 +1,826 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
var (
	// errCompactionTransactExiting is panicked by compactionExitTransact to
	// unwind a compaction transaction; the compaction goroutines recover it,
	// so it never escapes to callers.
	errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)
+
// cStat accumulates total compaction duration and bytes read/written for one
// level.
type cStat struct {
	duration time.Duration
	read int64
	write int64
}

// add folds one finished staging measurement into the running totals.
func (p *cStat) add(n *cStatStaging) {
	p.duration += n.duration
	p.read += n.read
	p.write += n.write
}

// get returns the accumulated totals.
func (p *cStat) get() (duration time.Duration, read, write int64) {
	return p.duration, p.read, p.write
}
+
// cStatStaging measures one in-flight compaction: a stopwatch plus byte
// counters, later folded into a per-level cStat.
type cStatStaging struct {
	start    time.Time
	duration time.Duration
	on       bool
	read     int64
	write    int64
}

// startTimer starts the stopwatch; calling it while already running is a
// no-op, so timed sections may nest safely.
func (p *cStatStaging) startTimer() {
	if p.on {
		return
	}
	p.on = true
	p.start = time.Now()
}

// stopTimer stops the stopwatch and accumulates the elapsed time; calling it
// while stopped is a no-op.
func (p *cStatStaging) stopTimer() {
	if !p.on {
		return
	}
	p.on = false
	p.duration += time.Since(p.start)
}
+
+type cStats struct {
+ lk sync.Mutex
+ stats []cStat
+}
+
+func (p *cStats) addStat(level int, n *cStatStaging) {
+ p.lk.Lock()
+ if level >= len(p.stats) {
+ newStats := make([]cStat, level+1)
+ copy(newStats, p.stats)
+ p.stats = newStats
+ }
+ p.stats[level].add(n)
+ p.lk.Unlock()
+}
+
+func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
+ p.lk.Lock()
+ defer p.lk.Unlock()
+ if level < len(p.stats) {
+ return p.stats[level].get()
+ }
+ return
+}
+
// compactionError is a long-running goroutine implementing a three-state
// machine (no error / transient error / persistent error) that fans the
// latest compaction error out to readers of compErrC. Persistent errors
// (ErrReadOnly or corruption) additionally grab the write lock so that
// writes stop passing through.
func (db *DB) compactionError() {
	var err error
noerr:
	// No error.
	for {
		select {
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
			case err == ErrReadOnly, errors.IsCorrupted(err):
				goto hasperr
			default:
				goto haserr
			}
		case <-db.closeC:
			return
		}
	}
haserr:
	// Transient error.
	for {
		select {
		case db.compErrC <- err:
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
				goto noerr
			case err == ErrReadOnly, errors.IsCorrupted(err):
				goto hasperr
			default:
			}
		case <-db.closeC:
			return
		}
	}
hasperr:
	// Persistent error.
	for {
		select {
		case db.compErrC <- err:
		case db.compPerErrC <- err:
		case db.writeLockC <- struct{}{}:
			// Hold write lock, so that write won't pass-through.
			db.compWriteLocking = true
		case <-db.closeC:
			if db.compWriteLocking {
				// We should release the lock or Close will hang.
				<-db.writeLockC
			}
			return
		}
	}
}
+
// compactionTransactCounter counts iterations a compaction transaction has
// completed; forward progress resets the retry backoff.
type compactionTransactCounter int

// incr advances the counter by one.
func (cnt *compactionTransactCounter) incr() {
	*cnt = *cnt + 1
}
+
// compactionTransactInterface is a retryable unit of compaction work: run is
// invoked (possibly repeatedly, with backoff) until it succeeds, and revert
// undoes any partial effects when the transaction is abandoned.
type compactionTransactInterface interface {
	run(cnt *compactionTransactCounter) error
	revert() error
}
+
// compactionTransact runs t.run until it succeeds, retrying with exponential
// backoff (1s..8s) on transient errors. It exits by panicking with
// errCompactionTransactExiting — after calling t.revert — when the DB is
// closing, a persistent error is pending, or corruption is detected.
func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
	defer func() {
		if x := recover(); x != nil {
			if x == errCompactionTransactExiting {
				// Exiting deliberately: undo partial work, then re-panic so
				// the compaction goroutine's own recover can unwind.
				if err := t.revert(); err != nil {
					db.logf("%s revert error %q", name, err)
				}
			}
			panic(x)
		}
	}()

	const (
		backoffMin = 1 * time.Second
		backoffMax = 8 * time.Second
		backoffMul = 2 * time.Second
	)
	var (
		backoff = backoffMin
		backoffT = time.NewTimer(backoff)
		lastCnt = compactionTransactCounter(0)

		disableBackoff = db.s.o.GetDisableCompactionBackoff()
	)
	for n := 0; ; n++ {
		// Check whether the DB is closed.
		if db.isClosed() {
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		} else if n > 0 {
			db.logf("%s retrying N·%d", name, n)
		}

		// Execute.
		cnt := compactionTransactCounter(0)
		err := t.run(&cnt)
		if err != nil {
			db.logf("%s error I·%d %q", name, cnt, err)
		}

		// Set compaction error status.
		select {
		case db.compErrSetC <- err:
		case perr := <-db.compPerErrC:
			if err != nil {
				db.logf("%s exiting (persistent error %q)", name, perr)
				db.compactionExitTransact()
			}
		case <-db.closeC:
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		}
		if err == nil {
			return
		}
		if errors.IsCorrupted(err) {
			db.logf("%s exiting (corruption detected)", name)
			db.compactionExitTransact()
		}

		if !disableBackoff {
			// Reset backoff duration if counter is advancing.
			if cnt > lastCnt {
				backoff = backoffMin
				lastCnt = cnt
			}

			// Backoff.
			backoffT.Reset(backoff)
			if backoff < backoffMax {
				backoff *= backoffMul
				if backoff > backoffMax {
					backoff = backoffMax
				}
			}
			select {
			case <-backoffT.C:
			case <-db.closeC:
				db.logf("%s exiting", name)
				db.compactionExitTransact()
			}
		}
	}
}
+
+type compactionTransactFunc struct {
+ runFunc func(cnt *compactionTransactCounter) error
+ revertFunc func() error
+}
+
+func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
+ return t.runFunc(cnt)
+}
+
+func (t *compactionTransactFunc) revert() error {
+ if t.revertFunc != nil {
+ return t.revertFunc()
+ }
+ return nil
+}
+
// compactionTransactFunc is a convenience wrapper over compactionTransact
// for callers that supply bare run/revert functions.
func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
	db.compactionTransact(name, &compactionTransactFunc{run, revert})
}
+
// compactionExitTransact aborts the current compaction transaction by
// panicking; the panic is recovered by compactionTransact (which reverts)
// and by the compaction goroutines' deferred handlers.
func (db *DB) compactionExitTransact() {
	panic(errCompactionTransactExiting)
}
+
// compactionCommit commits rec to the session under the commit lock,
// retrying via the transaction machinery. No revert is supplied: a commit
// either lands or the whole compaction is abandoned.
func (db *DB) compactionCommit(name string, rec *sessionRecord) {
	db.compCommitLk.Lock()
	defer db.compCommitLk.Unlock() // Defer is necessary.
	db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
		return db.s.commit(rec)
	}, nil)
}
+
// memCompaction flushes the frozen memdb to tables, commits the resulting
// manifest record, and drops the frozen memdb. Table compaction is paused
// for the duration so the flush cannot race level-0 consumption.
func (db *DB) memCompaction() {
	mdb := db.getFrozenMem()
	if mdb == nil {
		return
	}
	defer mdb.decref()

	db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))

	// Don't compact empty memdb.
	if mdb.Len() == 0 {
		db.logf("memdb@flush skipping")
		// drop frozen memdb
		db.dropFrozenMem()
		return
	}

	// Pause table compaction.
	// A pending persistent error means the table-compaction goroutine may
	// never take resumeC, so skip pausing in that case.
	resumeC := make(chan struct{})
	select {
	case db.tcompPauseC <- (chan<- struct{})(resumeC):
	case <-db.compPerErrC:
		close(resumeC)
		resumeC = nil
	case <-db.closeC:
		return
	}

	var (
		rec = &sessionRecord{}
		stats = &cStatStaging{}
		flushLevel int
	)

	// Generate tables.
	db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
		stats.startTimer()
		flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
		stats.stopTimer()
		return
	}, func() error {
		// Revert: remove any table files created by a failed flush attempt.
		for _, r := range rec.addedTables {
			db.logf("memdb@flush revert @%d", r.num)
			if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
				return err
			}
		}
		return nil
	})

	rec.setJournalNum(db.journalFd.Num)
	rec.setSeqNum(db.frozenSeq)

	// Commit.
	stats.startTimer()
	db.compactionCommit("memdb", rec)
	stats.stopTimer()

	db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)

	for _, r := range rec.addedTables {
		stats.write += r.size
	}
	db.compStats.addStat(flushLevel, stats)

	// Drop frozen memdb.
	db.dropFrozenMem()

	// Resume table compaction.
	if resumeC != nil {
		select {
		case <-resumeC:
			close(resumeC)
		case <-db.closeC:
			return
		}
	}

	// Trigger table compaction.
	db.compTrigger(db.tcompCmdC)
}
+
// tableCompactionBuilder carries the state of one table compaction run. The
// snap* fields are a checkpoint taken after each completed output table so
// that a retried run can skip already-processed iterator positions.
type tableCompactionBuilder struct {
	db *DB
	s *session
	c *compaction
	rec *sessionRecord
	stat0, stat1 *cStatStaging

	// Checkpoint state restored at the start of a retry.
	snapHasLastUkey bool
	snapLastUkey []byte
	snapLastSeq uint64
	snapIter int
	snapKerrCnt int
	snapDropCnt int

	// Running counters for corrupted and dropped keys.
	kerrCnt int
	dropCnt int

	minSeq uint64 // Oldest sequence any live snapshot still needs.
	strict bool // Fail on corrupted keys instead of keeping them.
	tableSize int // Rotate output tables at this size.

	tw *tWriter // Current output table writer, nil between tables.
}
+
// appendKV writes one key/value pair to the current output table, lazily
// creating a new table writer when none is open. Table creation doubles as a
// checkpoint where pause and close requests are honoured.
func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
	// Create new table if not already.
	if b.tw == nil {
		// Check for pause event.
		if b.db != nil {
			select {
			case ch := <-b.db.tcompPauseC:
				b.db.pauseCompaction(ch)
			case <-b.db.closeC:
				b.db.compactionExitTransact()
			default:
			}
		}

		// Create new table.
		var err error
		b.tw, err = b.s.tops.create()
		if err != nil {
			return err
		}
	}

	// Write key/value into table.
	return b.tw.append(key, value)
}
+
// needFlush reports whether the current output table has reached the
// configured rotation size.
func (b *tableCompactionBuilder) needFlush() bool {
	return b.tw.tw.BytesLen() >= b.tableSize
}
+
// flush finalizes the current output table, records it in the session record
// at sourceLevel+1, and clears the writer so the next appendKV opens a fresh
// table.
func (b *tableCompactionBuilder) flush() error {
	t, err := b.tw.finish()
	if err != nil {
		return err
	}
	b.rec.addTableFile(b.c.sourceLevel+1, t)
	b.stat1.write += t.size
	b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
	b.tw = nil
	return nil
}
+
+func (b *tableCompactionBuilder) cleanup() {
+ if b.tw != nil {
+ b.tw.drop()
+ b.tw = nil
+ }
+}
+
// run executes (or resumes, after a retry) the table compaction: it merges
// the source iterators, drops entries shadowed by newer versions or by
// obsolete deletion markers, and rotates output tables at user-key
// boundaries, checkpointing builder state after each completed table.
func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
	snapResumed := b.snapIter > 0
	hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary.
	lastUkey := append([]byte{}, b.snapLastUkey...)
	lastSeq := b.snapLastSeq
	b.kerrCnt = b.snapKerrCnt
	b.dropCnt = b.snapDropCnt
	// Restore compaction state.
	b.c.restore()

	defer b.cleanup()

	b.stat1.startTimer()
	defer b.stat1.stopTimer()

	iter := b.c.newIterator()
	defer iter.Release()
	for i := 0; iter.Next(); i++ {
		// Incr transact counter.
		cnt.incr()

		// Skip until last state.
		if i < b.snapIter {
			continue
		}

		// The first unskipped entry after a resume must not trigger
		// shouldStopBefore, since the previous run already decided its table.
		resumed := false
		if snapResumed {
			resumed = true
			snapResumed = false
		}

		ikey := iter.Key()
		ukey, seq, kt, kerr := parseInternalKey(ikey)

		if kerr == nil {
			shouldStop := !resumed && b.c.shouldStopBefore(ikey)

			if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
				// First occurrence of this user key.

				// Only rotate tables if ukey doesn't hop across.
				if b.tw != nil && (shouldStop || b.needFlush()) {
					if err := b.flush(); err != nil {
						return err
					}

					// Creates snapshot of the state.
					b.c.save()
					b.snapHasLastUkey = hasLastUkey
					b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
					b.snapLastSeq = lastSeq
					b.snapIter = i
					b.snapKerrCnt = b.kerrCnt
					b.snapDropCnt = b.dropCnt
				}

				hasLastUkey = true
				lastUkey = append(lastUkey[:0], ukey...)
				lastSeq = keyMaxSeq
			}

			switch {
			case lastSeq <= b.minSeq:
				// Dropped because newer entry for same user key exist
				fallthrough // (A)
			case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
				// For this user key:
				// (1) there is no data in higher levels
				// (2) data in lower levels will have larger seq numbers
				// (3) data in layers that are being compacted here and have
				//     smaller seq numbers will be dropped in the next
				//     few iterations of this loop (by rule (A) above).
				// Therefore this deletion marker is obsolete and can be dropped.
				lastSeq = seq
				b.dropCnt++
				continue
			default:
				lastSeq = seq
			}
		} else {
			if b.strict {
				return kerr
			}

			// Don't drop corrupted keys.
			hasLastUkey = false
			lastUkey = lastUkey[:0]
			lastSeq = keyMaxSeq
			b.kerrCnt++
		}

		if err := b.appendKV(ikey, iter.Value()); err != nil {
			return err
		}
	}

	if err := iter.Error(); err != nil {
		return err
	}

	// Finish last table.
	if b.tw != nil && !b.tw.empty() {
		return b.flush()
	}
	return nil
}
+
// revert removes every table file this compaction added, undoing the effects
// of an abandoned run.
func (b *tableCompactionBuilder) revert() error {
	for _, at := range b.rec.addedTables {
		b.s.logf("table@build revert @%d", at.num)
		if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
			return err
		}
	}
	return nil
}
+
// tableCompaction carries out the compaction c. A trivial compaction (a
// single table with no overlap) is handled by just moving the table one
// level down, unless noTrivial forces a full rewrite.
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
	defer c.release()

	rec := &sessionRecord{}
	rec.addCompPtr(c.sourceLevel, c.imax)

	if !noTrivial && c.trivial() {
		// Move the lone table down a level without rewriting it.
		t := c.levels[0][0]
		db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
		rec.delTable(c.sourceLevel, t.fd.Num)
		rec.addTableFile(c.sourceLevel+1, t)
		db.compactionCommit("table-move", rec)
		return
	}

	var stats [2]cStatStaging
	for i, tables := range c.levels {
		for _, t := range tables {
			stats[i].read += t.size
			// Insert deleted tables into record
			rec.delTable(c.sourceLevel+i, t.fd.Num)
		}
	}
	sourceSize := int(stats[0].read + stats[1].read)
	minSeq := db.minSeq()
	db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)

	b := &tableCompactionBuilder{
		db: db,
		s: db.s,
		c: c,
		rec: rec,
		stat1: &stats[1],
		minSeq: minSeq,
		strict: db.s.o.GetStrict(opt.StrictCompaction),
		tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
	}
	db.compactionTransact("table@build", b)

	// Commit.
	stats[1].startTimer()
	db.compactionCommit("table", rec)
	stats[1].stopTimer()

	resultSize := int(stats[1].write)
	db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)

	// Save compaction stats
	for i := range stats {
		db.compStats.addStat(c.sourceLevel+1, &stats[i])
	}
}
+
// tableRangeCompaction compacts the user-key range [umin, umax]. A
// non-negative level compacts only that level; a negative level repeatedly
// compacts every level overlapping the range until no further work remains.
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
	db.logf("table@compaction range L%d %q:%q", level, umin, umax)
	if level >= 0 {
		if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
			db.tableCompaction(c, true)
		}
	} else {
		// Retry until nothing to compact.
		for {
			compacted := false

			// Scan for maximum level with overlapped tables.
			v := db.s.version()
			m := 1
			for i := m; i < len(v.levels); i++ {
				tables := v.levels[i]
				if tables.overlaps(db.s.icmp, umin, umax, false) {
					m = i
				}
			}
			v.release()

			for level := 0; level < m; level++ {
				if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
					db.tableCompaction(c, true)
					compacted = true
				}
			}

			// A full pass with no compaction means the range is fully settled.
			if !compacted {
				break
			}
		}
	}

	return nil
}
+
// tableAutoCompaction runs one automatically-picked table compaction, if the
// session currently has one to offer.
func (db *DB) tableAutoCompaction() {
	if c := db.s.pickCompaction(); c != nil {
		db.tableCompaction(c, false)
	}
}
+
// tableNeedCompaction reports whether the current version is due for a table
// compaction.
func (db *DB) tableNeedCompaction() bool {
	v := db.s.version()
	defer v.release()
	return v.needCompaction()
}
+
// pauseCompaction blocks until the pauser signals resumption via ch, or
// aborts the compaction transaction if the DB is closing meanwhile.
func (db *DB) pauseCompaction(ch chan<- struct{}) {
	select {
	case ch <- struct{}{}:
	case <-db.closeC:
		db.compactionExitTransact()
	}
}
+
// cCmd is a command sent to a compaction goroutine; ack delivers the
// command's outcome back to the requester.
type cCmd interface {
	ack(err error)
}
+
// cAuto requests an automatic compaction; ackC is nil for fire-and-forget
// triggers.
type cAuto struct {
	ackC chan<- error
}

// ack reports the result to the requester, if one is waiting. The deferred
// recover swallows the panic from sending on a channel the requester has
// already closed (e.g. after a timeout).
func (r cAuto) ack(err error) {
	if r.ackC != nil {
		defer func() {
			recover()
		}()
		r.ackC <- err
	}
}
+
// cRange requests a manual compaction of the key range [min, max] at the
// given level (all levels when level is negative).
type cRange struct {
	level int
	min, max []byte
	ackC chan<- error
}

// ack reports the result to the requester, if one is waiting. The deferred
// recover swallows the panic from sending on a channel the requester has
// already closed.
func (r cRange) ack(err error) {
	if r.ackC != nil {
		defer func() {
			recover()
		}()
		r.ackC <- err
	}
}
+
// compTrigger triggers an auto compaction without waiting for it; the
// request is silently dropped when the compaction goroutine is busy.
func (db *DB) compTrigger(compC chan<- cCmd) {
	select {
	case compC <- cAuto{}:
	default:
	}
}
+
// compTriggerWait triggers an auto compaction and waits for it to complete.
// It returns early with the pending compaction error, or ErrClosed when the
// DB is closing. Closing ch on exit lets a late ack fail harmlessly (the
// ack's recover absorbs the send-on-closed-channel panic).
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
	ch := make(chan error)
	defer close(ch)
	// Send cmd.
	select {
	case compC <- cAuto{ch}:
	case err = <-db.compErrC:
		return
	case <-db.closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err = <-ch:
	case err = <-db.compErrC:
	case <-db.closeC:
		return ErrClosed
	}
	return err
}
+
+// Send range compaction request.
+func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
+ ch := make(chan error)
+ defer close(ch)
+ // Send cmd.
+ select {
+ case compC <- cRange{level, min, max, ch}:
+ case err := <-db.compErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+ // Wait cmd.
+ select {
+ case err = <-ch:
+ case err = <-db.compErrC:
+ case <-db.closeC:
+ return ErrClosed
+ }
+ return err
+}
+
// mCompaction is the memdb-compaction goroutine: it serves cAuto commands
// from mcompCmdC until the DB closes. Its deferred handler recovers the
// transaction-exit panic and acks any in-flight command with ErrClosed.
func (db *DB) mCompaction() {
	var x cCmd

	defer func() {
		if x := recover(); x != nil {
			if x != errCompactionTransactExiting {
				panic(x)
			}
		}
		if x != nil {
			x.ack(ErrClosed)
		}
		db.closeW.Done()
	}()

	for {
		select {
		case x = <-db.mcompCmdC:
			switch x.(type) {
			case cAuto:
				db.memCompaction()
				x.ack(nil)
				// Clear x so the deferred handler doesn't double-ack.
				x = nil
			default:
				panic("leveldb: unknown command")
			}
		case <-db.closeC:
			return
		}
	}
}
+
// tCompaction is the table-compaction goroutine: it serves commands from
// tcompCmdC and runs automatic compactions while the version needs them.
// cAuto acks are queued and only delivered once no more compaction work is
// pending; cRange commands are acked immediately with their result. The
// deferred handler recovers the transaction-exit panic and acks everything
// outstanding with ErrClosed.
func (db *DB) tCompaction() {
	var x cCmd
	var ackQ []cCmd

	defer func() {
		if x := recover(); x != nil {
			if x != errCompactionTransactExiting {
				panic(x)
			}
		}
		for i := range ackQ {
			ackQ[i].ack(ErrClosed)
			ackQ[i] = nil
		}
		if x != nil {
			x.ack(ErrClosed)
		}
		db.closeW.Done()
	}()

	for {
		if db.tableNeedCompaction() {
			// Work pending: poll for commands but fall through to compact.
			select {
			case x = <-db.tcompCmdC:
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case <-db.closeC:
				return
			default:
			}
		} else {
			// Idle: everything queued is now complete, ack and block.
			for i := range ackQ {
				ackQ[i].ack(nil)
				ackQ[i] = nil
			}
			ackQ = ackQ[:0]
			select {
			case x = <-db.tcompCmdC:
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case <-db.closeC:
				return
			}
		}
		if x != nil {
			switch cmd := x.(type) {
			case cAuto:
				ackQ = append(ackQ, x)
			case cRange:
				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
			default:
				panic("leveldb: unknown command")
			}
			x = nil
		}
		db.tableAutoCompaction()
	}
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
new file mode 100644
index 000000000..03c24cdab
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -0,0 +1,360 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	// errInvalidInternalKey reports a malformed internal key; presumably
+	// surfaced by strict-mode iterators — verify against parseInternalKey.
+	errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
+)
+
+// memdbReleaser adapts a memDB refcount decrement to the util.Releaser
+// interface; sync.Once makes a double Release harmless.
+type memdbReleaser struct {
+	once sync.Once
+	m    *memDB
+}
+
+// Release decrements the wrapped memDB's refcount exactly once.
+func (mr *memdbReleaser) Release() {
+	mr.once.Do(func() {
+		mr.m.decref()
+	})
+}
+
+// newRawIterator builds a merged iterator over: optional auxiliary memdb and
+// tables (used by transactions), the effective memdb, the frozen memdb (if
+// any), and the current version's table iterators. The merged iterator holds
+// a version ref, released via versionReleaser.
+func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+	em, fm := db.getMems()
+	v := db.s.version()
+
+	tableIts := v.getIterators(slice, ro)
+	// +3 reserves room for the aux, effective and frozen memdb iterators.
+	n := len(tableIts) + len(auxt) + 3
+	its := make([]iterator.Iterator, 0, n)
+
+	if auxm != nil {
+		ami := auxm.NewIterator(slice)
+		ami.SetReleaser(&memdbReleaser{m: auxm})
+		its = append(its, ami)
+	}
+	for _, t := range auxt {
+		its = append(its, v.s.tops.newIterator(t, slice, ro))
+	}
+
+	emi := em.NewIterator(slice)
+	emi.SetReleaser(&memdbReleaser{m: em})
+	its = append(its, emi)
+	if fm != nil {
+		fmi := fm.NewIterator(slice)
+		fmi.SetReleaser(&memdbReleaser{m: fm})
+		its = append(its, fmi)
+	}
+	its = append(its, tableIts...)
+	mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
+	mi.SetReleaser(&versionReleaser{v: v})
+	return mi
+}
+
+// newIterator wraps a raw merged iterator in a dbIter that resolves internal
+// keys against the given sequence number. The user-supplied range (if any) is
+// translated to internal-key space using keyMaxSeq/keyTypeSeek. A finalizer
+// guards against leaked (never-Released) iterators.
+func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
+	var islice *util.Range
+	if slice != nil {
+		islice = &util.Range{}
+		if slice.Start != nil {
+			islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
+		}
+		if slice.Limit != nil {
+			islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
+		}
+	}
+	rawIter := db.newRawIterator(auxm, auxt, islice, ro)
+	iter := &dbIter{
+		db:     db,
+		icmp:   db.s.icmp,
+		iter:   rawIter,
+		seq:    seq,
+		strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
+		key:    make([]byte, 0),
+		value:  make([]byte, 0),
+	}
+	atomic.AddInt32(&db.aliveIters, 1)
+	runtime.SetFinalizer(iter, (*dbIter).Release)
+	return iter
+}
+
+// iterSamplingRate returns a randomized byte budget between read samples;
+// the 2x factor keeps the mean at the configured sampling rate.
+func (db *DB) iterSamplingRate() int {
+	return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
+// dir encodes the iterator's position/direction state machine.
+type dir int
+
+const (
+	dirReleased dir = iota - 1 // iterator has been released; unusable
+	dirSOI                     // before first entry (start of iteration)
+	dirEOI                     // after last entry (end of iteration)
+	dirBackward                // positioned, moving backward
+	dirForward                 // positioned, moving forward
+)
+
+// dbIter represents iterator state over a database session: it walks the
+// merged internal-key iterator and exposes only the newest visible user
+// key/value at or below seq, skipping deletions.
+type dbIter struct {
+	db     *DB
+	icmp   *iComparer
+	iter   iterator.Iterator
+	seq    uint64 // snapshot sequence; entries with seq above this are invisible
+	strict bool   // propagate key-corruption errors instead of skipping
+
+	smaplingGap int // [sic upstream spelling] bytes left until next read sample
+	dir         dir
+	key         []byte // current user key (owned copy)
+	value       []byte // current value (owned copy)
+	err         error
+	releaser    util.Releaser
+}
+
+// sampleSeek charges the current entry's size against the sampling budget
+// and reports the key to the DB (possibly triggering seek compaction)
+// each time the budget is exhausted.
+func (i *dbIter) sampleSeek() {
+	ikey := i.iter.Key()
+	i.smaplingGap -= len(ikey) + len(i.iter.Value())
+	for i.smaplingGap < 0 {
+		i.smaplingGap += i.db.iterSamplingRate()
+		i.db.sampleSeek(ikey)
+	}
+}
+
+// setErr records a sticky error and clears the current entry.
+func (i *dbIter) setErr(err error) {
+	i.err = err
+	i.key = nil
+	i.value = nil
+}
+
+// iterErr promotes the underlying iterator's error, if any, to this iterator.
+func (i *dbIter) iterErr() {
+	if err := i.iter.Error(); err != nil {
+		i.setErr(err)
+	}
+}
+
+// Valid reports whether the iterator is positioned on an entry
+// (dirBackward or dirForward) and has no error.
+func (i *dbIter) Valid() bool {
+	return i.err == nil && i.dir > dirEOI
+}
+
+// First moves to the first visible entry; returns false on error, release,
+// or empty range.
+func (i *dbIter) First() bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.iter.First() {
+		i.dir = dirSOI
+		return i.next()
+	}
+	i.dir = dirEOI
+	i.iterErr()
+	return false
+}
+
+// Last moves to the last visible entry; returns false on error, release,
+// or empty range.
+func (i *dbIter) Last() bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if i.iter.Last() {
+		return i.prev()
+	}
+	i.dir = dirSOI
+	i.iterErr()
+	return false
+}
+
+// Seek positions the iterator at the first visible entry with user key >= key.
+// The key is converted to an internal seek key at the iterator's sequence.
+func (i *dbIter) Seek(key []byte) bool {
+	if i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
+	if i.iter.Seek(ikey) {
+		i.dir = dirSOI
+		return i.next()
+	}
+	i.dir = dirEOI
+	i.iterErr()
+	return false
+}
+
+// next advances forward to the next visible user key. Entries newer than
+// i.seq are skipped; a deletion shadows older versions of the same key
+// (recorded in i.key so duplicates compare > 0 and are skipped).
+func (i *dbIter) next() bool {
+	for {
+		if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+			i.sampleSeek()
+			if seq <= i.seq {
+				switch kt {
+				case keyTypeDel:
+					// Skip deleted key.
+					i.key = append(i.key[:0], ukey...)
+					i.dir = dirForward
+				case keyTypeVal:
+					// Accept only the first (newest) version of a new user key.
+					if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
+						i.key = append(i.key[:0], ukey...)
+						i.value = append(i.value[:0], i.iter.Value()...)
+						i.dir = dirForward
+						return true
+					}
+				}
+			}
+		} else if i.strict {
+			i.setErr(kerr)
+			break
+		}
+		if !i.iter.Next() {
+			i.dir = dirEOI
+			i.iterErr()
+			break
+		}
+	}
+	return false
+}
+
+// Next moves to the next visible entry. When reversing from backward
+// iteration the raw iterator must be advanced twice to pass the current key.
+func (i *dbIter) Next() bool {
+	if i.dir == dirEOI || i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) {
+		i.dir = dirEOI
+		i.iterErr()
+		return false
+	}
+	return i.next()
+}
+
+// prev scans backward to the previous visible user key. Walking backward,
+// the newest version of a key is seen last, so i.key/i.value are overwritten
+// until the scan crosses into a smaller user key; del tracks whether the
+// newest version seen so far is a deletion.
+func (i *dbIter) prev() bool {
+	i.dir = dirBackward
+	del := true
+	if i.iter.Valid() {
+		for {
+			if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+				i.sampleSeek()
+				if seq <= i.seq {
+					// Crossed to an older user key with a live newer one: done.
+					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
+						return true
+					}
+					del = (kt == keyTypeDel)
+					if !del {
+						i.key = append(i.key[:0], ukey...)
+						i.value = append(i.value[:0], i.iter.Value()...)
+					}
+				}
+			} else if i.strict {
+				i.setErr(kerr)
+				return false
+			}
+			if !i.iter.Prev() {
+				break
+			}
+		}
+	}
+	// Reached start of data with the last key deleted (or nothing visible).
+	if del {
+		i.dir = dirSOI
+		i.iterErr()
+		return false
+	}
+	return true
+}
+
+// Prev moves to the previous visible entry. From dirEOI it restarts at Last;
+// from dirForward it first backs the raw iterator past all versions of the
+// current user key before delegating to prev.
+func (i *dbIter) Prev() bool {
+	if i.dir == dirSOI || i.err != nil {
+		return false
+	} else if i.dir == dirReleased {
+		i.err = ErrIterReleased
+		return false
+	}
+
+	switch i.dir {
+	case dirEOI:
+		return i.Last()
+	case dirForward:
+		for i.iter.Prev() {
+			if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+				i.sampleSeek()
+				if i.icmp.uCompare(ukey, i.key) < 0 {
+					goto cont
+				}
+			} else if i.strict {
+				i.setErr(kerr)
+				return false
+			}
+		}
+		i.dir = dirSOI
+		i.iterErr()
+		return false
+	}
+
+cont:
+	return i.prev()
+}
+
+// Key returns the current user key, or nil when not positioned on an entry.
+func (i *dbIter) Key() []byte {
+	if i.err != nil || i.dir <= dirEOI {
+		return nil
+	}
+	return i.key
+}
+
+// Value returns the current value, or nil when not positioned on an entry.
+func (i *dbIter) Value() []byte {
+	if i.err != nil || i.dir <= dirEOI {
+		return nil
+	}
+	return i.value
+}
+
+// Release frees the iterator: clears the leak-guard finalizer, runs any
+// user releaser, releases the underlying merged iterator and decrements the
+// DB's live-iterator counter. Idempotent via the dirReleased state.
+func (i *dbIter) Release() {
+	if i.dir != dirReleased {
+		// Clear the finalizer.
+		runtime.SetFinalizer(i, nil)
+
+		if i.releaser != nil {
+			i.releaser.Release()
+			i.releaser = nil
+		}
+
+		i.dir = dirReleased
+		i.key = nil
+		i.value = nil
+		i.iter.Release()
+		i.iter = nil
+		atomic.AddInt32(&i.db.aliveIters, -1)
+		i.db = nil
+	}
+}
+
+// SetReleaser installs a releaser invoked on Release. Panics if the iterator
+// is already released or already has a (different, non-nil) releaser.
+func (i *dbIter) SetReleaser(releaser util.Releaser) {
+	if i.dir == dirReleased {
+		panic(util.ErrReleased)
+	}
+	if i.releaser != nil && releaser != nil {
+		panic(util.ErrHasReleaser)
+	}
+	i.releaser = releaser
+}
+
+// Error returns the sticky error, if any.
+func (i *dbIter) Error() error {
+	return i.err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
new file mode 100644
index 000000000..2c69d2e53
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
@@ -0,0 +1,183 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// snapshotElement is a refcounted entry in db.snapsList pinning a sequence
+// number so compaction keeps versions visible at that sequence.
+type snapshotElement struct {
+	seq uint64
+	ref int
+	e   *list.Element // position in db.snapsList; nil once removed
+}
+
+// Acquires a snapshot, based on latest sequence. Reuses the newest list
+// element when it already pins the current sequence; otherwise appends a
+// fresh element (the list stays sorted by seq because seq never decreases).
+func (db *DB) acquireSnapshot() *snapshotElement {
+	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+
+	seq := db.getSeq()
+
+	if e := db.snapsList.Back(); e != nil {
+		se := e.Value.(*snapshotElement)
+		if se.seq == seq {
+			se.ref++
+			return se
+		} else if seq < se.seq {
+			panic("leveldb: sequence number is not increasing")
+		}
+	}
+	se := &snapshotElement{seq: seq, ref: 1}
+	se.e = db.snapsList.PushBack(se)
+	return se
+}
+
+// Releases given snapshot element; removes it from the list when the
+// refcount drops to zero.
+func (db *DB) releaseSnapshot(se *snapshotElement) {
+	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+
+	se.ref--
+	if se.ref == 0 {
+		db.snapsList.Remove(se.e)
+		se.e = nil
+	} else if se.ref < 0 {
+		panic("leveldb: Snapshot: negative element reference")
+	}
+}
+
+// Gets minimum sequence that is not being snapshotted: the oldest pinned
+// sequence, or the current sequence when no snapshot is held. Entries older
+// than this are safe for compaction to drop.
+func (db *DB) minSeq() uint64 {
+	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+
+	if e := db.snapsList.Front(); e != nil {
+		return e.Value.(*snapshotElement).seq
+	}
+
+	return db.getSeq()
+}
+
+// Snapshot is a DB snapshot: a read-only, consistent view of the DB at the
+// sequence pinned by elem. mu guards the released flag against concurrent use.
+type Snapshot struct {
+	db       *DB
+	elem     *snapshotElement
+	mu       sync.RWMutex
+	released bool
+}
+
+// Creates new snapshot object pinning the current sequence; a finalizer
+// guards against snapshots that are never explicitly Released.
+func (db *DB) newSnapshot() *Snapshot {
+	snap := &Snapshot{
+		db:   db,
+		elem: db.acquireSnapshot(),
+	}
+	atomic.AddInt32(&db.aliveSnaps, 1)
+	runtime.SetFinalizer(snap, (*Snapshot).Release)
+	return snap
+}
+
+// String identifies the snapshot by its pinned sequence number.
+func (snap *Snapshot) String() string {
+	return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if
+// the DB does not contains the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	// Read-lock so a concurrent Release cannot race the lookup.
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	// Lookup is bounded by the snapshot's pinned sequence.
+	return snap.db.get(nil, nil, key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB does contains the given key at the snapshot's
+// pinned sequence.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.has(nil, nil, key, snap.elem.seq, ro)
+}
+
+// NewIterator returns an iterator for the snapshot of the underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contains keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+// Releasing the snapshot doesn't mean releasing the iterator too, the
+// iterator would be still valid until released.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := snap.db.ok(); err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+	// Full lock (not RLock) here; presumably to serialize with Release —
+	// TODO confirm against upstream history.
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+	if snap.released {
+		return iterator.NewEmptyIterator(ErrSnapshotReleased)
+	}
+	// Since iterator already hold version ref, it doesn't need to
+	// hold snapshot ref.
+	return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
+}
+
+// Release releases the snapshot. This will not release any returned
+// iterators, the iterators would still be valid until released or the
+// underlying DB is closed.
+//
+// Other methods should not be called after the snapshot has been released.
+// Idempotent: repeated calls are no-ops.
+func (snap *Snapshot) Release() {
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+
+	if !snap.released {
+		// Clear the finalizer.
+		runtime.SetFinalizer(snap, nil)
+
+		snap.released = true
+		snap.db.releaseSnapshot(snap.elem)
+		atomic.AddInt32(&snap.db.aliveSnaps, -1)
+		snap.db = nil
+		snap.elem = nil
+	}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
new file mode 100644
index 000000000..85b02d24b
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -0,0 +1,234 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// memDB is a refcounted memdb.DB bound to its owning DB so the buffer can be
+// returned to db.memPool when the last reference is dropped.
+type memDB struct {
+	db *DB
+	*memdb.DB
+	ref int32 // atomic reference count
+}
+
+// getref returns the current reference count.
+func (m *memDB) getref() int32 {
+	return atomic.LoadInt32(&m.ref)
+}
+
+// incref adds one reference.
+func (m *memDB) incref() {
+	atomic.AddInt32(&m.ref, 1)
+}
+
+// decref drops one reference; on reaching zero the memdb is recycled into
+// the pool (only if it still has the standard write-buffer capacity).
+func (m *memDB) decref() {
+	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+		// Only put back memdb with std capacity.
+		if m.Capacity() == m.db.s.o.GetWriteBuffer() {
+			m.Reset()
+			m.db.mpoolPut(m.DB)
+		}
+		m.db = nil
+		m.DB = nil
+	} else if ref < 0 {
+		panic("negative memdb ref")
+	}
+}
+
+// Get latest sequence number (atomic read).
+func (db *DB) getSeq() uint64 {
+	return atomic.LoadUint64(&db.seq)
+}
+
+// Atomically adds delta to seq.
+func (db *DB) addSeq(delta uint64) {
+	atomic.AddUint64(&db.seq, delta)
+}
+
+// setSeq atomically overwrites the sequence number.
+func (db *DB) setSeq(seq uint64) {
+	atomic.StoreUint64(&db.seq, seq)
+}
+
+// sampleSeek records a read sample against the current version and triggers
+// a table compaction when the version decides the sampled file is overdue.
+func (db *DB) sampleSeek(ikey internalKey) {
+	v := db.s.version()
+	if v.sampleSeek(ikey) {
+		// Trigger table compaction.
+		db.compTrigger(db.tcompCmdC)
+	}
+	v.release()
+}
+
+// mpoolPut offers a spare memdb to the pool; dropped silently if the pool
+// is full or the DB is closed.
+func (db *DB) mpoolPut(mem *memdb.DB) {
+	if !db.isClosed() {
+		select {
+		case db.memPool <- mem:
+		default:
+		}
+	}
+}
+
+// mpoolGet returns a memDB with at least n bytes of capacity, reusing a
+// pooled memdb when one is available and big enough.
+func (db *DB) mpoolGet(n int) *memDB {
+	var mdb *memdb.DB
+	select {
+	case mdb = <-db.memPool:
+	default:
+	}
+	if mdb == nil || mdb.Capacity() < n {
+		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+	}
+	return &memDB{
+		db: db,
+		DB: mdb,
+	}
+}
+
+// mpoolDrain periodically evicts one pooled memdb (every 30s) so idle DBs
+// don't hold memory, and empties then closes the pool on DB close.
+func (db *DB) mpoolDrain() {
+	ticker := time.NewTicker(30 * time.Second)
+	for {
+		select {
+		case <-ticker.C:
+			select {
+			case <-db.memPool:
+			default:
+			}
+		case <-db.closeC:
+			ticker.Stop()
+			// Make sure the pool is drained.
+			select {
+			case <-db.memPool:
+			case <-time.After(time.Second):
+			}
+			close(db.memPool)
+			return
+		}
+	}
+}
+
+// Create new memdb and freeze the old one; need external synchronization.
+// newMem only called synchronously by the writer. It also rotates the
+// journal: the current journal becomes the frozen journal and a fresh
+// journal file backs the new memdb.
+func (db *DB) newMem(n int) (mem *memDB, err error) {
+	fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
+	w, err := db.s.stor.Create(fd)
+	if err != nil {
+		// Return the unused file number to the allocator.
+		db.s.reuseFileNum(fd.Num)
+		return
+	}
+
+	db.memMu.Lock()
+	defer db.memMu.Unlock()
+
+	if db.frozenMem != nil {
+		panic("still has frozen mem")
+	}
+
+	if db.journal == nil {
+		db.journal = journal.NewWriter(w)
+	} else {
+		db.journal.Reset(w)
+		db.journalWriter.Close()
+		db.frozenJournalFd = db.journalFd
+	}
+	db.journalWriter = w
+	db.journalFd = fd
+	db.frozenMem = db.mem
+	mem = db.mpoolGet(n)
+	mem.incref() // for self
+	mem.incref() // for caller
+	db.mem = mem
+	// The seq only incremented by the writer. And whoever called newMem
+	// should hold write lock, so no need additional synchronization here.
+	db.frozenSeq = db.seq
+	return
+}
+
+// Get all memdbs: the effective and frozen memdb, each with an extra
+// reference the caller must decref.
+func (db *DB) getMems() (e, f *memDB) {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.mem != nil {
+		db.mem.incref()
+	} else if !db.isClosed() {
+		panic("nil effective mem")
+	}
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
+	return db.mem, db.frozenMem
+}
+
+// Get effective memdb with an extra reference the caller must decref;
+// may return nil after the DB is closed.
+func (db *DB) getEffectiveMem() *memDB {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.mem != nil {
+		db.mem.incref()
+	} else if !db.isClosed() {
+		panic("nil effective mem")
+	}
+	return db.mem
+}
+
+// Check whether we have a frozen memdb.
+func (db *DB) hasFrozenMem() bool {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	return db.frozenMem != nil
+}
+
+// Get frozen memdb with an extra reference (caller must decref); nil when
+// there is no frozen memdb.
+func (db *DB) getFrozenMem() *memDB {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
+	return db.frozenMem
+}
+
+// Drop frozen memdb; assume that frozen memdb isn't nil. Also removes the
+// now-obsolete frozen journal file (failure to remove is only logged).
+func (db *DB) dropFrozenMem() {
+	db.memMu.Lock()
+	if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
+		db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
+	} else {
+		db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
+	}
+	db.frozenJournalFd = storage.FileDesc{}
+	db.frozenMem.decref()
+	db.frozenMem = nil
+	db.memMu.Unlock()
+}
+
+// Clear mems ptr; used by DB.Close(). References are dropped, not decref'd,
+// as the DB is going away.
+func (db *DB) clearMems() {
+	db.memMu.Lock()
+	db.mem = nil
+	db.frozenMem = nil
+	db.memMu.Unlock()
+}
+
+// Set closed flag; return true if not already closed (CAS guarantees only
+// one caller wins the transition).
+func (db *DB) setClosed() bool {
+	return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
+}
+
+// Check whether DB was closed.
+func (db *DB) isClosed() bool {
+	return atomic.LoadUint32(&db.closed) != 0
+}
+
+// Check read ok status: ErrClosed once the DB is closed, nil otherwise.
+func (db *DB) ok() error {
+	if db.isClosed() {
+		return ErrClosed
+	}
+	return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
new file mode 100644
index 000000000..b8f7e7d21
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
@@ -0,0 +1,325 @@
+// Copyright (c) 2016, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// errTransactionDone is returned by Transaction methods called after the
+// transaction has been committed or discarded.
+var errTransactionDone = errors.New("leveldb: transaction already closed")
+
+// Transaction is the transaction handle. Writes accumulate in a private
+// memdb and overflow into level-0 tables (tr.tables) until Commit or Discard.
+type Transaction struct {
+	db        *DB
+	lk        sync.RWMutex // guards all fields against concurrent method calls
+	seq       uint64       // transaction-local sequence, ahead of db.seq
+	mem       *memDB       // private write buffer
+	tables    tFiles       // tables flushed from mem but not yet committed
+	ikScratch []byte       // reusable internal-key encode buffer
+	rec       sessionRecord
+	stats     cStatStaging
+	closed    bool
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contains the key.
+//
+// The returned slice is its own copy, it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return nil, errTransactionDone
+	}
+	// Lookup sees the transaction's own memdb and flushed tables first.
+	return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// Has returns true if the DB does contains the given key, including keys
+// written earlier in this transaction.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return false, errTransactionDone
+	}
+	return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the transaction.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently while writes to the
+// transaction. The resultant key/value pairs are guaranteed to be consistent.
+//
+// Slice allows slicing the iterator to only contains keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return iterator.NewEmptyIterator(errTransactionDone)
+	}
+	// The iterator takes its own reference on the transaction memdb.
+	tr.mem.incref()
+	return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
+}
+
+// flush writes the transaction's memdb out as a new level-0 table and
+// records it in tr.tables/tr.rec; no-op when the memdb is empty.
+func (tr *Transaction) flush() error {
+	// Flush memdb.
+	if tr.mem.Len() != 0 {
+		tr.stats.startTimer()
+		iter := tr.mem.NewIterator(nil)
+		t, n, err := tr.db.s.tops.createFrom(iter)
+		iter.Release()
+		tr.stats.stopTimer()
+		if err != nil {
+			return err
+		}
+		// Reuse the memdb when we hold the only reference; otherwise an
+		// iterator still reads it, so swap in a fresh one from the pool.
+		if tr.mem.getref() == 1 {
+			tr.mem.Reset()
+		} else {
+			tr.mem.decref()
+			tr.mem = tr.db.mpoolGet(0)
+			tr.mem.incref()
+		}
+		tr.tables = append(tr.tables, t)
+		tr.rec.addTableFile(0, t)
+		tr.stats.write += t.size
+		tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+	}
+	return nil
+}
+
+// put appends one entry (value or deletion, per kt) at the next sequence,
+// flushing the memdb to a table first when it lacks space.
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+	tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
+	if tr.mem.Free() < len(tr.ikScratch)+len(value) {
+		if err := tr.flush(); err != nil {
+			return err
+		}
+	}
+	if err := tr.mem.Put(tr.ikScratch, value); err != nil {
+		return err
+	}
+	// Sequence is bumped only after a successful put.
+	tr.seq++
+	return nil
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+// Please note that the transaction is not compacted until committed, so if you
+// writes 10 same keys, then those 10 same keys are in the transaction.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return tr.put(keyTypeVal, key, value)
+}
+
+// Delete deletes the value for the given key.
+// Please note that the transaction is not compacted until committed, so if you
+// writes 10 same keys, then those 10 same keys are in the transaction.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return tr.put(keyTypeDel, key, nil)
+}
+
+// Write applies the given batch to the transaction. The batch will be applied
+// sequentially.
+// Please note that the transaction is not compacted until committed, so if you
+// writes 10 same keys, then those 10 same keys are in the transaction.
+//
+// It is safe to modify the contents of the arguments after Write returns.
+func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
+	if b == nil || b.Len() == 0 {
+		return nil
+	}
+
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	// Replay each batch record as an individual transactional put.
+	return b.replayInternal(func(i int, kt keyType, k, v []byte) error {
+		return tr.put(kt, k, v)
+	})
+}
+
+// setDone marks the transaction closed, detaches it from the DB, drops the
+// memdb reference and releases the DB write lock. Caller holds tr.lk.
+func (tr *Transaction) setDone() {
+	tr.closed = true
+	tr.db.tr = nil
+	tr.mem.decref()
+	<-tr.db.writeLockC
+}
+
+// Commit commits the transaction. If error is not nil, then the transaction is
+// not committed, it can then either be retried or discarded.
+//
+// Other methods should not be called after transaction has been committed.
+func (tr *Transaction) Commit() error {
+	if err := tr.db.ok(); err != nil {
+		return err
+	}
+
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	// Flush remaining memdb contents so everything is in tables.
+	if err := tr.flush(); err != nil {
+		// Return error, lets user decide either to retry or discard
+		// transaction.
+		return err
+	}
+	if len(tr.tables) != 0 {
+		// Committing transaction.
+		tr.rec.setSeqNum(tr.seq)
+		tr.db.compCommitLk.Lock()
+		tr.stats.startTimer()
+		var cerr error
+		// Retry the manifest commit up to 3 times, pausing a second between
+		// attempts unless the DB is closing.
+		for retry := 0; retry < 3; retry++ {
+			cerr = tr.db.s.commit(&tr.rec)
+			if cerr != nil {
+				tr.db.logf("transaction@commit error R·%d %q", retry, cerr)
+				select {
+				case <-time.After(time.Second):
+				case <-tr.db.closeC:
+					tr.db.logf("transaction@commit exiting")
+					tr.db.compCommitLk.Unlock()
+					return cerr
+				}
+			} else {
+				// Success. Set db.seq.
+				tr.db.setSeq(tr.seq)
+				break
+			}
+		}
+		tr.stats.stopTimer()
+		if cerr != nil {
+			// Return error, lets user decide either to retry or discard
+			// transaction.
+			return cerr
+		}
+
+		// Update compaction stats. This is safe as long as we hold compCommitLk.
+		tr.db.compStats.addStat(0, &tr.stats)
+
+		// Trigger table auto-compaction.
+		tr.db.compTrigger(tr.db.tcompCmdC)
+		tr.db.compCommitLk.Unlock()
+
+		// Additionally, wait compaction when certain threshold reached.
+		// Ignore error, returns error only if transaction can't be committed.
+		tr.db.waitCompaction()
+	}
+	// Only mark as done if transaction committed successfully.
+	tr.setDone()
+	return nil
+}
+
+// discard removes the transaction's flushed tables from storage and recycles
+// their file numbers. Removal failures are silently skipped (best effort).
+func (tr *Transaction) discard() {
+	// Discard transaction.
+	for _, t := range tr.tables {
+		tr.db.logf("transaction@discard @%d", t.fd.Num)
+		if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil {
+			tr.db.s.reuseFileNum(t.fd.Num)
+		}
+	}
+}
+
+// Discard discards the transaction.
+//
+// Other methods should not be called after transaction has been discarded.
+// Idempotent: a no-op on an already-closed transaction.
+func (tr *Transaction) Discard() {
+	tr.lk.Lock()
+	if !tr.closed {
+		tr.discard()
+		tr.setDone()
+	}
+	tr.lk.Unlock()
+}
+
+// waitCompaction blocks until table compaction completes, but only when
+// level-0 has reached the write-pause trigger; otherwise returns immediately.
+func (db *DB) waitCompaction() error {
+	if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() {
+		return db.compTriggerWait(db.tcompCmdC)
+	}
+	return nil
+}
+
+// OpenTransaction opens an atomic DB transaction. Only one transaction can be
+// opened at a time. Subsequent call to Write and OpenTransaction will be blocked
+// until in-flight transaction is committed or discarded.
+// The returned transaction handle is safe for concurrent use.
+//
+// Transaction is expensive and can overwhelm compaction, especially if
+// transaction size is small. Use with caution.
+//
+// The transaction must be closed once done, either by committing or discarding
+// the transaction.
+// Closing the DB will discard open transaction.
+func (db *DB) OpenTransaction() (*Transaction, error) {
+	if err := db.ok(); err != nil {
+		return nil, err
+	}
+
+	// The write happen synchronously: acquire the DB write lock, which the
+	// transaction holds until setDone releases it.
+	select {
+	case db.writeLockC <- struct{}{}:
+	case err := <-db.compPerErrC:
+		return nil, err
+	case <-db.closeC:
+		return nil, ErrClosed
+	}
+
+	if db.tr != nil {
+		panic("leveldb: has open transaction")
+	}
+
+	// Flush current memdb so the transaction starts from a clean buffer.
+	if db.mem != nil && db.mem.Len() != 0 {
+		if _, err := db.rotateMem(0, true); err != nil {
+			return nil, err
+		}
+	}
+
+	// Wait compaction when certain threshold reached.
+	if err := db.waitCompaction(); err != nil {
+		return nil, err
+	}
+
+	tr := &Transaction{
+		db:  db,
+		seq: db.seq,
+		mem: db.mpoolGet(0),
+	}
+	tr.mem.incref()
+	db.tr = tr
+	return tr, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
new file mode 100644
index 000000000..7ecd960d2
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader is the interface that wraps basic Get and NewIterator methods.
+// This interface is implemented by both DB and Snapshot.
+type Reader interface {
+	Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
+	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
+}
+
+// Sizes is a list of sizes in bytes.
+type Sizes []int64
+
+// Sum returns the sum of the sizes.
+func (sizes Sizes) Sum() int64 {
+	var sum int64
+	for _, size := range sizes {
+		sum += size
+	}
+	return sum
+}
+
+// Logging: thin delegates to the session logger.
+func (db *DB) log(v ...interface{})                 { db.s.log(v...) }
+func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
+// Check and clean files: verifies that every table referenced by the current
+// version exists in storage (returning ErrCorrupted listing missing ones),
+// then removes obsolete manifests, journals and unreferenced tables.
+func (db *DB) checkAndCleanFiles() error {
+	v := db.s.version()
+	defer v.release()
+
+	// tmap: table file number -> seen in storage.
+	tmap := make(map[int64]bool)
+	for _, tables := range v.levels {
+		for _, t := range tables {
+			tmap[t.fd.Num] = false
+		}
+	}
+
+	fds, err := db.s.stor.List(storage.TypeAll)
+	if err != nil {
+		return err
+	}
+
+	var nt int
+	var rem []storage.FileDesc
+	for _, fd := range fds {
+		keep := true
+		switch fd.Type {
+		case storage.TypeManifest:
+			// Keep only the current (and any newer) manifest.
+			keep = fd.Num >= db.s.manifestFd.Num
+		case storage.TypeJournal:
+			// Keep from the frozen journal onward, or the live journal if
+			// nothing is frozen.
+			if !db.frozenJournalFd.Zero() {
+				keep = fd.Num >= db.frozenJournalFd.Num
+			} else {
+				keep = fd.Num >= db.journalFd.Num
+			}
+		case storage.TypeTable:
+			_, keep = tmap[fd.Num]
+			if keep {
+				tmap[fd.Num] = true
+				nt++
+			}
+		}
+
+		if !keep {
+			rem = append(rem, fd)
+		}
+	}
+
+	// Any referenced table not found in storage means corruption.
+	if nt != len(tmap) {
+		var mfds []storage.FileDesc
+		for num, present := range tmap {
+			if !present {
+				mfds = append(mfds, storage.FileDesc{storage.TypeTable, num})
+				db.logf("db@janitor table missing @%d", num)
+			}
+		}
+		return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds})
+	}
+
+	db.logf("db@janitor F·%d G·%d", len(fds), len(rem))
+	for _, fd := range rem {
+		db.logf("db@janitor removing %s-%d", fd.Type, fd.Num)
+		if err := db.s.stor.Remove(fd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
new file mode 100644
index 000000000..cc428b695
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -0,0 +1,443 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error {
+ wr, err := db.journal.Next()
+ if err != nil {
+ return err
+ }
+ if err := writeBatchesWithHeader(wr, batches, seq); err != nil {
+ return err
+ }
+ if err := db.journal.Flush(); err != nil {
+ return err
+ }
+ if sync {
+ return db.journalWriter.Sync()
+ }
+ return nil
+}
+
+func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) {
+ // Wait for pending memdb compaction.
+ err = db.compTriggerWait(db.mcompCmdC)
+ if err != nil {
+ return
+ }
+
+ // Create new memdb and journal.
+ mem, err = db.newMem(n)
+ if err != nil {
+ return
+ }
+
+ // Schedule memdb compaction.
+ if wait {
+ err = db.compTriggerWait(db.mcompCmdC)
+ } else {
+ db.compTrigger(db.mcompCmdC)
+ }
+ return
+}
+
+func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
+ delayed := false
+ slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger()
+ pauseTrigger := db.s.o.GetWriteL0PauseTrigger()
+ flush := func() (retry bool) {
+ mdb = db.getEffectiveMem()
+ if mdb == nil {
+ err = ErrClosed
+ return false
+ }
+ defer func() {
+ if retry {
+ mdb.decref()
+ mdb = nil
+ }
+ }()
+ tLen := db.s.tLen(0)
+ mdbFree = mdb.Free()
+ switch {
+ case tLen >= slowdownTrigger && !delayed:
+ delayed = true
+ time.Sleep(time.Millisecond)
+ case mdbFree >= n:
+ return false
+ case tLen >= pauseTrigger:
+ delayed = true
+ err = db.compTriggerWait(db.tcompCmdC)
+ if err != nil {
+ return false
+ }
+ default:
+ // Allow memdb to grow if it has no entry.
+ if mdb.Len() == 0 {
+ mdbFree = n
+ } else {
+ mdb.decref()
+ mdb, err = db.rotateMem(n, false)
+ if err == nil {
+ mdbFree = mdb.Free()
+ } else {
+ mdbFree = 0
+ }
+ }
+ return false
+ }
+ return true
+ }
+ start := time.Now()
+ for flush() {
+ }
+ if delayed {
+ db.writeDelay += time.Since(start)
+ db.writeDelayN++
+ } else if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+ db.writeDelay = 0
+ db.writeDelayN = 0
+ }
+ return
+}
+
+type writeMerge struct {
+ sync bool
+ batch *Batch
+ keyType keyType
+ key, value []byte
+}
+
+func (db *DB) unlockWrite(overflow bool, merged int, err error) {
+ for i := 0; i < merged; i++ {
+ db.writeAckC <- err
+ }
+ if overflow {
+ // Pass lock to the next write (that failed to merge).
+ db.writeMergedC <- false
+ } else {
+ // Release lock.
+ <-db.writeLockC
+ }
+}
+
+// ourBatch, if defined, should be equal to batch.
+func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error {
+ // Try to flush memdb. This method will also try to throttle writes
+ // if they are too fast and compaction cannot catch up.
+ mdb, mdbFree, err := db.flush(batch.internalLen)
+ if err != nil {
+ db.unlockWrite(false, 0, err)
+ return err
+ }
+ defer mdb.decref()
+
+ var (
+ overflow bool
+ merged int
+ batches = []*Batch{batch}
+ )
+
+ if merge {
+ // Merge limit.
+ var mergeLimit int
+ if batch.internalLen > 128<<10 {
+ mergeLimit = (1 << 20) - batch.internalLen
+ } else {
+ mergeLimit = 128 << 10
+ }
+ mergeCap := mdbFree - batch.internalLen
+ if mergeLimit > mergeCap {
+ mergeLimit = mergeCap
+ }
+
+ merge:
+ for mergeLimit > 0 {
+ select {
+ case incoming := <-db.writeMergeC:
+ if incoming.batch != nil {
+ // Merge batch.
+ if incoming.batch.internalLen > mergeLimit {
+ overflow = true
+ break merge
+ }
+ batches = append(batches, incoming.batch)
+ mergeLimit -= incoming.batch.internalLen
+ } else {
+ // Merge put.
+ internalLen := len(incoming.key) + len(incoming.value) + 8
+ if internalLen > mergeLimit {
+ overflow = true
+ break merge
+ }
+ if ourBatch == nil {
+ ourBatch = db.batchPool.Get().(*Batch)
+ ourBatch.Reset()
+ batches = append(batches, ourBatch)
+ }
+ // We can use same batch since concurrent write doesn't
+ // guarantee write order.
+ ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value)
+ mergeLimit -= internalLen
+ }
+ sync = sync || incoming.sync
+ merged++
+ db.writeMergedC <- true
+
+ default:
+ break merge
+ }
+ }
+ }
+
+ // Seq number.
+ seq := db.seq + 1
+
+ // Write journal.
+ if err := db.writeJournal(batches, seq, sync); err != nil {
+ db.unlockWrite(overflow, merged, err)
+ return err
+ }
+
+ // Put batches.
+ for _, batch := range batches {
+ if err := batch.putMem(seq, mdb.DB); err != nil {
+ panic(err)
+ }
+ seq += uint64(batch.Len())
+ }
+
+ // Incr seq number.
+ db.addSeq(uint64(batchesLen(batches)))
+
+ // Rotate memdb if it reaches the threshold.
+ if batch.internalLen >= mdbFree {
+ db.rotateMem(0, false)
+ }
+
+ db.unlockWrite(overflow, merged, nil)
+ return nil
+}
+
+// Write apply the given batch to the DB. The batch records will be applied
+// sequentially. Write might be used concurrently, when used concurrently and
+// batch is small enough, write will try to merge the batches. Set NoWriteMerge
+// option to true to disable write merge.
+//
+// It is safe to modify the contents of the arguments after Write returns but
+// not before. Write will not modify content of the batch.
+func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error {
+ if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 {
+ return err
+ }
+
+ // If the batch size is larger than the write buffer, it may be justified to write
+ // using transaction instead. Using transaction the batch will be written
+ // into tables directly, skipping the journaling.
+ if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
+ tr, err := db.OpenTransaction()
+ if err != nil {
+ return err
+ }
+ if err := tr.Write(batch, wo); err != nil {
+ tr.Discard()
+ return err
+ }
+ return tr.Commit()
+ }
+
+ merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
+ sync := wo.GetSync() && !db.s.o.GetNoSync()
+
+ // Acquire write lock.
+ if merge {
+ select {
+ case db.writeMergeC <- writeMerge{sync: sync, batch: batch}:
+ if <-db.writeMergedC {
+ // Write is merged.
+ return <-db.writeAckC
+ }
+ // Write is not merged, the write lock is handed to us. Continue.
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ } else {
+ select {
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ }
+
+ return db.writeLocked(batch, nil, merge, sync)
+}
+
+func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
+ sync := wo.GetSync() && !db.s.o.GetNoSync()
+
+ // Acquire write lock.
+ if merge {
+ select {
+ case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}:
+ if <-db.writeMergedC {
+ // Write is merged.
+ return <-db.writeAckC
+ }
+ // Write is not merged, the write lock is handed to us. Continue.
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ } else {
+ select {
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ }
+
+ batch := db.batchPool.Get().(*Batch)
+ batch.Reset()
+ batch.appendRec(kt, key, value)
+ return db.writeLocked(batch, batch, merge, sync)
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map. Write merge also applies for Put, see
+// Write.
+//
+// It is safe to modify the contents of the arguments after Put returns but not
+// before.
+func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
+ return db.putRec(keyTypeVal, key, value, wo)
+}
+
+// Delete deletes the value for the given key. Delete will not return an error if
+// key doesn't exist. Write merge also applies for Delete, see Write.
+//
+// It is safe to modify the contents of the arguments after Delete returns but
+// not before.
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
+ return db.putRec(keyTypeDel, key, nil, wo)
+}
+
+func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
+ iter := mem.NewIterator(nil)
+ defer iter.Release()
+ return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
+ (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
+}
+
+// CompactRange compacts the underlying DB for the given key range.
+// In particular, deleted and overwritten versions are discarded,
+// and the data is rearranged to reduce the cost of operations
+// needed to access the data. This operation should typically only
+// be invoked by users who understand the underlying implementation.
+//
+// A nil Range.Start is treated as a key before all keys in the DB.
+// And a nil Range.Limit is treated as a key after all keys in the DB.
+// Therefore if both are nil then it will compact the entire DB.
+func (db *DB) CompactRange(r util.Range) error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ // Lock writer.
+ select {
+ case db.writeLockC <- struct{}{}:
+ case err := <-db.compPerErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ // Check for overlaps in memdb.
+ mdb := db.getEffectiveMem()
+ if mdb == nil {
+ return ErrClosed
+ }
+ defer mdb.decref()
+ if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
+ // Memdb compaction.
+ if _, err := db.rotateMem(0, false); err != nil {
+ <-db.writeLockC
+ return err
+ }
+ <-db.writeLockC
+ if err := db.compTriggerWait(db.mcompCmdC); err != nil {
+ return err
+ }
+ } else {
+ <-db.writeLockC
+ }
+
+ // Table compaction.
+ return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit)
+}
+
+// SetReadOnly makes DB read-only. It will stay read-only until reopened.
+func (db *DB) SetReadOnly() error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ // Lock writer.
+ select {
+ case db.writeLockC <- struct{}{}:
+ db.compWriteLocking = true
+ case err := <-db.compPerErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ // Set compaction read-only.
+ select {
+ case db.compErrSetC <- ErrReadOnly:
+ case perr := <-db.compPerErrC:
+ return perr
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go
new file mode 100644
index 000000000..53f13bb24
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package leveldb provides implementation of LevelDB key/value database.
+//
+// Create or open a database:
+//
+// db, err := leveldb.OpenFile("path/to/db", nil)
+// ...
+// defer db.Close()
+// ...
+//
+// Read or modify the database content:
+//
+// // Remember that the contents of the returned slice should not be modified.
+// data, err := db.Get([]byte("key"), nil)
+// ...
+// err = db.Put([]byte("key"), []byte("value"), nil)
+// ...
+// err = db.Delete([]byte("key"), nil)
+// ...
+//
+// Iterate over database content:
+//
+// iter := db.NewIterator(nil, nil)
+// for iter.Next() {
+// // Remember that the contents of the returned slice should not be modified, and
+// // only valid until the next call to Next.
+// key := iter.Key()
+// value := iter.Value()
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Iterate over subset of database content with a particular prefix:
+// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Seek-then-Iterate:
+//
+// iter := db.NewIterator(nil, nil)
+// for ok := iter.Seek(key); ok; ok = iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Iterate over subset of database content:
+//
+// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Batch writes:
+//
+// batch := new(leveldb.Batch)
+// batch.Put([]byte("foo"), []byte("value"))
+// batch.Put([]byte("bar"), []byte("another value"))
+// batch.Delete([]byte("baz"))
+// err = db.Write(batch, nil)
+// ...
+//
+// Use bloom filter:
+//
+// o := &opt.Options{
+// Filter: filter.NewBloomFilter(10),
+// }
+// db, err := leveldb.OpenFile("path/to/db", o)
+// ...
+// defer db.Close()
+// ...
+package leveldb
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go
new file mode 100644
index 000000000..de2649812
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+)
+
+// Common errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrReadOnly = errors.New("leveldb: read-only mode")
+ ErrSnapshotReleased = errors.New("leveldb: snapshot released")
+ ErrIterReleased = errors.New("leveldb: iterator released")
+ ErrClosed = errors.New("leveldb: closed")
+)
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
new file mode 100644
index 000000000..8d6146b6f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package errors provides common error types used throughout leveldb.
+package errors
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Common errors.
+var (
+ ErrNotFound = New("leveldb: not found")
+ ErrReleased = util.ErrReleased
+ ErrHasReleaser = util.ErrHasReleaser
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+ return errors.New(text)
+}
+
+// ErrCorrupted is the type that wraps errors that indicate corruption in
+// the database.
+type ErrCorrupted struct {
+ Fd storage.FileDesc
+ Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+ if !e.Fd.Zero() {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+ }
+ return e.Err.Error()
+}
+
+// NewErrCorrupted creates new ErrCorrupted error.
+func NewErrCorrupted(fd storage.FileDesc, err error) error {
+ return &ErrCorrupted{fd, err}
+}
+
+// IsCorrupted returns a boolean indicating whether the error indicates
+// a corruption.
+func IsCorrupted(err error) bool {
+ switch err.(type) {
+ case *ErrCorrupted:
+ return true
+ case *storage.ErrCorrupted:
+ return true
+ }
+ return false
+}
+
+// ErrMissingFiles is the type that indicates a corruption due to missing
+// files. ErrMissingFiles is always wrapped with ErrCorrupted.
+type ErrMissingFiles struct {
+ Fds []storage.FileDesc
+}
+
+func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFd sets 'file info' of the given error with the given file.
+// Currently only ErrCorrupted is supported, otherwise will do nothing.
+func SetFd(err error, fd storage.FileDesc) error {
+ switch x := err.(type) {
+ case *ErrCorrupted:
+ x.Fd = fd
+ return x
+ }
+ return err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
new file mode 100644
index 000000000..e961e420d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/filter"
+)
+
+type iFilter struct {
+ filter.Filter
+}
+
+func (f iFilter) Contains(filter, key []byte) bool {
+ return f.Filter.Contains(filter, internalKey(key).ukey())
+}
+
+func (f iFilter) NewGenerator() filter.FilterGenerator {
+ return iFilterGenerator{f.Filter.NewGenerator()}
+}
+
+type iFilterGenerator struct {
+ filter.FilterGenerator
+}
+
+func (g iFilterGenerator) Add(key []byte) {
+ g.FilterGenerator.Add(internalKey(key).ukey())
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
new file mode 100644
index 000000000..bab0e9970
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package filter
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func bloomHash(key []byte) uint32 {
+ return util.Hash(key, 0xbc9f1d34)
+}
+
+type bloomFilter int
+
+// The bloom filter serializes its parameters and is backward compatible
+// with respect to them. Therefore, its parameters are not added to its
+// name.
+func (bloomFilter) Name() string {
+ return "leveldb.BuiltinBloomFilter"
+}
+
+func (f bloomFilter) Contains(filter, key []byte) bool {
+ nBytes := len(filter) - 1
+ if nBytes < 1 {
+ return false
+ }
+ nBits := uint32(nBytes * 8)
+
+ // Use the encoded k so that we can read filters generated by
+ // bloom filters created using different parameters.
+ k := filter[nBytes]
+ if k > 30 {
+ // Reserved for potentially new encodings for short bloom filters.
+ // Consider it a match.
+ return true
+ }
+
+ kh := bloomHash(key)
+ delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+ for j := uint8(0); j < k; j++ {
+ bitpos := kh % nBits
+ if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 {
+ return false
+ }
+ kh += delta
+ }
+ return true
+}
+
+func (f bloomFilter) NewGenerator() FilterGenerator {
+ // Round down to reduce probing cost a little bit.
+ k := uint8(f * 69 / 100) // 0.69 =~ ln(2)
+ if k < 1 {
+ k = 1
+ } else if k > 30 {
+ k = 30
+ }
+ return &bloomFilterGenerator{
+ n: int(f),
+ k: k,
+ }
+}
+
+type bloomFilterGenerator struct {
+ n int
+ k uint8
+
+ keyHashes []uint32
+}
+
+func (g *bloomFilterGenerator) Add(key []byte) {
+ // Use double-hashing to generate a sequence of hash values.
+ // See analysis in [Kirsch,Mitzenmacher 2006].
+ g.keyHashes = append(g.keyHashes, bloomHash(key))
+}
+
+func (g *bloomFilterGenerator) Generate(b Buffer) {
+ // Compute bloom filter size (in both bits and bytes)
+ nBits := uint32(len(g.keyHashes) * g.n)
+ // For small n, we can see a very high false positive rate. Fix it
+ // by enforcing a minimum bloom filter length.
+ if nBits < 64 {
+ nBits = 64
+ }
+ nBytes := (nBits + 7) / 8
+ nBits = nBytes * 8
+
+ dest := b.Alloc(int(nBytes) + 1)
+ dest[nBytes] = g.k
+ for _, kh := range g.keyHashes {
+ delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+ for j := uint8(0); j < g.k; j++ {
+ bitpos := kh % nBits
+ dest[bitpos/8] |= (1 << (bitpos % 8))
+ kh += delta
+ }
+ }
+
+ g.keyHashes = g.keyHashes[:0]
+}
+
+// NewBloomFilter creates a new initialized bloom filter for given
+// bitsPerKey.
+//
+// Since bitsPerKey is persisted individually for each bloom filter
+// serialization, bloom filters are backwards compatible with respect to
+// changing bitsPerKey. This means that no big performance penalty will
+// be experienced when changing the parameter. See documentation for
+// opt.Options.Filter for more information.
+func NewBloomFilter(bitsPerKey int) Filter {
+ return bloomFilter(bitsPerKey)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
new file mode 100644
index 000000000..7a925c5a8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package filter provides interface and implementation of probabilistic
+// data structure.
+//
+// The filter is responsible for creating a small filter from a set of keys.
+// These filters will then be used to test whether a key is a member of the set.
+// In many cases, a filter can cut down the number of disk seeks from a
+// handful to a single disk seek per DB.Get call.
+package filter
+
+// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods.
+type Buffer interface {
+ // Alloc allocs n bytes of slice from the buffer. This also advancing
+ // write offset.
+ Alloc(n int) []byte
+
+ // Write appends the contents of p to the buffer.
+ Write(p []byte) (n int, err error)
+
+ // WriteByte appends the byte c to the buffer.
+ WriteByte(c byte) error
+}
+
+// Filter is the filter.
+type Filter interface {
+ // Name returns the name of this policy.
+ //
+ // Note that if the filter encoding changes in an incompatible way,
+ // the name returned by this method must be changed. Otherwise, old
+ // incompatible filters may be passed to methods of this type.
+ Name() string
+
+ // NewGenerator creates a new filter generator.
+ NewGenerator() FilterGenerator
+
+ // Contains returns true if the filter contains the given key.
+ //
+ // The filter are filters generated by the filter generator.
+ Contains(filter, key []byte) bool
+}
+
+// FilterGenerator is the filter generator.
+type FilterGenerator interface {
+ // Add adds a key to the filter generator.
+ //
+ // The key may become invalid after the call to this method ends; therefore
+ // the key must be copied if the implementation requires keeping it for later
+ // use. The key should not be modified directly, as doing so may cause
+ // undefined results.
+ Add(key []byte)
+
+ // Generate generates filters based on keys passed so far. After call
+ // to Generate the filter generator may be reset, depending on the implementation.
+ Generate(b Buffer)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
new file mode 100644
index 000000000..a23ab05f7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -0,0 +1,184 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// BasicArray is the interface that wraps basic Len and Search method.
+type BasicArray interface {
+ // Len returns length of the array.
+ Len() int
+
+ // Search finds smallest index that point to a key that is greater
+ // than or equal to the given key.
+ Search(key []byte) int
+}
+
+// Array is the interface that wraps BasicArray and basic Index method.
+type Array interface {
+ BasicArray
+
+ // Index returns key/value pair with index of i.
+ Index(i int) (key, value []byte)
+}
+
+// ArrayIndexer is the interface that wraps BasicArray and basic Get method.
+type ArrayIndexer interface {
+ BasicArray
+
+ // Get returns a new data iterator with index of i.
+ Get(i int) Iterator
+}
+
+type basicArrayIterator struct {
+ util.BasicReleaser
+ array BasicArray
+ pos int
+ err error
+}
+
+func (i *basicArrayIterator) Valid() bool {
+ return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
+}
+
+func (i *basicArrayIterator) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.array.Len() == 0 {
+ i.pos = -1
+ return false
+ }
+ i.pos = 0
+ return true
+}
+
+func (i *basicArrayIterator) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ n := i.array.Len()
+ if n == 0 {
+ i.pos = 0
+ return false
+ }
+ i.pos = n - 1
+ return true
+}
+
+func (i *basicArrayIterator) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ n := i.array.Len()
+ if n == 0 {
+ i.pos = 0
+ return false
+ }
+ i.pos = i.array.Search(key)
+ if i.pos >= n {
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.pos++
+ if n := i.array.Len(); i.pos >= n {
+ i.pos = n
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.pos--
+ if i.pos < 0 {
+ i.pos = -1
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Error() error { return i.err }
+
+type arrayIterator struct {
+ basicArrayIterator
+ array Array
+ pos int
+ key, value []byte
+}
+
+func (i *arrayIterator) updateKV() {
+ if i.pos == i.basicArrayIterator.pos {
+ return
+ }
+ i.pos = i.basicArrayIterator.pos
+ if i.Valid() {
+ i.key, i.value = i.array.Index(i.pos)
+ } else {
+ i.key = nil
+ i.value = nil
+ }
+}
+
+func (i *arrayIterator) Key() []byte {
+ i.updateKV()
+ return i.key
+}
+
+func (i *arrayIterator) Value() []byte {
+ i.updateKV()
+ return i.value
+}
+
+type arrayIteratorIndexer struct {
+ basicArrayIterator
+ array ArrayIndexer
+}
+
+func (i *arrayIteratorIndexer) Get() Iterator {
+ if i.Valid() {
+ return i.array.Get(i.basicArrayIterator.pos)
+ }
+ return nil
+}
+
+// NewArrayIterator returns an iterator from the given array.
+func NewArrayIterator(array Array) Iterator {
+ return &arrayIterator{
+ basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+ array: array,
+ pos: -1,
+ }
+}
+
+// NewArrayIndexer returns an index iterator from the given array.
+func NewArrayIndexer(array ArrayIndexer) IteratorIndexer {
+ return &arrayIteratorIndexer{
+ basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+ array: array,
+ }
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
new file mode 100644
index 000000000..939adbb93
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// IteratorIndexer is the interface that wraps CommonIterator and basic Get
+// method. IteratorIndexer provides index for indexed iterator.
+type IteratorIndexer interface {
+ CommonIterator
+
+ // Get returns a new data iterator for the current position, or nil if
+ // done.
+ Get() Iterator
+}
+
+type indexedIterator struct {
+ util.BasicReleaser
+ index IteratorIndexer
+ strict bool
+
+ data Iterator
+ err error
+ errf func(err error)
+ closed bool
+}
+
+func (i *indexedIterator) setData() {
+ if i.data != nil {
+ i.data.Release()
+ }
+ i.data = i.index.Get()
+}
+
+func (i *indexedIterator) clearData() {
+ if i.data != nil {
+ i.data.Release()
+ }
+ i.data = nil
+}
+
+func (i *indexedIterator) indexErr() {
+ if err := i.index.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ i.err = err
+ }
+}
+
+func (i *indexedIterator) dataErr() bool {
+ if err := i.data.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
+ i.err = err
+ return true
+ }
+ }
+ return false
+}
+
+func (i *indexedIterator) Valid() bool {
+ return i.data != nil && i.data.Valid()
+}
+
+func (i *indexedIterator) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.First() {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ return i.Next()
+}
+
+func (i *indexedIterator) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.Last() {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ if !i.data.Last() {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Prev()
+ }
+ return true
+}
+
+func (i *indexedIterator) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.Seek(key) {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ if !i.data.Seek(key) {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Next()
+ }
+ return true
+}
+
+func (i *indexedIterator) Next() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch {
+ case i.data != nil && !i.data.Next():
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ fallthrough
+ case i.data == nil:
+ if !i.index.Next() {
+ i.indexErr()
+ return false
+ }
+ i.setData()
+ return i.Next()
+ }
+ return true
+}
+
+func (i *indexedIterator) Prev() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch {
+ case i.data != nil && !i.data.Prev():
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ fallthrough
+ case i.data == nil:
+ if !i.index.Prev() {
+ i.indexErr()
+ return false
+ }
+ i.setData()
+ if !i.data.Last() {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Prev()
+ }
+ }
+ return true
+}
+
+func (i *indexedIterator) Key() []byte {
+ if i.data == nil {
+ return nil
+ }
+ return i.data.Key()
+}
+
+func (i *indexedIterator) Value() []byte {
+ if i.data == nil {
+ return nil
+ }
+ return i.data.Value()
+}
+
+func (i *indexedIterator) Release() {
+ i.clearData()
+ i.index.Release()
+ i.BasicReleaser.Release()
+}
+
+func (i *indexedIterator) Error() error {
+ if i.err != nil {
+ return i.err
+ }
+ if err := i.index.Error(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (i *indexedIterator) SetErrorCallback(f func(err error)) {
+ i.errf = f
+}
+
+// NewIndexedIterator returns an 'indexed iterator'. An index is an iterator
+// that returns another iterator, a 'data iterator'. A 'data iterator' is the
+// iterator that contains actual key/value pairs.
+//
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt 'indexed iterator', otherwise the iterator will
+// continue to the next 'data iterator'. Corruption on 'index iterator' will not be
+// ignored and will halt the iterator.
+func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
+ return &indexedIterator{index: index, strict: strict}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
new file mode 100644
index 000000000..3b5553274
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package iterator provides interface and implementation to traverse over
+// contents of a database.
+package iterator
+
+import (
+ "errors"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ ErrIterReleased = errors.New("leveldb/iterator: iterator released")
+)
+
+// IteratorSeeker is the interface that wraps the 'seeks method'.
+type IteratorSeeker interface {
+	// First moves the iterator to the first key/value pair. If the iterator
+	// only contains one key/value pair then First and Last would move
+	// to the same key/value pair.
+	// It returns whether such a pair exists.
+ First() bool
+
+	// Last moves the iterator to the last key/value pair. If the iterator
+	// only contains one key/value pair then First and Last would move
+	// to the same key/value pair.
+	// It returns whether such a pair exists.
+ Last() bool
+
+	// Seek moves the iterator to the first key/value pair whose key is greater
+	// than or equal to the given key.
+	// It returns whether such a pair exists.
+ //
+ // It is safe to modify the contents of the argument after Seek returns.
+ Seek(key []byte) bool
+
+ // Next moves the iterator to the next key/value pair.
+ // It returns whether the iterator is exhausted.
+ Next() bool
+
+ // Prev moves the iterator to the previous key/value pair.
+ // It returns whether the iterator is exhausted.
+ Prev() bool
+}
+
+// CommonIterator is the interface that wraps common iterator methods.
+type CommonIterator interface {
+ IteratorSeeker
+
+ // util.Releaser is the interface that wraps basic Release method.
+ // When called Release will releases any resources associated with the
+ // iterator.
+ util.Releaser
+
+ // util.ReleaseSetter is the interface that wraps the basic SetReleaser
+ // method.
+ util.ReleaseSetter
+
+ // TODO: Remove this when ready.
+ Valid() bool
+
+ // Error returns any accumulated error. Exhausting all the key/value pairs
+ // is not considered to be an error.
+ Error() error
+}
+
+// Iterator iterates over a DB's key/value pairs in key order.
+//
+// When it encounters an error, any 'seeks method' will return false and will
+// yield no key/value pairs. The error can be queried by calling the Error
+// method. Calling Release is still necessary.
+//
+// An iterator must be released after use, but it is not necessary to read
+// an iterator until exhaustion.
+// Also, an iterator is not necessarily safe for concurrent use, but it is
+// safe to use multiple iterators concurrently, with each in a dedicated
+// goroutine.
+type Iterator interface {
+ CommonIterator
+
+ // Key returns the key of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seeks method'.
+ Key() []byte
+
+	// Value returns the value of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seeks method'.
+ Value() []byte
+}
+
+// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback
+// method.
+//
+// ErrorCallbackSetter implemented by indexed and merged iterator.
+type ErrorCallbackSetter interface {
+ // SetErrorCallback allows set an error callback of the corresponding
+ // iterator. Use nil to clear the callback.
+ SetErrorCallback(f func(err error))
+}
+
+type emptyIterator struct {
+ util.BasicReleaser
+ err error
+}
+
+func (i *emptyIterator) rErr() {
+ if i.err == nil && i.Released() {
+ i.err = ErrIterReleased
+ }
+}
+
+func (*emptyIterator) Valid() bool { return false }
+func (i *emptyIterator) First() bool { i.rErr(); return false }
+func (i *emptyIterator) Last() bool { i.rErr(); return false }
+func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
+func (i *emptyIterator) Next() bool { i.rErr(); return false }
+func (i *emptyIterator) Prev() bool { i.rErr(); return false }
+func (*emptyIterator) Key() []byte { return nil }
+func (*emptyIterator) Value() []byte { return nil }
+func (i *emptyIterator) Error() error { return i.err }
+
+// NewEmptyIterator creates an empty iterator. The err parameter can be
+// nil, but if not nil the given err will be returned by Error method.
+func NewEmptyIterator(err error) Iterator {
+ return &emptyIterator{err: err}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
new file mode 100644
index 000000000..1a7e29df8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+type mergedIterator struct {
+ cmp comparer.Comparer
+ iters []Iterator
+ strict bool
+
+ keys [][]byte
+ index int
+ dir dir
+ err error
+ errf func(err error)
+ releaser util.Releaser
+}
+
+func assertKey(key []byte) []byte {
+ if key == nil {
+ panic("leveldb/iterator: nil key")
+ }
+ return key
+}
+
+func (i *mergedIterator) iterErr(iter Iterator) bool {
+ if err := iter.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
+ i.err = err
+ return true
+ }
+ }
+ return false
+}
+
+func (i *mergedIterator) Valid() bool {
+ return i.err == nil && i.dir > dirEOI
+}
+
+func (i *mergedIterator) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.First():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirSOI
+ return i.next()
+}
+
+func (i *mergedIterator) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.Last():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirEOI
+ return i.prev()
+}
+
+func (i *mergedIterator) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.Seek(key):
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirSOI
+ return i.next()
+}
+
+func (i *mergedIterator) next() bool {
+ var key []byte
+ if i.dir == dirForward {
+ key = i.keys[i.index]
+ }
+ for x, tkey := range i.keys {
+ if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) {
+ key = tkey
+ i.index = x
+ }
+ }
+ if key == nil {
+ i.dir = dirEOI
+ return false
+ }
+ i.dir = dirForward
+ return true
+}
+
+func (i *mergedIterator) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirSOI:
+ return i.First()
+ case dirBackward:
+ key := append([]byte{}, i.keys[i.index]...)
+ if !i.Seek(key) {
+ return false
+ }
+ return i.Next()
+ }
+
+ x := i.index
+ iter := i.iters[x]
+ switch {
+ case iter.Next():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ return i.next()
+}
+
+func (i *mergedIterator) prev() bool {
+ var key []byte
+ if i.dir == dirBackward {
+ key = i.keys[i.index]
+ }
+ for x, tkey := range i.keys {
+ if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) {
+ key = tkey
+ i.index = x
+ }
+ }
+ if key == nil {
+ i.dir = dirSOI
+ return false
+ }
+ i.dir = dirBackward
+ return true
+}
+
+func (i *mergedIterator) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirEOI:
+ return i.Last()
+ case dirForward:
+ key := append([]byte{}, i.keys[i.index]...)
+ for x, iter := range i.iters {
+ if x == i.index {
+ continue
+ }
+ seek := iter.Seek(key)
+ switch {
+ case seek && iter.Prev(), !seek && iter.Last():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ }
+
+ x := i.index
+ iter := i.iters[x]
+ switch {
+ case iter.Prev():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ return i.prev()
+}
+
+func (i *mergedIterator) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.keys[i.index]
+}
+
+func (i *mergedIterator) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.iters[i.index].Value()
+}
+
+func (i *mergedIterator) Release() {
+ if i.dir != dirReleased {
+ i.dir = dirReleased
+ for _, iter := range i.iters {
+ iter.Release()
+ }
+ i.iters = nil
+ i.keys = nil
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ }
+}
+
+func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *mergedIterator) Error() error {
+ return i.err
+}
+
+func (i *mergedIterator) SetErrorCallback(f func(err error)) {
+ i.errf = f
+}
+
+// NewMergedIterator returns an iterator that merges its input. Walking the
+// resultant iterator will return all key/value pairs of all input iterators
+// in strictly increasing key order, as defined by cmp.
+// The input's key ranges may overlap, but there are assumed to be no duplicate
+// keys: if iters[i] contains a key k then iters[j] will not contain that key k.
+// None of the iters may be nil.
+//
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'merged iterator', otherwise the iterator will
+// continue to the next 'input iterator'.
+func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
+ return &mergedIterator{
+ iters: iters,
+ cmp: cmp,
+ strict: strict,
+ keys: make([][]byte, len(iters)),
+ }
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
new file mode 100644
index 000000000..d094c3d0f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
@@ -0,0 +1,524 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
+// License, authors and contributors information can be found at the below URLs respectively:
+// https://code.google.com/p/leveldb-go/source/browse/LICENSE
+// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+// Package journal reads and writes sequences of journals. Each journal is a stream
+// of bytes that completes before the next journal starts.
+//
+// When reading, call Next to obtain an io.Reader for the next journal. Next will
+// return io.EOF when there are no more journals. It is valid to call Next
+// without reading the current journal to exhaustion.
+//
+// When writing, call Next to obtain an io.Writer for the next journal. Calling
+// Next finishes the current journal. Call Close to finish the final journal.
+//
+// Optionally, call Flush to finish the current journal and flush the underlying
+// writer without starting a new journal. To start a new journal after flushing,
+// call Next.
+//
+// Neither Readers or Writers are safe to use concurrently.
+//
+// Example code:
+// func read(r io.Reader) ([]string, error) {
+// var ss []string
+// journals := journal.NewReader(r, nil, true, true)
+// for {
+// j, err := journals.Next()
+// if err == io.EOF {
+// break
+// }
+// if err != nil {
+// return nil, err
+// }
+// s, err := ioutil.ReadAll(j)
+// if err != nil {
+// return nil, err
+// }
+// ss = append(ss, string(s))
+// }
+// return ss, nil
+// }
+//
+// func write(w io.Writer, ss []string) error {
+// journals := journal.NewWriter(w)
+// for _, s := range ss {
+// j, err := journals.Next()
+// if err != nil {
+// return err
+// }
+// if _, err := j.Write([]byte(s)), err != nil {
+// return err
+// }
+// }
+// return journals.Close()
+// }
+//
+// The wire format is that the stream is divided into 32KiB blocks, and each
+// block contains a number of tightly packed chunks. Chunks cannot cross block
+// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
+// block must be zero.
+//
+// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4
+// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type)
+// followed by a payload. The checksum is over the chunk type and the payload.
+//
+// There are four chunk types: whether the chunk is the full journal, or the
+// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
+// has one first chunk, zero or more middle chunks, and one last chunk.
+//
+// The wire format allows for limited recovery in the face of data corruption:
+// on a format error (such as a checksum mismatch), the reader moves to the
+// next block and looks for the next full or first chunk.
+package journal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// These constants are part of the wire format and should not be changed.
+const (
+ fullChunkType = 1
+ firstChunkType = 2
+ middleChunkType = 3
+ lastChunkType = 4
+)
+
+const (
+ blockSize = 32 * 1024
+ headerSize = 7
+)
+
+type flusher interface {
+ Flush() error
+}
+
+// ErrCorrupted is the error type that generated by corrupted block or chunk.
+type ErrCorrupted struct {
+ Size int
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
+}
+
+// Dropper is the interface that wraps the simple Drop method. The Drop
+// method will be called when the journal reader drops a block or chunk.
+type Dropper interface {
+ Drop(err error)
+}
+
+// Reader reads journals from an underlying io.Reader.
+type Reader struct {
+ // r is the underlying reader.
+ r io.Reader
+ // the dropper.
+ dropper Dropper
+ // strict flag.
+ strict bool
+ // checksum flag.
+ checksum bool
+ // seq is the sequence number of the current journal.
+ seq int
+ // buf[i:j] is the unread portion of the current chunk's payload.
+ // The low bound, i, excludes the chunk header.
+ i, j int
+ // n is the number of bytes of buf that are valid. Once reading has started,
+ // only the final block can have n < blockSize.
+ n int
+ // last is whether the current chunk is the last chunk of the journal.
+ last bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewReader returns a new reader. The dropper may be nil, and if
+// strict is true then corrupted or invalid chunk will halt the journal
+// reader entirely.
+func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
+ return &Reader{
+ r: r,
+ dropper: dropper,
+ strict: strict,
+ checksum: checksum,
+ last: true,
+ }
+}
+
+var errSkip = errors.New("leveldb/journal: skipped")
+
+func (r *Reader) corrupt(n int, reason string, skip bool) error {
+ if r.dropper != nil {
+ r.dropper.Drop(&ErrCorrupted{n, reason})
+ }
+ if r.strict && !skip {
+ r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason})
+ return r.err
+ }
+ return errSkip
+}
+
+// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
+// next block into the buffer if necessary.
+func (r *Reader) nextChunk(first bool) error {
+ for {
+ if r.j+headerSize <= r.n {
+ checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
+ length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
+ chunkType := r.buf[r.j+6]
+ unprocBlock := r.n - r.j
+ if checksum == 0 && length == 0 && chunkType == 0 {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "zero header", false)
+ }
+ if chunkType < fullChunkType || chunkType > lastChunkType {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false)
+ }
+ r.i = r.j + headerSize
+ r.j = r.j + headerSize + int(length)
+ if r.j > r.n {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "chunk length overflows block", false)
+ } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "checksum mismatch", false)
+ }
+ if first && chunkType != fullChunkType && chunkType != firstChunkType {
+ chunkLength := (r.j - r.i) + headerSize
+ r.i = r.j
+ // Report the error, but skip it.
+ return r.corrupt(chunkLength, "orphan chunk", true)
+ }
+ r.last = chunkType == fullChunkType || chunkType == lastChunkType
+ return nil
+ }
+
+ // The last block.
+ if r.n < blockSize && r.n > 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
+ r.err = io.EOF
+ return r.err
+ }
+
+ // Read block.
+ n, err := io.ReadFull(r.r, r.buf[:])
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return err
+ }
+ if n == 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
+ r.err = io.EOF
+ return r.err
+ }
+ r.i, r.j, r.n = 0, 0, n
+ }
+}
+
+// Next returns a reader for the next journal. It returns io.EOF if there are no
+// more journals. The reader returned becomes stale after the next Next call,
+// and should no longer be used. If strict is false, the reader will return
+// an io.ErrUnexpectedEOF error when it finds a corrupted journal.
+func (r *Reader) Next() (io.Reader, error) {
+ r.seq++
+ if r.err != nil {
+ return nil, r.err
+ }
+ r.i = r.j
+ for {
+ if err := r.nextChunk(true); err == nil {
+ break
+ } else if err != errSkip {
+ return nil, err
+ }
+ }
+ return &singleReader{r, r.seq, nil}, nil
+}
+
+// Reset resets the journal reader, allows reuse of the journal reader. Reset returns
+// last accumulated error.
+func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
+ r.seq++
+ err := r.err
+ r.r = reader
+ r.dropper = dropper
+ r.strict = strict
+ r.checksum = checksum
+ r.i = 0
+ r.j = 0
+ r.n = 0
+ r.last = true
+ r.err = nil
+ return err
+}
+
+type singleReader struct {
+ r *Reader
+ seq int
+ err error
+}
+
+func (x *singleReader) Read(p []byte) (int, error) {
+ r := x.r
+ if r.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale reader")
+ }
+ if x.err != nil {
+ return 0, x.err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ for r.i == r.j {
+ if r.last {
+ return 0, io.EOF
+ }
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
+ return 0, x.err
+ }
+ }
+ n := copy(p, r.buf[r.i:r.j])
+ r.i += n
+ return n, nil
+}
+
+func (x *singleReader) ReadByte() (byte, error) {
+ r := x.r
+ if r.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale reader")
+ }
+ if x.err != nil {
+ return 0, x.err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ for r.i == r.j {
+ if r.last {
+ return 0, io.EOF
+ }
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
+ return 0, x.err
+ }
+ }
+ c := r.buf[r.i]
+ r.i++
+ return c, nil
+}
+
+// Writer writes journals to an underlying io.Writer.
+type Writer struct {
+ // w is the underlying writer.
+ w io.Writer
+ // seq is the sequence number of the current journal.
+ seq int
+ // f is w as a flusher.
+ f flusher
+ // buf[i:j] is the bytes that will become the current chunk.
+ // The low bound, i, includes the chunk header.
+ i, j int
+ // buf[:written] has already been written to w.
+ // written is zero unless Flush has been called.
+ written int
+ // first is whether the current chunk is the first chunk of the journal.
+ first bool
+ // pending is whether a chunk is buffered but not yet written.
+ pending bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewWriter returns a new Writer.
+func NewWriter(w io.Writer) *Writer {
+ f, _ := w.(flusher)
+ return &Writer{
+ w: w,
+ f: f,
+ }
+}
+
+// fillHeader fills in the header for the pending chunk.
+func (w *Writer) fillHeader(last bool) {
+ if w.i+headerSize > w.j || w.j > blockSize {
+ panic("leveldb/journal: bad writer state")
+ }
+ if last {
+ if w.first {
+ w.buf[w.i+6] = fullChunkType
+ } else {
+ w.buf[w.i+6] = lastChunkType
+ }
+ } else {
+ if w.first {
+ w.buf[w.i+6] = firstChunkType
+ } else {
+ w.buf[w.i+6] = middleChunkType
+ }
+ }
+ binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value())
+ binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize))
+}
+
+// writeBlock writes the buffered block to the underlying writer, and reserves
+// space for the next chunk's header.
+func (w *Writer) writeBlock() {
+ _, w.err = w.w.Write(w.buf[w.written:])
+ w.i = 0
+ w.j = headerSize
+ w.written = 0
+}
+
+// writePending finishes the current journal and writes the buffer to the
+// underlying writer.
+func (w *Writer) writePending() {
+ if w.err != nil {
+ return
+ }
+ if w.pending {
+ w.fillHeader(true)
+ w.pending = false
+ }
+ _, w.err = w.w.Write(w.buf[w.written:w.j])
+ w.written = w.j
+}
+
+// Close finishes the current journal and closes the writer.
+func (w *Writer) Close() error {
+ w.seq++
+ w.writePending()
+ if w.err != nil {
+ return w.err
+ }
+ w.err = errors.New("leveldb/journal: closed Writer")
+ return nil
+}
+
+// Flush finishes the current journal, writes to the underlying writer, and
+// flushes it if that writer implements interface{ Flush() error }.
+func (w *Writer) Flush() error {
+ w.seq++
+ w.writePending()
+ if w.err != nil {
+ return w.err
+ }
+ if w.f != nil {
+ w.err = w.f.Flush()
+ return w.err
+ }
+ return nil
+}
+
+// Reset resets the journal writer, allowing reuse of the journal writer. Reset
+// will also close the journal writer if not already closed.
+func (w *Writer) Reset(writer io.Writer) (err error) {
+ w.seq++
+ if w.err == nil {
+ w.writePending()
+ err = w.err
+ }
+ w.w = writer
+ w.f, _ = writer.(flusher)
+ w.i = 0
+ w.j = 0
+ w.written = 0
+ w.first = false
+ w.pending = false
+ w.err = nil
+ return
+}
+
+// Next returns a writer for the next journal. The writer returned becomes stale
+// after the next Close, Flush or Next call, and should no longer be used.
+func (w *Writer) Next() (io.Writer, error) {
+ w.seq++
+ if w.err != nil {
+ return nil, w.err
+ }
+ if w.pending {
+ w.fillHeader(true)
+ }
+ w.i = w.j
+ w.j = w.j + headerSize
+ // Check if there is room in the block for the header.
+ if w.j > blockSize {
+ // Fill in the rest of the block with zeroes.
+ for k := w.i; k < blockSize; k++ {
+ w.buf[k] = 0
+ }
+ w.writeBlock()
+ if w.err != nil {
+ return nil, w.err
+ }
+ }
+ w.first = true
+ w.pending = true
+ return singleWriter{w, w.seq}, nil
+}
+
+type singleWriter struct {
+ w *Writer
+ seq int
+}
+
+func (x singleWriter) Write(p []byte) (int, error) {
+ w := x.w
+ if w.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale writer")
+ }
+ if w.err != nil {
+ return 0, w.err
+ }
+ n0 := len(p)
+ for len(p) > 0 {
+ // Write a block, if it is full.
+ if w.j == blockSize {
+ w.fillHeader(false)
+ w.writeBlock()
+ if w.err != nil {
+ return 0, w.err
+ }
+ w.first = false
+ }
+ // Copy bytes into the buffer.
+ n := copy(w.buf[w.j:], p)
+ w.j += n
+ p = p[n:]
+ }
+ return n0, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 000000000..ad8f51ec8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
+ Ikey []byte
+ Reason string
+}
+
+func (e *ErrInternalKeyCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+ return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type keyType uint
+
+func (kt keyType) String() string {
+ switch kt {
+ case keyTypeDel:
+ return "d"
+ case keyTypeVal:
+ return "v"
+ }
+ return fmt.Sprintf("<invalid:%#x>", uint(kt))
+}
+
+// Value types encoded as the last component of internal keys.
+// Don't modify; these values are saved to disk.
+const (
+ keyTypeDel = keyType(0)
+ keyTypeVal = keyType(1)
+)
+
+// keyTypeSeek defines the keyType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const keyTypeSeek = keyTypeVal
+
+const (
+	// Maximum value possible for sequence number; the 8 bits are
+	// used by the value type, so they can be packed together in a single
+	// 64-bit integer.
+ keyMaxSeq = (uint64(1) << 56) - 1
+ // Maximum value possible for packed sequence number and type.
+ keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
+)
+
+// Maximum number encoded in bytes.
+var keyMaxNumBytes = make([]byte, 8)
+
+func init() {
+ binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
+}
+
+type internalKey []byte
+
+func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
+ if seq > keyMaxSeq {
+ panic("leveldb: invalid sequence number")
+ } else if kt > keyTypeVal {
+ panic("leveldb: invalid type")
+ }
+
+ dst = ensureBuffer(dst, len(ukey)+8)
+ copy(dst, ukey)
+ binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
+ return internalKey(dst)
+}
+
+func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
+ if len(ik) < 8 {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
+ }
+ num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
+ }
+ ukey = ik[:len(ik)-8]
+ return
+}
+
+func validInternalKey(ik []byte) bool {
+ _, _, _, err := parseInternalKey(ik)
+ return err == nil
+}
+
+func (ik internalKey) assert() {
+ if ik == nil {
+ panic("leveldb: nil internalKey")
+ }
+ if len(ik) < 8 {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
+ }
+}
+
+func (ik internalKey) ukey() []byte {
+ ik.assert()
+ return ik[:len(ik)-8]
+}
+
+func (ik internalKey) num() uint64 {
+ ik.assert()
+ return binary.LittleEndian.Uint64(ik[len(ik)-8:])
+}
+
+func (ik internalKey) parseNum() (seq uint64, kt keyType) {
+ num := ik.num()
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+ }
+ return
+}
+
+func (ik internalKey) String() string {
+ if ik == nil {
+ return "<nil>"
+ }
+
+ if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
+ return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+ }
+ return fmt.Sprintf("<invalid:%#x>", []byte(ik))
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
new file mode 100644
index 000000000..18a19ed42
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -0,0 +1,475 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package memdb provides in-memory key/value database implementation.
+package memdb
+
+import (
+ "math/rand"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Common errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrIterReleased = errors.New("leveldb/memdb: iterator released")
+)
+
+const tMaxHeight = 12
+
// dbIter iterates over the parent DB's skip list, optionally clipped to
// a key range. It is not safe for concurrent use.
type dbIter struct {
	util.BasicReleaser
	p          *DB
	slice      *util.Range // optional [Start, Limit) clip; nil means unbounded
	node       int         // nodeData offset of the current node; 0 = invalid/exhausted
	forward    bool        // direction of the last movement (used by Next/Prev wrap-around)
	key, value []byte
	err        error
}
+
// fill loads the current node's key/value into i.key/i.value and reports
// whether the iterator is positioned on a valid in-range entry.
// checkStart/checkLimit select which range bound to enforce: a forward move
// only needs the limit checked, a backward move only the start.
func (i *dbIter) fill(checkStart, checkLimit bool) bool {
	if i.node != 0 {
		n := i.p.nodeData[i.node]          // key offset in kvData
		m := n + i.p.nodeData[i.node+nKey] // end of key / start of value
		i.key = i.p.kvData[n:m]
		if i.slice != nil {
			switch {
			case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0:
				fallthrough
			case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0:
				// Current node is outside the range: invalidate.
				i.node = 0
				goto bail
			}
		}
		i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]]
		return true
	}
bail:
	i.key = nil
	i.value = nil
	return false
}
+
+func (i *dbIter) Valid() bool {
+ return i.node != 0
+}
+
+func (i *dbIter) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Start != nil {
+ i.node, _ = i.p.findGE(i.slice.Start, false)
+ } else {
+ i.node = i.p.nodeData[nNext]
+ }
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = false
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Limit != nil {
+ i.node = i.p.findLT(i.slice.Limit)
+ } else {
+ i.node = i.p.findLast()
+ }
+ return i.fill(true, false)
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 {
+ key = i.slice.Start
+ }
+ i.node, _ = i.p.findGE(key, false)
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.node == 0 {
+ if !i.forward {
+ return i.First()
+ }
+ return false
+ }
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ i.node = i.p.nodeData[i.node+nNext]
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.node == 0 {
+ if i.forward {
+ return i.Last()
+ }
+ return false
+ }
+ i.forward = false
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ i.node = i.p.findLT(i.key)
+ return i.fill(true, false)
+}
+
// Key returns the current entry's key, or nil when the iterator is invalid.
func (i *dbIter) Key() []byte {
	return i.key
}
+
// Value returns the current entry's value, or nil when the iterator is invalid.
func (i *dbIter) Value() []byte {
	return i.value
}
+
+func (i *dbIter) Error() error { return i.err }
+
+func (i *dbIter) Release() {
+ if !i.Released() {
+ i.p = nil
+ i.node = 0
+ i.key = nil
+ i.value = nil
+ i.BasicReleaser.Release()
+ }
+}
+
// Per-node field offsets within DB.nodeData. Each node occupies
// 4+height consecutive ints: KV offset, key length, value length,
// height, then `height` next-pointers (one per skip-list level).
const (
	nKV = iota
	nKey
	nVal
	nHeight
	nNext
)
+
// DB is an in-memory key/value database.
//
// Entries live in a skip list whose nodes are packed into the flat
// nodeData slice; key/value bytes live in the append-only kvData buffer.
type DB struct {
	cmp comparer.BasicComparer
	rnd *rand.Rand

	mu     sync.RWMutex
	kvData []byte
	// Node data:
	// [0]           : KV offset
	// [1]           : Key length
	// [2]           : Value length
	// [3]           : Height
	// [4..4+height) : Next nodes
	nodeData []int
	prevNode [tMaxHeight]int // scratch per-level predecessors recorded by findGE(prev=true)
	maxHeight int
	n        int // number of live entries
	kvSize   int // sum of live key+value byte lengths
}
+
+func (p *DB) randHeight() (h int) {
+ const branching = 4
+ h = 1
+ for h < tMaxHeight && p.rnd.Int()%branching == 0 {
+ h++
+ }
+ return
+}
+
// findGE returns the nodeData offset of the first node whose key is >= key,
// along with whether the match is exact. When prev is true it also records,
// for every level, the rightmost node preceding the result in p.prevNode,
// which Put/Delete later use to splice the list.
//
// Must hold RW-lock if prev == true, as it use shared prevNode slice.
func (p *DB) findGE(key []byte, prev bool) (int, bool) {
	node := 0 // start at the head node
	h := p.maxHeight - 1
	for {
		next := p.nodeData[node+nNext+h]
		cmp := 1 // a nil successor compares as "greater than key"
		if next != 0 {
			o := p.nodeData[next]
			cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key)
		}
		if cmp < 0 {
			// Keep searching in this list
			node = next
		} else {
			if prev {
				p.prevNode[h] = node
			} else if cmp == 0 {
				return next, true
			}
			if h == 0 {
				return next, cmp == 0
			}
			h--
		}
	}
}
+
+func (p *DB) findLT(key []byte) int {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ o := p.nodeData[next]
+ if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 {
+ if h == 0 {
+ break
+ }
+ h--
+ } else {
+ node = next
+ }
+ }
+ return node
+}
+
+func (p *DB) findLast() int {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ if next == 0 {
+ if h == 0 {
+ break
+ }
+ h--
+ } else {
+ node = next
+ }
+ }
+ return node
+}
+
// Put sets the value for the given key. It overwrites any previous value
// for that key; a DB is not a multi-map.
//
// It is safe to modify the contents of the arguments after Put returns.
func (p *DB) Put(key []byte, value []byte) error {
	p.mu.Lock()
	defer p.mu.Unlock()

	if node, exact := p.findGE(key, true); exact {
		// Key already present: append the new bytes (kvData is append-only,
		// the old bytes are simply abandoned) and repoint the node.
		kvOffset := len(p.kvData)
		p.kvData = append(p.kvData, key...)
		p.kvData = append(p.kvData, value...)
		p.nodeData[node] = kvOffset
		m := p.nodeData[node+nVal]
		p.nodeData[node+nVal] = len(value)
		p.kvSize += len(value) - m
		return nil
	}

	h := p.randHeight()
	if h > p.maxHeight {
		// The new node is taller than anything so far: the head node (0)
		// is its predecessor at the new levels.
		for i := p.maxHeight; i < h; i++ {
			p.prevNode[i] = 0
		}
		p.maxHeight = h
	}

	kvOffset := len(p.kvData)
	p.kvData = append(p.kvData, key...)
	p.kvData = append(p.kvData, value...)
	// Node
	node := len(p.nodeData)
	p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
	// Splice the new node after each level's predecessor recorded by
	// findGE(key, true) above.
	for i, n := range p.prevNode[:h] {
		m := n + nNext + i
		p.nodeData = append(p.nodeData, p.nodeData[m])
		p.nodeData[m] = node
	}

	p.kvSize += len(key) + len(value)
	p.n++
	return nil
}
+
+// Delete deletes the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (p *DB) Delete(key []byte) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ node, exact := p.findGE(key, true)
+ if !exact {
+ return ErrNotFound
+ }
+
+ h := p.nodeData[node+nHeight]
+ for i, n := range p.prevNode[:h] {
+ m := n + 4 + i
+ p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i]
+ }
+
+ p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal]
+ p.n--
+ return nil
+}
+
+// Contains returns true if the given key are in the DB.
+//
+// It is safe to modify the contents of the arguments after Contains returns.
+func (p *DB) Contains(key []byte) bool {
+ p.mu.RLock()
+ _, exact := p.findGE(key, false)
+ p.mu.RUnlock()
+ return exact
+}
+
+// Get gets the value for the given key. It returns error.ErrNotFound if the
+// DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (p *DB) Get(key []byte) (value []byte, err error) {
+ p.mu.RLock()
+ if node, exact := p.findGE(key, false); exact {
+ o := p.nodeData[node] + p.nodeData[node+nKey]
+ value = p.kvData[o : o+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Find returns.
+func (p *DB) Find(key []byte) (rkey, value []byte, err error) {
+ p.mu.RLock()
+ if node, _ := p.findGE(key, false); node != 0 {
+ n := p.nodeData[node]
+ m := n + p.nodeData[node+nKey]
+ rkey = p.kvData[n:m]
+ value = p.kvData[m : m+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// NewIterator returns an iterator of the DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. However, the resultant key/value pairs are not guaranteed
+// to be a consistent snapshot of the DB at a particular point in time.
+//
+// Slice allows slicing the iterator to only contains keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (p *DB) NewIterator(slice *util.Range) iterator.Iterator {
+ return &dbIter{p: p, slice: slice}
+}
+
+// Capacity returns keys/values buffer capacity.
+func (p *DB) Capacity() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData)
+}
+
+// Size returns sum of keys and values length. Note that deleted
+// key/value will not be accounted for, but it will still consume
+// the buffer, since the buffer is append only.
+func (p *DB) Size() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.kvSize
+}
+
+// Free returns keys/values free buffer before need to grow.
+func (p *DB) Free() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData) - len(p.kvData)
+}
+
+// Len returns the number of entries in the DB.
+func (p *DB) Len() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.n
+}
+
+// Reset resets the DB to initial empty state. Allows reuse the buffer.
+func (p *DB) Reset() {
+ p.mu.Lock()
+ p.rnd = rand.New(rand.NewSource(0xdeadbeef))
+ p.maxHeight = 1
+ p.n = 0
+ p.kvSize = 0
+ p.kvData = p.kvData[:0]
+ p.nodeData = p.nodeData[:nNext+tMaxHeight]
+ p.nodeData[nKV] = 0
+ p.nodeData[nKey] = 0
+ p.nodeData[nVal] = 0
+ p.nodeData[nHeight] = tMaxHeight
+ for n := 0; n < tMaxHeight; n++ {
+ p.nodeData[nNext+n] = 0
+ p.prevNode[n] = 0
+ }
+ p.mu.Unlock()
+}
+
+// New creates a new initialized in-memory key/value DB. The capacity
+// is the initial key/value buffer capacity. The capacity is advisory,
+// not enforced.
+//
+// This DB is append-only, deleting an entry would remove entry node but not
+// reclaim KV buffer.
+//
+// The returned DB instance is safe for concurrent use.
+func New(cmp comparer.BasicComparer, capacity int) *DB {
+ p := &DB{
+ cmp: cmp,
+ rnd: rand.New(rand.NewSource(0xdeadbeef)),
+ maxHeight: 1,
+ kvData: make([]byte, 0, capacity),
+ nodeData: make([]int, 4+tMaxHeight),
+ }
+ p.nodeData[nHeight] = tMaxHeight
+ return p
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
new file mode 100644
index 000000000..44e7d9adc
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -0,0 +1,684 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package opt provides sets of options used by LevelDB.
+package opt
+
+import (
+ "math"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+)
+
// Binary byte-size units.
const (
	KiB = 1024
	MiB = KiB * 1024
	GiB = MiB * 1024
)
+
// Default values applied by the Options getters when the corresponding
// field is left at its zero value.
var (
	DefaultBlockCacher                   = LRUCacher
	DefaultBlockCacheCapacity            = 8 * MiB
	DefaultBlockRestartInterval          = 16
	DefaultBlockSize                     = 4 * KiB
	DefaultCompactionExpandLimitFactor   = 25
	DefaultCompactionGPOverlapsFactor    = 10
	DefaultCompactionL0Trigger           = 4
	DefaultCompactionSourceLimitFactor   = 1
	DefaultCompactionTableSize           = 2 * MiB
	DefaultCompactionTableSizeMultiplier = 1.0
	DefaultCompactionTotalSize           = 10 * MiB
	DefaultCompactionTotalSizeMultiplier = 10.0
	DefaultCompressionType               = SnappyCompression
	DefaultIteratorSamplingRate          = 1 * MiB
	DefaultOpenFilesCacher               = LRUCacher
	DefaultOpenFilesCacheCapacity        = 500
	DefaultWriteBuffer                   = 4 * MiB
	DefaultWriteL0PauseTrigger           = 12
	DefaultWriteL0SlowdownTrigger        = 8
)
+
// Cacher is a caching algorithm.
type Cacher interface {
	// New creates a new cache instance with the given capacity.
	New(capacity int) cache.Cacher
}
+
// CacherFunc adapts a plain constructor function to the Cacher interface.
type CacherFunc struct {
	// NewFunc constructs the cache; a nil NewFunc disables caching.
	NewFunc func(capacity int) cache.Cacher
}
+
+func (f *CacherFunc) New(capacity int) cache.Cacher {
+ if f.NewFunc != nil {
+ return f.NewFunc(capacity)
+ }
+ return nil
+}
+
+func noCacher(int) cache.Cacher { return nil }
+
// Predefined caching algorithms.
var (
	// LRUCacher is the LRU-cache algorithm.
	LRUCacher = &CacherFunc{cache.NewLRU}

	// NoCacher is the value to disable caching algorithm.
	NoCacher = &CacherFunc{}
)
+
// Compression is the 'sorted table' block compression algorithm to use.
// See DefaultCompression, NoCompression and SnappyCompression below.
type Compression uint
+
+func (c Compression) String() string {
+ switch c {
+ case DefaultCompression:
+ return "default"
+ case NoCompression:
+ return "none"
+ case SnappyCompression:
+ return "snappy"
+ }
+ return "invalid"
+}
+
// Supported compression algorithms. nCompression is a sentinel marking
// the end of the valid range (see Options.GetCompression).
const (
	DefaultCompression Compression = iota
	NoCompression
	SnappyCompression
	nCompression
)
+
// Strict is the DB 'strict level'. It is a bit set; flags combine with OR.
type Strict uint

const (
	// If present then a corrupted or invalid chunk or block in manifest
	// journal will cause an error instead of being dropped.
	// This will prevent a database with a corrupted manifest from being opened.
	StrictManifest Strict = 1 << iota

	// If present then journal chunk checksum will be verified.
	StrictJournalChecksum

	// If present then a corrupted or invalid chunk or block in journal
	// will cause an error instead of being dropped.
	// This will prevent a database with a corrupted journal from being opened.
	StrictJournal

	// If present then 'sorted table' block checksum will be verified.
	// This has effect on both 'read operation' and compaction.
	StrictBlockChecksum

	// If present then a corrupted 'sorted table' will fail compaction.
	// The database will enter read-only mode.
	StrictCompaction

	// If present then a corrupted 'sorted table' will halt 'read operation'.
	StrictReader

	// If present then leveldb.Recover will drop corrupted 'sorted table'.
	StrictRecovery

	// This is only applicable to ReadOptions; if present then this
	// ReadOptions 'strict level' will override global ones.
	StrictOverride

	// StrictAll enables all strict flags.
	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery

	// DefaultStrict is the default strict flags. Specify any strict flags
	// will override default strict flags as whole (i.e. not OR'ed).
	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader

	// NoStrict disables all strict flags. Override default strict flags.
	NoStrict = ^StrictAll
)
+
// Options holds the optional parameters for the DB at large.
type Options struct {
	// AltFilters defines one or more 'alternative filters'.
	// 'alternative filters' will be used during reads if a filter block
	// does not match with the 'effective filter'.
	//
	// The default value is nil
	AltFilters []filter.Filter

	// BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
	// Specify NoCacher to disable caching algorithm.
	//
	// The default value is LRUCacher.
	BlockCacher Cacher

	// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
	// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
	//
	// The default value is 8MiB.
	BlockCacheCapacity int

	// BlockRestartInterval is the number of keys between restart points for
	// delta encoding of keys.
	//
	// The default value is 16.
	BlockRestartInterval int

	// BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
	// block.
	//
	// The default value is 4KiB.
	BlockSize int

	// CompactionExpandLimitFactor limits compaction size after expanded.
	// This will be multiplied by table size limit at compaction target level.
	//
	// The default value is 25.
	CompactionExpandLimitFactor int

	// CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a
	// single 'sorted table' generates.
	// This will be multiplied by table size limit at grandparent level.
	//
	// The default value is 10.
	CompactionGPOverlapsFactor int

	// CompactionL0Trigger defines number of 'sorted table' at level-0 that will
	// trigger compaction.
	//
	// The default value is 4.
	CompactionL0Trigger int

	// CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
	// level-0.
	// This will be multiplied by table size limit at compaction target level.
	//
	// The default value is 1.
	CompactionSourceLimitFactor int

	// CompactionTableSize limits size of 'sorted table' that compaction generates.
	// The limits for each level will be calculated as:
	//   CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
	// The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel.
	//
	// The default value is 2MiB.
	CompactionTableSize int

	// CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
	//
	// The default value is 1.
	CompactionTableSizeMultiplier float64

	// CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
	// CompactionTableSize.
	// Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTableSizeMultiplierPerLevel []float64

	// CompactionTotalSize limits total size of 'sorted table' for each level.
	// The limits for each level will be calculated as:
	//   CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
	// The multiplier for each level can also fine-tuned using
	// CompactionTotalSizeMultiplierPerLevel.
	//
	// The default value is 10MiB.
	CompactionTotalSize int

	// CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
	//
	// The default value is 10.
	CompactionTotalSizeMultiplier float64

	// CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
	// CompactionTotalSize.
	// Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTotalSizeMultiplierPerLevel []float64

	// Comparer defines a total ordering over the space of []byte keys: a 'less
	// than' relationship. The same comparison algorithm must be used for reads
	// and writes over the lifetime of the DB.
	//
	// The default value uses the same ordering as bytes.Compare.
	Comparer comparer.Comparer

	// Compression defines the 'sorted table' block compression to use.
	//
	// The default value (DefaultCompression) uses snappy compression.
	Compression Compression

	// DisableBufferPool allows disable use of util.BufferPool functionality.
	//
	// The default value is false.
	DisableBufferPool bool

	// DisableBlockCache allows disable use of cache.Cache functionality on
	// 'sorted table' block.
	//
	// The default value is false.
	DisableBlockCache bool

	// DisableCompactionBackoff allows disable compaction retry backoff.
	//
	// The default value is false.
	DisableCompactionBackoff bool

	// DisableLargeBatchTransaction allows disabling switch-to-transaction mode
	// on large batch write. If enabled, batch writes larger than WriteBuffer
	// will use transactions.
	//
	// The default is false.
	DisableLargeBatchTransaction bool

	// ErrorIfExist defines whether an error should be returned if the DB
	// already exist.
	//
	// The default value is false.
	ErrorIfExist bool

	// ErrorIfMissing defines whether an error should be returned if the DB is
	// missing. If false then the database will be created if missing, otherwise
	// an error will be returned.
	//
	// The default value is false.
	ErrorIfMissing bool

	// Filter defines an 'effective filter' to use. An 'effective filter'
	// if defined will be used to generate per-table filter block.
	// The filter name will be stored on disk.
	// During reads LevelDB will try to find matching filter from
	// 'effective filter' and 'alternative filters'.
	//
	// Filter can be changed after a DB has been created. It is recommended
	// to put old filter to the 'alternative filters' to mitigate lack of
	// filter during transition period.
	//
	// A filter is used to reduce disk reads when looking for a specific key.
	//
	// The default value is nil.
	Filter filter.Filter

	// IteratorSamplingRate defines approximate gap (in bytes) between read
	// sampling of an iterator. The samples will be used to determine when
	// compaction should be triggered.
	//
	// The default is 1MiB.
	IteratorSamplingRate int

	// NoSync allows completely disable fsync.
	//
	// The default is false.
	NoSync bool

	// NoWriteMerge allows disabling write merge.
	//
	// The default is false.
	NoWriteMerge bool

	// OpenFilesCacher provides cache algorithm for open files caching.
	// Specify NoCacher to disable caching algorithm.
	//
	// The default value is LRUCacher.
	OpenFilesCacher Cacher

	// OpenFilesCacheCapacity defines the capacity of the open files caching.
	// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
	//
	// The default value is 500.
	OpenFilesCacheCapacity int

	// If true then opens DB in read-only mode.
	//
	// The default value is false.
	ReadOnly bool

	// Strict defines the DB strict level.
	Strict Strict

	// WriteBuffer defines maximum size of a 'memdb' before flushed to
	// 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
	// unsorted journal.
	//
	// LevelDB may hold up to two 'memdb' at the same time.
	//
	// The default value is 4MiB.
	WriteBuffer int

	// WriteL0PauseTrigger defines number of 'sorted table' at level-0 that will
	// pause write.
	//
	// The default value is 12.
	WriteL0PauseTrigger int

	// WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that
	// will trigger write slowdown.
	//
	// The default value is 8.
	WriteL0SlowdownTrigger int
}
+
+func (o *Options) GetAltFilters() []filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.AltFilters
+}
+
+func (o *Options) GetBlockCacher() Cacher {
+ if o == nil || o.BlockCacher == nil {
+ return DefaultBlockCacher
+ } else if o.BlockCacher == NoCacher {
+ return nil
+ }
+ return o.BlockCacher
+}
+
+func (o *Options) GetBlockCacheCapacity() int {
+ if o == nil || o.BlockCacheCapacity == 0 {
+ return DefaultBlockCacheCapacity
+ } else if o.BlockCacheCapacity < 0 {
+ return 0
+ }
+ return o.BlockCacheCapacity
+}
+
+func (o *Options) GetBlockRestartInterval() int {
+ if o == nil || o.BlockRestartInterval <= 0 {
+ return DefaultBlockRestartInterval
+ }
+ return o.BlockRestartInterval
+}
+
+func (o *Options) GetBlockSize() int {
+ if o == nil || o.BlockSize <= 0 {
+ return DefaultBlockSize
+ }
+ return o.BlockSize
+}
+
+func (o *Options) GetCompactionExpandLimit(level int) int {
+ factor := DefaultCompactionExpandLimitFactor
+ if o != nil && o.CompactionExpandLimitFactor > 0 {
+ factor = o.CompactionExpandLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+ factor := DefaultCompactionGPOverlapsFactor
+ if o != nil && o.CompactionGPOverlapsFactor > 0 {
+ factor = o.CompactionGPOverlapsFactor
+ }
+ return o.GetCompactionTableSize(level+2) * factor
+}
+
+func (o *Options) GetCompactionL0Trigger() int {
+ if o == nil || o.CompactionL0Trigger == 0 {
+ return DefaultCompactionL0Trigger
+ }
+ return o.CompactionL0Trigger
+}
+
+func (o *Options) GetCompactionSourceLimit(level int) int {
+ factor := DefaultCompactionSourceLimitFactor
+ if o != nil && o.CompactionSourceLimitFactor > 0 {
+ factor = o.CompactionSourceLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
// GetCompactionTableSize returns the per-table size limit for level as
// base * multiplier. The per-level multiplier slice, when it has a positive
// entry for level, takes precedence over the scalar multiplier; a zero
// multiplier falls back to the default raised to the level.
func (o *Options) GetCompactionTableSize(level int) int {
	var (
		base = DefaultCompactionTableSize
		mult float64
	)
	if o != nil {
		if o.CompactionTableSize > 0 {
			base = o.CompactionTableSize
		}
		if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
			mult = o.CompactionTableSizeMultiplierPerLevel[level]
		} else if o.CompactionTableSizeMultiplier > 0 {
			mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
		}
	}
	if mult == 0 {
		mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
	}
	return int(float64(base) * mult)
}
+
// GetCompactionTotalSize returns the total size limit for level as
// base * multiplier, with the same per-level-slice precedence and default
// fallback scheme as GetCompactionTableSize.
func (o *Options) GetCompactionTotalSize(level int) int64 {
	var (
		base = DefaultCompactionTotalSize
		mult float64
	)
	if o != nil {
		if o.CompactionTotalSize > 0 {
			base = o.CompactionTotalSize
		}
		if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
			mult = o.CompactionTotalSizeMultiplierPerLevel[level]
		} else if o.CompactionTotalSizeMultiplier > 0 {
			mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
		}
	}
	if mult == 0 {
		mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
	}
	return int64(float64(base) * mult)
}
+
+func (o *Options) GetComparer() comparer.Comparer {
+ if o == nil || o.Comparer == nil {
+ return comparer.DefaultComparer
+ }
+ return o.Comparer
+}
+
+func (o *Options) GetCompression() Compression {
+ if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
+ return DefaultCompressionType
+ }
+ return o.Compression
+}
+
+func (o *Options) GetDisableBufferPool() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableBufferPool
+}
+
+func (o *Options) GetDisableBlockCache() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableBlockCache
+}
+
+func (o *Options) GetDisableCompactionBackoff() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableCompactionBackoff
+}
+
+func (o *Options) GetDisableLargeBatchTransaction() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableLargeBatchTransaction
+}
+
+func (o *Options) GetErrorIfExist() bool {
+ if o == nil {
+ return false
+ }
+ return o.ErrorIfExist
+}
+
+func (o *Options) GetErrorIfMissing() bool {
+ if o == nil {
+ return false
+ }
+ return o.ErrorIfMissing
+}
+
+func (o *Options) GetFilter() filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.Filter
+}
+
+func (o *Options) GetIteratorSamplingRate() int {
+ if o == nil || o.IteratorSamplingRate <= 0 {
+ return DefaultIteratorSamplingRate
+ }
+ return o.IteratorSamplingRate
+}
+
+func (o *Options) GetNoSync() bool {
+ if o == nil {
+ return false
+ }
+ return o.NoSync
+}
+
+func (o *Options) GetNoWriteMerge() bool {
+ if o == nil {
+ return false
+ }
+ return o.NoWriteMerge
+}
+
+func (o *Options) GetOpenFilesCacher() Cacher {
+ if o == nil || o.OpenFilesCacher == nil {
+ return DefaultOpenFilesCacher
+ }
+ if o.OpenFilesCacher == NoCacher {
+ return nil
+ }
+ return o.OpenFilesCacher
+}
+
+func (o *Options) GetOpenFilesCacheCapacity() int {
+ if o == nil || o.OpenFilesCacheCapacity == 0 {
+ return DefaultOpenFilesCacheCapacity
+ } else if o.OpenFilesCacheCapacity < 0 {
+ return 0
+ }
+ return o.OpenFilesCacheCapacity
+}
+
+func (o *Options) GetReadOnly() bool {
+ if o == nil {
+ return false
+ }
+ return o.ReadOnly
+}
+
+func (o *Options) GetStrict(strict Strict) bool {
+ if o == nil || o.Strict == 0 {
+ return DefaultStrict&strict != 0
+ }
+ return o.Strict&strict != 0
+}
+
+func (o *Options) GetWriteBuffer() int {
+ if o == nil || o.WriteBuffer <= 0 {
+ return DefaultWriteBuffer
+ }
+ return o.WriteBuffer
+}
+
+func (o *Options) GetWriteL0PauseTrigger() int {
+ if o == nil || o.WriteL0PauseTrigger == 0 {
+ return DefaultWriteL0PauseTrigger
+ }
+ return o.WriteL0PauseTrigger
+}
+
+func (o *Options) GetWriteL0SlowdownTrigger() int {
+ if o == nil || o.WriteL0SlowdownTrigger == 0 {
+ return DefaultWriteL0SlowdownTrigger
+ }
+ return o.WriteL0SlowdownTrigger
+}
+
// ReadOptions holds the optional parameters for 'read operation'. The
// 'read operation' includes Get, Find and NewIterator.
type ReadOptions struct {
	// DontFillCache defines whether block reads for this 'read operation'
	// should be cached. If false then the block will be cached. This does
	// not affect already cached blocks.
	//
	// The default value is false.
	DontFillCache bool

	// Strict will be OR'ed with global DB 'strict level' unless StrictOverride
	// is present. Currently only StrictReader has effect here.
	Strict Strict
}
+
+func (ro *ReadOptions) GetDontFillCache() bool {
+ if ro == nil {
+ return false
+ }
+ return ro.DontFillCache
+}
+
+func (ro *ReadOptions) GetStrict(strict Strict) bool {
+ if ro == nil {
+ return false
+ }
+ return ro.Strict&strict != 0
+}
+
// WriteOptions holds the optional parameters for 'write operation'. The
// 'write operation' includes Write, Put and Delete.
type WriteOptions struct {
	// NoWriteMerge allows disabling write merge.
	//
	// The default is false.
	NoWriteMerge bool

	// Sync is whether to sync underlying writes from the OS buffer cache
	// through to actual disk, if applicable. Setting Sync can result in
	// slower writes.
	//
	// If false, and the machine crashes, then some recent writes may be lost.
	// Note that if it is just the process that crashes (and the machine does
	// not) then no writes will be lost.
	//
	// In other words, Sync being false has the same semantics as a write
	// system call. Sync being true means write followed by fsync.
	//
	// The default value is false.
	Sync bool
}
+
+func (wo *WriteOptions) GetNoWriteMerge() bool {
+ if wo == nil {
+ return false
+ }
+ return wo.NoWriteMerge
+}
+
+func (wo *WriteOptions) GetSync() bool {
+ if wo == nil {
+ return false
+ }
+ return wo.Sync
+}
+
+func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
+ if ro.GetStrict(StrictOverride) {
+ return ro.GetStrict(strict)
+ } else {
+ return o.GetStrict(strict) || ro.GetStrict(strict)
+ }
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
new file mode 100644
index 000000000..b072b1ac4
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func dupOptions(o *opt.Options) *opt.Options {
+ newo := &opt.Options{}
+ if o != nil {
+ *newo = *o
+ }
+ if newo.Strict == 0 {
+ newo.Strict = opt.DefaultStrict
+ }
+ return newo
+}
+
+func (s *session) setOptions(o *opt.Options) {
+ no := dupOptions(o)
+ // Alternative filters.
+ if filters := o.GetAltFilters(); len(filters) > 0 {
+ no.AltFilters = make([]filter.Filter, len(filters))
+ for i, filter := range filters {
+ no.AltFilters[i] = &iFilter{filter}
+ }
+ }
+ // Comparer.
+ s.icmp = &iComparer{o.GetComparer()}
+ no.Comparer = s.icmp
+ // Filter.
+ if filter := o.GetFilter(); filter != nil {
+ no.Filter = &iFilter{filter}
+ }
+
+ s.o = &cachedOptions{Options: no}
+ s.o.cache()
+}
+
// optCachedLevel is the number of levels whose option values are precomputed.
const optCachedLevel = 7

// cachedOptions wraps opt.Options and memoizes the per-level compaction
// parameters for the first optCachedLevel levels (see cache()).
type cachedOptions struct {
	*opt.Options

	compactionExpandLimit []int
	compactionGPOverlaps  []int
	compactionSourceLimit []int
	compactionTableSize   []int
	compactionTotalSize   []int64
}
+
+func (co *cachedOptions) cache() {
+ co.compactionExpandLimit = make([]int, optCachedLevel)
+ co.compactionGPOverlaps = make([]int, optCachedLevel)
+ co.compactionSourceLimit = make([]int, optCachedLevel)
+ co.compactionTableSize = make([]int, optCachedLevel)
+ co.compactionTotalSize = make([]int64, optCachedLevel)
+
+ for level := 0; level < optCachedLevel; level++ {
+ co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
+ co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
+ co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
+ co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level)
+ co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level)
+ }
+}
+
+func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
+ if level < optCachedLevel {
+ return co.compactionExpandLimit[level]
+ }
+ return co.Options.GetCompactionExpandLimit(level)
+}
+
+func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
+ if level < optCachedLevel {
+ return co.compactionGPOverlaps[level]
+ }
+ return co.Options.GetCompactionGPOverlaps(level)
+}
+
+func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
+ if level < optCachedLevel {
+ return co.compactionSourceLimit[level]
+ }
+ return co.Options.GetCompactionSourceLimit(level)
+}
+
+func (co *cachedOptions) GetCompactionTableSize(level int) int {
+ if level < optCachedLevel {
+ return co.compactionTableSize[level]
+ }
+ return co.Options.GetCompactionTableSize(level)
+}
+
+func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
+ if level < optCachedLevel {
+ return co.compactionTotalSize[level]
+ }
+ return co.Options.GetCompactionTotalSize(level)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
new file mode 100644
index 000000000..f3e747701
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
@@ -0,0 +1,208 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrManifestCorrupted records manifest corruption. This error will be
+// wrapped with errors.ErrCorrupted.
+type ErrManifestCorrupted struct {
+	Field  string // name of the manifest field that is corrupted
+	Reason string // human-readable explanation of the corruption
+}
+
+// Error implements the error interface, formatting the corrupted field
+// and the reason into a single message.
+func (e *ErrManifestCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
+}
+
+// newErrManifestCorrupted builds an ErrManifestCorrupted for the given
+// file descriptor and wraps it with errors.ErrCorrupted.
+func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error {
+	return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason})
+}
+
+// session represent a persistent database session.
+type session struct {
+	// Need 64-bit alignment.
+	stNextFileNum    int64 // current unused file number
+	stJournalNum     int64 // current journal file number; need external synchronization
+	stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb
+	stTempFileNum    int64  // counter used by newTemp to number temporary files
+	stSeqNum         uint64 // last mem compacted seq; need external synchronization
+
+	stor     storage.Storage  // backing storage
+	storLock storage.Locker   // storage lock held for the session lifetime
+	o        *cachedOptions   // options with per-level values precomputed
+	icmp     *iComparer       // internal key comparer
+	tops     *tOps            // table operations (create/open/iterate tables)
+
+	manifest       *journal.Writer // journal writer for the current manifest
+	manifestWriter storage.Writer  // underlying writer of the manifest file
+	manifestFd     storage.FileDesc
+
+	stCompPtrs []internalKey // compaction pointers; need external synchronization
+	stVersion  *version      // current version
+	vmu        sync.Mutex    // guards stVersion ref-counting
+}
+
+// Creates new initialized session instance.
+//
+// It acquires the storage lock (failing if another process holds it),
+// caches options, creates the table ops and installs an empty initial
+// version. The caller must release the session after use.
+func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
+	if stor == nil {
+		return nil, os.ErrInvalid
+	}
+	// Lock the storage first; a held lock means another session owns it.
+	storLock, err := stor.Lock()
+	if err != nil {
+		return
+	}
+	s = &session{
+		stor:     stor,
+		storLock: storLock,
+	}
+	s.setOptions(o)
+	s.tops = newTableOps(s)
+	s.setVersion(newVersion(s))
+	s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
+	return
+}
+
+// Close session.
+//
+// Closes table ops and the manifest journal/writer, then swaps in a
+// closing version so later version() callers see the closed state.
+func (s *session) close() {
+	s.tops.close()
+	if s.manifest != nil {
+		s.manifest.Close()
+	}
+	if s.manifestWriter != nil {
+		s.manifestWriter.Close()
+	}
+	s.manifest = nil
+	s.manifestWriter = nil
+	s.setVersion(&version{s: s, closing: true})
+}
+
+// Release session lock.
+func (s *session) release() {
+	s.storLock.Unlock()
+}
+
+// Create a new database session; need external synchronization.
+func (s *session) create() error {
+	// create manifest
+	return s.newManifest(nil, nil)
+}
+
+// Recover a database session; need external synchronization.
+//
+// Reads the current manifest journal, replaying each sessionRecord into
+// a version staging area, then validates mandatory fields and installs
+// the recovered version and file/sequence numbers.
+func (s *session) recover() (err error) {
+	defer func() {
+		if os.IsNotExist(err) {
+			// Don't return os.ErrNotExist if the underlying storage contains
+			// other files that belong to LevelDB. So the DB won't get trashed.
+			if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 {
+				err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
+			}
+		}
+	}()
+
+	// The meta file points at the current manifest.
+	fd, err := s.stor.GetMeta()
+	if err != nil {
+		return
+	}
+
+	reader, err := s.stor.Open(fd)
+	if err != nil {
+		return
+	}
+	defer reader.Close()
+
+	var (
+		// Options.
+		strict = s.o.GetStrict(opt.StrictManifest)
+
+		jr      = journal.NewReader(reader, dropper{s, fd}, strict, true)
+		rec     = &sessionRecord{}
+		staging = s.stVersion.newStaging()
+	)
+	// Replay every journal entry; each entry is one encoded sessionRecord.
+	for {
+		var r io.Reader
+		r, err = jr.Next()
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+				break
+			}
+			return errors.SetFd(err, fd)
+		}
+
+		err = rec.decode(r)
+		if err == nil {
+			// save compact pointers
+			for _, r := range rec.compPtrs {
+				s.setCompPtr(r.level, internalKey(r.ikey))
+			}
+			// commit record to version staging
+			staging.commit(rec)
+		} else {
+			err = errors.SetFd(err, fd)
+			if strict || !errors.IsCorrupted(err) {
+				return
+			}
+			// Non-strict mode tolerates corrupted records: log and continue.
+			s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
+		}
+		// Reset per-record state so the next decode starts clean; the
+		// scalar fields (journal num, seq num, ...) intentionally persist
+		// across records so the last occurrence wins.
+		rec.resetCompPtrs()
+		rec.resetAddedTables()
+		rec.resetDeletedTables()
+	}
+
+	// A usable manifest must carry these fields at least once.
+	switch {
+	case !rec.has(recComparer):
+		return newErrManifestCorrupted(fd, "comparer", "missing")
+	case rec.comparer != s.icmp.uName():
+		return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
+	case !rec.has(recNextFileNum):
+		return newErrManifestCorrupted(fd, "next-file-num", "missing")
+	case !rec.has(recJournalNum):
+		return newErrManifestCorrupted(fd, "journal-file-num", "missing")
+	case !rec.has(recSeqNum):
+		return newErrManifestCorrupted(fd, "seq-num", "missing")
+	}
+
+	s.manifestFd = fd
+	s.setVersion(staging.finish())
+	s.setNextFileNum(rec.nextFileNum)
+	s.recordCommited(rec)
+	return nil
+}
+
+// Commit session; need external synchronization.
+//
+// Spawns a new version from the record, persists the record to the
+// manifest (creating a fresh manifest file if none is open yet), and
+// only installs the new version when the write succeeded.
+func (s *session) commit(r *sessionRecord) (err error) {
+	v := s.version()
+	defer v.release()
+
+	// spawn new version based on current version
+	nv := v.spawn(r)
+
+	if s.manifest == nil {
+		// manifest journal writer not yet created, create one
+		err = s.newManifest(r, nv)
+	} else {
+		err = s.flushManifest(r)
+	}
+
+	// finally, apply new version if no error rise
+	if err == nil {
+		s.setVersion(nv)
+	}
+
+	return
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
new file mode 100644
index 000000000..089cd00b2
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
@@ -0,0 +1,302 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+// pickMemdbLevel asks the current version for the level a flushed memdb
+// covering [umin, umax] should land on, capped at maxLevel.
+func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int {
+	v := s.version()
+	defer v.release()
+	return v.pickMemdbLevel(umin, umax, maxLevel)
+}
+
+// flushMemdb writes the contents of mdb to a new sorted table, records
+// it in rec at the picked level, and returns that level.
+func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) {
+	// Create sorted table.
+	iter := mdb.NewIterator(nil)
+	defer iter.Release()
+	t, n, err := s.tops.createFrom(iter)
+	if err != nil {
+		return 0, err
+	}
+
+	// Pick level other than zero can cause compaction issue with large
+	// bulk insert and delete on strictly incrementing key-space. The
+	// problem is that the small deletion markers trapped at lower level,
+	// while key/value entries keep growing at higher level. Since the
+	// key-space is strictly incrementing it will not overlaps with
+	// higher level, thus maximum possible level is always picked, while
+	// overlapping deletion marker pushed into lower level.
+	// See: https://github.com/syndtr/goleveldb/issues/127.
+	flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel)
+	rec.addTableFile(flushLevel, t)
+
+	s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+	return flushLevel, nil
+}
+
+// Pick a compaction based on current state; need external synchronization.
+//
+// Size-triggered compactions (cScore >= 1) take priority: the first
+// table past the level's compaction pointer is chosen, wrapping to the
+// first table when the pointer is past the end. Otherwise a
+// seek-triggered table (v.cSeek) is used; returns nil when neither
+// trigger applies. The returned compaction holds a version reference.
+func (s *session) pickCompaction() *compaction {
+	v := s.version()
+
+	var sourceLevel int
+	var t0 tFiles
+	if v.cScore >= 1 {
+		sourceLevel = v.cLevel
+		cptr := s.getCompPtr(sourceLevel)
+		tables := v.levels[sourceLevel]
+		// Pick the first table whose max key is past the compaction
+		// pointer so successive compactions rotate through the level.
+		for _, t := range tables {
+			if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
+				t0 = append(t0, t)
+				break
+			}
+		}
+		if len(t0) == 0 {
+			// Pointer wrapped past the last table; start over.
+			t0 = append(t0, tables[0])
+		}
+	} else {
+		if p := atomic.LoadPointer(&v.cSeek); p != nil {
+			ts := (*tSet)(p)
+			sourceLevel = ts.level
+			t0 = append(t0, ts.table)
+		} else {
+			v.release()
+			return nil
+		}
+	}
+
+	return newCompaction(s, v, sourceLevel, t0)
+}
+
+// Create compaction from given level and range; need external synchronization.
+//
+// Builds a compaction over the tables of sourceLevel overlapping
+// [umin, umax]; returns nil when the level is out of range or nothing
+// overlaps. Unless noLimit is set, the source set for levels > 0 is
+// trimmed to the configured source-size limit.
+func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction {
+	v := s.version()
+
+	if sourceLevel >= len(v.levels) {
+		v.release()
+		return nil
+	}
+
+	t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0)
+	if len(t0) == 0 {
+		v.release()
+		return nil
+	}
+
+	// Avoid compacting too much in one shot in case the range is large.
+	// But we cannot do this for level-0 since level-0 files can overlap
+	// and we must not pick one file and drop another older file if the
+	// two files overlap.
+	if !noLimit && sourceLevel > 0 {
+		limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel))
+		total := int64(0)
+		for i, t := range t0 {
+			total += t.size
+			if total >= limit {
+				s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
+				t0 = t0[:i+1]
+				break
+			}
+		}
+	}
+
+	return newCompaction(s, v, sourceLevel, t0)
+}
+
+// newCompaction builds a compaction over t0 at sourceLevel against
+// version v, expands the input set, and snapshots the initial state.
+// The compaction keeps v's reference until release is called.
+func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction {
+	c := &compaction{
+		s:             s,
+		v:             v,
+		sourceLevel:   sourceLevel,
+		levels:        [2]tFiles{t0, nil},
+		maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)),
+		tPtrs:         make([]int, len(v.levels)),
+	}
+	c.expand()
+	c.save()
+	return c
+}
+
+// compaction represent a compaction state.
+type compaction struct {
+	s *session
+	v *version // version the compaction operates on; released via release()
+
+	sourceLevel   int       // level being compacted (output goes to sourceLevel+1)
+	levels        [2]tFiles // input tables: [0] from sourceLevel, [1] from sourceLevel+1
+	maxGPOverlaps int64     // max bytes of grandparent overlap per output table
+
+	gp                tFiles      // grandparent (sourceLevel+2) tables overlapping the inputs
+	gpi               int         // cursor into gp for shouldStopBefore
+	seenKey           bool        // whether shouldStopBefore has seen a key yet
+	gpOverlappedBytes int64       // grandparent bytes overlapped by the current output
+	imin, imax        internalKey // key range covered by the inputs
+	tPtrs             []int       // per-level cursors for baseLevelForKey
+	released          bool        // guards double release of v
+
+	// Snapshot of the mutable cursors, restored by restore().
+	snapGPI               int
+	snapSeenKey           bool
+	snapGPOverlappedBytes int64
+	snapTPtrs             []int
+}
+
+// save snapshots the mutable iteration state so a failed compaction run
+// can be retried from the same point via restore.
+func (c *compaction) save() {
+	c.snapGPI = c.gpi
+	c.snapSeenKey = c.seenKey
+	c.snapGPOverlappedBytes = c.gpOverlappedBytes
+	c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
+}
+
+// restore rolls the mutable iteration state back to the last save.
+func (c *compaction) restore() {
+	c.gpi = c.snapGPI
+	c.seenKey = c.snapSeenKey
+	c.gpOverlappedBytes = c.snapGPOverlappedBytes
+	c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
+}
+
+// release drops the compaction's version reference exactly once.
+func (c *compaction) release() {
+	if !c.released {
+		c.released = true
+		c.v.release()
+	}
+}
+
+// Expand compacted tables; need external synchronization.
+//
+// Grows the level-0 input set to cover user-key overlaps, pulls in the
+// overlapping parent (sourceLevel+1) tables, and then tries to widen the
+// source set further when that does not change the parent set and the
+// combined size stays under the expand limit. Finally computes the
+// grandparent tables used by shouldStopBefore.
+func (c *compaction) expand() {
+	limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel))
+	vt0 := c.v.levels[c.sourceLevel]
+	vt1 := tFiles{}
+	if level := c.sourceLevel + 1; level < len(c.v.levels) {
+		vt1 = c.v.levels[level]
+	}
+
+	t0, t1 := c.levels[0], c.levels[1]
+	imin, imax := t0.getRange(c.s.icmp)
+	// We expand t0 here just incase ukey hop across tables.
+	t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0)
+	if len(t0) != len(c.levels[0]) {
+		imin, imax = t0.getRange(c.s.icmp)
+	}
+	t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
+	// Get entire range covered by compaction.
+	amin, amax := append(t0, t1...).getRange(c.s.icmp)
+
+	// See if we can grow the number of inputs in "sourceLevel" without
+	// changing the number of "sourceLevel+1" files we pick up.
+	if len(t1) > 0 {
+		exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0)
+		if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
+			xmin, xmax := exp0.getRange(c.s.icmp)
+			exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
+			if len(exp1) == len(t1) {
+				c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
+					c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
+					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
+				imin, imax = xmin, xmax
+				t0, t1 = exp0, exp1
+				amin, amax = append(t0, t1...).getRange(c.s.icmp)
+			}
+		}
+	}
+
+	// Compute the set of grandparent files that overlap this compaction
+	// (parent == sourceLevel+1; grandparent == sourceLevel+2)
+	if level := c.sourceLevel + 2; level < len(c.v.levels) {
+		c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
+	}
+
+	c.levels[0], c.levels[1] = t0, t1
+	c.imin, c.imax = imin, imax
+}
+
+// Check whether compaction is trivial.
+//
+// A trivial compaction moves a single source table with no parent
+// overlap and acceptable grandparent overlap, so the table can be
+// relinked to the next level without rewriting.
+func (c *compaction) trivial() bool {
+	return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
+}
+
+// baseLevelForKey reports whether no level deeper than the output level
+// contains ukey, i.e. a deletion marker for ukey can be dropped. The
+// per-level cursors in tPtrs only move forward, which relies on the
+// caller feeding keys in increasing order.
+func (c *compaction) baseLevelForKey(ukey []byte) bool {
+	for level := c.sourceLevel + 2; level < len(c.v.levels); level++ {
+		tables := c.v.levels[level]
+		for c.tPtrs[level] < len(tables) {
+			t := tables[c.tPtrs[level]]
+			if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
+				// We've advanced far enough.
+				if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+					// Key falls in this file's range, so definitely not base level.
+					return false
+				}
+				break
+			}
+			c.tPtrs[level]++
+		}
+	}
+	return true
+}
+
+// shouldStopBefore reports whether the current output table should be
+// finished before writing ikey, to cap how many grandparent bytes any
+// single output table overlaps (bounding future compaction cost).
+func (c *compaction) shouldStopBefore(ikey internalKey) bool {
+	for ; c.gpi < len(c.gp); c.gpi++ {
+		gp := c.gp[c.gpi]
+		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
+			break
+		}
+		if c.seenKey {
+			c.gpOverlappedBytes += gp.size
+		}
+	}
+	c.seenKey = true
+
+	if c.gpOverlappedBytes > c.maxGPOverlaps {
+		// Too much overlap for current output; start new output.
+		c.gpOverlappedBytes = 0
+		return true
+	}
+	return false
+}
+
+// Creates an iterator.
+//
+// Returns a merged iterator over every input table. Level-0 tables may
+// overlap each other, so each gets its own iterator; sorted levels use
+// a single indexed iterator over the level's table index.
+func (c *compaction) newIterator() iterator.Iterator {
+	// Creates iterator slice.
+	icap := len(c.levels)
+	if c.sourceLevel == 0 {
+		// Special case for level-0.
+		icap = len(c.levels[0]) + 1
+	}
+	its := make([]iterator.Iterator, 0, icap)
+
+	// Options.
+	ro := &opt.ReadOptions{
+		DontFillCache: true,
+		Strict:        opt.StrictOverride,
+	}
+	strict := c.s.o.GetStrict(opt.StrictCompaction)
+	if strict {
+		ro.Strict |= opt.StrictReader
+	}
+
+	for i, tables := range c.levels {
+		if len(tables) == 0 {
+			continue
+		}
+
+		// Level-0 is not sorted and may overlaps each other.
+		if c.sourceLevel+i == 0 {
+			for _, t := range tables {
+				its = append(its, c.s.tops.newIterator(t, nil, ro))
+			}
+		} else {
+			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
+			its = append(its, it)
+		}
+	}
+
+	return iterator.NewMergedIterator(its, c.s.icmp, strict)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
new file mode 100644
index 000000000..854e1aa6f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -0,0 +1,323 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "bufio"
+ "encoding/binary"
+ "io"
+ "strings"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// byteReader combines the reader interfaces needed by the record
+// decoder: bulk reads for byte strings and single-byte reads for
+// varints.
+type byteReader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// These numbers are written to disk and should not be changed.
+const (
+	recComparer    = 1
+	recJournalNum  = 2
+	recNextFileNum = 3
+	recSeqNum      = 4
+	recCompPtr     = 5
+	recDelTable    = 6
+	recAddTable    = 7
+	// 8 was used for large value refs
+	recPrevJournalNum = 9
+)
+
+// cpRecord is a compaction-pointer entry: the next key to compact at a
+// given level.
+type cpRecord struct {
+	level int
+	ikey  internalKey
+}
+
+// atRecord describes a table added to a level, including its key range.
+type atRecord struct {
+	level int
+	num   int64
+	size  int64
+	imin  internalKey
+	imax  internalKey
+}
+
+// dtRecord describes a table deleted from a level.
+type dtRecord struct {
+	level int
+	num   int64
+}
+
+// sessionRecord is one manifest entry: a batch of state changes
+// (comparer name, file numbers, sequence number, compaction pointers,
+// added/deleted tables) encoded to and decoded from the manifest
+// journal.
+type sessionRecord struct {
+	hasRec         int    // bitmask of rec* constants present in this record
+	comparer       string
+	journalNum     int64
+	prevJournalNum int64
+	nextFileNum    int64
+	seqNum         uint64
+	compPtrs       []cpRecord
+	addedTables    []atRecord
+	deletedTables  []dtRecord
+
+	scratch [binary.MaxVarintLen64]byte // reused varint encoding buffer
+	err     error                       // sticky error for the put*/read* helpers
+}
+
+// has reports whether field rec (a rec* constant) is present in the
+// record, via the hasRec bitmask.
+func (p *sessionRecord) has(rec int) bool {
+	return p.hasRec&(1<<uint(rec)) != 0
+}
+
+// Each set*/add* helper stores a value and marks its presence bit; the
+// matching reset* helpers clear repeated fields between decoded records.
+
+func (p *sessionRecord) setComparer(name string) {
+	p.hasRec |= 1 << recComparer
+	p.comparer = name
+}
+
+func (p *sessionRecord) setJournalNum(num int64) {
+	p.hasRec |= 1 << recJournalNum
+	p.journalNum = num
+}
+
+func (p *sessionRecord) setPrevJournalNum(num int64) {
+	p.hasRec |= 1 << recPrevJournalNum
+	p.prevJournalNum = num
+}
+
+func (p *sessionRecord) setNextFileNum(num int64) {
+	p.hasRec |= 1 << recNextFileNum
+	p.nextFileNum = num
+}
+
+func (p *sessionRecord) setSeqNum(num uint64) {
+	p.hasRec |= 1 << recSeqNum
+	p.seqNum = num
+}
+
+func (p *sessionRecord) addCompPtr(level int, ikey internalKey) {
+	p.hasRec |= 1 << recCompPtr
+	p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
+}
+
+func (p *sessionRecord) resetCompPtrs() {
+	p.hasRec &= ^(1 << recCompPtr)
+	p.compPtrs = p.compPtrs[:0]
+}
+
+func (p *sessionRecord) addTable(level int, num, size int64, imin, imax internalKey) {
+	p.hasRec |= 1 << recAddTable
+	p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
+}
+
+// addTableFile records an added table directly from a tFile.
+func (p *sessionRecord) addTableFile(level int, t *tFile) {
+	p.addTable(level, t.fd.Num, t.size, t.imin, t.imax)
+}
+
+func (p *sessionRecord) resetAddedTables() {
+	p.hasRec &= ^(1 << recAddTable)
+	p.addedTables = p.addedTables[:0]
+}
+
+func (p *sessionRecord) delTable(level int, num int64) {
+	p.hasRec |= 1 << recDelTable
+	p.deletedTables = append(p.deletedTables, dtRecord{level, num})
+}
+
+func (p *sessionRecord) resetDeletedTables() {
+	p.hasRec &= ^(1 << recDelTable)
+	p.deletedTables = p.deletedTables[:0]
+}
+
+// putUvarint writes x as an unsigned varint. All put* helpers are
+// no-ops once p.err is set, so encode can chain calls and check the
+// error once at the end.
+func (p *sessionRecord) putUvarint(w io.Writer, x uint64) {
+	if p.err != nil {
+		return
+	}
+	n := binary.PutUvarint(p.scratch[:], x)
+	_, p.err = w.Write(p.scratch[:n])
+}
+
+// putVarint writes a non-negative int64 as an unsigned varint; a
+// negative value is a programmer error and panics.
+func (p *sessionRecord) putVarint(w io.Writer, x int64) {
+	if x < 0 {
+		panic("invalid negative value")
+	}
+	p.putUvarint(w, uint64(x))
+}
+
+// putBytes writes a length-prefixed byte string.
+func (p *sessionRecord) putBytes(w io.Writer, x []byte) {
+	if p.err != nil {
+		return
+	}
+	p.putUvarint(w, uint64(len(x)))
+	if p.err != nil {
+		return
+	}
+	_, p.err = w.Write(x)
+}
+
+// encode serializes the record to w as a sequence of tagged fields:
+// each field is its rec* tag (uvarint) followed by the field payload.
+// Only fields whose presence bit is set are written. Returns the first
+// write error, if any.
+func (p *sessionRecord) encode(w io.Writer) error {
+	p.err = nil
+	if p.has(recComparer) {
+		p.putUvarint(w, recComparer)
+		p.putBytes(w, []byte(p.comparer))
+	}
+	if p.has(recJournalNum) {
+		p.putUvarint(w, recJournalNum)
+		p.putVarint(w, p.journalNum)
+	}
+	if p.has(recNextFileNum) {
+		p.putUvarint(w, recNextFileNum)
+		p.putVarint(w, p.nextFileNum)
+	}
+	if p.has(recSeqNum) {
+		p.putUvarint(w, recSeqNum)
+		p.putUvarint(w, p.seqNum)
+	}
+	for _, r := range p.compPtrs {
+		p.putUvarint(w, recCompPtr)
+		p.putUvarint(w, uint64(r.level))
+		p.putBytes(w, r.ikey)
+	}
+	for _, r := range p.deletedTables {
+		p.putUvarint(w, recDelTable)
+		p.putUvarint(w, uint64(r.level))
+		p.putVarint(w, r.num)
+	}
+	for _, r := range p.addedTables {
+		p.putUvarint(w, recAddTable)
+		p.putUvarint(w, uint64(r.level))
+		p.putVarint(w, r.num)
+		p.putVarint(w, r.size)
+		p.putBytes(w, r.imin)
+		p.putBytes(w, r.imax)
+	}
+	return p.err
+}
+
+func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF bool) uint64 {
+ if p.err != nil {
+ return 0
+ }
+ x, err := binary.ReadUvarint(r)
+ if err != nil {
+ if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) {
+ p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"})
+ } else if strings.HasPrefix(err.Error(), "binary:") {
+ p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, err.Error()})
+ } else {
+ p.err = err
+ }
+ return 0
+ }
+ return x
+}
+
+func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
+ return p.readUvarintMayEOF(field, r, false)
+}
+
+func (p *sessionRecord) readVarint(field string, r io.ByteReader) int64 {
+ x := int64(p.readUvarintMayEOF(field, r, false))
+ if x < 0 {
+ p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "invalid negative value"})
+ }
+ return x
+}
+
+func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
+ if p.err != nil {
+ return nil
+ }
+ n := p.readUvarint(field, r)
+ if p.err != nil {
+ return nil
+ }
+ x := make([]byte, n)
+ _, p.err = io.ReadFull(r, x)
+ if p.err != nil {
+ if p.err == io.ErrUnexpectedEOF {
+ p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"})
+ }
+ return nil
+ }
+ return x
+}
+
+func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
+ if p.err != nil {
+ return 0
+ }
+ x := p.readUvarint(field, r)
+ if p.err != nil {
+ return 0
+ }
+ return int(x)
+}
+
+// decode parses one encoded record from r, dispatching on each field's
+// rec* tag until EOF. Unknown tags are skipped without their payloads;
+// NOTE(review): skipping only the tag means an unknown field's payload
+// bytes are then parsed as the next tag — presumably unknown tags do
+// not occur in practice since the set is fixed on disk.
+func (p *sessionRecord) decode(r io.Reader) error {
+	br, ok := r.(byteReader)
+	if !ok {
+		// Wrap readers that cannot do single-byte reads.
+		br = bufio.NewReader(r)
+	}
+	p.err = nil
+	for p.err == nil {
+		// mayEOF: a clean EOF at a field boundary ends the record.
+		rec := p.readUvarintMayEOF("field-header", br, true)
+		if p.err != nil {
+			if p.err == io.EOF {
+				return nil
+			}
+			return p.err
+		}
+		switch rec {
+		case recComparer:
+			x := p.readBytes("comparer", br)
+			if p.err == nil {
+				p.setComparer(string(x))
+			}
+		case recJournalNum:
+			x := p.readVarint("journal-num", br)
+			if p.err == nil {
+				p.setJournalNum(x)
+			}
+		case recPrevJournalNum:
+			x := p.readVarint("prev-journal-num", br)
+			if p.err == nil {
+				p.setPrevJournalNum(x)
+			}
+		case recNextFileNum:
+			x := p.readVarint("next-file-num", br)
+			if p.err == nil {
+				p.setNextFileNum(x)
+			}
+		case recSeqNum:
+			x := p.readUvarint("seq-num", br)
+			if p.err == nil {
+				p.setSeqNum(x)
+			}
+		case recCompPtr:
+			level := p.readLevel("comp-ptr.level", br)
+			ikey := p.readBytes("comp-ptr.ikey", br)
+			if p.err == nil {
+				p.addCompPtr(level, internalKey(ikey))
+			}
+		case recAddTable:
+			level := p.readLevel("add-table.level", br)
+			num := p.readVarint("add-table.num", br)
+			size := p.readVarint("add-table.size", br)
+			imin := p.readBytes("add-table.imin", br)
+			imax := p.readBytes("add-table.imax", br)
+			if p.err == nil {
+				p.addTable(level, num, size, imin, imax)
+			}
+		case recDelTable:
+			level := p.readLevel("del-table.level", br)
+			num := p.readVarint("del-table.num", br)
+			if p.err == nil {
+				p.delTable(level, num)
+			}
+		}
+	}
+
+	return p.err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
new file mode 100644
index 000000000..34ad61798
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -0,0 +1,258 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// Logging.
+
+// dropper logs journal entries dropped by the journal reader during
+// manifest/journal recovery.
+type dropper struct {
+	s  *session
+	fd storage.FileDesc
+}
+
+// Drop logs a dropped journal chunk, with size/reason detail when the
+// error is a journal corruption.
+func (d dropper) Drop(err error) {
+	if e, ok := err.(*journal.ErrCorrupted); ok {
+		d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason)
+	} else {
+		d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err)
+	}
+}
+
+// log and logf forward session log messages to the storage's logger.
+func (s *session) log(v ...interface{})                 { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
+
+// File utils.
+
+// newTemp returns a fresh temporary-file descriptor, numbered from an
+// atomic counter that is independent of the main file-number sequence.
+func (s *session) newTemp() storage.FileDesc {
+	num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
+	return storage.FileDesc{storage.TypeTemp, num}
+}
+
+// Session state.
+
+// Get current version. This will incr version ref, must call
+// version.release (exactly once) after use.
+func (s *session) version() *version {
+	s.vmu.Lock()
+	defer s.vmu.Unlock()
+	s.stVersion.ref++
+	return s.stVersion
+}
+
+// tLen returns the number of tables at the given level in the current
+// version.
+func (s *session) tLen(level int) int {
+	s.vmu.Lock()
+	defer s.vmu.Unlock()
+	return s.stVersion.tLen(level)
+}
+
+// Set current version to v.
+func (s *session) setVersion(v *version) {
+	s.vmu.Lock()
+	v.ref = 1 // Holds by session.
+	if old := s.stVersion; old != nil {
+		v.ref++ // Holds by old version.
+		old.next = v
+		// releaseNB drops the session's reference without re-locking vmu.
+		old.releaseNB()
+	}
+	s.stVersion = v
+	s.vmu.Unlock()
+}
+
+// Get current unused file number.
+func (s *session) nextFileNum() int64 {
+	return atomic.LoadInt64(&s.stNextFileNum)
+}
+
+// Set current unused file number to num.
+func (s *session) setNextFileNum(num int64) {
+	atomic.StoreInt64(&s.stNextFileNum, num)
+}
+
+// Mark file number as used.
+//
+// Raises the next-file-number counter to num+1 if it is currently
+// lower, via a CAS retry loop.
+// NOTE(review): the read of s.stNextFileNum inside the loop is a plain
+// (non-atomic) load; the CAS re-validates it, but a racing reader could
+// observe a torn value on 32-bit platforms — confirm against upstream.
+func (s *session) markFileNum(num int64) {
+	nextFileNum := num + 1
+	for {
+		old, x := s.stNextFileNum, nextFileNum
+		if old > x {
+			x = old
+		}
+		if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
+			break
+		}
+	}
+}
+
+// Allocate a file number.
+func (s *session) allocFileNum() int64 {
+	return atomic.AddInt64(&s.stNextFileNum, 1) - 1
+}
+
+// Reuse given file number.
+//
+// Rolls the counter back to num, but only when num was the most
+// recently allocated number (counter == num+1); otherwise leaves the
+// counter unchanged.
+func (s *session) reuseFileNum(num int64) {
+	for {
+		old, x := s.stNextFileNum, num
+		if old != x+1 {
+			x = old
+		}
+		if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
+			break
+		}
+	}
+}
+
+// Set compaction ptr at given level; need external synchronization.
+//
+// Grows the pointer slice on demand and stores a private copy of ik so
+// the caller's buffer can be reused.
+func (s *session) setCompPtr(level int, ik internalKey) {
+	if level >= len(s.stCompPtrs) {
+		newCompPtrs := make([]internalKey, level+1)
+		copy(newCompPtrs, s.stCompPtrs)
+		s.stCompPtrs = newCompPtrs
+	}
+	s.stCompPtrs[level] = append(internalKey{}, ik...)
+}
+
+// Get compaction ptr at given level; need external synchronization.
+// Returns nil when no pointer has been set for the level.
+func (s *session) getCompPtr(level int) internalKey {
+	if level >= len(s.stCompPtrs) {
+		return nil
+	}
+	return s.stCompPtrs[level]
+}
+
+// Manifest related utils.
+
+// Fill given session record obj with current states; need external
+// synchronization.
+//
+// Always records the next file number. With snapshot set (used when
+// starting a fresh manifest) it also fills in journal number, sequence
+// number, compaction pointers and comparer name — unless the caller
+// already set them on the record.
+func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
+	r.setNextFileNum(s.nextFileNum())
+
+	if snapshot {
+		if !r.has(recJournalNum) {
+			r.setJournalNum(s.stJournalNum)
+		}
+
+		if !r.has(recSeqNum) {
+			r.setSeqNum(s.stSeqNum)
+		}
+
+		for level, ik := range s.stCompPtrs {
+			if ik != nil {
+				r.addCompPtr(level, ik)
+			}
+		}
+
+		r.setComparer(s.icmp.uName())
+	}
+}
+
+// Mark if record has been committed, this will update session state;
+// need external synchronization.
+func (s *session) recordCommited(rec *sessionRecord) {
+	if rec.has(recJournalNum) {
+		s.stJournalNum = rec.journalNum
+	}
+
+	if rec.has(recPrevJournalNum) {
+		s.stPrevJournalNum = rec.prevJournalNum
+	}
+
+	if rec.has(recSeqNum) {
+		s.stSeqNum = rec.seqNum
+	}
+
+	for _, r := range rec.compPtrs {
+		s.setCompPtr(r.level, internalKey(r.ikey))
+	}
+}
+
+// Create a new manifest file; need external synchronization.
+//
+// Writes a full snapshot (session state + version contents) as the
+// first record of a brand-new manifest. On success the new manifest
+// replaces the current one (old file removed, meta pointer updated);
+// on failure the partial file is removed and its number reused.
+func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
+	fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()}
+	writer, err := s.stor.Create(fd)
+	if err != nil {
+		return
+	}
+	jw := journal.NewWriter(writer)
+
+	if v == nil {
+		v = s.version()
+		defer v.release()
+	}
+	if rec == nil {
+		rec = &sessionRecord{}
+	}
+	// snapshot=true: the first record of a manifest must be complete.
+	s.fillRecord(rec, true)
+	v.fillRecord(rec)
+
+	// Success/failure handling for the writes below.
+	defer func() {
+		if err == nil {
+			s.recordCommited(rec)
+			if s.manifest != nil {
+				s.manifest.Close()
+			}
+			if s.manifestWriter != nil {
+				s.manifestWriter.Close()
+			}
+			if !s.manifestFd.Zero() {
+				s.stor.Remove(s.manifestFd)
+			}
+			s.manifestFd = fd
+			s.manifestWriter = writer
+			s.manifest = jw
+		} else {
+			writer.Close()
+			s.stor.Remove(fd)
+			s.reuseFileNum(fd.Num)
+		}
+	}()
+
+	w, err := jw.Next()
+	if err != nil {
+		return
+	}
+	err = rec.encode(w)
+	if err != nil {
+		return
+	}
+	err = jw.Flush()
+	if err != nil {
+		return
+	}
+	// Point the storage meta at the new manifest last, so a crash before
+	// this keeps the old manifest current.
+	err = s.stor.SetMeta(fd)
+	return
+}
+
+// Flush record to disk.
+//
+// Appends rec (a delta, not a snapshot) to the current manifest
+// journal, flushes it, optionally syncs the file, and finally updates
+// the in-memory session state to match what was persisted.
+func (s *session) flushManifest(rec *sessionRecord) (err error) {
+	s.fillRecord(rec, false)
+	w, err := s.manifest.Next()
+	if err != nil {
+		return
+	}
+	err = rec.encode(w)
+	if err != nil {
+		return
+	}
+	err = s.manifest.Flush()
+	if err != nil {
+		return
+	}
+	if !s.o.GetNoSync() {
+		err = s.manifestWriter.Sync()
+		if err != nil {
+			return
+		}
+	}
+	s.recordCommited(rec)
+	return
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
new file mode 100644
index 000000000..e53434cab
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
@@ -0,0 +1,583 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reservefs.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ errFileOpen = errors.New("leveldb/storage: file still open")
+ errReadOnly = errors.New("leveldb/storage: storage is read-only")
+)
+
+type fileLock interface {
+ release() error
+}
+
+type fileStorageLock struct {
+ fs *fileStorage
+}
+
+func (lock *fileStorageLock) Unlock() {
+ if lock.fs != nil {
+ lock.fs.mu.Lock()
+ defer lock.fs.mu.Unlock()
+ if lock.fs.slock == lock {
+ lock.fs.slock = nil
+ }
+ }
+}
+
+const logSizeThreshold = 1024 * 1024 // 1 MiB
+
+// fileStorage is a file-system backed storage.
+type fileStorage struct {
+ path string
+ readOnly bool
+
+ mu sync.Mutex
+ flock fileLock
+ slock *fileStorageLock
+ logw *os.File
+ logSize int64
+ buf []byte
+ // Opened file counter; if open < 0 means closed.
+ open int
+ day int
+}
+
+// OpenFile returns a new filesytem-backed storage implementation with the given
+// path. This also acquire a file lock, so any subsequent attempt to open the
+// same path will fail.
+//
+// The storage must be closed after use, by calling Close method.
+func OpenFile(path string, readOnly bool) (Storage, error) {
+ if fi, err := os.Stat(path); err == nil {
+ if !fi.IsDir() {
+ return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path)
+ }
+ } else if os.IsNotExist(err) && !readOnly {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+
+ flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err != nil {
+ flock.release()
+ }
+ }()
+
+ var (
+ logw *os.File
+ logSize int64
+ )
+ if !readOnly {
+ logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ logSize, err = logw.Seek(0, os.SEEK_END)
+ if err != nil {
+ logw.Close()
+ return nil, err
+ }
+ }
+
+ fs := &fileStorage{
+ path: path,
+ readOnly: readOnly,
+ flock: flock,
+ logw: logw,
+ logSize: logSize,
+ }
+ runtime.SetFinalizer(fs, (*fileStorage).Close)
+ return fs, nil
+}
+
+func (fs *fileStorage) Lock() (Locker, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ if fs.readOnly {
+ return &fileStorageLock{}, nil
+ }
+ if fs.slock != nil {
+ return nil, ErrLocked
+ }
+ fs.slock = &fileStorageLock{fs: fs}
+ return fs.slock, nil
+}
+
+func itoa(buf []byte, i int, wid int) []byte {
+ u := uint(i)
+ if u == 0 && wid <= 1 {
+ return append(buf, '0')
+ }
+
+ // Assemble decimal in reverse order.
+ var b [32]byte
+ bp := len(b)
+ for ; u > 0 || wid > 0; u /= 10 {
+ bp--
+ wid--
+ b[bp] = byte(u%10) + '0'
+ }
+ return append(buf, b[bp:]...)
+}
+
+func (fs *fileStorage) printDay(t time.Time) {
+ if fs.day == t.Day() {
+ return
+ }
+ fs.day = t.Day()
+ fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
+}
+
+func (fs *fileStorage) doLog(t time.Time, str string) {
+ if fs.logSize > logSizeThreshold {
+ // Rotate log file.
+ fs.logw.Close()
+ fs.logw = nil
+ fs.logSize = 0
+ rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old"))
+ }
+ if fs.logw == nil {
+ var err error
+ fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return
+ }
+ // Force printDay on new log file.
+ fs.day = 0
+ }
+ fs.printDay(t)
+ hour, min, sec := t.Clock()
+ msec := t.Nanosecond() / 1e3
+ // time
+ fs.buf = itoa(fs.buf[:0], hour, 2)
+ fs.buf = append(fs.buf, ':')
+ fs.buf = itoa(fs.buf, min, 2)
+ fs.buf = append(fs.buf, ':')
+ fs.buf = itoa(fs.buf, sec, 2)
+ fs.buf = append(fs.buf, '.')
+ fs.buf = itoa(fs.buf, msec, 6)
+ fs.buf = append(fs.buf, ' ')
+ // write
+ fs.buf = append(fs.buf, []byte(str)...)
+ fs.buf = append(fs.buf, '\n')
+ fs.logw.Write(fs.buf)
+}
+
+func (fs *fileStorage) Log(str string) {
+ if !fs.readOnly {
+ t := time.Now()
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return
+ }
+ fs.doLog(t, str)
+ }
+}
+
+func (fs *fileStorage) log(str string) {
+ if !fs.readOnly {
+ fs.doLog(time.Now(), str)
+ }
+}
+
+func (fs *fileStorage) SetMeta(fd FileDesc) (err error) {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ defer func() {
+ if err != nil {
+ fs.log(fmt.Sprintf("CURRENT: %v", err))
+ }
+ }()
+ path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
+ w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return
+ }
+ _, err = fmt.Fprintln(w, fsGenName(fd))
+ // Close the file first.
+ if cerr := w.Close(); cerr != nil {
+ fs.log(fmt.Sprintf("close CURRENT.%d: %v", fd.Num, cerr))
+ }
+ if err != nil {
+ return
+ }
+ return rename(path, filepath.Join(fs.path, "CURRENT"))
+}
+
+func (fs *fileStorage) GetMeta() (fd FileDesc, err error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return FileDesc{}, ErrClosed
+ }
+ dir, err := os.Open(fs.path)
+ if err != nil {
+ return
+ }
+ names, err := dir.Readdirnames(0)
+ // Close the dir first before checking for Readdirnames error.
+ if ce := dir.Close(); ce != nil {
+ fs.log(fmt.Sprintf("close dir: %v", ce))
+ }
+ if err != nil {
+ return
+ }
+ // Find latest CURRENT file.
+ var rem []string
+ var pend bool
+ var cerr error
+ for _, name := range names {
+ if strings.HasPrefix(name, "CURRENT") {
+ pend1 := len(name) > 7
+ var pendNum int64
+ // Make sure it is valid name for a CURRENT file, otherwise skip it.
+ if pend1 {
+ if name[7] != '.' || len(name) < 9 {
+ fs.log(fmt.Sprintf("skipping %s: invalid file name", name))
+ continue
+ }
+ var e1 error
+ if pendNum, e1 = strconv.ParseInt(name[8:], 10, 0); e1 != nil {
+ fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", name, e1))
+ continue
+ }
+ }
+ path := filepath.Join(fs.path, name)
+ r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
+ if e1 != nil {
+ return FileDesc{}, e1
+ }
+ b, e1 := ioutil.ReadAll(r)
+ if e1 != nil {
+ r.Close()
+ return FileDesc{}, e1
+ }
+ var fd1 FileDesc
+ if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd1) {
+ fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", name))
+ if pend1 {
+ rem = append(rem, name)
+ }
+ if !pend1 || cerr == nil {
+ metaFd, _ := fsParseName(name)
+ cerr = &ErrCorrupted{
+ Fd: metaFd,
+ Err: errors.New("leveldb/storage: corrupted or incomplete meta file"),
+ }
+ }
+ } else if pend1 && pendNum != fd1.Num {
+ fs.log(fmt.Sprintf("skipping %s: inconsistent pending-file num: %d vs %d", name, pendNum, fd1.Num))
+ rem = append(rem, name)
+ } else if fd1.Num < fd.Num {
+ fs.log(fmt.Sprintf("skipping %s: obsolete", name))
+ if pend1 {
+ rem = append(rem, name)
+ }
+ } else {
+ fd = fd1
+ pend = pend1
+ }
+ if err := r.Close(); err != nil {
+ fs.log(fmt.Sprintf("close %s: %v", name, err))
+ }
+ }
+ }
+ // Don't remove any files if there is no valid CURRENT file.
+ if fd.Zero() {
+ if cerr != nil {
+ err = cerr
+ } else {
+ err = os.ErrNotExist
+ }
+ return
+ }
+ if !fs.readOnly {
+ // Rename pending CURRENT file to an effective CURRENT.
+ if pend {
+ path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
+ if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
+ fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", fd.Num, err))
+ }
+ }
+ // Remove obsolete or incomplete pending CURRENT files.
+ for _, name := range rem {
+ path := filepath.Join(fs.path, name)
+ if err := os.Remove(path); err != nil {
+ fs.log(fmt.Sprintf("remove %s: %v", name, err))
+ }
+ }
+ }
+ return
+}
+
+func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ dir, err := os.Open(fs.path)
+ if err != nil {
+ return
+ }
+ names, err := dir.Readdirnames(0)
+ // Close the dir first before checking for Readdirnames error.
+ if cerr := dir.Close(); cerr != nil {
+ fs.log(fmt.Sprintf("close dir: %v", cerr))
+ }
+ if err == nil {
+ for _, name := range names {
+ if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 {
+ fds = append(fds, fd)
+ }
+ }
+ }
+ return
+}
+
+func (fs *fileStorage) Open(fd FileDesc) (Reader, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0)
+ if err != nil {
+ if fsHasOldName(fd) && os.IsNotExist(err) {
+ of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0)
+ if err == nil {
+ goto ok
+ }
+ }
+ return nil, err
+ }
+ok:
+ fs.open++
+ return &fileWrap{File: of, fs: fs, fd: fd}, nil
+}
+
+func (fs *fileStorage) Create(fd FileDesc) (Writer, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+ if fs.readOnly {
+ return nil, errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return nil, err
+ }
+ fs.open++
+ return &fileWrap{File: of, fs: fs, fd: fd}, nil
+}
+
+func (fs *fileStorage) Remove(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ err := os.Remove(filepath.Join(fs.path, fsGenName(fd)))
+ if err != nil {
+ if fsHasOldName(fd) && os.IsNotExist(err) {
+ if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) {
+ fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err))
+ err = e1
+ }
+ } else {
+ fs.log(fmt.Sprintf("remove %s: %v", fd, err))
+ }
+ }
+ return err
+}
+
+func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error {
+ if !FileDescOk(oldfd) || !FileDescOk(newfd) {
+ return ErrInvalidFile
+ }
+ if oldfd == newfd {
+ return nil
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd)))
+}
+
+func (fs *fileStorage) Close() error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ // Clear the finalizer.
+ runtime.SetFinalizer(fs, nil)
+
+ if fs.open > 0 {
+ fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open))
+ }
+ fs.open = -1
+ if fs.logw != nil {
+ fs.logw.Close()
+ }
+ return fs.flock.release()
+}
+
+type fileWrap struct {
+ *os.File
+ fs *fileStorage
+ fd FileDesc
+ closed bool
+}
+
+func (fw *fileWrap) Sync() error {
+ if err := fw.File.Sync(); err != nil {
+ return err
+ }
+ if fw.fd.Type == TypeManifest {
+ // Also sync parent directory if file type is manifest.
+ // See: https://code.google.com/p/leveldb/issues/detail?id=190.
+ if err := syncDir(fw.fs.path); err != nil {
+ fw.fs.log(fmt.Sprintf("syncDir: %v", err))
+ return err
+ }
+ }
+ return nil
+}
+
+func (fw *fileWrap) Close() error {
+ fw.fs.mu.Lock()
+ defer fw.fs.mu.Unlock()
+ if fw.closed {
+ return ErrClosed
+ }
+ fw.closed = true
+ fw.fs.open--
+ err := fw.File.Close()
+ if err != nil {
+ fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err))
+ }
+ return err
+}
+
+func fsGenName(fd FileDesc) string {
+ switch fd.Type {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", fd.Num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", fd.Num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", fd.Num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", fd.Num)
+ default:
+ panic("invalid file type")
+ }
+}
+
+func fsHasOldName(fd FileDesc) bool {
+ return fd.Type == TypeTable
+}
+
+func fsGenOldName(fd FileDesc) string {
+ switch fd.Type {
+ case TypeTable:
+ return fmt.Sprintf("%06d.sst", fd.Num)
+ }
+ return fsGenName(fd)
+}
+
+func fsParseName(name string) (fd FileDesc, ok bool) {
+ var tail string
+ _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail)
+ if err == nil {
+ switch tail {
+ case "log":
+ fd.Type = TypeJournal
+ case "ldb", "sst":
+ fd.Type = TypeTable
+ case "tmp":
+ fd.Type = TypeTemp
+ default:
+ return
+ }
+ return fd, true
+ }
+ n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail)
+ if n == 1 {
+ fd.Type = TypeManifest
+ return fd, true
+ }
+ return
+}
+
+func fsParseNamePtr(name string, fd *FileDesc) bool {
+ _fd, ok := fsParseName(name)
+ if fd != nil {
+ *fd = _fd
+ }
+ return ok
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go
new file mode 100644
index 000000000..5545aeef2
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build nacl
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ return nil, syscall.ENOTSUP
+}
+
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ return syscall.ENOTSUP
+}
+
+func rename(oldpath, newpath string) error {
+ return syscall.ENOTSUP
+}
+
+func isErrInvalid(err error) bool {
+ return false
+}
+
+func syncDir(name string) error {
+ return syscall.ENOTSUP
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
new file mode 100644
index 000000000..bab62bfce
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "os"
+ "path/filepath"
+)
+
+type plan9FileLock struct {
+ f *os.File
+}
+
+func (fl *plan9FileLock) release() error {
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var (
+ flag int
+ perm os.FileMode
+ )
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ perm = os.ModeExclusive
+ }
+ f, err := os.OpenFile(path, flag, perm)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644)
+ }
+ if err != nil {
+ return
+ }
+ fl = &plan9FileLock{f: f}
+ return
+}
+
+func rename(oldpath, newpath string) error {
+ if _, err := os.Stat(newpath); err == nil {
+ if err := os.Remove(newpath); err != nil {
+ return err
+ }
+ }
+
+ _, fname := filepath.Split(newpath)
+ return os.Rename(oldpath, fname)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
new file mode 100644
index 000000000..79901ee4a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var flag int
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ }
+ f, err := os.OpenFile(path, flag, 0)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, 0644)
+ }
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, readOnly, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ flock := syscall.Flock_t{
+ Type: syscall.F_UNLCK,
+ Start: 0,
+ Len: 0,
+ Whence: 1,
+ }
+ if lock {
+ if readOnly {
+ flock.Type = syscall.F_RDLCK
+ } else {
+ flock.Type = syscall.F_WRLCK
+ }
+ }
+ return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
new file mode 100644
index 000000000..7e2991537
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
@@ -0,0 +1,86 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var flag int
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ }
+ f, err := os.OpenFile(path, flag, 0)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, 0644)
+ }
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, readOnly, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ how := syscall.LOCK_UN
+ if lock {
+ if readOnly {
+ how = syscall.LOCK_SH
+ } else {
+ how = syscall.LOCK_EX
+ }
+ }
+ return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func isErrInvalid(err error) bool {
+ if err == os.ErrInvalid {
+ return true
+ }
+ if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
+ return true
+ }
+ return false
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil && !isErrInvalid(err) {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
new file mode 100644
index 000000000..899335fd7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+const (
+ _MOVEFILE_REPLACE_EXISTING = 1
+)
+
+type windowsFileLock struct {
+ fd syscall.Handle
+}
+
+func (fl *windowsFileLock) release() error {
+ return syscall.Close(fl.fd)
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return
+ }
+ var access, shareMode uint32
+ if readOnly {
+ access = syscall.GENERIC_READ
+ shareMode = syscall.FILE_SHARE_READ
+ } else {
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ }
+ fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ if err == syscall.ERROR_FILE_NOT_FOUND {
+ fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ }
+ if err != nil {
+ return
+ }
+ fl = &windowsFileLock{fd: fd}
+ return
+}
+
+func moveFileEx(from *uint16, to *uint16, flags uint32) error {
+ r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ if r1 == 0 {
+ if e1 != 0 {
+ return error(e1)
+ }
+ return syscall.EINVAL
+ }
+ return nil
+}
+
+func rename(oldpath, newpath string) error {
+ from, err := syscall.UTF16PtrFromString(oldpath)
+ if err != nil {
+ return err
+ }
+ to, err := syscall.UTF16PtrFromString(newpath)
+ if err != nil {
+ return err
+ }
+ return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING)
+}
+
+func syncDir(name string) error { return nil }
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
new file mode 100644
index 000000000..9b0421f03
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
@@ -0,0 +1,218 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "bytes"
+ "os"
+ "sync"
+)
+
+const typeShift = 3
+
+type memStorageLock struct {
+ ms *memStorage
+}
+
+func (lock *memStorageLock) Unlock() {
+ ms := lock.ms
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.slock == lock {
+ ms.slock = nil
+ }
+ return
+}
+
+// memStorage is a memory-backed storage.
+type memStorage struct {
+ mu sync.Mutex
+ slock *memStorageLock
+ files map[uint64]*memFile
+ meta FileDesc
+}
+
+// NewMemStorage returns a new memory-backed storage implementation.
+func NewMemStorage() Storage {
+ return &memStorage{
+ files: make(map[uint64]*memFile),
+ }
+}
+
+func (ms *memStorage) Lock() (Locker, error) {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.slock != nil {
+ return nil, ErrLocked
+ }
+ ms.slock = &memStorageLock{ms: ms}
+ return ms.slock, nil
+}
+
+func (*memStorage) Log(str string) {}
+
+func (ms *memStorage) SetMeta(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+
+ ms.mu.Lock()
+ ms.meta = fd
+ ms.mu.Unlock()
+ return nil
+}
+
+func (ms *memStorage) GetMeta() (FileDesc, error) {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.meta.Zero() {
+ return FileDesc{}, os.ErrNotExist
+ }
+ return ms.meta, nil
+}
+
+func (ms *memStorage) List(ft FileType) ([]FileDesc, error) {
+ ms.mu.Lock()
+ var fds []FileDesc
+ for x := range ms.files {
+ fd := unpackFile(x)
+ if fd.Type&ft != 0 {
+ fds = append(fds, fd)
+ }
+ }
+ ms.mu.Unlock()
+ return fds, nil
+}
+
+func (ms *memStorage) Open(fd FileDesc) (Reader, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if m, exist := ms.files[packFile(fd)]; exist {
+ if m.open {
+ return nil, errFileOpen
+ }
+ m.open = true
+ return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil
+ }
+ return nil, os.ErrNotExist
+}
+
+func (ms *memStorage) Create(fd FileDesc) (Writer, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ x := packFile(fd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ m, exist := ms.files[x]
+ if exist {
+ if m.open {
+ return nil, errFileOpen
+ }
+ m.Reset()
+ } else {
+ m = &memFile{}
+ ms.files[x] = m
+ }
+ m.open = true
+ return &memWriter{memFile: m, ms: ms}, nil
+}
+
+func (ms *memStorage) Remove(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+
+ x := packFile(fd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if _, exist := ms.files[x]; exist {
+ delete(ms.files, x)
+ return nil
+ }
+ return os.ErrNotExist
+}
+
+func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
+ if FileDescOk(oldfd) || FileDescOk(newfd) {
+ return ErrInvalidFile
+ }
+ if oldfd == newfd {
+ return nil
+ }
+
+ oldx := packFile(oldfd)
+ newx := packFile(newfd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ oldm, exist := ms.files[oldx]
+ if !exist {
+ return os.ErrNotExist
+ }
+ newm, exist := ms.files[newx]
+ if (exist && newm.open) || oldm.open {
+ return errFileOpen
+ }
+ delete(ms.files, oldx)
+ ms.files[newx] = oldm
+ return nil
+}
+
+func (*memStorage) Close() error { return nil }
+
+type memFile struct {
+ bytes.Buffer
+ open bool
+}
+
+type memReader struct {
+ *bytes.Reader
+ ms *memStorage
+ m *memFile
+ closed bool
+}
+
+func (mr *memReader) Close() error {
+ mr.ms.mu.Lock()
+ defer mr.ms.mu.Unlock()
+ if mr.closed {
+ return ErrClosed
+ }
+ mr.m.open = false
+ return nil
+}
+
+type memWriter struct {
+ *memFile
+ ms *memStorage
+ closed bool
+}
+
+func (*memWriter) Sync() error { return nil }
+
+func (mw *memWriter) Close() error {
+ mw.ms.mu.Lock()
+ defer mw.ms.mu.Unlock()
+ if mw.closed {
+ return ErrClosed
+ }
+ mw.memFile.open = false
+ return nil
+}
+
+func packFile(fd FileDesc) uint64 {
+ return uint64(fd.Num)<<typeShift | uint64(fd.Type)
+}
+
+func unpackFile(x uint64) FileDesc {
+ return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
new file mode 100644
index 000000000..c16bce6b6
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package storage provides storage abstraction for LevelDB.
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// FileType represent a file type.
+type FileType int
+
+// File types.
+const (
+ TypeManifest FileType = 1 << iota
+ TypeJournal
+ TypeTable
+ TypeTemp
+
+ TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
+)
+
+func (t FileType) String() string {
+ switch t {
+ case TypeManifest:
+ return "manifest"
+ case TypeJournal:
+ return "journal"
+ case TypeTable:
+ return "table"
+ case TypeTemp:
+ return "temp"
+ }
+ return fmt.Sprintf("<unknown:%d>", t)
+}
+
+// Common error.
+var (
+ ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
+ ErrLocked = errors.New("leveldb/storage: already locked")
+ ErrClosed = errors.New("leveldb/storage: closed")
+)
+
+// ErrCorrupted is the type that wraps errors that indicate corruption of
+// a file. Package storage has its own type instead of using
+// errors.ErrCorrupted to prevent circular import.
+type ErrCorrupted struct {
+ Fd FileDesc
+ Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+ if !e.Fd.Zero() {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+ }
+ return e.Err.Error()
+}
+
+// Syncer is the interface that wraps basic Sync method.
+type Syncer interface {
+ // Sync commits the current contents of the file to stable storage.
+ Sync() error
+}
+
+// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
+// methods.
+type Reader interface {
+ io.ReadSeeker
+ io.ReaderAt
+ io.Closer
+}
+
+// Writer is the interface that groups the basic Write, Sync and Close
+// methods.
+type Writer interface {
+ io.WriteCloser
+ Syncer
+}
+
+// Locker is the interface that wraps Unlock method.
+type Locker interface {
+ Unlock()
+}
+
+// FileDesc is a 'file descriptor'.
+type FileDesc struct {
+ Type FileType
+ Num int64
+}
+
+func (fd FileDesc) String() string {
+ switch fd.Type {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", fd.Num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", fd.Num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", fd.Num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", fd.Num)
+ default:
+ return fmt.Sprintf("%#x-%d", fd.Type, fd.Num)
+ }
+}
+
+// Zero returns true if fd == (FileDesc{}).
+func (fd FileDesc) Zero() bool {
+ return fd == (FileDesc{})
+}
+
+// FileDescOk returns true if fd is a valid 'file descriptor'.
+func FileDescOk(fd FileDesc) bool {
+ switch fd.Type {
+ case TypeManifest:
+ case TypeJournal:
+ case TypeTable:
+ case TypeTemp:
+ default:
+ return false
+ }
+ return fd.Num >= 0
+}
+
+// Storage is the storage. A storage instance must be safe for concurrent use.
+type Storage interface {
+ // Lock locks the storage. Any subsequent attempt to call Lock will fail
+ // until the last lock released.
+ // Caller should call Unlock method after use.
+ Lock() (Locker, error)
+
+ // Log logs a string. This is used for logging.
+ // An implementation may write to a file, stdout or simply do nothing.
+ Log(str string)
+
+ // SetMeta store 'file descriptor' that can later be acquired using GetMeta
+ // method. The 'file descriptor' should point to a valid file.
+ // SetMeta should be implemented in such way that changes should happen
+ // atomically.
+ SetMeta(fd FileDesc) error
+
+ // GetMeta returns 'file descriptor' stored in meta. The 'file descriptor'
+ // can be updated using SetMeta method.
+ // Returns os.ErrNotExist if meta doesn't store any 'file descriptor', or
+ // 'file descriptor' point to nonexistent file.
+ GetMeta() (FileDesc, error)
+
+ // List returns file descriptors that match the given file types.
+ // The file types may be OR'ed together.
+ List(ft FileType) ([]FileDesc, error)
+
+ // Open opens file with the given 'file descriptor' read-only.
+ // Returns os.ErrNotExist error if the file does not exist.
+ // Returns ErrClosed if the underlying storage is closed.
+ Open(fd FileDesc) (Reader, error)
+
+ // Create creates file with the given 'file descriptor', truncate if already
+ // exist and opens write-only.
+ // Returns ErrClosed if the underlying storage is closed.
+ Create(fd FileDesc) (Writer, error)
+
+ // Remove removes file with the given 'file descriptor'.
+ // Returns ErrClosed if the underlying storage is closed.
+ Remove(fd FileDesc) error
+
+ // Rename renames file from oldfd to newfd.
+ // Returns ErrClosed if the underlying storage is closed.
+ Rename(oldfd, newfd FileDesc) error
+
+ // Close closes the storage.
+ // It is valid to call Close multiple times. Other methods should not be
+ // called after the storage has been closed.
+ Close() error
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
new file mode 100644
index 000000000..81d18a531
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
@@ -0,0 +1,529 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sort"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// tFile holds basic information about a table.
+type tFile struct {
+ fd storage.FileDesc
+ seekLeft int32
+ size int64
+ imin, imax internalKey
+}
+
+// Returns true if given key is after largest key of this table.
+func (t *tFile) after(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0
+}
+
+// Returns true if given key is before smallest key of this table.
+func (t *tFile) before(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0
+}
+
+// Returns true if given key range overlaps with this table key range.
+func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool {
+ return !t.after(icmp, umin) && !t.before(icmp, umax)
+}
+
+// Cosumes one seek and return current seeks left.
+func (t *tFile) consumeSeek() int32 {
+ return atomic.AddInt32(&t.seekLeft, -1)
+}
+
+// Creates new tFile.
+func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile {
+ f := &tFile{
+ fd: fd,
+ size: size,
+ imin: imin,
+ imax: imax,
+ }
+
+ // We arrange to automatically compact this file after
+ // a certain number of seeks. Let's assume:
+ // (1) One seek costs 10ms
+ // (2) Writing or reading 1MB costs 10ms (100MB/s)
+ // (3) A compaction of 1MB does 25MB of IO:
+ // 1MB read from this level
+ // 10-12MB read from next level (boundaries may be misaligned)
+ // 10-12MB written to next level
+ // This implies that 25 seeks cost the same as the compaction
+ // of 1MB of data. I.e., one seek costs approximately the
+ // same as the compaction of 40KB of data. We are a little
+ // conservative and allow approximately one seek for every 16KB
+ // of data before triggering a compaction.
+ f.seekLeft = int32(size / 16384)
+ if f.seekLeft < 100 {
+ f.seekLeft = 100
+ }
+
+ return f
+}
+
+func tableFileFromRecord(r atRecord) *tFile {
+ return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax)
+}
+
+// tFiles hold multiple tFile.
+type tFiles []*tFile
+
+func (tf tFiles) Len() int { return len(tf) }
+func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
+
+func (tf tFiles) nums() string {
+ x := "[ "
+ for i, f := range tf {
+ if i != 0 {
+ x += ", "
+ }
+ x += fmt.Sprint(f.fd.Num)
+ }
+ x += " ]"
+ return x
+}
+
+// Returns true if i smallest key is less than j.
+// This used for sort by key in ascending order.
+func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
+ a, b := tf[i], tf[j]
+ n := icmp.Compare(a.imin, b.imin)
+ if n == 0 {
+ return a.fd.Num < b.fd.Num
+ }
+ return n < 0
+}
+
+// Returns true if i file number is greater than j.
+// This used for sort by file number in descending order.
+func (tf tFiles) lessByNum(i, j int) bool {
+ return tf[i].fd.Num > tf[j].fd.Num
+}
+
+// Sorts tables by key in ascending order.
+func (tf tFiles) sortByKey(icmp *iComparer) {
+ sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
+}
+
+// Sorts tables by file number in descending order.
+func (tf tFiles) sortByNum() {
+ sort.Sort(&tFilesSortByNum{tFiles: tf})
+}
+
+// Returns sum of all tables size.
+func (tf tFiles) size() (sum int64) {
+ for _, t := range tf {
+ sum += t.size
+ }
+ return sum
+}
+
+// Searches smallest index of tables whose its smallest
+// key is after or equal with given key.
+func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
+ return sort.Search(len(tf), func(i int) bool {
+ return icmp.Compare(tf[i].imin, ikey) >= 0
+ })
+}
+
+// Searches smallest index of tables whose its largest
+// key is after or equal with given key.
+func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
+ return sort.Search(len(tf), func(i int) bool {
+ return icmp.Compare(tf[i].imax, ikey) >= 0
+ })
+}
+
+// Returns true if given key range overlaps with one or more
+// tables key range. If unsorted is true then binary search will not be used.
+func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
+ if unsorted {
+ // Check against all files.
+ for _, t := range tf {
+ if t.overlaps(icmp, umin, umax) {
+ return true
+ }
+ }
+ return false
+ }
+
+ i := 0
+ if len(umin) > 0 {
+ // Find the earliest possible internal key for min.
+ i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
+ }
+ if i >= len(tf) {
+ // Beginning of range is after all files, so no overlap.
+ return false
+ }
+ return !tf[i].before(icmp, umax)
+}
+
+// Returns tables whose its key range overlaps with given key range.
+// Range will be expanded if ukey found hop across tables.
+// If overlapped is true then the search will be restarted if umax
+// expanded.
+// The dst content will be overwritten.
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
+ dst = dst[:0]
+ for i := 0; i < len(tf); {
+ t := tf[i]
+ if t.overlaps(icmp, umin, umax) {
+ if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
+ umin = t.imin.ukey()
+ dst = dst[:0]
+ i = 0
+ continue
+ } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
+ umax = t.imax.ukey()
+ // Restart search if it is overlapped.
+ if overlapped {
+ dst = dst[:0]
+ i = 0
+ continue
+ }
+ }
+
+ dst = append(dst, t)
+ }
+ i++
+ }
+
+ return dst
+}
+
+// Returns tables key range.
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
+ for i, t := range tf {
+ if i == 0 {
+ imin, imax = t.imin, t.imax
+ continue
+ }
+ if icmp.Compare(t.imin, imin) < 0 {
+ imin = t.imin
+ }
+ if icmp.Compare(t.imax, imax) > 0 {
+ imax = t.imax
+ }
+ }
+
+ return
+}
+
+// Creates iterator index from tables.
+func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
+ if slice != nil {
+ var start, limit int
+ if slice.Start != nil {
+ start = tf.searchMax(icmp, internalKey(slice.Start))
+ }
+ if slice.Limit != nil {
+ limit = tf.searchMin(icmp, internalKey(slice.Limit))
+ } else {
+ limit = tf.Len()
+ }
+ tf = tf[start:limit]
+ }
+ return iterator.NewArrayIndexer(&tFilesArrayIndexer{
+ tFiles: tf,
+ tops: tops,
+ icmp: icmp,
+ slice: slice,
+ ro: ro,
+ })
+}
+
+// Tables iterator index.
+type tFilesArrayIndexer struct {
+ tFiles
+ tops *tOps
+ icmp *iComparer
+ slice *util.Range
+ ro *opt.ReadOptions
+}
+
+func (a *tFilesArrayIndexer) Search(key []byte) int {
+ return a.searchMax(a.icmp, internalKey(key))
+}
+
+func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
+ if i == 0 || i == a.Len()-1 {
+ return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
+ }
+ return a.tops.newIterator(a.tFiles[i], nil, a.ro)
+}
+
+// Helper type for sortByKey.
+type tFilesSortByKey struct {
+ tFiles
+ icmp *iComparer
+}
+
+func (x *tFilesSortByKey) Less(i, j int) bool {
+ return x.lessByKey(x.icmp, i, j)
+}
+
+// Helper type for sortByNum.
+type tFilesSortByNum struct {
+ tFiles
+}
+
+func (x *tFilesSortByNum) Less(i, j int) bool {
+ return x.lessByNum(i, j)
+}
+
+// Table operations.
+type tOps struct {
+ s *session
+ noSync bool
+ cache *cache.Cache
+ bcache *cache.Cache
+ bpool *util.BufferPool
+}
+
+// Creates an empty table and returns table writer.
+func (t *tOps) create() (*tWriter, error) {
+ fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()}
+ fw, err := t.s.stor.Create(fd)
+ if err != nil {
+ return nil, err
+ }
+ return &tWriter{
+ t: t,
+ fd: fd,
+ w: fw,
+ tw: table.NewWriter(fw, t.s.o.Options),
+ }, nil
+}
+
+// Builds table from src iterator.
+func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
+ w, err := t.create()
+ if err != nil {
+ return
+ }
+
+ defer func() {
+ if err != nil {
+ w.drop()
+ }
+ }()
+
+ for src.Next() {
+ err = w.append(src.Key(), src.Value())
+ if err != nil {
+ return
+ }
+ }
+ err = src.Error()
+ if err != nil {
+ return
+ }
+
+ n = w.tw.EntriesLen()
+ f, err = w.finish()
+ return
+}
+
+// Opens table. It returns a cache handle, which should
+// be released after use.
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
+ ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
+ var r storage.Reader
+ r, err = t.s.stor.Open(f.fd)
+ if err != nil {
+ return 0, nil
+ }
+
+ var bcache *cache.NamespaceGetter
+ if t.bcache != nil {
+ bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
+ }
+
+ var tr *table.Reader
+ tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)
+ if err != nil {
+ r.Close()
+ return 0, nil
+ }
+ return 1, tr
+
+ })
+ if ch == nil && err == nil {
+ err = ErrClosed
+ }
+ return
+}
+
+// Finds key/value pair whose key is greater than or equal to the
+// given key.
+func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).Find(key, true, ro)
+}
+
+// Finds key that is greater than or equal to the given key.
+func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).FindKey(key, true, ro)
+}
+
+// Returns approximate offset of the given key.
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).OffsetOf(key)
+}
+
+// Creates an iterator from the given table.
+func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ ch, err := t.open(f)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
+ iter.SetReleaser(ch)
+ return iter
+}
+
+// Removes table from persistent storage. It waits until
+// no one use the the table.
+func (t *tOps) remove(f *tFile) {
+ t.cache.Delete(0, uint64(f.fd.Num), func() {
+ if err := t.s.stor.Remove(f.fd); err != nil {
+ t.s.logf("table@remove removing @%d %q", f.fd.Num, err)
+ } else {
+ t.s.logf("table@remove removed @%d", f.fd.Num)
+ }
+ if t.bcache != nil {
+ t.bcache.EvictNS(uint64(f.fd.Num))
+ }
+ })
+}
+
+// Closes the table ops instance. It will close all tables,
+// regadless still used or not.
+func (t *tOps) close() {
+ t.bpool.Close()
+ t.cache.Close()
+ if t.bcache != nil {
+ t.bcache.CloseWeak()
+ }
+}
+
+// Creates new initialized table ops instance.
+func newTableOps(s *session) *tOps {
+ var (
+ cacher cache.Cacher
+ bcache *cache.Cache
+ bpool *util.BufferPool
+ )
+ if s.o.GetOpenFilesCacheCapacity() > 0 {
+ cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
+ }
+ if !s.o.GetDisableBlockCache() {
+ var bcacher cache.Cacher
+ if s.o.GetBlockCacheCapacity() > 0 {
+ bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
+ }
+ bcache = cache.NewCache(bcacher)
+ }
+ if !s.o.GetDisableBufferPool() {
+ bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
+ }
+ return &tOps{
+ s: s,
+ noSync: s.o.GetNoSync(),
+ cache: cache.NewCache(cacher),
+ bcache: bcache,
+ bpool: bpool,
+ }
+}
+
+// tWriter wraps the table writer. It keep track of file descriptor
+// and added key range.
+type tWriter struct {
+ t *tOps
+
+ fd storage.FileDesc
+ w storage.Writer
+ tw *table.Writer
+
+ first, last []byte
+}
+
+// Append key/value pair to the table.
+func (w *tWriter) append(key, value []byte) error {
+ if w.first == nil {
+ w.first = append([]byte{}, key...)
+ }
+ w.last = append(w.last[:0], key...)
+ return w.tw.Append(key, value)
+}
+
+// Returns true if the table is empty.
+func (w *tWriter) empty() bool {
+ return w.first == nil
+}
+
+// Closes the storage.Writer.
+func (w *tWriter) close() {
+ if w.w != nil {
+ w.w.Close()
+ w.w = nil
+ }
+}
+
+// Finalizes the table and returns table file.
+func (w *tWriter) finish() (f *tFile, err error) {
+ defer w.close()
+ err = w.tw.Close()
+ if err != nil {
+ return
+ }
+ if !w.t.noSync {
+ err = w.w.Sync()
+ if err != nil {
+ return
+ }
+ }
+ f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
+ return
+}
+
+// Drops the table.
+func (w *tWriter) drop() {
+ w.close()
+ w.t.s.stor.Remove(w.fd)
+ w.t.s.reuseFileNum(w.fd.Num)
+ w.tw = nil
+ w.first = nil
+ w.last = nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
new file mode 100644
index 000000000..c5be420b3
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -0,0 +1,1134 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/snappy"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrReaderReleased = errors.New("leveldb/table: reader released")
+ ErrIterReleased = errors.New("leveldb/table: iterator released")
+)
+
+// ErrCorrupted describes error due to corruption. This error will be wrapped
+// with errors.ErrCorrupted.
+type ErrCorrupted struct {
+ Pos int64
+ Size int64
+ Kind string
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason)
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+type block struct {
+ bpool *util.BufferPool
+ bh blockHandle
+ data []byte
+ restartsLen int
+ restartsOffset int
+}
+
+func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
+ index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+ offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
+ offset++ // shared always zero, since this is a restart point
+ v1, n1 := binary.Uvarint(b.data[offset:]) // key length
+ _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
+ m := offset + n1 + n2
+ return cmp.Compare(b.data[m:m+int(v1)], key) > 0
+ }) + rstart - 1
+ if index < rstart {
+ // The smallest key is greater-than key sought.
+ index = rstart
+ }
+ offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+ return
+}
+
+func (b *block) restartIndex(rstart, rlimit, offset int) int {
+ return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
+ }) + rstart - 1
+}
+
+func (b *block) restartOffset(index int) int {
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+}
+
+func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) {
+ if offset >= b.restartsOffset {
+ if offset != b.restartsOffset {
+ err = &ErrCorrupted{Reason: "entries offset not aligned"}
+ }
+ return
+ }
+ v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length
+ v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length
+ v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length
+ m := n0 + n1 + n2
+ n = m + int(v1) + int(v2)
+ if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset {
+ err = &ErrCorrupted{Reason: "entries corrupted"}
+ return
+ }
+ key = b.data[offset+m : offset+m+int(v1)]
+ value = b.data[offset+m+int(v1) : offset+n]
+ nShared = int(v0)
+ return
+}
+
+func (b *block) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+type blockIter struct {
+ tr *Reader
+ block *block
+ blockReleaser util.Releaser
+ releaser util.Releaser
+ key, value []byte
+ offset int
+ // Previous offset, only filled by Next.
+ prevOffset int
+ prevNode []int
+ prevKeys []byte
+ restartIndex int
+ // Iterator direction.
+ dir dir
+ // Restart index slice range.
+ riStart int
+ riLimit int
+ // Offset slice range.
+ offsetStart int
+ offsetRealStart int
+ offsetLimit int
+ // Error.
+ err error
+}
+
+func (i *blockIter) sErr(err error) {
+ i.err = err
+ i.key = nil
+ i.value = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+}
+
+func (i *blockIter) reset() {
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.restartIndex = i.riStart
+ i.offset = i.offsetStart
+ i.dir = dirSOI
+ i.key = i.key[:0]
+ i.value = nil
+}
+
+func (i *blockIter) isFirst() bool {
+ switch i.dir {
+ case dirForward:
+ return i.prevOffset == i.offsetRealStart
+ case dirBackward:
+ return len(i.prevNode) == 1 && i.restartIndex == i.riStart
+ }
+ return false
+}
+
+func (i *blockIter) isLast() bool {
+ switch i.dir {
+ case dirForward, dirBackward:
+ return i.offset == i.offsetLimit
+ }
+ return false
+}
+
+func (i *blockIter) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.dir = dirSOI
+ return i.Next()
+}
+
+func (i *blockIter) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.dir = dirEOI
+ return i.Prev()
+}
+
+func (i *blockIter) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
+ if err != nil {
+ i.sErr(err)
+ return false
+ }
+ i.restartIndex = ri
+ i.offset = max(i.offsetStart, offset)
+ if i.dir == dirSOI || i.dir == dirEOI {
+ i.dir = dirForward
+ }
+ for i.Next() {
+ if i.tr.cmp.Compare(i.key, key) >= 0 {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *blockIter) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirSOI {
+ i.restartIndex = i.riStart
+ i.offset = i.offsetStart
+ } else if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ for i.offset < i.offsetRealStart {
+ key, value, nShared, n, err := i.block.entry(i.offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if n == 0 {
+ i.dir = dirEOI
+ return false
+ }
+ i.key = append(i.key[:nShared], key...)
+ i.value = value
+ i.offset += n
+ }
+ if i.offset >= i.offsetLimit {
+ i.dir = dirEOI
+ if i.offset != i.offsetLimit {
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+ }
+ return false
+ }
+ key, value, nShared, n, err := i.block.entry(i.offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if n == 0 {
+ i.dir = dirEOI
+ return false
+ }
+ i.key = append(i.key[:nShared], key...)
+ i.value = value
+ i.prevOffset = i.offset
+ i.offset += n
+ i.dir = dirForward
+ return true
+}
+
+func (i *blockIter) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ var ri int
+ if i.dir == dirForward {
+ // Change direction.
+ i.offset = i.prevOffset
+ if i.offset == i.offsetRealStart {
+ i.dir = dirSOI
+ return false
+ }
+ ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset)
+ i.dir = dirBackward
+ } else if i.dir == dirEOI {
+ // At the end of iterator.
+ i.restartIndex = i.riLimit
+ i.offset = i.offsetLimit
+ if i.offset == i.offsetRealStart {
+ i.dir = dirSOI
+ return false
+ }
+ ri = i.riLimit - 1
+ i.dir = dirBackward
+ } else if len(i.prevNode) == 1 {
+ // This is the end of a restart range.
+ i.offset = i.prevNode[0]
+ i.prevNode = i.prevNode[:0]
+ if i.restartIndex == i.riStart {
+ i.dir = dirSOI
+ return false
+ }
+ i.restartIndex--
+ ri = i.restartIndex
+ } else {
+ // In the middle of restart range, get from cache.
+ n := len(i.prevNode) - 3
+ node := i.prevNode[n:]
+ i.prevNode = i.prevNode[:n]
+ // Get the key.
+ ko := node[0]
+ i.key = append(i.key[:0], i.prevKeys[ko:]...)
+ i.prevKeys = i.prevKeys[:ko]
+ // Get the value.
+ vo := node[1]
+ vl := vo + node[2]
+ i.value = i.block.data[vo:vl]
+ i.offset = vl
+ return true
+ }
+ // Build entries cache.
+ i.key = i.key[:0]
+ i.value = nil
+ offset := i.block.restartOffset(ri)
+ if offset == i.offset {
+ ri--
+ if ri < 0 {
+ i.dir = dirSOI
+ return false
+ }
+ offset = i.block.restartOffset(ri)
+ }
+ i.prevNode = append(i.prevNode, offset)
+ for {
+ key, value, nShared, n, err := i.block.entry(offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if offset >= i.offsetRealStart {
+ if i.value != nil {
+ // Appends 3 variables:
+ // 1. Previous keys offset
+ // 2. Value offset in the data block
+ // 3. Value length
+ i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value))
+ i.prevKeys = append(i.prevKeys, i.key...)
+ }
+ i.value = value
+ }
+ i.key = append(i.key[:nShared], key...)
+ offset += n
+ // Stop if target offset reached.
+ if offset >= i.offset {
+ if offset != i.offset {
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+ return false
+ }
+
+ break
+ }
+ }
+ i.restartIndex = ri
+ i.offset = offset
+ return true
+}
+
+func (i *blockIter) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.key
+}
+
+func (i *blockIter) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.value
+}
+
+func (i *blockIter) Release() {
+ if i.dir != dirReleased {
+ i.tr = nil
+ i.block = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+ i.key = nil
+ i.value = nil
+ i.dir = dirReleased
+ if i.blockReleaser != nil {
+ i.blockReleaser.Release()
+ i.blockReleaser = nil
+ }
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ }
+}
+
+func (i *blockIter) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *blockIter) Valid() bool {
+ return i.err == nil && (i.dir == dirBackward || i.dir == dirForward)
+}
+
+func (i *blockIter) Error() error {
+ return i.err
+}
+
+type filterBlock struct {
+ bpool *util.BufferPool
+ data []byte
+ oOffset int
+ baseLg uint
+ filtersNum int
+}
+
+func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
+ i := int(offset >> b.baseLg)
+ if i < b.filtersNum {
+ o := b.data[b.oOffset+i*4:]
+ n := int(binary.LittleEndian.Uint32(o))
+ m := int(binary.LittleEndian.Uint32(o[4:]))
+ if n < m && m <= b.oOffset {
+ return filter.Contains(b.data[n:m], key)
+ } else if n == m {
+ return false
+ }
+ }
+ return true
+}
+
+func (b *filterBlock) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
+type indexIter struct {
+ *blockIter
+ tr *Reader
+ slice *util.Range
+ // Options
+ fillCache bool
+}
+
+func (i *indexIter) Get() iterator.Iterator {
+ value := i.Value()
+ if value == nil {
+ return nil
+ }
+ dataBH, n := decodeBlockHandle(value)
+ if n == 0 {
+ return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle"))
+ }
+
+ var slice *util.Range
+ if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
+ slice = i.slice
+ }
+ return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache)
+}
+
+// Reader is a table reader.
+type Reader struct {
+ mu sync.RWMutex
+ fd storage.FileDesc
+ reader io.ReaderAt
+ cache *cache.NamespaceGetter
+ err error
+ bpool *util.BufferPool
+ // Options
+ o *opt.Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ verifyChecksum bool
+
+ dataEnd int64
+ metaBH, indexBH, filterBH blockHandle
+ indexBlock *block
+ filterBlock *filterBlock
+}
+
+func (r *Reader) blockKind(bh blockHandle) string {
+ switch bh.offset {
+ case r.metaBH.offset:
+ return "meta-block"
+ case r.indexBH.offset:
+ return "index-block"
+ case r.filterBH.offset:
+ if r.filterBH.length > 0 {
+ return "filter-block"
+ }
+ }
+ return "data-block"
+}
+
+func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
+ return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
+}
+
+func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
+ return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason)
+}
+
+func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
+ if cerr, ok := err.(*ErrCorrupted); ok {
+ cerr.Pos = int64(bh.offset)
+ cerr.Size = int64(bh.length)
+ cerr.Kind = r.blockKind(bh)
+ return &errors.ErrCorrupted{Fd: r.fd, Err: cerr}
+ }
+ return err
+}
+
+func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) {
+ data := r.bpool.Get(int(bh.length + blockTrailerLen))
+ if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ if verifyChecksum {
+ n := bh.length + 1
+ checksum0 := binary.LittleEndian.Uint32(data[n:])
+ checksum1 := util.NewCRC(data[:n]).Value()
+ if checksum0 != checksum1 {
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1))
+ }
+ }
+
+ switch data[bh.length] {
+ case blockTypeNoCompression:
+ data = data[:bh.length]
+ case blockTypeSnappyCompression:
+ decLen, err := snappy.DecodedLen(data[:bh.length])
+ if err != nil {
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ decData := r.bpool.Get(decLen)
+ decData, err = snappy.Decode(decData, data[:bh.length])
+ r.bpool.Put(data)
+ if err != nil {
+ r.bpool.Put(decData)
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ data = decData
+ default:
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length]))
+ }
+ return data, nil
+}
+
+func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) {
+ data, err := r.readRawBlock(bh, verifyChecksum)
+ if err != nil {
+ return nil, err
+ }
+ restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
+ b := &block{
+ bpool: r.bpool,
+ bh: bh,
+ data: data,
+ restartsLen: restartsLen,
+ restartsOffset: len(data) - (restartsLen+1)*4,
+ }
+ return b, nil
+}
+
+func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *block
+ b, err = r.readBlock(bh, verifyChecksum)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*block)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readBlock(bh, verifyChecksum)
+ return b, b, err
+}
+
+func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
+ data, err := r.readRawBlock(bh, true)
+ if err != nil {
+ return nil, err
+ }
+ n := len(data)
+ if n < 5 {
+ return nil, r.newErrCorruptedBH(bh, "too short")
+ }
+ m := n - 5
+ oOffset := int(binary.LittleEndian.Uint32(data[m:]))
+ if oOffset > m {
+ return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset")
+ }
+ b := &filterBlock{
+ bpool: r.bpool,
+ data: data,
+ oOffset: oOffset,
+ baseLg: uint(data[n-1]),
+ filtersNum: (m - oOffset) / 4,
+ }
+ return b, nil
+}
+
+func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *filterBlock
+ b, err = r.readFilterBlock(bh)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*filterBlock)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readFilterBlock(bh)
+ return b, b, err
+}
+
+func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) {
+ if r.indexBlock == nil {
+ return r.readBlockCached(r.indexBH, true, fillCache)
+ }
+ return r.indexBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.filterBlock == nil {
+ return r.readFilterBlockCached(r.filterBH, fillCache)
+ }
+ return r.filterBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
+ bi := &blockIter{
+ tr: r,
+ block: b,
+ blockReleaser: bReleaser,
+ // Valid key should never be nil.
+ key: make([]byte, 0),
+ dir: dirSOI,
+ riStart: 0,
+ riLimit: b.restartsLen,
+ offsetStart: 0,
+ offsetRealStart: 0,
+ offsetLimit: b.restartsOffset,
+ }
+ if slice != nil {
+ if slice.Start != nil {
+ if bi.Seek(slice.Start) {
+ bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
+ bi.offsetStart = b.restartOffset(bi.riStart)
+ bi.offsetRealStart = bi.prevOffset
+ } else {
+ bi.riStart = b.restartsLen
+ bi.offsetStart = b.restartsOffset
+ bi.offsetRealStart = b.restartsOffset
+ }
+ }
+ if slice.Limit != nil {
+ if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
+ bi.offsetLimit = bi.prevOffset
+ bi.riLimit = bi.restartIndex + 1
+ }
+ }
+ bi.reset()
+ if bi.offsetStart > bi.offsetLimit {
+ bi.sErr(errors.New("leveldb/table: invalid slice range"))
+ }
+ }
+ return bi
+}
+
+func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ return r.newBlockIter(b, rel, slice, false)
+}
+
+func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ return r.getDataIter(dataBH, slice, verifyChecksum, fillCache)
+}
+
+// NewIterator creates an iterator from the table.
+//
+// Slice allows slicing the iterator to only contains keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// table. And a nil Range.Limit is treated as a key after all keys in
+// the table.
+//
+// The returned iterator is not safe for concurrent use and should be released
+// after use.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ fillCache := !ro.GetDontFillCache()
+ indexBlock, rel, err := r.getIndexBlock(fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ index := &indexIter{
+ blockIter: r.newBlockIter(indexBlock, rel, slice, true),
+ tr: r,
+ slice: slice,
+ fillCache: !ro.GetDontFillCache(),
+ }
+ return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader))
+}
+
+func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.getIndexBlock(true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
+ defer index.Release()
+
+ if !index.Seek(key) {
+ if err = index.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return nil, nil, r.err
+ }
+
+ // The filter should only used for exact match.
+ if filtered && r.filter != nil {
+ filterBlock, frel, ferr := r.getFilterBlock(true)
+ if ferr == nil {
+ if !filterBlock.contains(r.filter, dataBH.offset, key) {
+ frel.Release()
+ return nil, nil, ErrNotFound
+ }
+ frel.Release()
+ } else if !errors.IsCorrupted(ferr) {
+ return nil, nil, ferr
+ }
+ }
+
+ data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
+ if !data.Seek(key) {
+ data.Release()
+ if err = data.Error(); err != nil {
+ return
+ }
+
+ // The nearest greater-than key is the first key of the next block.
+ if !index.Next() {
+ if err = index.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+
+ dataBH, n = decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return nil, nil, r.err
+ }
+
+ data = r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
+ if !data.Next() {
+ data.Release()
+ if err = data.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+ }
+
+ // Key doesn't use block buffer, no need to copy the buffer.
+ rkey = data.Key()
+ if !noValue {
+ if r.bpool == nil {
+ value = data.Value()
+ } else {
+ // Value does use block buffer, and since the buffer will be
+ // recycled, it need to be copied.
+ value = append([]byte{}, data.Value()...)
+ }
+ }
+ data.Release()
+ return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such pair doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+ return r.find(key, filtered, ro, false)
+}
+
+// FindKey finds key that is greater than or equal to the given key.
+// It returns ErrNotFound if the table doesn't contain such key.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such key doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) {
+ rkey, _, err = r.find(key, filtered, ro, true)
+ return
+}
+
+// Get gets the value for the given key. It returns errors.ErrNotFound
+// if the table does not contain the key.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ rkey, value, err := r.find(key, false, ro, false)
+ if err == nil && r.cmp.Compare(rkey, key) != 0 {
+ value = nil
+ err = ErrNotFound
+ }
+ return
+}
+
+// OffsetOf returns approximate offset for the given key.
+//
+// It is safe to modify the contents of the argument after Get returns.
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
+ defer index.Release()
+ if index.Seek(key) {
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return
+ }
+ offset = int64(dataBH.offset)
+ return
+ }
+ err = index.Error()
+ if err == nil {
+ offset = r.dataEnd
+ }
+ return
+}
+
+// Release implements util.Releaser.
+// It also close the file if it is an io.Closer.
+func (r *Reader) Release() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if closer, ok := r.reader.(io.Closer); ok {
+ closer.Close()
+ }
+ if r.indexBlock != nil {
+ r.indexBlock.Release()
+ r.indexBlock = nil
+ }
+ if r.filterBlock != nil {
+ r.filterBlock.Release()
+ r.filterBlock = nil
+ }
+ r.reader = nil
+ r.cache = nil
+ r.bpool = nil
+ r.err = ErrReaderReleased
+}
+
+// NewReader creates a new initialized table reader for the file.
+// The fi, cache and bpool is optional and can be nil.
+//
+// The returned table reader instance is safe for concurrent use.
+func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
+ if f == nil {
+ return nil, errors.New("leveldb/table: nil file")
+ }
+
+ r := &Reader{
+ fd: fd,
+ reader: f,
+ cache: cache,
+ bpool: bpool,
+ o: o,
+ cmp: o.GetComparer(),
+ verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
+ }
+
+ if size < footerLen {
+ r.err = r.newErrCorrupted(0, size, "table", "too small")
+ return r, nil
+ }
+
+ footerPos := size - footerLen
+ var footer [footerLen]byte
+ if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
+ return nil, err
+ }
+ if string(footer[footerLen-len(magic):footerLen]) != magic {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
+ return r, nil
+ }
+
+ var n int
+ // Decode the metaindex block handle.
+ r.metaBH, n = decodeBlockHandle(footer[:])
+ if n == 0 {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
+ return r, nil
+ }
+
+ // Decode the index block handle.
+ r.indexBH, n = decodeBlockHandle(footer[n:])
+ if n == 0 {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
+ return r, nil
+ }
+
+ // Read metaindex block.
+ metaBlock, err := r.readBlock(r.metaBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ }
+ return nil, err
+ }
+
+ // Set data end.
+ r.dataEnd = int64(r.metaBH.offset)
+
+ // Read metaindex.
+ metaIter := r.newBlockIter(metaBlock, nil, nil, true)
+ for metaIter.Next() {
+ key := string(metaIter.Key())
+ if !strings.HasPrefix(key, "filter.") {
+ continue
+ }
+ fn := key[7:]
+ if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
+ r.filter = f0
+ } else {
+ for _, f0 := range o.GetAltFilters() {
+ if f0.Name() == fn {
+ r.filter = f0
+ break
+ }
+ }
+ }
+ if r.filter != nil {
+ filterBH, n := decodeBlockHandle(metaIter.Value())
+ if n == 0 {
+ continue
+ }
+ r.filterBH = filterBH
+ // Update data end.
+ r.dataEnd = int64(filterBH.offset)
+ break
+ }
+ }
+ metaIter.Release()
+ metaBlock.Release()
+
+ // Cache index and filter block locally, since we don't have global cache.
+ if cache == nil {
+ r.indexBlock, err = r.readBlock(r.indexBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ }
+ return nil, err
+ }
+ if r.filter != nil {
+ r.filterBlock, err = r.readFilterBlock(r.filterBH)
+ if err != nil {
+ if !errors.IsCorrupted(err) {
+ return nil, err
+ }
+
+ // Don't use filter then.
+ r.filter = nil
+ }
+ }
+ }
+
+ return r, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
new file mode 100644
index 000000000..beacdc1f0
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
@@ -0,0 +1,177 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package table allows read and write sorted key/value.
+package table
+
+import (
+ "encoding/binary"
+)
+
+/*
+Table:
+
+Table is consist of one or more data blocks, an optional filter block
+a metaindex block, an index block and a table footer. Metaindex block
+is a special block used to keep parameters of the table, such as filter
+block name and its block handle. Index block is a special block used to
+keep record of data blocks offset and length, index block use one as
+restart interval. The key used by index block are the last key of preceding
+block, shorter separator of adjacent blocks or shorter successor of the
+last key of the last block. Filter block is an optional block contains
+sequence of filter data generated by a filter generator.
+
+Table data structure:
+ + optional
+ /
+ +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+
+ | data block 1 | ... | data block n | filter block | metaindex block | index block | footer |
+ +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
+
+ Each block followed by a 5-bytes trailer contains compression type and checksum.
+
+Table block trailer:
+
+ +---------------------------+-------------------+
+ | compression type (1-byte) | checksum (4-byte) |
+ +---------------------------+-------------------+
+
+ The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression
+ type also included in the checksum.
+
+Table footer:
+
+ +------------------- 40-bytes -------------------+
+ / \
+ +------------------------+--------------------+------+-----------------+
+ | metaindex block handle / index block handle / ---- | magic (8-bytes) |
+ +------------------------+--------------------+------+-----------------+
+
+ The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/".
+
+NOTE: All fixed-length integer are little-endian.
+*/
+
+/*
+Block:
+
+Block is consist of one or more key/value entries and a block trailer.
+Block entry shares key prefix with its preceding key until a restart
+point reached. A block should contains at least one restart point.
+First restart point are always zero.
+
+Block data structure:
+
+ + restart point + restart point (depends on restart interval)
+ / /
+ +---------------+---------------+---------------+---------------+---------+
+ | block entry 1 | block entry 2 | ... | block entry n | trailer |
+ +---------------+---------------+---------------+---------------+---------+
+
+Key/value entry:
+
+ +---- key len ----+
+ / \
+ +-------+---------+-----------+---------+--------------------+--------------+----------------+
+ | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
+ +-----------------+---------------------+--------------------+--------------+----------------+
+
+ Block entry shares key prefix with its preceding key:
+ Conditions:
+ restart_interval=2
+ entry one : key=deck,value=v1
+ entry two : key=dock,value=v2
+ entry three: key=duck,value=v3
+ The entries will be encoded as follow:
+
+ + restart point (offset=0) + restart point (offset=16)
+ / /
+ +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+ | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" |
+ +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+ \ / \ / \ /
+ +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+
+
+ The block trailer will contains two restart points:
+
+ +------------+-----------+--------+
+ | 0 | 16 | 2 |
+ +------------+-----------+---+----+
+ \ / \
+ +-- restart points --+ + restart points length
+
+Block trailer:
+
+ +-- 4-bytes --+
+ / \
+ +-----------------+-----------------+-----------------+------------------------------+
+ | restart point 1 | .... | restart point n | restart points len (4-bytes) |
+ +-----------------+-----------------+-----------------+------------------------------+
+
+
+NOTE: All fixed-length integer are little-endian.
+*/
+
+/*
+Filter block:
+
+Filter block consist of one or more filter data and a filter block trailer.
+The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg.
+
+Filter block data structure:
+
+ + offset 1 + offset 2 + offset n + trailer offset
+ / / / /
+ +---------------+---------------+---------------+---------+
+ | filter data 1 | ... | filter data n | trailer |
+ +---------------+---------------+---------------+---------+
+
+Filter block trailer:
+
+ +- 4-bytes -+
+ / \
+ +---------------+---------------+---------------+-------------------------------+------------------+
+ | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
+ +-------------- +---------------+---------------+-------------------------------+------------------+
+
+
+NOTE: All fixed-length integer are little-endian.
+*/
+
+const (
+ blockTrailerLen = 5
+ footerLen = 48
+
+ magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"
+
+ // The block type gives the per-block compression format.
+ // These constants are part of the file format and should not be changed.
+ blockTypeNoCompression = 0
+ blockTypeSnappyCompression = 1
+
+ // Generate new filter every 2KB of data
+ filterBaseLg = 11
+ filterBase = 1 << filterBaseLg
+)
+
+type blockHandle struct {
+ offset, length uint64
+}
+
+func decodeBlockHandle(src []byte) (blockHandle, int) {
+ offset, n := binary.Uvarint(src)
+ length, m := binary.Uvarint(src[n:])
+ if n == 0 || m == 0 {
+ return blockHandle{}, 0
+ }
+ return blockHandle{offset, length}, n + m
+}
+
+func encodeBlockHandle(dst []byte, b blockHandle) int {
+ n := binary.PutUvarint(dst, b.offset)
+ m := binary.PutUvarint(dst[n:], b.length)
+ return n + m
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
new file mode 100644
index 000000000..b96b271d8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
@@ -0,0 +1,375 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/golang/snappy"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func sharedPrefixLen(a, b []byte) int {
+ i, n := 0, len(a)
+ if n > len(b) {
+ n = len(b)
+ }
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+type blockWriter struct {
+ restartInterval int
+ buf util.Buffer
+ nEntries int
+ prevKey []byte
+ restarts []uint32
+ scratch []byte
+}
+
+func (w *blockWriter) append(key, value []byte) {
+ nShared := 0
+ if w.nEntries%w.restartInterval == 0 {
+ w.restarts = append(w.restarts, uint32(w.buf.Len()))
+ } else {
+ nShared = sharedPrefixLen(w.prevKey, key)
+ }
+ n := binary.PutUvarint(w.scratch[0:], uint64(nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(value)))
+ w.buf.Write(w.scratch[:n])
+ w.buf.Write(key[nShared:])
+ w.buf.Write(value)
+ w.prevKey = append(w.prevKey[:0], key...)
+ w.nEntries++
+}
+
+func (w *blockWriter) finish() {
+ // Write restarts entry.
+ if w.nEntries == 0 {
+ // Must have at least one restart entry.
+ w.restarts = append(w.restarts, 0)
+ }
+ w.restarts = append(w.restarts, uint32(len(w.restarts)))
+ for _, x := range w.restarts {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+}
+
+func (w *blockWriter) reset() {
+ w.buf.Reset()
+ w.nEntries = 0
+ w.restarts = w.restarts[:0]
+}
+
+func (w *blockWriter) bytesLen() int {
+ restartsLen := len(w.restarts)
+ if restartsLen == 0 {
+ restartsLen = 1
+ }
+ return w.buf.Len() + 4*restartsLen + 4
+}
+
+type filterWriter struct {
+ generator filter.FilterGenerator
+ buf util.Buffer
+ nKeys int
+ offsets []uint32
+}
+
+func (w *filterWriter) add(key []byte) {
+ if w.generator == nil {
+ return
+ }
+ w.generator.Add(key)
+ w.nKeys++
+}
+
+func (w *filterWriter) flush(offset uint64) {
+ if w.generator == nil {
+ return
+ }
+ for x := int(offset / filterBase); x > len(w.offsets); {
+ w.generate()
+ }
+}
+
+func (w *filterWriter) finish() {
+ if w.generator == nil {
+ return
+ }
+ // Generate last keys.
+
+ if w.nKeys > 0 {
+ w.generate()
+ }
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ for _, x := range w.offsets {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+ w.buf.WriteByte(filterBaseLg)
+}
+
+func (w *filterWriter) generate() {
+ // Record offset.
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ // Generate filters.
+ if w.nKeys > 0 {
+ w.generator.Generate(&w.buf)
+ w.nKeys = 0
+ }
+}
+
+// Writer is a table writer.
+type Writer struct {
+ writer io.Writer
+ err error
+ // Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ compression opt.Compression
+ blockSize int
+
+ dataBlock blockWriter
+ indexBlock blockWriter
+ filterBlock filterWriter
+ pendingBH blockHandle
+ offset uint64
+ nEntries int
+ // Scratch allocated enough for 5 uvarint. Block writer should not use
+ // first 20-bytes since it will be used to encode block handle, which
+ // then passed to the block writer itself.
+ scratch [50]byte
+ comparerScratch []byte
+ compressionScratch []byte
+}
+
+func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
+ // Compress the buffer if necessary.
+ var b []byte
+ if compression == opt.SnappyCompression {
+ // Allocate scratch enough for compression and block trailer.
+ if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
+ w.compressionScratch = make([]byte, n)
+ }
+ compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
+ n := len(compressed)
+ b = compressed[:n+blockTrailerLen]
+ b[n] = blockTypeSnappyCompression
+ } else {
+ tmp := buf.Alloc(blockTrailerLen)
+ tmp[0] = blockTypeNoCompression
+ b = buf.Bytes()
+ }
+
+ // Calculate the checksum.
+ n := len(b) - 4
+ checksum := util.NewCRC(b[:n]).Value()
+ binary.LittleEndian.PutUint32(b[n:], checksum)
+
+ // Write the buffer to the file.
+ _, err = w.writer.Write(b)
+ if err != nil {
+ return
+ }
+ bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
+ w.offset += uint64(len(b))
+ return
+}
+
+func (w *Writer) flushPendingBH(key []byte) {
+ if w.pendingBH.length == 0 {
+ return
+ }
+ var separator []byte
+ if len(key) == 0 {
+ separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey)
+ } else {
+ separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key)
+ }
+ if separator == nil {
+ separator = w.dataBlock.prevKey
+ } else {
+ w.comparerScratch = separator
+ }
+ n := encodeBlockHandle(w.scratch[:20], w.pendingBH)
+ // Append the block handle to the index block.
+ w.indexBlock.append(separator, w.scratch[:n])
+ // Reset prev key of the data block.
+ w.dataBlock.prevKey = w.dataBlock.prevKey[:0]
+ // Clear pending block handle.
+ w.pendingBH = blockHandle{}
+}
+
+func (w *Writer) finishBlock() error {
+ w.dataBlock.finish()
+ bh, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+ if err != nil {
+ return err
+ }
+ w.pendingBH = bh
+ // Reset the data block.
+ w.dataBlock.reset()
+ // Flush the filter block.
+ w.filterBlock.flush(w.offset)
+ return nil
+}
+
+// Append appends key/value pair to the table. The keys passed must
+// be in increasing order.
+//
+// It is safe to modify the contents of the arguments after Append returns.
+func (w *Writer) Append(key, value []byte) error {
+ if w.err != nil {
+ return w.err
+ }
+ if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 {
+ w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key)
+ return w.err
+ }
+
+ w.flushPendingBH(key)
+ // Append key/value pair to the data block.
+ w.dataBlock.append(key, value)
+ // Add key to the filter block.
+ w.filterBlock.add(key)
+
+ // Finish the data block if block size target reached.
+ if w.dataBlock.bytesLen() >= w.blockSize {
+ if err := w.finishBlock(); err != nil {
+ w.err = err
+ return w.err
+ }
+ }
+ w.nEntries++
+ return nil
+}
+
+// BlocksLen returns number of blocks written so far.
+func (w *Writer) BlocksLen() int {
+ n := w.indexBlock.nEntries
+ if w.pendingBH.length > 0 {
+ // Includes the pending block.
+ n++
+ }
+ return n
+}
+
+// EntriesLen returns number of entries added so far.
+func (w *Writer) EntriesLen() int {
+ return w.nEntries
+}
+
+// BytesLen returns number of bytes written so far.
+func (w *Writer) BytesLen() int {
+ return int(w.offset)
+}
+
+// Close will finalize the table. Calling Append is not possible
+// after Close, but calling BlocksLen, EntriesLen and BytesLen
+// is still possible.
+func (w *Writer) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+
+ // Write the last data block. Or empty data block if there
+ // aren't any data blocks at all.
+ if w.dataBlock.nEntries > 0 || w.nEntries == 0 {
+ if err := w.finishBlock(); err != nil {
+ w.err = err
+ return w.err
+ }
+ }
+ w.flushPendingBH(nil)
+
+ // Write the filter block.
+ var filterBH blockHandle
+ w.filterBlock.finish()
+ if buf := &w.filterBlock.buf; buf.Len() > 0 {
+ filterBH, w.err = w.writeBlock(buf, opt.NoCompression)
+ if w.err != nil {
+ return w.err
+ }
+ }
+
+ // Write the metaindex block.
+ if filterBH.length > 0 {
+ key := []byte("filter." + w.filter.Name())
+ n := encodeBlockHandle(w.scratch[:20], filterBH)
+ w.dataBlock.append(key, w.scratch[:n])
+ }
+ w.dataBlock.finish()
+ metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+ if err != nil {
+ w.err = err
+ return w.err
+ }
+
+ // Write the index block.
+ w.indexBlock.finish()
+ indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression)
+ if err != nil {
+ w.err = err
+ return w.err
+ }
+
+ // Write the table footer.
+ footer := w.scratch[:footerLen]
+ for i := range footer {
+ footer[i] = 0
+ }
+ n := encodeBlockHandle(footer, metaindexBH)
+ encodeBlockHandle(footer[n:], indexBH)
+ copy(footer[footerLen-len(magic):], magic)
+ if _, err := w.writer.Write(footer); err != nil {
+ w.err = err
+ return w.err
+ }
+ w.offset += footerLen
+
+ w.err = errors.New("leveldb/table: writer is closed")
+ return nil
+}
+
+// NewWriter creates a new initialized table writer for the file.
+//
+// Table writer is not safe for concurrent use.
+func NewWriter(f io.Writer, o *opt.Options) *Writer {
+ w := &Writer{
+ writer: f,
+ cmp: o.GetComparer(),
+ filter: o.GetFilter(),
+ compression: o.GetCompression(),
+ blockSize: o.GetBlockSize(),
+ comparerScratch: make([]byte, 0),
+ }
+ // data block
+ w.dataBlock.restartInterval = o.GetBlockRestartInterval()
+ // The first 20-bytes are used for encoding block handle.
+ w.dataBlock.scratch = w.scratch[20:]
+ // index block
+ w.indexBlock.restartInterval = 1
+ w.indexBlock.scratch = w.scratch[20:]
+ // filter block
+ if w.filter != nil {
+ w.filterBlock.generator = w.filter.NewGenerator()
+ w.filterBlock.flush(0)
+ }
+ return w
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
new file mode 100644
index 000000000..e572a329e
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+func shorten(str string) string {
+ if len(str) <= 8 {
+ return str
+ }
+ return str[:3] + ".." + str[len(str)-3:]
+}
+
+var bunits = [...]string{"", "Ki", "Mi", "Gi"}
+
+func shortenb(bytes int) string {
+ i := 0
+ for ; bytes > 1024 && i < 4; i++ {
+ bytes /= 1024
+ }
+ return fmt.Sprintf("%d%sB", bytes, bunits[i])
+}
+
+func sshortenb(bytes int) string {
+ if bytes == 0 {
+ return "~"
+ }
+ sign := "+"
+ if bytes < 0 {
+ sign = "-"
+ bytes *= -1
+ }
+ i := 0
+ for ; bytes > 1024 && i < 4; i++ {
+ bytes /= 1024
+ }
+ return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i])
+}
+
+func sint(x int) string {
+ if x == 0 {
+ return "~"
+ }
+ sign := "+"
+ if x < 0 {
+ sign = "-"
+ x *= -1
+ }
+ return fmt.Sprintf("%s%d", sign, x)
+}
+
+func minInt(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func maxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+type fdSorter []storage.FileDesc
+
+func (p fdSorter) Len() int {
+ return len(p)
+}
+
+func (p fdSorter) Less(i, j int) bool {
+ return p[i].Num < p[j].Num
+}
+
+func (p fdSorter) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+func sortFds(fds []storage.FileDesc) {
+ sort.Sort(fdSorter(fds))
+}
+
+func ensureBuffer(b []byte, n int) []byte {
+ if cap(b) < n {
+ return make([]byte, n)
+ }
+ return b[:n]
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go
new file mode 100644
index 000000000..21de24255
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go
@@ -0,0 +1,293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util
+
+// This a copy of Go std bytes.Buffer with some modification
+// and some features stripped.
+
+import (
+ "bytes"
+ "io"
+)
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+}
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return "<nil>"
+ }
+ return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ switch {
+ case n < 0 || n > b.Len():
+ panic("leveldb/util.Buffer: truncation out of range")
+ case n == 0:
+ // Reuse buffer space.
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+n]
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Truncate(0)
+ }
+ if len(b.buf)+n > cap(b.buf) {
+ var buf []byte
+ if b.buf == nil && n <= len(b.bootstrap) {
+ buf = b.bootstrap[0:]
+ } else if m+n <= cap(b.buf)/2 {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= cap(b.buf) to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf[:], b.buf[b.off:])
+ buf = b.buf[:m]
+ } else {
+ // not enough space anywhere
+ buf = makeSlice(2*cap(b.buf) + n)
+ copy(buf, b.buf[b.off:])
+ }
+ b.buf = buf
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+m+n]
+ return b.off + m
+}
+
+// Alloc allocs n bytes of slice from the buffer, growing the buffer as
+// needed. If n is negative, Alloc will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Alloc(n int) []byte {
+ if n < 0 {
+ panic("leveldb/util.Buffer.Alloc: negative count")
+ }
+ m := b.grow(n)
+ return b.buf[m:]
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("leveldb/util.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[0:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with bytes.ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ m := b.grow(len(p))
+ return copy(b.buf[m:], p), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ // If buffer is empty, reset to recover space.
+ if b.off >= len(b.buf) {
+ b.Truncate(0)
+ }
+ for {
+ if free := cap(b.buf) - len(b.buf); free < MinRead {
+ // not enough space at end
+ newBuf := b.buf
+ if b.off+free < MinRead {
+ // not enough space using beginning of buffer;
+ // double buffer capacity
+ newBuf = makeSlice(2*cap(b.buf) + MinRead)
+ }
+ copy(newBuf, b.buf[b.off:])
+ b.buf = newBuf[:len(b.buf)-b.off]
+ b.off = 0
+ }
+ m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+ b.buf = b.buf[0 : len(b.buf)+m]
+ n += int64(m)
+ if e == io.EOF {
+ break
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+ return n, nil // err is EOF, so return nil explicitly
+}
+
+// makeSlice allocates a slice of size n. If the allocation fails, it panics
+// with bytes.ErrTooLarge.
+func makeSlice(n int) []byte {
+	// If the make fails, give a known error.
+	// The deferred recover converts whatever runtime panic make raises
+	// (out of memory, length out of range) into the well-known
+	// bytes.ErrTooLarge panic documented by the exported methods.
+	defer func() {
+		if recover() != nil {
+			panic(bytes.ErrTooLarge)
+		}
+	}()
+	return make([]byte, n)
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ if b.off < len(b.buf) {
+ nBytes := b.Len()
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("leveldb/util.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Truncate(0)
+ return
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// bytes.ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ m := b.grow(1)
+ b.buf[m] = c
+ return nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ if len(p) == 0 {
+ return
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+	m := b.Len()
+	if n > m {
+		n = m
+	}
+	// The returned slice aliases the internal buffer; no copy is made.
+	data := b.buf[b.off : b.off+n]
+	b.off += n
+	return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (c byte, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, io.EOF
+ }
+ c = b.buf[b.off]
+ b.off++
+ return c, nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+	i := bytes.IndexByte(b.buf[b.off:], delim)
+	end := b.off + i + 1
+	if i < 0 {
+		// Delimiter not found: consume everything left and report io.EOF,
+		// per the ReadBytes contract (err != nil iff data lacks delim).
+		end = len(b.buf)
+		err = io.EOF
+	}
+	line = b.buf[b.off:end]
+	b.off = end
+	return line, err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents. It is intended to prepare a Buffer to read existing data. It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
new file mode 100644
index 000000000..2f3db974a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
@@ -0,0 +1,239 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// buffer pairs a byte slice with a miss counter.
+// NOTE(review): not referenced by any code visible in this file — possibly
+// vestigial; confirm against the rest of the package before removing.
+type buffer struct {
+	b    []byte
+	miss int
+}
+
+// BufferPool is a 'buffer pool'.
+type BufferPool struct {
+	// pool[i] holds reusable buffers for size class i (see poolNum).
+	pool [6]chan []byte
+	// size, sizeMiss and sizeHalf hold per-class adaptive sizing state for
+	// the non-zero classes; updated atomically in Get.
+	size     [5]uint32
+	sizeMiss [5]uint32
+	sizeHalf [5]uint32
+	// baseline holds the size-class boundaries derived from baseline0.
+	baseline  [4]int
+	baseline0 int
+
+	// mu guards closed; closeC signals the drain goroutine to stop.
+	mu     sync.RWMutex
+	closed bool
+	closeC chan struct{}
+
+	// Statistics counters, updated atomically and reported by String.
+	get     uint32
+	put     uint32
+	half    uint32
+	less    uint32
+	equal   uint32
+	greater uint32
+	miss    uint32
+}
+
+// poolNum maps a requested size n to an index into p.pool: 0 for sizes in
+// (baseline0/2, baseline0], 1..len(baseline) for the other baseline
+// classes, and len(baseline)+1 (the overflow class) for anything larger.
+func (p *BufferPool) poolNum(n int) int {
+	if n <= p.baseline0 && n > p.baseline0/2 {
+		return 0
+	}
+	for i, x := range p.baseline {
+		if n <= x {
+			return i + 1
+		}
+	}
+	return len(p.baseline) + 1
+}
+
+// Get returns buffer with length of n.
+// A nil or closed pool falls back to a plain allocation. Otherwise a
+// pooled buffer of the matching size class is reused when possible;
+// statistics counters record how well the pooled capacity matched n.
+func (p *BufferPool) Get(n int) []byte {
+	if p == nil {
+		return make([]byte, n)
+	}
+
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	if p.closed {
+		return make([]byte, n)
+	}
+
+	atomic.AddUint32(&p.get, 1)
+
+	poolNum := p.poolNum(n)
+	pool := p.pool[poolNum]
+	if poolNum == 0 {
+		// Fast path.
+		select {
+		case b := <-pool:
+			switch {
+			case cap(b) > n:
+				// Pooled buffer is at least twice as big as needed:
+				// count a "half" hit, return it to the pool and
+				// allocate fresh instead of wasting the capacity.
+				if cap(b)-n >= n {
+					atomic.AddUint32(&p.half, 1)
+					select {
+					case pool <- b:
+					default:
+					}
+					return make([]byte, n)
+				} else {
+					atomic.AddUint32(&p.less, 1)
+					return b[:n]
+				}
+			case cap(b) == n:
+				atomic.AddUint32(&p.equal, 1)
+				return b[:n]
+			default:
+				// Too small; drop it and fall through to allocate.
+				atomic.AddUint32(&p.greater, 1)
+			}
+		default:
+			atomic.AddUint32(&p.miss, 1)
+		}
+
+		return make([]byte, n, p.baseline0)
+	} else {
+		// Non-zero classes track an adaptive target capacity in sizePtr.
+		sizePtr := &p.size[poolNum-1]
+
+		select {
+		case b := <-pool:
+			switch {
+			case cap(b) > n:
+				if cap(b)-n >= n {
+					atomic.AddUint32(&p.half, 1)
+					// After 20 consecutive "half" hits, shrink the
+					// class's target capacity and drop the buffer.
+					sizeHalfPtr := &p.sizeHalf[poolNum-1]
+					if atomic.AddUint32(sizeHalfPtr, 1) == 20 {
+						atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
+						atomic.StoreUint32(sizeHalfPtr, 0)
+					} else {
+						select {
+						case pool <- b:
+						default:
+						}
+					}
+					return make([]byte, n)
+				} else {
+					atomic.AddUint32(&p.less, 1)
+					return b[:n]
+				}
+			case cap(b) == n:
+				atomic.AddUint32(&p.equal, 1)
+				return b[:n]
+			default:
+				atomic.AddUint32(&p.greater, 1)
+				// Keep an under-sized buffer only if it still meets
+				// the class's current target capacity.
+				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
+					select {
+					case pool <- b:
+					default:
+					}
+				}
+			}
+		default:
+			atomic.AddUint32(&p.miss, 1)
+		}
+
+		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
+			if size == 0 {
+				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
+			} else {
+				// After 20 requests larger than the target, raise it.
+				sizeMissPtr := &p.sizeMiss[poolNum-1]
+				if atomic.AddUint32(sizeMissPtr, 1) == 20 {
+					atomic.StoreUint32(sizePtr, uint32(n))
+					atomic.StoreUint32(sizeMissPtr, 0)
+				}
+			}
+			return make([]byte, n)
+		} else {
+			// Allocate at the target capacity so the buffer is a good
+			// candidate for reuse when it is Put back.
+			return make([]byte, n, size)
+		}
+	}
+}
+
+// Put adds given buffer to the pool.
+// The buffer is routed to the size class matching its capacity; if that
+// class's channel is full the buffer is dropped for the GC to collect.
+// Put is a no-op on a nil or closed pool.
+func (p *BufferPool) Put(b []byte) {
+	if p == nil {
+		return
+	}
+
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	if p.closed {
+		return
+	}
+
+	atomic.AddUint32(&p.put, 1)
+
+	pool := p.pool[p.poolNum(cap(b))]
+	select {
+	case pool <- b:
+	default:
+	}
+
+}
+
+// Close marks the pool closed and signals the drain goroutine to exit.
+// closeC is created with capacity 1 (see NewBufferPool), so the send
+// below does not block even if drain is mid-tick. Safe on a nil pool;
+// idempotent thanks to the closed flag.
+func (p *BufferPool) Close() {
+	if p == nil {
+		return
+	}
+
+	p.mu.Lock()
+	if !p.closed {
+		p.closed = true
+		p.closeC <- struct{}{}
+	}
+	p.mu.Unlock()
+}
+
+func (p *BufferPool) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+
+ return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
+ p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
+}
+
+// drain runs in its own goroutine, started by NewBufferPool. Every two
+// seconds it discards at most one buffer from each pool channel so idle
+// pools shrink over time. When Close signals via closeC it closes all
+// channels and returns.
+func (p *BufferPool) drain() {
+	ticker := time.NewTicker(2 * time.Second)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			// Non-blocking: drop at most one buffer per class.
+			for _, ch := range p.pool {
+				select {
+				case <-ch:
+				default:
+				}
+			}
+		case <-p.closeC:
+			close(p.closeC)
+			for _, ch := range p.pool {
+				close(ch)
+			}
+			return
+		}
+	}
+}
+
+// NewBufferPool creates a new initialized 'buffer pool'.
+// baseline is the reference buffer size; the internal size classes cover
+// baseline/4, baseline/2, baseline, baseline*2 and baseline*4, plus one
+// overflow class. Panics if baseline <= 0.
+func NewBufferPool(baseline int) *BufferPool {
+	if baseline <= 0 {
+		panic("baseline can't be <= 0")
+	}
+	p := &BufferPool{
+		baseline0: baseline,
+		baseline:  [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
+		closeC:    make(chan struct{}, 1),
+	}
+	// Note: cap here shadows the builtin; it is just the channel capacity
+	// for each size class.
+	for i, cap := range []int{2, 2, 4, 4, 2, 1} {
+		p.pool[i] = make(chan []byte, cap)
+	}
+	go p.drain()
+	return p
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go
new file mode 100644
index 000000000..631c9d610
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go
@@ -0,0 +1,30 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "hash/crc32"
+)
+
+var table = crc32.MakeTable(crc32.Castagnoli)
+
+// CRC is a CRC-32 checksum computed using Castagnoli's polynomial.
+type CRC uint32
+
+// NewCRC creates a new crc based on the given bytes.
+func NewCRC(b []byte) CRC {
+ return CRC(0).Update(b)
+}
+
+// Update updates the crc with the given bytes.
+func (c CRC) Update(b []byte) CRC {
+ return CRC(crc32.Update(uint32(c), table, b))
+}
+
+// Value returns a masked crc.
+// The crc is rotated right by 15 bits and offset by a constant — the
+// "masked" CRC scheme used by the LevelDB file format, so that computing
+// a CRC over data that itself contains CRCs stays well-distributed.
+func (c CRC) Value() uint32 {
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go
new file mode 100644
index 000000000..7f3fa4e2c
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "encoding/binary"
+)
+
+// Hash return hash of the given data.
+// The seed lets callers derive multiple independent hash values from the
+// same input. The result is deterministic for a given (data, seed) pair.
+func Hash(data []byte, seed uint32) uint32 {
+	// Similar to murmur hash
+	const (
+		m = uint32(0xc6a4a793)
+		r = uint32(24)
+	)
+	var (
+		h = seed ^ (uint32(len(data)) * m)
+		i int
+	)
+
+	// Consume the input one little-endian uint32 at a time.
+	for n := len(data) - len(data)%4; i < n; i += 4 {
+		h += binary.LittleEndian.Uint32(data[i:])
+		h *= m
+		h ^= (h >> 16)
+	}
+
+	// Fold in the 0-3 remaining tail bytes.
+	switch len(data) - i {
+	default:
+		panic("not reached")
+	case 3:
+		h += uint32(data[i+2]) << 16
+		fallthrough
+	case 2:
+		h += uint32(data[i+1]) << 8
+		fallthrough
+	case 1:
+		h += uint32(data[i])
+		h *= m
+		h ^= (h >> r)
+	case 0:
+	}
+
+	return h
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go
new file mode 100644
index 000000000..85159583d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+// Range is a key range.
+type Range struct {
+	// Start of the key range, included in the range.
+	Start []byte
+
+	// Limit of the key range, not included in the range.
+	Limit []byte
+}
+
+// BytesPrefix returns a key range that satisfies the given prefix.
+// This is only applicable to the standard 'bytes comparer'.
+func BytesPrefix(prefix []byte) *Range {
+	var limit []byte
+	// Find the last byte that can be incremented; everything after it is
+	// truncated. If every byte is 0xff (or prefix is empty), limit stays
+	// nil, i.e. the range is unbounded above.
+	for i := len(prefix) - 1; i >= 0; i-- {
+		c := prefix[i]
+		if c < 0xff {
+			limit = make([]byte, i+1)
+			copy(limit, prefix)
+			limit[i] = c + 1
+			break
+		}
+	}
+	return &Range{prefix, limit}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
new file mode 100644
index 000000000..f35976865
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package util provides utilities used throughout leveldb.
+package util
+
+import (
+ "errors"
+)
+
+var (
+	// ErrReleased is returned when an already-released resource is used.
+	// NOTE(review): the message misspells "released" as "relesed"; callers
+	// may match on the exact text, so fixing it could be a breaking change.
+	ErrReleased = errors.New("leveldb: resource already relesed")
+	// ErrHasReleaser is returned when a releaser is already set.
+	ErrHasReleaser = errors.New("leveldb: releaser already defined")
+)
+
+// Releaser is the interface that wraps the basic Release method.
+type Releaser interface {
+	// Release releases associated resources. Release should always succeed
+	// and can be called multiple times without causing error.
+	Release()
+}
+
+// ReleaseSetter is the interface that wraps the basic SetReleaser method.
+type ReleaseSetter interface {
+	// SetReleaser associates the given releaser to the resources. The
+	// releaser will be called once the corresponding resources are
+	// released. Calling SetReleaser with nil will clear the releaser.
+	//
+	// This will panic if a releaser is already present or the
+	// corresponding resource is already released. The releaser should be
+	// cleared first before assigning a new one.
+	SetReleaser(releaser Releaser)
+}
+
+// BasicReleaser provides basic implementation of Releaser and ReleaseSetter.
+type BasicReleaser struct {
+ releaser Releaser
+ released bool
+}
+
+// Released returns whether Release method already called.
+func (r *BasicReleaser) Released() bool {
+ return r.released
+}
+
+// Release implements Releaser.Release.
+func (r *BasicReleaser) Release() {
+ if !r.released {
+ if r.releaser != nil {
+ r.releaser.Release()
+ r.releaser = nil
+ }
+ r.released = true
+ }
+}
+
+// SetReleaser implements ReleaseSetter.SetReleaser.
+func (r *BasicReleaser) SetReleaser(releaser Releaser) {
+ if r.released {
+ panic(ErrReleased)
+ }
+ if r.releaser != nil && releaser != nil {
+ panic(ErrHasReleaser)
+ }
+ r.releaser = releaser
+}
+
+type NoopReleaser struct{}
+
+func (NoopReleaser) Release() {}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
new file mode 100644
index 000000000..c60f12c20
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
@@ -0,0 +1,524 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type tSet struct {
+ level int
+ table *tFile
+}
+
+type version struct {
+ s *session
+
+ levels []tFiles
+
+ // Level that should be compacted next and its compaction score.
+ // Score < 1 means compaction is not strictly needed. These fields
+ // are initialized by computeCompaction()
+ cLevel int
+ cScore float64
+
+ cSeek unsafe.Pointer
+
+ closing bool
+ ref int
+ // Succeeding version.
+ next *version
+}
+
+func newVersion(s *session) *version {
+ return &version{s: s}
+}
+
+// releaseNB decrements the version's reference count without taking any
+// lock; the caller must hold s.vmu (see release). Once the count reaches
+// zero, table files that are not carried over into the succeeding version
+// are removed, and the successor's reference is dropped in turn.
+func (v *version) releaseNB() {
+	v.ref--
+	if v.ref > 0 {
+		return
+	}
+	if v.ref < 0 {
+		panic("negative version ref")
+	}
+
+	// Collect the file numbers still referenced by the next version.
+	nextTables := make(map[int64]bool)
+	for _, tt := range v.next.levels {
+		for _, t := range tt {
+			num := t.fd.Num
+			nextTables[num] = true
+		}
+	}
+
+	// Remove tables only this version referenced.
+	for _, tt := range v.levels {
+		for _, t := range tt {
+			num := t.fd.Num
+			if _, ok := nextTables[num]; !ok {
+				v.s.tops.remove(t)
+			}
+		}
+	}
+
+	v.next.releaseNB()
+	v.next = nil
+}
+
+func (v *version) release() {
+ v.s.vmu.Lock()
+ v.releaseNB()
+ v.s.vmu.Unlock()
+}
+
+// walkOverlapping calls f for every table that may contain ikey: first the
+// optional aux tables (reported as pseudo-level -1), then each real level
+// in order. Level-0 files may overlap each other, so all of them are
+// tested; higher levels are sorted, so at most one candidate per level is
+// tried. lf, if non-nil, runs after each level. Returning false from f or
+// lf stops the walk.
+func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+	ukey := ikey.ukey()
+
+	// Aux level.
+	if aux != nil {
+		for _, t := range aux {
+			if t.overlaps(v.s.icmp, ukey, ukey) {
+				if !f(-1, t) {
+					return
+				}
+			}
+		}
+
+		if lf != nil && !lf(-1) {
+			return
+		}
+	}
+
+	// Walk tables level-by-level.
+	for level, tables := range v.levels {
+		if len(tables) == 0 {
+			continue
+		}
+
+		if level == 0 {
+			// Level-0 files may overlap each other. Find all files that
+			// overlap ukey.
+			for _, t := range tables {
+				if t.overlaps(v.s.icmp, ukey, ukey) {
+					if !f(level, t) {
+						return
+					}
+				}
+			}
+		} else {
+			// Sorted level: binary-search the first table whose max key
+			// is >= ikey, then confirm ukey is not below its min key.
+			if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) {
+				t := tables[i]
+				if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+					if !f(level, t) {
+						return
+					}
+				}
+			}
+		}
+
+		if lf != nil && !lf(level) {
+			return
+		}
+	}
+}
+
+// get looks ikey up in this version's tables, newest level first. When
+// noValue is true only the key is located and value stays nil. tcomp
+// reports whether a seek-triggered compaction was proposed via cSeek.
+// Returns ErrNotFound when no live entry for the key exists.
+func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+	if v.closing {
+		return nil, false, ErrClosed
+	}
+
+	ukey := ikey.ukey()
+
+	var (
+		// tset remembers the first table searched; tseek becomes true if a
+		// second table has to be consulted, in which case the first one is
+		// charged a seek (see bottom of function).
+		tset  *tSet
+		tseek bool
+
+		// Level-0.
+		zfound bool
+		zseq   uint64
+		zkt    keyType
+		zval   []byte
+	)
+
+	err = ErrNotFound
+
+	// Since entries never hop across level, finding key/value
+	// in smaller level make later levels irrelevant.
+	v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool {
+		if level >= 0 && !tseek {
+			if tset == nil {
+				tset = &tSet{level, t}
+			} else {
+				tseek = true
+			}
+		}
+
+		var (
+			fikey, fval []byte
+			ferr        error
+		)
+		if noValue {
+			fikey, ferr = v.s.tops.findKey(t, ikey, ro)
+		} else {
+			fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
+		}
+
+		switch ferr {
+		case nil:
+		case ErrNotFound:
+			// Not in this table; keep walking.
+			return true
+		default:
+			err = ferr
+			return false
+		}
+
+		if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil {
+			if v.s.icmp.uCompare(ukey, fukey) == 0 {
+				// Level <= 0 may overlaps each-other.
+				if level <= 0 {
+					// Track the entry with the highest sequence number;
+					// it is resolved by the per-level callback below.
+					if fseq >= zseq {
+						zfound = true
+						zseq = fseq
+						zkt = fkt
+						zval = fval
+					}
+				} else {
+					switch fkt {
+					case keyTypeVal:
+						value = fval
+						err = nil
+					case keyTypeDel:
+					default:
+						panic("leveldb: invalid internalKey type")
+					}
+					return false
+				}
+			}
+		} else {
+			err = fkerr
+			return false
+		}
+
+		return true
+	}, func(level int) bool {
+		// End-of-level: resolve the best level-0/aux candidate, if any.
+		if zfound {
+			switch zkt {
+			case keyTypeVal:
+				value = zval
+				err = nil
+			case keyTypeDel:
+			default:
+				panic("leveldb: invalid internalKey type")
+			}
+			return false
+		}
+
+		return true
+	})
+
+	// Charge the first searched table a seek; once its allowance is spent,
+	// propose it for compaction (only one proposal is kept in cSeek).
+	if tseek && tset.table.consumeSeek() <= 0 {
+		tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+	}
+
+	return
+}
+
+func (v *version) sampleSeek(ikey internalKey) (tcomp bool) {
+ var tset *tSet
+
+ v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool {
+ if tset == nil {
+ tset = &tSet{level, t}
+ return true
+ }
+ if tset.table.consumeSeek() <= 0 {
+ tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+ }
+ return false
+ }, nil)
+
+ return
+}
+
+func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
+ strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader)
+ for level, tables := range v.levels {
+ if level == 0 {
+ // Merge all level zero files together since they may overlap.
+ for _, t := range tables {
+ its = append(its, v.s.tops.newIterator(t, slice, ro))
+ }
+ } else if len(tables) != 0 {
+ its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict))
+ }
+ }
+ return
+}
+
+func (v *version) newStaging() *versionStaging {
+ return &versionStaging{base: v}
+}
+
+// Spawn a new version based on this version.
+func (v *version) spawn(r *sessionRecord) *version {
+ staging := v.newStaging()
+ staging.commit(r)
+ return staging.finish()
+}
+
+func (v *version) fillRecord(r *sessionRecord) {
+ for level, tables := range v.levels {
+ for _, t := range tables {
+ r.addTableFile(level, t)
+ }
+ }
+}
+
+func (v *version) tLen(level int) int {
+ if level < len(v.levels) {
+ return len(v.levels[level])
+ }
+ return 0
+}
+
+func (v *version) offsetOf(ikey internalKey) (n int64, err error) {
+ for level, tables := range v.levels {
+ for _, t := range tables {
+ if v.s.icmp.Compare(t.imax, ikey) <= 0 {
+ // Entire file is before "ikey", so just add the file size
+ n += t.size
+ } else if v.s.icmp.Compare(t.imin, ikey) > 0 {
+ // Entire file is after "ikey", so ignore
+ if level > 0 {
+ // Files other than level 0 are sorted by meta->min, so
+ // no further files in this level will contain data for
+ // "ikey".
+ break
+ }
+ } else {
+ // "ikey" falls in the range for this table. Add the
+ // approximate offset of "ikey" within the table.
+ if m, err := v.s.tops.offsetOf(t, ikey); err == nil {
+ n += m
+ } else {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ return
+}
+
+func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) {
+ if maxLevel > 0 {
+ if len(v.levels) == 0 {
+ return maxLevel
+ }
+ if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) {
+ var overlaps tFiles
+ for ; level < maxLevel; level++ {
+ if pLevel := level + 1; pLevel >= len(v.levels) {
+ return maxLevel
+ } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) {
+ break
+ }
+ if gpLevel := level + 2; gpLevel < len(v.levels) {
+ overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false)
+ if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) {
+ break
+ }
+ }
+ }
+ }
+ }
+ return
+}
+
+func (v *version) computeCompaction() {
+ // Precomputed best level for next compaction
+ bestLevel := int(-1)
+ bestScore := float64(-1)
+
+ statFiles := make([]int, len(v.levels))
+ statSizes := make([]string, len(v.levels))
+ statScore := make([]string, len(v.levels))
+ statTotSize := int64(0)
+
+ for level, tables := range v.levels {
+ var score float64
+ size := tables.size()
+ if level == 0 {
+ // We treat level-0 specially by bounding the number of files
+ // instead of number of bytes for two reasons:
+ //
+ // (1) With larger write-buffer sizes, it is nice not to do too
+ // many level-0 compaction.
+ //
+ // (2) The files in level-0 are merged on every read and
+ // therefore we wish to avoid too many files when the individual
+ // file size is small (perhaps because of a small write-buffer
+ // setting, or very high compression ratios, or lots of
+ // overwrites/deletions).
+ score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger())
+ } else {
+ score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level))
+ }
+
+ if score > bestScore {
+ bestLevel = level
+ bestScore = score
+ }
+
+ statFiles[level] = len(tables)
+ statSizes[level] = shortenb(int(size))
+ statScore[level] = fmt.Sprintf("%.2f", score)
+ statTotSize += size
+ }
+
+ v.cLevel = bestLevel
+ v.cScore = bestScore
+
+ v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore)
+}
+
+func (v *version) needCompaction() bool {
+ return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil
+}
+
+type tablesScratch struct {
+ added map[int64]atRecord
+ deleted map[int64]struct{}
+}
+
+type versionStaging struct {
+ base *version
+ levels []tablesScratch
+}
+
+// getScratch returns the scratch entry for level, growing p.levels as
+// needed so the returned pointer is always valid.
+func (p *versionStaging) getScratch(level int) *tablesScratch {
+	if level >= len(p.levels) {
+		newLevels := make([]tablesScratch, level+1)
+		copy(newLevels, p.levels)
+		p.levels = newLevels
+	}
+	return &(p.levels[level])
+}
+
+// commit applies the added and deleted table records of r to the staging
+// scratch. A deletion cancels a pending addition of the same file number
+// and vice versa, so records may be committed over multiple calls before
+// finish builds the resulting version.
+func (p *versionStaging) commit(r *sessionRecord) {
+	// Deleted tables.
+	for _, r := range r.deletedTables {
+		scratch := p.getScratch(r.level)
+		// Only record the deletion if the base version actually has
+		// tables at this level.
+		if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 {
+			if scratch.deleted == nil {
+				scratch.deleted = make(map[int64]struct{})
+			}
+			scratch.deleted[r.num] = struct{}{}
+		}
+		if scratch.added != nil {
+			delete(scratch.added, r.num)
+		}
+	}
+
+	// New tables.
+	for _, r := range r.addedTables {
+		scratch := p.getScratch(r.level)
+		if scratch.added == nil {
+			scratch.added = make(map[int64]atRecord)
+		}
+		scratch.added[r.num] = r
+		if scratch.deleted != nil {
+			delete(scratch.deleted, r.num)
+		}
+	}
+}
+
+// finish materializes the staged changes into a new version: for each
+// level it merges the base tables (minus deletions) with the staged
+// additions, sorts the result, trims empty trailing levels and computes
+// the new version's compaction score.
+// NOTE(review): baseTabels is a misspelling of baseTables; kept as-is
+// since renaming a local is a pure style change in vendored code.
+func (p *versionStaging) finish() *version {
+	// Build new version.
+	nv := newVersion(p.base.s)
+	numLevel := len(p.levels)
+	if len(p.base.levels) > numLevel {
+		numLevel = len(p.base.levels)
+	}
+	nv.levels = make([]tFiles, numLevel)
+	for level := 0; level < numLevel; level++ {
+		var baseTabels tFiles
+		if level < len(p.base.levels) {
+			baseTabels = p.base.levels[level]
+		}
+
+		if level < len(p.levels) {
+			scratch := p.levels[level]
+
+			var nt tFiles
+			// Prealloc list if possible.
+			if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 {
+				nt = make(tFiles, 0, n)
+			}
+
+			// Base tables.
+			for _, t := range baseTabels {
+				if _, ok := scratch.deleted[t.fd.Num]; ok {
+					continue
+				}
+				if _, ok := scratch.added[t.fd.Num]; ok {
+					continue
+				}
+				nt = append(nt, t)
+			}
+
+			// New tables.
+			for _, r := range scratch.added {
+				nt = append(nt, tableFileFromRecord(r))
+			}
+
+			if len(nt) != 0 {
+				// Sort tables.
+				if level == 0 {
+					nt.sortByNum()
+				} else {
+					nt.sortByKey(p.base.s.icmp)
+				}
+
+				nv.levels[level] = nt
+			}
+		} else {
+			// No staged changes at this level; share the base slice.
+			nv.levels[level] = baseTabels
+		}
+	}
+
+	// Trim levels.
+	n := len(nv.levels)
+	for ; n > 0 && nv.levels[n-1] == nil; n-- {
+	}
+	nv.levels = nv.levels[:n]
+
+	// Compute compaction score for new version.
+	nv.computeCompaction()
+
+	return nv
+}
+
+type versionReleaser struct {
+ v *version
+ once bool
+}
+
+// Release drops the version reference exactly once; subsequent calls are
+// no-ops. It takes s.vmu, as releaseNB requires.
+func (vr *versionReleaser) Release() {
+	v := vr.v
+	v.s.vmu.Lock()
+	if !vr.once {
+		v.releaseNB()
+		vr.once = true
+	}
+	v.s.vmu.Unlock()
+}