author  Anton Evangelatov <anton.evangelatov@gmail.com>  2018-02-23 17:56:08 +0800
committer  Péter Szilágyi <peterke@gmail.com>  2018-02-23 17:56:08 +0800
commit  ae9f97221a96a86e4343a5c3cc4b1db44627a2f3 (patch)
tree  0154be72d0f2e1f032d129b9433d1bf3939cd8f0 /vendor
parent  7f74bdf8dded0e1ac3c01e043c2ed89d78f308cf (diff)
metrics: pull library and introduce ResettingTimer and InfluxDB reporter (#15910)
* go-metrics: fork library and introduce ResettingTimer and InfluxDB reporter.
* vendor: change nonsense/go-metrics to ethersphere/go-metrics
* go-metrics: add tests. move ResettingTimer logic from reporter to type.
* all, metrics: pull in metrics package in go-ethereum
* metrics/test: make sure metrics are enabled for tests
* metrics: apply gosimple rules
* metrics/exp, internal/debug: init expvar endpoint when starting pprof server
* internal/debug: tiny comment formatting fix
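As a rough illustration (not part of the commit itself), a ResettingTimer from the
pulled-in metrics package might be used as sketched below; the import path, helper
names and registry are assumptions based on the commit description:

```go
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics" // import path assumed
)

func main() {
	// A ResettingTimer records durations and, unlike a plain Timer, is
	// cleared by the reporter after each reporting interval.
	t := metrics.GetOrRegisterResettingTimer("example/op", metrics.DefaultRegistry)
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for the measured work
	t.UpdateSince(start)
}
```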
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/influxdata/influxdb/LICENSE  20
-rw-r--r--  vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md  62
-rw-r--r--  vendor/github.com/influxdata/influxdb/client/README.md  306
-rw-r--r--  vendor/github.com/influxdata/influxdb/client/influxdb.go  840
-rw-r--r--  vendor/github.com/influxdata/influxdb/client/v2/client.go  635
-rw-r--r--  vendor/github.com/influxdata/influxdb/client/v2/udp.go  112
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/consistency.go  48
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/inline_fnv.go  32
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go  44
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/points.go  2337
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/rows.go  62
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/statistic.go  42
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/time.go  74
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/uint_support.go  7
-rw-r--r--  vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go  115
-rw-r--r--  vendor/github.com/influxdata/influxdb/pkg/escape/strings.go  21
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/LICENSE  29
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/README.md  153
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/counter.go  112
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/debug.go  76
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/ewma.go  118
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/exp/exp.go  156
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/gauge.go  120
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/gauge_float64.go  127
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/graphite.go  113
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/healthcheck.go  61
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/histogram.go  202
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/json.go  87
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/log.go  80
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/memory.md  285
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/meter.go  233
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/metrics.go  13
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/opentsdb.go  119
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/registry.go  270
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/runtime.go  212
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/runtime_cgo.go  10
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go  9
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go  7
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go  9
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/sample.go  616
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/syslog.go  78
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/timer.go  311
-rwxr-xr-x  vendor/github.com/rcrowley/go-metrics/validate.sh  10
-rw-r--r--  vendor/github.com/rcrowley/go-metrics/writer.go  100
-rw-r--r--  vendor/vendor.json  36
45 files changed, 4781 insertions, 3728 deletions
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE b/vendor/github.com/influxdata/influxdb/LICENSE
new file mode 100644
index 000000000..63cef79ba
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2016 Errplane Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
new file mode 100644
index 000000000..ea6fc69f3
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
@@ -0,0 +1,62 @@
+# List
+- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
+- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
+- github.com/BurntSushi/toml [MIT LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/RoaringBitmap/roaring [APACHE LICENSE](https://github.com/RoaringBitmap/roaring/blob/master/LICENSE)
+- github.com/beorn7/perks [MIT LICENSE](https://github.com/beorn7/perks/blob/master/LICENSE)
+- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
+- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
+- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
+- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE)
+- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
+- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
+- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
+- github.com/glycerine/go-unsnap-stream [MIT LICENSE](https://github.com/glycerine/go-unsnap-stream/blob/master/LICENSE)
+- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
+- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE)
+- github.com/influxdata/influxql [MIT LICENSE](https://github.com/influxdata/influxql/blob/master/LICENSE)
+- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
+- github.com/influxdata/yamux [MOZILLA PUBLIC LICENSE](https://github.com/influxdata/yamux/blob/master/LICENSE)
+- github.com/influxdata/yarpc [MIT LICENSE](https://github.com/influxdata/yarpc/blob/master/LICENSE)
+- github.com/jsternberg/zap-logfmt [MIT LICENSE](https://github.com/jsternberg/zap-logfmt/blob/master/LICENSE)
+- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
+- github.com/mattn/go-isatty [MIT LICENSE](https://github.com/mattn/go-isatty/blob/master/LICENSE)
+- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
+- github.com/opentracing/opentracing-go [MIT LICENSE](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
+- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
+- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
+- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md)
+- github.com/prometheus/client_golang [MIT LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
+- github.com/prometheus/client_model [MIT LICENSE](https://github.com/prometheus/client_model/blob/master/LICENSE)
+- github.com/prometheus/common [APACHE LICENSE](https://github.com/prometheus/common/blob/master/LICENSE)
+- github.com/prometheus/procfs [APACHE LICENSE](https://github.com/prometheus/procfs/blob/master/LICENSE)
+- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
+- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE)
+- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE)
+- go.uber.org/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt)
+- go.uber.org/multierr [MIT LICENSE](https://github.com/uber-go/multierr/blob/master/LICENSE.txt)
+- go.uber.org/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)
+- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
+- golang.org/x/net [BSD LICENSE](https://github.com/golang/net/blob/master/LICENSE)
+- golang.org/x/sys [BSD LICENSE](https://github.com/golang/sys/blob/master/LICENSE)
+- golang.org/x/text [BSD LICENSE](https://github.com/golang/text/blob/master/LICENSE)
+- golang.org/x/time [BSD LICENSE](https://github.com/golang/time/blob/master/LICENSE)
+- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+- github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE)
+
diff --git a/vendor/github.com/influxdata/influxdb/client/README.md b/vendor/github.com/influxdata/influxdb/client/README.md
new file mode 100644
index 000000000..773a11122
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/README.md
@@ -0,0 +1,306 @@
+# InfluxDB Client
+
+[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+
+## Description
+
+**NOTE:** The Go client library now has a "v2" version, with the old version
+being deprecated. The new version can be imported at
+`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
+
+A Go client library written and maintained by the **InfluxDB** team.
+This package provides convenience functions to read and write time series data.
+It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now, for good measure, set the credentials in your shell environment.
+In the example below we will use $INFLUX_USER and $INFLUX_PWD.
+
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+ "log"
+ "time"
+
+ "github.com/influxdata/influxdb/client/v2"
+)
+
+const (
+ MyDB = "square_holes"
+ username = "bubba"
+ password = "bumblebeetuna"
+)
+
+
+func main() {
+ // Create a new HTTPClient
+ c, err := client.NewHTTPClient(client.HTTPConfig{
+ Addr: "http://localhost:8086",
+ Username: username,
+ Password: password,
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create a new point batch
+ bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+ Database: MyDB,
+ Precision: "s",
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create a point and add to batch
+ tags := map[string]string{"cpu": "cpu-total"}
+ fields := map[string]interface{}{
+ "idle": 10.1,
+ "system": 53.3,
+ "user": 46.6,
+ }
+
+ pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+ if err != nil {
+ log.Fatal(err)
+ }
+ bp.AddPoint(pt)
+
+ // Write the batch
+ if err := c.Write(bp); err != nil {
+ log.Fatal(err)
+ }
+}
+
+```
+
+### Inserting Data
+
+Time series data aka *points* are written to the database using batch inserts.
+The mechanism is to create one or more points and then create a batch aka
+*batch points* and write these to a given database and series. A series is a
+combination of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time and
+a single value, as well as two tags indicating a shape and color. We write these points
+to a database called _square_holes_ using a measurement named _shapes_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If not
+provided InfluxDB will use the database _default_ retention policy.
+
+```go
+
+func writePoints(clnt client.Client) {
+ sampleSize := 1000
+
+ bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+ Database: "systemstats",
+ Precision: "us",
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ rand.Seed(time.Now().UnixNano())
+ for i := 0; i < sampleSize; i++ {
+ regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+ tags := map[string]string{
+ "cpu": "cpu-total",
+ "host": fmt.Sprintf("host%d", rand.Intn(1000)),
+ "region": regions[rand.Intn(len(regions))],
+ }
+
+ idle := rand.Float64() * 100.0
+ fields := map[string]interface{}{
+ "idle": idle,
+ "busy": 100.0 - idle,
+ }
+
+ pt, err := client.NewPoint(
+ "cpu_usage",
+ tags,
+ fields,
+ time.Now(),
+ )
+ if err != nil {
+ log.Fatal(err)
+ }
+ bp.AddPoint(pt)
+ }
+
+ if err := clnt.Write(bp); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+#### Uint64 Support
+
+The `uint64` data type is supported if your server is version `1.4.0` or
+greater. To write a data point as an unsigned integer, you must insert
+the point as `uint64`. You cannot use `uint` or any of the other
+derivatives because previous versions of the client have supported
+writing those types as an integer.
+
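+A minimal sketch (not part of the original README) of writing a single
+unsigned field, reusing the `bp` batch from the earlier example:
+
+```go
+fields := map[string]interface{}{
+ "max": uint64(18446744073709551615),
+}
+pt, err := client.NewPoint("uint_demo", nil, fields, time.Now())
+if err != nil {
+ log.Fatal(err)
+}
+bp.AddPoint(pt)
+```
+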
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using familiar
+SQL constructs. In this example we create a convenience function to query the database
+as follows:
+
+```go
+// queryDB is a convenience function to query the database
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
+ q := client.Query{
+ Command: cmd,
+ Database: MyDB,
+ }
+ if response, err := clnt.Query(q); err == nil {
+ if response.Error() != nil {
+ return res, response.Error()
+ }
+ res = response.Results
+ } else {
+ return res, err
+ }
+ return res, nil
+}
+```
+
+#### Creating a Database
+
+```go
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+#### Count Records
+
+```go
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
+if err != nil {
+ log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records\n", count)
+```
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 10)
+res, err = queryDB(clnt, q)
+if err != nil {
+ log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+ t, err := time.Parse(time.RFC3339, row[0].(string))
+ if err != nil {
+ log.Fatal(err)
+ }
+ val := row[1].(string)
+ log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
+}
+```
+
+### Using the UDP Client
+
+The **InfluxDB** client also supports writing over UDP.
+
+```go
+func WriteUDP() {
+ // Make client
+ c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
+ if err != nil {
+ panic(err.Error())
+ }
+
+ // Create a new point batch
+ bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+ Precision: "s",
+ })
+
+ // Create a point and add to batch
+ tags := map[string]string{"cpu": "cpu-total"}
+ fields := map[string]interface{}{
+ "idle": 10.1,
+ "system": 53.3,
+ "user": 46.6,
+ }
+ pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+ if err != nil {
+ panic(err.Error())
+ }
+ bp.AddPoint(pt)
+
+ // Write the batch
+ if err := c.Write(bp); err != nil {
+ panic(err.Error())
+ }
+}
+```
+
+### Point Splitting
+
+The UDP client now supports splitting single points that exceed the configured
+payload size. The logic for processing each point is listed here, starting with
+an empty payload.
+
+1. If adding the point to the current (non-empty) payload would exceed the
+ configured size, send the current payload. Otherwise, add it to the current
+ payload.
+1. If the point is smaller than the configured size, add it to the payload.
+1. If the point has no timestamp, just try to send the entire point as a single
+ UDP payload, and process the next point.
+1. Since the point has a timestamp, re-use the existing measurement name,
+ tagset, and timestamp and create multiple new points by splitting up the
+ fields. The per-point length will be kept close to the configured size,
+ staying under it if possible. This does mean that one large field, maybe a
+ long string, could be sent as a larger-than-configured payload.
+
+The above logic attempts to respect configured payload sizes, but not sacrifice
+any data integrity. Points without a timestamp can't be split, as that may
+cause fields to have differing timestamps when processed by the server.
+
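+A sketch (assuming the `UDPConfig` struct defined in `client/v2/udp.go` below)
+that raises the payload size so fewer points need splitting on a local network:
+
+```go
+c, err := client.NewUDPClient(client.UDPConfig{
+ Addr: "localhost:8089",
+ PayloadSize: 1450, // stay under a typical 1500-byte MTU
+})
+if err != nil {
+ panic(err.Error())
+}
+defer c.Close()
+```
+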
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go
new file mode 100644
index 000000000..98d362d50
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go
@@ -0,0 +1,840 @@
+// Package client implements a now-deprecated client for InfluxDB;
+// use github.com/influxdata/influxdb/client/v2 instead.
+package client // import "github.com/influxdata/influxdb/client"
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/influxdb/models"
+)
+
+const (
+ // DefaultHost is the default host used to connect to an InfluxDB instance
+ DefaultHost = "localhost"
+
+ // DefaultPort is the default port used to connect to an InfluxDB instance
+ DefaultPort = 8086
+
+ // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
+ DefaultTimeout = 0
+)
+
+// Query is used to send a command to the server. Both Command and Database are required.
+type Query struct {
+ Command string
+ Database string
+
+ // Chunked tells the server to send back chunked responses. This places
+ // less load on the server by sending back chunks of the response rather
+ // than waiting for the entire response all at once.
+ Chunked bool
+
+ // ChunkSize sets the maximum number of rows that will be returned per
+ // chunk. Chunks are either divided based on their series or if they hit
+ // the chunk size limit.
+ //
+ // Chunked must be set to true for this option to be used.
+ ChunkSize int
+}
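+
+// Illustrative only (not in the upstream source): a chunked query returning
+// at most 10000 rows per chunk could be constructed as:
+//
+// q := client.Query{
+// Command: "SELECT * FROM cpu",
+// Database: "telegraf",
+// Chunked: true,
+// ChunkSize: 10000,
+// }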
+
+// ParseConnectionString will parse a string to create a valid connection URL
+func ParseConnectionString(path string, ssl bool) (url.URL, error) {
+ var host string
+ var port int
+
+ h, p, err := net.SplitHostPort(path)
+ if err != nil {
+ if path == "" {
+ host = DefaultHost
+ } else {
+ host = path
+ }
+ // If they didn't specify a port, always use the default port
+ port = DefaultPort
+ } else {
+ host = h
+ port, err = strconv.Atoi(p)
+ if err != nil {
+ return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err)
+ }
+ }
+
+ u := url.URL{
+ Scheme: "http",
+ }
+ if ssl {
+ u.Scheme = "https"
+ }
+
+ u.Host = net.JoinHostPort(host, strconv.Itoa(port))
+
+ return u, nil
+}
+
+// Config is used to specify what server to connect to.
+// URL: the URL of the server to connect to.
+// Username/Password are optional. They will be passed via basic auth if provided.
+// UserAgent: if not provided, defaults to "InfluxDBClient".
+// Timeout: if not provided, defaults to 0 (no timeout).
+type Config struct {
+ URL url.URL
+ UnixSocket string
+ Username string
+ Password string
+ UserAgent string
+ Timeout time.Duration
+ Precision string
+ WriteConsistency string
+ UnsafeSsl bool
+}
+
+// NewConfig will create a config to be used in connecting to the client
+func NewConfig() Config {
+ return Config{
+ Timeout: DefaultTimeout,
+ }
+}
+
+// Client is used to make calls to the server.
+type Client struct {
+ url url.URL
+ unixSocket string
+ username string
+ password string
+ httpClient *http.Client
+ userAgent string
+ precision string
+}
+
+const (
+ // ConsistencyOne requires that at least one data node acknowledge the write.
+ ConsistencyOne = "one"
+
+ // ConsistencyAll requires all data nodes to acknowledge a write.
+ ConsistencyAll = "all"
+
+ // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
+ ConsistencyQuorum = "quorum"
+
+ // ConsistencyAny allows for hinted handoff; the write may not have happened yet.
+ ConsistencyAny = "any"
+)
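+
+// Illustrative only (not in the upstream source): a batch requesting quorum
+// acknowledgement could be built as:
+//
+// bp := client.BatchPoints{
+// Database: "mydb",
+// WriteConsistency: client.ConsistencyQuorum,
+// }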
+
+// NewClient will instantiate and return a connected client to issue commands to the server.
+func NewClient(c Config) (*Client, error) {
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: c.UnsafeSsl,
+ }
+
+ tr := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ }
+
+ if c.UnixSocket != "" {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+
+ tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
+ return net.Dial("unix", c.UnixSocket)
+ }
+ }
+
+ client := Client{
+ url: c.URL,
+ unixSocket: c.UnixSocket,
+ username: c.Username,
+ password: c.Password,
+ httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
+ userAgent: c.UserAgent,
+ precision: c.Precision,
+ }
+ if client.userAgent == "" {
+ client.userAgent = "InfluxDBClient"
+ }
+ return &client, nil
+}
+
+// SetAuth will update the username and passwords
+func (c *Client) SetAuth(u, p string) {
+ c.username = u
+ c.password = p
+}
+
+// SetPrecision will update the precision
+func (c *Client) SetPrecision(precision string) {
+ c.precision = precision
+}
+
+// Query sends a command to the server and returns the Response
+func (c *Client) Query(q Query) (*Response, error) {
+ return c.QueryContext(context.Background(), q)
+}
+
+// QueryContext sends a command to the server and returns the Response
+// It uses a context that can be cancelled by the command line client
+func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) {
+ u := c.url
+
+ u.Path = "query"
+ values := u.Query()
+ values.Set("q", q.Command)
+ values.Set("db", q.Database)
+ if q.Chunked {
+ values.Set("chunked", "true")
+ if q.ChunkSize > 0 {
+ values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
+ }
+ }
+ if c.precision != "" {
+ values.Set("epoch", c.precision)
+ }
+ u.RawQuery = values.Encode()
+
+ req, err := http.NewRequest("POST", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ req = req.WithContext(ctx)
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ if q.Chunked {
+ cr := NewChunkedResponse(resp.Body)
+ for {
+ r, err := cr.NextResponse()
+ if err != nil {
+ // If we got an error while decoding the response, send that back.
+ return nil, err
+ }
+
+ if r == nil {
+ break
+ }
+
+ response.Results = append(response.Results, r.Results...)
+ if r.Err != nil {
+ response.Err = r.Err
+ break
+ }
+ }
+ } else {
+ dec := json.NewDecoder(resp.Body)
+ dec.UseNumber()
+ if err := dec.Decode(&response); err != nil {
+ // Ignore EOF errors if we got an invalid status code.
+ if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
+ return nil, err
+ }
+ }
+ }
+
+ // If we don't have an error in our json response, and didn't get StatusOK,
+ // then send back an error.
+ if resp.StatusCode != http.StatusOK && response.Error() == nil {
+ return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+ }
+ return &response, nil
+}
+
+// Write takes BatchPoints and allows for writing of multiple points with defaults
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) Write(bp BatchPoints) (*Response, error) {
+ u := c.url
+ u.Path = "write"
+
+ var b bytes.Buffer
+ for _, p := range bp.Points {
+ err := checkPointTypes(p)
+ if err != nil {
+ return nil, err
+ }
+ if p.Raw != "" {
+ if _, err := b.WriteString(p.Raw); err != nil {
+ return nil, err
+ }
+ } else {
+ for k, v := range bp.Tags {
+ if p.Tags == nil {
+ p.Tags = make(map[string]string, len(bp.Tags))
+ }
+ p.Tags[k] = v
+ }
+
+ if _, err := b.WriteString(p.MarshalString()); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := b.WriteByte('\n'); err != nil {
+ return nil, err
+ }
+ }
+
+ req, err := http.NewRequest("POST", u.String(), &b)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ precision := bp.Precision
+ if precision == "" {
+ precision = "ns"
+ }
+
+ params := req.URL.Query()
+ params.Set("db", bp.Database)
+ params.Set("rp", bp.RetentionPolicy)
+ params.Set("precision", precision)
+ params.Set("consistency", bp.WriteConsistency)
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+ var err = fmt.Errorf(string(body))
+ response.Err = err
+ return &response, err
+ }
+
+ return nil, nil
+}
+
+// WriteLineProtocol takes a string of newline-delimited line protocol entries to write.
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
+ u := c.url
+ u.Path = "write"
+
+ r := strings.NewReader(data)
+
+ req, err := http.NewRequest("POST", u.String(), r)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+ params := req.URL.Query()
+ params.Set("db", database)
+ params.Set("rp", retentionPolicy)
+ params.Set("precision", precision)
+ params.Set("consistency", writeConsistency)
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+ err := fmt.Errorf(string(body))
+ response.Err = err
+ return &response, err
+ }
+
+ return nil, nil
+}
+
+// Ping will check to see if the server is up
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *Client) Ping() (time.Duration, string, error) {
+ now := time.Now()
+ u := c.url
+ u.Path = "ping"
+
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return 0, "", err
+ }
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return 0, "", err
+ }
+ defer resp.Body.Close()
+
+ version := resp.Header.Get("X-Influxdb-Version")
+ return time.Since(now), version, nil
+}
+
+// Structs
+
+// Message represents a user message.
+type Message struct {
+ Level string `json:"level,omitempty"`
+ Text string `json:"text,omitempty"`
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+ Series []models.Row
+ Messages []*Message
+ Err error
+}
+
+// MarshalJSON encodes the result into JSON.
+func (r *Result) MarshalJSON() ([]byte, error) {
+ // Define a struct that outputs "error" as a string.
+ var o struct {
+ Series []models.Row `json:"series,omitempty"`
+ Messages []*Message `json:"messages,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ // Copy fields to output struct.
+ o.Series = r.Series
+ o.Messages = r.Messages
+ if r.Err != nil {
+ o.Err = r.Err.Error()
+ }
+
+ return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Result struct
+func (r *Result) UnmarshalJSON(b []byte) error {
+ var o struct {
+ Series []models.Row `json:"series,omitempty"`
+ Messages []*Message `json:"messages,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ err := dec.Decode(&o)
+ if err != nil {
+ return err
+ }
+ r.Series = o.Series
+ r.Messages = o.Messages
+ if o.Err != "" {
+ r.Err = errors.New(o.Err)
+ }
+ return nil
+}
+
+// Response represents a list of statement results.
+type Response struct {
+ Results []Result
+ Err error
+}
+
+// MarshalJSON encodes the response into JSON.
+func (r *Response) MarshalJSON() ([]byte, error) {
+ // Define a struct that outputs "error" as a string.
+ var o struct {
+ Results []Result `json:"results,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ // Copy fields to output struct.
+ o.Results = r.Results
+ if r.Err != nil {
+ o.Err = r.Err.Error()
+ }
+
+ return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Response struct
+func (r *Response) UnmarshalJSON(b []byte) error {
+ var o struct {
+ Results []Result `json:"results,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ err := dec.Decode(&o)
+ if err != nil {
+ return err
+ }
+ r.Results = o.Results
+ if o.Err != "" {
+ r.Err = errors.New(o.Err)
+ }
+ return nil
+}
+
+// Error returns the first error from any statement.
+// Returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+ if r.Err != nil {
+ return r.Err
+ }
+ for _, result := range r.Results {
+ if result.Err != nil {
+ return result.Err
+ }
+ }
+ return nil
+}
+
+// duplexReader reads responses and copies them to another writer while
+// satisfying the reader interface.
+type duplexReader struct {
+ r io.Reader
+ w io.Writer
+}
+
+func (r *duplexReader) Read(p []byte) (n int, err error) {
+ n, err = r.r.Read(p)
+ if err == nil {
+ r.w.Write(p[:n])
+ }
+ return n, err
+}
+
+// ChunkedResponse represents a response from the server that
+// uses chunking to stream the output.
+type ChunkedResponse struct {
+ dec *json.Decoder
+ duplex *duplexReader
+ buf bytes.Buffer
+}
+
+// NewChunkedResponse reads a stream and produces responses from the stream.
+func NewChunkedResponse(r io.Reader) *ChunkedResponse {
+ resp := &ChunkedResponse{}
+ resp.duplex = &duplexReader{r: r, w: &resp.buf}
+ resp.dec = json.NewDecoder(resp.duplex)
+ resp.dec.UseNumber()
+ return resp
+}
+
+// NextResponse reads the next line of the stream and returns a response.
+func (r *ChunkedResponse) NextResponse() (*Response, error) {
+ var response Response
+ if err := r.dec.Decode(&response); err != nil {
+ if err == io.EOF {
+ return nil, nil
+ }
+ // A decoding error happened. This probably means the server crashed
+ // and sent a last-ditch error message to us. Ensure we have read the
+ // entirety of the connection to get any remaining error text.
+ io.Copy(ioutil.Discard, r.duplex)
+ return nil, errors.New(strings.TrimSpace(r.buf.String()))
+ }
+ r.buf.Reset()
+ return &response, nil
+}
+
+// Point defines the fields that will be written to the database
+// Measurement, Time, and Fields are required
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type Point struct {
+ Measurement string
+ Tags map[string]string
+ Time time.Time
+ Fields map[string]interface{}
+ Precision string
+ Raw string
+}
+
+// MarshalJSON will format the time in RFC3339Nano.
+// Precision is ignored, as it is only used for writing, not reading;
+// in other words, times are always sent back in nanosecond precision.
+func (p *Point) MarshalJSON() ([]byte, error) {
+ point := struct {
+ Measurement string `json:"measurement,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Time string `json:"time,omitempty"`
+ Fields map[string]interface{} `json:"fields,omitempty"`
+ Precision string `json:"precision,omitempty"`
+ }{
+ Measurement: p.Measurement,
+ Tags: p.Tags,
+ Fields: p.Fields,
+ Precision: p.Precision,
+ }
+ // Let it omit empty if it's really zero
+ if !p.Time.IsZero() {
+ point.Time = p.Time.UTC().Format(time.RFC3339Nano)
+ }
+ return json.Marshal(&point)
+}
+
+// MarshalString renders string representation of a Point with specified
+// precision. The default precision is nanoseconds.
+func (p *Point) MarshalString() string {
+ pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time)
+ if err != nil {
+ return "# ERROR: " + err.Error() + " " + p.Measurement
+ }
+ if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
+ return pt.String()
+ }
+ return pt.PrecisionString(p.Precision)
+}
+
+// UnmarshalJSON decodes the data into the Point struct
+func (p *Point) UnmarshalJSON(b []byte) error {
+ var normal struct {
+ Measurement string `json:"measurement"`
+ Tags map[string]string `json:"tags"`
+ Time time.Time `json:"time"`
+ Precision string `json:"precision"`
+ Fields map[string]interface{} `json:"fields"`
+ }
+ var epoch struct {
+ Measurement string `json:"measurement"`
+ Tags map[string]string `json:"tags"`
+ Time *int64 `json:"time"`
+ Precision string `json:"precision"`
+ Fields map[string]interface{} `json:"fields"`
+ }
+
+ if err := func() error {
+ var err error
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ if err = dec.Decode(&epoch); err != nil {
+ return err
+ }
+ // Convert from epoch to time.Time, but only if Time
+ // was actually set.
+ var ts time.Time
+ if epoch.Time != nil {
+ ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+ if err != nil {
+ return err
+ }
+ }
+ p.Measurement = epoch.Measurement
+ p.Tags = epoch.Tags
+ p.Time = ts
+ p.Precision = epoch.Precision
+ p.Fields = normalizeFields(epoch.Fields)
+ return nil
+ }(); err == nil {
+ return nil
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ if err := dec.Decode(&normal); err != nil {
+ return err
+ }
+ normal.Time = SetPrecision(normal.Time, normal.Precision)
+ p.Measurement = normal.Measurement
+ p.Tags = normal.Tags
+ p.Time = normal.Time
+ p.Precision = normal.Precision
+ p.Fields = normalizeFields(normal.Fields)
+
+ return nil
+}
+
+// Remove any notion of json.Number
+func normalizeFields(fields map[string]interface{}) map[string]interface{} {
+ newFields := map[string]interface{}{}
+
+ for k, v := range fields {
+ switch v := v.(type) {
+ case json.Number:
+ jv, e := v.Float64()
+ if e != nil {
+ panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
+ }
+ newFields[k] = jv
+ default:
+ newFields[k] = v
+ }
+ }
+ return newFields
+}
+
+// BatchPoints is used to send batched data in a single write.
+// Database and Points are required
+// If no retention policy is specified, it will use the database's default retention policy.
+// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
+// If time is specified, it will be applied to any point with an empty time.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type BatchPoints struct {
+ Points []Point `json:"points,omitempty"`
+ Database string `json:"database,omitempty"`
+ RetentionPolicy string `json:"retentionPolicy,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Time time.Time `json:"time,omitempty"`
+ Precision string `json:"precision,omitempty"`
+ WriteConsistency string `json:"-"`
+}
+
+// UnmarshalJSON decodes the data into the BatchPoints struct
+func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
+ var normal struct {
+ Points []Point `json:"points"`
+ Database string `json:"database"`
+ RetentionPolicy string `json:"retentionPolicy"`
+ Tags map[string]string `json:"tags"`
+ Time time.Time `json:"time"`
+ Precision string `json:"precision"`
+ }
+ var epoch struct {
+ Points []Point `json:"points"`
+ Database string `json:"database"`
+ RetentionPolicy string `json:"retentionPolicy"`
+ Tags map[string]string `json:"tags"`
+ Time *int64 `json:"time"`
+ Precision string `json:"precision"`
+ }
+
+ if err := func() error {
+ var err error
+ if err = json.Unmarshal(b, &epoch); err != nil {
+ return err
+ }
+ // Convert from epoch to time.Time
+ var ts time.Time
+ if epoch.Time != nil {
+ ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+ if err != nil {
+ return err
+ }
+ }
+ bp.Points = epoch.Points
+ bp.Database = epoch.Database
+ bp.RetentionPolicy = epoch.RetentionPolicy
+ bp.Tags = epoch.Tags
+ bp.Time = ts
+ bp.Precision = epoch.Precision
+ return nil
+ }(); err == nil {
+ return nil
+ }
+
+ if err := json.Unmarshal(b, &normal); err != nil {
+ return err
+ }
+ normal.Time = SetPrecision(normal.Time, normal.Precision)
+ bp.Points = normal.Points
+ bp.Database = normal.Database
+ bp.RetentionPolicy = normal.RetentionPolicy
+ bp.Tags = normal.Tags
+ bp.Time = normal.Time
+ bp.Precision = normal.Precision
+
+ return nil
+}
+
+// utility functions
+
+// Addr provides the current url as a string of the server the client is connected to.
+func (c *Client) Addr() string {
+ if c.unixSocket != "" {
+ return c.unixSocket
+ }
+ return c.url.String()
+}
+
+// checkPointTypes ensures no unsupported types are submitted to influxdb, returning an error if any are found.
+func checkPointTypes(p Point) error {
+ for _, v := range p.Fields {
+ switch v.(type) {
+ case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool, string, nil:
+ // supported type; check the next field
+ default:
+ return fmt.Errorf("unsupported point type: %T", v)
+ }
+ }
+ return nil
+}
+
+// helper functions
+
+// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
+func EpochToTime(epoch int64, precision string) (time.Time, error) {
+ if precision == "" {
+ precision = "s"
+ }
+ var t time.Time
+ switch precision {
+ case "h":
+ t = time.Unix(0, epoch*int64(time.Hour))
+ case "m":
+ t = time.Unix(0, epoch*int64(time.Minute))
+ case "s":
+ t = time.Unix(0, epoch*int64(time.Second))
+ case "ms":
+ t = time.Unix(0, epoch*int64(time.Millisecond))
+ case "u":
+ t = time.Unix(0, epoch*int64(time.Microsecond))
+ case "n":
+ t = time.Unix(0, epoch)
+ default:
+ return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
+ }
+ return t, nil
+}
+
+// SetPrecision will round a time to the specified precision
+func SetPrecision(t time.Time, precision string) time.Time {
+ switch precision {
+ case "n":
+ case "u":
+ return t.Round(time.Microsecond)
+ case "ms":
+ return t.Round(time.Millisecond)
+ case "s":
+ return t.Round(time.Second)
+ case "m":
+ return t.Round(time.Minute)
+ case "h":
+ return t.Round(time.Hour)
+ }
+ return t
+}
diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go
new file mode 100644
index 000000000..77d44f2b3
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go
@@ -0,0 +1,635 @@
+// Package client (v2) is the current official Go client for InfluxDB.
+package client // import "github.com/influxdata/influxdb/client/v2"
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/influxdb/models"
+)
+
+// HTTPConfig is the config data needed to create an HTTP Client.
+type HTTPConfig struct {
+ // Addr should be of the form "http://host:port"
+ // or "http://[ipv6-host%zone]:port".
+ Addr string
+
+ // Username is the influxdb username, optional.
+ Username string
+
+ // Password is the influxdb password, optional.
+ Password string
+
+ // UserAgent is the http User Agent, defaults to "InfluxDBClient".
+ UserAgent string
+
+ // Timeout for influxdb writes, defaults to no timeout.
+ Timeout time.Duration
+
+ // InsecureSkipVerify gets passed to the http client, if true, it will
+ // skip https certificate verification. Defaults to false.
+ InsecureSkipVerify bool
+
+ // TLSConfig allows the user to set their own TLS config for the HTTP
+ // Client. If set, this option overrides InsecureSkipVerify.
+ TLSConfig *tls.Config
+}
+
+// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.
+type BatchPointsConfig struct {
+ // Precision is the write precision of the points, defaults to "ns".
+ Precision string
+
+ // Database is the database to write points to.
+ Database string
+
+ // RetentionPolicy is the retention policy of the points.
+ RetentionPolicy string
+
+ // Write consistency is the number of servers required to confirm write.
+ WriteConsistency string
+}
+
+// Client is a client interface for writing & querying the database.
+type Client interface {
+ // Ping checks the status of the cluster, and will always return 0 time and no
+ // error for UDP clients.
+ Ping(timeout time.Duration) (time.Duration, string, error)
+
+ // Write takes a BatchPoints object and writes all Points to InfluxDB.
+ Write(bp BatchPoints) error
+
+ // Query makes an InfluxDB Query on the database. This will fail if using
+ // the UDP client.
+ Query(q Query) (*Response, error)
+
+ // Close releases any resources a Client may be using.
+ Close() error
+}
+
+// NewHTTPClient returns a new Client from the provided config.
+// Client is safe for concurrent use by multiple goroutines.
+func NewHTTPClient(conf HTTPConfig) (Client, error) {
+ if conf.UserAgent == "" {
+ conf.UserAgent = "InfluxDBClient"
+ }
+
+ u, err := url.Parse(conf.Addr)
+ if err != nil {
+ return nil, err
+ } else if u.Scheme != "http" && u.Scheme != "https" {
+ m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
+ " must start with http:// or https://", u.Scheme)
+ return nil, errors.New(m)
+ }
+
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: conf.InsecureSkipVerify,
+ },
+ }
+ if conf.TLSConfig != nil {
+ tr.TLSClientConfig = conf.TLSConfig
+ }
+ return &client{
+ url: *u,
+ username: conf.Username,
+ password: conf.Password,
+ useragent: conf.UserAgent,
+ httpClient: &http.Client{
+ Timeout: conf.Timeout,
+ Transport: tr,
+ },
+ transport: tr,
+ }, nil
+}
+
+// Ping will check to see if the server is up with an optional timeout on waiting for leader.
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
+ now := time.Now()
+ u := c.url
+ u.Path = "ping"
+
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return 0, "", err
+ }
+
+ req.Header.Set("User-Agent", c.useragent)
+
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ if timeout > 0 {
+ params := req.URL.Query()
+ params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds()))
+ req.URL.RawQuery = params.Encode()
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return 0, "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return 0, "", err
+ }
+
+ if resp.StatusCode != http.StatusNoContent {
+ var err = fmt.Errorf(string(body))
+ return 0, "", err
+ }
+
+ version := resp.Header.Get("X-Influxdb-Version")
+ return time.Since(now), version, nil
+}
+
+// Close releases the client's resources.
+func (c *client) Close() error {
+ c.transport.CloseIdleConnections()
+ return nil
+}
+
+// client is safe for concurrent use as the fields are all read-only
+// once the client is instantiated.
+type client struct {
+ // N.B. - if url.UserInfo is accessed in future modifications to the
+ // methods on client, you will need to synchronise access to url.
+ url url.URL
+ username string
+ password string
+ useragent string
+ httpClient *http.Client
+ transport *http.Transport
+}
+
+// BatchPoints is an interface into a batched grouping of points to write into
+// InfluxDB together. BatchPoints is NOT thread-safe; you must create a separate
+// batch for each goroutine.
+type BatchPoints interface {
+ // AddPoint adds the given point to the Batch of points.
+ AddPoint(p *Point)
+ // AddPoints adds the given points to the Batch of points.
+ AddPoints(ps []*Point)
+ // Points lists the points in the Batch.
+ Points() []*Point
+
+ // Precision returns the currently set precision of this Batch.
+ Precision() string
+ // SetPrecision sets the precision of this batch.
+ SetPrecision(s string) error
+
+ // Database returns the currently set database of this Batch.
+ Database() string
+ // SetDatabase sets the database of this Batch.
+ SetDatabase(s string)
+
+ // WriteConsistency returns the currently set write consistency of this Batch.
+ WriteConsistency() string
+ // SetWriteConsistency sets the write consistency of this Batch.
+ SetWriteConsistency(s string)
+
+ // RetentionPolicy returns the currently set retention policy of this Batch.
+ RetentionPolicy() string
+ // SetRetentionPolicy sets the retention policy of this Batch.
+ SetRetentionPolicy(s string)
+}
+
+// NewBatchPoints returns a BatchPoints interface based on the given config.
+func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
+ if conf.Precision == "" {
+ conf.Precision = "ns"
+ }
+ if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
+ return nil, err
+ }
+ bp := &batchpoints{
+ database: conf.Database,
+ precision: conf.Precision,
+ retentionPolicy: conf.RetentionPolicy,
+ writeConsistency: conf.WriteConsistency,
+ }
+ return bp, nil
+}
+
+type batchpoints struct {
+ points []*Point
+ database string
+ precision string
+ retentionPolicy string
+ writeConsistency string
+}
+
+func (bp *batchpoints) AddPoint(p *Point) {
+ bp.points = append(bp.points, p)
+}
+
+func (bp *batchpoints) AddPoints(ps []*Point) {
+ bp.points = append(bp.points, ps...)
+}
+
+func (bp *batchpoints) Points() []*Point {
+ return bp.points
+}
+
+func (bp *batchpoints) Precision() string {
+ return bp.precision
+}
+
+func (bp *batchpoints) Database() string {
+ return bp.database
+}
+
+func (bp *batchpoints) WriteConsistency() string {
+ return bp.writeConsistency
+}
+
+func (bp *batchpoints) RetentionPolicy() string {
+ return bp.retentionPolicy
+}
+
+func (bp *batchpoints) SetPrecision(p string) error {
+ if _, err := time.ParseDuration("1" + p); err != nil {
+ return err
+ }
+ bp.precision = p
+ return nil
+}
+
+func (bp *batchpoints) SetDatabase(db string) {
+ bp.database = db
+}
+
+func (bp *batchpoints) SetWriteConsistency(wc string) {
+ bp.writeConsistency = wc
+}
+
+func (bp *batchpoints) SetRetentionPolicy(rp string) {
+ bp.retentionPolicy = rp
+}
+
+// Point represents a single data point.
+type Point struct {
+ pt models.Point
+}
+
+// NewPoint returns a point with the given timestamp. If a timestamp is not
+// given, then data is sent to the database without a timestamp, in which case
+// the server will assign local time upon reception. NOTE: it is recommended to
+// send data with a timestamp.
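+//
+// Illustrative only (not in the upstream source):
+//
+// pt, err := client.NewPoint("cpu_usage",
+// map[string]string{"host": "server01"},
+// map[string]interface{}{"idle": 42.0},
+// time.Now())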
+func NewPoint(
+ name string,
+ tags map[string]string,
+ fields map[string]interface{},
+ t ...time.Time,
+) (*Point, error) {
+ var T time.Time
+ if len(t) > 0 {
+ T = t[0]
+ }
+
+ pt, err := models.NewPoint(name, models.NewTags(tags), fields, T)
+ if err != nil {
+ return nil, err
+ }
+ return &Point{
+ pt: pt,
+ }, nil
+}
+
+// String returns a line-protocol string of the Point.
+func (p *Point) String() string {
+ return p.pt.String()
+}
+
+// PrecisionString returns a line-protocol string of the Point,
+// with the timestamp formatted for the given precision.
+func (p *Point) PrecisionString(precision string) string {
+ return p.pt.PrecisionString(precision)
+}
+
+// Name returns the measurement name of the point.
+func (p *Point) Name() string {
+ return string(p.pt.Name())
+}
+
+// Tags returns the tags associated with the point.
+func (p *Point) Tags() map[string]string {
+ return p.pt.Tags().Map()
+}
+
+// Time returns the timestamp for the point.
+func (p *Point) Time() time.Time {
+ return p.pt.Time()
+}
+
+// UnixNano returns timestamp of the point in nanoseconds since Unix epoch.
+func (p *Point) UnixNano() int64 {
+ return p.pt.UnixNano()
+}
+
+// Fields returns the fields for the point.
+func (p *Point) Fields() (map[string]interface{}, error) {
+ return p.pt.Fields()
+}
+
+// NewPointFrom returns a point from the provided models.Point.
+func NewPointFrom(pt models.Point) *Point {
+ return &Point{pt: pt}
+}
+
+func (c *client) Write(bp BatchPoints) error {
+ var b bytes.Buffer
+
+ for _, p := range bp.Points() {
+ if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
+ return err
+ }
+
+ if err := b.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ u := c.url
+ u.Path = "write"
+ req, err := http.NewRequest("POST", u.String(), &b)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.useragent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ params := req.URL.Query()
+ params.Set("db", bp.Database())
+ params.Set("rp", bp.RetentionPolicy())
+ params.Set("precision", bp.Precision())
+ params.Set("consistency", bp.WriteConsistency())
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+ var err = fmt.Errorf(string(body))
+ return err
+ }
+
+ return nil
+}
+
+// Query defines a query to send to the server.
+type Query struct {
+ Command string
+ Database string
+ Precision string
+ Chunked bool
+ ChunkSize int
+ Parameters map[string]interface{}
+}
+
+// NewQuery returns a query object.
+// The database and precision arguments can be empty strings if they are not needed for the query.
+func NewQuery(command, database, precision string) Query {
+ return Query{
+ Command: command,
+ Database: database,
+ Precision: precision,
+ Parameters: make(map[string]interface{}),
+ }
+}
+
+// NewQueryWithParameters returns a query object.
+// The database and precision arguments can be empty strings if they are not needed for the query.
+// parameters is a map of the parameter names used in the command to their values.
+func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query {
+ return Query{
+ Command: command,
+ Database: database,
+ Precision: precision,
+ Parameters: parameters,
+ }
+}
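+
+// Illustrative only (not in the upstream source): binding a value to $host
+// in an InfluxQL command:
+//
+// q := client.NewQueryWithParameters(
+// "SELECT count(value) FROM cpu WHERE host = $host",
+// "mydb", "",
+// map[string]interface{}{"host": "server01"})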
+
+// Response represents a list of statement results.
+type Response struct {
+ Results []Result
+ Err string `json:"error,omitempty"`
+}
+
+// Error returns the first error from any statement.
+// It returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+ if r.Err != "" {
+ return fmt.Errorf(r.Err)
+ }
+ for _, result := range r.Results {
+ if result.Err != "" {
+ return fmt.Errorf(result.Err)
+ }
+ }
+ return nil
+}
+
+// Message represents a user message.
+type Message struct {
+ Level string
+ Text string
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+ Series []models.Row
+ Messages []*Message
+ Err string `json:"error,omitempty"`
+}
+
+// Query sends a command to the server and returns the Response.
+func (c *client) Query(q Query) (*Response, error) {
+ u := c.url
+ u.Path = "query"
+
+ jsonParameters, err := json.Marshal(q.Parameters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("POST", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.useragent)
+
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ params := req.URL.Query()
+ params.Set("q", q.Command)
+ params.Set("db", q.Database)
+ params.Set("params", string(jsonParameters))
+ if q.Chunked {
+ params.Set("chunked", "true")
+ if q.ChunkSize > 0 {
+ params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
+ }
+ }
+
+ if q.Precision != "" {
+ params.Set("epoch", q.Precision)
+ }
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ // If we lack an X-Influxdb-Version header, then we didn't get a response from influxdb
+ // but instead some other service. If the error code is also a 500+ code, then some
+ // downstream loadbalancer/proxy/etc had an issue and we should report that.
+ if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil || len(body) == 0 {
+ return nil, fmt.Errorf("received status code %d from downstream server", resp.StatusCode)
+ }
+
+ return nil, fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body)
+ }
+
+ // If we get an unexpected content type, then it is also not from influx direct and therefore
+ // we want to know what we received and what status code was returned for debugging purposes.
+ if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" {
+ // Read up to 1kb of the body to help identify downstream errors and limit the impact of things
+ // like downstream serving a large file
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024))
+ if err != nil || len(body) == 0 {
+ return nil, fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode)
+ }
+
+ return nil, fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body)
+ }
+
+ var response Response
+ if q.Chunked {
+ cr := NewChunkedResponse(resp.Body)
+ for {
+ r, err := cr.NextResponse()
+ if err != nil {
+ // If we got an error while decoding the response, send that back.
+ return nil, err
+ }
+
+ if r == nil {
+ break
+ }
+
+ response.Results = append(response.Results, r.Results...)
+ if r.Err != "" {
+ response.Err = r.Err
+ break
+ }
+ }
+ } else {
+ dec := json.NewDecoder(resp.Body)
+ dec.UseNumber()
+ decErr := dec.Decode(&response)
+
+ // ignore this error if we got an invalid status code
+ if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
+ decErr = nil
+ }
+ // If we got a valid decode error, send that back
+ if decErr != nil {
+ return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
+ }
+ }
+
+ // If we don't have an error in our JSON response, and didn't get StatusOK,
+ // then send back an error.
+ if resp.StatusCode != http.StatusOK && response.Error() == nil {
+ return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+ }
+ return &response, nil
+}
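+
+// exampleQuery is an editor's sketch, not part of the upstream file: a
+// minimal, hypothetical use of Query, assuming a Client built elsewhere in
+// this package (e.g. via NewHTTPClient) and a database named "mydb".
+func exampleQuery(c Client) ([]Result, error) {
+ q := NewQuery(`SELECT count(value) FROM cpu`, "mydb", "s")
+ resp, err := c.Query(q)
+ if err != nil {
+ return nil, err // transport-level failure
+ }
+ if err := resp.Error(); err != nil {
+ return nil, err // statement-level failure reported by the server
+ }
+ return resp.Results, nil
+}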
+
+// duplexReader reads from r and writes everything it reads to w, while
+// satisfying the io.Reader interface. It is similar to io.TeeReader, except
+// that write errors are ignored and nothing is written when the read fails.
+type duplexReader struct {
+ r io.Reader
+ w io.Writer
+}
+
+func (r *duplexReader) Read(p []byte) (n int, err error) {
+ n, err = r.r.Read(p)
+ if err == nil {
+ r.w.Write(p[:n])
+ }
+ return n, err
+}
+
+// ChunkedResponse represents a response from the server that
+// uses chunking to stream the output.
+type ChunkedResponse struct {
+ dec *json.Decoder
+ duplex *duplexReader
+ buf bytes.Buffer
+}
+
+// NewChunkedResponse returns a ChunkedResponse that reads responses from r.
+func NewChunkedResponse(r io.Reader) *ChunkedResponse {
+ resp := &ChunkedResponse{}
+ resp.duplex = &duplexReader{r: r, w: &resp.buf}
+ resp.dec = json.NewDecoder(resp.duplex)
+ resp.dec.UseNumber()
+ return resp
+}
+
+// NextResponse reads the next line of the stream and returns a response.
+func (r *ChunkedResponse) NextResponse() (*Response, error) {
+ var response Response
+
+ if err := r.dec.Decode(&response); err != nil {
+ if err == io.EOF {
+ return nil, nil
+ }
+ // A decoding error happened. This probably means the server crashed
+ // and sent a last-ditch error message to us. Ensure we have read the
+ // entirety of the connection to get any remaining error text.
+ io.Copy(ioutil.Discard, r.duplex)
+ return nil, errors.New(strings.TrimSpace(r.buf.String()))
+ }
+
+ r.buf.Reset()
+ return &response, nil
+}
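+
+// exampleDrainChunks is an editor's sketch, not part of the upstream file:
+// draining a chunked stream until the end, which NextResponse reports by
+// returning (nil, nil).
+func exampleDrainChunks(r io.Reader) ([]Result, error) {
+ cr := NewChunkedResponse(r)
+ var results []Result
+ for {
+ resp, err := cr.NextResponse()
+ if err != nil {
+ return nil, err
+ }
+ if resp == nil {
+ return results, nil // end of stream
+ }
+ results = append(results, resp.Results...)
+ }
+}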
diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go
new file mode 100644
index 000000000..779a28b33
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go
@@ -0,0 +1,112 @@
+package client
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "time"
+)
+
+const (
+ // UDPPayloadSize is a reasonable default payload size for UDP packets that
+ // could be travelling over the internet.
+ UDPPayloadSize = 512
+)
+
+// UDPConfig is the config data needed to create a UDP Client.
+type UDPConfig struct {
+ // Addr should be of the form "host:port"
+ // or "[ipv6-host%zone]:port".
+ Addr string
+
+ // PayloadSize is the maximum size of a UDP client message. Optional;
+ // tune this based on your network. Defaults to UDPPayloadSize.
+ PayloadSize int
+}
+
+// NewUDPClient returns a client interface for writing to an InfluxDB UDP
+// service from the given config.
+func NewUDPClient(conf UDPConfig) (Client, error) {
+ var udpAddr *net.UDPAddr
+ udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
+ if err != nil {
+ return nil, err
+ }
+
+ conn, err := net.DialUDP("udp", nil, udpAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ payloadSize := conf.PayloadSize
+ if payloadSize == 0 {
+ payloadSize = UDPPayloadSize
+ }
+
+ return &udpclient{
+ conn: conn,
+ payloadSize: payloadSize,
+ }, nil
+}
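+
+// exampleUDPWrite is an editor's sketch, not part of the upstream file: a
+// hypothetical end-to-end write over UDP. The address, measurement, tag and
+// field names are made up; NewBatchPoints and NewPoint are assumed from the
+// client.go of this package.
+func exampleUDPWrite() error {
+ c, err := NewUDPClient(UDPConfig{Addr: "localhost:8089"})
+ if err != nil {
+ return err
+ }
+ defer c.Close()
+
+ bp, err := NewBatchPoints(BatchPointsConfig{Precision: "s"})
+ if err != nil {
+ return err
+ }
+ pt, err := NewPoint("cpu", map[string]string{"host": "a"},
+ map[string]interface{}{"value": 0.64}, time.Now())
+ if err != nil {
+ return err
+ }
+ bp.AddPoint(pt)
+ return c.Write(bp)
+}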
+
+// Close releases the udpclient's resources.
+func (uc *udpclient) Close() error {
+ return uc.conn.Close()
+}
+
+type udpclient struct {
+ conn io.WriteCloser
+ payloadSize int
+}
+
+func (uc *udpclient) Write(bp BatchPoints) error {
+ var b = make([]byte, 0, uc.payloadSize) // initial buffer size; it will grow as needed
+ var d, _ = time.ParseDuration("1" + bp.Precision())
+
+ var delayedError error
+
+ var checkBuffer = func(n int) {
+ if len(b) > 0 && len(b)+n > uc.payloadSize {
+ if _, err := uc.conn.Write(b); err != nil {
+ delayedError = err
+ }
+ b = b[:0]
+ }
+ }
+
+ for _, p := range bp.Points() {
+ p.pt.Round(d)
+ pointSize := p.pt.StringSize() + 1 // include newline in size
+
+ checkBuffer(pointSize)
+
+ if p.Time().IsZero() || pointSize <= uc.payloadSize {
+ b = p.pt.AppendString(b)
+ b = append(b, '\n')
+ continue
+ }
+
+ points := p.pt.Split(uc.payloadSize - 1) // account for newline character
+ for _, sp := range points {
+ checkBuffer(sp.StringSize() + 1)
+ b = sp.AppendString(b)
+ b = append(b, '\n')
+ }
+ }
+
+ if len(b) > 0 {
+ if _, err := uc.conn.Write(b); err != nil {
+ return err
+ }
+ }
+ return delayedError
+}
+
+func (uc *udpclient) Query(q Query) (*Response, error) {
+ return nil, fmt.Errorf("Querying via UDP is not supported")
+}
+
+func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {
+ return 0, "", nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/consistency.go b/vendor/github.com/influxdata/influxdb/models/consistency.go
new file mode 100644
index 000000000..2a3269bca
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/consistency.go
@@ -0,0 +1,48 @@
+package models
+
+import (
+ "errors"
+ "strings"
+)
+
+// ConsistencyLevel represents the replication criteria required before a write
+// can be returned as successful.
+//
+// The consistency level is parsed by open-source InfluxDB, but is only applicable to clusters.
+type ConsistencyLevel int
+
+const (
+ // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
+ ConsistencyLevelAny ConsistencyLevel = iota
+
+ // ConsistencyLevelOne requires at least one data node acknowledged a write.
+ ConsistencyLevelOne
+
+ // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
+ ConsistencyLevelQuorum
+
+ // ConsistencyLevelAll requires all data nodes to acknowledge a write.
+ ConsistencyLevelAll
+)
+
+var (
+ // ErrInvalidConsistencyLevel is returned when parsing the string version
+ // of a consistency level.
+ ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
+)
+
+// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
+ switch strings.ToLower(level) {
+ case "any":
+ return ConsistencyLevelAny, nil
+ case "one":
+ return ConsistencyLevelOne, nil
+ case "quorum":
+ return ConsistencyLevelQuorum, nil
+ case "all":
+ return ConsistencyLevelAll, nil
+ default:
+ return 0, ErrInvalidConsistencyLevel
+ }
+}
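+
+// exampleConsistency is an editor's sketch, not part of the upstream file:
+// parsing a user-supplied level, treating the empty string as a hypothetical
+// caller-chosen default rather than an error.
+func exampleConsistency(s string) (ConsistencyLevel, error) {
+ if s == "" {
+ return ConsistencyLevelOne, nil
+ }
+ return ParseConsistencyLevel(s)
+}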
diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go
new file mode 100644
index 000000000..eec1ae8b0
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go
@@ -0,0 +1,32 @@
+package models // import "github.com/influxdata/influxdb/models"
+
+// from stdlib hash/fnv/fnv.go
+const (
+ prime64 = 1099511628211
+ offset64 = 14695981039346656037
+)
+
+// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
+// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
+type InlineFNV64a uint64
+
+// NewInlineFNV64a returns a new instance of InlineFNV64a.
+func NewInlineFNV64a() InlineFNV64a {
+ return offset64
+}
+
+// Write adds data to the running hash.
+func (s *InlineFNV64a) Write(data []byte) (int, error) {
+ hash := uint64(*s)
+ for _, c := range data {
+ hash ^= uint64(c)
+ hash *= prime64
+ }
+ *s = InlineFNV64a(hash)
+ return len(data), nil
+}
+
+// Sum64 returns the current value of the hash as a uint64.
+func (s *InlineFNV64a) Sum64() uint64 {
+ return uint64(*s)
+}
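+
+// exampleHashKey is an editor's sketch, not part of the upstream file: hashing
+// a series key without allocating. The resulting Sum64 matches what the
+// standard library's fnv.New64a would produce for the same bytes.
+func exampleHashKey(key []byte) uint64 {
+ h := NewInlineFNV64a()
+ h.Write(key) // Write never returns an error
+ return h.Sum64()
+}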
diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go
new file mode 100644
index 000000000..8db483738
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go
@@ -0,0 +1,44 @@
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+ "reflect"
+ "strconv"
+ "unsafe"
+)
+
+// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
+func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
+ s := unsafeBytesToString(b)
+ return strconv.ParseInt(s, base, bitSize)
+}
+
+// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
+func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
+ s := unsafeBytesToString(b)
+ return strconv.ParseUint(s, base, bitSize)
+}
+
+// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
+func parseFloatBytes(b []byte, bitSize int) (float64, error) {
+ s := unsafeBytesToString(b)
+ return strconv.ParseFloat(s, bitSize)
+}
+
+// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
+func parseBoolBytes(b []byte) (bool, error) {
+ return strconv.ParseBool(unsafeBytesToString(b))
+}
+
+// unsafeBytesToString converts a []byte to a string without a heap allocation.
+//
+// It is unsafe, and is intended to prepare input to short-lived functions
+// that require strings.
+func unsafeBytesToString(in []byte) string {
+ src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
+ dst := reflect.StringHeader{
+ Data: src.Data,
+ Len: src.Len,
+ }
+ s := *(*string)(unsafe.Pointer(&dst))
+ return s
+}
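+
+// exampleParseFieldInt is an editor's sketch, not part of the upstream file:
+// parsing an integer field value, e.g. b = []byte("42"), without the []byte
+// to string copy that a direct strconv.ParseInt call would require. The
+// caller must not mutate b during the call, since the unsafe string view
+// shares b's memory.
+func exampleParseFieldInt(b []byte) (int64, error) {
+ return parseIntBytes(b, 10, 64)
+}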
diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go
new file mode 100644
index 000000000..ad80a816b
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/points.go
@@ -0,0 +1,2337 @@
+// Package models implements basic objects used throughout the TICK stack.
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/influxdb/pkg/escape"
+)
+
+var (
+ measurementEscapeCodes = map[byte][]byte{
+ ',': []byte(`\,`),
+ ' ': []byte(`\ `),
+ }
+
+ tagEscapeCodes = map[byte][]byte{
+ ',': []byte(`\,`),
+ ' ': []byte(`\ `),
+ '=': []byte(`\=`),
+ }
+
+ // ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
+ ErrPointMustHaveAField = errors.New("point without fields is unsupported")
+
+ // ErrInvalidNumber is returned when a number is expected but not provided.
+ ErrInvalidNumber = errors.New("invalid number")
+
+ // ErrInvalidPoint is returned when a point cannot be parsed correctly.
+ ErrInvalidPoint = errors.New("point is invalid")
+)
+
+const (
+ // MaxKeyLength is the largest allowed size of the combined measurement and tag keys.
+ MaxKeyLength = 65535
+)
+
+// enableUint64Support will enable uint64 support if set to true.
+var enableUint64Support = false
+
+// EnableUintSupport manually enables uint support for the point parser.
+// This function will be removed in the future and only exists for unit tests during the
+// transition.
+func EnableUintSupport() {
+ enableUint64Support = true
+}
+
+// Point defines the values that will be written to the database.
+type Point interface {
+ // Name returns the measurement name for the point.
+ Name() []byte
+
+ // SetName updates the measurement name for the point.
+ SetName(string)
+
+ // Tags returns the tag set for the point.
+ Tags() Tags
+
+ // AddTag adds or replaces a tag value for a point.
+ AddTag(key, value string)
+
+ // SetTags replaces the tags for the point.
+ SetTags(tags Tags)
+
+ // HasTag returns true if the tag exists for the point.
+ HasTag(tag []byte) bool
+
+ // Fields returns the fields for the point.
+ Fields() (Fields, error)
+
+ // Time returns the timestamp for the point.
+ Time() time.Time
+
+ // SetTime updates the timestamp for the point.
+ SetTime(t time.Time)
+
+ // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+ UnixNano() int64
+
+ // HashID returns a non-cryptographic checksum of the point's key.
+ HashID() uint64
+
+ // Key returns the key (measurement joined with tags) of the point.
+ Key() []byte
+
+ // String returns a string representation of the point. If there is a
+ // timestamp associated with the point then it will be specified with the default
+ // precision of nanoseconds.
+ String() string
+
+ // MarshalBinary returns a binary representation of the point.
+ MarshalBinary() ([]byte, error)
+
+ // PrecisionString returns a string representation of the point. If there
+ // is a timestamp associated with the point then it will be specified in the
+ // given unit.
+ PrecisionString(precision string) string
+
+ // RoundedString returns a string representation of the point. If there
+ // is a timestamp associated with the point, then it will be rounded to the
+ // given duration.
+ RoundedString(d time.Duration) string
+
+ // Split will attempt to return multiple points with the same timestamp whose
+ // string representations are no longer than size. Points with a single field or
+ // a point without a timestamp may exceed the requested size.
+ Split(size int) []Point
+
+ // Round will round the timestamp of the point to the given duration.
+ Round(d time.Duration)
+
+ // StringSize returns the length of the string that would be returned by String().
+ StringSize() int
+
+ // AppendString appends the result of String() to the provided buffer and returns
+ // the result, potentially reducing string allocations.
+ AppendString(buf []byte) []byte
+
+ // FieldIterator returns a FieldIterator that can be used to traverse the
+ // fields of a point without constructing the in-memory map.
+ FieldIterator() FieldIterator
+}
+
+// FieldType represents the type of a field.
+type FieldType int
+
+const (
+ // Integer indicates the field's type is integer.
+ Integer FieldType = iota
+
+ // Float indicates the field's type is float.
+ Float
+
+ // Boolean indicates the field's type is boolean.
+ Boolean
+
+ // String indicates the field's type is string.
+ String
+
+ // Empty is used to indicate that there is no field.
+ Empty
+
+ // Unsigned indicates the field's type is an unsigned integer.
+ Unsigned
+)
+
+// FieldIterator provides a low-allocation interface to iterate through a point's fields.
+type FieldIterator interface {
+ // Next indicates whether there are any fields remaining.
+ Next() bool
+
+ // FieldKey returns the key of the current field.
+ FieldKey() []byte
+
+ // Type returns the FieldType of the current field.
+ Type() FieldType
+
+ // StringValue returns the string value of the current field.
+ StringValue() string
+
+ // IntegerValue returns the integer value of the current field.
+ IntegerValue() (int64, error)
+
+ // UnsignedValue returns the unsigned value of the current field.
+ UnsignedValue() (uint64, error)
+
+ // BooleanValue returns the boolean value of the current field.
+ BooleanValue() (bool, error)
+
+ // FloatValue returns the float value of the current field.
+ FloatValue() (float64, error)
+
+ // Reset resets the iterator to its initial state.
+ Reset()
+}
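+
+// exampleCountFloats is an editor's sketch, not part of the upstream file:
+// counting the float fields of a point with the iterator, avoiding the map
+// allocation that Fields() would incur.
+func exampleCountFloats(p Point) (n int) {
+ iter := p.FieldIterator()
+ for iter.Next() {
+ if iter.Type() == Float {
+ n++
+ }
+ }
+ return n
+}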
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+// Len implements sort.Interface.
+func (a Points) Len() int { return len(a) }
+
+// Less implements sort.Interface.
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+
+// Swap implements sort.Interface.
+func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+ time time.Time
+
+ // text encoding of the measurement and tags.
+ // The key must always be stored with its tags sorted; if the original line
+ // was not sorted, we need to re-sort it.
+ key []byte
+
+ // text encoding of field data
+ fields []byte
+
+ // text encoding of timestamp
+ ts []byte
+
+ // cached version of parsed fields from data
+ cachedFields map[string]interface{}
+
+ // cached version of parsed name from key
+ cachedName string
+
+ // cached version of parsed tags
+ cachedTags Tags
+
+ it fieldIterator
+}
+
+// type assertions
+var (
+ _ Point = (*point)(nil)
+ _ FieldIterator = (*point)(nil)
+)
+
+const (
+ // the number of characters for the largest possible int64 (9223372036854775807)
+ maxInt64Digits = 19
+
+ // the number of characters for the smallest possible int64 (-9223372036854775808)
+ minInt64Digits = 20
+
+ // the number of characters for the largest possible uint64 (18446744073709551615)
+ maxUint64Digits = 20
+
+ // the number of characters required for the largest float64 before a range check
+ // would occur during parsing
+ maxFloat64Digits = 25
+
+ // the number of characters required for the smallest float64 before a range check
+ // would occur during parsing
+ minFloat64Digits = 27
+)
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines. If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
+func ParsePoints(buf []byte) ([]Point, error) {
+ return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
+
+// ParsePointsString is identical to ParsePoints but accepts a string.
+func ParsePointsString(buf string) ([]Point, error) {
+ return ParsePoints([]byte(buf))
+}
+
+// ParseKey returns the measurement name and tags from a point.
+//
+// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParseKey(buf []byte) (string, Tags) {
+ meas, tags := ParseKeyBytes(buf)
+ return string(meas), tags
+}
+
+// ParseKeyBytes returns the measurement name and tags of a point's key as raw bytes.
+func ParseKeyBytes(buf []byte) ([]byte, Tags) {
+ // Ignore the error because scanMeasurement returns "missing fields" which we ignore
+ // when just parsing a key
+ state, i, _ := scanMeasurement(buf, 0)
+
+ var tags Tags
+ if state == tagKeyState {
+ tags = parseTags(buf)
+ // scanMeasurement returns the location of the comma if there are tags, strip that off
+ return buf[:i-1], tags
+ }
+ return buf[:i], tags
+}
+
+// ParseTags returns the tags of a point's key.
+func ParseTags(buf []byte) Tags {
+ return parseTags(buf)
+}
+
+// ParseName returns the measurement name of a point's key.
+func ParseName(buf []byte) ([]byte, error) {
+ // Ignore the error because scanMeasurement returns "missing fields" which we ignore
+ // when just parsing a key
+ state, i, _ := scanMeasurement(buf, 0)
+ if state == tagKeyState {
+ return buf[:i-1], nil
+ }
+ return buf[:i], nil
+}
+
+// ParsePointsWithPrecision is similar to ParsePoints, but allows the
+// caller to provide a precision for time.
+//
+// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+ points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1)
+ var (
+ pos int
+ block []byte
+ failed []string
+ )
+ for pos < len(buf) {
+ pos, block = scanLine(buf, pos)
+ pos++
+
+ if len(block) == 0 {
+ continue
+ }
+
+ // lines which start with '#' are comments
+ start := skipWhitespace(block, 0)
+
+ // If line is all whitespace, just skip it
+ if start >= len(block) {
+ continue
+ }
+
+ if block[start] == '#' {
+ continue
+ }
+
+ // strip the newline if one is present
+ if block[len(block)-1] == '\n' {
+ block = block[:len(block)-1]
+ }
+
+ pt, err := parsePoint(block[start:], defaultTime, precision)
+ if err != nil {
+ failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
+ } else {
+ points = append(points, pt)
+ }
+
+ }
+ if len(failed) > 0 {
+ return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+ }
+ return points, nil
+
+}
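+
+// exampleParseSeconds is an editor's sketch, not part of the upstream file:
+// parsing two made-up lines of line protocol whose trailing timestamps are
+// interpreted with second precision.
+func exampleParseSeconds() ([]Point, error) {
+ buf := []byte("cpu,host=a value=0.5 1434055562\nmem,host=a used=42i 1434055562\n")
+ return ParsePointsWithPrecision(buf, time.Now().UTC(), "s")
+}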
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+ // scan the first block, which is measurement[,tag1=value1,tag2=value2...]
+ pos, key, err := scanKey(buf, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // measurement name is required
+ if len(key) == 0 {
+ return nil, fmt.Errorf("missing measurement")
+ }
+
+ if len(key) > MaxKeyLength {
+ return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+ }
+
+ // scan the second block, which is field1=value1[,field2=value2,...]
+ pos, fields, err := scanFields(buf, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ // at least one field is required
+ if len(fields) == 0 {
+ return nil, fmt.Errorf("missing fields")
+ }
+
+ var maxKeyErr error
+ walkFields(fields, func(k, v []byte) bool {
+ if sz := seriesKeySize(key, k); sz > MaxKeyLength {
+ maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+ return false
+ }
+ return true
+ })
+
+ if maxKeyErr != nil {
+ return nil, maxKeyErr
+ }
+
+ // scan the last block which is an optional integer timestamp
+ pos, ts, err := scanTime(buf, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ pt := &point{
+ key: key,
+ fields: fields,
+ ts: ts,
+ }
+
+ if len(ts) == 0 {
+ pt.time = defaultTime
+ pt.SetPrecision(precision)
+ } else {
+ ts, err := parseIntBytes(ts, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ pt.time, err = SafeCalcTime(ts, precision)
+ if err != nil {
+ return nil, err
+ }
+
+ // Determine if there are illegal non-whitespace characters after the
+ // timestamp block.
+ for pos < len(buf) {
+ if buf[pos] != ' ' {
+ return nil, ErrInvalidPoint
+ }
+ pos++
+ }
+ }
+ return pt, nil
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified.
+func GetPrecisionMultiplier(precision string) int64 {
+ d := time.Nanosecond
+ switch precision {
+ case "u":
+ d = time.Microsecond
+ case "ms":
+ d = time.Millisecond
+ case "s":
+ d = time.Second
+ case "m":
+ d = time.Minute
+ case "h":
+ d = time.Hour
+ }
+ return int64(d)
+}
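+
+// Editor's illustration: GetPrecisionMultiplier("ms") returns 1000000, the
+// number of nanoseconds per millisecond, so dividing a UnixNano timestamp by
+// it yields milliseconds.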
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf. If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+
+ i = start
+
+ // Tracks whether the tags are sorted; assume they are.
+ sorted := true
+
+ // indices holds the indexes within buf of the start of each tag. For example,
+ // a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20],
+ // which indicates that the first tag starts at buf[4], the second at buf[11], and
+ // the last at buf[20].
+ indices := make([]int, 100)
+
+ // tracks how many commas we've seen so we know how many entries of indices
+ // are in use. Since indices is an arbitrarily large slice,
+ // we need to know how many of its values are valid.
+ commas := 0
+
+ // First scan the Point's measurement.
+ state, i, err := scanMeasurement(buf, i)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+
+ // Optionally scan tags if needed.
+ if state == tagKeyState {
+ i, commas, indices, err = scanTags(buf, i, indices)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ }
+
+ // Now that we know where the key region is within buf and where the tags are located,
+ // we need to determine whether duplicate tags exist and whether the tags are sorted.
+ // This iterates over the list, comparing each tag with the one that follows it.
+ for j := 0; j < commas-1; j++ {
+ // get the left and right tags
+ _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+ _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=')
+
+ // If left is greater than right, the tags are not sorted. We do not have to
+ // continue because the short path no longer works.
+ // If the tags are equal, then there are duplicate tags, and we should abort.
+ // If the tags are not sorted, this pass may not find duplicate tags and we
+ // need to do a more exhaustive search later.
+ if cmp := bytes.Compare(left, right); cmp > 0 {
+ sorted = false
+ break
+ } else if cmp == 0 {
+ return i, buf[start:i], fmt.Errorf("duplicate tags")
+ }
+ }
+
+ // If the tags are not sorted, then sort them. This sort is inline and
+ // uses the tag indices we created earlier. The actual buffer is not sorted; the
+ // indices are, using the buffer for value comparison. After the indices are sorted,
+ // the buffer is reconstructed from the sorted indices.
+ if !sorted && commas > 0 {
+ // Get the measurement name for later
+ measurement := buf[start : indices[0]-1]
+
+ // Sort the indices
+ indices := indices[:commas]
+ insertionSort(0, commas, buf, indices)
+
+ // Create a new key using the measurement and sorted indices
+ b := make([]byte, len(buf[start:i]))
+ pos := copy(b, measurement)
+ for _, i := range indices {
+ b[pos] = ','
+ pos++
+ _, v := scanToSpaceOr(buf, i, ',')
+ pos += copy(b[pos:], v)
+ }
+
+ // Check again for duplicate tags now that the tags are sorted.
+ for j := 0; j < commas-1; j++ {
+ // get the left and right tags
+ _, left := scanTo(buf[indices[j]:], 0, '=')
+ _, right := scanTo(buf[indices[j+1]:], 0, '=')
+
+ // If the tags are equal, then there are duplicate tags, and we should abort.
+ // If the tags are not sorted, this pass may not find duplicate tags and we
+ // need to do a more exhaustive search later.
+ if bytes.Equal(left, right) {
+ return i, b, fmt.Errorf("duplicate tags")
+ }
+ }
+
+ return i, b, nil
+ }
+
+ return i, buf[start:i], nil
+}
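+
+// Editor's illustration: given the unsorted input `cpu,zone=c,host=a value=1`,
+// scanKey returns the reconstructed key `cpu,host=a,zone=c`, so equivalent
+// series always produce the same key bytes regardless of tag order.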
+
+// The following constants allow us to specify which state to move to
+// next when scanning sections of a Point.
+const (
+ tagKeyState = iota
+ tagValueState
+ fieldsState
+)
+
+// scanMeasurement examines the measurement part of a Point, returning
+// the next state to move to, and the current location in the buffer.
+func scanMeasurement(buf []byte, i int) (int, int, error) {
+ // Check first byte of measurement, anything except a comma is fine.
+ // It can't be a space, since whitespace is stripped prior to this
+ // function call.
+ if i >= len(buf) || buf[i] == ',' {
+ return -1, i, fmt.Errorf("missing measurement")
+ }
+
+ for {
+ i++
+ if i >= len(buf) {
+ // cpu
+ return -1, i, fmt.Errorf("missing fields")
+ }
+
+ if buf[i-1] == '\\' {
+ // Skip character (it's escaped).
+ continue
+ }
+
+ // Unescaped comma; move onto scanning the tags.
+ if buf[i] == ',' {
+ return tagKeyState, i + 1, nil
+ }
+
+ // Unescaped space; move onto scanning the fields.
+ if buf[i] == ' ' {
+ // cpu value=1.0
+ return fieldsState, i, nil
+ }
+ }
+}
+
+// scanTags examines all the tags in a Point, keeping track of, and
+// returning, the updated indices slice, the number of commas, and the
+// location in buf at which to start examining the Point's fields.
+func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
+ var (
+ err error
+ commas int
+ state = tagKeyState
+ )
+
+ for {
+ switch state {
+ case tagKeyState:
+ // Grow our indices slice if we have too many tags.
+ if commas >= len(indices) {
+ newIndices := make([]int, cap(indices)*2)
+ copy(newIndices, indices)
+ indices = newIndices
+ }
+ indices[commas] = i
+ commas++
+
+ i, err = scanTagsKey(buf, i)
+ state = tagValueState // tag value always follows a tag key
+ case tagValueState:
+ state, i, err = scanTagsValue(buf, i)
+ case fieldsState:
+ indices[commas] = i + 1
+ return i, commas, indices, nil
+ }
+
+ if err != nil {
+ return i, commas, indices, err
+ }
+ }
+}
+
+// scanTagsKey scans each character in a tag key.
+func scanTagsKey(buf []byte, i int) (int, error) {
+ // First character of the key.
+ if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
+ // cpu,{'', ' ', ',', '='}
+ return i, fmt.Errorf("missing tag key")
+ }
+
+ // Examine each character in the tag key until we hit an unescaped
+ // equals (the tag value), or we hit an error (i.e., unescaped
+ // space or comma).
+ for {
+ i++
+
+ // Either we reached the end of the buffer or we hit an
+ // unescaped comma or space.
+ if i >= len(buf) ||
+ ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
+ // cpu,tag{'', ' ', ','}
+ return i, fmt.Errorf("missing tag value")
+ }
+
+ if buf[i] == '=' && buf[i-1] != '\\' {
+ // cpu,tag=
+ return i + 1, nil
+ }
+ }
+}
+
+// scanTagsValue scans each character in a tag value.
+func scanTagsValue(buf []byte, i int) (int, int, error) {
+ // Tag value cannot be empty.
+ if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
+ // cpu,tag={',', ' '}
+ return -1, i, fmt.Errorf("missing tag value")
+ }
+
+ // Examine each character in the tag value until we hit an unescaped
+ // comma (move onto next tag key), an unescaped space (move onto
+ // fields), or we error out.
+ for {
+ i++
+ if i >= len(buf) {
+ // cpu,tag=value
+ return -1, i, fmt.Errorf("missing fields")
+ }
+
+ // An unescaped equals sign is an invalid tag value.
+ if buf[i] == '=' && buf[i-1] != '\\' {
+ // cpu,tag={'=', 'fo=o'}
+ return -1, i, fmt.Errorf("invalid tag format")
+ }
+
+ if buf[i] == ',' && buf[i-1] != '\\' {
+ // cpu,tag=foo,
+ return tagKeyState, i + 1, nil
+ }
+
+ // cpu,tag=foo value=1.0
+ // cpu, tag=foo\= value=1.0
+ if buf[i] == ' ' && buf[i-1] != '\\' {
+ return fieldsState, i, nil
+ }
+ }
+}
+
+func insertionSort(l, r int, buf []byte, indices []int) {
+ for i := l + 1; i < r; i++ {
+ for j := i; j > l && less(buf, indices, j, j-1); j-- {
+ indices[j], indices[j-1] = indices[j-1], indices[j]
+ }
+ }
+}
+
+func less(buf []byte, indices []int, i, j int) bool {
+ // This grabs the tag names for i & j; it ignores the values.
+ _, a := scanTo(buf, indices[i], '=')
+ _, b := scanTo(buf, indices[j], '=')
+ return bytes.Compare(a, b) < 0
+}
+
+// scanFields scans buf, starting at i for the fields section of a point. It returns
+// the ending position and the byte slice of the fields within buf.
+func scanFields(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+ i = start
+ quoted := false
+
+ // tracks how many '=' we've seen
+ equals := 0
+
+ // tracks how many commas we've seen
+ commas := 0
+
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // escaped characters?
+ if buf[i] == '\\' && i+1 < len(buf) {
+ i += 2
+ continue
+ }
+
+ // If the value is quoted, scan until we get to the end quote.
+ // Quotes are only significant in field values; they are not
+ // significant in field keys.
+ if buf[i] == '"' && equals > commas {
+ quoted = !quoted
+ i++
+ continue
+ }
+
+ // If we see an =, ensure that there is at least one char before and after it
+ if buf[i] == '=' && !quoted {
+ equals++
+
+ // check for "... =123" but allow "a\ =123"
+ if buf[i-1] == ' ' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing field key")
+ }
+
+ // check for "...a=123,=456" but allow "a=123,a\,=456"
+ if buf[i-1] == ',' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing field key")
+ }
+
+ // check for "... value="
+ if i+1 >= len(buf) {
+ return i, buf[start:i], fmt.Errorf("missing field value")
+ }
+
+ // check for "... value=,value2=..."
+ if buf[i+1] == ',' || buf[i+1] == ' ' {
+ return i, buf[start:i], fmt.Errorf("missing field value")
+ }
+
+ if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+ var err error
+ i, err = scanNumber(buf, i+1)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ continue
+ }
+ // If next byte is not a double-quote, the value must be a boolean
+ if buf[i+1] != '"' {
+ var err error
+ i, _, err = scanBoolean(buf, i+1)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ continue
+ }
+ }
+
+ if buf[i] == ',' && !quoted {
+ commas++
+ }
+
+ // reached end of block?
+ if buf[i] == ' ' && !quoted {
+ break
+ }
+ i++
+ }
+
+ if quoted {
+ return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+ }
+
+ // check that all field sections had keys and values (e.g. prevent "a=1,b")
+ if equals == 0 || commas != equals-1 {
+ return i, buf[start:i], fmt.Errorf("invalid field format")
+ }
+
+ return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i, for the time section of a point. It
+// returns the ending position and the byte slice of the timestamp within buf,
+// and an error if the timestamp is not in the correct numeric format.
+func scanTime(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+ i = start
+
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // Reached end of block or trailing whitespace?
+ if buf[i] == '\n' || buf[i] == ' ' {
+ break
+ }
+
+ // Handle negative timestamps
+ if i == start && buf[i] == '-' {
+ i++
+ continue
+ }
+
+ // Timestamps should be integers; verify that here so we don't have to
+ // actually parse the timestamp until it is needed.
+ if buf[i] < '0' || buf[i] > '9' {
+ return i, buf[start:i], fmt.Errorf("bad timestamp")
+ }
+ i++
+ }
+ return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+ return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float. It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+ start := i
+ var isInt, isUnsigned bool
+
+ // Is negative number?
+ if i < len(buf) && buf[i] == '-' {
+ i++
+ // There must be more characters now, as just '-' is illegal.
+ if i == len(buf) {
+ return i, ErrInvalidNumber
+ }
+ }
+
+ // tracks whether we've seen a decimal point
+ decimal := false
+
+ // indicates the number is a float in scientific notation
+ scientific := false
+
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' || buf[i] == ' ' {
+ break
+ }
+
+ if buf[i] == 'i' && i > start && !(isInt || isUnsigned) {
+ isInt = true
+ i++
+ continue
+ } else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) {
+ isUnsigned = true
+ i++
+ continue
+ }
+
+ if buf[i] == '.' {
+ // Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+ if decimal {
+ return i, ErrInvalidNumber
+ }
+ decimal = true
+ }
+
+ // `e` is valid for floats but not as the first char
+ if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+ scientific = true
+ i++
+ continue
+ }
+
+ // + and - are only valid at this point if they follow an e (scientific notation)
+ if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+ i++
+ continue
+ }
+
+ // NaN is an unsupported value
+ if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+ return i, ErrInvalidNumber
+ }
+
+ if !isNumeric(buf[i]) {
+ return i, ErrInvalidNumber
+ }
+ i++
+ }
+
+ if (isInt || isUnsigned) && (decimal || scientific) {
+ return i, ErrInvalidNumber
+ }
+
+ numericDigits := i - start
+ if isInt {
+ numericDigits--
+ }
+ if decimal {
+ numericDigits--
+ }
+ if buf[start] == '-' {
+ numericDigits--
+ }
+
+ if numericDigits == 0 {
+ return i, ErrInvalidNumber
+ }
+
+ // It's more common that numbers will be within the min/max range for their type, but we need to prevent
+ // out-of-range numbers from being parsed successfully. This uses some simple heuristics to decide
+ // whether we should parse the number to the actual type. It does not do it all the time because it incurs
+ // extra allocations and we end up converting the type again when writing points to disk.
+ if isInt {
+ // Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+ if buf[i-1] != 'i' {
+ return i, ErrInvalidNumber
+ }
+ // Parse the int to check bounds if the number of digits could be larger than the max range.
+ // We subtract 1 from the index to remove the `i` from our tests.
+ if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+ if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
+ return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+ }
+ }
+ } else if isUnsigned {
+ // Return an error if uint64 support has not been enabled.
+ if !enableUint64Support {
+ return i, ErrInvalidNumber
+ }
+ // Make sure the last char is a 'u' for unsigned
+ if buf[i-1] != 'u' {
+ return i, ErrInvalidNumber
+ }
+ // Make sure the first char is not a '-' for unsigned
+ if buf[start] == '-' {
+ return i, ErrInvalidNumber
+ }
+ // Parse the uint to check bounds if the number of digits could be larger than the max range.
+ // We subtract 1 from the index to remove the `u` from our tests.
+ if len(buf[start:i-1]) >= maxUint64Digits {
+ if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil {
+ return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err)
+ }
+ }
+ } else {
+ // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+ if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+ if _, err := parseFloatBytes(buf[start:i], 64); err != nil {
+ return i, fmt.Errorf("invalid float")
+ }
+ }
+ }
+
+ return i, nil
+}
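+
+// Editor's illustration of the rules above: the scanner accepts literals such
+// as `1`, `-2.5`, `1e-3`, `42i` and, with uint support enabled, `42u`; it
+// rejects `1.1.1`, a bare `-`, `9i10`, NaN, and int/uint literals combined
+// with decimals or exponents such as `1.5i`.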
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an error
+// if an invalid boolean is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+ start := i
+
+ if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ i++
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' || buf[i] == ' ' {
+ break
+ }
+ i++
+ }
+
+ // Single char bool (t, T, f, F) is ok
+ if i-start == 1 {
+ return i, buf[start:i], nil
+ }
+
+ // length must be 4 for true or TRUE
+ if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ // length must be 5 for false or FALSE
+ if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ // Otherwise
+ valid := false
+ switch buf[start] {
+ case 't':
+ valid = bytes.Equal(buf[start:i], []byte("true"))
+ case 'f':
+ valid = bytes.Equal(buf[start:i], []byte("false"))
+ case 'T':
+ valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+ case 'F':
+ valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+ }
+
+ if !valid {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ return i, buf[start:i], nil
+
+}
+
+// skipWhitespace returns the end position within buf, starting at i, after
+// scanning over spaces, tabs, and NUL bytes.
+func skipWhitespace(buf []byte, i int) int {
+ for i < len(buf) {
+ if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
+ break
+ }
+ i++
+ }
+ return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+ start := i
+ quoted := false
+ fields := false
+
+ // tracks how many '=' and commas we've seen
+ // this duplicates some of the functionality in scanFields
+ equals := 0
+ commas := 0
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // skip past escaped characters
+ if buf[i] == '\\' && i+2 < len(buf) {
+ i += 2
+ continue
+ }
+
+ if buf[i] == ' ' {
+ fields = true
+ }
+
+ // If we see a double quote, make sure it is not escaped
+ if fields {
+ if !quoted && buf[i] == '=' {
+ i++
+ equals++
+ continue
+ } else if !quoted && buf[i] == ',' {
+ i++
+ commas++
+ continue
+ } else if buf[i] == '"' && equals > commas {
+ i++
+ quoted = !quoted
+ continue
+ }
+ }
+
+ if buf[i] == '\n' && !quoted {
+ break
+ }
+
+ i++
+ }
+
+ return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with the stop byte, where the stop
+// byte has not been escaped.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+ start := i
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // Reached unescaped stop value?
+ if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
+ break
+ }
+ i++
+ }
+
+ return i, buf[start:i]
+}
+
+// scanToSpaceOr returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with an unescaped stop byte or a space.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+ start := i
+ if buf[i] == stop || buf[i] == ' ' {
+ return i, buf[start:i]
+ }
+
+ for {
+ i++
+ if buf[i-1] == '\\' {
+ continue
+ }
+
+ // reached the end of buf?
+ if i >= len(buf) {
+ return i, buf[start:i]
+ }
+
+ // reached end of block?
+ if buf[i] == stop || buf[i] == ' ' {
+ return i, buf[start:i]
+ }
+ }
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+ start := i
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' && buf[i-1] != '\\' {
+ break
+ }
+ i++
+ }
+ if i > len(buf) {
+ return i, nil
+ }
+ return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+ start := i
+ quoted := false
+ for i < len(buf) {
+ // The only escape chars for a field value are double-quote and backslash
+ if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') {
+ i += 2
+ continue
+ }
+
+ // Quoted value? (e.g. string)
+ if buf[i] == '"' {
+ i++
+ quoted = !quoted
+ continue
+ }
+
+ if buf[i] == ',' && !quoted {
+ break
+ }
+ i++
+ }
+ return i, buf[start:i]
+}
+
+// EscapeMeasurement escapes any commas or spaces in in so that the result
+// can be safely embedded in a point key.
+func EscapeMeasurement(in []byte) []byte {
+ for b, esc := range measurementEscapeCodes {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+ for b, esc := range measurementEscapeCodes {
+ in = bytes.Replace(in, esc, []byte{b}, -1)
+ }
+ return in
+}
+
+func escapeTag(in []byte) []byte {
+ for b, esc := range tagEscapeCodes {
+ if bytes.IndexByte(in, b) != -1 {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ }
+ return in
+}
+
+func unescapeTag(in []byte) []byte {
+ if bytes.IndexByte(in, '\\') == -1 {
+ return in
+ }
+
+ for b, esc := range tagEscapeCodes {
+ if bytes.IndexByte(in, b) != -1 {
+ in = bytes.Replace(in, esc, []byte{b}, -1)
+ }
+ }
+ return in
+}
+
+// escapeStringFieldReplacer replaces double quotes and backslashes
+// with the same character preceded by a backslash.
+// As of Go 1.7 this benchmarked better, in both allocations and CPU time,
+// than iterating through a string byte-by-byte and appending to a new byte
+// slice, than calling strings.Replace twice, and than (*regexp.Regexp).ReplaceAllString.
+var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
+
+// EscapeStringField returns a copy of in with any double quotes or
+// backslashes replaced by their escaped values.
+func EscapeStringField(in string) string {
+ return escapeStringFieldReplacer.Replace(in)
+}
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped.
+func unescapeStringField(in string) string {
+ if strings.IndexByte(in, '\\') == -1 {
+ return in
+ }
+
+ var out []byte
+ i := 0
+ for {
+ if i >= len(in) {
+ break
+ }
+ // unescape backslashes
+ if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+ out = append(out, '\\')
+ i += 2
+ continue
+ }
+ // unescape double-quotes
+ if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+ out = append(out, '"')
+ i += 2
+ continue
+ }
+ out = append(out, in[i])
+ i++
+
+ }
+ return string(out)
+}
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
+func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {
+ key, err := pointKey(name, tags, fields, t)
+ if err != nil {
+ return nil, err
+ }
+
+ return &point{
+ key: key,
+ time: t,
+ fields: fields.MarshalBinary(),
+ }, nil
+}
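+
+// exampleNewPoint is an editor's sketch, not part of the upstream file:
+// constructing a point with one tag and one field. Per the checks below,
+// NaN field values and empty field names are rejected.
+func exampleNewPoint() (Point, error) {
+ return NewPoint(
+ "cpu",
+ NewTags(map[string]string{"host": "serverA"}),
+ Fields{"value": 0.64},
+ time.Unix(1434055562, 0),
+ )
+}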
+
+// pointKey checks some basic requirements for valid points, and returns the
+// key, along with a possible error.
+func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {
+ if len(fields) == 0 {
+ return nil, ErrPointMustHaveAField
+ }
+
+ if !t.IsZero() {
+ if err := CheckTime(t); err != nil {
+ return nil, err
+ }
+ }
+
+ for key, value := range fields {
+ switch value := value.(type) {
+ case float64:
+ // Ensure the caller validates and handles invalid field values
+ if math.IsNaN(value) {
+ return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+ }
+ case float32:
+ // Ensure the caller validates and handles invalid field values
+ if math.IsNaN(float64(value)) {
+ return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+ }
+ }
+ if len(key) == 0 {
+ return nil, fmt.Errorf("all fields must have non-empty names")
+ }
+ }
+
+ key := MakeKey([]byte(measurement), tags)
+ for field := range fields {
+ sz := seriesKeySize(key, []byte(field))
+ if sz > MaxKeyLength {
+ return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+ }
+ }
+
+ return key, nil
+}
+
+func seriesKeySize(key, field []byte) int {
+ // 4 is the length of the tsm1.fieldKeySeparator constant. It's inlined here to avoid a circular
+ // dependency.
+ return len(key) + 4 + len(field)
+}
+
+// NewPointFromBytes returns a new Point from a marshalled Point.
+func NewPointFromBytes(b []byte) (Point, error) {
+ p := &point{}
+ if err := p.UnmarshalBinary(b); err != nil {
+ return nil, err
+ }
+
+ // This does some basic validation to ensure there are fields and they
+ // can be unmarshalled as well.
+ iter := p.FieldIterator()
+ var hasField bool
+ for iter.Next() {
+ if len(iter.FieldKey()) == 0 {
+ continue
+ }
+ hasField = true
+ switch iter.Type() {
+ case Float:
+ _, err := iter.FloatValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ case Integer:
+ _, err := iter.IntegerValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ case Unsigned:
+ _, err := iter.UnsignedValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ case String:
+ // Skip since this won't return an error
+ case Boolean:
+ _, err := iter.BooleanValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ }
+ }
+
+ if !hasField {
+ return nil, ErrPointMustHaveAField
+ }
+
+ return p, nil
+}
+
+// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN) is passed, this function panics.
+func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
+ pt, err := NewPoint(name, tags, fields, time)
+ if err != nil {
+ panic(err.Error())
+ }
+ return pt
+}
+
+// Key returns the key (measurement joined with tags) of the point.
+func (p *point) Key() []byte {
+ return p.key
+}
+
+func (p *point) name() []byte {
+ _, name := scanTo(p.key, 0, ',')
+ return name
+}
+
+func (p *point) Name() []byte {
+ return escape.Unescape(p.name())
+}
+
+// SetName updates the measurement name for the point.
+func (p *point) SetName(name string) {
+ p.cachedName = ""
+ p.key = MakeKey([]byte(name), p.Tags())
+}
+
+// Time returns the timestamp for the point.
+func (p *point) Time() time.Time {
+ return p.time
+}
+
+// SetTime updates the timestamp for the point.
+func (p *point) SetTime(t time.Time) {
+ p.time = t
+}
+
+// Round will round the timestamp of the point to the given duration.
+func (p *point) Round(d time.Duration) {
+ p.time = p.time.Round(d)
+}
+
+// Tags returns the tag set for the point.
+func (p *point) Tags() Tags {
+ if p.cachedTags != nil {
+ return p.cachedTags
+ }
+ p.cachedTags = parseTags(p.key)
+ return p.cachedTags
+}
+
+func (p *point) HasTag(tag []byte) bool {
+ if len(p.key) == 0 {
+ return false
+ }
+
+ var exists bool
+ walkTags(p.key, func(key, value []byte) bool {
+ if bytes.Equal(tag, key) {
+ exists = true
+ return false
+ }
+ return true
+ })
+
+ return exists
+}
+
+func walkTags(buf []byte, fn func(key, value []byte) bool) {
+ if len(buf) == 0 {
+ return
+ }
+
+ pos, name := scanTo(buf, 0, ',')
+
+ // it's an empty key, so there are no tags
+ if len(name) == 0 {
+ return
+ }
+
+ hasEscape := bytes.IndexByte(buf, '\\') != -1
+ i := pos + 1
+ var key, value []byte
+ for {
+ if i >= len(buf) {
+ break
+ }
+ i, key = scanTo(buf, i, '=')
+ i, value = scanTagValue(buf, i+1)
+
+ if len(value) == 0 {
+ continue
+ }
+
+ if hasEscape {
+ if !fn(unescapeTag(key), unescapeTag(value)) {
+ return
+ }
+ } else {
+ if !fn(key, value) {
+ return
+ }
+ }
+
+ i++
+ }
+}
+
+// walkFields walks each field key and value via fn. If fn returns false, the iteration
+// is stopped. The values are the raw byte slices and not the converted types.
+func walkFields(buf []byte, fn func(key, value []byte) bool) {
+ var i int
+ var key, val []byte
+ for len(buf) > 0 {
+ i, key = scanTo(buf, 0, '=')
+ buf = buf[i+1:]
+ i, val = scanFieldValue(buf, 0)
+ buf = buf[i:]
+ if !fn(key, val) {
+ break
+ }
+
+ // slice off comma
+ if len(buf) > 0 {
+ buf = buf[1:]
+ }
+ }
+}
+
+func parseTags(buf []byte) Tags {
+ if len(buf) == 0 {
+ return nil
+ }
+
+ tags := make(Tags, bytes.Count(buf, []byte(",")))
+ p := 0
+ walkTags(buf, func(key, value []byte) bool {
+ tags[p].Key = key
+ tags[p].Value = value
+ p++
+ return true
+ })
+ return tags
+}
+
+// MakeKey creates a key for a set of tags.
+func MakeKey(name []byte, tags Tags) []byte {
+ // unescape the name and then re-escape it to avoid double escaping.
+ // The key should always be stored in escaped form.
+ return append(EscapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
+}
+
+// SetTags replaces the tags for the point.
+func (p *point) SetTags(tags Tags) {
+ p.key = MakeKey(p.Name(), tags)
+ p.cachedTags = tags
+}
+
+// AddTag adds or replaces a tag value for a point.
+func (p *point) AddTag(key, value string) {
+ tags := p.Tags()
+ tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})
+ sort.Sort(tags)
+ p.cachedTags = tags
+ p.key = MakeKey(p.Name(), tags)
+}
+
+// Fields returns the fields for the point.
+func (p *point) Fields() (Fields, error) {
+ if p.cachedFields != nil {
+ return p.cachedFields, nil
+ }
+ cf, err := p.unmarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ p.cachedFields = cf
+ return p.cachedFields, nil
+}
+
+// SetPrecision truncates the point's time to the specified precision.
+func (p *point) SetPrecision(precision string) {
+ switch precision {
+ case "n":
+ case "u":
+ p.SetTime(p.Time().Truncate(time.Microsecond))
+ case "ms":
+ p.SetTime(p.Time().Truncate(time.Millisecond))
+ case "s":
+ p.SetTime(p.Time().Truncate(time.Second))
+ case "m":
+ p.SetTime(p.Time().Truncate(time.Minute))
+ case "h":
+ p.SetTime(p.Time().Truncate(time.Hour))
+ }
+}
+
+// String returns the string representation of the point.
+func (p *point) String() string {
+ if p.Time().IsZero() {
+ return string(p.Key()) + " " + string(p.fields)
+ }
+ return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
+}
+
+// AppendString appends the string representation of the point to buf.
+func (p *point) AppendString(buf []byte) []byte {
+ buf = append(buf, p.key...)
+ buf = append(buf, ' ')
+ buf = append(buf, p.fields...)
+
+ if !p.time.IsZero() {
+ buf = append(buf, ' ')
+ buf = strconv.AppendInt(buf, p.UnixNano(), 10)
+ }
+
+ return buf
+}
+
+// StringSize returns the length of the string that would be returned by String().
+func (p *point) StringSize() int {
+ size := len(p.key) + len(p.fields) + 1
+
+ if !p.time.IsZero() {
+ digits := 1 // even "0" has one digit
+ t := p.UnixNano()
+ if t < 0 {
+ // account for negative sign, then negate
+ digits++
+ t = -t
+ }
+ for t > 9 { // already accounted for one digit
+ digits++
+ t /= 10
+ }
+ size += digits + 1 // digits and a space
+ }
+
+ return size
+}
+
+// MarshalBinary returns a binary representation of the point.
+func (p *point) MarshalBinary() ([]byte, error) {
+ if len(p.fields) == 0 {
+ return nil, ErrPointMustHaveAField
+ }
+
+ tb, err := p.time.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+
+ b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))
+ i := 0
+
+ binary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))
+ i += 4
+
+ i += copy(b[i:], p.key)
+
+ binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))
+ i += 4
+
+ i += copy(b[i:], p.fields)
+
+ copy(b[i:], tb)
+ return b, nil
+}
+
+// UnmarshalBinary decodes a binary representation of the point into a point struct.
+func (p *point) UnmarshalBinary(b []byte) error {
+ var n int
+
+ // Read key length.
+ if len(b) < 4 {
+ return io.ErrShortBuffer
+ }
+ n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]
+
+ // Read key.
+ if len(b) < n {
+ return io.ErrShortBuffer
+ }
+ p.key, b = b[:n], b[n:]
+
+ // Read fields length.
+ if len(b) < 4 {
+ return io.ErrShortBuffer
+ }
+ n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]
+
+ // Read fields.
+ if len(b) < n {
+ return io.ErrShortBuffer
+ }
+ p.fields, b = b[:n], b[n:]
+
+ // Read timestamp.
+ if err := p.time.UnmarshalBinary(b); err != nil {
+ return err
+ }
+ return nil
+}
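+
+// exampleRoundTrip is an editor's sketch, not part of the upstream file: the
+// encoding above is [4-byte key length][key][4-byte fields length][fields]
+// [time.MarshalBinary], so a marshal/unmarshal round trip preserves the key,
+// fields and timestamp of a point.
+func exampleRoundTrip(p Point) (Point, error) {
+ b, err := p.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ return NewPointFromBytes(b)
+}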
+
+// PrecisionString returns a string representation of the point. If there
+// is a timestamp associated with the point then it will be specified in the
+// given unit.
+func (p *point) PrecisionString(precision string) string {
+ if p.Time().IsZero() {
+ return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+ }
+ return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+ p.UnixNano()/GetPrecisionMultiplier(precision))
+}
+
+// RoundedString returns a string representation of the point. If there
+// is a timestamp associated with the point, then it will be rounded to the
+// given duration.
+func (p *point) RoundedString(d time.Duration) string {
+ if p.Time().IsZero() {
+ return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+ }
+ return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+ p.time.Round(d).UnixNano())
+}
+
+func (p *point) unmarshalBinary() (Fields, error) {
+ iter := p.FieldIterator()
+ fields := make(Fields, 8)
+ for iter.Next() {
+ if len(iter.FieldKey()) == 0 {
+ continue
+ }
+ switch iter.Type() {
+ case Float:
+ v, err := iter.FloatValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ fields[string(iter.FieldKey())] = v
+ case Integer:
+ v, err := iter.IntegerValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ fields[string(iter.FieldKey())] = v
+ case Unsigned:
+ v, err := iter.UnsignedValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ fields[string(iter.FieldKey())] = v
+ case String:
+ fields[string(iter.FieldKey())] = iter.StringValue()
+ case Boolean:
+ v, err := iter.BooleanValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ fields[string(iter.FieldKey())] = v
+ }
+ }
+ return fields, nil
+}
+
+// HashID returns a non-cryptographic checksum of the point's key.
+func (p *point) HashID() uint64 {
+ h := NewInlineFNV64a()
+ h.Write(p.key)
+ sum := h.Sum64()
+ return sum
+}
+
+// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+func (p *point) UnixNano() int64 {
+ return p.Time().UnixNano()
+}
+
+// Split will attempt to return multiple points with the same timestamp whose
+// string representations are no longer than size. Points with a single field or
+// a point without a timestamp may exceed the requested size.
+func (p *point) Split(size int) []Point {
+ if p.time.IsZero() || p.StringSize() <= size {
+ return []Point{p}
+ }
+
+ // key string, timestamp string, spaces
+ size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2
+
+ var points []Point
+ var start, cur int
+
+ for cur < len(p.fields) {
+ end, _ := scanTo(p.fields, cur, '=')
+ end, _ = scanFieldValue(p.fields, end+1)
+
+ if cur > start && end-start > size {
+ points = append(points, &point{
+ key: p.key,
+ time: p.time,
+ fields: p.fields[start : cur-1],
+ })
+ start = cur
+ }
+
+ cur = end + 1
+ }
+
+ points = append(points, &point{
+ key: p.key,
+ time: p.time,
+ fields: p.fields[start:],
+ })
+
+ return points
+}
+
+// Tag represents a single key/value tag pair.
+type Tag struct {
+ Key []byte
+ Value []byte
+}
+
+// NewTag returns a new Tag.
+func NewTag(key, value []byte) Tag {
+ return Tag{
+ Key: key,
+ Value: value,
+ }
+}
+
+// Size returns the size of the key and value.
+func (t Tag) Size() int { return len(t.Key) + len(t.Value) }
+
+// Clone returns a shallow copy of Tag.
+//
+// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
+// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
+func (t Tag) Clone() Tag {
+ other := Tag{
+ Key: make([]byte, len(t.Key)),
+ Value: make([]byte, len(t.Value)),
+ }
+
+ copy(other.Key, t.Key)
+ copy(other.Value, t.Value)
+
+ return other
+}
+
+// String returns the string representation of the tag.
+func (t *Tag) String() string {
+ var buf bytes.Buffer
+ buf.WriteByte('{')
+ buf.WriteString(string(t.Key))
+ buf.WriteByte(' ')
+ buf.WriteString(string(t.Value))
+ buf.WriteByte('}')
+ return buf.String()
+}
+
+// Tags represents a sorted list of tags.
+type Tags []Tag
+
+// NewTags returns a new Tags from a map.
+func NewTags(m map[string]string) Tags {
+ if len(m) == 0 {
+ return nil
+ }
+ a := make(Tags, 0, len(m))
+ for k, v := range m {
+ a = append(a, NewTag([]byte(k), []byte(v)))
+ }
+ sort.Sort(a)
+ return a
+}
+
+// Keys returns the list of keys for a tag set.
+func (a Tags) Keys() []string {
+ if len(a) == 0 {
+ return nil
+ }
+ keys := make([]string, len(a))
+ for i, tag := range a {
+ keys[i] = string(tag.Key)
+ }
+ return keys
+}
+
+// Values returns the list of values for a tag set.
+func (a Tags) Values() []string {
+ if len(a) == 0 {
+ return nil
+ }
+ values := make([]string, len(a))
+ for i, tag := range a {
+ values[i] = string(tag.Value)
+ }
+ return values
+}
+
+// String returns the string representation of the tags.
+func (a Tags) String() string {
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i := range a {
+ buf.WriteString(a[i].String())
+ if i < len(a)-1 {
+ buf.WriteByte(' ')
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
+
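A quick sketch of the Tags accessors above (illustrative keys and values); note that NewTags sorts by key:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	tags := models.NewTags(map[string]string{"region": "us-west", "host": "a"})
	fmt.Println(tags.String()) // [{host a} {region us-west}] (sorted by key)
	fmt.Println(tags.Keys())   // [host region]
	fmt.Println(tags.Values()) // [a us-west]
}
```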
+// Size returns the number of bytes needed to store all tags. Note that this
+// only counts the bytes of the keys and values themselves; it does not account
+// for data structures or delimiters.
+func (a Tags) Size() int {
+ var total int
+ for _, t := range a {
+ total += t.Size()
+ }
+ return total
+}
+
+// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements.
+//
+// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
+// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
+func (a Tags) Clone() Tags {
+ if len(a) == 0 {
+ return nil
+ }
+
+ others := make(Tags, len(a))
+ for i := range a {
+ others[i] = a[i].Clone()
+ }
+
+ return others
+}
+
+func (a Tags) Len() int { return len(a) }
+func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 }
+func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// Equal returns true if a equals other.
+func (a Tags) Equal(other Tags) bool {
+ if len(a) != len(other) {
+ return false
+ }
+ for i := range a {
+ if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) {
+ return false
+ }
+ }
+ return true
+}
+
+// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b.
+func CompareTags(a, b Tags) int {
+ // Compare each key & value until a mismatch.
+ for i := 0; i < len(a) && i < len(b); i++ {
+ if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 {
+ return cmp
+ }
+ if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 {
+ return cmp
+ }
+ }
+
+ // If all tags are equal up to this point then return shorter tagset.
+ if len(a) < len(b) {
+ return -1
+ } else if len(a) > len(b) {
+ return 1
+ }
+
+ // All tags are equal.
+ return 0
+}
+
+// Get returns the value for a key.
+func (a Tags) Get(key []byte) []byte {
+ // OPTIMIZE: Use sort.Search if tagset is large.
+
+ for _, t := range a {
+ if bytes.Equal(t.Key, key) {
+ return t.Value
+ }
+ }
+ return nil
+}
+
+// GetString returns the string value for a string key.
+func (a Tags) GetString(key string) string {
+ return string(a.Get([]byte(key)))
+}
+
+// Set sets the value for a key.
+func (a *Tags) Set(key, value []byte) {
+ for i, t := range *a {
+ if bytes.Equal(t.Key, key) {
+ (*a)[i].Value = value
+ return
+ }
+ }
+ *a = append(*a, Tag{Key: key, Value: value})
+ sort.Sort(*a)
+}
+
+// SetString sets the string value for a string key.
+func (a *Tags) SetString(key, value string) {
+ a.Set([]byte(key), []byte(value))
+}
+
+// Delete removes a tag by key.
+func (a *Tags) Delete(key []byte) {
+ for i, t := range *a {
+ if bytes.Equal(t.Key, key) {
+ copy((*a)[i:], (*a)[i+1:])
+ (*a)[len(*a)-1] = Tag{}
+ *a = (*a)[:len(*a)-1]
+ return
+ }
+ }
+}
+
+// Map returns a map representation of the tags.
+func (a Tags) Map() map[string]string {
+ m := make(map[string]string, len(a))
+ for _, t := range a {
+ m[string(t.Key)] = string(t.Value)
+ }
+ return m
+}
+
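A small sketch of the mutators above (made-up keys and values); Set keeps the slice sorted, so lookups and comparisons stay consistent:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	tags := models.NewTags(map[string]string{"host": "a"})
	tags.SetString("region", "us-west")   // re-sorts after insertion
	fmt.Println(tags.GetString("region")) // us-west
	tags.Delete([]byte("host"))
	fmt.Println(tags.Map()) // map[region:us-west]
}
```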
+// Merge merges the tags, combining the two. If both define a tag with the
+// same key, the value from other overwrites the old value.
+// A new Tags set is returned.
+func (a Tags) Merge(other map[string]string) Tags {
+ merged := make(map[string]string, len(a)+len(other))
+ for _, t := range a {
+ merged[string(t.Key)] = string(t.Value)
+ }
+ for k, v := range other {
+ merged[k] = v
+ }
+ return NewTags(merged)
+}
+
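The overwrite semantics of Merge in a sketch (invented tag names); the value from the argument map wins on key conflicts:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	tags := models.NewTags(map[string]string{"dc": "east", "host": "a"})
	merged := tags.Merge(map[string]string{"dc": "west", "rack": "r1"})
	fmt.Println(merged.String()) // [{dc west} {host a} {rack r1}]
}
```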
+// HashKey marshals the tag set's escaped key/value pairs into a single byte slice.
+func (a Tags) HashKey() []byte {
+ // Empty maps marshal to empty bytes.
+ if len(a) == 0 {
+ return nil
+ }
+
+ // Type invariant: Tags are sorted
+
+ escaped := make(Tags, 0, len(a))
+ sz := 0
+ for _, t := range a {
+ ek := escapeTag(t.Key)
+ ev := escapeTag(t.Value)
+
+ if len(ev) > 0 {
+ escaped = append(escaped, Tag{Key: ek, Value: ev})
+ sz += len(ek) + len(ev)
+ }
+ }
+
+ sz += len(escaped) + (len(escaped) * 2) // separators
+
+ // Generate marshaled bytes.
+ b := make([]byte, sz)
+ buf := b
+ idx := 0
+ for _, k := range escaped {
+ buf[idx] = ','
+ idx++
+ copy(buf[idx:idx+len(k.Key)], k.Key)
+ idx += len(k.Key)
+ buf[idx] = '='
+ idx++
+ copy(buf[idx:idx+len(k.Value)], k.Value)
+ idx += len(k.Value)
+ }
+ return b[:idx]
+}
+
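What the marshaled form looks like, as a sketch (hypothetical tag values); each pair is emitted as ,key=value with tag escaping applied:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	tags := models.NewTags(map[string]string{"host": "server 01", "region": "us-west"})
	fmt.Printf("%s\n", tags.HashKey()) // ,host=server\ 01,region=us-west
}
```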
+// CopyTags returns a shallow copy of tags.
+func CopyTags(a Tags) Tags {
+ other := make(Tags, len(a))
+ copy(other, a)
+ return other
+}
+
+// DeepCopyTags returns a deep copy of tags.
+func DeepCopyTags(a Tags) Tags {
+ // Calculate size of keys/values in bytes.
+ var n int
+ for _, t := range a {
+ n += len(t.Key) + len(t.Value)
+ }
+
+ // Build single allocation for all key/values.
+ buf := make([]byte, n)
+
+ // Copy tags to new set.
+ other := make(Tags, len(a))
+ for i, t := range a {
+ copy(buf, t.Key)
+ other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):]
+
+ copy(buf, t.Value)
+ other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):]
+ }
+
+ return other
+}
+
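A sketch of the aliasing difference between CopyTags and DeepCopyTags, using a made-up backing buffer: the shallow copy still points into the original bytes, while the deep copy owns its memory:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	buf := []byte("host=a") // stand-in for a parsed line-protocol buffer
	orig := models.Tags{models.NewTag(buf[:4], buf[5:])}

	shallow := models.CopyTags(orig)
	deep := models.DeepCopyTags(orig)

	buf[5] = 'b' // mutate the shared buffer
	fmt.Println(string(shallow.Get([]byte("host")))) // b (aliases buf)
	fmt.Println(string(deep.Get([]byte("host"))))    // a (own allocation)
}
```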
+// Fields represents a mapping between a Point's field names and their
+// values.
+type Fields map[string]interface{}
+
+// FieldIterator returns a FieldIterator that can be used to traverse the
+// fields of a point without constructing the in-memory map.
+func (p *point) FieldIterator() FieldIterator {
+ p.Reset()
+ return p
+}
+
+type fieldIterator struct {
+ start, end int
+ key, keybuf []byte
+ valueBuf []byte
+ fieldType FieldType
+}
+
+// Next indicates whether there are any fields remaining.
+func (p *point) Next() bool {
+ p.it.start = p.it.end
+ if p.it.start >= len(p.fields) {
+ return false
+ }
+
+ p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=')
+ if escape.IsEscaped(p.it.key) {
+ p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key)
+ p.it.key = p.it.keybuf
+ }
+
+ p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1)
+ p.it.end++
+
+ if len(p.it.valueBuf) == 0 {
+ p.it.fieldType = Empty
+ return true
+ }
+
+ c := p.it.valueBuf[0]
+
+ if c == '"' {
+ p.it.fieldType = String
+ return true
+ }
+
+ if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 {
+ if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {
+ p.it.fieldType = Integer
+ p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+ } else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' {
+ p.it.fieldType = Unsigned
+ p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+ } else {
+ p.it.fieldType = Float
+ }
+ return true
+ }
+
+ // To preserve the existing behavior, default to boolean.
+ p.it.fieldType = Boolean
+ return true
+}
+
+// FieldKey returns the key of the current field.
+func (p *point) FieldKey() []byte {
+ return p.it.key
+}
+
+// Type returns the FieldType of the current field.
+func (p *point) Type() FieldType {
+ return p.it.fieldType
+}
+
+// StringValue returns the string value of the current field.
+func (p *point) StringValue() string {
+ return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1]))
+}
+
+// IntegerValue returns the integer value of the current field.
+func (p *point) IntegerValue() (int64, error) {
+ n, err := parseIntBytes(p.it.valueBuf, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err)
+ }
+ return n, nil
+}
+
+// UnsignedValue returns the unsigned value of the current field.
+func (p *point) UnsignedValue() (uint64, error) {
+ n, err := parseUintBytes(p.it.valueBuf, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err)
+ }
+ return n, nil
+}
+
+// BooleanValue returns the boolean value of the current field.
+func (p *point) BooleanValue() (bool, error) {
+ b, err := parseBoolBytes(p.it.valueBuf)
+ if err != nil {
+ return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err)
+ }
+ return b, nil
+}
+
+// FloatValue returns the float value of the current field.
+func (p *point) FloatValue() (float64, error) {
+ f, err := parseFloatBytes(p.it.valueBuf, 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err)
+ }
+ return f, nil
+}
+
+// Reset resets the iterator to its initial state.
+func (p *point) Reset() {
+ p.it.fieldType = Empty
+ p.it.key = nil
+ p.it.valueBuf = nil
+ p.it.start = 0
+ p.it.end = 0
+}
+
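A minimal iteration sketch (hypothetical measurement and fields) showing the Next/Type/value-accessor protocol without building the Fields map:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/models"
)

func main() {
	pt := models.MustNewPoint("m", nil,
		models.Fields{"count": int64(3), "value": 12.5}, time.Unix(0, 0))

	it := pt.FieldIterator()
	for it.Next() {
		switch it.Type() {
		case models.Integer:
			v, _ := it.IntegerValue()
			fmt.Printf("%s=%di\n", it.FieldKey(), v) // count=3i
		case models.Float:
			v, _ := it.FloatValue()
			fmt.Printf("%s=%v\n", it.FieldKey(), v) // value=12.5
		}
	}
}
```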
+// MarshalBinary encodes all the fields to their proper type and returns the binary
+// representation.
+// NOTE: uint64 is specifically not supported due to potential overflow when we decode
+// again later to an int64.
+// NOTE2: uint is accepted even though it may be 64 bits wide; it is written as a
+// signed integer for backwards compatibility.
+func (p Fields) MarshalBinary() []byte {
+ var b []byte
+ keys := make([]string, 0, len(p))
+
+ for k := range p {
+ keys = append(keys, k)
+ }
+
+ // Sorting is not strictly necessary, but it makes the encoded output deterministic.
+ sort.Strings(keys)
+
+ for i, k := range keys {
+ if i > 0 {
+ b = append(b, ',')
+ }
+ b = appendField(b, k, p[k])
+ }
+
+ return b
+}
+
+func appendField(b []byte, k string, v interface{}) []byte {
+ b = append(b, []byte(escape.String(k))...)
+ b = append(b, '=')
+
+ // check popular types first
+ switch v := v.(type) {
+ case float64:
+ b = strconv.AppendFloat(b, v, 'f', -1, 64)
+ case int64:
+ b = strconv.AppendInt(b, v, 10)
+ b = append(b, 'i')
+ case string:
+ b = append(b, '"')
+ b = append(b, []byte(EscapeStringField(v))...)
+ b = append(b, '"')
+ case bool:
+ b = strconv.AppendBool(b, v)
+ case int32:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int16:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int8:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint64:
+ b = strconv.AppendUint(b, v, 10)
+ b = append(b, 'u')
+ case uint32:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint16:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint8:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint:
+ // TODO: 'uint' should be converted to writing as an unsigned integer,
+ // but we cannot since that would break backwards compatibility.
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case float32:
+ b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
+ case []byte:
+ b = append(b, v...)
+ case nil:
+ // skip
+ default:
+ // Can't determine the type, so convert to string
+ b = append(b, '"')
+ b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...)
+ b = append(b, '"')
+
+ }
+
+ return b
+}
+
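A sketch of the resulting field-set encoding (made-up field names): keys come out sorted, int64 carries the 'i' suffix, and floats and bools are written literally:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	f := models.Fields{"value": 12.5, "count": int64(3), "ok": true}
	fmt.Printf("%s\n", f.MarshalBinary()) // count=3i,ok=true,value=12.5
}
```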
+type byteSlices [][]byte
+
+func (a byteSlices) Len() int { return len(a) }
+func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }
+func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/rows.go b/vendor/github.com/influxdata/influxdb/models/rows.go
new file mode 100644
index 000000000..c087a4882
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/rows.go
@@ -0,0 +1,62 @@
+package models
+
+import (
+ "sort"
+)
+
+// Row represents a single row returned from the execution of a statement.
+type Row struct {
+ Name string `json:"name,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Columns []string `json:"columns,omitempty"`
+ Values [][]interface{} `json:"values,omitempty"`
+ Partial bool `json:"partial,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+ return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+ h := NewInlineFNV64a()
+ keys := r.tagsKeys()
+ for _, k := range keys {
+ h.Write([]byte(k))
+ h.Write([]byte(r.Tags[k]))
+ }
+ return h.Sum64()
+}
+
+// tagsKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+ a := make([]string, 0, len(r.Tags))
+ for k := range r.Tags {
+ a = append(a, k)
+ }
+ sort.Strings(a)
+ return a
+}
+
+// Rows represents a collection of rows. Rows implements sort.Interface.
+type Rows []*Row
+
+// Len implements sort.Interface.
+func (p Rows) Len() int { return len(p) }
+
+// Less implements sort.Interface.
+func (p Rows) Less(i, j int) bool {
+ // Sort by name first.
+ if p[i].Name != p[j].Name {
+ return p[i].Name < p[j].Name
+ }
+
+ // Sort by tag set hash. Tags don't have a meaningful sort order so we
+ // just compute a hash and sort by that instead. This allows the tests
+ // to receive rows in a predictable order every time.
+ return p[i].tagsHash() < p[j].tagsHash()
+}
+
+// Swap implements sort.Interface.
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
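A sketch of the resulting ordering (hypothetical rows): names sort first, and only then does the tag-set hash break ties:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/influxdata/influxdb/models"
)

func main() {
	rows := models.Rows{
		{Name: "mem", Tags: map[string]string{"host": "b"}},
		{Name: "cpu", Tags: map[string]string{"host": "a"}},
	}
	sort.Sort(rows)
	fmt.Println(rows[0].Name, rows[1].Name) // cpu mem
}
```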
diff --git a/vendor/github.com/influxdata/influxdb/models/statistic.go b/vendor/github.com/influxdata/influxdb/models/statistic.go
new file mode 100644
index 000000000..553e9d09f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/statistic.go
@@ -0,0 +1,42 @@
+package models
+
+// Statistic is the representation of a statistic used by the monitoring service.
+type Statistic struct {
+ Name string `json:"name"`
+ Tags map[string]string `json:"tags"`
+ Values map[string]interface{} `json:"values"`
+}
+
+// NewStatistic returns an initialized Statistic.
+func NewStatistic(name string) Statistic {
+ return Statistic{
+ Name: name,
+ Tags: make(map[string]string),
+ Values: make(map[string]interface{}),
+ }
+}
+
+// StatisticTags is a map that can be merged with others without causing
+// mutations to either map.
+type StatisticTags map[string]string
+
+// Merge creates a new map containing the merged contents of tags and t.
+// If both tags and the receiver map contain the same key, the value in tags
+// is used in the resulting map.
+//
+// Merge always returns a usable map.
+func (t StatisticTags) Merge(tags map[string]string) map[string]string {
+ // Add everything in tags to the result.
+ out := make(map[string]string, len(tags))
+ for k, v := range tags {
+ out[k] = v
+ }
+
+ // Only add values from t that don't appear in tags.
+ for k, v := range t {
+ if _, ok := tags[k]; !ok {
+ out[k] = v
+ }
+ }
+ return out
+}
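A sketch of the precedence rule (invented tag names): entries in the argument map win, and entries only in the receiver survive:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	defaults := models.StatisticTags{"engine": "tsm1", "path": "/var/a"}
	out := defaults.Merge(map[string]string{"path": "/var/b"})
	fmt.Println(out) // map[engine:tsm1 path:/var/b]
}
```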
diff --git a/vendor/github.com/influxdata/influxdb/models/time.go b/vendor/github.com/influxdata/influxdb/models/time.go
new file mode 100644
index 000000000..e98f2cb33
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/time.go
@@ -0,0 +1,74 @@
+package models
+
+// Helper time methods, since parsing timestamps can easily overflow and we only
+// support a specific time range.
+
+import (
+ "fmt"
+ "math"
+ "time"
+)
+
+const (
+ // MinNanoTime is the minimum time that can be represented.
+ //
+ // 1677-09-21 00:12:43.145224194 +0000 UTC
+ //
+ // The two lowest minimum integers are used as sentinel values. The
+ // minimum value needs to be used as a value lower than any other value for
+ // comparisons and another separate value is needed to act as a sentinel
+ // default value that is unusable by the user, but usable internally.
+ // Because these two values need to be used for a special purpose, we do
+ // not allow users to write points at these two times.
+ MinNanoTime = int64(math.MinInt64) + 2
+
+ // MaxNanoTime is the maximum time that can be represented.
+ //
+ // 2262-04-11 23:47:16.854775806 +0000 UTC
+ //
+ // The highest time represented by a nanosecond needs to be used for an
+ // exclusive range in the shard group, so the maximum time needs to be one
+ // less than the possible maximum number of nanoseconds representable by an
+ // int64 so that we don't lose a point at that one time.
+ MaxNanoTime = int64(math.MaxInt64) - 1
+)
+
+var (
+ minNanoTime = time.Unix(0, MinNanoTime).UTC()
+ maxNanoTime = time.Unix(0, MaxNanoTime).UTC()
+
+ // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
+ ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
+)
+
+// SafeCalcTime safely calculates the time given. It returns an error if the
+// time is outside the supported range.
+func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
+ mult := GetPrecisionMultiplier(precision)
+ if t, ok := safeSignedMult(timestamp, mult); ok {
+ tme := time.Unix(0, t).UTC()
+ return tme, CheckTime(tme)
+ }
+
+ return time.Time{}, ErrTimeOutOfRange
+}
+
+// CheckTime checks that a time is within the safe range.
+func CheckTime(t time.Time) error {
+ if t.Before(minNanoTime) || t.After(maxNanoTime) {
+ return ErrTimeOutOfRange
+ }
+ return nil
+}
+
+// Perform the multiplication and check to make sure it didn't overflow.
+func safeSignedMult(a, b int64) (int64, bool) {
+ if a == 0 || b == 0 || a == 1 || b == 1 {
+ return a * b, true
+ }
+ if a == MinNanoTime || b == MaxNanoTime {
+ return 0, false
+ }
+ c := a * b
+ return c, c/b == a
+}
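A sketch of the safe conversion (example timestamps only): a second-precision value inside the window converts cleanly, while one that would overflow int64 nanoseconds is rejected:

```go
package main

import (
	"fmt"
	"math"

	"github.com/influxdata/influxdb/models"
)

func main() {
	t, err := models.SafeCalcTime(1519372800, "s")
	fmt.Println(t, err) // 2018-02-23 08:00:00 +0000 UTC <nil>

	_, err = models.SafeCalcTime(math.MaxInt64, "s")
	fmt.Println(err) // time outside range ...
}
```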
diff --git a/vendor/github.com/influxdata/influxdb/models/uint_support.go b/vendor/github.com/influxdata/influxdb/models/uint_support.go
new file mode 100644
index 000000000..18d1ca06e
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/uint_support.go
@@ -0,0 +1,7 @@
+// +build uint uint64
+
+package models
+
+func init() {
+ EnableUintSupport()
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
new file mode 100644
index 000000000..f3b31f42d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
@@ -0,0 +1,115 @@
+// Package escape contains utilities for escaping parts of InfluxQL
+// and InfluxDB line protocol.
+package escape // import "github.com/influxdata/influxdb/pkg/escape"
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Codes is a map of bytes to be escaped.
+var Codes = map[byte][]byte{
+ ',': []byte(`\,`),
+ '"': []byte(`\"`),
+ ' ': []byte(`\ `),
+ '=': []byte(`\=`),
+}
+
+// Bytes escapes characters on the input slice, as defined by Codes.
+func Bytes(in []byte) []byte {
+ for b, esc := range Codes {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ return in
+}
+
+const escapeChars = `," =`
+
+// IsEscaped returns whether b has any escaped characters,
+// i.e. whether b seems to have been processed by Bytes.
+func IsEscaped(b []byte) bool {
+ for len(b) > 0 {
+ i := bytes.IndexByte(b, '\\')
+ if i < 0 {
+ return false
+ }
+
+ if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 {
+ return true
+ }
+ b = b[i+1:]
+ }
+ return false
+}
+
+// AppendUnescaped appends the unescaped version of src to dst
+// and returns the resulting slice.
+func AppendUnescaped(dst, src []byte) []byte {
+ var pos int
+ for len(src) > 0 {
+ next := bytes.IndexByte(src[pos:], '\\')
+ if next < 0 || pos+next+1 >= len(src) {
+ return append(dst, src...)
+ }
+
+ if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 {
+ if pos+next > 0 {
+ dst = append(dst, src[:pos+next]...)
+ }
+ src = src[pos+next+1:]
+ pos = 0
+ } else {
+ pos += next + 1
+ }
+ }
+
+ return dst
+}
+
+// Unescape returns a new slice containing the unescaped version of in.
+func Unescape(in []byte) []byte {
+ if len(in) == 0 {
+ return nil
+ }
+
+ if bytes.IndexByte(in, '\\') == -1 {
+ return in
+ }
+
+ i := 0
+ inLen := len(in)
+
+ // The output size will be no more than inLen. Preallocating the
+ // capacity of the output is faster and uses less memory than
+ // letting append() do its own (over)allocation.
+ out := make([]byte, 0, inLen)
+
+ for {
+ if i >= inLen {
+ break
+ }
+ if in[i] == '\\' && i+1 < inLen {
+ switch in[i+1] {
+ case ',':
+ out = append(out, ',')
+ i += 2
+ continue
+ case '"':
+ out = append(out, '"')
+ i += 2
+ continue
+ case ' ':
+ out = append(out, ' ')
+ i += 2
+ continue
+ case '=':
+ out = append(out, '=')
+ i += 2
+ continue
+ }
+ }
+ out = append(out, in[i])
+ i++
+ }
+ return out
+}
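A round-trip sketch over the byte helpers (made-up input); the four line-protocol metacharacters gain backslashes and lose them again:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/escape"
)

func main() {
	esc := escape.Bytes([]byte(`cpu,host=us west`))
	fmt.Printf("%s\n", esc)                  // cpu\,host\=us\ west
	fmt.Printf("%s\n", escape.Unescape(esc)) // cpu,host=us west
}
```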
diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go
new file mode 100644
index 000000000..db98033b0
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go
@@ -0,0 +1,21 @@
+package escape
+
+import "strings"
+
+var (
+ escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
+ unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
+)
+
+// UnescapeString returns the unescaped version of in.
+func UnescapeString(in string) string {
+ if strings.IndexByte(in, '\\') == -1 {
+ return in
+ }
+ return unescaper.Replace(in)
+}
+
+// String returns the escaped version of in.
+func String(in string) string {
+ return escaper.Replace(in)
+}
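The string variants behave the same way, per this sketch (illustrative input):

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/escape"
)

func main() {
	s := escape.String("us west,zone=1")
	fmt.Println(s)                        // us\ west\,zone\=1
	fmt.Println(escape.UnescapeString(s)) // us west,zone=1
}
```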
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
deleted file mode 100644
index 363fa9ee7..000000000
--- a/vendor/github.com/rcrowley/go-metrics/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright 2012 Richard Crowley. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
-THE POSSIBILITY OF SUCH DAMAGE.
-
-The views and conclusions contained in the software and documentation
-are those of the authors and should not be interpreted as representing
-official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
deleted file mode 100644
index 2d1a6dcfa..000000000
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ /dev/null
@@ -1,153 +0,0 @@
-go-metrics
-==========
-
-![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
-
-Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
-
-Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
-
-Usage
------
-
-Create and update metrics:
-
-```go
-c := metrics.NewCounter()
-metrics.Register("foo", c)
-c.Inc(47)
-
-g := metrics.NewGauge()
-metrics.Register("bar", g)
-g.Update(47)
-
-r := NewRegistry()
-g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
-
-s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
-h := metrics.NewHistogram(s)
-metrics.Register("baz", h)
-h.Update(47)
-
-m := metrics.NewMeter()
-metrics.Register("quux", m)
-m.Mark(47)
-
-t := metrics.NewTimer()
-metrics.Register("bang", t)
-t.Time(func() {})
-t.Update(47)
-```
-
-Register() is not threadsafe. For threadsafe metric registration use
-GetOrRegister:
-
-```
-t := metrics.GetOrRegisterTimer("account.create.latency", nil)
-t.Time(func() {})
-t.Update(47)
-```
-
-Periodically log every metric in human-readable form to standard error:
-
-```go
-go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
-```
-
-Periodically log every metric in slightly-more-parseable form to syslog:
-
-```go
-w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
-go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
-```
-
-Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
-
-```go
-
-import "github.com/cyberdelia/go-metrics-graphite"
-
-addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
-go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
-```
-
-Periodically emit every metric into InfluxDB:
-
-**NOTE:** this has been pulled out of the library due to constant fluctuations
-in the InfluxDB API. In fact, all client libraries are on their way out. see
-issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
-[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
-
-```go
-import "github.com/vrischmann/go-metrics-influxdb"
-
-go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
- Host: "127.0.0.1:8086",
- Database: "metrics",
- Username: "test",
- Password: "test",
-})
-```
-
-Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
-
-**Note**: the client included with this repository under the `librato` package
-has been deprecated and moved to the repository linked above.
-
-```go
-import "github.com/mihasya/go-metrics-librato"
-
-go librato.Librato(metrics.DefaultRegistry,
- 10e9, // interval
- "example@example.com", // account owner email address
- "token", // Librato API token
- "hostname", // source
- []float64{0.95}, // percentiles to send
- time.Millisecond, // time unit
-)
-```
-
-Periodically emit every metric to StatHat:
-
-```go
-import "github.com/rcrowley/go-metrics/stathat"
-
-go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
-```
-
-Maintain all metrics along with expvars at `/debug/metrics`:
-
-This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
-but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars
-as well as all your go-metrics.
-
-
-```go
-import "github.com/rcrowley/go-metrics/exp"
-
-exp.Exp(metrics.DefaultRegistry)
-```
-
-Installation
-------------
-
-```sh
-go get github.com/rcrowley/go-metrics
-```
-
-StatHat support additionally requires their Go client:
-
-```sh
-go get github.com/stathat/go
-```
-
-Publishing Metrics
-------------------
-
-Clients are available for the following destinations:
-
-* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato)
-* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite)
-* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb)
-* Ganglia - [https://github.com/appscode/metlia](https://github.com/appscode/metlia)
-* Prometheus - [https://github.com/deathowl/go-metrics-prometheus](https://github.com/deathowl/go-metrics-prometheus)
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
deleted file mode 100644
index bb7b039cb..000000000
--- a/vendor/github.com/rcrowley/go-metrics/counter.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package metrics
-
-import "sync/atomic"
-
-// Counters hold an int64 value that can be incremented and decremented.
-type Counter interface {
- Clear()
- Count() int64
- Dec(int64)
- Inc(int64)
- Snapshot() Counter
-}
-
-// GetOrRegisterCounter returns an existing Counter or constructs and registers
-// a new StandardCounter.
-func GetOrRegisterCounter(name string, r Registry) Counter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewCounter).(Counter)
-}
-
-// NewCounter constructs a new StandardCounter.
-func NewCounter() Counter {
- if UseNilMetrics {
- return NilCounter{}
- }
- return &StandardCounter{0}
-}
-
-// NewRegisteredCounter constructs and registers a new StandardCounter.
-func NewRegisteredCounter(name string, r Registry) Counter {
- c := NewCounter()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// CounterSnapshot is a read-only copy of another Counter.
-type CounterSnapshot int64
-
-// Clear panics.
-func (CounterSnapshot) Clear() {
- panic("Clear called on a CounterSnapshot")
-}
-
-// Count returns the count at the time the snapshot was taken.
-func (c CounterSnapshot) Count() int64 { return int64(c) }
-
-// Dec panics.
-func (CounterSnapshot) Dec(int64) {
- panic("Dec called on a CounterSnapshot")
-}
-
-// Inc panics.
-func (CounterSnapshot) Inc(int64) {
- panic("Inc called on a CounterSnapshot")
-}
-
-// Snapshot returns the snapshot.
-func (c CounterSnapshot) Snapshot() Counter { return c }
-
-// NilCounter is a no-op Counter.
-type NilCounter struct{}
-
-// Clear is a no-op.
-func (NilCounter) Clear() {}
-
-// Count is a no-op.
-func (NilCounter) Count() int64 { return 0 }
-
-// Dec is a no-op.
-func (NilCounter) Dec(i int64) {}
-
-// Inc is a no-op.
-func (NilCounter) Inc(i int64) {}
-
-// Snapshot is a no-op.
-func (NilCounter) Snapshot() Counter { return NilCounter{} }
-
-// StandardCounter is the standard implementation of a Counter and uses the
-// sync/atomic package to manage a single int64 value.
-type StandardCounter struct {
- count int64
-}
-
-// Clear sets the counter to zero.
-func (c *StandardCounter) Clear() {
- atomic.StoreInt64(&c.count, 0)
-}
-
-// Count returns the current count.
-func (c *StandardCounter) Count() int64 {
- return atomic.LoadInt64(&c.count)
-}
-
-// Dec decrements the counter by the given amount.
-func (c *StandardCounter) Dec(i int64) {
- atomic.AddInt64(&c.count, -i)
-}
-
-// Inc increments the counter by the given amount.
-func (c *StandardCounter) Inc(i int64) {
- atomic.AddInt64(&c.count, i)
-}
-
-// Snapshot returns a read-only copy of the counter.
-func (c *StandardCounter) Snapshot() Counter {
- return CounterSnapshot(c.Count())
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
deleted file mode 100644
index 043ccefab..000000000
--- a/vendor/github.com/rcrowley/go-metrics/debug.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package metrics
-
-import (
- "runtime/debug"
- "time"
-)
-
-var (
- debugMetrics struct {
- GCStats struct {
- LastGC Gauge
- NumGC Gauge
- Pause Histogram
- //PauseQuantiles Histogram
- PauseTotal Gauge
- }
- ReadGCStats Timer
- }
- gcStats debug.GCStats
-)
-
-// Capture new values for the Go garbage collector statistics exported in
-// debug.GCStats. This is designed to be called as a goroutine.
-func CaptureDebugGCStats(r Registry, d time.Duration) {
- for _ = range time.Tick(d) {
- CaptureDebugGCStatsOnce(r)
- }
-}
-
-// Capture new values for the Go garbage collector statistics exported in
-// debug.GCStats. This is designed to be called in a background goroutine.
-// Giving a registry which has not been given to RegisterDebugGCStats will
-// panic.
-//
-// Be careful (but much less so) with this because debug.ReadGCStats calls
-// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
-// operation, isn't something you want to be doing all the time.
-func CaptureDebugGCStatsOnce(r Registry) {
- lastGC := gcStats.LastGC
- t := time.Now()
- debug.ReadGCStats(&gcStats)
- debugMetrics.ReadGCStats.UpdateSince(t)
-
- debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
- debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
- if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
- debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
- }
- //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
- debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
-}
-
-// Register metrics for the Go garbage collector statistics exported in
-// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
-// i.e. debug.GCStats.PauseTotal.
-func RegisterDebugGCStats(r Registry) {
- debugMetrics.GCStats.LastGC = NewGauge()
- debugMetrics.GCStats.NumGC = NewGauge()
- debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
- //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
- debugMetrics.GCStats.PauseTotal = NewGauge()
- debugMetrics.ReadGCStats = NewTimer()
-
- r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
- r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
- r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
- //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
- r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
- r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
-}
-
-// Allocate an initial slice for gcStats.Pause to avoid allocations during
-// normal operation.
-func init() {
- gcStats.Pause = make([]time.Duration, 11)
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
deleted file mode 100644
index 694a1d033..000000000
--- a/vendor/github.com/rcrowley/go-metrics/ewma.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package metrics
-
-import (
- "math"
- "sync"
- "sync/atomic"
-)
-
-// EWMAs continuously calculate an exponentially-weighted moving average
-// based on an outside source of clock ticks.
-type EWMA interface {
- Rate() float64
- Snapshot() EWMA
- Tick()
- Update(int64)
-}
-
-// NewEWMA constructs a new EWMA with the given alpha.
-func NewEWMA(alpha float64) EWMA {
- if UseNilMetrics {
- return NilEWMA{}
- }
- return &StandardEWMA{alpha: alpha}
-}
-
-// NewEWMA1 constructs a new EWMA for a one-minute moving average.
-func NewEWMA1() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/1))
-}
-
-// NewEWMA5 constructs a new EWMA for a five-minute moving average.
-func NewEWMA5() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/5))
-}
-
-// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
-func NewEWMA15() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/15))
-}
-
-// EWMASnapshot is a read-only copy of another EWMA.
-type EWMASnapshot float64
-
-// Rate returns the rate of events per second at the time the snapshot was
-// taken.
-func (a EWMASnapshot) Rate() float64 { return float64(a) }
-
-// Snapshot returns the snapshot.
-func (a EWMASnapshot) Snapshot() EWMA { return a }
-
-// Tick panics.
-func (EWMASnapshot) Tick() {
- panic("Tick called on an EWMASnapshot")
-}
-
-// Update panics.
-func (EWMASnapshot) Update(int64) {
- panic("Update called on an EWMASnapshot")
-}
-
-// NilEWMA is a no-op EWMA.
-type NilEWMA struct{}
-
-// Rate is a no-op.
-func (NilEWMA) Rate() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
-
-// Tick is a no-op.
-func (NilEWMA) Tick() {}
-
-// Update is a no-op.
-func (NilEWMA) Update(n int64) {}
-
-// StandardEWMA is the standard implementation of an EWMA and tracks the number
-// of uncounted events and processes them on each tick. It uses the
-// sync/atomic package to manage uncounted events.
-type StandardEWMA struct {
- uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
- alpha float64
- rate float64
- init bool
- mutex sync.Mutex
-}
-
-// Rate returns the moving average rate of events per second.
-func (a *StandardEWMA) Rate() float64 {
- a.mutex.Lock()
- defer a.mutex.Unlock()
- return a.rate * float64(1e9)
-}
-
-// Snapshot returns a read-only copy of the EWMA.
-func (a *StandardEWMA) Snapshot() EWMA {
- return EWMASnapshot(a.Rate())
-}
-
-// Tick ticks the clock to update the moving average. It assumes it is called
-// every five seconds.
-func (a *StandardEWMA) Tick() {
- count := atomic.LoadInt64(&a.uncounted)
- atomic.AddInt64(&a.uncounted, -count)
- instantRate := float64(count) / float64(5e9)
- a.mutex.Lock()
- defer a.mutex.Unlock()
- if a.init {
- a.rate += a.alpha * (instantRate - a.rate)
- } else {
- a.init = true
- a.rate = instantRate
- }
-}
-
-// Update adds n uncounted events.
-func (a *StandardEWMA) Update(n int64) {
- atomic.AddInt64(&a.uncounted, n)
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/exp/exp.go b/vendor/github.com/rcrowley/go-metrics/exp/exp.go
deleted file mode 100644
index 11dd3f898..000000000
--- a/vendor/github.com/rcrowley/go-metrics/exp/exp.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Hook go-metrics into expvar
-// on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler
-package exp
-
-import (
- "expvar"
- "fmt"
- "net/http"
- "sync"
-
- "github.com/rcrowley/go-metrics"
-)
-
-type exp struct {
- expvarLock sync.Mutex // expvar panics if you try to register the same var twice, so we must probe it safely
- registry metrics.Registry
-}
-
-func (exp *exp) expHandler(w http.ResponseWriter, r *http.Request) {
- // load our variables into expvar
- exp.syncToExpvar()
-
- // now just run the official expvar handler code (which is not publicly callable, so pasted inline)
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- fmt.Fprintf(w, "{\n")
- first := true
- expvar.Do(func(kv expvar.KeyValue) {
- if !first {
- fmt.Fprintf(w, ",\n")
- }
- first = false
- fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
- })
- fmt.Fprintf(w, "\n}\n")
-}
-
-// Exp will register an expvar powered metrics handler with http.DefaultServeMux on "/debug/vars"
-func Exp(r metrics.Registry) {
- h := ExpHandler(r)
- // this would cause a panic:
- // panic: http: multiple registrations for /debug/vars
- // http.HandleFunc("/debug/vars", e.expHandler)
- // haven't found an elegant way, so just use a different endpoint
- http.Handle("/debug/metrics", h)
-}
-
-// ExpHandler will return an expvar powered metrics handler.
-func ExpHandler(r metrics.Registry) http.Handler {
- e := exp{sync.Mutex{}, r}
- return http.HandlerFunc(e.expHandler)
-}
-
-func (exp *exp) getInt(name string) *expvar.Int {
- var v *expvar.Int
- exp.expvarLock.Lock()
- p := expvar.Get(name)
- if p != nil {
- v = p.(*expvar.Int)
- } else {
- v = new(expvar.Int)
- expvar.Publish(name, v)
- }
- exp.expvarLock.Unlock()
- return v
-}
-
-func (exp *exp) getFloat(name string) *expvar.Float {
- var v *expvar.Float
- exp.expvarLock.Lock()
- p := expvar.Get(name)
- if p != nil {
- v = p.(*expvar.Float)
- } else {
- v = new(expvar.Float)
- expvar.Publish(name, v)
- }
- exp.expvarLock.Unlock()
- return v
-}
-
-func (exp *exp) publishCounter(name string, metric metrics.Counter) {
- v := exp.getInt(name)
- v.Set(metric.Count())
-}
-
-func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
- v := exp.getInt(name)
- v.Set(metric.Value())
-}
-func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
- exp.getFloat(name).Set(metric.Value())
-}
-
-func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- exp.getInt(name + ".count").Set(h.Count())
- exp.getFloat(name + ".min").Set(float64(h.Min()))
- exp.getFloat(name + ".max").Set(float64(h.Max()))
- exp.getFloat(name + ".mean").Set(float64(h.Mean()))
- exp.getFloat(name + ".std-dev").Set(float64(h.StdDev()))
- exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
- exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
- exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
- exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
- exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
-}
-
-func (exp *exp) publishMeter(name string, metric metrics.Meter) {
- m := metric.Snapshot()
- exp.getInt(name + ".count").Set(m.Count())
- exp.getFloat(name + ".one-minute").Set(float64(m.Rate1()))
- exp.getFloat(name + ".five-minute").Set(float64(m.Rate5()))
- exp.getFloat(name + ".fifteen-minute").Set(float64((m.Rate15())))
- exp.getFloat(name + ".mean").Set(float64(m.RateMean()))
-}
-
-func (exp *exp) publishTimer(name string, metric metrics.Timer) {
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- exp.getInt(name + ".count").Set(t.Count())
- exp.getFloat(name + ".min").Set(float64(t.Min()))
- exp.getFloat(name + ".max").Set(float64(t.Max()))
- exp.getFloat(name + ".mean").Set(float64(t.Mean()))
- exp.getFloat(name + ".std-dev").Set(float64(t.StdDev()))
- exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
- exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
- exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
- exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
- exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
- exp.getFloat(name + ".one-minute").Set(float64(t.Rate1()))
- exp.getFloat(name + ".five-minute").Set(float64(t.Rate5()))
- exp.getFloat(name + ".fifteen-minute").Set(float64((t.Rate15())))
- exp.getFloat(name + ".mean-rate").Set(float64(t.RateMean()))
-}
-
-func (exp *exp) syncToExpvar() {
- exp.registry.Each(func(name string, i interface{}) {
- switch i.(type) {
- case metrics.Counter:
- exp.publishCounter(name, i.(metrics.Counter))
- case metrics.Gauge:
- exp.publishGauge(name, i.(metrics.Gauge))
- case metrics.GaugeFloat64:
- exp.publishGaugeFloat64(name, i.(metrics.GaugeFloat64))
- case metrics.Histogram:
- exp.publishHistogram(name, i.(metrics.Histogram))
- case metrics.Meter:
- exp.publishMeter(name, i.(metrics.Meter))
- case metrics.Timer:
- exp.publishTimer(name, i.(metrics.Timer))
- default:
- panic(fmt.Sprintf("unsupported type for '%s': %T", name, i))
- }
- })
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
deleted file mode 100644
index cb57a9388..000000000
--- a/vendor/github.com/rcrowley/go-metrics/gauge.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package metrics
-
-import "sync/atomic"
-
-// Gauges hold an int64 value that can be set arbitrarily.
-type Gauge interface {
- Snapshot() Gauge
- Update(int64)
- Value() int64
-}
-
-// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
-// new StandardGauge.
-func GetOrRegisterGauge(name string, r Registry) Gauge {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewGauge).(Gauge)
-}
-
-// NewGauge constructs a new StandardGauge.
-func NewGauge() Gauge {
- if UseNilMetrics {
- return NilGauge{}
- }
- return &StandardGauge{0}
-}
-
-// NewRegisteredGauge constructs and registers a new StandardGauge.
-func NewRegisteredGauge(name string, r Registry) Gauge {
- c := NewGauge()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewFunctionalGauge constructs a new FunctionalGauge.
-func NewFunctionalGauge(f func() int64) Gauge {
- if UseNilMetrics {
- return NilGauge{}
- }
- return &FunctionalGauge{value: f}
-}
-
-// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
-func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
- c := NewFunctionalGauge(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeSnapshot is a read-only copy of another Gauge.
-type GaugeSnapshot int64
-
-// Snapshot returns the snapshot.
-func (g GaugeSnapshot) Snapshot() Gauge { return g }
-
-// Update panics.
-func (GaugeSnapshot) Update(int64) {
- panic("Update called on a GaugeSnapshot")
-}
-
-// Value returns the value at the time the snapshot was taken.
-func (g GaugeSnapshot) Value() int64 { return int64(g) }
-
-// NilGauge is a no-op Gauge.
-type NilGauge struct{}
-
-// Snapshot is a no-op.
-func (NilGauge) Snapshot() Gauge { return NilGauge{} }
-
-// Update is a no-op.
-func (NilGauge) Update(v int64) {}
-
-// Value is a no-op.
-func (NilGauge) Value() int64 { return 0 }
-
-// StandardGauge is the standard implementation of a Gauge and uses the
-// sync/atomic package to manage a single int64 value.
-type StandardGauge struct {
- value int64
-}
-
-// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGauge) Snapshot() Gauge {
- return GaugeSnapshot(g.Value())
-}
-
-// Update updates the gauge's value.
-func (g *StandardGauge) Update(v int64) {
- atomic.StoreInt64(&g.value, v)
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGauge) Value() int64 {
- return atomic.LoadInt64(&g.value)
-}
-
-// FunctionalGauge returns value from given function
-type FunctionalGauge struct {
- value func() int64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGauge) Value() int64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGauge) Update(int64) {
- panic("Update called on a FunctionalGauge")
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
deleted file mode 100644
index 6f93920b2..000000000
--- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package metrics
-
-import "sync"
-
-// GaugeFloat64s hold a float64 value that can be set arbitrarily.
-type GaugeFloat64 interface {
- Snapshot() GaugeFloat64
- Update(float64)
- Value() float64
-}
-
-// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
-// new StandardGaugeFloat64.
-func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
-}
-
-// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
-func NewGaugeFloat64() GaugeFloat64 {
- if UseNilMetrics {
- return NilGaugeFloat64{}
- }
- return &StandardGaugeFloat64{
- value: 0.0,
- }
-}
-
-// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
-func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
- c := NewGaugeFloat64()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewFunctionalGauge constructs a new FunctionalGauge.
-func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
- if UseNilMetrics {
- return NilGaugeFloat64{}
- }
- return &FunctionalGaugeFloat64{value: f}
-}
-
-// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
-func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
- c := NewFunctionalGaugeFloat64(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
-type GaugeFloat64Snapshot float64
-
-// Snapshot returns the snapshot.
-func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
-
-// Update panics.
-func (GaugeFloat64Snapshot) Update(float64) {
- panic("Update called on a GaugeFloat64Snapshot")
-}
-
-// Value returns the value at the time the snapshot was taken.
-func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
-
-// NilGauge is a no-op Gauge.
-type NilGaugeFloat64 struct{}
-
-// Snapshot is a no-op.
-func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
-
-// Update is a no-op.
-func (NilGaugeFloat64) Update(v float64) {}
-
-// Value is a no-op.
-func (NilGaugeFloat64) Value() float64 { return 0.0 }
-
-// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
-// sync.Mutex to manage a single float64 value.
-type StandardGaugeFloat64 struct {
- mutex sync.Mutex
- value float64
-}
-
-// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
- return GaugeFloat64Snapshot(g.Value())
-}
-
-// Update updates the gauge's value.
-func (g *StandardGaugeFloat64) Update(v float64) {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- g.value = v
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGaugeFloat64) Value() float64 {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- return g.value
-}
-
-// FunctionalGaugeFloat64 returns value from given function
-type FunctionalGaugeFloat64 struct {
- value func() float64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGaugeFloat64) Value() float64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGaugeFloat64) Update(float64) {
- panic("Update called on a FunctionalGaugeFloat64")
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
deleted file mode 100644
index abd0a7d29..000000000
--- a/vendor/github.com/rcrowley/go-metrics/graphite.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package metrics
-
-import (
- "bufio"
- "fmt"
- "log"
- "net"
- "strconv"
- "strings"
- "time"
-)
-
-// GraphiteConfig provides a container with configuration parameters for
-// the Graphite exporter
-type GraphiteConfig struct {
- Addr *net.TCPAddr // Network address to connect to
- Registry Registry // Registry to be exported
- FlushInterval time.Duration // Flush interval
- DurationUnit time.Duration // Time conversion unit for durations
- Prefix string // Prefix to be prepended to metric names
- Percentiles []float64 // Percentiles to export from timers and histograms
-}
-
-// Graphite is a blocking exporter function which reports metrics in r
-// to a graphite server located at addr, flushing them every d duration
-// and prepending metric names with prefix.
-func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
- GraphiteWithConfig(GraphiteConfig{
- Addr: addr,
- Registry: r,
- FlushInterval: d,
- DurationUnit: time.Nanosecond,
- Prefix: prefix,
- Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
- })
-}
-
-// GraphiteWithConfig is a blocking exporter function just like Graphite,
-// but it takes a GraphiteConfig instead.
-func GraphiteWithConfig(c GraphiteConfig) {
- log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
- for _ = range time.Tick(c.FlushInterval) {
- if err := graphite(&c); nil != err {
- log.Println(err)
- }
- }
-}
-
-// GraphiteOnce performs a single submission to Graphite, returning a
-// non-nil error on failed connections. This can be used in a loop
-// similar to GraphiteWithConfig for custom error handling.
-func GraphiteOnce(c GraphiteConfig) error {
- log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
- return graphite(&c)
-}
-
-func graphite(c *GraphiteConfig) error {
- now := time.Now().Unix()
- du := float64(c.DurationUnit)
- conn, err := net.DialTCP("tcp", nil, c.Addr)
- if nil != err {
- return err
- }
- defer conn.Close()
- w := bufio.NewWriter(conn)
- c.Registry.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
- case Gauge:
- fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
- case GaugeFloat64:
- fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles(c.Percentiles)
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
- fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
- fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
- fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
- for psIdx, psKey := range c.Percentiles {
- key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
- fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
- }
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
- fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
- fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
- fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles(c.Percentiles)
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
- fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
- fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
- fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
- for psIdx, psKey := range c.Percentiles {
- key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
- fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
- }
- fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
- fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
- fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
- fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
- }
- w.Flush()
- })
- return nil
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
deleted file mode 100644
index 445131cae..000000000
--- a/vendor/github.com/rcrowley/go-metrics/healthcheck.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package metrics
-
-// Healthchecks hold an error value describing an arbitrary up/down status.
-type Healthcheck interface {
- Check()
- Error() error
- Healthy()
- Unhealthy(error)
-}
-
-// NewHealthcheck constructs a new Healthcheck which will use the given
-// function to update its status.
-func NewHealthcheck(f func(Healthcheck)) Healthcheck {
- if UseNilMetrics {
- return NilHealthcheck{}
- }
- return &StandardHealthcheck{nil, f}
-}
-
-// NilHealthcheck is a no-op.
-type NilHealthcheck struct{}
-
-// Check is a no-op.
-func (NilHealthcheck) Check() {}
-
-// Error is a no-op.
-func (NilHealthcheck) Error() error { return nil }
-
-// Healthy is a no-op.
-func (NilHealthcheck) Healthy() {}
-
-// Unhealthy is a no-op.
-func (NilHealthcheck) Unhealthy(error) {}
-
-// StandardHealthcheck is the standard implementation of a Healthcheck and
-// stores the status and a function to call to update the status.
-type StandardHealthcheck struct {
- err error
- f func(Healthcheck)
-}
-
-// Check runs the healthcheck function to update the healthcheck's status.
-func (h *StandardHealthcheck) Check() {
- h.f(h)
-}
-
-// Error returns the healthcheck's status, which will be nil if it is healthy.
-func (h *StandardHealthcheck) Error() error {
- return h.err
-}
-
-// Healthy marks the healthcheck as healthy.
-func (h *StandardHealthcheck) Healthy() {
- h.err = nil
-}
-
-// Unhealthy marks the healthcheck as unhealthy. The error is stored and
-// may be retrieved by the Error method.
-func (h *StandardHealthcheck) Unhealthy(err error) {
- h.err = err
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
deleted file mode 100644
index dbc837fe4..000000000
--- a/vendor/github.com/rcrowley/go-metrics/histogram.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package metrics
-
-// Histograms calculate distribution statistics from a series of int64 values.
-type Histogram interface {
- Clear()
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Sample() Sample
- Snapshot() Histogram
- StdDev() float64
- Sum() int64
- Update(int64)
- Variance() float64
-}
-
-// GetOrRegisterHistogram returns an existing Histogram or constructs and
-// registers a new StandardHistogram.
-func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
-}
-
-// NewHistogram constructs a new StandardHistogram from a Sample.
-func NewHistogram(s Sample) Histogram {
- if UseNilMetrics {
- return NilHistogram{}
- }
- return &StandardHistogram{sample: s}
-}
-
-// NewRegisteredHistogram constructs and registers a new StandardHistogram from
-// a Sample.
-func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
- c := NewHistogram(s)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// HistogramSnapshot is a read-only copy of another Histogram.
-type HistogramSnapshot struct {
- sample *SampleSnapshot
-}
-
-// Clear panics.
-func (*HistogramSnapshot) Clear() {
- panic("Clear called on a HistogramSnapshot")
-}
-
-// Count returns the number of samples recorded at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample at the time the snapshot
-// was taken.
-func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the sample
-// at the time the snapshot was taken.
-func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *HistogramSnapshot) Sample() Sample { return h.sample }
-
-// Snapshot returns the snapshot.
-func (h *HistogramSnapshot) Snapshot() Histogram { return h }
-
-// StdDev returns the standard deviation of the values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample at the time the snapshot was taken.
-func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
-
-// Update panics.
-func (*HistogramSnapshot) Update(int64) {
- panic("Update called on a HistogramSnapshot")
-}
-
-// Variance returns the variance of inputs at the time the snapshot was taken.
-func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
-
-// NilHistogram is a no-op Histogram.
-type NilHistogram struct{}
-
-// Clear is a no-op.
-func (NilHistogram) Clear() {}
-
-// Count is a no-op.
-func (NilHistogram) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilHistogram) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilHistogram) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilHistogram) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilHistogram) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Sample is a no-op.
-func (NilHistogram) Sample() Sample { return NilSample{} }
-
-// Snapshot is a no-op.
-func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
-
-// StdDev is a no-op.
-func (NilHistogram) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilHistogram) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilHistogram) Update(v int64) {}
-
-// Variance is a no-op.
-func (NilHistogram) Variance() float64 { return 0.0 }
-
-// StandardHistogram is the standard implementation of a Histogram and uses a
-// Sample to bound its memory use.
-type StandardHistogram struct {
- sample Sample
-}
-
-// Clear clears the histogram and its sample.
-func (h *StandardHistogram) Clear() { h.sample.Clear() }
-
-// Count returns the number of samples recorded since the histogram was last
-// cleared.
-func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample.
-func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample.
-func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample.
-func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (h *StandardHistogram) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *StandardHistogram) Sample() Sample { return h.sample }
-
-// Snapshot returns a read-only copy of the histogram.
-func (h *StandardHistogram) Snapshot() Histogram {
- return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample.
-func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
-
-// Update samples a new value.
-func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
-
-// Variance returns the variance of the values in the sample.
-func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
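
A usage sketch for the Histogram above, pairing it with the uniform reservoir from the deleted sample.go further down; the metric name is illustrative.

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	// A histogram needs a Sample to bound its memory; a 1028-entry
	// uniform reservoir is the size used throughout this library.
	h := metrics.NewRegisteredHistogram("req.size", r, metrics.NewUniformSample(1028))

	for i := int64(1); i <= 100; i++ {
		h.Update(i)
	}

	snap := h.Snapshot() // read-only, internally consistent view
	ps := snap.Percentiles([]float64{0.5, 0.95, 0.99})
	fmt.Printf("count=%d mean=%.1f p50=%.1f p95=%.1f p99=%.1f\n",
		snap.Count(), snap.Mean(), ps[0], ps[1], ps[2])
}
```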
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
deleted file mode 100644
index 2fdcbcfbf..000000000
--- a/vendor/github.com/rcrowley/go-metrics/json.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package metrics
-
-import (
- "encoding/json"
- "io"
- "time"
-)
-
-// MarshalJSON returns a byte slice containing a JSON representation of all
-// the metrics in the Registry.
-func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
- data := make(map[string]map[string]interface{})
- r.Each(func(name string, i interface{}) {
- values := make(map[string]interface{})
- switch metric := i.(type) {
- case Counter:
- values["count"] = metric.Count()
- case Gauge:
- values["value"] = metric.Value()
- case GaugeFloat64:
- values["value"] = metric.Value()
- case Healthcheck:
- values["error"] = nil
- metric.Check()
- if err := metric.Error(); nil != err {
- values["error"] = metric.Error().Error()
- }
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- values["count"] = h.Count()
- values["min"] = h.Min()
- values["max"] = h.Max()
- values["mean"] = h.Mean()
- values["stddev"] = h.StdDev()
- values["median"] = ps[0]
- values["75%"] = ps[1]
- values["95%"] = ps[2]
- values["99%"] = ps[3]
- values["99.9%"] = ps[4]
- case Meter:
- m := metric.Snapshot()
- values["count"] = m.Count()
- values["1m.rate"] = m.Rate1()
- values["5m.rate"] = m.Rate5()
- values["15m.rate"] = m.Rate15()
- values["mean.rate"] = m.RateMean()
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- values["count"] = t.Count()
- values["min"] = t.Min()
- values["max"] = t.Max()
- values["mean"] = t.Mean()
- values["stddev"] = t.StdDev()
- values["median"] = ps[0]
- values["75%"] = ps[1]
- values["95%"] = ps[2]
- values["99%"] = ps[3]
- values["99.9%"] = ps[4]
- values["1m.rate"] = t.Rate1()
- values["5m.rate"] = t.Rate5()
- values["15m.rate"] = t.Rate15()
- values["mean.rate"] = t.RateMean()
- }
- data[name] = values
- })
- return json.Marshal(data)
-}
-
-// WriteJSON writes metrics from the given registry periodically to the
-// specified io.Writer as JSON.
-func WriteJSON(r Registry, d time.Duration, w io.Writer) {
-	for range time.Tick(d) {
- WriteJSONOnce(r, w)
- }
-}
-
-// WriteJSONOnce writes metrics from the given registry to the specified
-// io.Writer as JSON.
-func WriteJSONOnce(r Registry, w io.Writer) {
- json.NewEncoder(w).Encode(r)
-}
-
-func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
- return json.Marshal(p.underlying)
-}
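
A small sketch of the JSON reporter above; it works because StandardRegistry implements json.Marshaler.

```go
package main

import (
	"os"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.NewRegisteredMeter("events", r).Mark(3)

	// One-shot dump of the whole registry, keyed by metric name;
	// WriteJSON wraps this same call in a time.Tick loop.
	metrics.WriteJSONOnce(r, os.Stdout)
}
```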
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
deleted file mode 100644
index f8074c045..000000000
--- a/vendor/github.com/rcrowley/go-metrics/log.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package metrics
-
-import (
- "time"
-)
-
-// Logger is the minimal logging interface the reporters need; a *log.Logger
-// satisfies it.
-type Logger interface {
- Printf(format string, v ...interface{})
-}
-
-// Log outputs each metric in the given registry periodically using the given
-// logger, printing timings in nanoseconds.
-func Log(r Registry, freq time.Duration, l Logger) {
- LogScaled(r, freq, time.Nanosecond, l)
-}
-
-// LogScaled outputs each metric in the given registry periodically using the
-// given logger, printing timings in `scale` units (e.g. time.Millisecond)
-// rather than in nanoseconds.
-func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
- du := float64(scale)
- duSuffix := scale.String()[1:]
-
-	for range time.Tick(freq) {
- r.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- l.Printf("counter %s\n", name)
- l.Printf(" count: %9d\n", metric.Count())
- case Gauge:
- l.Printf("gauge %s\n", name)
- l.Printf(" value: %9d\n", metric.Value())
- case GaugeFloat64:
- l.Printf("gauge %s\n", name)
- l.Printf(" value: %f\n", metric.Value())
- case Healthcheck:
- metric.Check()
- l.Printf("healthcheck %s\n", name)
- l.Printf(" error: %v\n", metric.Error())
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- l.Printf("histogram %s\n", name)
- l.Printf(" count: %9d\n", h.Count())
- l.Printf(" min: %9d\n", h.Min())
- l.Printf(" max: %9d\n", h.Max())
- l.Printf(" mean: %12.2f\n", h.Mean())
- l.Printf(" stddev: %12.2f\n", h.StdDev())
- l.Printf(" median: %12.2f\n", ps[0])
- l.Printf(" 75%%: %12.2f\n", ps[1])
- l.Printf(" 95%%: %12.2f\n", ps[2])
- l.Printf(" 99%%: %12.2f\n", ps[3])
- l.Printf(" 99.9%%: %12.2f\n", ps[4])
- case Meter:
- m := metric.Snapshot()
- l.Printf("meter %s\n", name)
- l.Printf(" count: %9d\n", m.Count())
- l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
- l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
- l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
- l.Printf(" mean rate: %12.2f\n", m.RateMean())
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- l.Printf("timer %s\n", name)
- l.Printf(" count: %9d\n", t.Count())
- l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
- l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
- l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
- l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
- l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
- l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
- l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
- l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
- l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
- l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
- l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
- l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
- l.Printf(" mean rate: %12.2f\n", t.RateMean())
- }
- })
- }
-}
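
A sketch of the logging reporter above; since LogScaled blocks on time.Tick, it is usually launched in a goroutine, and *log.Logger satisfies the Logger interface.

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.NewRegisteredMeter("requests", r).Mark(1)

	// Report every 30 seconds, printing timer durations in milliseconds;
	// plain Log(...) is the same call with a nanosecond scale.
	go metrics.LogScaled(r, 30*time.Second, time.Millisecond,
		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))

	time.Sleep(time.Minute) // keep the demo process alive
}
```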
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
deleted file mode 100644
index 47454f54b..000000000
--- a/vendor/github.com/rcrowley/go-metrics/memory.md
+++ /dev/null
@@ -1,285 +0,0 @@
-Memory usage
-============
-
-(Highly unscientific.)
-
-Command used to gather static memory usage:
-
-```sh
-grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
-```
-
-Program used to gather baseline memory usage:
-
-```go
-package main
-
-import "time"
-
-func main() {
- time.Sleep(600e9)
-}
-```
-
-Baseline
---------
-
-```
-VmPeak: 42604 kB
-VmSize: 42604 kB
-VmLck: 0 kB
-VmHWM: 1120 kB
-VmRSS: 1120 kB
-VmData: 35460 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 36 kB
-VmSwap: 0 kB
-```
-
-Program used to gather metric memory usage (with other metrics being similar):
-
-```go
-package main
-
-import (
- "fmt"
- "metrics"
- "time"
-)
-
-func main() {
- fmt.Sprintf("foo")
- metrics.NewRegistry()
- time.Sleep(600e9)
-}
-```
-
-1000 counters registered
-------------------------
-
-```
-VmPeak: 44016 kB
-VmSize: 44016 kB
-VmLck: 0 kB
-VmHWM: 1928 kB
-VmRSS: 1928 kB
-VmData: 36868 kB
-VmStk: 136 kB
-VmExe: 1024 kB
-VmLib: 1848 kB
-VmPTE: 40 kB
-VmSwap: 0 kB
-```
-
-**1.412 kB virtual, TODO 0.808 kB resident per counter.**
-
-100000 counters registered
---------------------------
-
-```
-VmPeak: 55024 kB
-VmSize: 55024 kB
-VmLck: 0 kB
-VmHWM: 12440 kB
-VmRSS: 12440 kB
-VmData: 47876 kB
-VmStk: 136 kB
-VmExe: 1024 kB
-VmLib: 1848 kB
-VmPTE: 64 kB
-VmSwap: 0 kB
-```
-
-**0.1242 kB virtual, 0.1132 kB resident per counter.**
-
-1000 gauges registered
-----------------------
-
-```
-VmPeak: 44012 kB
-VmSize: 44012 kB
-VmLck: 0 kB
-VmHWM: 1928 kB
-VmRSS: 1928 kB
-VmData: 36868 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 40 kB
-VmSwap: 0 kB
-```
-
-**1.408 kB virtual, 0.808 kB resident per gauge.**
-
-100000 gauges registered
-------------------------
-
-```
-VmPeak: 55020 kB
-VmSize: 55020 kB
-VmLck: 0 kB
-VmHWM: 12432 kB
-VmRSS: 12432 kB
-VmData: 47876 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 60 kB
-VmSwap: 0 kB
-```
-
-**0.12416 kB virtual, 0.11312 kB resident per gauge.**
-
-1000 histograms with a uniform sample size of 1028
---------------------------------------------------
-
-```
-VmPeak: 72272 kB
-VmSize: 72272 kB
-VmLck: 0 kB
-VmHWM: 16204 kB
-VmRSS: 16204 kB
-VmData: 65100 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 80 kB
-VmSwap: 0 kB
-```
-
-**29.668 kB virtual, TODO 15.084 kB resident per histogram.**
-
-10000 histograms with a uniform sample size of 1028
----------------------------------------------------
-
-```
-VmPeak: 256912 kB
-VmSize: 256912 kB
-VmLck: 0 kB
-VmHWM: 146204 kB
-VmRSS: 146204 kB
-VmData: 249740 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 448 kB
-VmSwap: 0 kB
-```
-
-**21.4308 kB virtual, 14.5084 kB resident per histogram.**
-
-50000 histograms with a uniform sample size of 1028
----------------------------------------------------
-
-```
-VmPeak: 908112 kB
-VmSize: 908112 kB
-VmLck: 0 kB
-VmHWM: 645832 kB
-VmRSS: 645588 kB
-VmData: 900940 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 1716 kB
-VmSwap: 1544 kB
-```
-
-**17.31016 kB virtual, 12.88936 kB resident per histogram.**
-
-1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
--------------------------------------------------------------------------------------
-
-```
-VmPeak: 62480 kB
-VmSize: 62480 kB
-VmLck: 0 kB
-VmHWM: 11572 kB
-VmRSS: 11572 kB
-VmData: 55308 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 64 kB
-VmSwap: 0 kB
-```
-
-**19.876 kB virtual, 10.452 kB resident per histogram.**
-
-10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
---------------------------------------------------------------------------------------
-
-```
-VmPeak: 153296 kB
-VmSize: 153296 kB
-VmLck: 0 kB
-VmHWM: 101176 kB
-VmRSS: 101176 kB
-VmData: 146124 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 240 kB
-VmSwap: 0 kB
-```
-
-**11.0692 kB virtual, 10.0056 kB resident per histogram.**
-
-50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
---------------------------------------------------------------------------------------
-
-```
-VmPeak: 557264 kB
-VmSize: 557264 kB
-VmLck: 0 kB
-VmHWM: 501056 kB
-VmRSS: 501056 kB
-VmData: 550092 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 1032 kB
-VmSwap: 0 kB
-```
-
-**10.2932 kB virtual, 9.99872 kB resident per histogram.**
-
-1000 meters
------------
-
-```
-VmPeak: 74504 kB
-VmSize: 74504 kB
-VmLck: 0 kB
-VmHWM: 24124 kB
-VmRSS: 24124 kB
-VmData: 67340 kB
-VmStk: 136 kB
-VmExe: 1040 kB
-VmLib: 1848 kB
-VmPTE: 92 kB
-VmSwap: 0 kB
-```
-
-**31.9 kB virtual, 23.004 kB resident per meter.**
-
-10000 meters
-------------
-
-```
-VmPeak: 278920 kB
-VmSize: 278920 kB
-VmLck: 0 kB
-VmHWM: 227300 kB
-VmRSS: 227300 kB
-VmData: 271756 kB
-VmStk: 136 kB
-VmExe: 1040 kB
-VmLib: 1848 kB
-VmPTE: 488 kB
-VmSwap: 0 kB
-```
-
-**23.6316 kB virtual, 22.618 kB resident per meter.**
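
The bolded per-metric figures above are the deltas from the baseline divided by the number of registered metrics, e.g. for 1000 counters: (44016 - 42604) kB / 1000 = 1.412 kB virtual and (1928 - 1120) kB / 1000 = 0.808 kB resident per counter.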
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
deleted file mode 100644
index 0389ab0b8..000000000
--- a/vendor/github.com/rcrowley/go-metrics/meter.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package metrics
-
-import (
- "sync"
- "time"
-)
-
-// Meters count events to produce exponentially-weighted moving average rates
-// at one-, five-, and fifteen-minute intervals, plus a mean rate.
-type Meter interface {
- Count() int64
- Mark(int64)
- Rate1() float64
- Rate5() float64
- Rate15() float64
- RateMean() float64
- Snapshot() Meter
-}
-
-// GetOrRegisterMeter returns an existing Meter or constructs and registers a
-// new StandardMeter.
-func GetOrRegisterMeter(name string, r Registry) Meter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewMeter).(Meter)
-}
-
-// NewMeter constructs a new StandardMeter and launches a goroutine.
-func NewMeter() Meter {
- if UseNilMetrics {
- return NilMeter{}
- }
- m := newStandardMeter()
- arbiter.Lock()
- defer arbiter.Unlock()
- arbiter.meters = append(arbiter.meters, m)
- if !arbiter.started {
- arbiter.started = true
- go arbiter.tick()
- }
- return m
-}
-
-// NewRegisteredMeter constructs and registers a new StandardMeter and
-// launches a goroutine.
-func NewRegisteredMeter(name string, r Registry) Meter {
- c := NewMeter()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// MeterSnapshot is a read-only copy of another Meter.
-type MeterSnapshot struct {
- count int64
- rate1, rate5, rate15, rateMean float64
-}
-
-// Count returns the count of events at the time the snapshot was taken.
-func (m *MeterSnapshot) Count() int64 { return m.count }
-
-// Mark panics.
-func (*MeterSnapshot) Mark(n int64) {
- panic("Mark called on a MeterSnapshot")
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
-
-// Snapshot returns the snapshot.
-func (m *MeterSnapshot) Snapshot() Meter { return m }
-
-// NilMeter is a no-op Meter.
-type NilMeter struct{}
-
-// Count is a no-op.
-func (NilMeter) Count() int64 { return 0 }
-
-// Mark is a no-op.
-func (NilMeter) Mark(n int64) {}
-
-// Rate1 is a no-op.
-func (NilMeter) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilMeter) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilMeter) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilMeter) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilMeter) Snapshot() Meter { return NilMeter{} }
-
-// StandardMeter is the standard implementation of a Meter.
-type StandardMeter struct {
- lock sync.RWMutex
- snapshot *MeterSnapshot
- a1, a5, a15 EWMA
- startTime time.Time
-}
-
-func newStandardMeter() *StandardMeter {
- return &StandardMeter{
- snapshot: &MeterSnapshot{},
- a1: NewEWMA1(),
- a5: NewEWMA5(),
- a15: NewEWMA15(),
- startTime: time.Now(),
- }
-}
-
-// Count returns the number of events recorded.
-func (m *StandardMeter) Count() int64 {
- m.lock.RLock()
- count := m.snapshot.count
- m.lock.RUnlock()
- return count
-}
-
-// Mark records the occurrence of n events.
-func (m *StandardMeter) Mark(n int64) {
- m.lock.Lock()
- defer m.lock.Unlock()
- m.snapshot.count += n
- m.a1.Update(n)
- m.a5.Update(n)
- m.a15.Update(n)
- m.updateSnapshot()
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (m *StandardMeter) Rate1() float64 {
- m.lock.RLock()
- rate1 := m.snapshot.rate1
- m.lock.RUnlock()
- return rate1
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (m *StandardMeter) Rate5() float64 {
- m.lock.RLock()
- rate5 := m.snapshot.rate5
- m.lock.RUnlock()
- return rate5
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (m *StandardMeter) Rate15() float64 {
- m.lock.RLock()
- rate15 := m.snapshot.rate15
- m.lock.RUnlock()
- return rate15
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (m *StandardMeter) RateMean() float64 {
- m.lock.RLock()
- rateMean := m.snapshot.rateMean
- m.lock.RUnlock()
- return rateMean
-}
-
-// Snapshot returns a read-only copy of the meter.
-func (m *StandardMeter) Snapshot() Meter {
- m.lock.RLock()
- snapshot := *m.snapshot
- m.lock.RUnlock()
- return &snapshot
-}
-
-func (m *StandardMeter) updateSnapshot() {
- // should run with write lock held on m.lock
- snapshot := m.snapshot
- snapshot.rate1 = m.a1.Rate()
- snapshot.rate5 = m.a5.Rate()
- snapshot.rate15 = m.a15.Rate()
- snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
-}
-
-func (m *StandardMeter) tick() {
- m.lock.Lock()
- defer m.lock.Unlock()
- m.a1.Tick()
- m.a5.Tick()
- m.a15.Tick()
- m.updateSnapshot()
-}
-
-type meterArbiter struct {
- sync.RWMutex
- started bool
- meters []*StandardMeter
- ticker *time.Ticker
-}
-
-var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
-
-// tick ticks all registered meters on the arbiter's five-second interval.
-func (ma *meterArbiter) tick() {
-	for range ma.ticker.C {
-		ma.tickMeters()
-	}
-}
-
-func (ma *meterArbiter) tickMeters() {
- ma.RLock()
- defer ma.RUnlock()
- for _, meter := range ma.meters {
- meter.tick()
- }
-}
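
A usage sketch for the Meter above; note that NewMeter starts the shared arbiter goroutine, which ticks every meter's EWMAs on the 5-second ticker defined above.

```go
package main

import (
	"fmt"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	m := metrics.NewRegisteredMeter("ingress.packets", metrics.DefaultRegistry)

	// Mark feeds the one-/five-/fifteen-minute EWMAs and the mean rate.
	for i := 0; i < 12; i++ {
		m.Mark(100)
		time.Sleep(time.Second)
	}

	fmt.Printf("count=%d 1m=%.2f/s mean=%.2f/s\n",
		m.Count(), m.Rate1(), m.RateMean())
}
```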
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
deleted file mode 100644
index b97a49ed1..000000000
--- a/vendor/github.com/rcrowley/go-metrics/metrics.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Go port of Coda Hale's Metrics library
-//
-// <https://github.com/rcrowley/go-metrics>
-//
-// Coda Hale's original work: <https://github.com/codahale/metrics>
-package metrics
-
-// UseNilMetrics is checked by the constructor functions for all of the
-// standard metrics. If it is true, the metric returned is a stub.
-//
-// This global kill-switch helps quantify the observer effect and makes
-// for less cluttered pprof profiles.
-var UseNilMetrics bool
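
A sketch of the kill-switch in action, assuming NewCounter from the counter.go deleted in this same commit; the flag must be set before any constructors run.

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	metrics.UseNilMetrics = true // set before constructing anything

	c := metrics.NewCounter() // now a no-op stub
	c.Inc(42)
	fmt.Println(c.Count()) // prints 0; nothing was recorded
}
```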
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
deleted file mode 100644
index 266b6c93d..000000000
--- a/vendor/github.com/rcrowley/go-metrics/opentsdb.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package metrics
-
-import (
- "bufio"
- "fmt"
- "log"
- "net"
- "os"
- "strings"
- "time"
-)
-
-// shortHostName caches the host's name up to the first dot.
-var shortHostName string
-
-// OpenTSDBConfig provides a container with configuration parameters for
-// the OpenTSDB exporter.
-type OpenTSDBConfig struct {
- Addr *net.TCPAddr // Network address to connect to
- Registry Registry // Registry to be exported
- FlushInterval time.Duration // Flush interval
- DurationUnit time.Duration // Time conversion unit for durations
- Prefix string // Prefix to be prepended to metric names
-}
-
-// OpenTSDB is a blocking exporter function which reports metrics in r
-// to a TSDB server located at addr, flushing them every d duration
-// and prepending metric names with prefix.
-func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
- OpenTSDBWithConfig(OpenTSDBConfig{
- Addr: addr,
- Registry: r,
- FlushInterval: d,
- DurationUnit: time.Nanosecond,
- Prefix: prefix,
- })
-}
-
-// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
-// but it takes an OpenTSDBConfig instead.
-func OpenTSDBWithConfig(c OpenTSDBConfig) {
-	for range time.Tick(c.FlushInterval) {
- if err := openTSDB(&c); nil != err {
- log.Println(err)
- }
- }
-}
-
-func getShortHostname() string {
- if shortHostName == "" {
- host, _ := os.Hostname()
- if index := strings.Index(host, "."); index > 0 {
- shortHostName = host[:index]
- } else {
- shortHostName = host
- }
- }
- return shortHostName
-}
-
-func openTSDB(c *OpenTSDBConfig) error {
- shortHostname := getShortHostname()
- now := time.Now().Unix()
- du := float64(c.DurationUnit)
- conn, err := net.DialTCP("tcp", nil, c.Addr)
- if nil != err {
- return err
- }
- defer conn.Close()
- w := bufio.NewWriter(conn)
- c.Registry.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
- case Gauge:
- fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
- case GaugeFloat64:
- fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
- fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
- fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
- fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
- fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
- fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
- }
- w.Flush()
- })
- return nil
-}
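
A sketch of driving the OpenTSDB exporter above; localhost:4242 is OpenTSDB's conventional telnet-style listener and stands in for a real collector.

```go
package main

import (
	"net"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "localhost:4242")
	if err != nil {
		panic(err)
	}

	metrics.NewRegisteredMeter("ingress", metrics.DefaultRegistry).Mark(1)

	// Blocks, dialing the collector and emitting "put ..." lines for the
	// whole registry every 30 seconds, with durations kept in nanoseconds.
	metrics.OpenTSDB(metrics.DefaultRegistry, 30*time.Second, "myhost", addr)
}
```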
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
deleted file mode 100644
index 2bb7a1e7d..000000000
--- a/vendor/github.com/rcrowley/go-metrics/registry.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
-)
-
-// DuplicateMetric is the error returned by Registry.Register when a metric
-// already exists. If you mean to Register that metric you must first
-// Unregister the existing metric.
-type DuplicateMetric string
-
-func (err DuplicateMetric) Error() string {
- return fmt.Sprintf("duplicate metric: %s", string(err))
-}
-
-// A Registry holds references to a set of metrics by name and can iterate
-// over them, calling callback functions provided by the user.
-//
-// This is an interface so as to encourage other structs to implement
-// the Registry API as appropriate.
-type Registry interface {
-
- // Call the given function for each registered metric.
- Each(func(string, interface{}))
-
- // Get the metric by the given name or nil if none is registered.
- Get(string) interface{}
-
- // Gets an existing metric or registers the given one.
- // The interface can be the metric to register if not found in registry,
- // or a function returning the metric for lazy instantiation.
- GetOrRegister(string, interface{}) interface{}
-
- // Register the given metric under the given name.
- Register(string, interface{}) error
-
- // Run all registered healthchecks.
- RunHealthchecks()
-
- // Unregister the metric with the given name.
- Unregister(string)
-
- // Unregister all metrics. (Mostly for testing.)
- UnregisterAll()
-}
-
-// StandardRegistry is the standard implementation of a Registry: a
-// mutex-protected map of names to metrics.
-type StandardRegistry struct {
- metrics map[string]interface{}
- mutex sync.Mutex
-}
-
-// Create a new registry.
-func NewRegistry() Registry {
- return &StandardRegistry{metrics: make(map[string]interface{})}
-}
-
-// Call the given function for each registered metric.
-func (r *StandardRegistry) Each(f func(string, interface{})) {
- for name, i := range r.registered() {
- f(name, i)
- }
-}
-
-// Get the metric by the given name or nil if none is registered.
-func (r *StandardRegistry) Get(name string) interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- return r.metrics[name]
-}
-
-// Gets an existing metric or creates and registers a new one. Threadsafe
-// alternative to calling Get and Register on failure.
-// The interface can be the metric to register if not found in registry,
-// or a function returning the metric for lazy instantiation.
-func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- if metric, ok := r.metrics[name]; ok {
- return metric
- }
- if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
- i = v.Call(nil)[0].Interface()
- }
- r.register(name, i)
- return i
-}
-
-// Register the given metric under the given name. Returns a DuplicateMetric
-// if a metric by the given name is already registered.
-func (r *StandardRegistry) Register(name string, i interface{}) error {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- return r.register(name, i)
-}
-
-// Run all registered healthchecks.
-func (r *StandardRegistry) RunHealthchecks() {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- for _, i := range r.metrics {
- if h, ok := i.(Healthcheck); ok {
- h.Check()
- }
- }
-}
-
-// Unregister the metric with the given name.
-func (r *StandardRegistry) Unregister(name string) {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- delete(r.metrics, name)
-}
-
-// Unregister all metrics. (Mostly for testing.)
-func (r *StandardRegistry) UnregisterAll() {
- r.mutex.Lock()
- defer r.mutex.Unlock()
-	for name := range r.metrics {
- delete(r.metrics, name)
- }
-}
-
-func (r *StandardRegistry) register(name string, i interface{}) error {
- if _, ok := r.metrics[name]; ok {
- return DuplicateMetric(name)
- }
- switch i.(type) {
- case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
- r.metrics[name] = i
- }
- return nil
-}
-
-func (r *StandardRegistry) registered() map[string]interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- metrics := make(map[string]interface{}, len(r.metrics))
- for name, i := range r.metrics {
- metrics[name] = i
- }
- return metrics
-}
-
-// PrefixedRegistry wraps another Registry, prepending a fixed prefix to
-// every metric name.
-type PrefixedRegistry struct {
- underlying Registry
- prefix string
-}
-
-// NewPrefixedRegistry creates a fresh StandardRegistry wrapped with the
-// given prefix.
-func NewPrefixedRegistry(prefix string) Registry {
- return &PrefixedRegistry{
- underlying: NewRegistry(),
- prefix: prefix,
- }
-}
-
-// NewPrefixedChildRegistry wraps an existing Registry with the given prefix.
-func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
- return &PrefixedRegistry{
- underlying: parent,
- prefix: prefix,
- }
-}
-
-// Call the given function for each registered metric.
-func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
- wrappedFn := func(prefix string) func(string, interface{}) {
- return func(name string, iface interface{}) {
- if strings.HasPrefix(name, prefix) {
- fn(name, iface)
-			}
- }
- }
-
- baseRegistry, prefix := findPrefix(r, "")
- baseRegistry.Each(wrappedFn(prefix))
-}
-
-func findPrefix(registry Registry, prefix string) (Registry, string) {
- switch r := registry.(type) {
- case *PrefixedRegistry:
- return findPrefix(r.underlying, r.prefix+prefix)
- case *StandardRegistry:
- return r, prefix
- }
- return nil, ""
-}
-
-// Get the metric by the given name or nil if none is registered.
-func (r *PrefixedRegistry) Get(name string) interface{} {
- realName := r.prefix + name
- return r.underlying.Get(realName)
-}
-
-// Gets an existing metric or registers the given one.
-// The interface can be the metric to register if not found in registry,
-// or a function returning the metric for lazy instantiation.
-func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
- realName := r.prefix + name
- return r.underlying.GetOrRegister(realName, metric)
-}
-
-// Register the given metric under the given name. The name will be prefixed.
-func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
- realName := r.prefix + name
- return r.underlying.Register(realName, metric)
-}
-
-// Run all registered healthchecks.
-func (r *PrefixedRegistry) RunHealthchecks() {
- r.underlying.RunHealthchecks()
-}
-
-// Unregister the metric with the given name. The name will be prefixed.
-func (r *PrefixedRegistry) Unregister(name string) {
- realName := r.prefix + name
- r.underlying.Unregister(realName)
-}
-
-// Unregister all metrics. (Mostly for testing.)
-func (r *PrefixedRegistry) UnregisterAll() {
- r.underlying.UnregisterAll()
-}
-
-// DefaultRegistry is the Registry used by the package-level helper
-// functions below.
-var DefaultRegistry Registry = NewRegistry()
-
-// Call the given function for each registered metric.
-func Each(f func(string, interface{})) {
- DefaultRegistry.Each(f)
-}
-
-// Get the metric by the given name or nil if none is registered.
-func Get(name string) interface{} {
- return DefaultRegistry.Get(name)
-}
-
-// Gets an existing metric or creates and registers a new one. Threadsafe
-// alternative to calling Get and Register on failure.
-func GetOrRegister(name string, i interface{}) interface{} {
- return DefaultRegistry.GetOrRegister(name, i)
-}
-
-// Register the given metric under the given name. Returns a DuplicateMetric
-// if a metric by the given name is already registered.
-func Register(name string, i interface{}) error {
- return DefaultRegistry.Register(name, i)
-}
-
-// Register the given metric under the given name. Panics if a metric by the
-// given name is already registered.
-func MustRegister(name string, i interface{}) {
- if err := Register(name, i); err != nil {
- panic(err)
- }
-}
-
-// Run all registered healthchecks.
-func RunHealthchecks() {
- DefaultRegistry.RunHealthchecks()
-}
-
-// Unregister the metric with the given name.
-func Unregister(name string) {
- DefaultRegistry.Unregister(name)
-}
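
A sketch tying the registry pieces together; NewCounter and NewGauge come from the counter.go and gauge.go deleted elsewhere in this commit, and the metric names are illustrative.

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	// GetOrRegister accepts either a ready-made metric or, for lazy
	// instantiation, a nullary constructor such as NewGauge itself.
	c := r.GetOrRegister("jobs.done", metrics.NewCounter()).(metrics.Counter)
	c.Inc(1)
	g := r.GetOrRegister("queue.depth", metrics.NewGauge).(metrics.Gauge)
	g.Update(7)

	// A prefixed child shares the parent's backing map but namespaces names.
	child := metrics.NewPrefixedChildRegistry(r, "worker.")
	child.GetOrRegister("restarts", metrics.NewCounter())

	r.Each(func(name string, i interface{}) {
		fmt.Println(name) // jobs.done, queue.depth, worker.restarts
	})
}
```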
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
deleted file mode 100644
index 11c6b785a..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package metrics
-
-import (
- "runtime"
- "runtime/pprof"
- "time"
-)
-
-var (
- memStats runtime.MemStats
- runtimeMetrics struct {
- MemStats struct {
- Alloc Gauge
- BuckHashSys Gauge
- DebugGC Gauge
- EnableGC Gauge
- Frees Gauge
- HeapAlloc Gauge
- HeapIdle Gauge
- HeapInuse Gauge
- HeapObjects Gauge
- HeapReleased Gauge
- HeapSys Gauge
- LastGC Gauge
- Lookups Gauge
- Mallocs Gauge
- MCacheInuse Gauge
- MCacheSys Gauge
- MSpanInuse Gauge
- MSpanSys Gauge
- NextGC Gauge
- NumGC Gauge
- GCCPUFraction GaugeFloat64
- PauseNs Histogram
- PauseTotalNs Gauge
- StackInuse Gauge
- StackSys Gauge
- Sys Gauge
- TotalAlloc Gauge
- }
- NumCgoCall Gauge
- NumGoroutine Gauge
- NumThread Gauge
- ReadMemStats Timer
- }
- frees uint64
- lookups uint64
- mallocs uint64
- numGC uint32
- numCgoCalls int64
-
- threadCreateProfile = pprof.Lookup("threadcreate")
-)
-
-// Capture new values for the Go runtime statistics exported in
-// runtime.MemStats. This is designed to be called as a goroutine.
-func CaptureRuntimeMemStats(r Registry, d time.Duration) {
-	for range time.Tick(d) {
- CaptureRuntimeMemStatsOnce(r)
- }
-}
-
-// Capture new values for the Go runtime statistics exported in
-// runtime.MemStats. This is designed to be called in a background
-// goroutine. Passing a registry that has not first been given to
-// RegisterRuntimeMemStats will cause a panic.
-//
-// Be very careful with this because runtime.ReadMemStats calls the C
-// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
-// and that last one does what it says on the tin.
-func CaptureRuntimeMemStatsOnce(r Registry) {
- t := time.Now()
- runtime.ReadMemStats(&memStats) // This takes 50-200us.
- runtimeMetrics.ReadMemStats.UpdateSince(t)
-
- runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
- runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
- if memStats.DebugGC {
- runtimeMetrics.MemStats.DebugGC.Update(1)
- } else {
- runtimeMetrics.MemStats.DebugGC.Update(0)
- }
- if memStats.EnableGC {
- runtimeMetrics.MemStats.EnableGC.Update(1)
- } else {
- runtimeMetrics.MemStats.EnableGC.Update(0)
- }
-
- runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
- runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
- runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
- runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
- runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
- runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
- runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
- runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
- runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
- runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
- runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
- runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
- runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
- runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
- runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
- runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
- runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
-
- // <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
- i := numGC % uint32(len(memStats.PauseNs))
- ii := memStats.NumGC % uint32(len(memStats.PauseNs))
- if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
- for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- } else {
- if i > ii {
- for ; i < uint32(len(memStats.PauseNs)); i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- i = 0
- }
- for ; i < ii; i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- }
- frees = memStats.Frees
- lookups = memStats.Lookups
- mallocs = memStats.Mallocs
- numGC = memStats.NumGC
-
- runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
- runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
- runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
- runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
- runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
-
- currentNumCgoCalls := numCgoCall()
- runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
- numCgoCalls = currentNumCgoCalls
-
- runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
-
- runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
-}
-
-// Register runtimeMetrics for the Go runtime statistics exported in runtime and
-// specifically runtime.MemStats. The runtimeMetrics are named by their
-// fully-qualified Go symbols, e.g. runtime.MemStats.Alloc.
-func RegisterRuntimeMemStats(r Registry) {
- runtimeMetrics.MemStats.Alloc = NewGauge()
- runtimeMetrics.MemStats.BuckHashSys = NewGauge()
- runtimeMetrics.MemStats.DebugGC = NewGauge()
- runtimeMetrics.MemStats.EnableGC = NewGauge()
- runtimeMetrics.MemStats.Frees = NewGauge()
- runtimeMetrics.MemStats.HeapAlloc = NewGauge()
- runtimeMetrics.MemStats.HeapIdle = NewGauge()
- runtimeMetrics.MemStats.HeapInuse = NewGauge()
- runtimeMetrics.MemStats.HeapObjects = NewGauge()
- runtimeMetrics.MemStats.HeapReleased = NewGauge()
- runtimeMetrics.MemStats.HeapSys = NewGauge()
- runtimeMetrics.MemStats.LastGC = NewGauge()
- runtimeMetrics.MemStats.Lookups = NewGauge()
- runtimeMetrics.MemStats.Mallocs = NewGauge()
- runtimeMetrics.MemStats.MCacheInuse = NewGauge()
- runtimeMetrics.MemStats.MCacheSys = NewGauge()
- runtimeMetrics.MemStats.MSpanInuse = NewGauge()
- runtimeMetrics.MemStats.MSpanSys = NewGauge()
- runtimeMetrics.MemStats.NextGC = NewGauge()
- runtimeMetrics.MemStats.NumGC = NewGauge()
- runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
- runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
- runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
- runtimeMetrics.MemStats.StackInuse = NewGauge()
- runtimeMetrics.MemStats.StackSys = NewGauge()
- runtimeMetrics.MemStats.Sys = NewGauge()
- runtimeMetrics.MemStats.TotalAlloc = NewGauge()
- runtimeMetrics.NumCgoCall = NewGauge()
- runtimeMetrics.NumGoroutine = NewGauge()
- runtimeMetrics.NumThread = NewGauge()
- runtimeMetrics.ReadMemStats = NewTimer()
-
- r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
- r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
- r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
- r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
- r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
- r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
- r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
- r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
- r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
- r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
- r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
- r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
- r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
- r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
- r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
- r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
- r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
- r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
- r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
- r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
- r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
- r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
- r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
- r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
- r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
- r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
- r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
- r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
- r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
- r.Register("runtime.NumThread", runtimeMetrics.NumThread)
- r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
-}
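
A sketch of the intended call pattern for the two functions above; registration must precede capture, since the capture path updates the package-level gauges initialized here.

```go
package main

import (
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.RegisterRuntimeMemStats(r) // allocate and register the gauges first

	// Sample in the background; each pass costs one runtime.ReadMemStats
	// (roughly 50-200us, per the comment in CaptureRuntimeMemStatsOnce).
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	time.Sleep(time.Minute) // keep the demo process alive
}
```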
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
deleted file mode 100644
index e3391f4e8..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build cgo
-// +build !appengine
-
-package metrics
-
-import "runtime"
-
-func numCgoCall() int64 {
- return runtime.NumCgoCall()
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
deleted file mode 100644
index ca12c05ba..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
- return memStats.GCCPUFraction
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
deleted file mode 100644
index 616a3b475..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !cgo appengine
-
-package metrics
-
-func numCgoCall() int64 {
- return 0
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
deleted file mode 100644
index be96aa6f1..000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
- return 0
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
deleted file mode 100644
index fecee5ef6..000000000
--- a/vendor/github.com/rcrowley/go-metrics/sample.go
+++ /dev/null
@@ -1,616 +0,0 @@
-package metrics
-
-import (
- "math"
- "math/rand"
- "sort"
- "sync"
- "time"
-)
-
-const rescaleThreshold = time.Hour
-
-// Samples maintain a statistically-significant selection of values from
-// a stream.
-type Sample interface {
- Clear()
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Size() int
- Snapshot() Sample
- StdDev() float64
- Sum() int64
- Update(int64)
- Values() []int64
- Variance() float64
-}
-
-// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
-// priority reservoir. See Cormode et al.'s "Forward Decay: A Practical Time
-// Decay Model for Streaming Systems".
-//
-// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>
-type ExpDecaySample struct {
- alpha float64
- count int64
- mutex sync.Mutex
- reservoirSize int
- t0, t1 time.Time
- values *expDecaySampleHeap
-}
-
-// NewExpDecaySample constructs a new exponentially-decaying sample with the
-// given reservoir size and alpha.
-func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
- if UseNilMetrics {
- return NilSample{}
- }
- s := &ExpDecaySample{
- alpha: alpha,
- reservoirSize: reservoirSize,
- t0: time.Now(),
- values: newExpDecaySampleHeap(reservoirSize),
- }
- s.t1 = s.t0.Add(rescaleThreshold)
- return s
-}
-
-// Clear clears all samples.
-func (s *ExpDecaySample) Clear() {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count = 0
- s.t0 = time.Now()
- s.t1 = s.t0.Add(rescaleThreshold)
- s.values.Clear()
-}
-
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *ExpDecaySample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Max() int64 {
- return SampleMax(s.Values())
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *ExpDecaySample) Mean() float64 {
- return SampleMean(s.Values())
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Min() int64 {
- return SampleMin(s.Values())
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *ExpDecaySample) Percentile(p float64) float64 {
- return SamplePercentile(s.Values(), p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.Values(), ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *ExpDecaySample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.values.Size()
-}
-
-// Snapshot returns a read-only copy of the sample.
-func (s *ExpDecaySample) Snapshot() Sample {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return &SampleSnapshot{
- count: s.count,
- values: values,
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *ExpDecaySample) StdDev() float64 {
- return SampleStdDev(s.Values())
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *ExpDecaySample) Sum() int64 {
- return SampleSum(s.Values())
-}
-
-// Update samples a new value.
-func (s *ExpDecaySample) Update(v int64) {
- s.update(time.Now(), v)
-}
-
-// Values returns a copy of the values in the sample.
-func (s *ExpDecaySample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *ExpDecaySample) Variance() float64 {
- return SampleVariance(s.Values())
-}
-
-// update samples a new value at a particular timestamp. This is a method all
-// its own to facilitate testing.
-func (s *ExpDecaySample) update(t time.Time, v int64) {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count++
- if s.values.Size() == s.reservoirSize {
- s.values.Pop()
- }
- s.values.Push(expDecaySample{
- k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
- v: v,
- })
- if t.After(s.t1) {
- values := s.values.Values()
- t0 := s.t0
- s.values.Clear()
- s.t0 = t
- s.t1 = s.t0.Add(rescaleThreshold)
- for _, v := range values {
- v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
- s.values.Push(v)
- }
- }
-}
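
One step worth spelling out: the rescale branch above preserves relative priorities. Each stored key is k = exp(alpha * (t - t0)) / u for a uniform u in (0, 1], so when the landmark moves from t0 to t0', multiplying every key by exp(-alpha * (t0' - t0)) yields exactly exp(alpha * (t - t0')) / u, i.e. the key that would have been computed against the new landmark; the ordering of entries is unchanged while the key magnitudes stay bounded.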
-
-// NilSample is a no-op Sample.
-type NilSample struct{}
-
-// Clear is a no-op.
-func (NilSample) Clear() {}
-
-// Count is a no-op.
-func (NilSample) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilSample) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilSample) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilSample) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilSample) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilSample) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Size is a no-op.
-func (NilSample) Size() int { return 0 }
-
-// Snapshot is a no-op.
-func (NilSample) Snapshot() Sample { return NilSample{} }
-
-// StdDev is a no-op.
-func (NilSample) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilSample) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilSample) Update(v int64) {}
-
-// Values is a no-op.
-func (NilSample) Values() []int64 { return []int64{} }
-
-// Variance is a no-op.
-func (NilSample) Variance() float64 { return 0.0 }
-
-// SampleMax returns the maximum value of the slice of int64.
-func SampleMax(values []int64) int64 {
- if 0 == len(values) {
- return 0
- }
- var max int64 = math.MinInt64
- for _, v := range values {
- if max < v {
- max = v
- }
- }
- return max
-}
-
-// SampleMean returns the mean value of the slice of int64.
-func SampleMean(values []int64) float64 {
- if 0 == len(values) {
- return 0.0
- }
- return float64(SampleSum(values)) / float64(len(values))
-}
-
-// SampleMin returns the minimum value of the slice of int64.
-func SampleMin(values []int64) int64 {
- if 0 == len(values) {
- return 0
- }
- var min int64 = math.MaxInt64
- for _, v := range values {
- if min > v {
- min = v
- }
- }
- return min
-}
-
-// SamplePercentile returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
- return SamplePercentiles(values, []float64{p})[0]
-}
-
-// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
-// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
- scores := make([]float64, len(ps))
- size := len(values)
- if size > 0 {
- sort.Sort(values)
- for i, p := range ps {
- pos := p * float64(size+1)
- if pos < 1.0 {
- scores[i] = float64(values[0])
- } else if pos >= float64(size) {
- scores[i] = float64(values[size-1])
- } else {
- lower := float64(values[int(pos)-1])
- upper := float64(values[int(pos)])
- scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
- }
- }
- }
- return scores
-}
-
-// SampleSnapshot is a read-only copy of another Sample.
-type SampleSnapshot struct {
- count int64
- values []int64
-}
-
-// NewSampleSnapshot constructs a read-only Sample from a count and a slice
-// of values.
-func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
- return &SampleSnapshot{
- count: count,
- values: values,
- }
-}
-
-// Clear panics.
-func (*SampleSnapshot) Clear() {
- panic("Clear called on a SampleSnapshot")
-}
-
-// Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
-
-// Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
-
-// Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
-
-// Percentile returns an arbitrary percentile of values at the time the
-// snapshot was taken.
-func (s *SampleSnapshot) Percentile(p float64) float64 {
- return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values at the time
-// the snapshot was taken.
-func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample at the time the snapshot was taken.
-func (s *SampleSnapshot) Size() int { return len(s.values) }
-
-// Snapshot returns the snapshot.
-func (s *SampleSnapshot) Snapshot() Sample { return s }
-
-// StdDev returns the standard deviation of values at the time the snapshot was
-// taken.
-func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
-
-// Sum returns the sum of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
-
-// Update panics.
-func (*SampleSnapshot) Update(int64) {
- panic("Update called on a SampleSnapshot")
-}
-
-// Values returns a copy of the values in the sample.
-func (s *SampleSnapshot) Values() []int64 {
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return values
-}
-
-// Variance returns the variance of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
-
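Snapshots exist so that several statistics can be read from one consistent view of the data instead of racing concurrent Updates. A minimal usage fragment, assuming s is any previously-constructed Sample:

	snap := s.Snapshot() // read-only copy; Update and Clear on it panic
	fmt.Println(snap.Count(), snap.Mean(), snap.Percentile(0.95))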
-// SampleStdDev returns the standard deviation of the slice of int64.
-func SampleStdDev(values []int64) float64 {
- return math.Sqrt(SampleVariance(values))
-}
-
-// SampleSum returns the sum of the slice of int64.
-func SampleSum(values []int64) int64 {
- var sum int64
- for _, v := range values {
- sum += v
- }
- return sum
-}
-
-// SampleVariance returns the variance of the slice of int64.
-func SampleVariance(values []int64) float64 {
- if len(values) == 0 {
- return 0.0
- }
- m := SampleMean(values)
- var sum float64
- for _, v := range values {
- d := float64(v) - m
- sum += d * d
- }
- return sum / float64(len(values))
-}
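Note that SampleVariance divides by len(values), i.e. it computes the population variance rather than the Bessel-corrected sample variance. A quick sanity check, runnable from inside the package:

	vs := []int64{1, 2, 3, 4}       // mean 2.5, squared deviations sum to 5
	fmt.Println(SampleVariance(vs)) // 5/4 = 1.25
	fmt.Println(SampleStdDev(vs))   // sqrt(1.25) ≈ 1.118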
-
-// UniformSample is a uniform sample using Vitter's Algorithm R.
-//
-// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
-type UniformSample struct {
- count int64
- mutex sync.Mutex
- reservoirSize int
- values []int64
-}
-
-// NewUniformSample constructs a new uniform sample with the given reservoir
-// size.
-func NewUniformSample(reservoirSize int) Sample {
- if UseNilMetrics {
- return NilSample{}
- }
- return &UniformSample{
- reservoirSize: reservoirSize,
- values: make([]int64, 0, reservoirSize),
- }
-}
-
-// Clear clears all samples.
-func (s *UniformSample) Clear() {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count = 0
- s.values = make([]int64, 0, s.reservoirSize)
-}
-
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *UniformSample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *UniformSample) Max() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMax(s.values)
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *UniformSample) Mean() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMean(s.values)
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *UniformSample) Min() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMin(s.values)
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *UniformSample) Percentile(p float64) float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *UniformSample) Percentiles(ps []float64) []float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *UniformSample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return len(s.values)
-}
-
-// Snapshot returns a read-only copy of the sample.
-func (s *UniformSample) Snapshot() Sample {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return &SampleSnapshot{
- count: s.count,
- values: values,
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *UniformSample) StdDev() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleStdDev(s.values)
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *UniformSample) Sum() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleSum(s.values)
-}
-
-// Update samples a new value.
-func (s *UniformSample) Update(v int64) {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count++
- if len(s.values) < s.reservoirSize {
- s.values = append(s.values, v)
- } else {
- r := rand.Int63n(s.count)
- if r < int64(len(s.values)) {
- s.values[int(r)] = v
- }
- }
-}
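Update implements Vitter's Algorithm R: the first reservoirSize values fill the reservoir directly, and the n-th value thereafter replaces a uniformly chosen slot with probability reservoirSize/n, which leaves every observed value equally likely to remain in the reservoir. A minimal usage sketch:

	s := NewUniformSample(1028)
	for i := int64(0); i < 100000; i++ {
		s.Update(i)
	}
	// Count keeps growing while Size is capped at the reservoir size.
	fmt.Println(s.Count(), s.Size()) // 100000 1028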
-
-// Values returns a copy of the values in the sample.
-func (s *UniformSample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *UniformSample) Variance() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleVariance(s.values)
-}
-
-// expDecaySample represents an individual sample in a heap.
-type expDecaySample struct {
- k float64
- v int64
-}
-
-func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
- return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
-}
-
-// expDecaySampleHeap is a min-heap of expDecaySamples, ordered by the key k.
-// The internal implementation is adapted from the standard library's container/heap.
-type expDecaySampleHeap struct {
- s []expDecaySample
-}
-
-func (h *expDecaySampleHeap) Clear() {
- h.s = h.s[:0]
-}
-
-func (h *expDecaySampleHeap) Push(s expDecaySample) {
- n := len(h.s)
- h.s = h.s[0 : n+1]
- h.s[n] = s
- h.up(n)
-}
-
-func (h *expDecaySampleHeap) Pop() expDecaySample {
- n := len(h.s) - 1
- h.s[0], h.s[n] = h.s[n], h.s[0]
- h.down(0, n)
-
- n = len(h.s)
- s := h.s[n-1]
- h.s = h.s[0 : n-1]
- return s
-}
-
-func (h *expDecaySampleHeap) Size() int {
- return len(h.s)
-}
-
-func (h *expDecaySampleHeap) Values() []expDecaySample {
- return h.s
-}
-
-func (h *expDecaySampleHeap) up(j int) {
- for {
- i := (j - 1) / 2 // parent
- if i == j || !(h.s[j].k < h.s[i].k) {
- break
- }
- h.s[i], h.s[j] = h.s[j], h.s[i]
- j = i
- }
-}
-
-func (h *expDecaySampleHeap) down(i, n int) {
- for {
- j1 := 2*i + 1
- if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
- break
- }
- j := j1 // left child
- if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
- j = j2 // = 2*i + 2 // right child
- }
- if !(h.s[j].k < h.s[i].k) {
- break
- }
- h.s[i], h.s[j] = h.s[j], h.s[i]
- i = j
- }
-}
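The heap keeps the sample with the smallest priority key k at index 0, so an exponentially-decaying sample can evict its lowest-priority element in O(log n). An illustrative check from inside the package; keys pop in ascending order:

	h := newExpDecaySampleHeap(4)
	h.Push(expDecaySample{k: 0.9, v: 1})
	h.Push(expDecaySample{k: 0.1, v: 2})
	h.Push(expDecaySample{k: 0.5, v: 3})
	fmt.Println(h.Pop().v, h.Pop().v) // 2 3 (keys 0.1, then 0.5)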
-
-type int64Slice []int64
-
-func (p int64Slice) Len() int { return len(p) }
-func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
deleted file mode 100644
index 693f19085..000000000
--- a/vendor/github.com/rcrowley/go-metrics/syslog.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// +build !windows
-
-package metrics
-
-import (
- "fmt"
- "log/syslog"
- "time"
-)
-
-// Syslog outputs each metric in the given registry to syslog periodically
-// using the given syslogger.
-func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
- for range time.Tick(d) {
- r.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
- case Gauge:
- w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
- case GaugeFloat64:
- w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
- case Healthcheck:
- metric.Check()
- w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- w.Info(fmt.Sprintf(
- "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
- name,
- h.Count(),
- h.Min(),
- h.Max(),
- h.Mean(),
- h.StdDev(),
- ps[0],
- ps[1],
- ps[2],
- ps[3],
- ps[4],
- ))
- case Meter:
- m := metric.Snapshot()
- w.Info(fmt.Sprintf(
- "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
- name,
- m.Count(),
- m.Rate1(),
- m.Rate5(),
- m.Rate15(),
- m.RateMean(),
- ))
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- w.Info(fmt.Sprintf(
- "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
- name,
- t.Count(),
- t.Min(),
- t.Max(),
- t.Mean(),
- t.StdDev(),
- ps[0],
- ps[1],
- ps[2],
- ps[3],
- ps[4],
- t.Rate1(),
- t.Rate5(),
- t.Rate15(),
- t.RateMean(),
- ))
- }
- })
- }
-}
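A minimal sketch of wiring this reporter up, assuming a reachable local syslog daemon (the "myapp" tag is illustrative):

	package main

	import (
		"log"
		"log/syslog"
		"time"

		"github.com/rcrowley/go-metrics"
	)

	func main() {
		w, err := syslog.New(syslog.LOG_INFO, "myapp")
		if err != nil {
			log.Fatal(err)
		}
		// Blocks forever; run in a goroutine if the caller has other work.
		metrics.Syslog(metrics.DefaultRegistry, 60*time.Second, w)
	}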
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
deleted file mode 100644
index 17db8f8d2..000000000
--- a/vendor/github.com/rcrowley/go-metrics/timer.go
+++ /dev/null
@@ -1,311 +0,0 @@
-package metrics
-
-import (
- "sync"
- "time"
-)
-
-// Timers capture the duration and rate of events.
-type Timer interface {
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Rate1() float64
- Rate5() float64
- Rate15() float64
- RateMean() float64
- Snapshot() Timer
- StdDev() float64
- Sum() int64
- Time(func())
- Update(time.Duration)
- UpdateSince(time.Time)
- Variance() float64
-}
-
-// GetOrRegisterTimer returns an existing Timer or constructs and registers a
-// new StandardTimer.
-func GetOrRegisterTimer(name string, r Registry) Timer {
- if r == nil {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewTimer).(Timer)
-}
-
-// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
-func NewCustomTimer(h Histogram, m Meter) Timer {
- if UseNilMetrics {
- return NilTimer{}
- }
- return &StandardTimer{
- histogram: h,
- meter: m,
- }
-}
-
-// NewRegisteredTimer constructs and registers a new StandardTimer.
-func NewRegisteredTimer(name string, r Registry) Timer {
- c := NewTimer()
- if r == nil {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewTimer constructs a new StandardTimer using an exponentially-decaying
-// sample with the same reservoir size and alpha as UNIX load averages.
-func NewTimer() Timer {
- if UseNilMetrics {
- return NilTimer{}
- }
- return &StandardTimer{
- histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
- meter: NewMeter(),
- }
-}
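A usage sketch for timing work either by wrapping a function or by measuring against an explicit start time; handleRequest is a hypothetical stand-in for the caller's own code:

	t := metrics.GetOrRegisterTimer("web.request", nil) // nil selects DefaultRegistry
	t.Time(func() { handleRequest() })                  // wraps the call

	start := time.Now()
	handleRequest()
	t.UpdateSince(start) // explicit form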
-
-// NilTimer is a no-op Timer.
-type NilTimer struct {
- h Histogram
- m Meter
-}
-
-// Count is a no-op.
-func (NilTimer) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilTimer) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilTimer) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilTimer) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilTimer) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilTimer) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Rate1 is a no-op.
-func (NilTimer) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilTimer) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilTimer) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilTimer) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilTimer) Snapshot() Timer { return NilTimer{} }
-
-// StdDev is a no-op.
-func (NilTimer) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilTimer) Sum() int64 { return 0 }
-
-// Time is a no-op.
-func (NilTimer) Time(func()) {}
-
-// Update is a no-op.
-func (NilTimer) Update(time.Duration) {}
-
-// UpdateSince is a no-op.
-func (NilTimer) UpdateSince(time.Time) {}
-
-// Variance is a no-op.
-func (NilTimer) Variance() float64 { return 0.0 }
-
-// StandardTimer is the standard implementation of a Timer and uses a Histogram
-// and Meter.
-type StandardTimer struct {
- histogram Histogram
- meter Meter
- mutex sync.Mutex
-}
-
-// Count returns the number of events recorded.
-func (t *StandardTimer) Count() int64 {
- return t.histogram.Count()
-}
-
-// Max returns the maximum value in the sample.
-func (t *StandardTimer) Max() int64 {
- return t.histogram.Max()
-}
-
-// Mean returns the mean of the values in the sample.
-func (t *StandardTimer) Mean() float64 {
- return t.histogram.Mean()
-}
-
-// Min returns the minimum value in the sample.
-func (t *StandardTimer) Min() int64 {
- return t.histogram.Min()
-}
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (t *StandardTimer) Percentile(p float64) float64 {
- return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (t *StandardTimer) Percentiles(ps []float64) []float64 {
- return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (t *StandardTimer) Rate1() float64 {
- return t.meter.Rate1()
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (t *StandardTimer) Rate5() float64 {
- return t.meter.Rate5()
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (t *StandardTimer) Rate15() float64 {
- return t.meter.Rate15()
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (t *StandardTimer) RateMean() float64 {
- return t.meter.RateMean()
-}
-
-// Snapshot returns a read-only copy of the timer.
-func (t *StandardTimer) Snapshot() Timer {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- return &TimerSnapshot{
- histogram: t.histogram.Snapshot().(*HistogramSnapshot),
- meter: t.meter.Snapshot().(*MeterSnapshot),
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (t *StandardTimer) StdDev() float64 {
- return t.histogram.StdDev()
-}
-
-// Sum returns the sum in the sample.
-func (t *StandardTimer) Sum() int64 {
- return t.histogram.Sum()
-}
-
-// Time records the duration of the execution of the given function.
-func (t *StandardTimer) Time(f func()) {
- ts := time.Now()
- f()
- t.Update(time.Since(ts))
-}
-
-// Update records the duration of an event.
-func (t *StandardTimer) Update(d time.Duration) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.histogram.Update(int64(d))
- t.meter.Mark(1)
-}
-
-// UpdateSince records the duration of an event that started at the given time and ends now.
-func (t *StandardTimer) UpdateSince(ts time.Time) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.histogram.Update(int64(time.Since(ts)))
- t.meter.Mark(1)
-}
-
-// Variance returns the variance of the values in the sample.
-func (t *StandardTimer) Variance() float64 {
- return t.histogram.Variance()
-}
-
-// TimerSnapshot is a read-only copy of another Timer.
-type TimerSnapshot struct {
- histogram *HistogramSnapshot
- meter *MeterSnapshot
-}
-
-// Count returns the number of events recorded at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
-
-// Max returns the maximum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
-
-// Min returns the minimum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
-
-// Percentile returns an arbitrary percentile of sampled values at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) Percentile(p float64) float64 {
- return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of sampled values at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
- return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
-
-// Snapshot returns the snapshot.
-func (t *TimerSnapshot) Snapshot() Timer { return t }
-
-// StdDev returns the standard deviation of the values at the time the snapshot
-// was taken.
-func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
-
-// Sum returns the sum at the time the snapshot was taken.
-func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
- panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
- panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a TimerSnapshot")
-}
-
-// Variance returns the variance of the values at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
deleted file mode 100755
index f6499982e..000000000
--- a/vendor/github.com/rcrowley/go-metrics/validate.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# check there are no formatting issues
-GOFMT_LINES=`gofmt -l . | wc -l | xargs`
-test $GOFMT_LINES -eq 0 || { echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"; exit 1; }
-
-# run the tests for the root package
-go test .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
deleted file mode 100644
index 091e971d2..000000000
--- a/vendor/github.com/rcrowley/go-metrics/writer.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "io"
- "sort"
- "time"
-)
-
-// Write sorts and writes each metric in the given registry periodically to
-// the given io.Writer.
-func Write(r Registry, d time.Duration, w io.Writer) {
- for range time.Tick(d) {
- WriteOnce(r, w)
- }
-}
-
-// WriteOnce sorts and writes metrics in the given registry to the given
-// io.Writer.
-func WriteOnce(r Registry, w io.Writer) {
- var namedMetrics namedMetricSlice
- r.Each(func(name string, i interface{}) {
- namedMetrics = append(namedMetrics, namedMetric{name, i})
- })
-
- sort.Sort(namedMetrics)
- for _, namedMetric := range namedMetrics {
- switch metric := namedMetric.m.(type) {
- case Counter:
- fmt.Fprintf(w, "counter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", metric.Count())
- case Gauge:
- fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %9d\n", metric.Value())
- case GaugeFloat64:
- fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %f\n", metric.Value())
- case Healthcheck:
- metric.Check()
- fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
- fmt.Fprintf(w, " error: %v\n", metric.Error())
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", h.Count())
- fmt.Fprintf(w, " min: %9d\n", h.Min())
- fmt.Fprintf(w, " max: %9d\n", h.Max())
- fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
- fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
- fmt.Fprintf(w, " median: %12.2f\n", ps[0])
- fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
- fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
- fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
- fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "meter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", m.Count())
- fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
- fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
- fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
- fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "timer %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", t.Count())
- fmt.Fprintf(w, " min: %9d\n", t.Min())
- fmt.Fprintf(w, " max: %9d\n", t.Max())
- fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
- fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
- fmt.Fprintf(w, " median: %12.2f\n", ps[0])
- fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
- fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
- fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
- fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
- fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
- fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
- fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
- fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
- }
- }
-}
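A minimal sketch of both entry points, one immediate dump and one periodic background dump:

	metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout)
	go metrics.Write(metrics.DefaultRegistry, 30*time.Second, os.Stdout)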
-
-type namedMetric struct {
- name string
- m interface{}
-}
-
-// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
-type namedMetricSlice []namedMetric
-
-func (nms namedMetricSlice) Len() int { return len(nms) }
-
-func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
-
-func (nms namedMetricSlice) Less(i, j int) bool {
- return nms[i].name < nms[j].name
-}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index e938ce5e6..134158995 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -207,6 +207,30 @@
"revisionTime": "2016-12-24T10:41:01Z"
},
{
+ "checksumSHA1": "6tNwbL5tUS0dxYzADKVZtI2d/lE=",
+ "path": "github.com/influxdata/influxdb/client",
+ "revision": "a55dd0f50edd14c9c798d3564189eb4f53914309",
+ "revisionTime": "2017-10-09T17:24:46Z"
+ },
+ {
+ "checksumSHA1": "O4XpbSNeUhSIMD2FWtQximJiFIs=",
+ "path": "github.com/influxdata/influxdb/client/v2",
+ "revision": "b36b9f109f2da91c8941679caf5356e08eee0b2b",
+ "revisionTime": "2018-01-17T01:42:09Z"
+ },
+ {
+ "checksumSHA1": "cfumoC9gHEUROd+fA8qK3WLFAZQ=",
+ "path": "github.com/influxdata/influxdb/models",
+ "revision": "b36b9f109f2da91c8941679caf5356e08eee0b2b",
+ "revisionTime": "2018-01-17T01:42:09Z"
+ },
+ {
+ "checksumSHA1": "Z0Bb5PWa5WL/j5Dm2KJCLGn1l7U=",
+ "path": "github.com/influxdata/influxdb/pkg/escape",
+ "revision": "01288bdb0883a01cac999326bd34421b29acaec8",
+ "revisionTime": "2018-02-21T22:33:40Z"
+ },
+ {
"checksumSHA1": "vTGKMIfiMwz43y5bsgx9PrL+AVw=",
"path": "github.com/jackpal/go-nat-pmp",
"revision": "1fa385a6f45828c83361136b45b1a21a12139493",
@@ -310,18 +334,6 @@
"revisionTime": "2017-08-14T17:01:13Z"
},
{
- "checksumSHA1": "KAzbLjI9MzW2tjfcAsK75lVRp6I=",
- "path": "github.com/rcrowley/go-metrics",
- "revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
- "revisionTime": "2016-11-28T21:05:44Z"
- },
- {
- "checksumSHA1": "q/d9nXRQYKEJ/EWn+5y6jL8rPGs=",
- "path": "github.com/rcrowley/go-metrics/exp",
- "revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
- "revisionTime": "2016-11-28T21:05:44Z"
- },
- {
"checksumSHA1": "28UVHMmHx0iqO0XiJsjx+fwILyI=",
"path": "github.com/rjeczalik/notify",
"revision": "c31e5f2cb22b3e4ef3f882f413847669bf2652b9",