path: root/vendor/github.com/influxdata
author    Anton Evangelatov <anton.evangelatov@gmail.com>  2018-02-23 17:56:08 +0800
committer Péter Szilágyi <peterke@gmail.com>  2018-02-23 17:56:08 +0800
commit    ae9f97221a96a86e4343a5c3cc4b1db44627a2f3 (patch)
tree      0154be72d0f2e1f032d129b9433d1bf3939cd8f0 /vendor/github.com/influxdata
parent    7f74bdf8dded0e1ac3c01e043c2ed89d78f308cf (diff)
metrics: pull library and introduce ResettingTimer and InfluxDB reporter (#15910)
* go-metrics: fork library and introduce ResettingTimer and InfluxDB reporter.
* vendor: change nonsense/go-metrics to ethersphere/go-metrics
* go-metrics: add tests. move ResettingTimer logic from reporter to type.
* all, metrics: pull in metrics package in go-ethereum
* metrics/test: make sure metrics are enabled for tests
* metrics: apply gosimple rules
* metrics/exp, internal/debug: init expvar endpoint when starting pprof server
* internal/debug: tiny comment formatting fix
Diffstat (limited to 'vendor/github.com/influxdata')
-rw-r--r--  vendor/github.com/influxdata/influxdb/LICENSE                         |   20
-rw-r--r--  vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md      |   62
-rw-r--r--  vendor/github.com/influxdata/influxdb/client/README.md                |  306
-rw-r--r--  vendor/github.com/influxdata/influxdb/client/influxdb.go              |  840
-rw-r--r--  vendor/github.com/influxdata/influxdb/client/v2/client.go             |  635
-rw-r--r--  vendor/github.com/influxdata/influxdb/client/v2/udp.go                |  112
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/consistency.go           |   48
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/inline_fnv.go            |   32
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go  |   44
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/points.go                | 2337
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/rows.go                  |   62
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/statistic.go             |   42
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/time.go                  |   74
-rw-r--r--  vendor/github.com/influxdata/influxdb/models/uint_support.go          |    7
-rw-r--r--  vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go             |  115
-rw-r--r--  vendor/github.com/influxdata/influxdb/pkg/escape/strings.go           |   21
16 files changed, 4757 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE b/vendor/github.com/influxdata/influxdb/LICENSE
new file mode 100644
index 000000000..63cef79ba
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-2016 Errplane Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
new file mode 100644
index 000000000..ea6fc69f3
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
@@ -0,0 +1,62 @@
+# List
+- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
+- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
+- github.com/BurntSushi/toml [MIT LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/RoaringBitmap/roaring [APACHE LICENSE](https://github.com/RoaringBitmap/roaring/blob/master/LICENSE)
+- github.com/beorn7/perks [MIT LICENSE](https://github.com/beorn7/perks/blob/master/LICENSE)
+- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
+- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
+- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
+- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE)
+- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
+- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
+- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
+- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
+- github.com/glycerine/go-unsnap-stream [MIT LICENSE](https://github.com/glycerine/go-unsnap-stream/blob/master/LICENSE)
+- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
+- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
+- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE)
+- github.com/influxdata/influxql [MIT LICENSE](https://github.com/influxdata/influxql/blob/master/LICENSE)
+- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
+- github.com/influxdata/yamux [MOZILLA PUBLIC LICENSE](https://github.com/influxdata/yamux/blob/master/LICENSE)
+- github.com/influxdata/yarpc [MIT LICENSE](https://github.com/influxdata/yarpc/blob/master/LICENSE)
+- github.com/jsternberg/zap-logfmt [MIT LICENSE](https://github.com/jsternberg/zap-logfmt/blob/master/LICENSE)
+- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
+- github.com/mattn/go-isatty [MIT LICENSE](https://github.com/mattn/go-isatty/blob/master/LICENSE)
+- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
+- github.com/opentracing/opentracing-go [MIT LICENSE](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
+- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
+- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
+- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md)
+- github.com/prometheus/client_golang [MIT LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
+- github.com/prometheus/client_model [MIT LICENSE](https://github.com/prometheus/client_model/blob/master/LICENSE)
+- github.com/prometheus/common [APACHE LICENSE](https://github.com/prometheus/common/blob/master/LICENSE)
+- github.com/prometheus/procfs [APACHE LICENSE](https://github.com/prometheus/procfs/blob/master/LICENSE)
+- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
+- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE)
+- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE)
+- go.uber.org/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt)
+- go.uber.org/multierr [MIT LICENSE](https://github.com/uber-go/multierr/blob/master/LICENSE.txt)
+- go.uber.org/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)
+- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
+- golang.org/x/net [BSD LICENSE](https://github.com/golang/net/blob/master/LICENSE)
+- golang.org/x/sys [BSD LICENSE](https://github.com/golang/sys/blob/master/LICENSE)
+- golang.org/x/text [BSD LICENSE](https://github.com/golang/text/blob/master/LICENSE)
+- golang.org/x/time [BSD LICENSE](https://github.com/golang/time/blob/master/LICENSE)
+- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+- github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE)
diff --git a/vendor/github.com/influxdata/influxdb/client/README.md b/vendor/github.com/influxdata/influxdb/client/README.md
new file mode 100644
index 000000000..773a11122
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/README.md
@@ -0,0 +1,306 @@
+# InfluxDB Client
+
+[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+
+## Description
+
+**NOTE:** The Go client library now has a "v2" version, with the old version
+being deprecated. The new version can be imported at
+`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible.
+
+A Go client library written and maintained by the **InfluxDB** team.
+This package provides convenience functions to read and write time series data.
+It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now, for good measure, set the credentials in your shell environment. The
+example below sets $INFLUX_USER and $INFLUX_PWD for the _bubba_ user created
+above.
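+
+```shell
+export INFLUX_USER=bubba
+export INFLUX_PWD=bumblebeetuna
+```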
+
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+ "log"
+ "time"
+
+ "github.com/influxdata/influxdb/client/v2"
+)
+
+const (
+ MyDB = "square_holes"
+ username = "bubba"
+ password = "bumblebeetuna"
+)
+
+
+func main() {
+ // Create a new HTTPClient
+ c, err := client.NewHTTPClient(client.HTTPConfig{
+ Addr: "http://localhost:8086",
+ Username: username,
+ Password: password,
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create a new point batch
+ bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+ Database: MyDB,
+ Precision: "s",
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create a point and add to batch
+ tags := map[string]string{"cpu": "cpu-total"}
+ fields := map[string]interface{}{
+ "idle": 10.1,
+ "system": 53.3,
+ "user": 46.6,
+ }
+
+ pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+ if err != nil {
+ log.Fatal(err)
+ }
+ bp.AddPoint(pt)
+
+ // Write the batch
+ if err := c.Write(bp); err != nil {
+ log.Fatal(err)
+ }
+}
+
+```
+
+### Inserting Data
+
+Time series data, aka *points*, are written to the database using batch inserts.
+The mechanism is to create one or more points, then create a batch, aka
+*batch points*, and write these to a given database and series. A series is a
+combination of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time,
+two field values (_idle_ and _busy_), and tags indicating the cpu, host, and
+region. We write these points to a database called _systemstats_ using a
+measurement named _cpu_usage_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If not
+provided, InfluxDB will use the database's _default_ retention policy.
+
+```go
+
+func writePoints(clnt client.Client) {
+ sampleSize := 1000
+
+ bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+ Database: "systemstats",
+ Precision: "us",
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ rand.Seed(time.Now().UnixNano())
+ for i := 0; i < sampleSize; i++ {
+ regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+ tags := map[string]string{
+ "cpu": "cpu-total",
+ "host": fmt.Sprintf("host%d", rand.Intn(1000)),
+ "region": regions[rand.Intn(len(regions))],
+ }
+
+ idle := rand.Float64() * 100.0
+ fields := map[string]interface{}{
+ "idle": idle,
+ "busy": 100.0 - idle,
+ }
+
+ pt, err := client.NewPoint(
+ "cpu_usage",
+ tags,
+ fields,
+ time.Now(),
+ )
+ if err != nil {
+ log.Fatal(err)
+ }
+ bp.AddPoint(pt)
+ }
+
+ if err := clnt.Write(bp); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+#### Uint64 Support
+
+The `uint64` data type is supported if your server is version `1.4.0` or
+greater. To write a data point as an unsigned integer, you must insert the
+field value as a `uint64`. You cannot use `uint` or any of the other unsigned
+derivatives, because previous versions of the client wrote those types as
+signed integers.
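+
+A minimal sketch, reusing the `bp` batch and imports from the earlier examples
+(the `"events"` measurement name here is illustrative):
+
+```go
+fields := map[string]interface{}{
+	// Only a uint64-typed value is written as an unsigned integer;
+	// uint and the other widths are written as signed integers for
+	// backwards compatibility.
+	"counter": uint64(42),
+}
+pt, err := client.NewPoint("events", nil, fields, time.Now())
+if err != nil {
+	log.Fatal(err)
+}
+bp.AddPoint(pt)
+```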
+
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using
+familiar SQL constructs. In this example we create a convenience function to
+query the database as follows:
+
+```go
+// queryDB is a convenience function to query the database
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
+ q := client.Query{
+ Command: cmd,
+ Database: MyDB,
+ }
+ if response, err := clnt.Query(q); err == nil {
+ if response.Error() != nil {
+ return res, response.Error()
+ }
+ res = response.Results
+ } else {
+ return res, err
+ }
+ return res, nil
+}
+```
+
+#### Creating a Database
+
+```go
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+#### Count Records
+
+```go
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
+if err != nil {
+ log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records\n", count)
+```
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 10)
+res, err = queryDB(clnt, q)
+if err != nil {
+ log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+ t, err := time.Parse(time.RFC3339, row[0].(string))
+ if err != nil {
+ log.Fatal(err)
+ }
+ val := row[1].(string)
+ log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val)
+}
+```
+
+### Using the UDP Client
+
+The **InfluxDB** client also supports writing over UDP.
+
+```go
+func WriteUDP() {
+	// Make client
+	config := client.UDPConfig{Addr: "localhost:8089"}
+	c, err := client.NewUDPClient(config)
+ if err != nil {
+ panic(err.Error())
+ }
+
+ // Create a new point batch
+ bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+ Precision: "s",
+ })
+
+ // Create a point and add to batch
+ tags := map[string]string{"cpu": "cpu-total"}
+ fields := map[string]interface{}{
+ "idle": 10.1,
+ "system": 53.3,
+ "user": 46.6,
+ }
+ pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+ if err != nil {
+ panic(err.Error())
+ }
+ bp.AddPoint(pt)
+
+ // Write the batch
+ c.Write(bp)
+}
+```
+
+### Point Splitting
+
+The UDP client now supports splitting single points that exceed the configured
+payload size. The logic for processing each point is listed here, starting with
+an empty payload.
+
+1. If adding the point to the current (non-empty) payload would exceed the
+ configured size, send the current payload. Otherwise, add it to the current
+ payload.
+1. If the point is smaller than the configured size, add it to the payload.
+1. If the point has no timestamp, just try to send the entire point as a single
+ UDP payload, and process the next point.
+1. Since the point has a timestamp, re-use the existing measurement name,
+ tagset, and timestamp and create multiple new points by splitting up the
+ fields. The per-point length will be kept close to the configured size,
+ staying under it if possible. This does mean that one large field, maybe a
+ long string, could be sent as a larger-than-configured payload.
+
+The above logic attempts to respect configured payload sizes without
+sacrificing data integrity. Points without a timestamp can't be split, as that
+may cause fields to have differing timestamps when processed by the server.
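+
+A rough sketch of the flush check at the heart of this logic; the function name
+and signature are illustrative, not the library's actual internals:
+
+```go
+// flushIfNeeded sends the buffered payload when appending n more bytes
+// would exceed the configured payload size, then resets the buffer.
+func flushIfNeeded(conn io.Writer, buf *[]byte, n, payloadSize int) error {
+	if len(*buf) > 0 && len(*buf)+n > payloadSize {
+		if _, err := conn.Write(*buf); err != nil {
+			return err
+		}
+		*buf = (*buf)[:0] // reset length, keep capacity
+	}
+	return nil
+}
+```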
+
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go
new file mode 100644
index 000000000..98d362d50
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go
@@ -0,0 +1,840 @@
+// Package client implements a now-deprecated client for InfluxDB;
+// use github.com/influxdata/influxdb/client/v2 instead.
+package client // import "github.com/influxdata/influxdb/client"
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/influxdb/models"
+)
+
+const (
+ // DefaultHost is the default host used to connect to an InfluxDB instance
+ DefaultHost = "localhost"
+
+ // DefaultPort is the default port used to connect to an InfluxDB instance
+ DefaultPort = 8086
+
+ // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance
+ DefaultTimeout = 0
+)
+
+// Query is used to send a command to the server. Both Command and Database are required.
+type Query struct {
+ Command string
+ Database string
+
+ // Chunked tells the server to send back chunked responses. This places
+ // less load on the server by sending back chunks of the response rather
+ // than waiting for the entire response all at once.
+ Chunked bool
+
+ // ChunkSize sets the maximum number of rows that will be returned per
+ // chunk. Chunks are either divided based on their series or if they hit
+ // the chunk size limit.
+ //
+ // Chunked must be set to true for this option to be used.
+ ChunkSize int
+}
+
+// ParseConnectionString will parse a string to create a valid connection URL
+func ParseConnectionString(path string, ssl bool) (url.URL, error) {
+ var host string
+ var port int
+
+ h, p, err := net.SplitHostPort(path)
+ if err != nil {
+ if path == "" {
+ host = DefaultHost
+ } else {
+ host = path
+ }
+ // If they didn't specify a port, always use the default port
+ port = DefaultPort
+ } else {
+ host = h
+ port, err = strconv.Atoi(p)
+ if err != nil {
+			return url.URL{}, fmt.Errorf("invalid port number %q: %s", path, err)
+ }
+ }
+
+ u := url.URL{
+ Scheme: "http",
+ }
+ if ssl {
+ u.Scheme = "https"
+ }
+
+ u.Host = net.JoinHostPort(host, strconv.Itoa(port))
+
+ return u, nil
+}
+
+// Config is used to specify what server to connect to.
+// URL: the URL of the server to connect to.
+// Username/Password are optional and will be passed via basic auth if provided.
+// UserAgent: if not provided, defaults to "InfluxDBClient".
+// Timeout: if not provided, defaults to 0 (no timeout).
+type Config struct {
+ URL url.URL
+ UnixSocket string
+ Username string
+ Password string
+ UserAgent string
+ Timeout time.Duration
+ Precision string
+ WriteConsistency string
+ UnsafeSsl bool
+}
+
+// NewConfig will create a config to be used in connecting to the client
+func NewConfig() Config {
+ return Config{
+ Timeout: DefaultTimeout,
+ }
+}
+
+// Client is used to make calls to the server.
+type Client struct {
+ url url.URL
+ unixSocket string
+ username string
+ password string
+ httpClient *http.Client
+ userAgent string
+ precision string
+}
+
+const (
+	// ConsistencyOne requires that at least one data node acknowledge a write.
+ ConsistencyOne = "one"
+
+ // ConsistencyAll requires all data nodes to acknowledge a write.
+ ConsistencyAll = "all"
+
+ // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
+ ConsistencyQuorum = "quorum"
+
+	// ConsistencyAny allows for hinted handoff; potentially no write has happened yet.
+ ConsistencyAny = "any"
+)
+
+// NewClient will instantiate and return a connected client to issue commands to the server.
+func NewClient(c Config) (*Client, error) {
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: c.UnsafeSsl,
+ }
+
+ tr := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ }
+
+ if c.UnixSocket != "" {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+
+ tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
+ return net.Dial("unix", c.UnixSocket)
+ }
+ }
+
+ client := Client{
+ url: c.URL,
+ unixSocket: c.UnixSocket,
+ username: c.Username,
+ password: c.Password,
+ httpClient: &http.Client{Timeout: c.Timeout, Transport: tr},
+ userAgent: c.UserAgent,
+ precision: c.Precision,
+ }
+ if client.userAgent == "" {
+ client.userAgent = "InfluxDBClient"
+ }
+ return &client, nil
+}
+
+// SetAuth will update the username and password
+func (c *Client) SetAuth(u, p string) {
+ c.username = u
+ c.password = p
+}
+
+// SetPrecision will update the precision
+func (c *Client) SetPrecision(precision string) {
+ c.precision = precision
+}
+
+// Query sends a command to the server and returns the Response
+func (c *Client) Query(q Query) (*Response, error) {
+ return c.QueryContext(context.Background(), q)
+}
+
+// QueryContext sends a command to the server and returns the Response
+// It uses a context that can be cancelled by the command line client
+func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) {
+ u := c.url
+
+ u.Path = "query"
+ values := u.Query()
+ values.Set("q", q.Command)
+ values.Set("db", q.Database)
+ if q.Chunked {
+ values.Set("chunked", "true")
+ if q.ChunkSize > 0 {
+ values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
+ }
+ }
+ if c.precision != "" {
+ values.Set("epoch", c.precision)
+ }
+ u.RawQuery = values.Encode()
+
+ req, err := http.NewRequest("POST", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ req = req.WithContext(ctx)
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ if q.Chunked {
+ cr := NewChunkedResponse(resp.Body)
+ for {
+ r, err := cr.NextResponse()
+ if err != nil {
+ // If we got an error while decoding the response, send that back.
+ return nil, err
+ }
+
+ if r == nil {
+ break
+ }
+
+ response.Results = append(response.Results, r.Results...)
+ if r.Err != nil {
+ response.Err = r.Err
+ break
+ }
+ }
+ } else {
+ dec := json.NewDecoder(resp.Body)
+ dec.UseNumber()
+ if err := dec.Decode(&response); err != nil {
+ // Ignore EOF errors if we got an invalid status code.
+ if !(err == io.EOF && resp.StatusCode != http.StatusOK) {
+ return nil, err
+ }
+ }
+ }
+
+ // If we don't have an error in our json response, and didn't get StatusOK,
+ // then send back an error.
+ if resp.StatusCode != http.StatusOK && response.Error() == nil {
+ return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+ }
+ return &response, nil
+}
+
+// Write takes BatchPoints and allows for writing of multiple points with defaults
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) Write(bp BatchPoints) (*Response, error) {
+ u := c.url
+ u.Path = "write"
+
+ var b bytes.Buffer
+ for _, p := range bp.Points {
+ err := checkPointTypes(p)
+ if err != nil {
+ return nil, err
+ }
+ if p.Raw != "" {
+ if _, err := b.WriteString(p.Raw); err != nil {
+ return nil, err
+ }
+ } else {
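+			// Merge batch-level tags into the point's tags.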
+ for k, v := range bp.Tags {
+ if p.Tags == nil {
+ p.Tags = make(map[string]string, len(bp.Tags))
+ }
+ p.Tags[k] = v
+ }
+
+ if _, err := b.WriteString(p.MarshalString()); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := b.WriteByte('\n'); err != nil {
+ return nil, err
+ }
+ }
+
+ req, err := http.NewRequest("POST", u.String(), &b)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ precision := bp.Precision
+ if precision == "" {
+ precision = "ns"
+ }
+
+ params := req.URL.Query()
+ params.Set("db", bp.Database)
+ params.Set("rp", bp.RetentionPolicy)
+ params.Set("precision", precision)
+ params.Set("consistency", bp.WriteConsistency)
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		var err = errors.New(string(body))
+ response.Err = err
+ return &response, err
+ }
+
+ return nil, nil
+}
+
+// WriteLineProtocol takes a string with line returns to delimit each write
+// If successful, error is nil and Response is nil
+// If an error occurs, Response may contain additional information if populated.
+func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
+ u := c.url
+ u.Path = "write"
+
+ r := strings.NewReader(data)
+
+ req, err := http.NewRequest("POST", u.String(), r)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+ params := req.URL.Query()
+ params.Set("db", database)
+ params.Set("rp", retentionPolicy)
+ params.Set("precision", precision)
+ params.Set("consistency", writeConsistency)
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var response Response
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		err := errors.New(string(body))
+ response.Err = err
+ return &response, err
+ }
+
+ return nil, nil
+}
+
+// Ping will check to see if the server is up
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *Client) Ping() (time.Duration, string, error) {
+ now := time.Now()
+ u := c.url
+ u.Path = "ping"
+
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return 0, "", err
+ }
+ req.Header.Set("User-Agent", c.userAgent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return 0, "", err
+ }
+ defer resp.Body.Close()
+
+ version := resp.Header.Get("X-Influxdb-Version")
+ return time.Since(now), version, nil
+}
+
+// Structs
+
+// Message represents a user message.
+type Message struct {
+ Level string `json:"level,omitempty"`
+ Text string `json:"text,omitempty"`
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+ Series []models.Row
+ Messages []*Message
+ Err error
+}
+
+// MarshalJSON encodes the result into JSON.
+func (r *Result) MarshalJSON() ([]byte, error) {
+ // Define a struct that outputs "error" as a string.
+ var o struct {
+ Series []models.Row `json:"series,omitempty"`
+ Messages []*Message `json:"messages,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ // Copy fields to output struct.
+ o.Series = r.Series
+ o.Messages = r.Messages
+ if r.Err != nil {
+ o.Err = r.Err.Error()
+ }
+
+ return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Result struct
+func (r *Result) UnmarshalJSON(b []byte) error {
+ var o struct {
+ Series []models.Row `json:"series,omitempty"`
+ Messages []*Message `json:"messages,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ err := dec.Decode(&o)
+ if err != nil {
+ return err
+ }
+ r.Series = o.Series
+ r.Messages = o.Messages
+ if o.Err != "" {
+ r.Err = errors.New(o.Err)
+ }
+ return nil
+}
+
+// Response represents a list of statement results.
+type Response struct {
+ Results []Result
+ Err error
+}
+
+// MarshalJSON encodes the response into JSON.
+func (r *Response) MarshalJSON() ([]byte, error) {
+ // Define a struct that outputs "error" as a string.
+ var o struct {
+ Results []Result `json:"results,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ // Copy fields to output struct.
+ o.Results = r.Results
+ if r.Err != nil {
+ o.Err = r.Err.Error()
+ }
+
+ return json.Marshal(&o)
+}
+
+// UnmarshalJSON decodes the data into the Response struct
+func (r *Response) UnmarshalJSON(b []byte) error {
+ var o struct {
+ Results []Result `json:"results,omitempty"`
+ Err string `json:"error,omitempty"`
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ err := dec.Decode(&o)
+ if err != nil {
+ return err
+ }
+ r.Results = o.Results
+ if o.Err != "" {
+ r.Err = errors.New(o.Err)
+ }
+ return nil
+}
+
+// Error returns the first error from any statement.
+// Returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+ if r.Err != nil {
+ return r.Err
+ }
+ for _, result := range r.Results {
+ if result.Err != nil {
+ return result.Err
+ }
+ }
+ return nil
+}
+
+// duplexReader reads from r and writes everything it reads to w,
+// while satisfying the io.Reader interface.
+type duplexReader struct {
+ r io.Reader
+ w io.Writer
+}
+
+func (r *duplexReader) Read(p []byte) (n int, err error) {
+ n, err = r.r.Read(p)
+ if err == nil {
+ r.w.Write(p[:n])
+ }
+ return n, err
+}
+
+// ChunkedResponse represents a response from the server that
+// uses chunking to stream the output.
+type ChunkedResponse struct {
+ dec *json.Decoder
+ duplex *duplexReader
+ buf bytes.Buffer
+}
+
+// NewChunkedResponse reads a stream and produces responses from the stream.
+func NewChunkedResponse(r io.Reader) *ChunkedResponse {
+ resp := &ChunkedResponse{}
+ resp.duplex = &duplexReader{r: r, w: &resp.buf}
+ resp.dec = json.NewDecoder(resp.duplex)
+ resp.dec.UseNumber()
+ return resp
+}
+
+// NextResponse reads the next line of the stream and returns a response.
+func (r *ChunkedResponse) NextResponse() (*Response, error) {
+ var response Response
+ if err := r.dec.Decode(&response); err != nil {
+ if err == io.EOF {
+ return nil, nil
+ }
+ // A decoding error happened. This probably means the server crashed
+ // and sent a last-ditch error message to us. Ensure we have read the
+ // entirety of the connection to get any remaining error text.
+ io.Copy(ioutil.Discard, r.duplex)
+ return nil, errors.New(strings.TrimSpace(r.buf.String()))
+ }
+ r.buf.Reset()
+ return &response, nil
+}
+
+// Point defines the fields that will be written to the database
+// Measurement, Time, and Fields are required
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h
+type Point struct {
+ Measurement string
+ Tags map[string]string
+ Time time.Time
+ Fields map[string]interface{}
+ Precision string
+ Raw string
+}
+
+// MarshalJSON will format the time in RFC3339Nano.
+// Precision is ignored, as it is only used for writing, not reading;
+// in other words, we always send back times in nanosecond precision.
+func (p *Point) MarshalJSON() ([]byte, error) {
+ point := struct {
+ Measurement string `json:"measurement,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Time string `json:"time,omitempty"`
+ Fields map[string]interface{} `json:"fields,omitempty"`
+ Precision string `json:"precision,omitempty"`
+ }{
+ Measurement: p.Measurement,
+ Tags: p.Tags,
+ Fields: p.Fields,
+ Precision: p.Precision,
+ }
+	// Let omitempty apply only if the time is really zero.
+ if !p.Time.IsZero() {
+ point.Time = p.Time.UTC().Format(time.RFC3339Nano)
+ }
+ return json.Marshal(&point)
+}
+
+// MarshalString renders string representation of a Point with specified
+// precision. The default precision is nanoseconds.
+func (p *Point) MarshalString() string {
+ pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time)
+ if err != nil {
+ return "# ERROR: " + err.Error() + " " + p.Measurement
+ }
+ if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
+ return pt.String()
+ }
+ return pt.PrecisionString(p.Precision)
+}
+
+// UnmarshalJSON decodes the data into the Point struct
+func (p *Point) UnmarshalJSON(b []byte) error {
+ var normal struct {
+ Measurement string `json:"measurement"`
+ Tags map[string]string `json:"tags"`
+ Time time.Time `json:"time"`
+ Precision string `json:"precision"`
+ Fields map[string]interface{} `json:"fields"`
+ }
+ var epoch struct {
+ Measurement string `json:"measurement"`
+ Tags map[string]string `json:"tags"`
+ Time *int64 `json:"time"`
+ Precision string `json:"precision"`
+ Fields map[string]interface{} `json:"fields"`
+ }
+
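+	// Try the epoch (integer timestamp) form first; if that decode fails,
+	// fall back to the RFC3339 time.Time form below.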
+ if err := func() error {
+ var err error
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ if err = dec.Decode(&epoch); err != nil {
+ return err
+ }
+ // Convert from epoch to time.Time, but only if Time
+ // was actually set.
+ var ts time.Time
+ if epoch.Time != nil {
+ ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+ if err != nil {
+ return err
+ }
+ }
+ p.Measurement = epoch.Measurement
+ p.Tags = epoch.Tags
+ p.Time = ts
+ p.Precision = epoch.Precision
+ p.Fields = normalizeFields(epoch.Fields)
+ return nil
+ }(); err == nil {
+ return nil
+ }
+
+ dec := json.NewDecoder(bytes.NewBuffer(b))
+ dec.UseNumber()
+ if err := dec.Decode(&normal); err != nil {
+ return err
+ }
+ normal.Time = SetPrecision(normal.Time, normal.Precision)
+ p.Measurement = normal.Measurement
+ p.Tags = normal.Tags
+ p.Time = normal.Time
+ p.Precision = normal.Precision
+ p.Fields = normalizeFields(normal.Fields)
+
+ return nil
+}
+
+// normalizeFields removes any notion of json.Number, converting values to float64.
+func normalizeFields(fields map[string]interface{}) map[string]interface{} {
+ newFields := map[string]interface{}{}
+
+ for k, v := range fields {
+ switch v := v.(type) {
+ case json.Number:
+ jv, e := v.Float64()
+ if e != nil {
+ panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e))
+ }
+ newFields[k] = jv
+ default:
+ newFields[k] = v
+ }
+ }
+ return newFields
+}
+
+// BatchPoints is used to send batched data in a single write.
+// Database and Points are required.
+// If no retention policy is specified, the database's default retention policy will be used.
+// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored.
+// If time is specified, it will be applied to any point with an empty time.
+// Precision can be specified if the time is in epoch format (integer).
+// Valid values for Precision are n, u, ms, s, m, and h.
+type BatchPoints struct {
+ Points []Point `json:"points,omitempty"`
+ Database string `json:"database,omitempty"`
+ RetentionPolicy string `json:"retentionPolicy,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Time time.Time `json:"time,omitempty"`
+ Precision string `json:"precision,omitempty"`
+ WriteConsistency string `json:"-"`
+}
+
+// UnmarshalJSON decodes the data into the BatchPoints struct
+func (bp *BatchPoints) UnmarshalJSON(b []byte) error {
+ var normal struct {
+ Points []Point `json:"points"`
+ Database string `json:"database"`
+ RetentionPolicy string `json:"retentionPolicy"`
+ Tags map[string]string `json:"tags"`
+ Time time.Time `json:"time"`
+ Precision string `json:"precision"`
+ }
+ var epoch struct {
+ Points []Point `json:"points"`
+ Database string `json:"database"`
+ RetentionPolicy string `json:"retentionPolicy"`
+ Tags map[string]string `json:"tags"`
+ Time *int64 `json:"time"`
+ Precision string `json:"precision"`
+ }
+
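+	// Try the epoch (integer timestamp) form first; fall back to the
+	// RFC3339 time.Time form below if that fails.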
+ if err := func() error {
+ var err error
+ if err = json.Unmarshal(b, &epoch); err != nil {
+ return err
+ }
+ // Convert from epoch to time.Time
+ var ts time.Time
+ if epoch.Time != nil {
+ ts, err = EpochToTime(*epoch.Time, epoch.Precision)
+ if err != nil {
+ return err
+ }
+ }
+ bp.Points = epoch.Points
+ bp.Database = epoch.Database
+ bp.RetentionPolicy = epoch.RetentionPolicy
+ bp.Tags = epoch.Tags
+ bp.Time = ts
+ bp.Precision = epoch.Precision
+ return nil
+ }(); err == nil {
+ return nil
+ }
+
+ if err := json.Unmarshal(b, &normal); err != nil {
+ return err
+ }
+ normal.Time = SetPrecision(normal.Time, normal.Precision)
+ bp.Points = normal.Points
+ bp.Database = normal.Database
+ bp.RetentionPolicy = normal.RetentionPolicy
+ bp.Tags = normal.Tags
+ bp.Time = normal.Time
+ bp.Precision = normal.Precision
+
+ return nil
+}
+
+// utility functions
+
+// Addr provides, as a string, the URL or unix socket of the server the client is connected to.
+func (c *Client) Addr() string {
+ if c.unixSocket != "" {
+ return c.unixSocket
+ }
+ return c.url.String()
+}
+
+// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.
+func checkPointTypes(p Point) error {
+	for _, v := range p.Fields {
+		switch v.(type) {
+		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool, string, nil:
+			// supported type; keep checking the remaining fields
+		default:
+			return fmt.Errorf("unsupported point type: %T", v)
+		}
+	}
+	return nil
+}
+
+// helper functions
+
+// EpochToTime takes a unix epoch time and uses precision to return back a time.Time
+func EpochToTime(epoch int64, precision string) (time.Time, error) {
+ if precision == "" {
+ precision = "s"
+ }
+ var t time.Time
+ switch precision {
+ case "h":
+ t = time.Unix(0, epoch*int64(time.Hour))
+ case "m":
+ t = time.Unix(0, epoch*int64(time.Minute))
+ case "s":
+ t = time.Unix(0, epoch*int64(time.Second))
+ case "ms":
+ t = time.Unix(0, epoch*int64(time.Millisecond))
+ case "u":
+ t = time.Unix(0, epoch*int64(time.Microsecond))
+ case "n":
+ t = time.Unix(0, epoch)
+ default:
+ return time.Time{}, fmt.Errorf("Unknown precision %q", precision)
+ }
+ return t, nil
+}
+
+// SetPrecision will round a time to the specified precision
+func SetPrecision(t time.Time, precision string) time.Time {
+ switch precision {
+ case "n":
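+		// "n" is the native nanosecond precision; nothing to round.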
+ case "u":
+ return t.Round(time.Microsecond)
+ case "ms":
+ return t.Round(time.Millisecond)
+ case "s":
+ return t.Round(time.Second)
+ case "m":
+ return t.Round(time.Minute)
+ case "h":
+ return t.Round(time.Hour)
+ }
+ return t
+}
diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go
new file mode 100644
index 000000000..77d44f2b3
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go
@@ -0,0 +1,635 @@
+// Package client (v2) is the current official Go client for InfluxDB.
+package client // import "github.com/influxdata/influxdb/client/v2"
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/influxdb/models"
+)
+
+// HTTPConfig is the config data needed to create an HTTP Client.
+type HTTPConfig struct {
+ // Addr should be of the form "http://host:port"
+ // or "http://[ipv6-host%zone]:port".
+ Addr string
+
+ // Username is the influxdb username, optional.
+ Username string
+
+ // Password is the influxdb password, optional.
+ Password string
+
+ // UserAgent is the http User Agent, defaults to "InfluxDBClient".
+ UserAgent string
+
+ // Timeout for influxdb writes, defaults to no timeout.
+ Timeout time.Duration
+
+ // InsecureSkipVerify gets passed to the http client, if true, it will
+ // skip https certificate verification. Defaults to false.
+ InsecureSkipVerify bool
+
+ // TLSConfig allows the user to set their own TLS config for the HTTP
+ // Client. If set, this option overrides InsecureSkipVerify.
+ TLSConfig *tls.Config
+}
+
+// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.
+type BatchPointsConfig struct {
+ // Precision is the write precision of the points, defaults to "ns".
+ Precision string
+
+ // Database is the database to write points to.
+ Database string
+
+ // RetentionPolicy is the retention policy of the points.
+ RetentionPolicy string
+
+ // Write consistency is the number of servers required to confirm write.
+ WriteConsistency string
+}
+
+// Client is a client interface for writing & querying the database.
+type Client interface {
+	// Ping checks the status of the cluster, and will always return 0 time and no
+	// error for UDP clients.
+ Ping(timeout time.Duration) (time.Duration, string, error)
+
+ // Write takes a BatchPoints object and writes all Points to InfluxDB.
+ Write(bp BatchPoints) error
+
+ // Query makes an InfluxDB Query on the database. This will fail if using
+ // the UDP client.
+ Query(q Query) (*Response, error)
+
+ // Close releases any resources a Client may be using.
+ Close() error
+}
+
+// NewHTTPClient returns a new Client from the provided config.
+// Client is safe for concurrent use by multiple goroutines.
+func NewHTTPClient(conf HTTPConfig) (Client, error) {
+ if conf.UserAgent == "" {
+ conf.UserAgent = "InfluxDBClient"
+ }
+
+ u, err := url.Parse(conf.Addr)
+ if err != nil {
+ return nil, err
+ } else if u.Scheme != "http" && u.Scheme != "https" {
+ m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
+ " must start with http:// or https://", u.Scheme)
+ return nil, errors.New(m)
+ }
+
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: conf.InsecureSkipVerify,
+ },
+ }
+ if conf.TLSConfig != nil {
+ tr.TLSClientConfig = conf.TLSConfig
+ }
+ return &client{
+ url: *u,
+ username: conf.Username,
+ password: conf.Password,
+ useragent: conf.UserAgent,
+ httpClient: &http.Client{
+ Timeout: conf.Timeout,
+ Transport: tr,
+ },
+ transport: tr,
+ }, nil
+}
+
+// Ping will check to see if the server is up with an optional timeout on waiting for leader.
+// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
+func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
+ now := time.Now()
+ u := c.url
+ u.Path = "ping"
+
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return 0, "", err
+ }
+
+ req.Header.Set("User-Agent", c.useragent)
+
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ if timeout > 0 {
+ params := req.URL.Query()
+ params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds()))
+ req.URL.RawQuery = params.Encode()
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return 0, "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return 0, "", err
+ }
+
+ if resp.StatusCode != http.StatusNoContent {
+		var err = errors.New(string(body))
+ return 0, "", err
+ }
+
+ version := resp.Header.Get("X-Influxdb-Version")
+ return time.Since(now), version, nil
+}
+
+// Close releases the client's resources.
+func (c *client) Close() error {
+ c.transport.CloseIdleConnections()
+ return nil
+}
+
+// client is safe for concurrent use as the fields are all read-only
+// once the client is instantiated.
+type client struct {
+	// N.B - if url.UserInfo is accessed in future modifications to the
+	// methods on client, you will need to synchronise access to url.
+ url url.URL
+ username string
+ password string
+ useragent string
+ httpClient *http.Client
+ transport *http.Transport
+}
+
+// BatchPoints is an interface into a batched grouping of points to write into
+// InfluxDB together. BatchPoints is NOT thread-safe; you must create a separate
+// batch for each goroutine.
+type BatchPoints interface {
+ // AddPoint adds the given point to the Batch of points.
+ AddPoint(p *Point)
+ // AddPoints adds the given points to the Batch of points.
+ AddPoints(ps []*Point)
+ // Points lists the points in the Batch.
+ Points() []*Point
+
+ // Precision returns the currently set precision of this Batch.
+ Precision() string
+ // SetPrecision sets the precision of this batch.
+ SetPrecision(s string) error
+
+ // Database returns the currently set database of this Batch.
+ Database() string
+ // SetDatabase sets the database of this Batch.
+ SetDatabase(s string)
+
+ // WriteConsistency returns the currently set write consistency of this Batch.
+ WriteConsistency() string
+ // SetWriteConsistency sets the write consistency of this Batch.
+ SetWriteConsistency(s string)
+
+ // RetentionPolicy returns the currently set retention policy of this Batch.
+ RetentionPolicy() string
+ // SetRetentionPolicy sets the retention policy of this Batch.
+ SetRetentionPolicy(s string)
+}
+
+// NewBatchPoints returns a BatchPoints interface based on the given config.
+func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
+ if conf.Precision == "" {
+ conf.Precision = "ns"
+ }
+ if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
+ return nil, err
+ }
+ bp := &batchpoints{
+ database: conf.Database,
+ precision: conf.Precision,
+ retentionPolicy: conf.RetentionPolicy,
+ writeConsistency: conf.WriteConsistency,
+ }
+ return bp, nil
+}
+
+type batchpoints struct {
+ points []*Point
+ database string
+ precision string
+ retentionPolicy string
+ writeConsistency string
+}
+
+func (bp *batchpoints) AddPoint(p *Point) {
+ bp.points = append(bp.points, p)
+}
+
+func (bp *batchpoints) AddPoints(ps []*Point) {
+ bp.points = append(bp.points, ps...)
+}
+
+func (bp *batchpoints) Points() []*Point {
+ return bp.points
+}
+
+func (bp *batchpoints) Precision() string {
+ return bp.precision
+}
+
+func (bp *batchpoints) Database() string {
+ return bp.database
+}
+
+func (bp *batchpoints) WriteConsistency() string {
+ return bp.writeConsistency
+}
+
+func (bp *batchpoints) RetentionPolicy() string {
+ return bp.retentionPolicy
+}
+
+func (bp *batchpoints) SetPrecision(p string) error {
+ if _, err := time.ParseDuration("1" + p); err != nil {
+ return err
+ }
+ bp.precision = p
+ return nil
+}
+
+func (bp *batchpoints) SetDatabase(db string) {
+ bp.database = db
+}
+
+func (bp *batchpoints) SetWriteConsistency(wc string) {
+ bp.writeConsistency = wc
+}
+
+func (bp *batchpoints) SetRetentionPolicy(rp string) {
+ bp.retentionPolicy = rp
+}
+
+// Point represents a single data point.
+type Point struct {
+ pt models.Point
+}
+
+// NewPoint returns a point with the given timestamp. If a timestamp is not
+// given, then data is sent to the database without a timestamp, in which case
+// the server will assign local time upon reception. NOTE: it is recommended to
+// send data with a timestamp.
+func NewPoint(
+ name string,
+ tags map[string]string,
+ fields map[string]interface{},
+ t ...time.Time,
+) (*Point, error) {
+ var T time.Time
+ if len(t) > 0 {
+ T = t[0]
+ }
+
+ pt, err := models.NewPoint(name, models.NewTags(tags), fields, T)
+ if err != nil {
+ return nil, err
+ }
+ return &Point{
+ pt: pt,
+ }, nil
+}
+
+// String returns a line-protocol string of the Point.
+func (p *Point) String() string {
+ return p.pt.String()
+}
+
+// PrecisionString returns a line-protocol string of the Point,
+// with the timestamp formatted for the given precision.
+func (p *Point) PrecisionString(precision string) string {
+	return p.pt.PrecisionString(precision)
+}
+
+// Name returns the measurement name of the point.
+func (p *Point) Name() string {
+ return string(p.pt.Name())
+}
+
+// Tags returns the tags associated with the point.
+func (p *Point) Tags() map[string]string {
+ return p.pt.Tags().Map()
+}
+
+// Time returns the timestamp for the point.
+func (p *Point) Time() time.Time {
+ return p.pt.Time()
+}
+
+// UnixNano returns timestamp of the point in nanoseconds since Unix epoch.
+func (p *Point) UnixNano() int64 {
+ return p.pt.UnixNano()
+}
+
+// Fields returns the fields for the point.
+func (p *Point) Fields() (map[string]interface{}, error) {
+ return p.pt.Fields()
+}
+
+// NewPointFrom returns a point from the provided models.Point.
+func NewPointFrom(pt models.Point) *Point {
+ return &Point{pt: pt}
+}
+
+func (c *client) Write(bp BatchPoints) error {
+ var b bytes.Buffer
+
+ for _, p := range bp.Points() {
+ if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
+ return err
+ }
+
+ if err := b.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ u := c.url
+ u.Path = "write"
+ req, err := http.NewRequest("POST", u.String(), &b)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.useragent)
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ params := req.URL.Query()
+ params.Set("db", bp.Database())
+ params.Set("rp", bp.RetentionPolicy())
+ params.Set("precision", bp.Precision())
+ params.Set("consistency", bp.WriteConsistency())
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		var err = errors.New(string(body))
+ return err
+ }
+
+ return nil
+}
+
+// Query defines a query to send to the server.
+type Query struct {
+ Command string
+ Database string
+ Precision string
+ Chunked bool
+ ChunkSize int
+ Parameters map[string]interface{}
+}
+
+// NewQuery returns a query object.
+// The database and precision arguments can be empty strings if they are not needed for the query.
+func NewQuery(command, database, precision string) Query {
+ return Query{
+ Command: command,
+ Database: database,
+ Precision: precision,
+ Parameters: make(map[string]interface{}),
+ }
+}
+
+// NewQueryWithParameters returns a query object.
+// The database and precision arguments can be empty strings if they are not needed for the query.
+// parameters is a map of the parameter names used in the command to their values.
+func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query {
+ return Query{
+ Command: command,
+ Database: database,
+ Precision: precision,
+ Parameters: parameters,
+ }
+}
+
+// Response represents a list of statement results.
+type Response struct {
+ Results []Result
+ Err string `json:"error,omitempty"`
+}
+
+// Error returns the first error from any statement.
+// It returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+ if r.Err != "" {
+		return errors.New(r.Err)
+ }
+ for _, result := range r.Results {
+ if result.Err != "" {
+			return errors.New(result.Err)
+ }
+ }
+ return nil
+}
+
+// Message represents a user message.
+type Message struct {
+ Level string
+ Text string
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+ Series []models.Row
+ Messages []*Message
+ Err string `json:"error,omitempty"`
+}
+
+// Query sends a command to the server and returns the Response.
+func (c *client) Query(q Query) (*Response, error) {
+ u := c.url
+ u.Path = "query"
+
+ jsonParameters, err := json.Marshal(q.Parameters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("POST", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("Content-Type", "")
+ req.Header.Set("User-Agent", c.useragent)
+
+ if c.username != "" {
+ req.SetBasicAuth(c.username, c.password)
+ }
+
+ params := req.URL.Query()
+ params.Set("q", q.Command)
+ params.Set("db", q.Database)
+ params.Set("params", string(jsonParameters))
+ if q.Chunked {
+ params.Set("chunked", "true")
+ if q.ChunkSize > 0 {
+ params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
+ }
+ }
+
+ if q.Precision != "" {
+ params.Set("epoch", q.Precision)
+ }
+ req.URL.RawQuery = params.Encode()
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+	// If we lack an X-Influxdb-Version header, then we didn't get a response from influxdb
+ // but instead some other service. If the error code is also a 500+ code, then some
+ // downstream loadbalancer/proxy/etc had an issue and we should report that.
+ if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil || len(body) == 0 {
+ return nil, fmt.Errorf("received status code %d from downstream server", resp.StatusCode)
+ }
+
+ return nil, fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body)
+ }
+
+ // If we get an unexpected content type, then it is also not from influx direct and therefore
+ // we want to know what we received and what status code was returned for debugging purposes.
+ if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" {
+ // Read up to 1kb of the body to help identify downstream errors and limit the impact of things
+ // like downstream serving a large file
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024))
+ if err != nil || len(body) == 0 {
+ return nil, fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode)
+ }
+
+ return nil, fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body)
+ }
+
+ var response Response
+ if q.Chunked {
+ cr := NewChunkedResponse(resp.Body)
+ for {
+ r, err := cr.NextResponse()
+ if err != nil {
+ // If we got an error while decoding the response, send that back.
+ return nil, err
+ }
+
+ if r == nil {
+ break
+ }
+
+ response.Results = append(response.Results, r.Results...)
+ if r.Err != "" {
+ response.Err = r.Err
+ break
+ }
+ }
+ } else {
+ dec := json.NewDecoder(resp.Body)
+ dec.UseNumber()
+ decErr := dec.Decode(&response)
+
+ // ignore this error if we got an invalid status code
+ if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
+ decErr = nil
+ }
+ // If we got a valid decode error, send that back
+ if decErr != nil {
+ return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
+ }
+ }
+
+	// If we don't have an error in our json response, and didn't get StatusOK,
+	// then send back an error.
+ if resp.StatusCode != http.StatusOK && response.Error() == nil {
+ return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
+ }
+ return &response, nil
+}
+
+// duplexReader reads from r and writes everything it reads to w,
+// while satisfying the io.Reader interface.
+type duplexReader struct {
+ r io.Reader
+ w io.Writer
+}
+
+func (r *duplexReader) Read(p []byte) (n int, err error) {
+ n, err = r.r.Read(p)
+ if err == nil {
+ r.w.Write(p[:n])
+ }
+ return n, err
+}
+
+// ChunkedResponse represents a response from the server that
+// uses chunking to stream the output.
+type ChunkedResponse struct {
+ dec *json.Decoder
+ duplex *duplexReader
+ buf bytes.Buffer
+}
+
+// NewChunkedResponse reads a stream and produces responses from the stream.
+func NewChunkedResponse(r io.Reader) *ChunkedResponse {
+ resp := &ChunkedResponse{}
+ resp.duplex = &duplexReader{r: r, w: &resp.buf}
+ resp.dec = json.NewDecoder(resp.duplex)
+ resp.dec.UseNumber()
+ return resp
+}
+
+// NextResponse reads the next line of the stream and returns a response.
+func (r *ChunkedResponse) NextResponse() (*Response, error) {
+ var response Response
+
+ if err := r.dec.Decode(&response); err != nil {
+ if err == io.EOF {
+ return nil, nil
+ }
+ // A decoding error happened. This probably means the server crashed
+ // and sent a last-ditch error message to us. Ensure we have read the
+ // entirety of the connection to get any remaining error text.
+ io.Copy(ioutil.Discard, r.duplex)
+ return nil, errors.New(strings.TrimSpace(r.buf.String()))
+ }
+
+ r.buf.Reset()
+ return &response, nil
+}
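+
+// A minimal usage sketch for ChunkedResponse (illustrative only; "body" stands
+// in for any io.Reader carrying a chunked query response):
+//
+//    cr := NewChunkedResponse(body)
+//    for {
+//        r, err := cr.NextResponse()
+//        if err != nil {
+//            break // decoding failed; err carries any trailing server error text
+//        }
+//        if r == nil {
+//            break // end of stream
+//        }
+//        // process r.Results ...
+//    }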
diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go
new file mode 100644
index 000000000..779a28b33
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go
@@ -0,0 +1,112 @@
+package client
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "time"
+)
+
+const (
+ // UDPPayloadSize is a reasonable default payload size for UDP packets that
+ // could be travelling over the internet.
+ UDPPayloadSize = 512
+)
+
+// UDPConfig is the config data needed to create a UDP Client.
+type UDPConfig struct {
+ // Addr should be of the form "host:port"
+ // or "[ipv6-host%zone]:port".
+ Addr string
+
+ // PayloadSize is the maximum size of a UDP client message. Optional;
+ // tune this based on your network. Defaults to UDPPayloadSize.
+ PayloadSize int
+}
+
+// NewUDPClient returns a client interface for writing to an InfluxDB UDP
+// service from the given config.
+func NewUDPClient(conf UDPConfig) (Client, error) {
+ var udpAddr *net.UDPAddr
+ udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
+ if err != nil {
+ return nil, err
+ }
+
+ conn, err := net.DialUDP("udp", nil, udpAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ payloadSize := conf.PayloadSize
+ if payloadSize == 0 {
+ payloadSize = UDPPayloadSize
+ }
+
+ return &udpclient{
+ conn: conn,
+ payloadSize: payloadSize,
+ }, nil
+}
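+
+// A minimal usage sketch (illustrative only; the address and the BatchPoints
+// value bp are assumptions built elsewhere):
+//
+//    c, err := NewUDPClient(UDPConfig{Addr: "localhost:8089"})
+//    if err != nil {
+//        // handle error
+//    }
+//    defer c.Close()
+//    if err := c.Write(bp); err != nil {
+//        // handle error
+//    }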
+
+// Close releases the udpclient's resources.
+func (uc *udpclient) Close() error {
+ return uc.conn.Close()
+}
+
+type udpclient struct {
+ conn io.WriteCloser
+ payloadSize int
+}
+
+func (uc *udpclient) Write(bp BatchPoints) error {
+ var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed
+ var d, _ = time.ParseDuration("1" + bp.Precision())
+
+ var delayedError error
+
+ var checkBuffer = func(n int) {
+ if len(b) > 0 && len(b)+n > uc.payloadSize {
+ if _, err := uc.conn.Write(b); err != nil {
+ delayedError = err
+ }
+ b = b[:0]
+ }
+ }
+
+ for _, p := range bp.Points() {
+ p.pt.Round(d)
+ pointSize := p.pt.StringSize() + 1 // include newline in size
+
+ checkBuffer(pointSize)
+
+ if p.Time().IsZero() || pointSize <= uc.payloadSize {
+ b = p.pt.AppendString(b)
+ b = append(b, '\n')
+ continue
+ }
+
+ points := p.pt.Split(uc.payloadSize - 1) // account for newline character
+ for _, sp := range points {
+ checkBuffer(sp.StringSize() + 1)
+ b = sp.AppendString(b)
+ b = append(b, '\n')
+ }
+ }
+
+ if len(b) > 0 {
+ if _, err := uc.conn.Write(b); err != nil {
+ return err
+ }
+ }
+ return delayedError
+}
+
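+// Query is not supported over UDP; it always returns an error.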
+func (uc *udpclient) Query(q Query) (*Response, error) {
+ return nil, fmt.Errorf("Querying via UDP is not supported")
+}
+
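+// Ping is a no-op for the UDP client: there is no connection to verify, so it
+// always succeeds immediately.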
+func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {
+ return 0, "", nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/consistency.go b/vendor/github.com/influxdata/influxdb/models/consistency.go
new file mode 100644
index 000000000..2a3269bca
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/consistency.go
@@ -0,0 +1,48 @@
+package models
+
+import (
+ "errors"
+ "strings"
+)
+
+// ConsistencyLevel represents the replication criteria that must be met before
+// a write can be returned as successful.
+//
+// The consistency level is parsed by open-source InfluxDB but is only applicable to clusters.
+type ConsistencyLevel int
+
+const (
+ // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
+ ConsistencyLevelAny ConsistencyLevel = iota
+
+ // ConsistencyLevelOne requires at least one data node acknowledged a write.
+ ConsistencyLevelOne
+
+ // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
+ ConsistencyLevelQuorum
+
+ // ConsistencyLevelAll requires all data nodes to acknowledge a write.
+ ConsistencyLevelAll
+)
+
+var (
+ // ErrInvalidConsistencyLevel is returned when parsing the string version
+ // of a consistency level.
+ ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
+)
+
+// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
+ switch strings.ToLower(level) {
+ case "any":
+ return ConsistencyLevelAny, nil
+ case "one":
+ return ConsistencyLevelOne, nil
+ case "quorum":
+ return ConsistencyLevelQuorum, nil
+ case "all":
+ return ConsistencyLevelAll, nil
+ default:
+ return 0, ErrInvalidConsistencyLevel
+ }
+}
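+
+// A minimal usage sketch (illustrative only):
+//
+//    level, err := ParseConsistencyLevel("quorum")
+//    // level == ConsistencyLevelQuorum, err == nil; unknown strings
+//    // yield ErrInvalidConsistencyLevel.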
diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go
new file mode 100644
index 000000000..eec1ae8b0
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go
@@ -0,0 +1,32 @@
+package models // import "github.com/influxdata/influxdb/models"
+
+// from stdlib hash/fnv/fnv.go
+const (
+ prime64 = 1099511628211
+ offset64 = 14695981039346656037
+)
+
+// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
+// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
+type InlineFNV64a uint64
+
+// NewInlineFNV64a returns a new instance of InlineFNV64a.
+func NewInlineFNV64a() InlineFNV64a {
+ return offset64
+}
+
+// Write adds data to the running hash.
+func (s *InlineFNV64a) Write(data []byte) (int, error) {
+ hash := uint64(*s)
+ for _, c := range data {
+ hash ^= uint64(c)
+ hash *= prime64
+ }
+ *s = InlineFNV64a(hash)
+ return len(data), nil
+}
+
+// Sum64 returns the uint64 of the current resulting hash.
+func (s *InlineFNV64a) Sum64() uint64 {
+ return uint64(*s)
+}
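+
+// A minimal usage sketch (illustrative only): hashing a series key without
+// allocating:
+//
+//    h := NewInlineFNV64a()
+//    h.Write([]byte("cpu,host=serverA"))
+//    sum := h.Sum64()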
diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go
new file mode 100644
index 000000000..8db483738
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go
@@ -0,0 +1,44 @@
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+ "reflect"
+ "strconv"
+ "unsafe"
+)
+
+// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
+func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
+ s := unsafeBytesToString(b)
+ return strconv.ParseInt(s, base, bitSize)
+}
+
+// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
+func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
+ s := unsafeBytesToString(b)
+ return strconv.ParseUint(s, base, bitSize)
+}
+
+// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
+func parseFloatBytes(b []byte, bitSize int) (float64, error) {
+ s := unsafeBytesToString(b)
+ return strconv.ParseFloat(s, bitSize)
+}
+
+// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
+func parseBoolBytes(b []byte) (bool, error) {
+ return strconv.ParseBool(unsafeBytesToString(b))
+}
+
+// unsafeBytesToString converts a []byte to a string without a heap allocation.
+//
+// It is unsafe, and is intended to prepare input to short-lived functions
+// that require strings.
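+//
+// The returned string shares the slice's backing memory: the caller must not
+// retain it, and must not mutate the slice while the string is in use.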
+func unsafeBytesToString(in []byte) string {
+ src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
+ dst := reflect.StringHeader{
+ Data: src.Data,
+ Len: src.Len,
+ }
+ s := *(*string)(unsafe.Pointer(&dst))
+ return s
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go
new file mode 100644
index 000000000..ad80a816b
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/points.go
@@ -0,0 +1,2337 @@
+// Package models implements basic objects used throughout the TICK stack.
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/influxdata/influxdb/pkg/escape"
+)
+
+var (
+ measurementEscapeCodes = map[byte][]byte{
+ ',': []byte(`\,`),
+ ' ': []byte(`\ `),
+ }
+
+ tagEscapeCodes = map[byte][]byte{
+ ',': []byte(`\,`),
+ ' ': []byte(`\ `),
+ '=': []byte(`\=`),
+ }
+
+ // ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
+ ErrPointMustHaveAField = errors.New("point without fields is unsupported")
+
+ // ErrInvalidNumber is returned when a number is expected but not provided.
+ ErrInvalidNumber = errors.New("invalid number")
+
+ // ErrInvalidPoint is returned when a point cannot be parsed correctly.
+ ErrInvalidPoint = errors.New("point is invalid")
+)
+
+const (
+ // MaxKeyLength is the largest allowed size of the combined measurement and tag keys.
+ MaxKeyLength = 65535
+)
+
+// enableUint64Support will enable uint64 support if set to true.
+var enableUint64Support = false
+
+// EnableUintSupport manually enables uint support for the point parser.
+// This function will be removed in the future and only exists for unit tests during the
+// transition.
+func EnableUintSupport() {
+ enableUint64Support = true
+}
+
+// Point defines the values that will be written to the database.
+type Point interface {
+ // Name returns the measurement name for the point.
+ Name() []byte
+
+ // SetName updates the measurement name for the point.
+ SetName(string)
+
+ // Tags returns the tag set for the point.
+ Tags() Tags
+
+ // AddTag adds or replaces a tag value for a point.
+ AddTag(key, value string)
+
+ // SetTags replaces the tags for the point.
+ SetTags(tags Tags)
+
+ // HasTag returns true if the tag exists for the point.
+ HasTag(tag []byte) bool
+
+ // Fields returns the fields for the point.
+ Fields() (Fields, error)
+
+ // Time returns the timestamp for the point.
+ Time() time.Time
+
+ // SetTime updates the timestamp for the point.
+ SetTime(t time.Time)
+
+ // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+ UnixNano() int64
+
+ // HashID returns a non-cryptographic checksum of the point's key.
+ HashID() uint64
+
+ // Key returns the key (measurement joined with tags) of the point.
+ Key() []byte
+
+ // String returns a string representation of the point. If there is a
+ // timestamp associated with the point then it will be specified with the default
+ // precision of nanoseconds.
+ String() string
+
+ // MarshalBinary returns a binary representation of the point.
+ MarshalBinary() ([]byte, error)
+
+ // PrecisionString returns a string representation of the point. If there
+ // is a timestamp associated with the point then it will be specified in the
+ // given unit.
+ PrecisionString(precision string) string
+
+ // RoundedString returns a string representation of the point. If there
+ // is a timestamp associated with the point, then it will be rounded to the
+ // given duration.
+ RoundedString(d time.Duration) string
+
+ // Split will attempt to return multiple points with the same timestamp whose
+ // string representations are no longer than size. Points with a single field or
+ // a point without a timestamp may exceed the requested size.
+ Split(size int) []Point
+
+ // Round will round the timestamp of the point to the given duration.
+ Round(d time.Duration)
+
+ // StringSize returns the length of the string that would be returned by String().
+ StringSize() int
+
+ // AppendString appends the result of String() to the provided buffer and returns
+ // the result, potentially reducing string allocations.
+ AppendString(buf []byte) []byte
+
+ // FieldIterator returns a FieldIterator that can be used to traverse the
+ // fields of a point without constructing the in-memory map.
+ FieldIterator() FieldIterator
+}
+
+// FieldType represents the type of a field.
+type FieldType int
+
+const (
+ // Integer indicates the field's type is integer.
+ Integer FieldType = iota
+
+ // Float indicates the field's type is float.
+ Float
+
+ // Boolean indicates the field's type is boolean.
+ Boolean
+
+ // String indicates the field's type is string.
+ String
+
+ // Empty is used to indicate that there is no field.
+ Empty
+
+ // Unsigned indicates the field's type is an unsigned integer.
+ Unsigned
+)
+
+// FieldIterator provides a low-allocation interface to iterate through a point's fields.
+type FieldIterator interface {
+ // Next indicates whether there are any fields remaining.
+ Next() bool
+
+ // FieldKey returns the key of the current field.
+ FieldKey() []byte
+
+ // Type returns the FieldType of the current field.
+ Type() FieldType
+
+ // StringValue returns the string value of the current field.
+ StringValue() string
+
+ // IntegerValue returns the integer value of the current field.
+ IntegerValue() (int64, error)
+
+ // UnsignedValue returns the unsigned value of the current field.
+ UnsignedValue() (uint64, error)
+
+ // BooleanValue returns the boolean value of the current field.
+ BooleanValue() (bool, error)
+
+ // FloatValue returns the float value of the current field.
+ FloatValue() (float64, error)
+
+ // Reset resets the iterator to its initial state.
+ Reset()
+}
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+// Len implements sort.Interface.
+func (a Points) Len() int { return len(a) }
+
+// Less implements sort.Interface.
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+
+// Swap implements sort.Interface.
+func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+ time time.Time
+
+ // text encoding of measurement and tags
+ // key must always be stored sorted by tags; if the original line was not sorted,
+ // we need to re-sort it
+ key []byte
+
+ // text encoding of field data
+ fields []byte
+
+ // text encoding of timestamp
+ ts []byte
+
+ // cached version of parsed fields from data
+ cachedFields map[string]interface{}
+
+ // cached version of parsed name from key
+ cachedName string
+
+ // cached version of parsed tags
+ cachedTags Tags
+
+ it fieldIterator
+}
+
+// type assertions
+var (
+ _ Point = (*point)(nil)
+ _ FieldIterator = (*point)(nil)
+)
+
+const (
+ // the number of characters for the largest possible int64 (9223372036854775807)
+ maxInt64Digits = 19
+
+ // the number of characters for the smallest possible int64 (-9223372036854775808)
+ minInt64Digits = 20
+
+ // the number of characters for the largest possible uint64 (18446744073709551615)
+ maxUint64Digits = 20
+
+ // the number of characters required for the largest float64 before a range check
+ // would occur during parsing
+ maxFloat64Digits = 25
+
+ // the number of characters required for the smallest float64 before a range check
+ // would occur during parsing
+ minFloat64Digits = 27
+)
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines. If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
+func ParsePoints(buf []byte) ([]Point, error) {
+ return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
+
+// ParsePointsString is identical to ParsePoints but accepts a string.
+func ParsePointsString(buf string) ([]Point, error) {
+ return ParsePoints([]byte(buf))
+}
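+
+// A minimal usage sketch (illustrative only; the line-protocol input is an
+// arbitrary example):
+//
+//    pts, err := ParsePointsString("cpu,host=serverA value=1.0 1000000000")
+//    // pts holds one Point with measurement "cpu", tag host=serverA,
+//    // field value=1.0 and a timestamp of 1000000000ns since the epoch.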
+
+// ParseKey returns the measurement name and tags from a point.
+//
+// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParseKey(buf []byte) (string, Tags) {
+ meas, tags := ParseKeyBytes(buf)
+ return string(meas), tags
+}
+
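+// ParseKeyBytes is like ParseKey, but returns the measurement name as a byte slice.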
+func ParseKeyBytes(buf []byte) ([]byte, Tags) {
+ // Ignore the error from scanMeasurement: it returns "missing fields", which
+ // is irrelevant when parsing only a key
+ state, i, _ := scanMeasurement(buf, 0)
+
+ var tags Tags
+ if state == tagKeyState {
+ tags = parseTags(buf)
+ // scanMeasurement returns the location of the comma if there are tags, strip that off
+ return buf[:i-1], tags
+ }
+ return buf[:i], tags
+}
+
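+// ParseTags parses the tag portion of a series key into Tags.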
+func ParseTags(buf []byte) Tags {
+ return parseTags(buf)
+}
+
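+// ParseName parses the measurement name from a series key.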
+func ParseName(buf []byte) ([]byte, error) {
+ // Ignore the error from scanMeasurement: it returns "missing fields", which
+ // is irrelevant when parsing only a name
+ state, i, _ := scanMeasurement(buf, 0)
+ if state == tagKeyState {
+ return buf[:i-1], nil
+ }
+ return buf[:i], nil
+}
+
+// ParsePointsWithPrecision is similar to ParsePoints, but allows the
+// caller to provide a precision for time.
+//
+// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+ points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1)
+ var (
+ pos int
+ block []byte
+ failed []string
+ )
+ for pos < len(buf) {
+ pos, block = scanLine(buf, pos)
+ pos++
+
+ if len(block) == 0 {
+ continue
+ }
+
+ // lines which start with '#' are comments
+ start := skipWhitespace(block, 0)
+
+ // If line is all whitespace, just skip it
+ if start >= len(block) {
+ continue
+ }
+
+ if block[start] == '#' {
+ continue
+ }
+
+ // strip the newline if one is present
+ if block[len(block)-1] == '\n' {
+ block = block[:len(block)-1]
+ }
+
+ pt, err := parsePoint(block[start:], defaultTime, precision)
+ if err != nil {
+ failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
+ } else {
+ points = append(points, pt)
+ }
+ }
+ if len(failed) > 0 {
+ return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+ }
+ return points, nil
+}
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+ // scan the first block, which is measurement[,tag1=value1,tag2=value2...]
+ pos, key, err := scanKey(buf, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // measurement name is required
+ if len(key) == 0 {
+ return nil, fmt.Errorf("missing measurement")
+ }
+
+ if len(key) > MaxKeyLength {
+ return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+ }
+
+ // scan the second block, which is field1=value1[,field2=value2,...]
+ pos, fields, err := scanFields(buf, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ // at least one field is required
+ if len(fields) == 0 {
+ return nil, fmt.Errorf("missing fields")
+ }
+
+ var maxKeyErr error
+ walkFields(fields, func(k, v []byte) bool {
+ if sz := seriesKeySize(key, k); sz > MaxKeyLength {
+ maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+ return false
+ }
+ return true
+ })
+
+ if maxKeyErr != nil {
+ return nil, maxKeyErr
+ }
+
+ // scan the last block which is an optional integer timestamp
+ pos, ts, err := scanTime(buf, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ pt := &point{
+ key: key,
+ fields: fields,
+ ts: ts,
+ }
+
+ if len(ts) == 0 {
+ pt.time = defaultTime
+ pt.SetPrecision(precision)
+ } else {
+ ts, err := parseIntBytes(ts, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ pt.time, err = SafeCalcTime(ts, precision)
+ if err != nil {
+ return nil, err
+ }
+
+ // Determine if there are illegal non-whitespace characters after the
+ // timestamp block.
+ for pos < len(buf) {
+ if buf[pos] != ' ' {
+ return nil, ErrInvalidPoint
+ }
+ pos++
+ }
+ }
+ return pt, nil
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified.
+func GetPrecisionMultiplier(precision string) int64 {
+ d := time.Nanosecond
+ switch precision {
+ case "u":
+ d = time.Microsecond
+ case "ms":
+ d = time.Millisecond
+ case "s":
+ d = time.Second
+ case "m":
+ d = time.Minute
+ case "h":
+ d = time.Hour
+ }
+ return int64(d)
+}
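+
+// A minimal usage sketch (illustrative only; pt stands in for any Point):
+//
+//    secs := pt.UnixNano() / GetPrecisionMultiplier("s")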
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf. If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+
+ i = start
+
+ // Determines whether the tags are sorted; assume they are
+ sorted := true
+
+ // indices holds the indexes within buf of the start of each tag. For example,
+ // a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20],
+ // which indicates that the first tag starts at buf[4], the second at buf[11], and
+ // the last at buf[20]
+ indices := make([]int, 100)
+
+ // tracks how many commas we've seen so we know how many entries of indices
+ // are in use. Since indices is an arbitrarily large slice, we need to know
+ // how many of its values are actually populated.
+ commas := 0
+
+ // First scan the Point's measurement.
+ state, i, err := scanMeasurement(buf, i)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+
+ // Optionally scan tags if needed.
+ if state == tagKeyState {
+ i, commas, indices, err = scanTags(buf, i, indices)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ }
+
+ // Now that we know where the key region is within buf and the location of the tags, we
+ // need to determine if duplicate tags exist and if the tags are sorted. This iterates
+ // over the list comparing each tag in the sequence with each other.
+ for j := 0; j < commas-1; j++ {
+ // get the left and right tags
+ _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+ _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=')
+
+ // If left is greater than right, the tags are not sorted. We do not have to
+ // continue because the short path no longer works.
+ // If the tags are equal, then there are duplicate tags, and we should abort.
+ // If the tags are not sorted, this pass may not find duplicate tags and we
+ // need to do a more exhaustive search later.
+ if cmp := bytes.Compare(left, right); cmp > 0 {
+ sorted = false
+ break
+ } else if cmp == 0 {
+ return i, buf[start:i], fmt.Errorf("duplicate tags")
+ }
+ }
+
+ // If the tags are not sorted, then sort them. This sort is inline and
+ // uses the tag indices we created earlier. The actual buffer is not sorted; the
+ // indices use the buffer for value comparison. After the indices are sorted,
+ // the buffer is reconstructed from the sorted indices.
+ if !sorted && commas > 0 {
+ // Get the measurement name for later
+ measurement := buf[start : indices[0]-1]
+
+ // Sort the indices
+ indices := indices[:commas]
+ insertionSort(0, commas, buf, indices)
+
+ // Create a new key using the measurement and sorted indices
+ b := make([]byte, len(buf[start:i]))
+ pos := copy(b, measurement)
+ for _, i := range indices {
+ b[pos] = ','
+ pos++
+ _, v := scanToSpaceOr(buf, i, ',')
+ pos += copy(b[pos:], v)
+ }
+
+ // Check again for duplicate tags now that the tags are sorted.
+ for j := 0; j < commas-1; j++ {
+ // get the left and right tags
+ _, left := scanTo(buf[indices[j]:], 0, '=')
+ _, right := scanTo(buf[indices[j+1]:], 0, '=')
+
+ // If the tags are equal, then there are duplicate tags, and we should abort.
+ // If the tags are not sorted, this pass may not find duplicate tags and we
+ // need to do a more exhaustive search later.
+ if bytes.Equal(left, right) {
+ return i, b, fmt.Errorf("duplicate tags")
+ }
+ }
+
+ return i, b, nil
+ }
+
+ return i, buf[start:i], nil
+}
+
+// The following constants allow us to specify which state to move to
+// next, when scanning sections of a Point.
+const (
+ tagKeyState = iota
+ tagValueState
+ fieldsState
+)
+
+// scanMeasurement examines the measurement part of a Point, returning
+// the next state to move to, and the current location in the buffer.
+func scanMeasurement(buf []byte, i int) (int, int, error) {
+ // Check first byte of measurement, anything except a comma is fine.
+ // It can't be a space, since whitespace is stripped prior to this
+ // function call.
+ if i >= len(buf) || buf[i] == ',' {
+ return -1, i, fmt.Errorf("missing measurement")
+ }
+
+ for {
+ i++
+ if i >= len(buf) {
+ // cpu
+ return -1, i, fmt.Errorf("missing fields")
+ }
+
+ if buf[i-1] == '\\' {
+ // Skip character (it's escaped).
+ continue
+ }
+
+ // Unescaped comma; move onto scanning the tags.
+ if buf[i] == ',' {
+ return tagKeyState, i + 1, nil
+ }
+
+ // Unescaped space; move onto scanning the fields.
+ if buf[i] == ' ' {
+ // cpu value=1.0
+ return fieldsState, i, nil
+ }
+ }
+}
+
+// scanTags examines all the tags in a Point, keeping track of and
+// returning the updated indices slice, number of commas and location
+// in buf where to start examining the Point fields.
+func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {
+ var (
+ err error
+ commas int
+ state = tagKeyState
+ )
+
+ for {
+ switch state {
+ case tagKeyState:
+ // Grow our indices slice if we have too many tags.
+ if commas >= len(indices) {
+ newIndices := make([]int, cap(indices)*2)
+ copy(newIndices, indices)
+ indices = newIndices
+ }
+ indices[commas] = i
+ commas++
+
+ i, err = scanTagsKey(buf, i)
+ state = tagValueState // tag value always follows a tag key
+ case tagValueState:
+ state, i, err = scanTagsValue(buf, i)
+ case fieldsState:
+ indices[commas] = i + 1
+ return i, commas, indices, nil
+ }
+
+ if err != nil {
+ return i, commas, indices, err
+ }
+ }
+}
+
+// scanTagsKey scans each character in a tag key.
+func scanTagsKey(buf []byte, i int) (int, error) {
+ // First character of the key.
+ if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
+ // cpu,{'', ' ', ',', '='}
+ return i, fmt.Errorf("missing tag key")
+ }
+
+ // Examine each character in the tag key until we hit an unescaped
+ // equals (the tag value), or we hit an error (i.e., unescaped
+ // space or comma).
+ for {
+ i++
+
+ // Either we reached the end of the buffer or we hit an
+ // unescaped comma or space.
+ if i >= len(buf) ||
+ ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
+ // cpu,tag{'', ' ', ','}
+ return i, fmt.Errorf("missing tag value")
+ }
+
+ if buf[i] == '=' && buf[i-1] != '\\' {
+ // cpu,tag=
+ return i + 1, nil
+ }
+ }
+}
+
+// scanTagsValue scans each character in a tag value.
+func scanTagsValue(buf []byte, i int) (int, int, error) {
+ // Tag value cannot be empty.
+ if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
+ // cpu,tag={',', ' '}
+ return -1, i, fmt.Errorf("missing tag value")
+ }
+
+ // Examine each character in the tag value until we hit an unescaped
+ // comma (move onto next tag key), an unescaped space (move onto
+ // fields), or we error out.
+ for {
+ i++
+ if i >= len(buf) {
+ // cpu,tag=value
+ return -1, i, fmt.Errorf("missing fields")
+ }
+
+ // An unescaped equals sign is an invalid tag value.
+ if buf[i] == '=' && buf[i-1] != '\\' {
+ // cpu,tag={'=', 'fo=o'}
+ return -1, i, fmt.Errorf("invalid tag format")
+ }
+
+ if buf[i] == ',' && buf[i-1] != '\\' {
+ // cpu,tag=foo,
+ return tagKeyState, i + 1, nil
+ }
+
+ // cpu,tag=foo value=1.0
+ // cpu, tag=foo\= value=1.0
+ if buf[i] == ' ' && buf[i-1] != '\\' {
+ return fieldsState, i, nil
+ }
+ }
+}
+
+func insertionSort(l, r int, buf []byte, indices []int) {
+ for i := l + 1; i < r; i++ {
+ for j := i; j > l && less(buf, indices, j, j-1); j-- {
+ indices[j], indices[j-1] = indices[j-1], indices[j]
+ }
+ }
+}
+
+func less(buf []byte, indices []int, i, j int) bool {
+ // This grabs the tag names for i & j, it ignores the values
+ _, a := scanTo(buf, indices[i], '=')
+ _, b := scanTo(buf, indices[j], '=')
+ return bytes.Compare(a, b) < 0
+}
+
+// scanFields scans buf, starting at i for the fields section of a point. It returns
+// the ending position and the byte slice of the fields within buf.
+func scanFields(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+ i = start
+ quoted := false
+
+ // tracks how many '=' we've seen
+ equals := 0
+
+ // tracks how many commas we've seen
+ commas := 0
+
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // escaped characters?
+ if buf[i] == '\\' && i+1 < len(buf) {
+ i += 2
+ continue
+ }
+
+ // If the value is quoted, scan until we get to the end quote
+ // Only track quotes in the field value, since quotes are not significant
+ // in the field key
+ if buf[i] == '"' && equals > commas {
+ quoted = !quoted
+ i++
+ continue
+ }
+
+ // If we see an =, ensure that there is at least one char before and after it
+ if buf[i] == '=' && !quoted {
+ equals++
+
+ // check for "... =123" but allow "a\ =123"
+ if buf[i-1] == ' ' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing field key")
+ }
+
+ // check for "...a=123,=456" but allow "a=123,a\,=456"
+ if buf[i-1] == ',' && buf[i-2] != '\\' {
+ return i, buf[start:i], fmt.Errorf("missing field key")
+ }
+
+ // check for "... value="
+ if i+1 >= len(buf) {
+ return i, buf[start:i], fmt.Errorf("missing field value")
+ }
+
+ // check for "... value=,value2=..."
+ if buf[i+1] == ',' || buf[i+1] == ' ' {
+ return i, buf[start:i], fmt.Errorf("missing field value")
+ }
+
+ if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+ var err error
+ i, err = scanNumber(buf, i+1)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ continue
+ }
+ // If next byte is not a double-quote, the value must be a boolean
+ if buf[i+1] != '"' {
+ var err error
+ i, _, err = scanBoolean(buf, i+1)
+ if err != nil {
+ return i, buf[start:i], err
+ }
+ continue
+ }
+ }
+
+ if buf[i] == ',' && !quoted {
+ commas++
+ }
+
+ // reached end of block?
+ if buf[i] == ' ' && !quoted {
+ break
+ }
+ i++
+ }
+
+ if quoted {
+ return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+ }
+
+ // check that all field sections had keys and values (e.g. prevent "a=1,b")
+ if equals == 0 || commas != equals-1 {
+ return i, buf[start:i], fmt.Errorf("invalid field format")
+ }
+
+ return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i for the time section of a point. It
+// returns the ending position and the byte slice of the timestamp within buf
+// and an error if the timestamp is not in the correct numeric format.
+func scanTime(buf []byte, i int) (int, []byte, error) {
+ start := skipWhitespace(buf, i)
+ i = start
+
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // Reached end of block or trailing whitespace?
+ if buf[i] == '\n' || buf[i] == ' ' {
+ break
+ }
+
+ // Handle negative timestamps
+ if i == start && buf[i] == '-' {
+ i++
+ continue
+ }
+
+ // Timestamps should be integers; make sure they are, so we don't need
+ // to actually parse the timestamp until it is needed.
+ if buf[i] < '0' || buf[i] > '9' {
+ return i, buf[start:i], fmt.Errorf("bad timestamp")
+ }
+ i++
+ }
+ return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+ return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float. It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+ start := i
+ var isInt, isUnsigned bool
+
+ // Is negative number?
+ if i < len(buf) && buf[i] == '-' {
+ i++
+ // There must be more characters now, as just '-' is illegal.
+ if i == len(buf) {
+ return i, ErrInvalidNumber
+ }
+ }
+
+ // tracks whether we've seen a decimal point
+ decimal := false
+
+ // indicates the number is float in scientific notation
+ scientific := false
+
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' || buf[i] == ' ' {
+ break
+ }
+
+ if buf[i] == 'i' && i > start && !(isInt || isUnsigned) {
+ isInt = true
+ i++
+ continue
+ } else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) {
+ isUnsigned = true
+ i++
+ continue
+ }
+
+ if buf[i] == '.' {
+ // Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+ if decimal {
+ return i, ErrInvalidNumber
+ }
+ decimal = true
+ }
+
+ // `e` is valid for floats but not as the first char
+ if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+ scientific = true
+ i++
+ continue
+ }
+
+ // + and - are only valid at this point if they follow an e (scientific notation)
+ if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+ i++
+ continue
+ }
+
+ // NaN is an unsupported value
+ if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+ return i, ErrInvalidNumber
+ }
+
+ if !isNumeric(buf[i]) {
+ return i, ErrInvalidNumber
+ }
+ i++
+ }
+
+ if (isInt || isUnsigned) && (decimal || scientific) {
+ return i, ErrInvalidNumber
+ }
+
+ numericDigits := i - start
+ if isInt {
+ numericDigits--
+ }
+ if decimal {
+ numericDigits--
+ }
+ if buf[start] == '-' {
+ numericDigits--
+ }
+
+ if numericDigits == 0 {
+ return i, ErrInvalidNumber
+ }
+
+ // It's more common that numbers will be within min/max range for their type but we need to prevent
+ // out-of-range numbers from being parsed successfully. This uses some simple heuristics to decide
+ // if we should parse the number to the actual type. It does not do it all the time because it incurs
+ // extra allocations and we end up converting the type again when writing points to disk.
+ if isInt {
+ // Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+ if buf[i-1] != 'i' {
+ return i, ErrInvalidNumber
+ }
+ // Parse the int to check bounds; the number of digits could be larger than the max range
+ // We subtract 1 from the index to remove the `i` from our tests
+ if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+ if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
+ return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+ }
+ }
+ } else if isUnsigned {
+ // Return an error if uint64 support has not been enabled.
+ if !enableUint64Support {
+ return i, ErrInvalidNumber
+ }
+ // Make sure the last char is a 'u' for unsigned
+ if buf[i-1] != 'u' {
+ return i, ErrInvalidNumber
+ }
+ // Make sure the first char is not a '-' for unsigned
+ if buf[start] == '-' {
+ return i, ErrInvalidNumber
+ }
+ // Parse the uint to check bounds; the number of digits could be larger than the max range
+ // We subtract 1 from the index to remove the `u` from our tests
+ if len(buf[start:i-1]) >= maxUint64Digits {
+ if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil {
+ return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err)
+ }
+ }
+ } else {
+ // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+ if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+ if _, err := parseFloatBytes(buf[start:i], 64); err != nil {
+ return i, fmt.Errorf("invalid float")
+ }
+ }
+ }
+
+ return i, nil
+}
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an error
+// if an invalid boolean is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+ start := i
+
+ if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ i++
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' || buf[i] == ' ' {
+ break
+ }
+ i++
+ }
+
+ // Single char bool (t, T, f, F) is ok
+ if i-start == 1 {
+ return i, buf[start:i], nil
+ }
+
+ // length must be 4 for true or TRUE
+ if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ // length must be 5 for false or FALSE
+ if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ // Otherwise
+ valid := false
+ switch buf[start] {
+ case 't':
+ valid = bytes.Equal(buf[start:i], []byte("true"))
+ case 'f':
+ valid = bytes.Equal(buf[start:i], []byte("false"))
+ case 'T':
+ valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+ case 'F':
+ valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+ }
+
+ if !valid {
+ return i, buf[start:i], fmt.Errorf("invalid boolean")
+ }
+
+ return i, buf[start:i], nil
+}
+
+// skipWhitespace returns the end position within buf, starting at i after
+// scanning over spaces in tags.
+func skipWhitespace(buf []byte, i int) int {
+ for i < len(buf) {
+ if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
+ break
+ }
+ i++
+ }
+ return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+ start := i
+ quoted := false
+ fields := false
+
+ // tracks how many '=' and commas we've seen
+ // this duplicates some of the functionality in scanFields
+ equals := 0
+ commas := 0
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // skip past escaped characters
+ if buf[i] == '\\' && i+2 < len(buf) {
+ i += 2
+ continue
+ }
+
+ if buf[i] == ' ' {
+ fields = true
+ }
+
+ // If we see a double quote, make sure it is not escaped
+ if fields {
+ if !quoted && buf[i] == '=' {
+ i++
+ equals++
+ continue
+ } else if !quoted && buf[i] == ',' {
+ i++
+ commas++
+ continue
+ } else if buf[i] == '"' && equals > commas {
+ i++
+ quoted = !quoted
+ continue
+ }
+ }
+
+ if buf[i] == '\n' && !quoted {
+ break
+ }
+
+ i++
+ }
+
+ return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with an unescaped stop byte.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+ start := i
+ for {
+ // reached the end of buf?
+ if i >= len(buf) {
+ break
+ }
+
+ // Reached unescaped stop value?
+ if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
+ break
+ }
+ i++
+ }
+
+ return i, buf[start:i]
+}
+
+// scanToSpaceOr returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with either an unescaped stop byte or
+// an unescaped space.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+ start := i
+ if buf[i] == stop || buf[i] == ' ' {
+ return i, buf[start:i]
+ }
+
+ for {
+ i++
+ if buf[i-1] == '\\' {
+ continue
+ }
+
+ // reached the end of buf?
+ if i >= len(buf) {
+ return i, buf[start:i]
+ }
+
+ // reached end of block?
+ if buf[i] == stop || buf[i] == ' ' {
+ return i, buf[start:i]
+ }
+ }
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+ start := i
+ for {
+ if i >= len(buf) {
+ break
+ }
+
+ if buf[i] == ',' && buf[i-1] != '\\' {
+ break
+ }
+ i++
+ }
+ if i > len(buf) {
+ return i, nil
+ }
+ return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+ start := i
+ quoted := false
+ for i < len(buf) {
+ // Only escape char for a field value is a double-quote and backslash
+ if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') {
+ i += 2
+ continue
+ }
+
+ // Quoted value? (e.g. string)
+ if buf[i] == '"' {
+ i++
+ quoted = !quoted
+ continue
+ }
+
+ if buf[i] == ',' && !quoted {
+ break
+ }
+ i++
+ }
+ return i, buf[start:i]
+}
+
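+// EscapeMeasurement escapes any commas and spaces in a measurement name.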
+func EscapeMeasurement(in []byte) []byte {
+ for b, esc := range measurementEscapeCodes {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+ for b, esc := range measurementEscapeCodes {
+ in = bytes.Replace(in, esc, []byte{b}, -1)
+ }
+ return in
+}
+
+func escapeTag(in []byte) []byte {
+ for b, esc := range tagEscapeCodes {
+ if bytes.IndexByte(in, b) != -1 {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ }
+ return in
+}
+
+func unescapeTag(in []byte) []byte {
+ if bytes.IndexByte(in, '\\') == -1 {
+ return in
+ }
+
+ for b, esc := range tagEscapeCodes {
+ if bytes.IndexByte(in, b) != -1 {
+ in = bytes.Replace(in, esc, []byte{b}, -1)
+ }
+ }
+ return in
+}
+
+// escapeStringFieldReplacer replaces double quotes and backslashes
+// with the same character preceded by a backslash.
+// As of Go 1.7 this benchmarked better in allocations and CPU time
+// compared to iterating through a string byte-by-byte and appending to a new byte slice,
+// calling strings.Replace twice, and better than (*regexp.Regexp).ReplaceAllString.
+var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
+
+// EscapeStringField returns a copy of in with any double quotes or
+// backslashes with escaped values.
+func EscapeStringField(in string) string {
+ return escapeStringFieldReplacer.Replace(in)
+}
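+
+// A minimal usage sketch (illustrative only):
+//
+//    s := EscapeStringField(`say "hi"`) // s == `say \"hi\"`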
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped.
+func unescapeStringField(in string) string {
+ if strings.IndexByte(in, '\\') == -1 {
+ return in
+ }
+
+ var out []byte
+ i := 0
+ for {
+ if i >= len(in) {
+ break
+ }
+ // unescape backslashes
+ if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+ out = append(out, '\\')
+ i += 2
+ continue
+ }
+ // unescape double-quotes
+ if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+ out = append(out, '"')
+ i += 2
+ continue
+ }
+ out = append(out, in[i])
+ i++
+ }
+ return string(out)
+}
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
+func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {
+ key, err := pointKey(name, tags, fields, t)
+ if err != nil {
+ return nil, err
+ }
+
+ return &point{
+ key: key,
+ time: t,
+ fields: fields.MarshalBinary(),
+ }, nil
+}
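+
+// A minimal usage sketch (illustrative only; the measurement, tag and field
+// names are arbitrary examples, and Fields is this package's field map type):
+//
+//    pt, err := NewPoint(
+//        "cpu",
+//        NewTags(map[string]string{"host": "serverA"}),
+//        Fields{"value": 0.64},
+//        time.Now(),
+//    )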
+
+// pointKey checks some basic requirements for valid points, and returns the
+// key, along with a possible error.
+func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {
+ if len(fields) == 0 {
+ return nil, ErrPointMustHaveAField
+ }
+
+ if !t.IsZero() {
+ if err := CheckTime(t); err != nil {
+ return nil, err
+ }
+ }
+
+ for key, value := range fields {
+ switch value := value.(type) {
+ case float64:
+ // Ensure the caller validates and handles invalid field values
+ if math.IsNaN(value) {
+ return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+ }
+ case float32:
+ // Ensure the caller validates and handles invalid field values
+ if math.IsNaN(float64(value)) {
+ return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+ }
+ }
+ if len(key) == 0 {
+ return nil, fmt.Errorf("all fields must have non-empty names")
+ }
+ }
+
+ key := MakeKey([]byte(measurement), tags)
+ for field := range fields {
+ sz := seriesKeySize(key, []byte(field))
+ if sz > MaxKeyLength {
+ return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+ }
+ }
+
+ return key, nil
+}
+
+func seriesKeySize(key, field []byte) int {
+ // 4 is the length of the tsm1.fieldKeySeparator constant. It's inlined here to avoid a circular
+ // dependency.
+ return len(key) + 4 + len(field)
+}
+
+// NewPointFromBytes returns a new Point from a marshalled Point.
+func NewPointFromBytes(b []byte) (Point, error) {
+ p := &point{}
+ if err := p.UnmarshalBinary(b); err != nil {
+ return nil, err
+ }
+
+ // This does some basic validation to ensure there are fields and they
+ // can be unmarshalled as well.
+ iter := p.FieldIterator()
+ var hasField bool
+ for iter.Next() {
+ if len(iter.FieldKey()) == 0 {
+ continue
+ }
+ hasField = true
+ switch iter.Type() {
+ case Float:
+ _, err := iter.FloatValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ case Integer:
+ _, err := iter.IntegerValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ case Unsigned:
+ _, err := iter.UnsignedValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ case String:
+ // Skip since this won't return an error
+ case Boolean:
+ _, err := iter.BooleanValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ }
+ }
+
+ if !hasField {
+ return nil, ErrPointMustHaveAField
+ }
+
+ return p, nil
+}
+
+// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN) is passed, this function panics.
+func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {
+ pt, err := NewPoint(name, tags, fields, time)
+ if err != nil {
+ panic(err.Error())
+ }
+ return pt
+}
+
+// Key returns the key (measurement joined with tags) of the point.
+func (p *point) Key() []byte {
+ return p.key
+}
+
+func (p *point) name() []byte {
+ _, name := scanTo(p.key, 0, ',')
+ return name
+}
+
+func (p *point) Name() []byte {
+ return escape.Unescape(p.name())
+}
+
+// SetName updates the measurement name for the point.
+func (p *point) SetName(name string) {
+ p.cachedName = ""
+ p.key = MakeKey([]byte(name), p.Tags())
+}
+
+// Time returns the timestamp for the point.
+func (p *point) Time() time.Time {
+ return p.time
+}
+
+// SetTime updates the timestamp for the point.
+func (p *point) SetTime(t time.Time) {
+ p.time = t
+}
+
+// Round will round the timestamp of the point to the given duration.
+func (p *point) Round(d time.Duration) {
+ p.time = p.time.Round(d)
+}
+
+// Tags returns the tag set for the point.
+func (p *point) Tags() Tags {
+ if p.cachedTags != nil {
+ return p.cachedTags
+ }
+ p.cachedTags = parseTags(p.key)
+ return p.cachedTags
+}
+
+func (p *point) HasTag(tag []byte) bool {
+ if len(p.key) == 0 {
+ return false
+ }
+
+ var exists bool
+ walkTags(p.key, func(key, value []byte) bool {
+ if bytes.Equal(tag, key) {
+ exists = true
+ return false
+ }
+ return true
+ })
+
+ return exists
+}
+
+func walkTags(buf []byte, fn func(key, value []byte) bool) {
+ if len(buf) == 0 {
+ return
+ }
+
+ pos, name := scanTo(buf, 0, ',')
+
+ // it's an empty key, so there are no tags
+ if len(name) == 0 {
+ return
+ }
+
+ hasEscape := bytes.IndexByte(buf, '\\') != -1
+ i := pos + 1
+ var key, value []byte
+ for {
+ if i >= len(buf) {
+ break
+ }
+ i, key = scanTo(buf, i, '=')
+ i, value = scanTagValue(buf, i+1)
+
+ if len(value) == 0 {
+ continue
+ }
+
+ if hasEscape {
+ if !fn(unescapeTag(key), unescapeTag(value)) {
+ return
+ }
+ } else {
+ if !fn(key, value) {
+ return
+ }
+ }
+
+ i++
+ }
+}
+
+// walkFields walks each field key and value via fn. If fn returns false, the iteration
+// is stopped. The values are the raw byte slices and not the converted types.
+func walkFields(buf []byte, fn func(key, value []byte) bool) {
+ var i int
+ var key, val []byte
+ for len(buf) > 0 {
+ i, key = scanTo(buf, 0, '=')
+ buf = buf[i+1:]
+ i, val = scanFieldValue(buf, 0)
+ buf = buf[i:]
+ if !fn(key, val) {
+ break
+ }
+
+ // slice off comma
+ if len(buf) > 0 {
+ buf = buf[1:]
+ }
+ }
+}
+
+func parseTags(buf []byte) Tags {
+ if len(buf) == 0 {
+ return nil
+ }
+
+ tags := make(Tags, bytes.Count(buf, []byte(",")))
+ p := 0
+ walkTags(buf, func(key, value []byte) bool {
+ tags[p].Key = key
+ tags[p].Value = value
+ p++
+ return true
+ })
+ return tags
+}
+
+// MakeKey creates a series key from a measurement name and a set of tags.
+func MakeKey(name []byte, tags Tags) []byte {
+ // unescape the name and then re-escape it to avoid double escaping.
+ // The key should always be stored in escaped form.
+ return append(EscapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
+}
+
+// SetTags replaces the tags for the point.
+func (p *point) SetTags(tags Tags) {
+ p.key = MakeKey(p.Name(), tags)
+ p.cachedTags = tags
+}
+
+// AddTag adds or replaces a tag value for a point.
+func (p *point) AddTag(key, value string) {
+ tags := p.Tags()
+ tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})
+ sort.Sort(tags)
+ p.cachedTags = tags
+ p.key = MakeKey(p.Name(), tags)
+}
+
+// Fields returns the fields for the point.
+func (p *point) Fields() (Fields, error) {
+ if p.cachedFields != nil {
+ return p.cachedFields, nil
+ }
+ cf, err := p.unmarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ p.cachedFields = cf
+ return p.cachedFields, nil
+}
+
+// SetPrecision will round a time to the specified precision.
+func (p *point) SetPrecision(precision string) {
+ switch precision {
+ case "n":
+ case "u":
+ p.SetTime(p.Time().Truncate(time.Microsecond))
+ case "ms":
+ p.SetTime(p.Time().Truncate(time.Millisecond))
+ case "s":
+ p.SetTime(p.Time().Truncate(time.Second))
+ case "m":
+ p.SetTime(p.Time().Truncate(time.Minute))
+ case "h":
+ p.SetTime(p.Time().Truncate(time.Hour))
+ }
+}
+
+// String returns the string representation of the point.
+func (p *point) String() string {
+ if p.Time().IsZero() {
+ return string(p.Key()) + " " + string(p.fields)
+ }
+ return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
+}
+
+// AppendString appends the string representation of the point to buf.
+func (p *point) AppendString(buf []byte) []byte {
+ buf = append(buf, p.key...)
+ buf = append(buf, ' ')
+ buf = append(buf, p.fields...)
+
+ if !p.time.IsZero() {
+ buf = append(buf, ' ')
+ buf = strconv.AppendInt(buf, p.UnixNano(), 10)
+ }
+
+ return buf
+}
+
+// StringSize returns the length of the string that would be returned by String().
+func (p *point) StringSize() int {
+ size := len(p.key) + len(p.fields) + 1
+
+ if !p.time.IsZero() {
+ digits := 1 // even "0" has one digit
+ t := p.UnixNano()
+ if t < 0 {
+ // account for negative sign, then negate
+ digits++
+ t = -t
+ }
+ for t > 9 { // already accounted for one digit
+ digits++
+ t /= 10
+ }
+ size += digits + 1 // digits and a space
+ }
+
+ return size
+}
+
+// MarshalBinary returns a binary representation of the point.
+func (p *point) MarshalBinary() ([]byte, error) {
+ if len(p.fields) == 0 {
+ return nil, ErrPointMustHaveAField
+ }
+
+ tb, err := p.time.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+
+ b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))
+ i := 0
+
+ binary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))
+ i += 4
+
+ i += copy(b[i:], p.key)
+
+ binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))
+ i += 4
+
+ i += copy(b[i:], p.fields)
+
+ copy(b[i:], tb)
+ return b, nil
+}
+
+// UnmarshalBinary decodes a binary representation of the point into a point struct.
+func (p *point) UnmarshalBinary(b []byte) error {
+ var n int
+
+ // Read key length.
+ if len(b) < 4 {
+ return io.ErrShortBuffer
+ }
+ n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]
+
+ // Read key.
+ if len(b) < n {
+ return io.ErrShortBuffer
+ }
+ p.key, b = b[:n], b[n:]
+
+ // Read fields length.
+ if len(b) < 4 {
+ return io.ErrShortBuffer
+ }
+ n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]
+
+ // Read fields.
+ if len(b) < n {
+ return io.ErrShortBuffer
+ }
+ p.fields, b = b[:n], b[n:]
+
+ // Read timestamp.
+ if err := p.time.UnmarshalBinary(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// PrecisionString returns a string representation of the point. If there
+// is a timestamp associated with the point then it will be specified in the
+// given unit.
+func (p *point) PrecisionString(precision string) string {
+ if p.Time().IsZero() {
+ return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+ }
+ return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+ p.UnixNano()/GetPrecisionMultiplier(precision))
+}
+
+// RoundedString returns a string representation of the point. If there
+// is a timestamp associated with the point, then it will be rounded to the
+// given duration.
+func (p *point) RoundedString(d time.Duration) string {
+ if p.Time().IsZero() {
+ return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+ }
+ return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+ p.time.Round(d).UnixNano())
+}
+
+func (p *point) unmarshalBinary() (Fields, error) {
+ iter := p.FieldIterator()
+ fields := make(Fields, 8)
+ for iter.Next() {
+ if len(iter.FieldKey()) == 0 {
+ continue
+ }
+ switch iter.Type() {
+ case Float:
+ v, err := iter.FloatValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ fields[string(iter.FieldKey())] = v
+ case Integer:
+ v, err := iter.IntegerValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ fields[string(iter.FieldKey())] = v
+ case Unsigned:
+ v, err := iter.UnsignedValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ fields[string(iter.FieldKey())] = v
+ case String:
+ fields[string(iter.FieldKey())] = iter.StringValue()
+ case Boolean:
+ v, err := iter.BooleanValue()
+ if err != nil {
+ return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+ }
+ fields[string(iter.FieldKey())] = v
+ }
+ }
+ return fields, nil
+}
+
+// HashID returns a non-cryptographic checksum of the point's key.
+func (p *point) HashID() uint64 {
+ h := NewInlineFNV64a()
+ h.Write(p.key)
+ sum := h.Sum64()
+ return sum
+}
+
+// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+func (p *point) UnixNano() int64 {
+ return p.Time().UnixNano()
+}
+
+// Split will attempt to return multiple points with the same timestamp whose
+// string representations are no longer than size. Points with a single field or
+// a point without a timestamp may exceed the requested size.
+func (p *point) Split(size int) []Point {
+ if p.time.IsZero() || p.StringSize() <= size {
+ return []Point{p}
+ }
+
+ // key string, timestamp string, spaces
+ size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2
+
+ var points []Point
+ var start, cur int
+
+ for cur < len(p.fields) {
+ end, _ := scanTo(p.fields, cur, '=')
+ end, _ = scanFieldValue(p.fields, end+1)
+
+ if cur > start && end-start > size {
+ points = append(points, &point{
+ key: p.key,
+ time: p.time,
+ fields: p.fields[start : cur-1],
+ })
+ start = cur
+ }
+
+ cur = end + 1
+ }
+
+ points = append(points, &point{
+ key: p.key,
+ time: p.time,
+ fields: p.fields[start:],
+ })
+
+ return points
+}
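+
+// A minimal usage sketch (illustrative only; pt stands in for an oversized
+// Point):
+//
+//    for _, sp := range pt.Split(512) {
+//        // each sp serializes to at most 512 bytes, except single-field
+//        // points or points without a timestamp, which may exceed it
+//    }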
+
+// Tag represents a single key/value tag pair.
+type Tag struct {
+ Key []byte
+ Value []byte
+}
+
+// NewTag returns a new Tag.
+func NewTag(key, value []byte) Tag {
+ return Tag{
+ Key: key,
+ Value: value,
+ }
+}
+
+// Size returns the size of the key and value.
+func (t Tag) Size() int { return len(t.Key) + len(t.Value) }
+
+// Clone returns a shallow copy of Tag.
+//
+// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
+// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
+func (t Tag) Clone() Tag {
+ other := Tag{
+ Key: make([]byte, len(t.Key)),
+ Value: make([]byte, len(t.Value)),
+ }
+
+ copy(other.Key, t.Key)
+ copy(other.Value, t.Value)
+
+ return other
+}
+
+// String returns the string representation of the tag.
+func (t *Tag) String() string {
+ var buf bytes.Buffer
+ buf.WriteByte('{')
+ buf.WriteString(string(t.Key))
+ buf.WriteByte(' ')
+ buf.WriteString(string(t.Value))
+ buf.WriteByte('}')
+ return buf.String()
+}
+
+// Tags represents a sorted list of tags.
+type Tags []Tag
+
+// NewTags returns a new Tags from a map.
+func NewTags(m map[string]string) Tags {
+ if len(m) == 0 {
+ return nil
+ }
+ a := make(Tags, 0, len(m))
+ for k, v := range m {
+ a = append(a, NewTag([]byte(k), []byte(v)))
+ }
+ sort.Sort(a)
+ return a
+}
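+
+// A minimal usage sketch (illustrative only):
+//
+//    tags := NewTags(map[string]string{"region": "us-west", "host": "serverA"})
+//    // tags is sorted by key: host=serverA, region=us-west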
+
+// Keys returns the list of keys for a tag set.
+func (a Tags) Keys() []string {
+ if len(a) == 0 {
+ return nil
+ }
+ keys := make([]string, len(a))
+ for i, tag := range a {
+ keys[i] = string(tag.Key)
+ }
+ return keys
+}
+
+// Values returns the list of values for a tag set.
+func (a Tags) Values() []string {
+ if len(a) == 0 {
+ return nil
+ }
+ values := make([]string, len(a))
+ for i, tag := range a {
+ values[i] = string(tag.Value)
+ }
+ return values
+}
+
+// String returns the string representation of the tags.
+func (a Tags) String() string {
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i := range a {
+ buf.WriteString(a[i].String())
+ if i < len(a)-1 {
+ buf.WriteByte(' ')
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+// Size returns the number of bytes needed to store all tags. Note, this is
+// the number of bytes needed to store all keys and values and does not account
+// for data structures or delimiters for example.
+func (a Tags) Size() int {
+ var total int
+ for _, t := range a {
+ total += t.Size()
+ }
+ return total
+}
+
+// Clone returns a copy of the slice where the elements are a result of
+// calling `Clone` on the original elements.
+//
+// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
+// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
+func (a Tags) Clone() Tags {
+ if len(a) == 0 {
+ return nil
+ }
+
+ others := make(Tags, len(a))
+ for i := range a {
+ others[i] = a[i].Clone()
+ }
+
+ return others
+}
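+
+// Example (sketch, assuming pt is a parsed Point): tags on a point returned
+// by ParsePointsWithPrecision alias the parse buffer, so clone them before
+// retaining:
+//
+//   tags := pt.Tags().Clone() // safe to keep after the buffer is reused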
+
+func (a Tags) Len() int { return len(a) }
+func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 }
+func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// Equal returns true if a equals other.
+func (a Tags) Equal(other Tags) bool {
+ if len(a) != len(other) {
+ return false
+ }
+ for i := range a {
+ if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) {
+ return false
+ }
+ }
+ return true
+}
+
+// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b.
+func CompareTags(a, b Tags) int {
+ // Compare each key & value until a mismatch.
+ for i := 0; i < len(a) && i < len(b); i++ {
+ if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 {
+ return cmp
+ }
+ if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 {
+ return cmp
+ }
+ }
+
+ // If all tags are equal up to this point then return shorter tagset.
+ if len(a) < len(b) {
+ return -1
+ } else if len(a) > len(b) {
+ return 1
+ }
+
+ // All tags are equal.
+ return 0
+}
+
+// Get returns the value for a key.
+func (a Tags) Get(key []byte) []byte {
+ // OPTIMIZE: Use sort.Search if tagset is large.
+
+ for _, t := range a {
+ if bytes.Equal(t.Key, key) {
+ return t.Value
+ }
+ }
+ return nil
+}
+
+// GetString returns the string value for a string key.
+func (a Tags) GetString(key string) string {
+ return string(a.Get([]byte(key)))
+}
+
+// Set sets the value for a key.
+func (a *Tags) Set(key, value []byte) {
+ for i, t := range *a {
+ if bytes.Equal(t.Key, key) {
+ (*a)[i].Value = value
+ return
+ }
+ }
+ *a = append(*a, Tag{Key: key, Value: value})
+ sort.Sort(*a)
+}
+
+// SetString sets the string value for a string key.
+func (a *Tags) SetString(key, value string) {
+ a.Set([]byte(key), []byte(value))
+}
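+
+// Example (sketch): Set keeps the tag set sorted by key, so Get and HashKey
+// remain consistent:
+//
+//   var tags Tags
+//   tags.SetString("region", "us-west")
+//   tags.SetString("host", "server01")
+//   tags.GetString("host") // "server01"; keys are ordered host, region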
+
+// Delete removes a tag by key.
+func (a *Tags) Delete(key []byte) {
+ for i, t := range *a {
+ if bytes.Equal(t.Key, key) {
+ copy((*a)[i:], (*a)[i+1:])
+ (*a)[len(*a)-1] = Tag{}
+ *a = (*a)[:len(*a)-1]
+ return
+ }
+ }
+}
+
+// Map returns a map representation of the tags.
+func (a Tags) Map() map[string]string {
+ m := make(map[string]string, len(a))
+ for _, t := range a {
+ m[string(t.Key)] = string(t.Value)
+ }
+ return m
+}
+
+// Merge combines the tags with the contents of other. If both define a tag
+// with the same key, the value from other overwrites the old value. A new
+// Tags set is returned.
+func (a Tags) Merge(other map[string]string) Tags {
+ merged := make(map[string]string, len(a)+len(other))
+ for _, t := range a {
+ merged[string(t.Key)] = string(t.Value)
+ }
+ for k, v := range other {
+ merged[k] = v
+ }
+ return NewTags(merged)
+}
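+
+// Example (sketch): on a key collision the value from other wins:
+//
+//   a := NewTags(map[string]string{"dc": "east", "host": "a"})
+//   a.Merge(map[string]string{"host": "b"}) // {dc: east, host: b}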
+
+// HashKey hashes all of a tag's keys.
+func (a Tags) HashKey() []byte {
+ // Empty maps marshal to empty bytes.
+ if len(a) == 0 {
+ return nil
+ }
+
+ // Type invariant: Tags are sorted
+
+ escaped := make(Tags, 0, len(a))
+ sz := 0
+ for _, t := range a {
+ ek := escapeTag(t.Key)
+ ev := escapeTag(t.Value)
+
+ if len(ev) > 0 {
+ escaped = append(escaped, Tag{Key: ek, Value: ev})
+ sz += len(ek) + len(ev)
+ }
+ }
+
+ sz += len(escaped) + (len(escaped) * 2) // separators
+
+ // Generate marshaled bytes.
+ b := make([]byte, sz)
+ buf := b
+ idx := 0
+ for _, k := range escaped {
+ buf[idx] = ','
+ idx++
+ copy(buf[idx:idx+len(k.Key)], k.Key)
+ idx += len(k.Key)
+ buf[idx] = '='
+ idx++
+ copy(buf[idx:idx+len(k.Value)], k.Value)
+ idx += len(k.Value)
+ }
+ return b[:idx]
+}
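+
+// Example (sketch): the result is the escaped, comma-prefixed tag portion of
+// a series key:
+//
+//   tags := NewTags(map[string]string{"host": "server 01"})
+//   string(tags.HashKey()) // `,host=server\ 01`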
+
+// CopyTags returns a shallow copy of tags.
+func CopyTags(a Tags) Tags {
+ other := make(Tags, len(a))
+ copy(other, a)
+ return other
+}
+
+// DeepCopyTags returns a deep copy of tags.
+func DeepCopyTags(a Tags) Tags {
+ // Calculate size of keys/values in bytes.
+ var n int
+ for _, t := range a {
+ n += len(t.Key) + len(t.Value)
+ }
+
+ // Build single allocation for all key/values.
+ buf := make([]byte, n)
+
+ // Copy tags to new set.
+ other := make(Tags, len(a))
+ for i, t := range a {
+ copy(buf, t.Key)
+ other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):]
+
+ copy(buf, t.Value)
+ other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):]
+ }
+
+ return other
+}
+
+// Fields represents a mapping between a Point's field names and their
+// values.
+type Fields map[string]interface{}
+
+// FieldIterator returns a FieldIterator that can be used to traverse the
+// fields of a point without constructing the in-memory map.
+func (p *point) FieldIterator() FieldIterator {
+ p.Reset()
+ return p
+}
+
+type fieldIterator struct {
+ start, end int
+ key, keybuf []byte
+ valueBuf []byte
+ fieldType FieldType
+}
+
+// Next indicates whether there are any fields remaining.
+func (p *point) Next() bool {
+ p.it.start = p.it.end
+ if p.it.start >= len(p.fields) {
+ return false
+ }
+
+ p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=')
+ if escape.IsEscaped(p.it.key) {
+ p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key)
+ p.it.key = p.it.keybuf
+ }
+
+ p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1)
+ p.it.end++
+
+ if len(p.it.valueBuf) == 0 {
+ p.it.fieldType = Empty
+ return true
+ }
+
+ c := p.it.valueBuf[0]
+
+ if c == '"' {
+ p.it.fieldType = String
+ return true
+ }
+
+ if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 {
+ if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {
+ p.it.fieldType = Integer
+ p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+ } else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' {
+ p.it.fieldType = Unsigned
+ p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+ } else {
+ p.it.fieldType = Float
+ }
+ return true
+ }
+
+ // to keep the same behavior that currently exists, default to boolean
+ p.it.fieldType = Boolean
+ return true
+}
+
+// FieldKey returns the key of the current field.
+func (p *point) FieldKey() []byte {
+ return p.it.key
+}
+
+// Type returns the FieldType of the current field.
+func (p *point) Type() FieldType {
+ return p.it.fieldType
+}
+
+// StringValue returns the string value of the current field.
+func (p *point) StringValue() string {
+ return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1]))
+}
+
+// IntegerValue returns the integer value of the current field.
+func (p *point) IntegerValue() (int64, error) {
+ n, err := parseIntBytes(p.it.valueBuf, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err)
+ }
+ return n, nil
+}
+
+// UnsignedValue returns the unsigned value of the current field.
+func (p *point) UnsignedValue() (uint64, error) {
+ n, err := parseUintBytes(p.it.valueBuf, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err)
+ }
+ return n, nil
+}
+
+// BooleanValue returns the boolean value of the current field.
+func (p *point) BooleanValue() (bool, error) {
+ b, err := parseBoolBytes(p.it.valueBuf)
+ if err != nil {
+ return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err)
+ }
+ return b, nil
+}
+
+// FloatValue returns the float value of the current field.
+func (p *point) FloatValue() (float64, error) {
+ f, err := parseFloatBytes(p.it.valueBuf, 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err)
+ }
+ return f, nil
+}
+
+// Reset resets the iterator to its initial state.
+func (p *point) Reset() {
+ p.it.fieldType = Empty
+ p.it.key = nil
+ p.it.valueBuf = nil
+ p.it.start = 0
+ p.it.end = 0
+}
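+
+// Example (sketch): iterate a point's fields without building the map; the
+// FieldType constants (Integer, Float, ...) are defined elsewhere in this
+// package:
+//
+//   it := pt.FieldIterator()
+//   for it.Next() {
+//       switch it.Type() {
+//       case Integer:
+//           v, err := it.IntegerValue()
+//           ...
+//       case Float:
+//           f, err := it.FloatValue()
+//           ...
+//       }
+//   }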
+
+// MarshalBinary encodes all the fields to their proper type and returns the
+// binary representation.
+// NOTE: uint64 is specifically not supported due to potential overflow when
+// we decode again later to an int64.
+// NOTE2: uint is accepted even though it may be 64 bits wide; it is written
+// as a signed integer for backwards compatibility.
+func (p Fields) MarshalBinary() []byte {
+ var b []byte
+ keys := make([]string, 0, len(p))
+
+ for k := range p {
+ keys = append(keys, k)
+ }
+
+ // Sorting is not strictly necessary, but it makes the encoding
+ // deterministic, since Go map iteration order is randomized.
+ sort.Strings(keys)
+
+ for i, k := range keys {
+ if i > 0 {
+ b = append(b, ',')
+ }
+ b = appendField(b, k, p[k])
+ }
+
+ return b
+}
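+
+// Example (sketch): keys are emitted in sorted order, integers get an 'i'
+// suffix, and strings are quoted:
+//
+//   Fields{"value": int64(42), "state": "ok"}.MarshalBinary()
+//   // => state="ok",value=42i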
+
+func appendField(b []byte, k string, v interface{}) []byte {
+ b = append(b, []byte(escape.String(k))...)
+ b = append(b, '=')
+
+ // check popular types first
+ switch v := v.(type) {
+ case float64:
+ b = strconv.AppendFloat(b, v, 'f', -1, 64)
+ case int64:
+ b = strconv.AppendInt(b, v, 10)
+ b = append(b, 'i')
+ case string:
+ b = append(b, '"')
+ b = append(b, []byte(EscapeStringField(v))...)
+ b = append(b, '"')
+ case bool:
+ b = strconv.AppendBool(b, v)
+ case int32:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int16:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int8:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case int:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint64:
+ b = strconv.AppendUint(b, v, 10)
+ b = append(b, 'u')
+ case uint32:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint16:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint8:
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case uint:
+ // TODO: 'uint' should be converted to writing as an unsigned integer,
+ // but we cannot since that would break backwards compatibility.
+ b = strconv.AppendInt(b, int64(v), 10)
+ b = append(b, 'i')
+ case float32:
+ b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
+ case []byte:
+ b = append(b, v...)
+ case nil:
+ // skip
+ default:
+ // Can't determine the type, so convert to string
+ b = append(b, '"')
+ b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...)
+ b = append(b, '"')
+
+ }
+
+ return b
+}
+
+type byteSlices [][]byte
+
+func (a byteSlices) Len() int { return len(a) }
+func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }
+func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/rows.go b/vendor/github.com/influxdata/influxdb/models/rows.go
new file mode 100644
index 000000000..c087a4882
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/rows.go
@@ -0,0 +1,62 @@
+package models
+
+import (
+ "sort"
+)
+
+// Row represents a single row returned from the execution of a statement.
+type Row struct {
+ Name string `json:"name,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+ Columns []string `json:"columns,omitempty"`
+ Values [][]interface{} `json:"values,omitempty"`
+ Partial bool `json:"partial,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+ return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+ h := NewInlineFNV64a()
+ keys := r.tagsKeys()
+ for _, k := range keys {
+ h.Write([]byte(k))
+ h.Write([]byte(r.Tags[k]))
+ }
+ return h.Sum64()
+}
+
+// tagsKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+ a := make([]string, 0, len(r.Tags))
+ for k := range r.Tags {
+ a = append(a, k)
+ }
+ sort.Strings(a)
+ return a
+}
+
+// Rows represents a collection of rows. Rows implements sort.Interface.
+type Rows []*Row
+
+// Len implements sort.Interface.
+func (p Rows) Len() int { return len(p) }
+
+// Less implements sort.Interface.
+func (p Rows) Less(i, j int) bool {
+ // Sort by name first.
+ if p[i].Name != p[j].Name {
+ return p[i].Name < p[j].Name
+ }
+
+ // Sort by tag set hash. Tags don't have a meaningful sort order so we
+ // just compute a hash and sort by that instead. This allows the tests
+ // to receive rows in a predictable order every time.
+ return p[i].tagsHash() < p[j].tagsHash()
+}
+
+// Swap implements sort.Interface.
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/statistic.go b/vendor/github.com/influxdata/influxdb/models/statistic.go
new file mode 100644
index 000000000..553e9d09f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/statistic.go
@@ -0,0 +1,42 @@
+package models
+
+// Statistic is the representation of a statistic used by the monitoring service.
+type Statistic struct {
+ Name string `json:"name"`
+ Tags map[string]string `json:"tags"`
+ Values map[string]interface{} `json:"values"`
+}
+
+// NewStatistic returns an initialized Statistic.
+func NewStatistic(name string) Statistic {
+ return Statistic{
+ Name: name,
+ Tags: make(map[string]string),
+ Values: make(map[string]interface{}),
+ }
+}
+
+// StatisticTags is a map that can be merged with others without causing
+// mutations to either map.
+type StatisticTags map[string]string
+
+// Merge creates a new map containing the merged contents of tags and t.
+// If both tags and the receiver map contain the same key, the value in tags
+// is used in the resulting map.
+//
+// Merge always returns a usable map.
+func (t StatisticTags) Merge(tags map[string]string) map[string]string {
+ // Add everything in tags to the result.
+ out := make(map[string]string, len(tags))
+ for k, v := range tags {
+ out[k] = v
+ }
+
+ // Only add values from t that don't appear in tags.
+ for k, v := range t {
+ if _, ok := tags[k]; !ok {
+ out[k] = v
+ }
+ }
+ return out
+}
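+
+// Example (sketch): values from the argument take precedence and neither
+// input map is mutated:
+//
+//   t := StatisticTags{"engine": "tsm1", "path": "/a"}
+//   t.Merge(map[string]string{"path": "/b"}) // {engine: tsm1, path: /b}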
diff --git a/vendor/github.com/influxdata/influxdb/models/time.go b/vendor/github.com/influxdata/influxdb/models/time.go
new file mode 100644
index 000000000..e98f2cb33
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/time.go
@@ -0,0 +1,74 @@
+package models
+
+// Helper time methods, since parsing time can easily overflow and we only
+// support a specific time range.
+
+import (
+ "fmt"
+ "math"
+ "time"
+)
+
+const (
+ // MinNanoTime is the minimum time that can be represented.
+ //
+ // 1677-09-21 00:12:43.145224194 +0000 UTC
+ //
+ // The two lowest int64 values are reserved as sentinels: the minimum value
+ // is needed as a value lower than any other value for comparisons, and a
+ // second, separate value is needed as a default that is unusable by the
+ // user but usable internally. Because these two values serve a special
+ // purpose, we do not allow users to write points at these two times.
+ MinNanoTime = int64(math.MinInt64) + 2
+
+ // MaxNanoTime is the maximum time that can be represented.
+ //
+ // 2262-04-11 23:47:16.854775806 +0000 UTC
+ //
+ // The highest time representable in nanoseconds is used as the exclusive
+ // upper bound of a shard group's range, so the maximum time is one less
+ // than the maximum int64 so that we don't lose a point at that one time.
+ MaxNanoTime = int64(math.MaxInt64) - 1
+)
+
+var (
+ minNanoTime = time.Unix(0, MinNanoTime).UTC()
+ maxNanoTime = time.Unix(0, MaxNanoTime).UTC()
+
+ // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
+ ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
+)
+
+// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
+// supported range.
+func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
+ mult := GetPrecisionMultiplier(precision)
+ if t, ok := safeSignedMult(timestamp, mult); ok {
+ tme := time.Unix(0, t).UTC()
+ return tme, CheckTime(tme)
+ }
+
+ return time.Time{}, ErrTimeOutOfRange
+}
+
+// CheckTime checks that a time is within the safe range.
+func CheckTime(t time.Time) error {
+ if t.Before(minNanoTime) || t.After(maxNanoTime) {
+ return ErrTimeOutOfRange
+ }
+ return nil
+}
+
+// Perform the multiplication and check to make sure it didn't overflow.
+func safeSignedMult(a, b int64) (int64, bool) {
+ if a == 0 || b == 0 || a == 1 || b == 1 {
+ return a * b, true
+ }
+ if a == MinNanoTime || b == MaxNanoTime {
+ return 0, false
+ }
+ c := a * b
+ return c, c/b == a
+}
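+
+// Example (sketch, assuming GetPrecisionMultiplier("s"), defined elsewhere in
+// this package, returns 1e9):
+//
+//   SafeCalcTime(1, "s")             // 1970-01-01 00:00:01 +0000 UTC, nil
+//   SafeCalcTime(math.MaxInt64, "s") // zero time, ErrTimeOutOfRange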
diff --git a/vendor/github.com/influxdata/influxdb/models/uint_support.go b/vendor/github.com/influxdata/influxdb/models/uint_support.go
new file mode 100644
index 000000000..18d1ca06e
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/uint_support.go
@@ -0,0 +1,7 @@
+// +build uint uint64
+
+package models
+
+func init() {
+ EnableUintSupport()
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
new file mode 100644
index 000000000..f3b31f42d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
@@ -0,0 +1,115 @@
+// Package escape contains utilities for escaping parts of InfluxQL
+// and InfluxDB line protocol.
+package escape // import "github.com/influxdata/influxdb/pkg/escape"
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Codes is a map of bytes to be escaped.
+var Codes = map[byte][]byte{
+ ',': []byte(`\,`),
+ '"': []byte(`\"`),
+ ' ': []byte(`\ `),
+ '=': []byte(`\=`),
+}
+
+// Bytes escapes characters on the input slice, as defined by Codes.
+func Bytes(in []byte) []byte {
+ for b, esc := range Codes {
+ in = bytes.Replace(in, []byte{b}, esc, -1)
+ }
+ return in
+}
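+
+// Example (sketch): each byte listed in Codes is backslash-escaped:
+//
+//   Bytes([]byte(`cpu load,host=a`)) // => `cpu\ load\,host\=a`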
+
+const escapeChars = `," =`
+
+// IsEscaped returns whether b has any escaped characters,
+// i.e. whether b seems to have been processed by Bytes.
+func IsEscaped(b []byte) bool {
+ for len(b) > 0 {
+ i := bytes.IndexByte(b, '\\')
+ if i < 0 {
+ return false
+ }
+
+ if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 {
+ return true
+ }
+ b = b[i+1:]
+ }
+ return false
+}
+
+// AppendUnescaped appends the unescaped version of src to dst
+// and returns the resulting slice.
+func AppendUnescaped(dst, src []byte) []byte {
+ var pos int
+ for len(src) > 0 {
+ next := bytes.IndexByte(src[pos:], '\\')
+ if next < 0 || pos+next+1 >= len(src) {
+ return append(dst, src...)
+ }
+
+ if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 {
+ if pos+next > 0 {
+ dst = append(dst, src[:pos+next]...)
+ }
+ src = src[pos+next+1:]
+ pos = 0
+ } else {
+ pos += next + 1
+ }
+ }
+
+ return dst
+}
+
+// Unescape returns a new slice containing the unescaped version of in.
+func Unescape(in []byte) []byte {
+ if len(in) == 0 {
+ return nil
+ }
+
+ if bytes.IndexByte(in, '\\') == -1 {
+ return in
+ }
+
+ i := 0
+ inLen := len(in)
+
+ // The output size will be no more than inLen. Preallocating the
+ // capacity of the output is faster and uses less memory than
+ // letting append() do its own (over)allocation.
+ out := make([]byte, 0, inLen)
+
+ for {
+ if i >= inLen {
+ break
+ }
+ if in[i] == '\\' && i+1 < inLen {
+ switch in[i+1] {
+ case ',':
+ out = append(out, ',')
+ i += 2
+ continue
+ case '"':
+ out = append(out, '"')
+ i += 2
+ continue
+ case ' ':
+ out = append(out, ' ')
+ i += 2
+ continue
+ case '=':
+ out = append(out, '=')
+ i += 2
+ continue
+ }
+ }
+ out = append(out, in[i])
+ i++
+ }
+ return out
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go
new file mode 100644
index 000000000..db98033b0
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go
@@ -0,0 +1,21 @@
+package escape
+
+import "strings"
+
+var (
+ escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
+ unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
+)
+
+// UnescapeString returns the unescaped version of in.
+func UnescapeString(in string) string {
+ if strings.IndexByte(in, '\\') == -1 {
+ return in
+ }
+ return unescaper.Replace(in)
+}
+
+// String returns the escaped version of in.
+func String(in string) string {
+ return escaper.Replace(in)
+}
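+
+// Example (sketch): String and UnescapeString are inverses over the four
+// escapable characters:
+//
+//   String(`cpu load`)          // `cpu\ load`
+//   UnescapeString(`cpu\ load`) // `cpu load`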