path: root/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rpcmetrics

import (
    "sync"

    "github.com/uber/jaeger-lib/metrics"
)

const (
    otherEndpointsPlaceholder = "other"
    endpointNameMetricTag     = "endpoint"
)

// Metrics is a collection of metrics for an endpoint describing
// throughput, success, errors, and performance.
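//
// Fields are initialized via metrics.Init (see below), which reads the `metric`
// and `tags` struct tags to register each counter and timer with the metrics.Factory.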
type Metrics struct {
    // RequestCountSuccess is a counter of the total number of successes.
    RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`

    // RequestCountFailures is a counter of the total number of failed requests.
    RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`

    // RequestLatencySuccess is a latency histogram of successful requests.
    RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`

    // RequestLatencyFailures is a latency histogram of failed requests.
    RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`

    // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
    HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`

    // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
    HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`

    // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
    HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`

    // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
    HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
}

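// recordHTTPStatusCode increments the counter that matches the status code's
// class (2xx-5xx); status codes outside the 200-599 range are ignored.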
func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
    if statusCode >= 200 && statusCode < 300 {
        m.HTTPStatusCode2xx.Inc(1)
    } else if statusCode >= 300 && statusCode < 400 {
        m.HTTPStatusCode3xx.Inc(1)
    } else if statusCode >= 400 && statusCode < 500 {
        m.HTTPStatusCode4xx.Inc(1)
    } else if statusCode >= 500 && statusCode < 600 {
        m.HTTPStatusCode5xx.Inc(1)
    }
}

// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
// Only maxNumberOfEndpoints Metrics are stored; all other endpoint names are mapped
// to a generic endpoint name "other".
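//
// A minimal usage sketch, assuming this package's DefaultNameNormalizer and
// jaeger-lib's metrics.NullFactory (any other metrics.Factory works the same way):
//
//     byEndpoint := newMetricsByEndpoint(metrics.NullFactory, DefaultNameNormalizer, 200)
//     met := byEndpoint.get("GET /users")
//     met.RequestCountSuccess.Inc(1)
//     met.recordHTTPStatusCode(200)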
type MetricsByEndpoint struct {
    metricsFactory    metrics.Factory
    endpoints         *normalizedEndpoints
    metricsByEndpoint map[string]*Metrics
    mux               sync.RWMutex
}

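// newMetricsByEndpoint creates a registry that tracks at most maxNumberOfEndpoints
// distinct endpoint names, plus the "other" placeholder for any overflow.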
func newMetricsByEndpoint(
    metricsFactory metrics.Factory,
    normalizer NameNormalizer,
    maxNumberOfEndpoints int,
) *MetricsByEndpoint {
    return &MetricsByEndpoint{
        metricsFactory:    metricsFactory,
        endpoints:         newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
        metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
    }
}

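// get returns the Metrics for the given endpoint, creating them lazily on first
// use. The name is normalized first; once the cap on distinct endpoints is
// reached, unrecognized endpoints share the "other" placeholder.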
func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
    safeName := m.endpoints.normalize(endpoint)
    if safeName == "" {
        safeName = otherEndpointsPlaceholder
    }
    m.mux.RLock()
    met := m.metricsByEndpoint[safeName]
    m.mux.RUnlock()
    if met != nil {
        return met
    }

    return m.getWithWriteLock(safeName)
}

// split into a separate method to make it easier to test
func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
    m.mux.Lock()
    defer m.mux.Unlock()

    // it is possible that the name has already been registered after we released
    // the read lock and before we grabbed the write lock, so check for that.
    if met, ok := m.metricsByEndpoint[safeName]; ok {
        return met
    }

    // it would be nice to create the struct before locking, since Init() is somewhat
    // expensive; however, some metrics backends (e.g. expvar) may not like duplicate metrics.
    met := &Metrics{}
    tags := map[string]string{endpointNameMetricTag: safeName}
    metrics.Init(met, m.metricsFactory, tags)

    m.metricsByEndpoint[safeName] = met
    return met
}