Add vendoring
All checks were successful
Gitea Actions Demo / Explore-Gitea-Actions (push) Successful in 50s
All checks were successful
Gitea Actions Demo / Explore-Gitea-Actions (push) Successful in 50s
This commit is contained in:
22
vendor/github.com/VictoriaMetrics/metrics/LICENSE
generated
vendored
Normal file
22
vendor/github.com/VictoriaMetrics/metrics/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2019 VictoriaMetrics
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
116
vendor/github.com/VictoriaMetrics/metrics/README.md
generated
vendored
Normal file
116
vendor/github.com/VictoriaMetrics/metrics/README.md
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
[](https://github.com/VictoriaMetrics/metrics/actions)
|
||||
[](http://godoc.org/github.com/VictoriaMetrics/metrics)
|
||||
[](https://goreportcard.com/report/github.com/VictoriaMetrics/metrics)
|
||||
[](https://codecov.io/gh/VictoriaMetrics/metrics)
|
||||
|
||||
|
||||
# metrics - lightweight package for exporting metrics in Prometheus format
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* Lightweight. Has minimal number of third-party dependencies and all these deps are small.
|
||||
See [this article](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d) for details.
|
||||
* Easy to use. See the [API docs](http://godoc.org/github.com/VictoriaMetrics/metrics).
|
||||
* Fast.
|
||||
* Allows exporting distinct metric sets via distinct endpoints. See [Set](http://godoc.org/github.com/VictoriaMetrics/metrics#Set).
|
||||
* Supports [easy-to-use histograms](http://godoc.org/github.com/VictoriaMetrics/metrics#Histogram), which just work without any tuning.
|
||||
Read more about VictoriaMetrics histograms at [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350).
|
||||
* Can push metrics to VictoriaMetrics or to any other remote storage, which accepts metrics
|
||||
in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format).
|
||||
See [these docs](http://godoc.org/github.com/VictoriaMetrics/metrics#InitPush).
|
||||
|
||||
|
||||
### Limitations
|
||||
|
||||
* It doesn't implement advanced functionality from [github.com/prometheus/client_golang](https://godoc.org/github.com/prometheus/client_golang).
|
||||
|
||||
|
||||
### Usage
|
||||
|
||||
```go
|
||||
import "github.com/VictoriaMetrics/metrics"
|
||||
|
||||
// Register various metrics.
|
||||
// Metric name may contain labels in Prometheus format - see below.
|
||||
var (
|
||||
// Register counter without labels.
|
||||
requestsTotal = metrics.NewCounter("requests_total")
|
||||
|
||||
// Register summary with a single label.
|
||||
requestDuration = metrics.NewSummary(`requests_duration_seconds{path="/foobar/baz"}`)
|
||||
|
||||
// Register gauge with two labels.
|
||||
queueSize = metrics.NewGauge(`queue_size{queue="foobar",topic="baz"}`, func() float64 {
|
||||
return float64(foobarQueue.Len())
|
||||
})
|
||||
|
||||
// Register histogram with a single label.
|
||||
responseSize = metrics.NewHistogram(`response_size{path="/foo/bar"}`)
|
||||
)
|
||||
|
||||
// ...
|
||||
func requestHandler() {
|
||||
// Increment requestsTotal counter.
|
||||
requestsTotal.Inc()
|
||||
|
||||
startTime := time.Now()
|
||||
processRequest()
|
||||
// Update requestDuration summary.
|
||||
requestDuration.UpdateDuration(startTime)
|
||||
|
||||
// Update responseSize histogram.
|
||||
responseSize.Update(float64(len(response)))
|
||||
}
|
||||
|
||||
// Expose the registered metrics at `/metrics` path.
|
||||
http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
|
||||
metrics.WritePrometheus(w, true)
|
||||
})
|
||||
|
||||
// ... or push registered metrics every 10 seconds to http://victoria-metrics:8428/api/v1/import/prometheus
|
||||
// with the added `instance="foobar"` label to all the pushed metrics.
|
||||
metrics.InitPush("http://victoria-metrics:8428/api/v1/import/prometheus", 10*time.Second, `instance="foobar"`, true)
|
||||
```
|
||||
|
||||
By default, exposed metrics [do not have](https://github.com/VictoriaMetrics/metrics/issues/48#issuecomment-1620765811)
|
||||
`TYPE` or `HELP` meta information. Call [`ExposeMetadata(true)`](https://pkg.go.dev/github.com/VictoriaMetrics/metrics#ExposeMetadata)
|
||||
in order to generate `TYPE` and `HELP` meta information per each metric.
|
||||
|
||||
See [docs](https://pkg.go.dev/github.com/VictoriaMetrics/metrics) for more info.
|
||||
|
||||
### Users
|
||||
|
||||
* `Metrics` has been extracted from [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) sources.
|
||||
See [this article](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac)
|
||||
for more info about `VictoriaMetrics`.
|
||||
|
||||
|
||||
### FAQ
|
||||
|
||||
#### Why the `metrics` API isn't compatible with `github.com/prometheus/client_golang`?
|
||||
|
||||
Because the `github.com/prometheus/client_golang` is too complex and is hard to use.
|
||||
|
||||
|
||||
#### Why the `metrics.WritePrometheus` doesn't expose documentation for each metric?
|
||||
|
||||
Because this documentation is ignored by Prometheus. The documentation is for users.
|
||||
Just give [meaningful names to the exported metrics](https://prometheus.io/docs/practices/naming/#metric-names)
|
||||
or add comments in the source code or in other suitable place explaining each metric exposed from your application.
|
||||
|
||||
|
||||
#### How to implement [CounterVec](https://godoc.org/github.com/prometheus/client_golang/prometheus#CounterVec) in `metrics`?
|
||||
|
||||
Just use [GetOrCreateCounter](http://godoc.org/github.com/VictoriaMetrics/metrics#GetOrCreateCounter)
|
||||
instead of `CounterVec.With`. See [this example](https://pkg.go.dev/github.com/VictoriaMetrics/metrics#example-Counter-Vec) for details.
|
||||
|
||||
|
||||
#### Why [Histogram](http://godoc.org/github.com/VictoriaMetrics/metrics#Histogram) buckets contain `vmrange` labels instead of `le` labels like in Prometheus histograms?
|
||||
|
||||
Buckets with `vmrange` labels occupy less disk space compared to Prometheus-style buckets with `le` labels,
|
||||
because `vmrange` buckets don't include counters for the previous ranges. [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) provides `prometheus_buckets`
|
||||
function, which converts `vmrange` buckets to Prometheus-style buckets with `le` labels. This is useful for building heatmaps in Grafana.
|
||||
Additionally, its `histogram_quantile` function transparently handles histogram buckets with `vmrange` labels.
|
||||
|
||||
However, for compatibility purposes package provides classic [Prometheus Histograms](http://godoc.org/github.com/VictoriaMetrics/metrics#PrometheusHistogram) with `le` labels.
|
||||
86
vendor/github.com/VictoriaMetrics/metrics/counter.go
generated
vendored
Normal file
86
vendor/github.com/VictoriaMetrics/metrics/counter.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// NewCounter registers and returns new counter with the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
func NewCounter(name string) *Counter {
|
||||
return defaultSet.NewCounter(name)
|
||||
}
|
||||
|
||||
// Counter is a monotonically increasing uint64 metric.
//
// It may also be used as a gauge if Dec and Set are called.
type Counter struct {
	n uint64
}

// Inc increments c by 1.
func (c *Counter) Inc() {
	atomic.AddUint64(&c.n, 1)
}

// Dec decrements c by 1.
func (c *Counter) Dec() {
	// Adding the two's complement of 1 (all bits set) subtracts 1
	// with wrap-around semantics.
	atomic.AddUint64(&c.n, ^uint64(0))
}

// Add adds n to c.
func (c *Counter) Add(n int) {
	atomic.AddUint64(&c.n, uint64(n))
}

// AddInt64 adds n to c.
func (c *Counter) AddInt64(n int64) {
	atomic.AddUint64(&c.n, uint64(n))
}

// Get returns the current value for c.
func (c *Counter) Get() uint64 {
	return atomic.LoadUint64(&c.n)
}

// Set sets c value to n.
func (c *Counter) Set(n uint64) {
	atomic.StoreUint64(&c.n, n)
}

// marshalTo writes c in Prometheus text exposition format with the given prefix.
func (c *Counter) marshalTo(prefix string, w io.Writer) {
	fmt.Fprintf(w, "%s %d\n", prefix, c.Get())
}

// metricType reports the Prometheus metric type for c.
func (c *Counter) metricType() string {
	return "counter"
}
|
||||
|
||||
// GetOrCreateCounter returns registered counter with the given name
|
||||
// or creates new counter if the registry doesn't contain counter with
|
||||
// the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewCounter instead of GetOrCreateCounter.
|
||||
func GetOrCreateCounter(name string) *Counter {
|
||||
return defaultSet.GetOrCreateCounter(name)
|
||||
}
|
||||
86
vendor/github.com/VictoriaMetrics/metrics/floatcounter.go
generated
vendored
Normal file
86
vendor/github.com/VictoriaMetrics/metrics/floatcounter.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// NewFloatCounter registers and returns new counter of float64 type with the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
func NewFloatCounter(name string) *FloatCounter {
|
||||
return defaultSet.NewFloatCounter(name)
|
||||
}
|
||||
|
||||
// FloatCounter is a float64 counter guarded by a mutex.
//
// It may be used as a gauge if Add and Sub are called.
type FloatCounter struct {
	mu sync.Mutex
	n  float64
}

// Add adds n to fc.
func (fc *FloatCounter) Add(n float64) {
	fc.mu.Lock()
	defer fc.mu.Unlock()
	fc.n += n
}

// Sub subtracts n from fc.
func (fc *FloatCounter) Sub(n float64) {
	fc.mu.Lock()
	defer fc.mu.Unlock()
	fc.n -= n
}

// Get returns the current value for fc.
func (fc *FloatCounter) Get() float64 {
	fc.mu.Lock()
	defer fc.mu.Unlock()
	return fc.n
}

// Set sets fc value to n.
func (fc *FloatCounter) Set(n float64) {
	fc.mu.Lock()
	defer fc.mu.Unlock()
	fc.n = n
}

// marshalTo writes fc in Prometheus text exposition format with the given prefix.
func (fc *FloatCounter) marshalTo(prefix string, w io.Writer) {
	fmt.Fprintf(w, "%s %g\n", prefix, fc.Get())
}

// metricType reports the Prometheus metric type for fc.
func (fc *FloatCounter) metricType() string {
	return "counter"
}
|
||||
|
||||
// GetOrCreateFloatCounter returns registered FloatCounter with the given name
|
||||
// or creates new FloatCounter if the registry doesn't contain FloatCounter with
|
||||
// the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned FloatCounter is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewFloatCounter instead of GetOrCreateFloatCounter.
|
||||
func GetOrCreateFloatCounter(name string) *FloatCounter {
|
||||
return defaultSet.GetOrCreateFloatCounter(name)
|
||||
}
|
||||
122
vendor/github.com/VictoriaMetrics/metrics/gauge.go
generated
vendored
Normal file
122
vendor/github.com/VictoriaMetrics/metrics/gauge.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// NewGauge registers and returns gauge with the given name, which calls f to obtain gauge value.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// f must be safe for concurrent calls.
|
||||
// if f is nil, then it is expected that the gauge value is changed via Set(), Inc(), Dec() and Add() calls.
|
||||
//
|
||||
// The returned gauge is safe to use from concurrent goroutines.
|
||||
//
|
||||
// See also FloatCounter for working with floating-point values.
|
||||
func NewGauge(name string, f func() float64) *Gauge {
|
||||
return defaultSet.NewGauge(name, f)
|
||||
}
|
||||
|
||||
// Gauge is a float64 gauge.
//
// The zero Gauge is usable via Set, Inc, Dec and Add; a Gauge created with a
// non-nil callback is read-only and returns the callback result from Get.
type Gauge struct {
	// valueBits contains uint64 representation of float64 passed to Gauge.Set.
	valueBits uint64

	// f is a callback, which is called for returning the gauge value.
	f func() float64
}

// Get returns the current value for g.
func (g *Gauge) Get() float64 {
	if f := g.f; f != nil {
		return f()
	}
	n := atomic.LoadUint64(&g.valueBits)
	return math.Float64frombits(n)
}

// Set sets g value to v.
//
// The g must be created with nil callback in order to be able to call this function.
func (g *Gauge) Set(v float64) {
	if g.f != nil {
		panic(fmt.Errorf("cannot call Set on gauge created with non-nil callback"))
	}
	n := math.Float64bits(v)
	atomic.StoreUint64(&g.valueBits, n)
}

// Inc increments g by 1.
//
// The g must be created with nil callback in order to be able to call this function.
func (g *Gauge) Inc() {
	g.Add(1)
}

// Dec decrements g by 1.
//
// The g must be created with nil callback in order to be able to call this function.
func (g *Gauge) Dec() {
	g.Add(-1)
}

// Add adds fAdd to g. fAdd may be positive and negative.
//
// The g must be created with nil callback in order to be able to call this function.
func (g *Gauge) Add(fAdd float64) {
	if g.f != nil {
		// Fixed: the message previously said "Set" (copy-paste from Gauge.Set),
		// which misattributed the failing call site.
		panic(fmt.Errorf("cannot call Add on gauge created with non-nil callback"))
	}
	// float64 addition has no single atomic op, so use a CAS loop:
	// re-read and retry until no concurrent update intervenes.
	for {
		n := atomic.LoadUint64(&g.valueBits)
		f := math.Float64frombits(n)
		fNew := f + fAdd
		nNew := math.Float64bits(fNew)
		if atomic.CompareAndSwapUint64(&g.valueBits, n, nNew) {
			break
		}
	}
}

// marshalTo writes g in Prometheus text exposition format with the given prefix.
func (g *Gauge) marshalTo(prefix string, w io.Writer) {
	v := g.Get()
	if float64(int64(v)) == v {
		// Marshal integer values without scientific notation
		fmt.Fprintf(w, "%s %d\n", prefix, int64(v))
	} else {
		fmt.Fprintf(w, "%s %g\n", prefix, v)
	}
}

// metricType reports the Prometheus metric type for g.
func (g *Gauge) metricType() string {
	return "gauge"
}
|
||||
|
||||
// GetOrCreateGauge returns registered gauge with the given name
|
||||
// or creates new gauge if the registry doesn't contain gauge with
|
||||
// the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned gauge is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
|
||||
//
|
||||
// See also FloatCounter for working with floating-point values.
|
||||
func GetOrCreateGauge(name string, f func() float64) *Gauge {
|
||||
return defaultSet.GetOrCreateGauge(name, f)
|
||||
}
|
||||
192
vendor/github.com/VictoriaMetrics/metrics/go_metrics.go
generated
vendored
Normal file
192
vendor/github.com/VictoriaMetrics/metrics/go_metrics.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"runtime"
|
||||
runtimemetrics "runtime/metrics"
|
||||
"strings"
|
||||
|
||||
"github.com/valyala/histogram"
|
||||
)
|
||||
|
||||
// runtimeMetrics maps runtime/metrics sample names to the Prometheus names
// they are exported under.
//
// See https://pkg.go.dev/runtime/metrics#hdr-Supported_metrics
var runtimeMetrics = [][2]string{
	{"/sched/latencies:seconds", "go_sched_latencies_seconds"},
	{"/sync/mutex/wait/total:seconds", "go_mutex_wait_seconds_total"},
	{"/cpu/classes/gc/mark/assist:cpu-seconds", "go_gc_mark_assist_cpu_seconds_total"},
	{"/cpu/classes/gc/total:cpu-seconds", "go_gc_cpu_seconds_total"},
	{"/gc/pauses:seconds", "go_gc_pauses_seconds"},
	{"/cpu/classes/scavenge/total:cpu-seconds", "go_scavenge_cpu_seconds_total"},
	{"/gc/gomemlimit:bytes", "go_memlimit_bytes"},
}

// supportedRuntimeMetrics holds the subset of runtimeMetrics actually exposed
// by the current Go runtime.
var supportedRuntimeMetrics = initSupportedRuntimeMetrics(runtimeMetrics)

// initSupportedRuntimeMetrics filters rms down to the entries whose
// runtime/metrics name exists in the running Go version, logging every
// dropped entry.
func initSupportedRuntimeMetrics(rms [][2]string) [][2]string {
	known := make(map[string]struct{})
	for _, d := range runtimemetrics.All() {
		known[d.Name] = struct{}{}
	}
	var supported [][2]string
	for _, rm := range rms {
		if _, ok := known[rm[0]]; ok {
			supported = append(supported, rm)
			continue
		}
		log.Printf("github.com/VictoriaMetrics/metrics: do not expose %s metric, since the corresponding metric %s isn't supported in the current Go runtime", rm[1], rm[0])
	}
	return supported
}
|
||||
|
||||
// writeGoMetrics writes Go runtime metrics to w in Prometheus text format:
// supported runtime/metrics samples, MemStats-derived gauges and counters,
// a GC pause summary, scheduler/thread gauges and build information.
func writeGoMetrics(w io.Writer) {
	writeRuntimeMetrics(w)

	// MemStats-derived memory metrics.
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	WriteGaugeUint64(w, "go_memstats_alloc_bytes", ms.Alloc)
	WriteCounterUint64(w, "go_memstats_alloc_bytes_total", ms.TotalAlloc)
	WriteGaugeUint64(w, "go_memstats_buck_hash_sys_bytes", ms.BuckHashSys)
	WriteCounterUint64(w, "go_memstats_frees_total", ms.Frees)
	WriteGaugeFloat64(w, "go_memstats_gc_cpu_fraction", ms.GCCPUFraction)
	WriteGaugeUint64(w, "go_memstats_gc_sys_bytes", ms.GCSys)

	WriteGaugeUint64(w, "go_memstats_heap_alloc_bytes", ms.HeapAlloc)
	WriteGaugeUint64(w, "go_memstats_heap_idle_bytes", ms.HeapIdle)
	WriteGaugeUint64(w, "go_memstats_heap_inuse_bytes", ms.HeapInuse)
	WriteGaugeUint64(w, "go_memstats_heap_objects", ms.HeapObjects)
	WriteGaugeUint64(w, "go_memstats_heap_released_bytes", ms.HeapReleased)
	WriteGaugeUint64(w, "go_memstats_heap_sys_bytes", ms.HeapSys)
	// LastGC is in nanoseconds since epoch; expose as fractional seconds.
	WriteGaugeFloat64(w, "go_memstats_last_gc_time_seconds", float64(ms.LastGC)/1e9)
	WriteCounterUint64(w, "go_memstats_lookups_total", ms.Lookups)
	WriteCounterUint64(w, "go_memstats_mallocs_total", ms.Mallocs)
	WriteGaugeUint64(w, "go_memstats_mcache_inuse_bytes", ms.MCacheInuse)
	WriteGaugeUint64(w, "go_memstats_mcache_sys_bytes", ms.MCacheSys)
	WriteGaugeUint64(w, "go_memstats_mspan_inuse_bytes", ms.MSpanInuse)
	WriteGaugeUint64(w, "go_memstats_mspan_sys_bytes", ms.MSpanSys)
	WriteGaugeUint64(w, "go_memstats_next_gc_bytes", ms.NextGC)
	WriteGaugeUint64(w, "go_memstats_other_sys_bytes", ms.OtherSys)
	WriteGaugeUint64(w, "go_memstats_stack_inuse_bytes", ms.StackInuse)
	WriteGaugeUint64(w, "go_memstats_stack_sys_bytes", ms.StackSys)
	WriteGaugeUint64(w, "go_memstats_sys_bytes", ms.Sys)

	WriteCounterUint64(w, "go_cgo_calls_count", uint64(runtime.NumCgoCall()))
	WriteGaugeUint64(w, "go_cpu_count", uint64(runtime.NumCPU()))

	// Build a quantile summary over the recent GC pauses recorded in
	// ms.PauseNs (a circular buffer of recent pauses, in nanoseconds).
	gcPauses := histogram.NewFast()
	for _, pauseNs := range ms.PauseNs[:] {
		gcPauses.Update(float64(pauseNs) / 1e9)
	}
	phis := []float64{0, 0.25, 0.5, 0.75, 1}
	quantiles := make([]float64, 0, len(phis))
	WriteMetadataIfNeeded(w, "go_gc_duration_seconds", "summary")
	for i, q := range gcPauses.Quantiles(quantiles[:0], phis) {
		fmt.Fprintf(w, `go_gc_duration_seconds{quantile="%g"} %g`+"\n", phis[i], q)
	}
	fmt.Fprintf(w, "go_gc_duration_seconds_sum %g\n", float64(ms.PauseTotalNs)/1e9)
	fmt.Fprintf(w, "go_gc_duration_seconds_count %d\n", ms.NumGC)

	WriteCounterUint64(w, "go_gc_forced_count", uint64(ms.NumForcedGC))

	// Scheduler / goroutine / OS thread gauges.
	WriteGaugeUint64(w, "go_gomaxprocs", uint64(runtime.GOMAXPROCS(0)))
	WriteGaugeUint64(w, "go_goroutines", uint64(runtime.NumGoroutine()))
	numThread, _ := runtime.ThreadCreateProfile(nil)
	WriteGaugeUint64(w, "go_threads", uint64(numThread))

	// Export build details.
	WriteMetadataIfNeeded(w, "go_info", "gauge")
	fmt.Fprintf(w, "go_info{version=%q} 1\n", runtime.Version())

	// NOTE(review): the label list below contains spaces after the commas,
	// which is unusual for Prometheus label syntax — confirm scrapers accept it.
	WriteMetadataIfNeeded(w, "go_info_ext", "gauge")
	fmt.Fprintf(w, "go_info_ext{compiler=%q, GOARCH=%q, GOOS=%q, GOROOT=%q} 1\n",
		runtime.Compiler, runtime.GOARCH, runtime.GOOS, runtime.GOROOT())
}
|
||||
|
||||
func writeRuntimeMetrics(w io.Writer) {
|
||||
samples := make([]runtimemetrics.Sample, len(supportedRuntimeMetrics))
|
||||
for i, rm := range supportedRuntimeMetrics {
|
||||
samples[i].Name = rm[0]
|
||||
}
|
||||
runtimemetrics.Read(samples)
|
||||
for i, rm := range supportedRuntimeMetrics {
|
||||
writeRuntimeMetric(w, rm[1], &samples[i])
|
||||
}
|
||||
}
|
||||
|
||||
func writeRuntimeMetric(w io.Writer, name string, sample *runtimemetrics.Sample) {
|
||||
kind := sample.Value.Kind()
|
||||
switch kind {
|
||||
case runtimemetrics.KindBad:
|
||||
panic(fmt.Errorf("BUG: unexpected runtimemetrics.KindBad for sample.Name=%q", sample.Name))
|
||||
case runtimemetrics.KindUint64:
|
||||
v := sample.Value.Uint64()
|
||||
if strings.HasSuffix(name, "_total") {
|
||||
WriteCounterUint64(w, name, v)
|
||||
} else {
|
||||
WriteGaugeUint64(w, name, v)
|
||||
}
|
||||
case runtimemetrics.KindFloat64:
|
||||
v := sample.Value.Float64()
|
||||
if isCounterName(name) {
|
||||
WriteCounterFloat64(w, name, v)
|
||||
} else {
|
||||
WriteGaugeFloat64(w, name, v)
|
||||
}
|
||||
case runtimemetrics.KindFloat64Histogram:
|
||||
h := sample.Value.Float64Histogram()
|
||||
writeRuntimeHistogramMetric(w, name, h)
|
||||
default:
|
||||
panic(fmt.Errorf("unexpected metric kind=%d", kind))
|
||||
}
|
||||
}
|
||||
|
||||
// writeRuntimeHistogramMetric writes h to w as a cumulative Prometheus
// histogram named name, downsampling the Go runtime buckets to at most
// maxRuntimeHistogramBuckets `le` buckets.
func writeRuntimeHistogramMetric(w io.Writer, name string, h *runtimemetrics.Float64Histogram) {
	buckets := h.Buckets
	counts := h.Counts
	// runtime/metrics guarantees buckets are boundaries around counts,
	// hence exactly one more boundary than counts.
	if len(buckets) != len(counts)+1 {
		panic(fmt.Errorf("the number of buckets must be bigger than the number of counts by 1 in histogram %s; got buckets=%d, counts=%d", name, len(buckets), len(counts)))
	}
	tailCount := uint64(0)
	if strings.HasSuffix(name, "_seconds") {
		// Limit the maximum bucket to 1 second, since Go runtime exposes buckets with 10K seconds,
		// which have little sense. At the same time such buckets may lead to high cardinality issues
		// at the scraper side. Counts trimmed here are accumulated into tailCount
		// and folded into the final +Inf bucket below.
		for len(buckets) > 0 && buckets[len(buckets)-1] > 1 {
			buckets = buckets[:len(buckets)-1]
			tailCount += counts[len(counts)-1]
			counts = counts[:len(counts)-1]
		}
	}

	// Emit roughly one `le` bucket every iStep source buckets so the output
	// never exceeds maxRuntimeHistogramBuckets entries.
	iStep := float64(len(buckets)) / maxRuntimeHistogramBuckets

	totalCount := uint64(0)
	iNext := 0.0
	WriteMetadataIfNeeded(w, name, "histogram")
	for i, count := range counts {
		// totalCount is cumulative, so buckets skipped by the downsampling
		// are folded into the next emitted `le` bucket.
		totalCount += count
		if float64(i) >= iNext {
			iNext += iStep
			le := buckets[i+1]
			if !math.IsInf(le, 1) {
				fmt.Fprintf(w, `%s_bucket{le="%g"} %d`+"\n", name, le, totalCount)
			}
		}
	}
	totalCount += tailCount
	fmt.Fprintf(w, `%s_bucket{le="+Inf"} %d`+"\n", name, totalCount)
	// _sum and _count are not exposed because the Go runtime histogram lacks accurate sum data.
	// Estimating the sum (as Prometheus does) could be misleading, while exposing only `_count` without `_sum` is impractical.
	// We can reconsider if precise sum data becomes available.
	//
	// References:
	// - Go runtime histogram: https://github.com/golang/go/blob/3432c68467d50ffc622fed230a37cd401d82d4bf/src/runtime/metrics/histogram.go#L8
	// - Prometheus estimate: https://github.com/prometheus/client_golang/blob/5fe1d33cea76068edd4ece5f58e52f81d225b13c/prometheus/go_collector_latest.go#L498
	// - Related discussion: https://github.com/VictoriaMetrics/metrics/issues/94
}

// Limit the number of buckets for Go runtime histograms in order to prevent from high cardinality issues at scraper side.
const maxRuntimeHistogramBuckets = 30
|
||||
270
vendor/github.com/VictoriaMetrics/metrics/histogram.go
generated
vendored
Normal file
270
vendor/github.com/VictoriaMetrics/metrics/histogram.go
generated
vendored
Normal file
@@ -0,0 +1,270 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// e10Min and e10Max bound the histogram range to [10^e10Min, 10^e10Max).
	// Values outside this range go to the dedicated lower/upper buckets.
	e10Min = -9
	e10Max = 18
	// bucketsPerDecimal is the number of log-scale buckets per decimal
	// order of magnitude.
	bucketsPerDecimal   = 18
	decimalBucketsCount = e10Max - e10Min
	bucketsCount        = decimalBucketsCount * bucketsPerDecimal
)

// bucketMultiplier is the ratio between the upper bounds of adjacent buckets.
var bucketMultiplier = math.Pow(10, 1.0/bucketsPerDecimal)
||||
|
||||
// Histogram is a histogram for non-negative values with automatically created buckets.
//
// See https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350
//
// Each bucket contains a counter for values in the given range.
// Each non-empty bucket is exposed via the following metric:
//
//	<metric_name>_bucket{<optional_tags>,vmrange="<start>...<end>"} <counter>
//
// Where:
//
//   - <metric_name> is the metric name passed to NewHistogram
//   - <optional_tags> is optional tags for the <metric_name>, which are passed to NewHistogram
//   - <start> and <end> - start and end values for the given bucket
//   - <counter> - the number of hits to the given bucket during Update* calls
//
// Histogram buckets can be converted to Prometheus-like buckets with `le` labels
// with `prometheus_buckets(<metric_name>_bucket)` function from PromQL extensions in VictoriaMetrics.
// (see https://docs.victoriametrics.com/victoriametrics/metricsql/ ):
//
//	prometheus_buckets(request_duration_bucket)
//
// Time series produced by the Histogram have better compression ratio comparing to
// Prometheus histogram buckets with `le` labels, since they don't include counters
// for all the previous buckets.
//
// Zero histogram is usable.
type Histogram struct {
	// mu guarantees synchronous update for all the counters and sum.
	//
	// Do not use sync.RWMutex, since it has zero sense from performance PoV.
	// It only complicates the code.
	mu sync.Mutex

	// decimalBuckets contains counters for histogram buckets.
	// Each decimal order of magnitude is lazily allocated on first hit.
	decimalBuckets [decimalBucketsCount]*[bucketsPerDecimal]uint64

	// lower is the number of values, which hit the lower bucket
	// (below 10^e10Min).
	lower uint64

	// upper is the number of values, which hit the upper bucket
	// (at or above 10^e10Max).
	upper uint64

	// sum is the sum of all the values put into Histogram
	sum float64
}

// Reset resets the given histogram: all bucket counters, the lower/upper
// out-of-range counters and the sum are zeroed. Allocated decimal buckets
// are kept and reused.
func (h *Histogram) Reset() {
	h.mu.Lock()
	for _, db := range h.decimalBuckets[:] {
		if db == nil {
			continue
		}
		for i := range db[:] {
			db[i] = 0
		}
	}
	h.lower = 0
	h.upper = 0
	h.sum = 0
	h.mu.Unlock()
}

// Update updates h with v.
//
// Negative values and NaNs are ignored.
func (h *Histogram) Update(v float64) {
	if math.IsNaN(v) || v < 0 {
		// Skip NaNs and negative values.
		return
	}
	// Log-scale bucket index: bucketsPerDecimal buckets per decimal order
	// of magnitude, shifted so that v == 10^e10Min maps to index 0.
	bucketIdx := (math.Log10(v) - e10Min) * bucketsPerDecimal
	h.mu.Lock()
	h.sum += v
	if bucketIdx < 0 {
		h.lower++
	} else if bucketIdx >= bucketsCount {
		h.upper++
	} else {
		idx := uint(bucketIdx)
		if bucketIdx == float64(idx) && idx > 0 {
			// Edge case for 10^n values, which must go to the lower bucket
			// according to Prometheus logic for `le`-based histograms.
			idx--
		}
		decimalBucketIdx := idx / bucketsPerDecimal
		offset := idx % bucketsPerDecimal
		db := h.decimalBuckets[decimalBucketIdx]
		if db == nil {
			// Lazily allocate the bucket array for this decimal range.
			var b [bucketsPerDecimal]uint64
			db = &b
			h.decimalBuckets[decimalBucketIdx] = db
		}
		db[offset]++
	}
	h.mu.Unlock()
}

// Merge merges src to h
//
// NOTE(review): locks h first, then src; two concurrent Merge calls in
// opposite directions could deadlock — confirm callers never do that.
func (h *Histogram) Merge(src *Histogram) {
	h.mu.Lock()
	defer h.mu.Unlock()

	src.mu.Lock()
	defer src.mu.Unlock()

	h.lower += src.lower
	h.upper += src.upper
	h.sum += src.sum

	for i, dbSrc := range src.decimalBuckets {
		if dbSrc == nil {
			continue
		}
		dbDst := h.decimalBuckets[i]
		if dbDst == nil {
			var b [bucketsPerDecimal]uint64
			dbDst = &b
			h.decimalBuckets[i] = dbDst
		}
		for j := range dbSrc {
			dbDst[j] += dbSrc[j]
		}
	}
}

// VisitNonZeroBuckets calls f for all buckets with non-zero counters.
//
// vmrange contains "<start>...<end>" string with bucket bounds. The lower bound
// isn't included in the bucket, while the upper bound is included.
// This is required to be compatible with Prometheus-style histogram buckets
// with `le` (less or equal) labels.
func (h *Histogram) VisitNonZeroBuckets(f func(vmrange string, count uint64)) {
	h.mu.Lock()
	// Visit in ascending range order: lower out-of-range bucket first,
	// then the regular buckets, then the upper out-of-range bucket.
	if h.lower > 0 {
		f(lowerBucketRange, h.lower)
	}
	for decimalBucketIdx, db := range h.decimalBuckets[:] {
		if db == nil {
			continue
		}
		for offset, count := range db[:] {
			if count > 0 {
				bucketIdx := decimalBucketIdx*bucketsPerDecimal + offset
				vmrange := getVMRange(bucketIdx)
				f(vmrange, count)
			}
		}
	}
	if h.upper > 0 {
		f(upperBucketRange, h.upper)
	}
	h.mu.Unlock()
}
|
||||
|
||||
// NewHistogram creates and returns new histogram with the given name,
// registered in the default set.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
//   - foo
//   - foo{bar="baz"}
//   - foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
func NewHistogram(name string) *Histogram {
	return defaultSet.NewHistogram(name)
}
|
||||
|
||||
// GetOrCreateHistogram returns registered histogram with the given name
// or creates new histogram if the registry doesn't contain histogram with
// the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
//   - foo
//   - foo{bar="baz"}
//   - foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewHistogram instead of GetOrCreateHistogram,
// since the latter performs a registry lookup on every call.
func GetOrCreateHistogram(name string) *Histogram {
	return defaultSet.GetOrCreateHistogram(name)
}
|
||||
|
||||
// UpdateDuration updates request duration based on the given startTime.
|
||||
func (h *Histogram) UpdateDuration(startTime time.Time) {
|
||||
d := time.Since(startTime).Seconds()
|
||||
h.Update(d)
|
||||
}
|
||||
|
||||
// getVMRange returns the "<start>...<end>" bounds string for the given bucketIdx.
// The bounds table is built lazily on first use.
func getVMRange(bucketIdx int) string {
	bucketRangesOnce.Do(initBucketRanges)
	return bucketRanges[bucketIdx]
}
|
||||
|
||||
func initBucketRanges() {
|
||||
v := math.Pow10(e10Min)
|
||||
start := fmt.Sprintf("%.3e", v)
|
||||
for i := 0; i < bucketsCount; i++ {
|
||||
v *= bucketMultiplier
|
||||
end := fmt.Sprintf("%.3e", v)
|
||||
bucketRanges[i] = start + "..." + end
|
||||
start = end
|
||||
}
|
||||
}
|
||||
|
||||
var (
	// lowerBucketRange covers values in (0 .. 10^e10Min].
	lowerBucketRange = fmt.Sprintf("0...%.3e", math.Pow10(e10Min))
	// upperBucketRange covers values above 10^e10Max.
	upperBucketRange = fmt.Sprintf("%.3e...+Inf", math.Pow10(e10Max))

	// bucketRanges holds per-bucket "<start>...<end>" strings.
	// It is populated lazily by initBucketRanges via bucketRangesOnce
	// (see getVMRange).
	bucketRanges     [bucketsCount]string
	bucketRangesOnce sync.Once
)
|
||||
|
||||
func (h *Histogram) marshalTo(prefix string, w io.Writer) {
|
||||
countTotal := uint64(0)
|
||||
h.VisitNonZeroBuckets(func(vmrange string, count uint64) {
|
||||
tag := fmt.Sprintf("vmrange=%q", vmrange)
|
||||
metricName := addTag(prefix, tag)
|
||||
name, labels := splitMetricName(metricName)
|
||||
fmt.Fprintf(w, "%s_bucket%s %d\n", name, labels, count)
|
||||
countTotal += count
|
||||
})
|
||||
if countTotal == 0 {
|
||||
return
|
||||
}
|
||||
name, labels := splitMetricName(prefix)
|
||||
sum := h.getSum()
|
||||
if float64(int64(sum)) == sum {
|
||||
fmt.Fprintf(w, "%s_sum%s %d\n", name, labels, int64(sum))
|
||||
} else {
|
||||
fmt.Fprintf(w, "%s_sum%s %g\n", name, labels, sum)
|
||||
}
|
||||
fmt.Fprintf(w, "%s_count%s %d\n", name, labels, countTotal)
|
||||
}
|
||||
|
||||
func (h *Histogram) getSum() float64 {
|
||||
h.mu.Lock()
|
||||
sum := h.sum
|
||||
h.mu.Unlock()
|
||||
return sum
|
||||
}
|
||||
|
||||
// metricType implements the metric interface.
func (h *Histogram) metricType() string {
	return "histogram"
}
|
||||
355
vendor/github.com/VictoriaMetrics/metrics/metrics.go
generated
vendored
Normal file
355
vendor/github.com/VictoriaMetrics/metrics/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,355 @@
|
||||
// Package metrics implements Prometheus-compatible metrics for applications.
|
||||
//
|
||||
// This package is lightweight alternative to https://github.com/prometheus/client_golang
|
||||
// with simpler API and smaller dependencies.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// 1. Register the required metrics via New* functions.
|
||||
// 2. Expose them to `/metrics` page via WritePrometheus.
|
||||
// 3. Update the registered metrics during application lifetime.
|
||||
//
|
||||
// The package has been extracted from https://victoriametrics.com/
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// namedMetric couples a metric with its full Prometheus name (including labels).
type namedMetric struct {
	name   string
	metric metric
	// isAux — presumably marks auxiliary metrics registered internally
	// alongside user-created ones; confirm against the Set implementation.
	isAux bool
}
|
||||
|
||||
// metric is implemented by all metric kinds exported by this package
// (e.g. Histogram — see its metricType/marshalTo methods).
type metric interface {
	// marshalTo writes the metric in Prometheus text exposition format to w,
	// using prefix as the metric name with optional labels.
	marshalTo(prefix string, w io.Writer)
	// metricType returns the Prometheus metric type, e.g. "histogram".
	metricType() string
}
|
||||
|
||||
// defaultSet is the metrics set used by the package-level functions
// (NewHistogram, WritePrometheus, etc.).
var defaultSet = NewSet()

func init() {
	// Export the default set via the global WritePrometheus() call.
	RegisterSet(defaultSet)
}
|
||||
|
||||
var (
	// registeredSets holds all the sets exported via the global
	// WritePrometheus() call. Guarded by registeredSetsLock.
	registeredSets     = make(map[*Set]struct{})
	registeredSetsLock sync.Mutex
)
|
||||
|
||||
// RegisterSet registers the given set s for metrics export via global WritePrometheus() call.
|
||||
//
|
||||
// See also UnregisterSet.
|
||||
func RegisterSet(s *Set) {
|
||||
registeredSetsLock.Lock()
|
||||
registeredSets[s] = struct{}{}
|
||||
registeredSetsLock.Unlock()
|
||||
}
|
||||
|
||||
// UnregisterSet stops exporting metrics for the given s via global WritePrometheus() call.
//
// If destroySet is set to true, then s.UnregisterAllMetrics() is called on s after unregistering it,
// so s becomes destroyed. Otherwise the s can be registered again in the set by passing it to RegisterSet().
func UnregisterSet(s *Set, destroySet bool) {
	registeredSetsLock.Lock()
	delete(registeredSets, s)
	registeredSetsLock.Unlock()

	// The destroy step runs outside the lock on purpose — see lock release above.
	if destroySet {
		s.UnregisterAllMetrics()
	}
}
|
||||
|
||||
// RegisterMetricsWriter registers writeMetrics callback for including metrics in the output generated by WritePrometheus.
//
// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
// The last line generated by writeMetrics must end with \n.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is OK to register multiple writeMetrics callbacks - all of them will be called sequentially for generating the output at WritePrometheus.
func RegisterMetricsWriter(writeMetrics func(w io.Writer)) {
	defaultSet.RegisterMetricsWriter(writeMetrics)
}
|
||||
|
||||
// WritePrometheus writes all the metrics in Prometheus format from the default set, all the added sets and metrics writers to w.
//
// Additional sets can be registered via RegisterSet() call.
// Additional metric writers can be registered via RegisterMetricsWriter() call.
//
// If exposeProcessMetrics is true, then various `go_*` and `process_*` metrics
// are exposed for the current process.
//
// The WritePrometheus func is usually called inside "/metrics" handler:
//
//	http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
//	    metrics.WritePrometheus(w, true)
//	})
func WritePrometheus(w io.Writer, exposeProcessMetrics bool) {
	// Snapshot the registered sets under the lock, then write without holding it,
	// so slow writers don't block Register/Unregister calls.
	registeredSetsLock.Lock()
	sets := make([]*Set, 0, len(registeredSets))
	for s := range registeredSets {
		sets = append(sets, s)
	}
	registeredSetsLock.Unlock()

	// Sort the sets by pointer value so the output order is stable across calls.
	sort.Slice(sets, func(i, j int) bool {
		return uintptr(unsafe.Pointer(sets[i])) < uintptr(unsafe.Pointer(sets[j]))
	})
	for _, s := range sets {
		s.WritePrometheus(w)
	}
	if exposeProcessMetrics {
		WriteProcessMetrics(w)
	}
}
|
||||
|
||||
// WriteProcessMetrics writes additional process metrics in Prometheus format to w.
|
||||
//
|
||||
// The following `go_*` and `process_*` metrics are exposed for the currently
|
||||
// running process. Below is a short description for the exposed `process_*` metrics:
|
||||
//
|
||||
// - process_cpu_seconds_system_total - CPU time spent in syscalls
|
||||
//
|
||||
// - process_cpu_seconds_user_total - CPU time spent in userspace
|
||||
//
|
||||
// - process_cpu_seconds_total - CPU time spent by the process
|
||||
//
|
||||
// - process_major_pagefaults_total - page faults resulted in disk IO
|
||||
//
|
||||
// - process_minor_pagefaults_total - page faults resolved without disk IO
|
||||
//
|
||||
// - process_resident_memory_bytes - recently accessed memory (aka RSS or resident memory)
|
||||
//
|
||||
// - process_resident_memory_peak_bytes - the maximum RSS memory usage
|
||||
//
|
||||
// - process_resident_memory_anon_bytes - RSS for memory-mapped files
|
||||
//
|
||||
// - process_resident_memory_file_bytes - RSS for memory allocated by the process
|
||||
//
|
||||
// - process_resident_memory_shared_bytes - RSS for memory shared between multiple processes
|
||||
//
|
||||
// - process_virtual_memory_bytes - virtual memory usage
|
||||
//
|
||||
// - process_virtual_memory_peak_bytes - the maximum virtual memory usage
|
||||
//
|
||||
// - process_num_threads - the number of threads
|
||||
//
|
||||
// - process_start_time_seconds - process start time as unix timestamp
|
||||
//
|
||||
// - process_io_read_bytes_total - the number of bytes read via syscalls
|
||||
//
|
||||
// - process_io_written_bytes_total - the number of bytes written via syscalls
|
||||
//
|
||||
// - process_io_read_syscalls_total - the number of read syscalls
|
||||
//
|
||||
// - process_io_write_syscalls_total - the number of write syscalls
|
||||
//
|
||||
// - process_io_storage_read_bytes_total - the number of bytes actually read from disk
|
||||
//
|
||||
// - process_io_storage_written_bytes_total - the number of bytes actually written to disk
|
||||
//
|
||||
// - process_pressure_cpu_waiting_seconds_total - the number of seconds processes in the current cgroup v2 were waiting to be executed
|
||||
//
|
||||
// - process_pressure_cpu_stalled_seconds_total - the number of seconds all the processes in the current cgroup v2 were stalled
|
||||
//
|
||||
// - process_pressure_io_waiting_seconds_total - the number of seconds processes in the current cgroup v2 were waiting for io to complete
|
||||
//
|
||||
// - process_pressure_io_stalled_seconds_total - the number of seconds all the processes in the current cgroup v2 were waiting for io to complete
|
||||
//
|
||||
// - process_pressure_memory_waiting_seconds_total - the number of seconds processes in the current cgroup v2 were waiting for memory access to complete
|
||||
//
|
||||
// - process_pressure_memory_stalled_seconds_total - the number of seconds all the processes in the current cgroup v2 were waiting for memory access to complete
|
||||
//
|
||||
// - go_sched_latencies_seconds - time spent by goroutines in ready state before they start execution
|
||||
//
|
||||
// - go_mutex_wait_seconds_total - summary time spent by all the goroutines while waiting for locked mutex
|
||||
//
|
||||
// - go_gc_mark_assist_cpu_seconds_total - summary CPU time spent by goroutines in GC mark assist state
|
||||
//
|
||||
// - go_gc_cpu_seconds_total - summary time spent in GC
|
||||
//
|
||||
// - go_gc_pauses_seconds - duration of GC pauses
|
||||
//
|
||||
// - go_scavenge_cpu_seconds_total - CPU time spent on returning the memory to OS
|
||||
//
|
||||
// - go_memlimit_bytes - the GOMEMLIMIT env var value
|
||||
//
|
||||
// - go_memstats_alloc_bytes - memory usage for Go objects in the heap
|
||||
//
|
||||
// - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects
|
||||
//
|
||||
// - go_memstats_buck_hash_sys_bytes - bytes of memory in profiling bucket hash tables
|
||||
//
|
||||
// - go_memstats_frees_total - the cumulative counter for number of freed Go objects
|
||||
//
|
||||
// - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector
|
||||
//
|
||||
// - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata
|
||||
//
|
||||
// - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes
|
||||
//
|
||||
// - go_memstats_heap_idle_bytes - idle memory ready for new Go object allocations
|
||||
//
|
||||
// - go_memstats_heap_inuse_bytes - bytes in in-use spans
|
||||
//
|
||||
// - go_memstats_heap_objects - the number of Go objects in the heap
|
||||
//
|
||||
// - go_memstats_heap_released_bytes - bytes of physical memory returned to the OS
|
||||
//
|
||||
// - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS
|
||||
//
|
||||
// - go_memstats_last_gc_time_seconds - unix timestamp the last garbage collection finished
|
||||
//
|
||||
// - go_memstats_lookups_total - the number of pointer lookups performed by the runtime
|
||||
//
|
||||
// - go_memstats_mallocs_total - the number of allocations for Go objects
|
||||
//
|
||||
// - go_memstats_mcache_inuse_bytes - bytes of allocated mcache structures
|
||||
//
|
||||
// - go_memstats_mcache_sys_bytes - bytes of memory obtained from the OS for mcache structures
|
||||
//
|
||||
// - go_memstats_mspan_inuse_bytes - bytes of allocated mspan structures
|
||||
//
|
||||
// - go_memstats_mspan_sys_bytes - bytes of memory obtained from the OS for mspan structures
|
||||
//
|
||||
// - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start
|
||||
//
|
||||
// - go_memstats_other_sys_bytes - bytes of memory in miscellaneous off-heap runtime allocations
|
||||
//
|
||||
// - go_memstats_stack_inuse_bytes - memory used for goroutine stacks
|
||||
//
|
||||
// - go_memstats_stack_sys_bytes - memory requested from the OS for goroutine stacks
|
||||
//
|
||||
// - go_memstats_sys_bytes - memory requested by Go runtime from the OS
|
||||
//
|
||||
// - go_cgo_calls_count - the total number of CGO calls
|
||||
//
|
||||
// - go_cpu_count - the number of CPU cores on the host where the app runs
|
||||
//
|
||||
// The WriteProcessMetrics func is usually called in combination with writing Set metrics
|
||||
// inside "/metrics" handler:
|
||||
//
|
||||
// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
|
||||
// mySet.WritePrometheus(w)
|
||||
// metrics.WriteProcessMetrics(w)
|
||||
// })
|
||||
//
|
||||
// See also WriteFDMetrics.
|
||||
func WriteProcessMetrics(w io.Writer) {
	writeGoMetrics(w)      // go_* runtime metrics
	writeProcessMetrics(w) // process_* metrics (platform-specific implementation)
	writePushMetrics(w)    // metrics related to the package's push support
}
|
||||
|
||||
// WriteFDMetrics writes `process_max_fds` and `process_open_fds` metrics to w.
//
// These are kept out of WriteProcessMetrics because enumerating open file
// descriptors can be expensive — see the comment in writeProcessMetrics.
func WriteFDMetrics(w io.Writer) {
	writeFDMetrics(w)
}
|
||||
|
||||
// UnregisterMetric removes metric with the given name from default set.
// It reports whether a metric with that name was registered.
//
// See also UnregisterAllMetrics.
func UnregisterMetric(name string) bool {
	return defaultSet.UnregisterMetric(name)
}
|
||||
|
||||
// UnregisterAllMetrics unregisters all the metrics from default set.
//
// It also unregisters writeMetrics callbacks passed to RegisterMetricsWriter.
func UnregisterAllMetrics() {
	defaultSet.UnregisterAllMetrics()
}
|
||||
|
||||
// ListMetricNames returns sorted list of all the metric names from default set.
func ListMetricNames() []string {
	return defaultSet.ListMetricNames()
}
|
||||
|
||||
// GetDefaultSet returns the default metrics set used by the package-level functions.
func GetDefaultSet() *Set {
	return defaultSet
}
|
||||
|
||||
// ExposeMetadata allows enabling adding TYPE and HELP metadata to the exposed metrics globally.
|
||||
//
|
||||
// It is safe to call this method multiple times. It is allowed to change it in runtime.
|
||||
// ExposeMetadata is set to false by default.
|
||||
func ExposeMetadata(v bool) {
|
||||
n := 0
|
||||
if v {
|
||||
n = 1
|
||||
}
|
||||
atomic.StoreUint32(&exposeMetadata, uint32(n))
|
||||
}
|
||||
|
||||
func isMetadataEnabled() bool {
|
||||
n := atomic.LoadUint32(&exposeMetadata)
|
||||
return n != 0
|
||||
}
|
||||
|
||||
// exposeMetadata is a boolean flag (0 or 1) accessed atomically; it controls
// whether HELP/TYPE metadata is written. See ExposeMetadata.
var exposeMetadata uint32
|
||||
|
||||
// isCounterName reports whether name follows the Prometheus counter naming
// convention, i.e. ends with "_total".
func isCounterName(name string) bool {
	const counterSuffix = "_total"
	return strings.HasSuffix(name, counterSuffix)
}
|
||||
|
||||
// WriteGaugeUint64 writes gauge metric with the given name and value to w
// in Prometheus text exposition format.
func WriteGaugeUint64(w io.Writer, name string, value uint64) {
	writeMetricUint64(w, name, "gauge", value)
}
|
||||
|
||||
// WriteGaugeFloat64 writes gauge metric with the given name and value to w
// in Prometheus text exposition format.
func WriteGaugeFloat64(w io.Writer, name string, value float64) {
	writeMetricFloat64(w, name, "gauge", value)
}
|
||||
|
||||
// WriteCounterUint64 writes counter metric with the given name and value to w
// in Prometheus text exposition format.
func WriteCounterUint64(w io.Writer, name string, value uint64) {
	writeMetricUint64(w, name, "counter", value)
}
|
||||
|
||||
// WriteCounterFloat64 writes counter metric with the given name and value to w
// in Prometheus text exposition format.
func WriteCounterFloat64(w io.Writer, name string, value float64) {
	writeMetricFloat64(w, name, "counter", value)
}
|
||||
|
||||
// writeMetricUint64 writes a single uint64 metric line to w, preceded by
// HELP/TYPE metadata when metadata exposition is enabled.
func writeMetricUint64(w io.Writer, metricName, metricType string, value uint64) {
	WriteMetadataIfNeeded(w, metricName, metricType)
	fmt.Fprintf(w, "%s %d\n", metricName, value)
}
|
||||
|
||||
// writeMetricFloat64 writes a single float64 metric line to w, preceded by
// HELP/TYPE metadata when metadata exposition is enabled.
func writeMetricFloat64(w io.Writer, metricName, metricType string, value float64) {
	WriteMetadataIfNeeded(w, metricName, metricType)
	fmt.Fprintf(w, "%s %g\n", metricName, value)
}
|
||||
|
||||
// WriteMetadataIfNeeded writes HELP and TYPE metadata for the given metricName and metricType if this is globally enabled via ExposeMetadata().
|
||||
//
|
||||
// If the metadata exposition isn't enabled, then this function is no-op.
|
||||
func WriteMetadataIfNeeded(w io.Writer, metricName, metricType string) {
|
||||
if !isMetadataEnabled() {
|
||||
return
|
||||
}
|
||||
metricFamily := getMetricFamily(metricName)
|
||||
writeMetadata(w, metricFamily, metricType)
|
||||
}
|
||||
|
||||
func writeMetadata(w io.Writer, metricFamily, metricType string) {
|
||||
fmt.Fprintf(w, "# HELP %s\n", metricFamily)
|
||||
fmt.Fprintf(w, "# TYPE %s %s\n", metricFamily, metricType)
|
||||
}
|
||||
|
||||
// getMetricFamily returns the metric family for metricName, i.e. the name
// with any `{...}` labels part stripped.
func getMetricFamily(metricName string) string {
	if n := strings.IndexByte(metricName, '{'); n >= 0 {
		return metricName[:n]
	}
	return metricName
}
|
||||
419
vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go
generated
vendored
Normal file
419
vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go
generated
vendored
Normal file
@@ -0,0 +1,419 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// userHZ is the number of kernel clock ticks per second used when reporting
// CPU times in /proc/<pid>/stat.
//
// See https://github.com/prometheus/procfs/blob/a4ac0826abceb44c40fc71daed2b301db498b93e/proc_stat.go#L40 .
const userHZ = 100
|
||||
|
||||
// pageSizeBytes is used to convert the Rss page count from /proc/self/stat
// into bytes. Different environments may have different page size.
//
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6457
var pageSizeBytes = uint64(os.Getpagesize())
|
||||
|
||||
// procStat holds the fields of /proc/<pid>/stat that follow the "(comm)"
// entry, in the order they appear in the file (see the Fscanf in
// writeProcessMetrics).
//
// See http://man7.org/linux/man-pages/man5/proc.5.html
type procStat struct {
	State       byte
	Ppid        int
	Pgrp        int
	Session     int
	TtyNr       int
	Tpgid       int
	Flags       uint
	Minflt      uint
	Cminflt     uint
	Majflt      uint
	Cmajflt     uint
	Utime       uint
	Stime       uint
	Cutime      int
	Cstime      int
	Priority    int
	Nice        int
	NumThreads  int
	ItrealValue int
	Starttime   uint64
	Vsize       uint
	Rss         int
}
|
||||
|
||||
// writeProcessMetrics writes process_* metrics parsed from /proc/self/stat to w.
// On any parse failure the error is logged and nothing is written.
func writeProcessMetrics(w io.Writer) {
	statFilepath := "/proc/self/stat"
	data, err := ioutil.ReadFile(statFilepath)
	if err != nil {
		log.Printf("ERROR: metrics: cannot open %s: %s", statFilepath, err)
		return
	}

	// Search for the end of command.
	// The command is wrapped in parentheses and may contain spaces, so skip
	// past the last ") " before scanning the remaining space-separated fields.
	n := bytes.LastIndex(data, []byte(") "))
	if n < 0 {
		log.Printf("ERROR: metrics: cannot find command in parentheses in %q read from %s", data, statFilepath)
		return
	}
	data = data[n+2:]

	var p procStat
	bb := bytes.NewBuffer(data)
	_, err = fmt.Fscanf(bb, "%c %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d",
		&p.State, &p.Ppid, &p.Pgrp, &p.Session, &p.TtyNr, &p.Tpgid, &p.Flags, &p.Minflt, &p.Cminflt, &p.Majflt, &p.Cmajflt,
		&p.Utime, &p.Stime, &p.Cutime, &p.Cstime, &p.Priority, &p.Nice, &p.NumThreads, &p.ItrealValue, &p.Starttime, &p.Vsize, &p.Rss)
	if err != nil {
		log.Printf("ERROR: metrics: cannot parse %q read from %s: %s", data, statFilepath, err)
		return
	}

	// It is expensive obtaining `process_open_fds` when big number of file descriptors is opened,
	// so don't do it here.
	// See writeFDMetrics instead.

	// CPU times in /proc/<pid>/stat are in clock ticks; divide by userHZ for seconds.
	utime := float64(p.Utime) / userHZ
	stime := float64(p.Stime) / userHZ

	// Calculate totalTime by dividing the sum of p.Utime and p.Stime by userHZ.
	// This reduces possible floating-point precision loss
	totalTime := float64(p.Utime+p.Stime) / userHZ

	WriteCounterFloat64(w, "process_cpu_seconds_system_total", stime)
	WriteCounterFloat64(w, "process_cpu_seconds_total", totalTime)
	WriteCounterFloat64(w, "process_cpu_seconds_user_total", utime)
	WriteCounterUint64(w, "process_major_pagefaults_total", uint64(p.Majflt))
	WriteCounterUint64(w, "process_minor_pagefaults_total", uint64(p.Minflt))
	WriteGaugeUint64(w, "process_num_threads", uint64(p.NumThreads))
	// Rss is a page count; convert to bytes via the OS page size.
	WriteGaugeUint64(w, "process_resident_memory_bytes", uint64(p.Rss)*pageSizeBytes)
	WriteGaugeUint64(w, "process_start_time_seconds", uint64(startTimeSeconds))
	WriteGaugeUint64(w, "process_virtual_memory_bytes", uint64(p.Vsize))
	writeProcessMemMetrics(w)
	writeIOMetrics(w)
	writePSIMetrics(w)
}
|
||||
|
||||
// procSelfIOErrLogged is set to 1 (via CAS) after the first failure to read
// /proc/self/io, so the error is logged at most once per process lifetime.
var procSelfIOErrLogged uint32
|
||||
|
||||
// writeIOMetrics writes process_io_* metrics parsed from /proc/self/io to w.
func writeIOMetrics(w io.Writer) {
	ioFilepath := "/proc/self/io"
	data, err := ioutil.ReadFile(ioFilepath)
	if err != nil {
		// Do not spam the logs with errors - this error cannot be fixed without process restart.
		// See https://github.com/VictoriaMetrics/metrics/issues/42
		if atomic.CompareAndSwapUint32(&procSelfIOErrLogged, 0, 1) {
			log.Printf("ERROR: metrics: cannot read process_io_* metrics from %q, so these metrics won't be updated until the error is fixed; "+
				"see https://github.com/VictoriaMetrics/metrics/issues/42 ; The error: %s", ioFilepath, err)
		}
	}
	// NOTE(review): there is no early return after the read error above, so the
	// metrics below are still written (with zero values) from the empty data.
	// Confirm this is intended — the log message suggests the metrics should
	// simply stop updating.

	// getInt extracts the integer from a "name: value" line, returning 0
	// (and logging) on malformed input.
	getInt := func(s string) int64 {
		n := strings.IndexByte(s, ' ')
		if n < 0 {
			log.Printf("ERROR: metrics: cannot find whitespace in %q at %q", s, ioFilepath)
			return 0
		}
		v, err := strconv.ParseInt(s[n+1:], 10, 64)
		if err != nil {
			log.Printf("ERROR: metrics: cannot parse %q at %q: %s", s, ioFilepath, err)
			return 0
		}
		return v
	}
	var rchar, wchar, syscr, syscw, readBytes, writeBytes int64
	lines := strings.Split(string(data), "\n")
	for _, s := range lines {
		s = strings.TrimSpace(s)
		switch {
		case strings.HasPrefix(s, "rchar: "):
			rchar = getInt(s)
		case strings.HasPrefix(s, "wchar: "):
			wchar = getInt(s)
		case strings.HasPrefix(s, "syscr: "):
			syscr = getInt(s)
		case strings.HasPrefix(s, "syscw: "):
			syscw = getInt(s)
		case strings.HasPrefix(s, "read_bytes: "):
			readBytes = getInt(s)
		case strings.HasPrefix(s, "write_bytes: "):
			writeBytes = getInt(s)
		}
	}
	WriteGaugeUint64(w, "process_io_read_bytes_total", uint64(rchar))
	WriteGaugeUint64(w, "process_io_written_bytes_total", uint64(wchar))
	WriteGaugeUint64(w, "process_io_read_syscalls_total", uint64(syscr))
	WriteGaugeUint64(w, "process_io_write_syscalls_total", uint64(syscw))
	WriteGaugeUint64(w, "process_io_storage_read_bytes_total", uint64(readBytes))
	WriteGaugeUint64(w, "process_io_storage_written_bytes_total", uint64(writeBytes))
}
|
||||
|
||||
// startTimeSeconds is the unix timestamp captured at package initialization;
// it is exposed as process_start_time_seconds by writeProcessMetrics.
var startTimeSeconds = time.Now().Unix()
|
||||
|
||||
// writeFDMetrics writes process_max_fds and process_open_fds metrics to w.
|
||||
func writeFDMetrics(w io.Writer) {
|
||||
totalOpenFDs, err := getOpenFDsCount("/proc/self/fd")
|
||||
if err != nil {
|
||||
log.Printf("ERROR: metrics: cannot determine open file descriptors count: %s", err)
|
||||
return
|
||||
}
|
||||
maxOpenFDs, err := getMaxFilesLimit("/proc/self/limits")
|
||||
if err != nil {
|
||||
log.Printf("ERROR: metrics: cannot determine the limit on open file descritors: %s", err)
|
||||
return
|
||||
}
|
||||
WriteGaugeUint64(w, "process_max_fds", maxOpenFDs)
|
||||
WriteGaugeUint64(w, "process_open_fds", totalOpenFDs)
|
||||
}
|
||||
|
||||
func getOpenFDsCount(path string) (uint64, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer f.Close()
|
||||
var totalOpenFDs uint64
|
||||
for {
|
||||
names, err := f.Readdirnames(512)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("unexpected error at Readdirnames: %s", err)
|
||||
}
|
||||
totalOpenFDs += uint64(len(names))
|
||||
}
|
||||
return totalOpenFDs, nil
|
||||
}
|
||||
|
||||
func getMaxFilesLimit(path string) (uint64, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
lines := strings.Split(string(data), "\n")
|
||||
const prefix = "Max open files"
|
||||
for _, s := range lines {
|
||||
if !strings.HasPrefix(s, prefix) {
|
||||
continue
|
||||
}
|
||||
text := strings.TrimSpace(s[len(prefix):])
|
||||
// Extract soft limit.
|
||||
n := strings.IndexByte(text, ' ')
|
||||
if n < 0 {
|
||||
return 0, fmt.Errorf("cannot extract soft limit from %q", s)
|
||||
}
|
||||
text = text[:n]
|
||||
if text == "unlimited" {
|
||||
return 1<<64 - 1, nil
|
||||
}
|
||||
limit, err := strconv.ParseUint(text, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse soft limit from %q: %s", s, err)
|
||||
}
|
||||
return limit, nil
|
||||
}
|
||||
return 0, fmt.Errorf("cannot find max open files limit")
|
||||
}
|
||||
|
||||
// memStats holds memory stats parsed from /proc/<pid>/status, converted to bytes.
// The field-to-line mapping is done in getMemStats.
//
// https://man7.org/linux/man-pages/man5/procfs.5.html
type memStats struct {
	vmPeak   uint64 // VmPeak
	rssPeak  uint64 // VmHWM
	rssAnon  uint64 // RssAnon
	rssFile  uint64 // RssFile
	rssShmem uint64 // RssShmem
}
|
||||
|
||||
// writeProcessMemMetrics writes process memory metrics parsed from
// /proc/self/status to w. On parse failure the error is logged and nothing
// is written.
func writeProcessMemMetrics(w io.Writer) {
	ms, err := getMemStats("/proc/self/status")
	if err != nil {
		log.Printf("ERROR: metrics: cannot determine memory status: %s", err)
		return
	}
	WriteGaugeUint64(w, "process_virtual_memory_peak_bytes", ms.vmPeak)
	WriteGaugeUint64(w, "process_resident_memory_peak_bytes", ms.rssPeak)
	WriteGaugeUint64(w, "process_resident_memory_anon_bytes", ms.rssAnon)
	WriteGaugeUint64(w, "process_resident_memory_file_bytes", ms.rssFile)
	WriteGaugeUint64(w, "process_resident_memory_shared_bytes", ms.rssShmem)
}
|
||||
|
||||
func getMemStats(path string) (*memStats, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var ms memStats
|
||||
lines := strings.Split(string(data), "\n")
|
||||
for _, s := range lines {
|
||||
if !strings.HasPrefix(s, "Vm") && !strings.HasPrefix(s, "Rss") {
|
||||
continue
|
||||
}
|
||||
// Extract key value.
|
||||
line := strings.Fields(s)
|
||||
if len(line) != 3 {
|
||||
return nil, fmt.Errorf("unexpected number of fields found in %q; got %d; want %d", s, len(line), 3)
|
||||
}
|
||||
memStatName := line[0]
|
||||
memStatValue := line[1]
|
||||
value, err := strconv.ParseUint(memStatValue, 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse number from %q: %w", s, err)
|
||||
}
|
||||
if line[2] != "kB" {
|
||||
return nil, fmt.Errorf("expecting kB value in %q; got %q", s, line[2])
|
||||
}
|
||||
value *= 1024
|
||||
switch memStatName {
|
||||
case "VmPeak:":
|
||||
ms.vmPeak = value
|
||||
case "VmHWM:":
|
||||
ms.rssPeak = value
|
||||
case "RssAnon:":
|
||||
ms.rssAnon = value
|
||||
case "RssFile:":
|
||||
ms.rssFile = value
|
||||
case "RssShmem:":
|
||||
ms.rssShmem = value
|
||||
}
|
||||
}
|
||||
return &ms, nil
|
||||
}
|
||||
|
||||
// writePSIMetrics writes PSI total metrics for the current process to w.
//
// See https://docs.kernel.org/accounting/psi.html
func writePSIMetrics(w io.Writer) {
	if psiMetricsStart == nil {
		// PSI metrics weren't initialized — either init failed or the process
		// doesn't run under cgroup v2 (see psiMetricsStart / getPSIMetrics).
		return
	}

	m, err := getPSIMetrics()
	if err != nil {
		log.Printf("ERROR: metrics: cannot expose PSI metrics: %s", err)
		return
	}

	// Subtract the values captured at process start so the exposed counters
	// start from zero.
	WriteCounterFloat64(w, "process_pressure_cpu_waiting_seconds_total", psiTotalSecs(m.cpuSome-psiMetricsStart.cpuSome))
	WriteCounterFloat64(w, "process_pressure_cpu_stalled_seconds_total", psiTotalSecs(m.cpuFull-psiMetricsStart.cpuFull))

	WriteCounterFloat64(w, "process_pressure_io_waiting_seconds_total", psiTotalSecs(m.ioSome-psiMetricsStart.ioSome))
	WriteCounterFloat64(w, "process_pressure_io_stalled_seconds_total", psiTotalSecs(m.ioFull-psiMetricsStart.ioFull))

	WriteCounterFloat64(w, "process_pressure_memory_waiting_seconds_total", psiTotalSecs(m.memSome-psiMetricsStart.memSome))
	WriteCounterFloat64(w, "process_pressure_memory_stalled_seconds_total", psiTotalSecs(m.memFull-psiMetricsStart.memFull))
}
|
||||
|
||||
// psiTotalSecs converts a PSI `total` counter from microseconds to seconds.
//
// PSI total stats are reported in microseconds according to
// https://docs.kernel.org/accounting/psi.html
func psiTotalSecs(microsecs uint64) float64 {
	const microsecsPerSec = 1e6
	return float64(microsecs) / microsecsPerSec
}
|
||||
|
||||
// psiMetricsStart contains the initial PSI metric values on program start.
// It is needed in order to make sure the exposed PSI metrics start from zero.
//
// It is nil when PSI metrics are unavailable: either getPSIMetrics failed,
// or it returned nil because the process doesn't run under cgroup v2.
var psiMetricsStart = func() *psiMetrics {
	m, err := getPSIMetrics()
	if err != nil {
		log.Printf("INFO: metrics: disable exposing PSI metrics because of failed init: %s", err)
		return nil
	}
	return m
}()
|
||||
|
||||
// psiMetrics holds the PSI `total` counters (in microseconds) for the cpu,
// io and memory pressure files, for both the `some` and `full` lines.
type psiMetrics struct {
	cpuSome uint64
	cpuFull uint64
	ioSome  uint64
	ioFull  uint64
	memSome uint64
	memFull uint64
}
|
||||
|
||||
func getPSIMetrics() (*psiMetrics, error) {
|
||||
cgroupPath := getCgroupV2Path()
|
||||
if cgroupPath == "" {
|
||||
// Do nothing, since PSI requires cgroup v2, and the process doesn't run under cgroup v2.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
cpuSome, cpuFull, err := readPSITotals(cgroupPath, "cpu.pressure")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ioSome, ioFull, err := readPSITotals(cgroupPath, "io.pressure")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
memSome, memFull, err := readPSITotals(cgroupPath, "memory.pressure")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := &psiMetrics{
|
||||
cpuSome: cpuSome,
|
||||
cpuFull: cpuFull,
|
||||
ioSome: ioSome,
|
||||
ioFull: ioFull,
|
||||
memSome: memSome,
|
||||
memFull: memFull,
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func readPSITotals(cgroupPath, statsName string) (uint64, uint64, error) {
|
||||
filePath := cgroupPath + "/" + statsName
|
||||
data, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
lines := strings.Split(string(data), "\n")
|
||||
some := uint64(0)
|
||||
full := uint64(0)
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if !strings.HasPrefix(line, "some ") && !strings.HasPrefix(line, "full ") {
|
||||
continue
|
||||
}
|
||||
|
||||
tmp := strings.SplitN(line, "total=", 2)
|
||||
if len(tmp) != 2 {
|
||||
return 0, 0, fmt.Errorf("cannot find total from the line %q at %q", line, filePath)
|
||||
}
|
||||
microsecs, err := strconv.ParseUint(tmp[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("cannot parse total=%q at %q: %w", tmp[1], filePath, err)
|
||||
}
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(line, "some "):
|
||||
some = microsecs
|
||||
case strings.HasPrefix(line, "full "):
|
||||
full = microsecs
|
||||
}
|
||||
}
|
||||
return some, full, nil
|
||||
}
|
||||
|
||||
// getCgroupV2Path returns the mounted cgroup v2 filesystem path for the
// current process, or an empty string when the process doesn't appear to
// run under cgroup v2.
//
// It relies on the "0::<path>" line format of /proc/self/cgroup, which is
// used by the unified (v2) hierarchy only.
func getCgroupV2Path() string {
	data, err := ioutil.ReadFile("/proc/self/cgroup")
	if err != nil {
		return ""
	}
	tmp := strings.SplitN(string(data), "::", 2)
	if len(tmp) != 2 {
		return ""
	}
	path := "/sys/fs/cgroup" + strings.TrimSpace(tmp[1])

	// Drop trailing slash if it exists. This prevents from '//' in the constructed paths by the caller.
	return strings.TrimSuffix(path, "/")
}
|
||||
15
vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go
generated
vendored
Normal file
15
vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
//go:build !linux && !windows && !solaris
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// writeProcessMetrics is a no-op stub for platforms other than Linux,
// Windows and Solaris, where per-process metrics collection isn't
// implemented yet.
func writeProcessMetrics(w io.Writer) {
	// TODO: implement it
}
|
||||
|
||||
// writeFDMetrics is a no-op stub for platforms other than Linux, Windows
// and Solaris, where file descriptor metrics collection isn't implemented
// yet.
func writeFDMetrics(w io.Writer) {
	// TODO: implement it.
}
|
||||
595
vendor/github.com/VictoriaMetrics/metrics/process_metrics_solaris.go
generated
vendored
Normal file
595
vendor/github.com/VictoriaMetrics/metrics/process_metrics_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,595 @@
|
||||
//go:build solaris
|
||||
|
||||
// Author: Jens Elkner (C) 2025
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
/** Solaris 11.3 types deduced from /usr/include/sys/procfs.h **/
// requires go v1.18+
//
// These aliases mirror the native C typedefs so that the structs below keep
// the exact binary layout expected when reading /proc files via pread(2).
type uchar_t uint8 // unsigned char
type char int8     // signed char
type short int16
type ushort_t uint16
type id_t int32
type pid_t int32
type uid_t uint32
type gid_t uid_t
type taskid_t id_t
type projid_t id_t
type zoneid_t id_t
type poolid_t id_t
type uintptr_t uint64
type long int64
type ulong_t uint64
type dev_t ulong_t
type size_t ulong_t
type time_t long

// The following are opaque padding-only stand-ins for C structs whose
// contents aren't needed here; only their sizes matter for layout.
type sigset_t [16]char      // we do not need those struct, so just pad
type fltset_t [16]char      // we do not need those struct, so just pad
type sysset_t [64]char      // we do not need those struct, so just pad
type lwpstatus_t [1296]char // we do not need those struct, so just pad
type lwpsinfo_t [152]char   // we do not need those struct, so just pad
|
||||
|
||||
// timestruc_t mirrors the Solaris C timestruc_t: a point in time or a
// duration expressed as whole seconds plus nanoseconds.
type timestruc_t struct {
	tv_sec  time_t
	tv_nsec long
}
|
||||
|
||||
/* process status file: /proc/<pid>/status */
// NOTE(review): field order, sizes and padding must mirror the C struct from
// /usr/include/sys/procfs.h exactly - the raw bytes are pread() straight
// into this struct. The bare "// N" comments track the byte offsets.
type pstatus_t struct {
	pr_flags   int32 /* flags (see below) */
	pr_nlwp    int32 /* number of active lwps in the process */
	pr_pid     pid_t /* process id */
	pr_ppid    pid_t /* parent process id */
	pr_pgid    pid_t /* process group id */
	pr_sid     pid_t /* session id */
	pr_aslwpid id_t  /* historical; now always zero */
	pr_agentid id_t  /* lwp id of the /proc agent lwp, if any */
	// 32
	pr_sigpend sigset_t  /* set of process pending signals */
	pr_brkbase uintptr_t /* address of the process heap */
	pr_brksize size_t    /* size of the process heap, in bytes */
	// 64
	pr_stkbase uintptr_t   /* address of the process stack */
	pr_stksize size_t      /* size of the process stack, in bytes */
	pr_utime   timestruc_t /* # process user cpu time */
	// 96
	pr_stime  timestruc_t /* # process system cpu time */
	pr_cutime timestruc_t /* # sum of children's user times */
	// 128
	pr_cstime   timestruc_t /* # sum of children's system times */
	pr_sigtrace sigset_t    /* sigset_t: set of traced signals */
	// 160
	pr_flttrace fltset_t /* set of traced faults */
	pr_sysentry sysset_t /* set of system calls traced on entry */
	// 240
	pr_sysexit sysset_t /* set of system calls traced on exit */
	// 304
	pr_dmodel    char    /* data model of the process (see below) */
	pr_va_mask   uchar_t /* VA masking bits, where supported */
	pr_adi_nbits uchar_t /* # of VA bits used by ADI when enabled */
	pr_pad       [1]char
	pr_taskid    taskid_t /* task id */
	// 312
	pr_projid projid_t /* project id */
	pr_nzomb  int32    /* number of zombie lwps in the process */
	pr_zoneid zoneid_t /* zone id */
	// 324
	pr_filler [15]int32 /* reserved for future use */
	// 384
	pr_lwp lwpstatus_t /* status of the representative lwp */
	// 1680
}
|
||||
|
||||
// Fixed-size string lengths taken from /usr/include/sys/procfs.h.
const PRARGSZ = 80 /* number of chars of arguments */
const PRFNSZ = 16  /* Maximum size of execed filename */
|
||||
|
||||
/* process ps(1) information file. /proc/<pid>/psinfo */
// NOTE(review): binary layout must match the C struct; the bare "// N"
// comments track byte offsets. pr_dummy is Go-only explicit padding to get
// the 8-byte alignment the C compiler inserts implicitly.
type psinfo_t struct {
	pr_flag int32 /* process flags (DEPRECATED; do not use) */
	pr_nlwp int32 /* number of active lwps in the process */
	pr_pid  pid_t /* unique process id */
	pr_ppid pid_t /* process id of parent */
	pr_pgid pid_t /* pid of process group leader */
	pr_sid  pid_t /* session id */
	pr_uid  uid_t /* real user id */
	pr_euid uid_t /* effective user id */
	// 32
	pr_gid    gid_t     /* real group id */
	pr_egid   gid_t     /* effective group id */
	pr_addr   uintptr_t /* address of process */
	pr_size   size_t    /* size of process image in Kbytes */
	pr_rssize size_t    /* resident set size in Kbytes */
	// 64
	pr_rssizepriv size_t /* resident set size of private mappings */
	pr_ttydev     dev_t  /* controlling tty device (or PRNODEV) */
	/* The following percent numbers are 16-bit binary */
	/* fractions [0 .. 1] with the binary point to the */
	/* right of the high-order bit (1.0 == 0x8000) */
	pr_pctcpu ushort_t /* % of recent cpu time used by all lwps */
	pr_pctmem ushort_t /* % of system memory used by process */
	pr_dummy  int32    /* 8 byte alignment: GO doesn't do it automagically */
	// 84 + 4 = 88
	pr_start timestruc_t /* process start time, from the epoch */
	pr_time  timestruc_t /* usr+sys cpu time for this process */
	pr_ctime timestruc_t /* usr+sys cpu time for reaped children */
	// 136
	pr_fname  [PRFNSZ]char  /* name of execed file */
	pr_psargs [PRARGSZ]char /* initial characters of arg list */
	// 232
	pr_wstat  int32     /* if zombie, the wait() status */
	pr_argc   int32     /* initial argument count */
	pr_argv   uintptr_t /* address of initial argument vector */
	pr_envp   uintptr_t /* address of initial environment vector */
	pr_dmodel char      /* data model of the process */
	pr_pad2   [3]char
	pr_taskid taskid_t /* task id */
	// 264
	pr_projid   projid_t /* project id */
	pr_nzomb    int32    /* number of zombie lwps in the process */
	pr_poolid   poolid_t /* pool id */
	pr_zoneid   zoneid_t /* zone id */
	pr_contract id_t     /* process contract */
	pr_filler   [1]int32 /* reserved for future use */
	// 288
	pr_lwp lwpsinfo_t /* information for representative lwp */
	// 440
}
|
||||
|
||||
/* Resource usage. /proc/<pid>/usage /proc/<pid>/lwp/<lwpid>/lwpusage */
// NOTE(review): binary layout must match the C struct; the bare "// N"
// comments track byte offsets.
type prusage_t struct {
	pr_lwpid id_t  /* lwp id. 0: process or defunct */
	pr_count int32 /* number of contributing lwps */
	// 8
	pr_tstamp timestruc_t /* current time stamp */
	pr_create timestruc_t /* process/lwp creation time stamp */
	pr_term   timestruc_t /* process/lwp termination time stamp */
	pr_rtime  timestruc_t /* total lwp real (elapsed) time */
	// 72
	pr_utime  timestruc_t /* user level cpu time */
	pr_stime  timestruc_t /* system call cpu time */
	pr_ttime  timestruc_t /* other system trap cpu time */
	pr_tftime timestruc_t /* text page fault sleep time */
	// 136
	pr_dftime  timestruc_t /* data page fault sleep time */
	pr_kftime  timestruc_t /* kernel page fault sleep time */
	pr_ltime   timestruc_t /* user lock wait sleep time */
	pr_slptime timestruc_t /* all other sleep time */
	// 200
	pr_wtime    timestruc_t /* wait-cpu (latency) time */
	pr_stoptime timestruc_t /* stopped time */
	// 232
	filltime [6]timestruc_t /* filler for future expansion */
	// 328
	pr_minf  ulong_t /* minor page faults */
	pr_majf  ulong_t /* major page faults */
	pr_nswap ulong_t /* swaps */
	pr_inblk ulong_t /* input blocks (JEL: disk events not always recorded, so perhaps usable as an indicator but not more) */
	// 360
	pr_oublk ulong_t /* output blocks (JEL: disk events not always recorded, so perhaps usable as an indicator but not more) */
	pr_msnd  ulong_t /* messages sent */
	pr_mrcv  ulong_t /* messages received */
	pr_sigs  ulong_t /* signals received */
	// 392
	pr_vctx ulong_t /* voluntary context switches */
	pr_ictx ulong_t /* involuntary context switches */
	pr_sysc ulong_t /* system calls */
	pr_ioch ulong_t /* chars read and written (JEL: no matter, whether to/from disk or somewhere else) */
	// 424
	filler [10]ulong_t /* filler for future expansion */
	// 504
}
|
||||
|
||||
/** End Of Solaris types **/
|
||||
|
||||
// ProcMetric enumerates all known per-process metrics. The values are used
// as indices into pm_desc and pm_val, so pm_desc must stay in the same
// order as this enumeration.
type ProcMetric uint32

const (
	PM_OPEN_FDS ProcMetric = iota
	PM_MAX_FDS
	PM_MINFLT
	PM_MAJFLT
	PM_CPU_UTIL
	PM_MEM_UTIL
	PM_CMINFLT // Linux, only
	PM_CMAJFLT // Linux, only
	PM_UTIME
	PM_STIME
	PM_TIME
	PM_CUTIME
	PM_CSTIME
	PM_CTIME
	PM_NUM_THREADS
	PM_STARTTIME
	PM_VSIZE
	PM_RSS
	PM_VCTX
	PM_ICTX
	PM_BLKIO // Linux, only
	PM_COUNT /* contract: must be the last one */
)
|
||||
|
||||
// MetricInfo describes a single process metric in Prometheus exposition
// terms: the metric name, its HELP text and its TYPE (gauge or counter).
type MetricInfo struct {
	name, help, mtype string
}
|
||||
|
||||
/* process metric names and descriptions */
// NOTE: entries must stay in the same order as the ProcMetric enumeration,
// since the array is indexed by ProcMetric values.
var pm_desc = [PM_COUNT]MetricInfo{
	{ // PM_OPEN_FDS
		"process_open_fds",
		"Number of open file descriptors",
		"gauge",
	}, { // PM_MAX_FDS
		"process_max_fds",
		"Max. number of open file descriptors (soft limit)",
		"gauge",
	}, { // PM_MINFLT
		"process_minor_pagefaults",
		"Number of minor faults of the process not caused a page load from disk",
		"counter",
	}, { // PM_MAJFLT
		"process_major_pagefaults",
		"Number of major faults of the process caused a page load from disk",
		"counter",
	}, { // PM_CPU_UTIL
		"process_cpu_utilization_percent",
		"Percent of recent cpu time used by all lwps",
		"gauge",
	}, { // PM_MEM_UTIL
		"process_mem_utilization_percent",
		"Percent of system memory used by process",
		"gauge",
	}, { // PM_CMINFLT
		"process_children_minor_pagefaults",
		"Number of minor faults of the process waited-for children not caused a page load from disk",
		"counter",
	}, { // PM_CMAJFLT
		"process_children_major_pagefaults",
		"Number of major faults of the process's waited-for children caused a page load from disk",
		"counter",
	}, { // PM_UTIME
		"process_user_cpu_seconds",
		"Total CPU time the process spent in user mode in seconds",
		"counter",
	}, { // PM_STIME
		"process_system_cpu_seconds",
		"Total CPU time the process spent in kernel mode in seconds",
		"counter",
	}, { // PM_TIME
		"process_total_cpu_seconds",
		"Total CPU time the process spent in user and kernel mode in seconds",
		"counter",
	}, { // PM_CUTIME
		"process_children_user_cpu_seconds",
		"Total CPU time the process's waited-for children spent in user mode in seconds",
		"counter",
	}, { // PM_CSTIME
		"process_children_system_cpu_seconds",
		"Total CPU time the process's waited-for children spent in kernel mode in seconds",
		"counter",
	}, { // PM_CTIME
		"process_children_total_cpu_seconds",
		"Total CPU time the process's waited-for children spent in user and in kernel mode in seconds",
		"counter",
	}, { // PM_NUM_THREADS
		"process_threads_total",
		"Number of threads in this process",
		"gauge",
	}, { // PM_STARTTIME
		"process_start_time_seconds",
		"The time the process has been started in seconds elapsed since Epoch",
		"counter",
	}, { // PM_VSIZE
		"process_virtual_memory_bytes",
		"Virtual memory size in bytes",
		"gauge",
	}, { // PM_RSS
		"process_resident_memory_bytes",
		"Resident set size of memory in bytes",
		"gauge",
	}, { // PM_VCTX
		"process_voluntary_ctxsw_total",
		"Number of voluntary context switches",
		"counter",
	}, { // PM_ICTX
		"process_involuntary_ctxsw_total",
		"Number of involuntary context switches",
		"counter",
	}, { // PM_BLKIO
		"process_delayacct_blkio_ticks",
		"Aggregated block I/O delays, measured in clock ticks (centiseconds)",
		"counter",
	},
}
|
||||
|
||||
// ProcFd enumerates the pre-opened /proc file descriptors kept in pm_fd
// and pm_file.
type ProcFd uint32

const (
	FD_LIMITS ProcFd = iota
	FD_STAT
	FD_PSINFO // solaris/illumos, only
	FD_USAGE  // solaris/illumos, only
	FD_COUNT  /* contract: must be the last one */
)
|
||||
|
||||
/* emittable process metrics for solaris */
// The order here is the order in which writeProcessMetrics emits them.
var activeProcMetrics = []ProcMetric{
	PM_MINFLT,
	PM_MAJFLT,
	PM_CPU_UTIL,
	PM_MEM_UTIL,
	PM_UTIME,
	PM_STIME,
	PM_TIME,
	PM_CUTIME,
	PM_CSTIME,
	PM_CTIME,
	PM_NUM_THREADS,
	PM_STARTTIME,
	PM_VSIZE,
	PM_RSS,
	PM_VCTX,
	PM_ICTX,
}
|
||||
|
||||
/* emittable fd metrics for solaris */
// The order here is the order in which writeFDMetrics emits them.
var activeFdMetrics = []ProcMetric{
	PM_OPEN_FDS,
	PM_MAX_FDS,
}
|
||||
|
||||
/*
 process metrics related file descriptors for files we always need, and
 do not want to open/close all the time
*/
var pm_fd [FD_COUNT]int

/*
 to avoid, that go closes the files in the background, which makes the FDs
 above useless, we need to keep the reference to them as well
*/
var pm_file [FD_COUNT]*os.File

/*
 process metric values. TSDBs use internally always float64, so we do not
 need to make a difference between int and non-int values
*/
var pm_val [PM_COUNT]float64

/* path used to count open FDs */
var fd_path string
|
||||
|
||||
/* lazy init of this process related metrics */
// init opens the /proc/<pid> status, psinfo and usage files once (keeping
// the file descriptors around for cheap periodic pread(2) updates), presets
// all metric values so they stay safe to expose even if part of the
// initialization fails, and determines the fd soft limit once.
//
// When running as a "go test" binary, canned files from ./testdata are used
// instead of the live /proc entries.
func init() {
	var testdata_dir = ""
	var onTest = len(os.Args) > 1 && strings.HasSuffix(os.Args[0], ".test")
	if onTest {
		cwd, err := os.Getwd()
		if err != nil {
			panic("Unknown directory: " + err.Error())
		}
		testdata_dir = cwd + "/testdata"
		fmt.Printf("Using test data in %s ...\n", testdata_dir)
	}

	// we preset all so that it is safe to use these vals even if the rest of
	// init fails
	for i := 0; i < int(PM_COUNT); i++ {
		pm_val[i] = 0
	}
	for i := 0; i < int(FD_COUNT); i++ {
		pm_fd[i] = -1
	}
	pid := os.Getpid()
	if onTest {
		fd_path = testdata_dir + "/fd"
	} else {
		fd_path = fmt.Sprintf("/proc/%d/fd", pid)
	}

	// NOTE: We do NOT close these FDs intentionally to avoid the open/close
	// overhead for each update.
	// NOTE: testdata paths are built with plain concatenation instead of
	// fmt.Sprintf() - the directory is data, not a format string, and a '%'
	// in the working directory path would otherwise corrupt it.
	var path string
	if onTest {
		path = testdata_dir + "/solaris.ps_status"
	} else {
		path = fmt.Sprintf("/proc/%d/status", pid)
	}
	f, err := os.OpenFile(path, os.O_RDONLY, 0)
	if err != nil {
		log.Printf("ERROR: metrics: Unable to open %s (%v).", path, err)
	} else {
		pm_file[FD_STAT] = f
		pm_fd[FD_STAT] = int(f.Fd())
	}
	if onTest {
		path = testdata_dir + "/solaris.ps_info"
	} else {
		path = fmt.Sprintf("/proc/%d/psinfo", pid)
	}
	f, err = os.OpenFile(path, os.O_RDONLY, 0)
	if err != nil {
		log.Printf("ERROR: metrics: Unable to open %s (%v).", path, err)
	} else {
		pm_file[FD_PSINFO] = f
		pm_fd[FD_PSINFO] = int(f.Fd())
	}
	if onTest {
		path = testdata_dir + "/solaris.ps_usage"
	} else {
		path = fmt.Sprintf("/proc/%d/usage", pid)
	}
	f, err = os.OpenFile(path, os.O_RDONLY, 0)
	if err != nil {
		log.Printf("ERROR: metrics: Unable to open %s (%v).", path, err)
	} else {
		pm_file[FD_USAGE] = f
		pm_fd[FD_USAGE] = int(f.Fd())
	}

	/* usually an app does|cannot not change its own FD limits. So we handle
	   it as a const - determine it once, only */
	var lim syscall.Rlimit
	err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim)
	if err == nil {
		pm_val[PM_MAX_FDS] = float64(lim.Cur)
	} else {
		log.Printf("ERROR: metrics: Unable to determine max. fd limit.")
		pm_val[PM_MAX_FDS] = -1
	}
}
|
||||
|
||||
// nan is the cached NaN value assigned to metrics whose source couldn't be read.
var nan = math.NaN()
|
||||
|
||||
func time2float(t timestruc_t) float64 {
|
||||
return float64(t.tv_sec) + float64(t.tv_nsec)*1e-9
|
||||
}
|
||||
func time2float2(a timestruc_t, b timestruc_t) float64 {
|
||||
return float64(a.tv_sec+b.tv_sec) + float64(a.tv_nsec+b.tv_nsec)*1e-9
|
||||
}
|
||||
|
||||
// updateProcMetrics refreshes pm_val from the pre-opened /proc files via
// pread(2), so no per-scrape open/close syscalls are needed.
//
// Each source struct is read raw into its Go mirror. If a read fails, or
// returns fewer bytes than the offset comment covering the last field used
// below, the affected metrics are set to NaN instead of exposing stale or
// bogus values.
func updateProcMetrics() {
	var status pstatus_t
	var psinfo psinfo_t
	var usage prusage_t

	var fail = pm_fd[FD_STAT] < 0
	if !fail {
		// Reinterpret &status as a byte array and read the raw struct bytes.
		n, err := syscall.Pread(pm_fd[FD_STAT],
			(*(*[unsafe.Sizeof(status)]byte)(unsafe.Pointer(&status)))[:], 0)
		// 324 = offset past pr_zoneid; all fields used below live before it
		// (see the offset comments in pstatus_t).
		fail = (n < 324 || err != nil)
		if fail {
			fmt.Printf("WARNING: read %s@%d failed: %v\n",
				pm_file[FD_STAT].Name(), n, err)
		}
	}
	if fail {
		pm_val[PM_NUM_THREADS] = nan
		pm_val[PM_UTIME] = nan
		pm_val[PM_STIME] = nan
		pm_val[PM_TIME] = nan
		pm_val[PM_CUTIME] = nan
		pm_val[PM_CSTIME] = nan
		pm_val[PM_CTIME] = nan
	} else {
		pm_val[PM_NUM_THREADS] = float64(status.pr_nlwp + status.pr_nzomb)
		pm_val[PM_UTIME] = time2float(status.pr_utime)
		pm_val[PM_STIME] = time2float(status.pr_stime)
		pm_val[PM_TIME] = time2float2(status.pr_utime, status.pr_stime)
		pm_val[PM_CUTIME] = time2float(status.pr_cutime)
		pm_val[PM_CSTIME] = time2float(status.pr_cstime)
		pm_val[PM_CTIME] = time2float2(status.pr_cutime, status.pr_cstime)
	}
	fail = pm_fd[FD_PSINFO] < 0
	if !fail {
		n, err := syscall.Pread(pm_fd[FD_PSINFO],
			(*(*[unsafe.Sizeof(psinfo)]byte)(unsafe.Pointer(&psinfo)))[:], 0)
		// 272 = offset past pr_nzomb; covers all psinfo fields used below.
		fail = (n < 272 || err != nil)
		if fail {
			fmt.Printf("WARNING: read %s@%d failed: %v\n",
				pm_file[FD_PSINFO].Name(), n, err)
		}
	}
	if fail {
		pm_val[PM_VSIZE] = nan
		pm_val[PM_RSS] = nan
		pm_val[PM_CPU_UTIL] = nan
		pm_val[PM_MEM_UTIL] = nan
		pm_val[PM_STARTTIME] = nan
	} else {
		//num_threads = psinfo.pr_nlwp + psinfo.pr_nzomb // already by status
		// pr_size/pr_rssize are in KiB; shift converts to bytes.
		pm_val[PM_VSIZE] = float64(psinfo.pr_size << 10)
		pm_val[PM_RSS] = float64(psinfo.pr_rssize << 10)
		// pr_pctcpu/pr_pctmem are 16-bit binary fractions with 1.0 == 0x8000.
		pm_val[PM_CPU_UTIL] = 100 * float64(psinfo.pr_pctcpu) / float64(0x8000)
		pm_val[PM_MEM_UTIL] = 100 * float64(psinfo.pr_pctmem) / float64(0x8000)
		pm_val[PM_STARTTIME] = float64(psinfo.pr_start.tv_sec)
	}
	fail = pm_fd[FD_USAGE] < 0
	if !fail {
		n, err := syscall.Pread(pm_fd[FD_USAGE],
			(*(*[unsafe.Sizeof(usage)]byte)(unsafe.Pointer(&usage)))[:], 0)
		// 424 = offset past pr_ioch; covers all prusage fields used below.
		fail = (n < 424 || err != nil)
		if fail {
			fmt.Printf("WARNING: read %s@%d failed: %v\n",
				pm_file[FD_USAGE].Name(), n, err)
		}
	}
	if fail {
		pm_val[PM_MINFLT] = nan
		pm_val[PM_MAJFLT] = nan
		pm_val[PM_VCTX] = nan
		pm_val[PM_ICTX] = nan
	} else {
		pm_val[PM_MINFLT] = float64(usage.pr_minf)
		pm_val[PM_MAJFLT] = float64(usage.pr_majf)
		pm_val[PM_VCTX] = float64(usage.pr_vctx)
		pm_val[PM_ICTX] = float64(usage.pr_ictx)
	}
}
|
||||
|
||||
// updateFdMetrics refreshes PM_OPEN_FDS by counting the entries of the
// process' fd directory in batches of 512 names.
func updateFdMetrics() {
	pm_val[PM_OPEN_FDS] = 0
	dir, err := os.Open(fd_path)
	if err != nil {
		log.Printf("ERROR: metrics: Unable to open %s", fd_path)
		return
	}
	defer dir.Close()
	for {
		batch, err := dir.Readdirnames(512)
		switch {
		case err == io.EOF:
			// All entries consumed.
			return
		case err != nil:
			log.Printf("ERROR: metrics: Read error for %s: %s", fd_path, err)
			return
		}
		pm_val[PM_OPEN_FDS] += float64(len(batch))
	}
}
|
||||
|
||||
// writeProcessMetrics emits all active per-process metrics to w in
// Prometheus text exposition format. When metadata is enabled, each sample
// is preceded by its # HELP and # TYPE lines.
func writeProcessMetrics(w io.Writer) {
	updateProcMetrics()
	withMeta := isMetadataEnabled()
	for _, v := range activeProcMetrics {
		d := &pm_desc[v]
		if withMeta {
			fmt.Fprintf(w, "# HELP %s %s\n# TYPE %s %s\n", d.name, d.help, d.name, d.mtype)
		}
		fmt.Fprintf(w, "%s %.17g\n", d.name, pm_val[v])
	}
}
|
||||
|
||||
// writeFDMetrics emits the file descriptor metrics to w in Prometheus text
// exposition format. When metadata is enabled, each sample is preceded by
// its # HELP and # TYPE lines.
func writeFDMetrics(w io.Writer) {
	updateFdMetrics()
	withMeta := isMetadataEnabled()
	for _, v := range activeFdMetrics {
		d := &pm_desc[v]
		if withMeta {
			fmt.Fprintf(w, "# HELP %s %s\n# TYPE %s %s\n", d.name, d.help, d.name, d.mtype)
		}
		fmt.Fprintf(w, "%s %.17g\n", d.name, pm_val[v])
	}
}
|
||||
83
vendor/github.com/VictoriaMetrics/metrics/process_metrics_windows.go
generated
vendored
Normal file
83
vendor/github.com/VictoriaMetrics/metrics/process_metrics_windows.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
//go:build windows
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Lazily-loaded Windows system DLLs and the procedures used below.
// NewLazyDLL/NewProc defer the actual LoadLibrary/GetProcAddress until the
// first Call.
var (
	modpsapi    = syscall.NewLazyDLL("psapi.dll")
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")

	// https://learn.microsoft.com/en-us/windows/win32/api/psapi/nf-psapi-getprocessmemoryinfo
	procGetProcessMemoryInfo  = modpsapi.NewProc("GetProcessMemoryInfo")
	procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
)
|
||||
|
||||
// processMemoryCounters mirrors PROCESS_MEMORY_COUNTERS_EX; the blank
// leading field corresponds to the struct's cb (size) member per the
// linked PSAPI documentation.
// https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-process_memory_counters_ex
type processMemoryCounters struct {
	_                          uint32
	PageFaultCount             uint32
	PeakWorkingSetSize         uintptr
	WorkingSetSize             uintptr
	QuotaPeakPagedPoolUsage    uintptr
	QuotaPagedPoolUsage        uintptr
	QuotaPeakNonPagedPoolUsage uintptr
	QuotaNonPagedPoolUsage     uintptr
	PagefileUsage              uintptr
	PeakPagefileUsage          uintptr
	PrivateUsage               uintptr
}
|
||||
|
||||
// writeProcessMetrics writes CPU time, page fault and memory metrics for
// the current process to w in Prometheus text exposition format.
//
// On any API failure the error is logged and nothing is written.
func writeProcessMetrics(w io.Writer) {
	h := windows.CurrentProcess()
	var startTime, exitTime, stime, utime windows.Filetime
	err := windows.GetProcessTimes(h, &startTime, &exitTime, &stime, &utime)
	if err != nil {
		log.Printf("ERROR: metrics: cannot read process times: %s", err)
		return
	}
	var mc processMemoryCounters
	r1, _, err := procGetProcessMemoryInfo.Call(
		uintptr(h),
		uintptr(unsafe.Pointer(&mc)),
		unsafe.Sizeof(mc),
	)
	if r1 != 1 {
		log.Printf("ERROR: metrics: cannot read process memory information: %s", err)
		return
	}
	// FILETIME values count 100-nanosecond intervals; divide by 1e7 for seconds.
	stimeSeconds := float64(uint64(stime.HighDateTime)<<32+uint64(stime.LowDateTime)) / 1e7
	utimeSeconds := float64(uint64(utime.HighDateTime)<<32+uint64(utime.LowDateTime)) / 1e7
	WriteCounterFloat64(w, "process_cpu_seconds_system_total", stimeSeconds)
	WriteCounterFloat64(w, "process_cpu_seconds_total", stimeSeconds+utimeSeconds)
	// BUGFIX: the user-mode CPU metric previously exported stimeSeconds
	// (kernel time) instead of utimeSeconds.
	WriteCounterFloat64(w, "process_cpu_seconds_user_total", utimeSeconds)
	WriteCounterUint64(w, "process_pagefaults_total", uint64(mc.PageFaultCount))
	WriteGaugeUint64(w, "process_start_time_seconds", uint64(startTime.Nanoseconds())/1e9)
	WriteGaugeUint64(w, "process_virtual_memory_bytes", uint64(mc.PrivateUsage))
	WriteGaugeUint64(w, "process_resident_memory_peak_bytes", uint64(mc.PeakWorkingSetSize))
	WriteGaugeUint64(w, "process_resident_memory_bytes", uint64(mc.WorkingSetSize))
}
|
||||
|
||||
// writeFDMetrics writes the open handle count and the (hard-coded) handle
// limit for the current process to w. On API failure the error is logged
// and nothing is written.
func writeFDMetrics(w io.Writer) {
	h := windows.CurrentProcess()
	var count uint32
	r1, _, err := procGetProcessHandleCount.Call(
		uintptr(h),
		uintptr(unsafe.Pointer(&count)),
	)
	if r1 != 1 {
		log.Printf("ERROR: metrics: cannot determine open file descriptors count: %s", err)
		return
	}
	// it seems to be hard-coded limit for 64-bit systems
	// https://learn.microsoft.com/en-us/archive/blogs/markrussinovich/pushing-the-limits-of-windows-handles#maximum-number-of-handles
	WriteGaugeUint64(w, "process_max_fds", 16777216)
	WriteGaugeUint64(w, "process_open_fds", uint64(count))
}
|
||||
273
vendor/github.com/VictoriaMetrics/metrics/prometheus_histogram.go
generated
vendored
Normal file
273
vendor/github.com/VictoriaMetrics/metrics/prometheus_histogram.go
generated
vendored
Normal file
@@ -0,0 +1,273 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PrometheusHistogramDefaultBuckets is a list of the default bucket upper
// bounds. Those default buckets are quite generic, and it is recommended to
// pick custom buckets for improved accuracy.
//
// Values above the last bound fall into the implicit +Inf bucket.
var PrometheusHistogramDefaultBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
||||
|
||||
// PrometheusHistogram is a histogram for non-negative values with pre-defined buckets
//
// Each bucket contains a counter for values in the given range.
// Each bucket is exposed via the following metric:
//
//	<metric_name>_bucket{<optional_tags>,le="upper_bound"} <counter>
//
// Where:
//
//   - <metric_name> is the metric name passed to NewPrometheusHistogram
//   - <optional_tags> is optional tags for the <metric_name>, which are passed to NewPrometheusHistogram
//   - <upper_bound> - upper bound of the current bucket. all samples <= upper_bound are in that bucket
//   - <counter> - the number of hits to the given bucket during Update* calls
//
// Next to the bucket metrics, two additional metrics track the total number of
// samples (_count) and the total sum (_sum) of all samples:
//
//   - <metric_name>_sum{<optional_tags>} <counter>
//   - <metric_name>_count{<optional_tags>} <counter>
type PrometheusHistogram struct {
	// mu guarantees synchronous update for all the counters.
	//
	// Do not use sync.RWMutex, since it has zero sense from performance PoV.
	// It only complicates the code.
	mu sync.Mutex

	// upperBounds and buckets are aligned by element position:
	// upperBounds[i] defines the upper bound for buckets[i].
	// buckets[i] contains the count of elements <= upperBounds[i].
	// Values above the last bound are tracked via count/sum only
	// (the implicit +Inf bucket).
	upperBounds []float64
	buckets     []uint64

	// count is the counter for all observations on this histogram
	count uint64

	// sum is the sum of all the values put into Histogram
	sum float64
}
|
||||
|
||||
// Reset resets previous observations in h.
|
||||
func (h *PrometheusHistogram) Reset() {
|
||||
h.mu.Lock()
|
||||
for i := range h.buckets {
|
||||
h.buckets[i] = 0
|
||||
}
|
||||
h.sum = 0
|
||||
h.count = 0
|
||||
h.mu.Unlock()
|
||||
}
|
||||
|
||||
// Update updates h with v.
//
// Negative values and NaNs are ignored.
func (h *PrometheusHistogram) Update(v float64) {
	if v < 0 || math.IsNaN(v) {
		// Skip NaNs and negative values.
		return
	}
	// Find the first bucket whose upper bound covers v.
	// idx == -1 means the implicit +Inf bucket, which is only accounted
	// for via count/sum.
	idx := -1
	for i := 0; i < len(h.upperBounds); i++ {
		if v <= h.upperBounds[i] {
			idx = i
			break
		}
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	h.sum += v
	h.count++
	if idx >= 0 {
		h.buckets[idx]++
	}
}
|
||||
|
||||
// UpdateDuration updates request duration based on the given startTime.
func (h *PrometheusHistogram) UpdateDuration(startTime time.Time) {
	h.Update(time.Since(startTime).Seconds())
}
|
||||
|
||||
// NewPrometheusHistogram creates and returns new PrometheusHistogram with the given name
// and PrometheusHistogramDefaultBuckets.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
//   - foo
//   - foo{bar="baz"}
//   - foo{bar="baz",aaa="b"}
//
// The histogram is registered in the default metrics set.
//
// The returned histogram is safe to use from concurrent goroutines.
func NewPrometheusHistogram(name string) *PrometheusHistogram {
	return defaultSet.NewPrometheusHistogram(name)
}
|
||||
|
||||
// NewPrometheusHistogramExt creates and returns new PrometheusHistogram with the given name
// and given upperBounds.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
//   - foo
//   - foo{bar="baz"}
//   - foo{bar="baz",aaa="b"}
//
// The histogram is registered in the default metrics set.
//
// The returned histogram is safe to use from concurrent goroutines.
func NewPrometheusHistogramExt(name string, upperBounds []float64) *PrometheusHistogram {
	return defaultSet.NewPrometheusHistogramExt(name, upperBounds)
}
|
||||
|
||||
// GetOrCreatePrometheusHistogram returns registered PrometheusHistogram with the given name
// or creates a new PrometheusHistogram if the registry doesn't contain histogram with
// the given name.
//
// The lookup and registration happen in the default metrics set.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
//   - foo
//   - foo{bar="baz"}
//   - foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewPrometheusHistogram instead of GetOrCreatePrometheusHistogram.
func GetOrCreatePrometheusHistogram(name string) *PrometheusHistogram {
	return defaultSet.GetOrCreatePrometheusHistogram(name)
}
|
||||
|
||||
// GetOrCreatePrometheusHistogramExt returns registered PrometheusHistogram with the given name and
// upperBounds or creates new PrometheusHistogram if the registry doesn't contain histogram
// with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
//   - foo
//   - foo{bar="baz"}
//   - foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewPrometheusHistogramExt instead of GetOrCreatePrometheusHistogramExt.
func GetOrCreatePrometheusHistogramExt(name string, upperBounds []float64) *PrometheusHistogram {
	// Thin wrapper around the package-level default set.
	return defaultSet.GetOrCreatePrometheusHistogramExt(name, upperBounds)
}
|
||||
|
||||
func newPrometheusHistogram(upperBounds []float64) *PrometheusHistogram {
|
||||
mustValidateBuckets(upperBounds)
|
||||
last := len(upperBounds) - 1
|
||||
if math.IsInf(upperBounds[last], +1) {
|
||||
upperBounds = upperBounds[:last] // ignore +Inf bucket as it is covered anyways
|
||||
}
|
||||
h := PrometheusHistogram{
|
||||
upperBounds: upperBounds,
|
||||
buckets: make([]uint64, len(upperBounds)),
|
||||
}
|
||||
|
||||
return &h
|
||||
}
|
||||
|
||||
// mustValidateBuckets panics if upperBounds do not pass ValidateBuckets checks.
// It is used by constructors where invalid bounds indicate a programming error.
func mustValidateBuckets(upperBounds []float64) {
	if err := ValidateBuckets(upperBounds); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// ValidateBuckets validates the given upperBounds and returns an error
// if validation failed.
//
// The bounds must be non-empty and strictly increasing.
func ValidateBuckets(upperBounds []float64) error {
	if len(upperBounds) == 0 {
		return fmt.Errorf("upperBounds can't be empty")
	}
	prev := upperBounds[0]
	for _, ub := range upperBounds[1:] {
		if prev >= ub {
			return fmt.Errorf("upper bounds for the buckets must be strictly increasing")
		}
		prev = ub
	}
	return nil
}
|
||||
|
||||
// LinearBuckets returns a list of upperBounds for PrometheusHistogram,
|
||||
// and whose distribution is as follows:
|
||||
//
|
||||
// [start, start + width, start + 2 * width, ... start + (count-1) * width]
|
||||
//
|
||||
// Panics if given start, width and count produce negative buckets or none buckets at all.
|
||||
func LinearBuckets(start, width float64, count int) []float64 {
|
||||
if count < 1 {
|
||||
panic("LinearBuckets: count can't be less than 1")
|
||||
}
|
||||
upperBounds := make([]float64, count)
|
||||
for i := range upperBounds {
|
||||
upperBounds[i] = start
|
||||
start += width
|
||||
}
|
||||
mustValidateBuckets(upperBounds)
|
||||
return upperBounds
|
||||
}
|
||||
|
||||
// ExponentialBuckets returns a list of upperBounds for PrometheusHistogram,
|
||||
// and whose distribution is as follows:
|
||||
//
|
||||
// [start, start * factor pow 1, start * factor pow 2, ... start * factor pow (count-1)]
|
||||
//
|
||||
// Panics if given start, width and count produce negative buckets or none buckets at all.
|
||||
func ExponentialBuckets(start, factor float64, count int) []float64 {
|
||||
if count < 1 {
|
||||
panic("ExponentialBuckets: count can't be less than 1")
|
||||
}
|
||||
if factor <= 1 {
|
||||
panic("ExponentialBuckets: factor must be greater than 1")
|
||||
}
|
||||
if start <= 0 {
|
||||
panic("ExponentialBuckets: start can't be less than 0")
|
||||
}
|
||||
upperBounds := make([]float64, count)
|
||||
for i := range upperBounds {
|
||||
upperBounds[i] = start
|
||||
start *= factor
|
||||
}
|
||||
mustValidateBuckets(upperBounds)
|
||||
return upperBounds
|
||||
}
|
||||
|
||||
// marshalTo writes the histogram to w in Prometheus text exposition format under
// the given prefix: one cumulative `_bucket` line per upper bound, a synthetic
// `le="+Inf"` bucket equal to the total observation count, then `_sum` and `_count`.
func (h *PrometheusHistogram) marshalTo(prefix string, w io.Writer) {
	cumulativeSum := uint64(0)
	h.mu.Lock()
	count := h.count
	sum := h.sum
	for i, ub := range h.upperBounds {
		// Prometheus buckets are cumulative: each bucket includes all smaller ones.
		cumulativeSum += h.buckets[i]
		tag := fmt.Sprintf(`le="%v"`, ub)
		metricName := addTag(prefix, tag)
		name, labels := splitMetricName(metricName)
		fmt.Fprintf(w, "%s_bucket%s %d\n", name, labels, cumulativeSum)
	}
	h.mu.Unlock()

	// The +Inf bucket is not stored explicitly (see newPrometheusHistogram);
	// by definition it equals the total number of observations (count was
	// snapshotted under the lock above).
	tag := fmt.Sprintf("le=%q", "+Inf")
	metricName := addTag(prefix, tag)
	name, labels := splitMetricName(metricName)
	fmt.Fprintf(w, "%s_bucket%s %d\n", name, labels, count)

	name, labels = splitMetricName(prefix)
	if float64(int64(sum)) == sum {
		// Emit integral sums without a fractional part for nicer output.
		fmt.Fprintf(w, "%s_sum%s %d\n", name, labels, int64(sum))
	} else {
		fmt.Fprintf(w, "%s_sum%s %g\n", name, labels, sum)
	}
	fmt.Fprintf(w, "%s_count%s %d\n", name, labels, count)
}
|
||||
|
||||
// metricType reports the Prometheus metric type used in `# TYPE` metadata lines.
func (h *PrometheusHistogram) metricType() string {
	return "histogram"
}
|
||||
510
vendor/github.com/VictoriaMetrics/metrics/push.go
generated
vendored
Normal file
510
vendor/github.com/VictoriaMetrics/metrics/push.go
generated
vendored
Normal file
@@ -0,0 +1,510 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"compress/gzip"
|
||||
)
|
||||
|
||||
// PushOptions is the list of options, which may be applied to InitPushWithOptions().
//
// The zero value is valid: no extra labels, no extra headers, compression enabled,
// GET method, no WaitGroup.
type PushOptions struct {
	// ExtraLabels is an optional comma-separated list of `label="value"` labels, which must be added to all the metrics before pushing them to pushURL.
	ExtraLabels string

	// Headers is an optional list of HTTP headers to add to every push request to pushURL.
	//
	// Every item in the list must have the form `Header: value`. For example, `Authorization: Custom my-top-secret`.
	Headers []string

	// Whether to disable HTTP request body compression before sending the metrics to pushURL.
	//
	// By default the compression is enabled.
	DisableCompression bool

	// Method is HTTP request method to use when pushing metrics to pushURL.
	//
	// By default the Method is GET.
	Method string

	// Optional WaitGroup for waiting until all the push workers created with this WaitGroup are stopped.
	WaitGroup *sync.WaitGroup
}
|
||||
|
||||
// InitPushWithOptions sets up periodic push for globally registered metrics to the given pushURL with the given interval.
|
||||
//
|
||||
// The periodic push is stopped when ctx is canceled.
|
||||
// It is possible to wait until the background metrics push worker is stopped on a WaitGroup passed via opts.WaitGroup.
|
||||
//
|
||||
// If pushProcessMetrics is set to true, then 'process_*' and `go_*` metrics are also pushed to pushURL.
|
||||
//
|
||||
// opts may contain additional configuration options if non-nil.
|
||||
//
|
||||
// The metrics are pushed to pushURL in Prometheus text exposition format.
|
||||
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
|
||||
//
|
||||
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
|
||||
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
|
||||
//
|
||||
// It is OK calling InitPushWithOptions multiple times with different pushURL -
|
||||
// in this case metrics are pushed to all the provided pushURL urls.
|
||||
func InitPushWithOptions(ctx context.Context, pushURL string, interval time.Duration, pushProcessMetrics bool, opts *PushOptions) error {
|
||||
writeMetrics := func(w io.Writer) {
|
||||
WritePrometheus(w, pushProcessMetrics)
|
||||
}
|
||||
return InitPushExtWithOptions(ctx, pushURL, interval, writeMetrics, opts)
|
||||
}
|
||||
|
||||
// InitPushProcessMetrics sets up periodic push for 'process_*' metrics to the given pushURL with the given interval.
//
// extraLabels may contain comma-separated list of `label="value"` labels, which will be added
// to all the metrics before pushing them to pushURL.
//
// The metrics are pushed to pushURL in Prometheus text exposition format.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
//
// It is OK calling InitPushProcessMetrics multiple times with different pushURL -
// in this case metrics are pushed to all the provided pushURL urls.
func InitPushProcessMetrics(pushURL string, interval time.Duration, extraLabels string) error {
	// Thin wrapper: only process-level metrics are written on each push.
	return InitPushExt(pushURL, interval, extraLabels, WriteProcessMetrics)
}
|
||||
|
||||
// InitPush sets up periodic push for globally registered metrics to the given pushURL with the given interval.
|
||||
//
|
||||
// extraLabels may contain comma-separated list of `label="value"` labels, which will be added
|
||||
// to all the metrics before pushing them to pushURL.
|
||||
//
|
||||
// If pushProcessMetrics is set to true, then 'process_*' and `go_*` metrics are also pushed to pushURL.
|
||||
//
|
||||
// The metrics are pushed to pushURL in Prometheus text exposition format.
|
||||
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
|
||||
//
|
||||
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
|
||||
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
|
||||
//
|
||||
// It is OK calling InitPush multiple times with different pushURL -
|
||||
// in this case metrics are pushed to all the provided pushURL urls.
|
||||
func InitPush(pushURL string, interval time.Duration, extraLabels string, pushProcessMetrics bool) error {
|
||||
writeMetrics := func(w io.Writer) {
|
||||
WritePrometheus(w, pushProcessMetrics)
|
||||
}
|
||||
return InitPushExt(pushURL, interval, extraLabels, writeMetrics)
|
||||
}
|
||||
|
||||
// PushMetrics pushes globally registered metrics to pushURL.
|
||||
//
|
||||
// If pushProcessMetrics is set to true, then 'process_*' and `go_*` metrics are also pushed to pushURL.
|
||||
//
|
||||
// opts may contain additional configuration options if non-nil.
|
||||
//
|
||||
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
|
||||
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
|
||||
func PushMetrics(ctx context.Context, pushURL string, pushProcessMetrics bool, opts *PushOptions) error {
|
||||
writeMetrics := func(w io.Writer) {
|
||||
WritePrometheus(w, pushProcessMetrics)
|
||||
}
|
||||
return PushMetricsExt(ctx, pushURL, writeMetrics, opts)
|
||||
}
|
||||
|
||||
// InitPushWithOptions sets up periodic push for metrics from s to the given pushURL with the given interval.
//
// The periodic push is stopped when the ctx is canceled.
// It is possible to wait until the background metrics push worker is stopped on a WaitGroup passed via opts.WaitGroup.
//
// opts may contain additional configuration options if non-nil.
//
// The metrics are pushed to pushURL in Prometheus text exposition format.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
//
// It is OK calling InitPushWithOptions multiple times with different pushURL -
// in this case metrics are pushed to all the provided pushURL urls.
func (s *Set) InitPushWithOptions(ctx context.Context, pushURL string, interval time.Duration, opts *PushOptions) error {
	// Thin wrapper: the set's own WritePrometheus is the metrics source.
	return InitPushExtWithOptions(ctx, pushURL, interval, s.WritePrometheus, opts)
}
|
||||
|
||||
// InitPush sets up periodic push for metrics from s to the given pushURL with the given interval.
//
// extraLabels may contain comma-separated list of `label="value"` labels, which will be added
// to all the metrics before pushing them to pushURL.
//
// The metrics are pushed to pushURL in Prometheus text exposition format.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
//
// It is OK calling InitPush multiple times with different pushURL -
// in this case metrics are pushed to all the provided pushURL urls.
func (s *Set) InitPush(pushURL string, interval time.Duration, extraLabels string) error {
	// Thin wrapper: the set's own WritePrometheus is the metrics source.
	return InitPushExt(pushURL, interval, extraLabels, s.WritePrometheus)
}
|
||||
|
||||
// PushMetrics pushes s metrics to pushURL.
//
// opts may contain additional configuration options if non-nil.
//
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
func (s *Set) PushMetrics(ctx context.Context, pushURL string, opts *PushOptions) error {
	// One-shot push of this set; no background worker is started.
	return PushMetricsExt(ctx, pushURL, s.WritePrometheus, opts)
}
|
||||
|
||||
// InitPushExt sets up periodic push for metrics obtained by calling writeMetrics with the given interval.
|
||||
//
|
||||
// extraLabels may contain comma-separated list of `label="value"` labels, which will be added
|
||||
// to all the metrics before pushing them to pushURL.
|
||||
//
|
||||
// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
|
||||
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
|
||||
//
|
||||
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
|
||||
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
|
||||
//
|
||||
// It is OK calling InitPushExt multiple times with different pushURL -
|
||||
// in this case metrics are pushed to all the provided pushURL urls.
|
||||
//
|
||||
// It is OK calling InitPushExt multiple times with different writeMetrics -
|
||||
// in this case all the metrics generated by writeMetrics callbacks are written to pushURL.
|
||||
func InitPushExt(pushURL string, interval time.Duration, extraLabels string, writeMetrics func(w io.Writer)) error {
|
||||
opts := &PushOptions{
|
||||
ExtraLabels: extraLabels,
|
||||
}
|
||||
return InitPushExtWithOptions(context.Background(), pushURL, interval, writeMetrics, opts)
|
||||
}
|
||||
|
||||
// InitPushExtWithOptions sets up periodic push for metrics obtained by calling writeMetrics with the given interval.
//
// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// The periodic push is stopped when the ctx is canceled.
// It is possible to wait until the background metrics push worker is stopped on a WaitGroup passed via opts.WaitGroup.
//
// opts may contain additional configuration options if non-nil.
//
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
//
// It is OK calling InitPushExtWithOptions multiple times with different pushURL -
// in this case metrics are pushed to all the provided pushURL urls.
//
// It is OK calling InitPushExtWithOptions multiple times with different writeMetrics -
// in this case all the metrics generated by writeMetrics callbacks are written to pushURL.
func InitPushExtWithOptions(ctx context.Context, pushURL string, interval time.Duration, writeMetrics func(w io.Writer), opts *PushOptions) error {
	pc, err := newPushContext(pushURL, opts)
	if err != nil {
		return err
	}

	// validate interval
	if interval <= 0 {
		return fmt.Errorf("interval must be positive; got %s", interval)
	}
	// Expose the configured interval as a self-monitoring gauge-like metric.
	pushMetricsSet.GetOrCreateFloatCounter(fmt.Sprintf(`metrics_push_interval_seconds{url=%q}`, pc.pushURLRedacted)).Set(interval.Seconds())

	// wg.Add must happen before the goroutine starts so callers can Wait safely.
	var wg *sync.WaitGroup
	if opts != nil {
		wg = opts.WaitGroup
		if wg != nil {
			wg.Add(1)
		}
	}
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		stopCh := ctx.Done()
		for {
			select {
			case <-ticker.C:
				// Bound each push by interval+1s so a slow endpoint can't
				// back up subsequent pushes indefinitely.
				ctxLocal, cancel := context.WithTimeout(ctx, interval+time.Second)
				err := pc.pushMetrics(ctxLocal, writeMetrics)
				cancel()
				if err != nil {
					log.Printf("ERROR: metrics.push: %s", err)
				}
			case <-stopCh:
				// ctx canceled: signal completion (if requested) and exit.
				if wg != nil {
					wg.Done()
				}
				return
			}
		}
	}()

	return nil
}
|
||||
|
||||
// PushMetricsExt pushes metrics generated by writeMetrics to pushURL.
//
// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// opts may contain additional configuration options if non-nil.
//
// It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
// https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format
func PushMetricsExt(ctx context.Context, pushURL string, writeMetrics func(w io.Writer), opts *PushOptions) error {
	// One-shot push: build the context, push once, no background worker.
	pc, err := newPushContext(pushURL, opts)
	if err != nil {
		return err
	}
	return pc.pushMetrics(ctx, writeMetrics)
}
|
||||
|
||||
// pushContext holds the validated configuration and self-monitoring metrics
// for pushing to a single pushURL. It is built by newPushContext.
type pushContext struct {
	pushURL            *url.URL    // validated destination URL
	method             string      // HTTP method; defaults to GET (see newPushContext)
	pushURLRedacted    string      // pushURL with userinfo password hidden; safe for logs/labels
	extraLabels        string      // labels injected into every pushed metric line
	headers            http.Header // extra headers added to every push request
	disableCompression bool        // when true, the body is sent uncompressed

	client *http.Client // reused across pushes

	pushesTotal      *Counter
	bytesPushedTotal *Counter
	pushBlockSize    *Histogram
	pushDuration     *Histogram
	pushErrors       *Counter
}
|
||||
|
||||
// newPushContext validates pushURL and opts and builds a pushContext together
// with its self-monitoring metrics registered in pushMetricsSet.
//
// A nil opts is treated as the zero PushOptions.
func newPushContext(pushURL string, opts *PushOptions) (*pushContext, error) {
	if opts == nil {
		opts = &PushOptions{}
	}

	// validate pushURL
	pu, err := url.Parse(pushURL)
	if err != nil {
		return nil, fmt.Errorf("cannot parse pushURL=%q: %w", pushURL, err)
	}
	if pu.Scheme != "http" && pu.Scheme != "https" {
		return nil, fmt.Errorf("unsupported scheme in pushURL=%q; expecting 'http' or 'https'", pushURL)
	}
	if pu.Host == "" {
		return nil, fmt.Errorf("missing host in pushURL=%q", pushURL)
	}

	// Default to GET when no method is specified.
	method := opts.Method
	if method == "" {
		method = http.MethodGet
	}

	// validate ExtraLabels
	extraLabels := opts.ExtraLabels
	if err := validateTags(extraLabels); err != nil {
		return nil, fmt.Errorf("invalid extraLabels=%q: %w", extraLabels, err)
	}

	// validate Headers: each entry must be `Name: value`.
	headers := make(http.Header)
	for _, h := range opts.Headers {
		n := strings.IndexByte(h, ':')
		if n < 0 {
			return nil, fmt.Errorf("missing `:` delimiter in the header %q", h)
		}
		name := strings.TrimSpace(h[:n])
		value := strings.TrimSpace(h[n+1:])
		headers.Add(name, value)
	}

	// Redacted URL hides any userinfo password, so it is safe to embed
	// in log lines and metric labels below.
	pushURLRedacted := pu.Redacted()
	client := &http.Client{}
	return &pushContext{
		pushURL:            pu,
		method:             method,
		pushURLRedacted:    pushURLRedacted,
		extraLabels:        extraLabels,
		headers:            headers,
		disableCompression: opts.DisableCompression,

		client: client,

		pushesTotal:      pushMetricsSet.GetOrCreateCounter(fmt.Sprintf(`metrics_push_total{url=%q}`, pushURLRedacted)),
		bytesPushedTotal: pushMetricsSet.GetOrCreateCounter(fmt.Sprintf(`metrics_push_bytes_pushed_total{url=%q}`, pushURLRedacted)),
		pushBlockSize:    pushMetricsSet.GetOrCreateHistogram(fmt.Sprintf(`metrics_push_block_size_bytes{url=%q}`, pushURLRedacted)),
		pushDuration:     pushMetricsSet.GetOrCreateHistogram(fmt.Sprintf(`metrics_push_duration_seconds{url=%q}`, pushURLRedacted)),
		pushErrors:       pushMetricsSet.GetOrCreateCounter(fmt.Sprintf(`metrics_push_errors_total{url=%q}`, pushURLRedacted)),
	}, nil
}
|
||||
|
||||
// pushMetrics collects metrics via writeMetrics, optionally injects extraLabels
// and gzip-compresses the payload, then sends it to pc.pushURL in a single
// HTTP request, updating the self-monitoring counters along the way.
func (pc *pushContext) pushMetrics(ctx context.Context, writeMetrics func(w io.Writer)) error {
	bb := getBytesBuffer()
	defer putBytesBuffer(bb)

	writeMetrics(bb)

	if len(pc.extraLabels) > 0 {
		// addExtraLabels cannot rewrite in place, so snapshot bb into a
		// temporary pooled buffer and rebuild bb from it.
		bbTmp := getBytesBuffer()
		bbTmp.B = append(bbTmp.B[:0], bb.B...)
		bb.B = addExtraLabels(bb.B[:0], bbTmp.B, pc.extraLabels)
		putBytesBuffer(bbTmp)
	}
	if !pc.disableCompression {
		// Same snapshot trick: compress the copy back into the (emptied) bb.
		bbTmp := getBytesBuffer()
		bbTmp.B = append(bbTmp.B[:0], bb.B...)
		bb.B = bb.B[:0]
		zw := getGzipWriter(bb)
		if _, err := zw.Write(bbTmp.B); err != nil {
			panic(fmt.Errorf("BUG: cannot write %d bytes to gzip writer: %s", len(bbTmp.B), err))
		}
		if err := zw.Close(); err != nil {
			panic(fmt.Errorf("BUG: cannot flush metrics to gzip writer: %s", err))
		}
		putGzipWriter(zw)
		putBytesBuffer(bbTmp)
	}

	// Update metrics
	pc.pushesTotal.Inc()
	blockLen := len(bb.B)
	pc.bytesPushedTotal.Add(blockLen)
	pc.pushBlockSize.Update(float64(blockLen))

	// Prepare the request to send to pc.pushURL
	reqBody := bytes.NewReader(bb.B)
	req, err := http.NewRequestWithContext(ctx, pc.method, pc.pushURL.String(), reqBody)
	if err != nil {
		panic(fmt.Errorf("BUG: metrics.push: cannot initialize request for metrics push to %q: %w", pc.pushURLRedacted, err))
	}

	req.Header.Set("Content-Type", "text/plain")
	// Apply user-supplied headers; they may intentionally overwrite `Content-Type`.
	for name, values := range pc.headers {
		for _, value := range values {
			req.Header.Add(name, value)
		}
	}
	if !pc.disableCompression {
		req.Header.Set("Content-Encoding", "gzip")
	}

	// Perform the request
	startTime := time.Now()
	resp, err := pc.client.Do(req)
	pc.pushDuration.UpdateDuration(startTime)
	if err != nil {
		if errors.Is(err, context.Canceled) {
			// Context cancellation is a normal shutdown path, not a push error.
			return nil
		}
		pc.pushErrors.Inc()
		return fmt.Errorf("cannot push metrics to %q: %s", pc.pushURLRedacted, err)
	}
	if resp.StatusCode/100 != 2 {
		// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16; io.ReadAll is
		// the drop-in replacement. Kept as-is here because removing the last use
		// would leave an unused `io/ioutil` import at file level.
		body, _ := ioutil.ReadAll(resp.Body)
		_ = resp.Body.Close()
		pc.pushErrors.Inc()
		return fmt.Errorf("unexpected status code in response from %q: %d; expecting 2xx; response body: %q", pc.pushURLRedacted, resp.StatusCode, body)
	}
	_ = resp.Body.Close()
	return nil
}
|
||||
|
||||
// pushMetricsSet holds the self-monitoring metrics describing the push process
// itself (push counts, bytes, durations, errors).
var pushMetricsSet = NewSet()

// writePushMetrics writes the push self-monitoring metrics to w.
func writePushMetrics(w io.Writer) {
	pushMetricsSet.WritePrometheus(w)
}
|
||||
|
||||
// addExtraLabels appends src (Prometheus text exposition lines) to dst with
// extraLabels injected into every non-comment metric line, and returns dst.
// Comment lines (starting with '#') are copied unchanged; empty lines are dropped.
func addExtraLabels(dst, src []byte, extraLabels string) []byte {
	for len(src) > 0 {
		// Consume one line (with or without a trailing newline).
		var line []byte
		n := bytes.IndexByte(src, '\n')
		if n >= 0 {
			line = src[:n]
			src = src[n+1:]
		} else {
			line = src
			src = nil
		}
		line = bytes.TrimSpace(line)
		if len(line) == 0 {
			// Skip empty lines
			continue
		}
		if bytes.HasPrefix(line, bashBytes) {
			// Copy comments as is
			dst = append(dst, line...)
			dst = append(dst, '\n')
			continue
		}
		n = bytes.IndexByte(line, '{')
		if n >= 0 {
			// Metric already has labels - splice extraLabels in right after '{'.
			dst = append(dst, line[:n+1]...)
			dst = append(dst, extraLabels...)
			dst = append(dst, ',')
			dst = append(dst, line[n+1:]...)
		} else {
			// No labels yet - wrap extraLabels in fresh braces before the value.
			n = bytes.LastIndexByte(line, ' ')
			if n < 0 {
				panic(fmt.Errorf("BUG: missing whitespace between metric name and metric value in Prometheus text exposition line %q", line))
			}
			dst = append(dst, line[:n]...)
			dst = append(dst, '{')
			dst = append(dst, extraLabels...)
			dst = append(dst, '}')
			dst = append(dst, line[n:]...)
		}
		dst = append(dst, '\n')
	}
	return dst
}

// bashBytes is the '#' prefix marking comment lines in the exposition format.
// NOTE(review): the name looks like a typo for "hashBytes"; kept unchanged
// since it is referenced above.
var bashBytes = []byte("#")
|
||||
|
||||
// getBytesBuffer returns a bytesBuffer from the pool, allocating a fresh one
// when the pool is empty. The returned buffer is always empty.
func getBytesBuffer() *bytesBuffer {
	if v := bytesBufferPool.Get(); v != nil {
		return v.(*bytesBuffer)
	}
	return &bytesBuffer{}
}

// putBytesBuffer resets bb (keeping its capacity) and returns it to the pool.
func putBytesBuffer(bb *bytesBuffer) {
	bb.B = bb.B[:0]
	bytesBufferPool.Put(bb)
}

var bytesBufferPool sync.Pool

// bytesBuffer is a minimal growable byte buffer implementing io.Writer.
type bytesBuffer struct {
	B []byte
}

// Write appends p to the buffer; it never fails.
func (bb *bytesBuffer) Write(p []byte) (int, error) {
	bb.B = append(bb.B, p...)
	return len(p), nil
}
|
||||
|
||||
func getGzipWriter(w io.Writer) *gzip.Writer {
|
||||
v := gzipWriterPool.Get()
|
||||
if v == nil {
|
||||
return gzip.NewWriter(w)
|
||||
}
|
||||
zw := v.(*gzip.Writer)
|
||||
zw.Reset(w)
|
||||
return zw
|
||||
}
|
||||
|
||||
func putGzipWriter(zw *gzip.Writer) {
|
||||
zw.Reset(io.Discard)
|
||||
gzipWriterPool.Put(zw)
|
||||
}
|
||||
|
||||
var gzipWriterPool sync.Pool
|
||||
703
vendor/github.com/VictoriaMetrics/metrics/set.go
generated
vendored
Normal file
703
vendor/github.com/VictoriaMetrics/metrics/set.go
generated
vendored
Normal file
@@ -0,0 +1,703 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Set is a set of metrics.
//
// Metrics belonging to a set are exported separately from global metrics.
//
// Set.WritePrometheus must be called for exporting metrics from the set.
type Set struct {
	mu        sync.Mutex              // guards a, m, summaries and metricsWriters
	a         []*namedMetric          // metrics kept in export order (sorted by WritePrometheus)
	m         map[string]*namedMetric // lookup by full metric name (with labels)
	summaries []*Summary              // summaries needing quantile refresh before export

	metricsWriters []func(w io.Writer) // extra writers invoked after the set's own metrics
}
|
||||
|
||||
// NewSet creates new set of metrics.
|
||||
//
|
||||
// Pass the set to RegisterSet() function in order to export its metrics via global WritePrometheus() call.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
m: make(map[string]*namedMetric),
|
||||
}
|
||||
}
|
||||
|
||||
// WritePrometheus writes all the metrics from s to w in Prometheus format.
//
// Metrics are emitted in a stable order (grouped by metric family) and,
// when metadata is enabled, each family is preceded by a single `# TYPE` line.
func (s *Set) WritePrometheus(w io.Writer) {
	// Collect all the metrics in in-memory buffer in order to prevent from long locking due to slow w.
	var bb bytes.Buffer
	lessFunc := func(i, j int) bool {
		// the sorting must be stable.
		// see edge cases why we can't simply do `s.a[i].name < s.a[j].name` here:
		// https://github.com/VictoriaMetrics/metrics/pull/99#issuecomment-3277072175

		// sort by metric family name first, to group the same metric family in one place.
		fName1, fName2 := getMetricFamily(s.a[i].name), getMetricFamily(s.a[j].name)
		if fName1 != fName2 {
			return fName1 < fName2
		}

		// Only summary and quantile(s) have different metric types.
		// Sorting by metric type will stabilize the order for summary and quantile(s).
		mType1 := s.a[i].metric.metricType()
		mType2 := s.a[j].metric.metricType()
		if mType1 != mType2 {
			return mType1 < mType2
		}

		// lastly by metric names, which is for quantiles and histogram buckets.
		return s.a[i].name < s.a[j].name
	}
	s.mu.Lock()
	for _, sm := range s.summaries {
		sm.updateQuantiles()
	}
	if !sort.SliceIsSorted(s.a, lessFunc) {
		sort.Slice(s.a, lessFunc)
	}
	// Snapshot the slice so marshalTo below can run without holding s.mu.
	sa := append([]*namedMetric(nil), s.a...)
	metricsWriters := s.metricsWriters
	s.mu.Unlock()

	// metricsWithMetadataBuf is used to hold marshalTo temporary, and decide whether metadata is needed.
	// it will be written to `bb` at the end and then reset for next *namedMetric in for-loop.
	var metricsWithMetadataBuf bytes.Buffer
	var prevMetricFamily string
	for _, nm := range sa {
		if !isMetadataEnabled() {
			// Call marshalTo without the global lock, since certain metric types such as Gauge
			// can call a callback, which, in turn, can try calling s.mu.Lock again.
			nm.metric.marshalTo(nm.name, &bb)
			continue
		}

		metricsWithMetadataBuf.Reset()
		// Call marshalTo without the global lock, since certain metric types such as Gauge
		// can call a callback, which, in turn, can try calling s.mu.Lock again.
		nm.metric.marshalTo(nm.name, &metricsWithMetadataBuf)
		if metricsWithMetadataBuf.Len() == 0 {
			continue
		}

		metricFamily := getMetricFamily(nm.name)
		if metricFamily != prevMetricFamily {
			// write metadata only once per metric family
			metricType := nm.metric.metricType()
			writeMetadata(&bb, metricFamily, metricType)
			prevMetricFamily = metricFamily
		}
		bb.Write(metricsWithMetadataBuf.Bytes())
	}
	w.Write(bb.Bytes())

	// Invoke any auxiliary metrics writers registered on this set.
	for _, writeMetrics := range metricsWriters {
		writeMetrics(w)
	}
}
|
||||
|
||||
// NewHistogram creates and returns new histogram in s with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
//   - foo
//   - foo{bar="baz"}
//   - foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
//
// Panics (via registerMetric) if a metric with the same name is already registered.
func (s *Set) NewHistogram(name string) *Histogram {
	h := &Histogram{}
	s.registerMetric(name, h)
	return h
}
|
||||
|
||||
// GetOrCreateHistogram returns registered histogram in s with the given name
// or creates new histogram if s doesn't contain histogram with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
//   - foo
//   - foo{bar="baz"}
//   - foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewHistogram instead of GetOrCreateHistogram.
func (s *Set) GetOrCreateHistogram(name string) *Histogram {
	// Fast path: the histogram is already registered.
	s.mu.Lock()
	nm := s.m[name]
	s.mu.Unlock()
	if nm == nil {
		// Slow path - create and register missing histogram.
		if err := ValidateMetric(name); err != nil {
			panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
		}
		nmNew := &namedMetric{
			name:   name,
			metric: &Histogram{},
		}
		// Double-checked registration: another goroutine may have registered
		// the same name between the unlock above and the lock below.
		s.mu.Lock()
		nm = s.m[name]
		if nm == nil {
			nm = nmNew
			s.m[name] = nm
			s.a = append(s.a, nm)
		}
		s.mu.Unlock()
	}
	h, ok := nm.metric.(*Histogram)
	if !ok {
		panic(fmt.Errorf("BUG: metric %q isn't a Histogram. It is %T", name, nm.metric))
	}
	return h
}
|
||||
|
||||
// NewPrometheusHistogram creates and returns new PrometheusHistogram in s
|
||||
// with the given name and PrometheusHistogramDefaultBuckets.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned histogram is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewPrometheusHistogram(name string) *PrometheusHistogram {
|
||||
return s.NewPrometheusHistogramExt(name, PrometheusHistogramDefaultBuckets)
|
||||
}
|
||||
|
||||
// NewPrometheusHistogramExt creates and returns new PrometheusHistogram in s
|
||||
// with the given name and upperBounds.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned histogram is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewPrometheusHistogramExt(name string, upperBounds []float64) *PrometheusHistogram {
|
||||
h := newPrometheusHistogram(upperBounds)
|
||||
s.registerMetric(name, h)
|
||||
return h
|
||||
}
|
||||
|
||||
// GetOrCreatePrometheusHistogram returns registered prometheus histogram in s
|
||||
// with the given name or creates new histogram if s doesn't contain histogram
|
||||
// with the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned histogram is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewPrometheusHistogram instead of GetOrCreatePrometheusHistogram.
|
||||
func (s *Set) GetOrCreatePrometheusHistogram(name string) *PrometheusHistogram {
|
||||
return s.GetOrCreatePrometheusHistogramExt(name, PrometheusHistogramDefaultBuckets)
|
||||
}
|
||||
|
||||
// GetOrCreatePrometheusHistogramExt returns registered prometheus histogram in
// s with the given name or creates new histogram if s doesn't contain
// histogram with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// - foo
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// The returned histogram is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewPrometheusHistogramExt instead of GetOrCreatePrometheusHistogramExt.
//
// NOTE(review): unlike GetOrCreateSummaryExt, the requested upperBounds are
// not compared against the buckets of an already-registered histogram, so two
// callers with different bounds silently share the first registration.
func (s *Set) GetOrCreatePrometheusHistogramExt(name string, upperBounds []float64) *PrometheusHistogram {
	// Fast path: look up an existing metric under a briefly-held lock.
	s.mu.Lock()
	nm := s.m[name]
	s.mu.Unlock()
	if nm == nil {
		// Slow path - create and register missing histogram.
		if err := ValidateMetric(name); err != nil {
			panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
		}
		// Double-checked registration: another goroutine may have registered
		// the same name while the lock was released.
		nmNew := &namedMetric{
			name:   name,
			metric: newPrometheusHistogram(upperBounds),
		}
		s.mu.Lock()
		nm = s.m[name]
		if nm == nil {
			nm = nmNew
			s.m[name] = nm
			s.a = append(s.a, nm)
		}
		s.mu.Unlock()
	}
	// Guard against the same name having been registered with a different type.
	h, ok := nm.metric.(*PrometheusHistogram)
	if !ok {
		panic(fmt.Errorf("BUG: metric %q isn't a PrometheusHistogram. It is %T", name, nm.metric))
	}
	return h
}
|
||||
|
||||
// NewCounter registers and returns new counter with the given name in the s.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewCounter(name string) *Counter {
|
||||
c := &Counter{}
|
||||
s.registerMetric(name, c)
|
||||
return c
|
||||
}
|
||||
|
||||
// GetOrCreateCounter returns registered counter in s with the given name
// or creates new counter if s doesn't contain counter with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// - foo
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// The returned counter is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewCounter instead of GetOrCreateCounter.
func (s *Set) GetOrCreateCounter(name string) *Counter {
	// Fast path: look up an existing metric under a briefly-held lock.
	s.mu.Lock()
	nm := s.m[name]
	s.mu.Unlock()
	if nm == nil {
		// Slow path - create and register missing counter.
		if err := ValidateMetric(name); err != nil {
			panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
		}
		// Double-checked registration: another goroutine may have registered
		// the same name while the lock was released.
		nmNew := &namedMetric{
			name:   name,
			metric: &Counter{},
		}
		s.mu.Lock()
		nm = s.m[name]
		if nm == nil {
			nm = nmNew
			s.m[name] = nm
			s.a = append(s.a, nm)
		}
		s.mu.Unlock()
	}
	// Guard against the same name having been registered with a different type.
	c, ok := nm.metric.(*Counter)
	if !ok {
		panic(fmt.Errorf("BUG: metric %q isn't a Counter. It is %T", name, nm.metric))
	}
	return c
}
|
||||
|
||||
// NewFloatCounter registers and returns new FloatCounter with the given name in the s.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned FloatCounter is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewFloatCounter(name string) *FloatCounter {
|
||||
c := &FloatCounter{}
|
||||
s.registerMetric(name, c)
|
||||
return c
|
||||
}
|
||||
|
||||
// GetOrCreateFloatCounter returns registered FloatCounter in s with the given name
|
||||
// or creates new FloatCounter if s doesn't contain FloatCounter with the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned FloatCounter is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewFloatCounter instead of GetOrCreateFloatCounter.
|
||||
func (s *Set) GetOrCreateFloatCounter(name string) *FloatCounter {
|
||||
s.mu.Lock()
|
||||
nm := s.m[name]
|
||||
s.mu.Unlock()
|
||||
if nm == nil {
|
||||
// Slow path - create and register missing counter.
|
||||
if err := ValidateMetric(name); err != nil {
|
||||
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
|
||||
}
|
||||
nmNew := &namedMetric{
|
||||
name: name,
|
||||
metric: &FloatCounter{},
|
||||
}
|
||||
s.mu.Lock()
|
||||
nm = s.m[name]
|
||||
if nm == nil {
|
||||
nm = nmNew
|
||||
s.m[name] = nm
|
||||
s.a = append(s.a, nm)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
c, ok := nm.metric.(*FloatCounter)
|
||||
if !ok {
|
||||
panic(fmt.Errorf("BUG: metric %q isn't a Counter. It is %T", name, nm.metric))
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// NewGauge registers and returns gauge with the given name in s, which calls f
|
||||
// to obtain gauge value.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// f must be safe for concurrent calls.
|
||||
//
|
||||
// The returned gauge is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewGauge(name string, f func() float64) *Gauge {
|
||||
g := &Gauge{
|
||||
f: f,
|
||||
}
|
||||
s.registerMetric(name, g)
|
||||
return g
|
||||
}
|
||||
|
||||
// GetOrCreateGauge returns registered gauge with the given name in s
// or creates new gauge if s doesn't contain gauge with the given name.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// - foo
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// The returned gauge is safe to use from concurrent goroutines.
//
// Performance tip: prefer NewGauge instead of GetOrCreateGauge.
//
// NOTE: if a gauge with the given name already exists, the provided f is
// ignored - the existing gauge keeps its original callback.
func (s *Set) GetOrCreateGauge(name string, f func() float64) *Gauge {
	// Fast path: look up an existing metric under a briefly-held lock.
	s.mu.Lock()
	nm := s.m[name]
	s.mu.Unlock()
	if nm == nil {
		// Slow path - create and register missing gauge.
		if err := ValidateMetric(name); err != nil {
			panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
		}
		// Double-checked registration: another goroutine may have registered
		// the same name while the lock was released.
		nmNew := &namedMetric{
			name: name,
			metric: &Gauge{
				f: f,
			},
		}
		s.mu.Lock()
		nm = s.m[name]
		if nm == nil {
			nm = nmNew
			s.m[name] = nm
			s.a = append(s.a, nm)
		}
		s.mu.Unlock()
	}
	// Guard against the same name having been registered with a different type.
	g, ok := nm.metric.(*Gauge)
	if !ok {
		panic(fmt.Errorf("BUG: metric %q isn't a Gauge. It is %T", name, nm.metric))
	}
	return g
}
|
||||
|
||||
// NewSummary creates and returns new summary with the given name in s.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewSummary(name string) *Summary {
|
||||
return s.NewSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles)
|
||||
}
|
||||
|
||||
// NewSummaryExt creates and returns new summary in s with the given name,
// window and quantiles.
//
// name must be valid Prometheus-compatible metric with possible labels.
// For instance,
//
// - foo
// - foo{bar="baz"}
// - foo{bar="baz",aaa="b"}
//
// The returned summary is safe to use from concurrent goroutines.
//
// Panics if name is invalid or already registered.
func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
	if err := ValidateMetric(name); err != nil {
		panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
	}
	sm := newSummary(window, quantiles)

	s.mu.Lock()
	// defer will unlock in case of panic
	// checks in tests
	defer s.mu.Unlock()

	// Register the summary itself, its entry in the global per-window swap
	// registry, and the auxiliary per-quantile metrics under one lock hold,
	// so readers never observe a partially-registered summary.
	s.mustRegisterLocked(name, sm, false)
	registerSummaryLocked(sm)
	s.registerSummaryQuantilesLocked(name, sm)
	s.summaries = append(s.summaries, sm)
	return sm
}
|
||||
|
||||
// GetOrCreateSummary returns registered summary with the given name in s
|
||||
// or creates new summary if s doesn't contain summary with the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewSummary instead of GetOrCreateSummary.
|
||||
func (s *Set) GetOrCreateSummary(name string) *Summary {
|
||||
return s.GetOrCreateSummaryExt(name, defaultSummaryWindow, defaultSummaryQuantiles)
|
||||
}
|
||||
|
||||
// GetOrCreateSummaryExt returns registered summary with the given name,
|
||||
// window and quantiles in s or creates new summary if s doesn't
|
||||
// contain summary with the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewSummaryExt instead of GetOrCreateSummaryExt.
|
||||
func (s *Set) GetOrCreateSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
|
||||
s.mu.Lock()
|
||||
nm := s.m[name]
|
||||
s.mu.Unlock()
|
||||
if nm == nil {
|
||||
// Slow path - create and register missing summary.
|
||||
if err := ValidateMetric(name); err != nil {
|
||||
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
|
||||
}
|
||||
sm := newSummary(window, quantiles)
|
||||
nmNew := &namedMetric{
|
||||
name: name,
|
||||
metric: sm,
|
||||
}
|
||||
s.mu.Lock()
|
||||
nm = s.m[name]
|
||||
if nm == nil {
|
||||
nm = nmNew
|
||||
s.m[name] = nm
|
||||
s.a = append(s.a, nm)
|
||||
registerSummaryLocked(sm)
|
||||
s.registerSummaryQuantilesLocked(name, sm)
|
||||
}
|
||||
s.summaries = append(s.summaries, sm)
|
||||
s.mu.Unlock()
|
||||
}
|
||||
sm, ok := nm.metric.(*Summary)
|
||||
if !ok {
|
||||
panic(fmt.Errorf("BUG: metric %q isn't a Summary. It is %T", name, nm.metric))
|
||||
}
|
||||
if sm.window != window {
|
||||
panic(fmt.Errorf("BUG: invalid window requested for the summary %q; requested %s; need %s", name, window, sm.window))
|
||||
}
|
||||
if !isEqualQuantiles(sm.quantiles, quantiles) {
|
||||
panic(fmt.Errorf("BUG: invalid quantiles requested from the summary %q; requested %v; need %v", name, quantiles, sm.quantiles))
|
||||
}
|
||||
return sm
|
||||
}
|
||||
|
||||
func (s *Set) registerSummaryQuantilesLocked(name string, sm *Summary) {
|
||||
for i, q := range sm.quantiles {
|
||||
quantileValueName := addTag(name, fmt.Sprintf(`quantile="%g"`, q))
|
||||
qv := &quantileValue{
|
||||
sm: sm,
|
||||
idx: i,
|
||||
}
|
||||
s.mustRegisterLocked(quantileValueName, qv, true)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Set) registerMetric(name string, m metric) {
|
||||
if err := ValidateMetric(name); err != nil {
|
||||
panic(fmt.Errorf("BUG: invalid metric name %q: %s", name, err))
|
||||
}
|
||||
s.mu.Lock()
|
||||
// defer will unlock in case of panic
|
||||
// checks in test
|
||||
defer s.mu.Unlock()
|
||||
s.mustRegisterLocked(name, m, false)
|
||||
}
|
||||
|
||||
// mustRegisterLocked registers given metric with the given name.
|
||||
//
|
||||
// Panics if the given name was already registered before.
|
||||
func (s *Set) mustRegisterLocked(name string, m metric, isAux bool) {
|
||||
nm, ok := s.m[name]
|
||||
if !ok {
|
||||
nm = &namedMetric{
|
||||
name: name,
|
||||
metric: m,
|
||||
isAux: isAux,
|
||||
}
|
||||
s.m[name] = nm
|
||||
s.a = append(s.a, nm)
|
||||
}
|
||||
if ok {
|
||||
panic(fmt.Errorf("BUG: metric %q is already registered", name))
|
||||
}
|
||||
}
|
||||
|
||||
// UnregisterMetric removes metric with the given name from s.
|
||||
//
|
||||
// True is returned if the metric has been removed.
|
||||
// False is returned if the given metric is missing in s.
|
||||
func (s *Set) UnregisterMetric(name string) bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
nm, ok := s.m[name]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if nm.isAux {
|
||||
// Do not allow deleting auxiliary metrics such as summary_metric{quantile="..."}
|
||||
// Such metrics must be deleted via parent metric name, e.g. summary_metric .
|
||||
return false
|
||||
}
|
||||
return s.unregisterMetricLocked(nm)
|
||||
}
|
||||
|
||||
// unregisterMetricLocked removes nm from all the Set's registries: the lookup
// map s.m, the ordered list s.a and - for summaries - the per-quantile aux
// metrics, s.summaries and the global swap registry. The caller must hold s.mu.
// Always returns true.
func (s *Set) unregisterMetricLocked(nm *namedMetric) bool {
	name := nm.name
	delete(s.m, name)

	// deleteFromList removes the entry with the given name from the ordered
	// list s.a; a miss means s.m and s.a got out of sync, which is a bug.
	deleteFromList := func(metricName string) {
		for i, nm := range s.a {
			if nm.name == metricName {
				s.a = append(s.a[:i], s.a[i+1:]...)
				return
			}
		}
		panic(fmt.Errorf("BUG: cannot find metric %q in the list of registered metrics", name))
	}

	// remove metric from s.a
	deleteFromList(name)

	sm, ok := nm.metric.(*Summary)
	if !ok {
		// There is no need in cleaning up non-summary metrics.
		return true
	}

	// cleanup registry from per-quantile metrics
	for _, q := range sm.quantiles {
		// Must mirror the naming scheme used in registerSummaryQuantilesLocked.
		quantileValueName := addTag(name, fmt.Sprintf(`quantile="%g"`, q))
		delete(s.m, quantileValueName)
		deleteFromList(quantileValueName)
	}

	// Remove sm from s.summaries
	found := false
	for i, xsm := range s.summaries {
		if xsm == sm {
			s.summaries = append(s.summaries[:i], s.summaries[i+1:]...)
			found = true
			break
		}
	}
	if !found {
		panic(fmt.Errorf("BUG: cannot find summary %q in the list of registered summaries", name))
	}
	// Also drop sm from the global per-window rotation registry.
	unregisterSummary(sm)
	return true
}
|
||||
|
||||
// UnregisterAllMetrics de-registers all metrics registered in s.
|
||||
//
|
||||
// It also de-registers writeMetrics callbacks passed to RegisterMetricsWriter.
|
||||
func (s *Set) UnregisterAllMetrics() {
|
||||
metricNames := s.ListMetricNames()
|
||||
for _, name := range metricNames {
|
||||
s.UnregisterMetric(name)
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.metricsWriters = nil
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// ListMetricNames returns sorted list of all the metrics in s.
|
||||
//
|
||||
// The returned list doesn't include metrics generated by metricsWriter passed to RegisterMetricsWriter.
|
||||
func (s *Set) ListMetricNames() []string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
metricNames := make([]string, 0, len(s.m))
|
||||
for _, nm := range s.m {
|
||||
if nm.isAux {
|
||||
continue
|
||||
}
|
||||
metricNames = append(metricNames, nm.name)
|
||||
}
|
||||
sort.Strings(metricNames)
|
||||
return metricNames
|
||||
}
|
||||
|
||||
// RegisterMetricsWriter registers writeMetrics callback for including metrics in the output generated by s.WritePrometheus.
//
// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
// The last line generated by writeMetrics must end with \n.
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
//
// It is OK to register multiple writeMetrics callbacks - all of them will be called sequentially for generating the output at s.WritePrometheus.
func (s *Set) RegisterMetricsWriter(writeMetrics func(w io.Writer)) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.metricsWriters = append(s.metricsWriters, writeMetrics)
}
|
||||
276
vendor/github.com/VictoriaMetrics/metrics/summary.go
generated
vendored
Normal file
276
vendor/github.com/VictoriaMetrics/metrics/summary.go
generated
vendored
Normal file
@@ -0,0 +1,276 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/valyala/histogram"
|
||||
)
|
||||
|
||||
// defaultSummaryWindow is the sliding window used by NewSummary.
const defaultSummaryWindow = 5 * time.Minute

// defaultSummaryQuantiles is the quantile set exported by NewSummary.
var defaultSummaryQuantiles = []float64{0.5, 0.9, 0.97, 0.99, 1}

// Summary implements summary.
type Summary struct {
	mu sync.Mutex // protects all the fields below

	// curr and next are rotated every window/2 by summariesSwapCron;
	// curr covers the samples of the sliding window, next is the staging
	// histogram that replaces curr at the next rotation.
	curr *histogram.Fast
	next *histogram.Fast

	quantiles      []float64 // quantiles to export; values in [0..1]
	quantileValues []float64 // cached quantile values; see updateQuantiles

	sum   float64 // cumulative sum of all observed values (never reset)
	count uint64  // cumulative number of observed values (never reset)

	window time.Duration // sliding window duration
}
|
||||
|
||||
// NewSummary creates and returns new summary with the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
func NewSummary(name string) *Summary {
|
||||
return defaultSet.NewSummary(name)
|
||||
}
|
||||
|
||||
// NewSummaryExt creates and returns new summary with the given name,
|
||||
// window and quantiles.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
func NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
|
||||
return defaultSet.NewSummaryExt(name, window, quantiles)
|
||||
}
|
||||
|
||||
func newSummary(window time.Duration, quantiles []float64) *Summary {
|
||||
// Make a copy of quantiles in order to prevent from their modification by the caller.
|
||||
quantiles = append([]float64{}, quantiles...)
|
||||
validateQuantiles(quantiles)
|
||||
sm := &Summary{
|
||||
curr: histogram.NewFast(),
|
||||
next: histogram.NewFast(),
|
||||
quantiles: quantiles,
|
||||
quantileValues: make([]float64, len(quantiles)),
|
||||
window: window,
|
||||
}
|
||||
return sm
|
||||
}
|
||||
|
||||
// validateQuantiles panics if any of the given quantiles lies outside [0..1].
func validateQuantiles(quantiles []float64) {
	for i := range quantiles {
		q := quantiles[i]
		if q < 0 || q > 1 {
			panic(fmt.Errorf("BUG: quantile must be in the range [0..1]; got %v", q))
		}
	}
}
|
||||
|
||||
// Update updates the summary.
func (sm *Summary) Update(v float64) {
	sm.mu.Lock()
	// The value goes into both histograms: next replaces curr at the next
	// rotation by summariesSwapCron, so samples near a swap aren't lost.
	sm.curr.Update(v)
	sm.next.Update(v)
	// sum and count are cumulative and are never reset by the rotation.
	sm.sum += v
	sm.count++
	sm.mu.Unlock()
}

// UpdateDuration updates request duration based on the given startTime.
// The duration is recorded in seconds.
func (sm *Summary) UpdateDuration(startTime time.Time) {
	d := time.Since(startTime).Seconds()
	sm.Update(d)
}
|
||||
|
||||
// marshalTo writes the summary's *_sum and *_count series to w in Prometheus
// text format. Nothing is written for a summary that has seen no updates.
func (sm *Summary) marshalTo(prefix string, w io.Writer) {
	// Marshal only *_sum and *_count values.
	// Quantile values should be already updated by the caller via sm.updateQuantiles() call.
	// sm.quantileValues will be marshaled later via quantileValue.marshalTo.
	sm.mu.Lock()
	sum := sm.sum
	count := sm.count
	sm.mu.Unlock()

	if count > 0 {
		// Split "name{labels}" so the _sum/_count suffix lands on the name,
		// before the label set.
		name, filters := splitMetricName(prefix)
		if float64(int64(sum)) == sum {
			// Marshal integer sum without scientific notation
			fmt.Fprintf(w, "%s_sum%s %d\n", name, filters, int64(sum))
		} else {
			fmt.Fprintf(w, "%s_sum%s %g\n", name, filters, sum)
		}
		fmt.Fprintf(w, "%s_count%s %d\n", name, filters, count)
	}
}

// metricType returns a sentinel value that suppresses metadata output for the
// summary's sum/count series.
func (sm *Summary) metricType() string {
	// this metric type should not be printed, because summary (sum and count)
	// of the same metric family will be printed after quantile(s).
	// If metadata is needed, the metadata from quantile(s) should be used.
	// quantile will be printed first, so its metrics type won't be printed as metadata.
	// Printing quantiles before sum and count aligns this code with Prometheus behavior.
	// See: https://github.com/VictoriaMetrics/metrics/pull/99
	return "unsupported"
}
|
||||
|
||||
// splitMetricName splits a full metric name into its bare identifier and the
// trailing label block (including braces). The label part is empty when the
// name carries no labels.
func splitMetricName(name string) (string, string) {
	if i := strings.IndexByte(name, '{'); i >= 0 {
		return name[:i], name[i:]
	}
	return name, ""
}
|
||||
|
||||
// updateQuantiles recomputes the cached quantile values from the histogram
// covering the current window. Callers invoke it before marshaling; the
// values are then read by quantileValue.marshalTo.
func (sm *Summary) updateQuantiles() {
	sm.mu.Lock()
	// Reuse the quantileValues backing array to avoid a per-call allocation.
	sm.quantileValues = sm.curr.Quantiles(sm.quantileValues[:0], sm.quantiles)
	sm.mu.Unlock()
}
|
||||
|
||||
// GetOrCreateSummary returns registered summary with the given name
|
||||
// or creates new summary if the registry doesn't contain summary with
|
||||
// the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewSummary instead of GetOrCreateSummary.
|
||||
func GetOrCreateSummary(name string) *Summary {
|
||||
return defaultSet.GetOrCreateSummary(name)
|
||||
}
|
||||
|
||||
// GetOrCreateSummaryExt returns registered summary with the given name,
|
||||
// window and quantiles or creates new summary if the registry doesn't
|
||||
// contain summary with the given name.
|
||||
//
|
||||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
//
|
||||
// Performance tip: prefer NewSummaryExt instead of GetOrCreateSummaryExt.
|
||||
func GetOrCreateSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
|
||||
return defaultSet.GetOrCreateSummaryExt(name, window, quantiles)
|
||||
}
|
||||
|
||||
// isEqualQuantiles reports whether a and b hold identical values in identical
// order. A direct element-wise comparison is used, since it is faster than
// reflect.DeepEqual.
func isEqualQuantiles(a, b []float64) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
|
||||
|
||||
// quantileValue is an auxiliary metric exposing a single quantile of its
// parent summary, e.g. metric_name{quantile="0.5"}.
type quantileValue struct {
	sm  *Summary // parent summary holding the cached quantile values
	idx int      // index into sm.quantileValues for this quantile
}

// marshalTo writes the cached quantile value (computed beforehand by
// Summary.updateQuantiles) to w in Prometheus text format.
func (qv *quantileValue) marshalTo(prefix string, w io.Writer) {
	qv.sm.mu.Lock()
	v := qv.sm.quantileValues[qv.idx]
	qv.sm.mu.Unlock()
	// NaN values are skipped - presumably returned when the current window
	// holds no samples; TODO confirm against histogram.Fast.Quantiles.
	if !math.IsNaN(v) {
		fmt.Fprintf(w, "%s %g\n", prefix, v)
	}
}

// metricType identifies the quantile series as part of a Prometheus summary.
func (qv *quantileValue) metricType() string {
	return "summary"
}
|
||||
|
||||
// addTag appends the given label pair (`key="value"`) to a metric name,
// producing name{tag}, name{existing,tag} or filling an empty name{} set.
// Panics on a name consisting solely of "{...}".
func addTag(name, tag string) string {
	if !strings.HasSuffix(name, "}") {
		// No label block yet: metric_name -> metric_name{tag}
		return name + "{" + tag + "}"
	}
	trimmed := name[:len(name)-1]
	if trimmed == "" {
		panic(fmt.Errorf("BUG: metric name cannot be empty"))
	}
	if strings.HasSuffix(trimmed, "{") {
		// case for empty labels set metric_name{}
		return trimmed + tag + "}"
	}
	return trimmed + "," + tag + "}"
}
|
||||
|
||||
// registerSummaryLocked adds sm to the global per-window summary registry and
// spawns the rotation goroutine for the window when sm is the first summary
// registered with it. The "Locked" suffix refers to the caller holding the
// owning Set's mutex (see Set.NewSummaryExt); the global summariesLock is
// acquired here.
//
// NOTE(review): the goroutine spawned by summariesSwapCron never exits, and a
// window whose summary count drops to zero and then grows again will spawn a
// second goroutine for the same window - verify whether double-rate rotation
// is acceptable here.
func registerSummaryLocked(sm *Summary) {
	window := sm.window
	summariesLock.Lock()
	summaries[window] = append(summaries[window], sm)
	if len(summaries[window]) == 1 {
		go summariesSwapCron(window)
	}
	summariesLock.Unlock()
}
|
||||
|
||||
func unregisterSummary(sm *Summary) {
|
||||
window := sm.window
|
||||
summariesLock.Lock()
|
||||
sms := summaries[window]
|
||||
found := false
|
||||
for i, xsm := range sms {
|
||||
if xsm == sm {
|
||||
sms = append(sms[:i], sms[i+1:]...)
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
panic(fmt.Errorf("BUG: cannot find registered summary %p", sm))
|
||||
}
|
||||
summaries[window] = sms
|
||||
summariesLock.Unlock()
|
||||
}
|
||||
|
||||
// summariesSwapCron periodically rotates the curr/next histograms of every
// summary registered with the given window, so curr covers roughly the most
// recent `window` of samples.
//
// It is started by registerSummaryLocked for the first summary with the given
// window and loops forever - there is no stop mechanism; the goroutine lives
// for the remaining process lifetime.
func summariesSwapCron(window time.Duration) {
	for {
		// Rotate every half window: next has been accumulating for the last
		// half window, so after the swap curr covers 0.5x-1x of the window.
		time.Sleep(window / 2)
		summariesLock.Lock()
		for _, sm := range summaries[window] {
			sm.mu.Lock()
			tmp := sm.curr
			sm.curr = sm.next
			sm.next = tmp
			// The demoted histogram becomes the fresh staging histogram.
			sm.next.Reset()
			sm.mu.Unlock()
		}
		summariesLock.Unlock()
	}
}
|
||||
|
||||
var (
	// summaries groups all registered summaries by their window duration;
	// a summariesSwapCron goroutine serves each distinct window.
	summaries = map[time.Duration][]*Summary{}

	// summariesLock protects the summaries map and its slices.
	summariesLock sync.Mutex
)
|
||||
94
vendor/github.com/VictoriaMetrics/metrics/validator.go
generated
vendored
Normal file
94
vendor/github.com/VictoriaMetrics/metrics/validator.go
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ValidateMetric validates provided string
|
||||
// to be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
func ValidateMetric(s string) error {
|
||||
if len(s) == 0 {
|
||||
return fmt.Errorf("metric cannot be empty")
|
||||
}
|
||||
if strings.IndexByte(s, '\n') >= 0 {
|
||||
return fmt.Errorf("metric cannot contain line breaks")
|
||||
}
|
||||
n := strings.IndexByte(s, '{')
|
||||
if n < 0 {
|
||||
return validateIdent(s)
|
||||
}
|
||||
ident := s[:n]
|
||||
s = s[n+1:]
|
||||
if err := validateIdent(ident); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(s) == 0 || s[len(s)-1] != '}' {
|
||||
return fmt.Errorf("missing closing curly brace at the end of %q", ident)
|
||||
}
|
||||
return validateTags(s[:len(s)-1])
|
||||
}
|
||||
|
||||
// validateTags validates a comma-separated list of label pairs of the form
// key="value" (the contents between the braces of a metric name, without the
// braces themselves). An empty string is valid.
func validateTags(s string) error {
	if len(s) == 0 {
		return nil
	}
	for {
		// Each iteration consumes one key="value" pair.
		n := strings.IndexByte(s, '=')
		if n < 0 {
			return fmt.Errorf("missing `=` after %q", s)
		}
		ident := s[:n]
		s = s[n+1:]
		if err := validateIdent(ident); err != nil {
			return err
		}
		if len(s) == 0 || s[0] != '"' {
			return fmt.Errorf("missing starting `\"` for %q value; tail=%q", ident, s)
		}
		s = s[1:]
	again:
		// Find the closing double quote of the value.
		n = strings.IndexByte(s, '"')
		if n < 0 {
			return fmt.Errorf("missing trailing `\"` for %q value; tail=%q", ident, s)
		}
		// Count the backslashes immediately preceding the quote: an odd
		// number means the quote itself is escaped (\" or \\\" etc.) and is
		// part of the value, so keep scanning past it.
		m := n
		for m > 0 && s[m-1] == '\\' {
			m--
		}
		if (n-m)%2 == 1 {
			s = s[n+1:]
			goto again
		}
		s = s[n+1:]
		if len(s) == 0 {
			// Last pair consumed cleanly.
			return nil
		}
		if !strings.HasPrefix(s, ",") {
			return fmt.Errorf("missing `,` after %q value; tail=%q", ident, s)
		}
		// Allow optional spaces after the comma before the next key.
		s = skipSpace(s[1:])
	}
}
|
||||
|
||||
// skipSpace returns s with all leading ASCII space characters removed.
func skipSpace(s string) string {
	i := 0
	for i < len(s) && s[i] == ' ' {
		i++
	}
	return s[i:]
}
|
||||
|
||||
// validateIdent returns an error unless s is a valid metric/label identifier.
func validateIdent(s string) error {
	if identRegexp.MatchString(s) {
		return nil
	}
	return fmt.Errorf("invalid identifier %q", s)
}

// identRegexp matches valid identifiers: Prometheus-style names, with dots
// additionally allowed. The first character must not be a digit.
var identRegexp = regexp.MustCompile("^[a-zA-Z_:.][a-zA-Z0-9_:.]*$")
|
||||
16
vendor/github.com/valyala/fastrand/.travis.yml
generated
vendored
Normal file
16
vendor/github.com/valyala/fastrand/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.7
|
||||
- 1.8
|
||||
|
||||
script:
|
||||
# build test for supported platforms
|
||||
- GOOS=linux go build
|
||||
- GOOS=darwin go build
|
||||
- GOOS=freebsd go build
|
||||
- GOARCH=386 go build
|
||||
|
||||
# run tests on a standard platform
|
||||
- go test -v ./...
|
||||
|
||||
21
vendor/github.com/valyala/fastrand/LICENSE
generated
vendored
Normal file
21
vendor/github.com/valyala/fastrand/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2017 Aliaksandr Valialkin
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
76
vendor/github.com/valyala/fastrand/README.md
generated
vendored
Normal file
76
vendor/github.com/valyala/fastrand/README.md
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
[](https://travis-ci.org/valyala/fastrand)
|
||||
[](http://godoc.org/github.com/valyala/fastrand)
|
||||
[](https://goreportcard.com/report/github.com/valyala/fastrand)
|
||||
|
||||
|
||||
# fastrand
|
||||
|
||||
Fast pseudorandom number generator.
|
||||
|
||||
|
||||
# Features
|
||||
|
||||
- Optimized for speed.
|
||||
- Performance scales on multiple CPUs.
|
||||
|
||||
# How does it work?
|
||||
|
||||
It abuses [sync.Pool](https://golang.org/pkg/sync/#Pool) for maintaining
|
||||
"per-CPU" pseudorandom number generators.
|
||||
|
||||
TODO: figure out how to use real per-CPU pseudorandom number generators.
|
||||
|
||||
|
||||
# Benchmark results
|
||||
|
||||
|
||||
```
|
||||
$ GOMAXPROCS=1 go test -bench=. github.com/valyala/fastrand
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/valyala/fastrand
|
||||
BenchmarkUint32n 50000000 29.7 ns/op
|
||||
BenchmarkRNGUint32n 200000000 6.50 ns/op
|
||||
BenchmarkRNGUint32nWithLock 100000000 21.5 ns/op
|
||||
BenchmarkMathRandInt31n 50000000 31.8 ns/op
|
||||
BenchmarkMathRandRNGInt31n 100000000 17.9 ns/op
|
||||
BenchmarkMathRandRNGInt31nWithLock 50000000 30.2 ns/op
|
||||
PASS
|
||||
ok github.com/valyala/fastrand 10.634s
|
||||
```
|
||||
|
||||
```
|
||||
$ GOMAXPROCS=2 go test -bench=. github.com/valyala/fastrand
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/valyala/fastrand
|
||||
BenchmarkUint32n-2 100000000 17.6 ns/op
|
||||
BenchmarkRNGUint32n-2 500000000 3.36 ns/op
|
||||
BenchmarkRNGUint32nWithLock-2 50000000 32.0 ns/op
|
||||
BenchmarkMathRandInt31n-2 20000000 51.2 ns/op
|
||||
BenchmarkMathRandRNGInt31n-2 100000000 11.0 ns/op
|
||||
BenchmarkMathRandRNGInt31nWithLock-2 20000000 91.0 ns/op
|
||||
PASS
|
||||
ok github.com/valyala/fastrand 9.543s
|
||||
```
|
||||
|
||||
```
|
||||
$ GOMAXPROCS=4 go test -bench=. github.com/valyala/fastrand
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/valyala/fastrand
|
||||
BenchmarkUint32n-4 100000000 14.2 ns/op
|
||||
BenchmarkRNGUint32n-4 500000000 3.30 ns/op
|
||||
BenchmarkRNGUint32nWithLock-4 20000000 88.7 ns/op
|
||||
BenchmarkMathRandInt31n-4 10000000 145 ns/op
|
||||
BenchmarkMathRandRNGInt31n-4 200000000 8.35 ns/op
|
||||
BenchmarkMathRandRNGInt31nWithLock-4 20000000 102 ns/op
|
||||
PASS
|
||||
ok github.com/valyala/fastrand 11.534s
|
||||
```
|
||||
|
||||
As you can see, [fastrand.Uint32n](https://godoc.org/github.com/valyala/fastrand#Uint32n)
|
||||
scales on multiple CPUs, while [rand.Int31n](https://golang.org/pkg/math/rand/#Int31n)
|
||||
doesn't scale. Their performance is comparable on `GOMAXPROCS=1`,
|
||||
but `fastrand.Uint32n` runs 3x faster than `rand.Int31n` on `GOMAXPROCS=2`
|
||||
and 10x faster than `rand.Int31n` on `GOMAXPROCS=4`.
|
||||
79
vendor/github.com/valyala/fastrand/fastrand.go
generated
vendored
Normal file
79
vendor/github.com/valyala/fastrand/fastrand.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
// Package fastrand implements fast pseudorandom number generator
|
||||
// that should scale well on multi-CPU systems.
|
||||
//
|
||||
// Use crypto/rand instead of this package for generating
|
||||
// cryptographically secure random numbers.
|
||||
package fastrand
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Uint32 returns pseudorandom uint32.
|
||||
//
|
||||
// It is safe calling this function from concurrent goroutines.
|
||||
func Uint32() uint32 {
|
||||
v := rngPool.Get()
|
||||
if v == nil {
|
||||
v = &RNG{}
|
||||
}
|
||||
r := v.(*RNG)
|
||||
x := r.Uint32()
|
||||
rngPool.Put(r)
|
||||
return x
|
||||
}
|
||||
|
||||
// rngPool caches *RNG instances so that Uint32/Uint32n can serve
// concurrent callers without a shared lock (sync.Pool keeps roughly
// per-P free lists, which is what makes this scale across CPUs).
var rngPool sync.Pool
|
||||
|
||||
// Uint32n returns pseudorandom uint32 in the range [0..maxN).
|
||||
//
|
||||
// It is safe calling this function from concurrent goroutines.
|
||||
func Uint32n(maxN uint32) uint32 {
|
||||
x := Uint32()
|
||||
// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
|
||||
return uint32((uint64(x) * uint64(maxN)) >> 32)
|
||||
}
|
||||
|
||||
// RNG is a pseudorandom number generator.
//
// It is unsafe to call RNG methods from concurrent goroutines.
type RNG struct {
	x uint32 // xorshift state; 0 means "not yet seeded" (see Uint32)
}

// Uint32 returns pseudorandom uint32.
//
// It is unsafe to call this method from concurrent goroutines.
func (r *RNG) Uint32() uint32 {
	// Lazily seed from the wall clock. Xorshift must never run with a
	// zero state (it would stay zero forever), so retry until the seed
	// is non-zero.
	for r.x == 0 {
		r.x = getRandomUint32()
	}

	// One Marsaglia xorshift step.
	// See https://en.wikipedia.org/wiki/Xorshift
	s := r.x
	s ^= s << 13
	s ^= s >> 17
	s ^= s << 5
	r.x = s
	return s
}

// Uint32n returns pseudorandom uint32 in the range [0..maxN).
//
// It is unsafe to call this method from concurrent goroutines.
func (r *RNG) Uint32n(maxN uint32) uint32 {
	// Lemire's multiply-shift reduction; avoids the slower modulo.
	// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
	return uint32(uint64(r.Uint32()) * uint64(maxN) >> 32)
}

// Seed sets the r state to n.
func (r *RNG) Seed(n uint32) {
	r.x = n
}

// getRandomUint32 derives a seed by folding together the high and low
// halves of the current wall-clock nanosecond timestamp.
func getRandomUint32() uint32 {
	ns := time.Now().UnixNano()
	return uint32(ns ^ (ns >> 32))
}
|
||||
21
vendor/github.com/valyala/histogram/LICENSE
generated
vendored
Normal file
21
vendor/github.com/valyala/histogram/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2019 Aliaksandr Valialkin
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
9
vendor/github.com/valyala/histogram/README.md
generated
vendored
Normal file
9
vendor/github.com/valyala/histogram/README.md
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
[](http://godoc.org/github.com/valyala/histogram)
|
||||
[](https://goreportcard.com/report/github.com/valyala/histogram)
|
||||
|
||||
|
||||
# histogram
|
||||
|
||||
Fast histograms for Go.
|
||||
|
||||
See [docs](https://godoc.org/github.com/valyala/histogram).
|
||||
131
vendor/github.com/valyala/histogram/histogram.go
generated
vendored
Normal file
131
vendor/github.com/valyala/histogram/histogram.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Package histogram provides building blocks for fast histograms.
|
||||
package histogram
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/valyala/fastrand"
|
||||
)
|
||||
|
||||
var (
	infNeg = math.Inf(-1) // initial Fast.max so any first sample raises it (see Reset)
	infPos = math.Inf(1)  // initial Fast.min so any first sample lowers it (see Reset)
	nan    = math.NaN()   // returned by quantile for empty data or NaN phi
)
|
||||
|
||||
// Fast is a fast histogram.
//
// It cannot be used from concurrently running goroutines without
// external synchronization.
type Fast struct {
	max   float64 // largest value seen since Reset
	min   float64 // smallest value seen since Reset
	count uint64  // total number of Update calls since Reset

	a   []float64    // reservoir sample holding at most maxSamples values
	tmp []float64    // scratch buffer: sorted copy of a used by quantile queries
	rng fastrand.RNG // randomness source for reservoir replacement in Update
}
|
||||
|
||||
// NewFast returns new fast histogram.
|
||||
func NewFast() *Fast {
|
||||
f := &Fast{}
|
||||
f.Reset()
|
||||
return f
|
||||
}
|
||||
|
||||
// Reset resets the histogram.
|
||||
func (f *Fast) Reset() {
|
||||
f.max = infNeg
|
||||
f.min = infPos
|
||||
f.count = 0
|
||||
if len(f.a) > 0 {
|
||||
f.a = f.a[:0]
|
||||
f.tmp = f.tmp[:0]
|
||||
} else {
|
||||
// Free up memory occupied by unused histogram.
|
||||
f.a = nil
|
||||
f.tmp = nil
|
||||
}
|
||||
// Reset rng state in order to get repeatable results
|
||||
// for the same sequence of values passed to Fast.Update.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1612
|
||||
f.rng.Seed(1)
|
||||
}
|
||||
|
||||
// Update updates the f with v.
|
||||
func (f *Fast) Update(v float64) {
|
||||
if v > f.max {
|
||||
f.max = v
|
||||
}
|
||||
if v < f.min {
|
||||
f.min = v
|
||||
}
|
||||
|
||||
f.count++
|
||||
if len(f.a) < maxSamples {
|
||||
f.a = append(f.a, v)
|
||||
return
|
||||
}
|
||||
if n := int(f.rng.Uint32n(uint32(f.count))); n < len(f.a) {
|
||||
f.a[n] = v
|
||||
}
|
||||
}
|
||||
|
||||
// maxSamples bounds the size of the reservoir kept in Fast.a;
// once full, Update switches to random replacement.
const maxSamples = 1000
|
||||
|
||||
// Quantile returns the quantile value for the given phi.
|
||||
func (f *Fast) Quantile(phi float64) float64 {
|
||||
f.tmp = append(f.tmp[:0], f.a...)
|
||||
sort.Float64s(f.tmp)
|
||||
return f.quantile(phi)
|
||||
}
|
||||
|
||||
// Quantiles appends quantile values to dst for the given phis.
|
||||
func (f *Fast) Quantiles(dst, phis []float64) []float64 {
|
||||
f.tmp = append(f.tmp[:0], f.a...)
|
||||
sort.Float64s(f.tmp)
|
||||
for _, phi := range phis {
|
||||
q := f.quantile(phi)
|
||||
dst = append(dst, q)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func (f *Fast) quantile(phi float64) float64 {
|
||||
if len(f.tmp) == 0 || math.IsNaN(phi) {
|
||||
return nan
|
||||
}
|
||||
if phi <= 0 {
|
||||
return f.min
|
||||
}
|
||||
if phi >= 1 {
|
||||
return f.max
|
||||
}
|
||||
idx := uint(phi*float64(len(f.tmp)-1) + 0.5)
|
||||
if idx >= uint(len(f.tmp)) {
|
||||
idx = uint(len(f.tmp) - 1)
|
||||
}
|
||||
return f.tmp[idx]
|
||||
}
|
||||
|
||||
// GetFast returns a histogram from a pool.
|
||||
func GetFast() *Fast {
|
||||
v := fastPool.Get()
|
||||
if v == nil {
|
||||
return NewFast()
|
||||
}
|
||||
return v.(*Fast)
|
||||
}
|
||||
|
||||
// PutFast puts f to the pool.
//
// f cannot be used after this call.
func PutFast(f *Fast) {
	// Wipe state so the next GetFast caller starts from a clean histogram.
	f.Reset()
	fastPool.Put(f)
}
|
||||
|
||||
// fastPool caches released Fast instances for reuse by GetFast/PutFast.
var fastPool sync.Pool
|
||||
Reference in New Issue
Block a user