Mirror of https://github.com/matrix-org/dendrite.git, synced 2025-12-12 17:33:09 -06:00

Commit 0307135156 ("bla3"), parent 33da716364.

The commit reworks the vendored dugong fs logging hook to write one file per minimum severity (info.log, warn.log, error.log) instead of a single log file, updates its tests accordingly, and deletes the vendored prometheus/client_golang HTTP API client files.
Modified: dugong fs hook implementation

@@ -3,13 +3,12 @@ package dugong
 import (
     "compress/gzip"
     "fmt"
+    log "github.com/sirupsen/logrus"
     "io"
     "os"
     "path/filepath"
     "sync/atomic"
     "time"
-
-    log "github.com/sirupsen/logrus"
 )

 // RotationScheduler determines when files should be rotated.
@@ -42,9 +41,6 @@ func dayOffset(t time.Time, offsetDays int) time.Time {
     )
 }

-// ShouldRotate compares the current time with the rotation schedule.
-// If the rotation should occur, returns (true, suffix) where suffix is the
-// suffix for the rotated file. Else, returns (false, "")
 func (rs *DailyRotationSchedule) ShouldRotate() (bool, string) {
     now := currentTime()
     if rs.rotateAfter == nil {
@@ -72,13 +68,15 @@ func (rs *DailyRotationSchedule) ShouldGZip() bool {
 // contains the messages with that severity or higher. If a formatter is
 // not specified, they will be logged using a JSON formatter. If a
 // RotationScheduler is set, the files will be cycled according to its rules.
-func NewFSHook(path string, formatter log.Formatter, rotSched RotationScheduler) log.Hook {
+func NewFSHook(infoPath, warnPath, errorPath string, formatter log.Formatter, rotSched RotationScheduler) log.Hook {
     if formatter == nil {
         formatter = &log.JSONFormatter{}
     }
     hook := &fsHook{
         entries:   make(chan log.Entry, 1024),
-        path:      path,
+        infoPath:  infoPath,
+        warnPath:  warnPath,
+        errorPath: errorPath,
         formatter: formatter,
         scheduler: rotSched,
     }
@@ -98,7 +96,9 @@ func NewFSHook(path string, formatter log.Formatter, rotSched RotationScheduler)
 type fsHook struct {
     entries   chan log.Entry
     queueSize int32
-    path      string
+    infoPath  string
+    warnPath  string
+    errorPath string
     formatter log.Formatter
     scheduler RotationScheduler
 }
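Review aid, not part of the diff: a minimal sketch of how the new three-path constructor is wired into logrus. The dugong import path and the file locations are assumptions for illustration; the nil formatter and nil scheduler defaults follow NewFSHook above.

```go
package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/matrix-org/dugong"
)

func main() {
	// One file per minimum severity: error.log receives only errors,
	// warn.log receives warnings and errors, info.log receives info and above.
	log.AddHook(dugong.NewFSHook(
		"/var/log/myapp/info.log",  // hypothetical paths
		"/var/log/myapp/warn.log",
		"/var/log/myapp/error.log",
		nil, // fall back to the JSON formatter, as in NewFSHook above
		nil, // no rotation schedule
	))

	log.WithField("component", "demo").Info("written to info.log")
	log.WithField("component", "demo").Error("written to all three files")
}
```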
@@ -123,8 +123,22 @@ func (hook *fsHook) writeEntry(entry *log.Entry) error {
         }
     }

-    if err := logToFile(hook.path, msg); err != nil {
-        return err
+    if entry.Level <= log.ErrorLevel {
+        if err := logToFile(hook.errorPath, msg); err != nil {
+            return err
+        }
+    }
+
+    if entry.Level <= log.WarnLevel {
+        if err := logToFile(hook.warnPath, msg); err != nil {
+            return err
+        }
+    }
+
+    if entry.Level <= log.InfoLevel {
+        if err := logToFile(hook.infoPath, msg); err != nil {
+            return err
+        }
     }

     return nil
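The three cascading checks rely on logrus ordering its levels so that a lower value means higher severity, which is why a single Error entry fans out to all three files while an Info entry only reaches info.log. A small standalone sketch of that comparison, for reviewers (nothing here is part of the diff):

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Logrus levels: Panic(0) < Fatal(1) < Error(2) < Warn(3) < Info(4) < Debug(5).
	// An Error entry satisfies all three checks in writeEntry above and is
	// appended to error.log, warn.log and info.log.
	fmt.Println(log.ErrorLevel <= log.WarnLevel) // true
	fmt.Println(log.ErrorLevel <= log.InfoLevel) // true
	fmt.Println(log.InfoLevel <= log.WarnLevel)  // false
}
```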
@@ -137,7 +151,6 @@ func (hook *fsHook) Levels() []log.Level {
         log.ErrorLevel,
         log.WarnLevel,
         log.InfoLevel,
-        log.DebugLevel,
     }
 }

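Because logrus only delivers entries whose level appears in Levels(), dropping log.DebugLevel here means debug output now bypasses the hook entirely. A sketch of the observable effect (the dugong import path is assumed, as above):

```go
package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/matrix-org/dugong"
)

func main() {
	log.SetLevel(log.DebugLevel)
	log.AddHook(dugong.NewFSHook("info.log", "warn.log", "error.log", nil, nil))

	log.Debug("still printed to stderr, but no longer handed to the fs hook")
	log.Info("handed to the fs hook and appended to info.log")
}
```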
@@ -148,14 +161,17 @@ func (hook *fsHook) Levels() []log.Level {
 // one which does the logging. Since we don't hold open a handle to the
 // file when writing, a simple Rename is all that is required.
 func (hook *fsHook) rotate(suffix string, gzip bool) error {
-    logFilePath := hook.path + suffix
-    if err := os.Rename(hook.path, logFilePath); err != nil {
-        // e.g. because there were no errors in error.log for this day
-        fmt.Fprintf(os.Stderr, "Error rotating file %s: %v\n", hook.path, err)
-    } else if gzip {
-        // Don't try to gzip if we failed to rotate
-        if err := gzipFile(logFilePath); err != nil {
-            fmt.Fprintf(os.Stderr, "Failed to gzip file %s: %v\n", logFilePath, err)
+    for _, fpath := range []string{hook.errorPath, hook.warnPath, hook.infoPath} {
+        logFilePath := fpath + suffix
+        if err := os.Rename(fpath, logFilePath); err != nil {
+            // e.g. because there were no errors in error.log for this day
+            fmt.Fprintf(os.Stderr, "Error rotating file %s: %v\n", fpath, err)
+            continue // don't try to gzip if we failed to rotate
+        }
+        if gzip {
+            if err := gzipFile(logFilePath); err != nil {
+                fmt.Fprintf(os.Stderr, "Failed to gzip file %s: %v\n", logFilePath, err)
+            }
         }
     }
     return nil
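For completeness, a sketch of what the reworked rotate loop produces with a daily schedule: each of the three files is renamed with the date suffix (and optionally gzipped, per the scheduler's ShouldGZip), and a file that cannot be renamed is simply skipped. Using the DailyRotationSchedule zero value below is an assumption; configure it as this package documents elsewhere.

```go
package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/matrix-org/dugong"
)

func main() {
	// With a RotationScheduler attached, a rotation renames
	//   info.log  -> info.log.<suffix>
	//   warn.log  -> warn.log.<suffix>
	//   error.log -> error.log.<suffix>
	// e.g. error.log.2016-10-26, skipping any file that cannot be renamed
	// (say, an empty day for error.log) and gzipping when ShouldGZip is true.
	log.AddHook(dugong.NewFSHook(
		"info.log", "warn.log", "error.log",
		nil,
		&dugong.DailyRotationSchedule{}, // assumed usable at its zero value
	))
	log.Info("rotated daily from now on")
}
```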
Modified: dugong fs hook tests

@@ -3,6 +3,7 @@ package dugong
 import (
     "bufio"
     "encoding/json"
+    log "github.com/sirupsen/logrus"
     "io/ioutil"
     "os"
     "path/filepath"
@@ -11,8 +12,6 @@ import (
     "sync/atomic"
     "testing"
     "time"
-
-    log "github.com/sirupsen/logrus"
 )

 const (
@@ -20,7 +19,7 @@ const (
     fieldValue = "my_value"
 )

-func TestFSHook(t *testing.T) {
+func TestFSHookInfo(t *testing.T) {
     logger, hook, wait, teardown := setupLogHook(t)
     defer teardown()

@@ -28,7 +27,32 @@ func TestFSHook(t *testing.T) {

     wait()

-    checkLogFile(t, hook.path, "info")
+    checkLogFile(t, hook.infoPath, "info")
+}
+
+func TestFSHookWarn(t *testing.T) {
+    logger, hook, wait, teardown := setupLogHook(t)
+    defer teardown()
+
+    logger.WithField(fieldName, fieldValue).Warn("Warn message")
+
+    wait()
+
+    checkLogFile(t, hook.infoPath, "warning")
+    checkLogFile(t, hook.warnPath, "warning")
+}
+
+func TestFSHookError(t *testing.T) {
+    logger, hook, wait, teardown := setupLogHook(t)
+    defer teardown()
+
+    logger.WithField(fieldName, fieldValue).Error("Error message")
+
+    wait()
+
+    checkLogFile(t, hook.infoPath, "error")
+    checkLogFile(t, hook.warnPath, "error")
+    checkLogFile(t, hook.errorPath, "error")
 }

 func TestFsHookInterleaved(t *testing.T) {
@@ -43,7 +67,7 @@ func TestFsHookInterleaved(t *testing.T) {

     wait()

-    file, err := os.Open(hook.path)
+    file, err := os.Open(hook.infoPath)
     if err != nil {
         t.Fatalf("Failed to open file: %v", err)
     }
@@ -77,7 +101,7 @@ func TestFSHookMultiple(t *testing.T) {

     wait()

-    file, err := os.Open(hook.path)
+    file, err := os.Open(hook.infoPath)
     if err != nil {
         t.Fatalf("Failed to open file: %v", err)
     }
@@ -119,7 +143,7 @@ func TestFSHookConcurrent(t *testing.T) {
     wg.Wait()
     wait()

-    file, err := os.Open(hook.path)
+    file, err := os.Open(hook.infoPath)
     if err != nil {
         t.Fatalf("Failed to open file: %v", err)
     }
@@ -152,7 +176,7 @@ func TestDailySchedule(t *testing.T) {
     // Time ticks from 23:50 to 00:10 in 1 minute increments. Log each tick as 'counter'.
     minutesGoneBy := 0
     currentTime = func() time.Time {
-        minutesGoneBy++
+        minutesGoneBy += 1
         return time.Date(2016, 10, 26, 23, 50+minutesGoneBy, 00, 0, loc)
     }
     for i := 0; i < 20; i++ {
@@ -162,11 +186,11 @@ func TestDailySchedule(t *testing.T) {

     wait()

-    // fshook.log.2016-10-26 should have 0 -> 9
-    checkFileHasSequentialCounts(t, hook.path+".2016-10-26", 0, 9)
+    // info.log.2016-10-26 should have 0 -> 9
+    checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-26", 0, 9)

-    // fshook.log should have 10 -> 19 inclusive
-    checkFileHasSequentialCounts(t, hook.path, 10, 19)
+    // info.log should have 10 -> 19 inclusive
+    checkFileHasSequentialCounts(t, hook.infoPath, 10, 19)
 }

 func TestDailyScheduleMultipleRotations(t *testing.T) {
@@ -186,7 +210,7 @@ func TestDailyScheduleMultipleRotations(t *testing.T) {
         // Start from 10/29 01:37
         return time.Date(2016, 10, 28, 13+hoursGoneBy, 37, 00, 0, loc)
     }
-    // log 8 lines
+    // log 2 lines per file, to 4 files (so 8 log lines)
     for i := 0; i < 8; i++ {
         ts := time.Date(2016, 10, 28, 13+((i+1)*12), 37, 00, 0, loc)
         logger.WithField("counter", i).Infof("The time is now %s", ts)
@@ -194,17 +218,17 @@ func TestDailyScheduleMultipleRotations(t *testing.T) {

     wait()

-    // fshook.log.2016-10-29 should have 0-1
-    checkFileHasSequentialCounts(t, hook.path+".2016-10-29", 0, 1)
+    // info.log.2016-10-29 should have 0-1
+    checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-29", 0, 1)

-    // fshook.log.2016-10-30 should have 2-3
-    checkFileHasSequentialCounts(t, hook.path+".2016-10-30", 2, 3)
+    // info.log.2016-10-30 should have 2-3
+    checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-30", 2, 3)

-    // fshook.log.2016-10-31 should have 4-5
-    checkFileHasSequentialCounts(t, hook.path+".2016-10-31", 4, 5)
+    // info.log.2016-10-31 should have 4-5
+    checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-31", 4, 5)

-    // fshook.log should have 6-7 (current day is 11/01)
-    checkFileHasSequentialCounts(t, hook.path, 6, 7)
+    // info.log should have 6-7 (current day is 11/01)
+    checkFileHasSequentialCounts(t, hook.infoPath, 6, 7)
 }

 // checkFileHasSequentialCounts based on a JSON "counter" key being a monotonically
@@ -247,9 +271,11 @@ func setupLogHook(t *testing.T) (logger *log.Logger, hook *fsHook, wait func(),
         t.Fatalf("Failed to make temporary directory: %v", err)
     }

-    path := filepath.Join(dir, "fshook.log")
+    infoPath := filepath.Join(dir, "info.log")
+    warnPath := filepath.Join(dir, "warn.log")
+    errorPath := filepath.Join(dir, "error.log")

-    hook = NewFSHook(path, nil, nil).(*fsHook)
+    hook = NewFSHook(infoPath, warnPath, errorPath, nil, nil).(*fsHook)

     logger = log.New()
     logger.Hooks.Add(hook)
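The new Warn/Error tests lean on the existing checkLogFile helper, which is not shown in this diff. Below is a hypothetical, self-contained sketch of the kind of assertion such a helper performs (reading the last JSON line of a file and checking its level), so the added expectations above are easier to follow; the function name and body are illustrative, not the repo's actual helper.

```go
package dugong

import (
	"encoding/json"
	"io/ioutil"
	"strings"
	"testing"
)

// checkLastLevel is a stand-in for the repo's checkLogFile helper: it reads the
// last JSON-formatted line of path and asserts the logrus "level" field, which
// is how the tests can verify that one Warn call lands in both info.log and
// warn.log while an Error call lands in all three files.
func checkLastLevel(t *testing.T, path, wantLevel string) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatalf("Failed to read %s: %v", path, err)
	}
	lines := strings.Split(strings.TrimSpace(string(data)), "\n")
	var entry map[string]interface{}
	if err := json.Unmarshal([]byte(lines[len(lines)-1]), &entry); err != nil {
		t.Fatalf("Failed to parse %s: %v", path, err)
	}
	if entry["level"] != wantLevel {
		t.Errorf("%s: want level %q, got %v", path, wantLevel, entry["level"])
	}
}
```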
Deleted: vendored prometheus/client_golang AUTHORS.md

@@ -1,11 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Björn Rabenstein <beorn@soundcloud.com>
-
-More than [30 individuals][1] have contributed to this repository. Please refer
-to the Git commit log for a complete list.
-
-[1]: https://github.com/prometheus/client_golang/graphs/contributors
Deleted: vendored prometheus/client_golang CHANGELOG.md

@@ -1,109 +0,0 @@
-## 0.8.0 / 2016-08-17
-* [CHANGE] Registry is doing more consistency checks. This might break
-  existing setups that used to export inconsistent metrics.
-* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow
-  arbitrary grouping.
-* [CHANGE] Removed `SelfCollector`.
-* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods.
-* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`,
-  `extraction`.
-* [CHANGE] Deprecated a number of functions.
-* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer`
-  interfaces.
-* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package
-  `promhttp`) and enabling the creation of other exposition mechanisms.
-* [FEATURE] `MustRegister` is variadic now, allowing registration of many
-  collectors in one call.
-* [FEATURE] Added HTTP API v1 package.
-* [ENHANCEMENT] Numerous documentation improvements.
-* [ENHANCEMENT] Improved metric sorting.
-* [ENHANCEMENT] Inlined fnv64a hashing for improved performance.
-* [ENHANCEMENT] Several test improvements.
-* [BUGFIX] Handle collisions in MetricVec.
-
-## 0.7.0 / 2015-07-27
-* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix.
-* [BUGFIX] Closed gaps in metric consistency check.
-* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling.
-* [ENHANCEMENT] Document the possibility to create "empty" metrics in
-  a metric vector.
-* [ENHANCEMENT] Fix and clarify various doc comments and the README.md.
-* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler.
-* [ENHANCEMENT] Change responseWriterDelegator.written to int64.
-
-## 0.6.0 / 2015-06-01
-* [CHANGE] Rename process_goroutines to go_goroutines.
-* [ENHANCEMENT] Validate label names during YAML decoding.
-* [ENHANCEMENT] Add LabelName regular expression.
-* [BUGFIX] Ensure alignment of struct members for 32-bit systems.
-
-## 0.5.0 / 2015-05-06
-* [BUGFIX] Removed a weakness in the fingerprinting aka signature code.
-  This makes fingerprinting slower and more allocation-heavy, but the
-  weakness was too severe to be tolerated.
-* [CHANGE] As a result of the above, Metric.Fingerprint is now returning
-  a different fingerprint. To keep the same fingerprint, the new method
-  Metric.FastFingerprint was introduced, which will be used by the
-  Prometheus server for storage purposes (implying that a collision
-  detection has to be added, too).
-* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on
-  fingerprinting anymore, removing the possibility of an undetected
-  fingerprint collision.
-* [FEATURE] The Go collector in the exposition library includes garbage
-  collection stats.
-* [FEATURE] The exposition library allows to create constant "throw-away"
-  summaries and histograms.
-* [CHANGE] A number of new reserved labels and prefixes.
-
-## 0.4.0 / 2015-04-08
-* [CHANGE] Return NaN when Summaries have no observations yet.
-* [BUGFIX] Properly handle Summary decay upon Write().
-* [BUGFIX] Fix the documentation link to the consumption library.
-* [FEATURE] Allow the metric family injection hook to merge with existing
-  metric families.
-* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs.
-* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions.
-
-## 0.3.2 / 2015-03-11
-* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is
-  only used by the Prometheus server internally.
-* [CLEANUP] Added licenses of vendored code left out by godep.
-
-## 0.3.1 / 2015-03-04
-* [ENHANCEMENT] Switched fingerprinting functions from own free list to
-  sync.Pool.
-* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests).
-
-## 0.3.0 / 2015-03-03
-* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL
-  PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS
-  VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE.
-* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was
-  arguably broken.)
-* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If
-  client_golang is used as a library, the vendoring will stay out of your way.
-* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made
-  the fingerprinting change above necessary.)
-* [FEATURE] Added new fingerprinting functions SignatureForLabels and
-  SignatureWithoutLabels to be used by the Prometheus server. These functions
-  require fewer allocations than the ones currently used by the server.
-
-## 0.2.0 / 2015-02-23
-* [FEATURE] Introduce new Histagram metric type.
-* [CHANGE] Ignore process collector errors for now (better error handling
-  pending).
-* [CHANGE] Use clear error interface for process pidFn.
-* [BUGFIX] Fix Go download links for several archs and OSes.
-* [ENHANCEMENT] Massively improve Gauge and Counter performance.
-* [ENHANCEMENT] Catch illegal label names for summaries in histograms.
-* [ENHANCEMENT] Reduce allocations during fingerprinting.
-* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if
-  both cgo is available and the build is for an OS with procfs.
-* [CLEANUP] Clean up code style issues.
-* [CLEANUP] Mark slow test as such and exclude them from travis.
-* [CLEANUP] Update protobuf library package name.
-* [CLEANUP] Updated vendoring of beorn7/perks.
-
-## 0.1.0 / 2015-02-02
-* [CLEANUP] Introduced semantic versioning and changelog. From now on,
-  changes will be reported in this file.
Deleted: vendored prometheus/client_golang CONTRIBUTING.md

@@ -1,18 +0,0 @@
-# Contributing
-
-Prometheus uses GitHub to manage reviews of pull requests.
-
-* If you have a trivial fix or improvement, go ahead and create a pull
-  request, addressing (with `@...`) one or more of the maintainers
-  (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
-
-* If you plan to do something more involved, first discuss your ideas
-  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
-  This will avoid unnecessary work and surely give you and us a good deal
-  of inspiration.
-
-* Relevant coding style guidelines are the [Go Code Review
-  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
-  and the _Formatting and style_ section of Peter Bourgon's [Go: Best
-  Practices for Production
-  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
Deleted: vendored prometheus/client_golang LICENSE

@@ -1,201 +0,0 @@
[Standard Apache License, Version 2.0 (January 2004, http://www.apache.org/licenses/) boilerplate text, 201 lines, including the appendix on how to apply the license.]
Deleted: vendored prometheus/client_golang NOTICE

@@ -1,23 +0,0 @@
-Prometheus instrumentation library for Go applications
-Copyright 2012-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
-
-
-The following components are included in this product:
-
-perks - a fork of https://github.com/bmizerany/perks
-https://github.com/beorn7/perks
-Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
-See https://github.com/beorn7/perks/blob/master/README.md for license details.
-
-Go support for Protocol Buffers - Google's data interchange format
-http://github.com/golang/protobuf/
-Copyright 2010 The Go Authors
-See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
Deleted: vendored prometheus/client_golang README.md

@@ -1,46 +0,0 @@
-# Prometheus Go client library
-
-[](https://travis-ci.org/prometheus/client_golang)
-[](https://goreportcard.com/report/github.com/prometheus/client_golang)
-
-This is the [Go](http://golang.org) client library for
-[Prometheus](http://prometheus.io). It has two separate parts, one for
-instrumenting application code, and one for creating clients that talk to the
-Prometheus HTTP API.
-
-## Instrumenting applications
-
-[](http://gocover.io/github.com/prometheus/client_golang/prometheus) [](https://godoc.org/github.com/prometheus/client_golang/prometheus)
-
-The
-[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
-contains the instrumentation library. See the
-[best practices section](http://prometheus.io/docs/practices/naming/) of the
-Prometheus documentation to learn more about instrumenting applications.
-
-The
-[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
-contains simple examples of instrumented code.
-
-## Client for the Prometheus HTTP API
-
-[](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)
-
-The
-[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
-contains the client for the
-[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
-to write Go applications that query time series data from a Prometheus server.
-
-## Where is `model`, `extraction`, and `text`?
-
-The `model` packages has been moved to
-[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).
-
-The `extraction` and `text` packages are now contained in
-[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).
-
-## Contributing and community
-
-See the [contributing guidelines](CONTRIBUTING.md) and the
-[Community section](http://prometheus.io/community/) of the homepage.
Deleted: vendored prometheus/client_golang VERSION

@@ -1 +0,0 @@
-0.8.0
Deleted: vendored prometheus/client_golang api/prometheus client (348 lines)

@@ -1,348 +0,0 @@
[Package prometheus, the bindings to the Prometheus HTTP API (http://prometheus.io/docs/querying/api/): the ErrorType and Error types, CancelableTransport and DefaultTransport, Config, the Client interface and its httpClient implementation (":name" path-parameter substitution in url plus a context-aware do method), the apiClient wrapper that checks response codes and decodes the {status, data, errorType, error} envelope, the Range and queryResult types (with UnmarshalJSON dispatching on scalar/vector/matrix results), and the QueryAPI interface with httpQueryAPI.Query and QueryRange issuing GET requests to /api/v1/query and /api/v1/query_range.]
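For context on what is being dropped, here is how the now-deleted api/prometheus client was typically driven, using the types and functions exactly as declared in the removed file; the server address is a placeholder.

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api/prometheus"
	"golang.org/x/net/context"
)

func main() {
	client, err := prometheus.New(prometheus.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}

	// NewQueryAPI wraps the client in the response-checking apiClient and
	// exposes Query/QueryRange against /api/v1/query and /api/v1/query_range.
	qapi := prometheus.NewQueryAPI(client)
	val, err := qapi.Query(context.Background(), "up", time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(val)
}
```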
@ -1,453 +0,0 @@
|
||||||
// Copyright 2015 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/model"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfig(t *testing.T) {
|
|
||||||
c := Config{}
|
|
||||||
if c.transport() != DefaultTransport {
|
|
||||||
t.Fatalf("expected default transport for nil Transport field")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClientURL(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
address string
|
|
||||||
endpoint string
|
|
||||||
args map[string]string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
address: "http://localhost:9090",
|
|
||||||
endpoint: "/test",
|
|
||||||
expected: "http://localhost:9090/test",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
address: "http://localhost",
|
|
||||||
endpoint: "/test",
|
|
||||||
expected: "http://localhost/test",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
address: "http://localhost:9090",
|
|
||||||
endpoint: "test",
|
|
||||||
expected: "http://localhost:9090/test",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
address: "http://localhost:9090/prefix",
|
|
||||||
endpoint: "/test",
|
|
||||||
expected: "http://localhost:9090/prefix/test",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
address: "https://localhost:9090/",
|
|
||||||
endpoint: "/test/",
|
|
||||||
expected: "https://localhost:9090/test",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
address: "http://localhost:9090",
|
|
||||||
endpoint: "/test/:param",
|
|
||||||
args: map[string]string{
|
|
||||||
"param": "content",
|
|
||||||
},
|
|
||||||
expected: "http://localhost:9090/test/content",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
address: "http://localhost:9090",
|
|
||||||
endpoint: "/test/:param/more/:param",
|
|
||||||
args: map[string]string{
|
|
||||||
"param": "content",
|
|
||||||
},
|
|
||||||
expected: "http://localhost:9090/test/content/more/content",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
address: "http://localhost:9090",
|
|
||||||
endpoint: "/test/:param/more/:foo",
|
|
||||||
args: map[string]string{
|
|
||||||
"param": "content",
|
|
||||||
"foo": "bar",
|
|
||||||
},
|
|
||||||
expected: "http://localhost:9090/test/content/more/bar",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
address: "http://localhost:9090",
|
|
||||||
endpoint: "/test/:param",
|
|
||||||
args: map[string]string{
|
|
||||||
"nonexistant": "content",
|
|
||||||
},
|
|
||||||
expected: "http://localhost:9090/test/:param",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
ep, err := url.Parse(test.address)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
hclient := &httpClient{
|
|
||||||
endpoint: ep,
|
|
||||||
transport: DefaultTransport,
|
|
||||||
}
|
|
||||||
|
|
||||||
u := hclient.url(test.endpoint, test.args)
|
|
||||||
if u.String() != test.expected {
|
|
||||||
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// The apiClient must return exactly the same result as the httpClient.
|
|
||||||
aclient := &apiClient{hclient}
|
|
||||||
|
|
||||||
u = aclient.url(test.endpoint, test.args)
|
|
||||||
if u.String() != test.expected {
|
|
||||||
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type testClient struct {
|
|
||||||
*testing.T
|
|
||||||
|
|
||||||
ch chan apiClientTest
|
|
||||||
req *http.Request
|
|
||||||
}
|
|
||||||
|
|
||||||
type apiClientTest struct {
|
|
||||||
code int
|
|
||||||
response interface{}
|
|
||||||
expected string
|
|
||||||
err *Error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *testClient) url(ep string, args map[string]string) *url.URL {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
|
||||||
if ctx == nil {
|
|
||||||
c.Fatalf("context was not passed down")
|
|
||||||
}
|
|
||||||
if req != c.req {
|
|
||||||
c.Fatalf("request was not passed down")
|
|
||||||
}
|
|
||||||
|
|
||||||
test := <-c.ch
|
|
||||||
|
|
||||||
var b []byte
|
|
||||||
var err error
|
|
||||||
|
|
||||||
switch v := test.response.(type) {
|
|
||||||
case string:
|
|
||||||
b = []byte(v)
|
|
||||||
default:
|
|
||||||
b, err = json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
c.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resp := &http.Response{
|
|
||||||
StatusCode: test.code,
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp, b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAPIClientDo(t *testing.T) {
|
|
||||||
tests := []apiClientTest{
|
|
||||||
{
|
|
||||||
response: &apiResponse{
|
|
||||||
Status: "error",
|
|
||||||
Data: json.RawMessage(`null`),
|
|
||||||
ErrorType: ErrBadData,
|
|
||||||
Error: "failed",
|
|
||||||
},
|
|
||||||
err: &Error{
|
|
||||||
Type: ErrBadData,
|
|
||||||
Msg: "failed",
|
|
||||||
},
|
|
||||||
code: statusAPIError,
|
|
||||||
expected: `null`,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
response: &apiResponse{
|
|
||||||
Status: "error",
|
|
||||||
Data: json.RawMessage(`"test"`),
|
|
||||||
ErrorType: ErrTimeout,
|
|
||||||
Error: "timed out",
|
|
||||||
},
|
|
||||||
err: &Error{
|
|
||||||
Type: ErrTimeout,
|
|
||||||
Msg: "timed out",
|
|
||||||
},
|
|
||||||
code: statusAPIError,
|
|
||||||
expected: `test`,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
response: "bad json",
|
|
||||||
err: &Error{
|
|
||||||
Type: ErrBadResponse,
|
|
||||||
Msg: "bad response code 400",
|
|
||||||
},
|
|
||||||
code: http.StatusBadRequest,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
response: "bad json",
|
|
||||||
err: &Error{
|
|
||||||
Type: ErrBadResponse,
|
|
||||||
Msg: "invalid character 'b' looking for beginning of value",
|
|
||||||
},
|
|
||||||
code: statusAPIError,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
response: &apiResponse{
|
|
||||||
Status: "success",
|
|
||||||
Data: json.RawMessage(`"test"`),
|
|
||||||
},
|
|
||||||
err: &Error{
|
|
||||||
Type: ErrBadResponse,
|
|
||||||
Msg: "inconsistent body for response code",
|
|
||||||
},
|
|
||||||
code: statusAPIError,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
response: &apiResponse{
|
|
||||||
Status: "success",
|
|
||||||
Data: json.RawMessage(`"test"`),
|
|
||||||
ErrorType: ErrTimeout,
|
|
||||||
Error: "timed out",
|
|
||||||
},
|
|
||||||
err: &Error{
|
|
||||||
Type: ErrBadResponse,
|
|
||||||
Msg: "inconsistent body for response code",
|
|
||||||
},
|
|
||||||
code: statusAPIError,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
response: &apiResponse{
|
|
||||||
Status: "error",
|
|
||||||
Data: json.RawMessage(`"test"`),
|
|
||||||
ErrorType: ErrTimeout,
|
|
||||||
Error: "timed out",
|
|
||||||
},
|
|
||||||
err: &Error{
|
|
||||||
Type: ErrBadResponse,
|
|
||||||
Msg: "inconsistent body for response code",
|
|
||||||
},
|
|
||||||
code: http.StatusOK,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
tc := &testClient{
|
|
||||||
T: t,
|
|
||||||
ch: make(chan apiClientTest, 1),
|
|
||||||
req: &http.Request{},
|
|
||||||
}
|
|
||||||
client := &apiClient{tc}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
|
|
||||||
tc.ch <- test
|
|
||||||
|
|
||||||
_, body, err := client.do(context.Background(), tc.req)
|
|
||||||
|
|
||||||
if test.err != nil {
|
|
||||||
if err == nil {
|
|
||||||
t.Errorf("expected error %q but got none", test.err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if test.err.Error() != err.Error() {
|
|
||||||
t.Errorf("unexpected error: want %q, got %q", test.err, err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpeceted error %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
want, got := test.expected, string(body)
|
|
||||||
if want != got {
|
|
||||||
t.Errorf("unexpected body: want %q, got %q", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type apiTestClient struct {
|
|
||||||
*testing.T
|
|
||||||
curTest apiTest
|
|
||||||
}
|
|
||||||
|
|
||||||
type apiTest struct {
|
|
||||||
do func() (interface{}, error)
|
|
||||||
inErr error
|
|
||||||
inRes interface{}
|
|
||||||
|
|
||||||
reqPath string
|
|
||||||
reqParam url.Values
|
|
||||||
reqMethod string
|
|
||||||
res interface{}
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *apiTestClient) url(ep string, args map[string]string) *url.URL {
|
|
||||||
u := &url.URL{
|
|
||||||
Host: "test:9090",
|
|
||||||
Path: apiPrefix + ep,
|
|
||||||
}
|
|
||||||
return u
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
|
||||||
|
|
||||||
test := c.curTest
|
|
||||||
|
|
||||||
if req.URL.Path != test.reqPath {
|
|
||||||
c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
|
|
||||||
}
|
|
||||||
if req.Method != test.reqMethod {
|
|
||||||
c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := json.Marshal(test.inRes)
|
|
||||||
if err != nil {
|
|
||||||
c.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp := &http.Response{}
|
|
||||||
if test.inErr != nil {
|
|
||||||
resp.StatusCode = statusAPIError
|
|
||||||
} else {
|
|
||||||
resp.StatusCode = http.StatusOK
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp, b, test.inErr
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAPIs(t *testing.T) {
|
|
||||||
|
|
||||||
testTime := time.Now()
|
|
||||||
|
|
||||||
client := &apiTestClient{T: t}
|
|
||||||
|
|
||||||
queryAPI := &httpQueryAPI{
|
|
||||||
client: client,
|
|
||||||
}
|
|
||||||
|
|
||||||
doQuery := func(q string, ts time.Time) func() (interface{}, error) {
|
|
||||||
return func() (interface{}, error) {
|
|
||||||
return queryAPI.Query(context.Background(), q, ts)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
doQueryRange := func(q string, rng Range) func() (interface{}, error) {
|
|
||||||
return func() (interface{}, error) {
|
|
||||||
return queryAPI.QueryRange(context.Background(), q, rng)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
queryTests := []apiTest{
|
|
||||||
{
|
|
||||||
do: doQuery("2", testTime),
|
|
||||||
inRes: &queryResult{
|
|
||||||
Type: model.ValScalar,
|
|
||||||
Result: &model.Scalar{
|
|
||||||
Value: 2,
|
|
||||||
Timestamp: model.TimeFromUnix(testTime.Unix()),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
reqMethod: "GET",
|
|
||||||
reqPath: "/api/v1/query",
|
|
||||||
reqParam: url.Values{
|
|
||||||
"query": []string{"2"},
|
|
||||||
"time": []string{testTime.Format(time.RFC3339Nano)},
|
|
||||||
},
|
|
||||||
res: &model.Scalar{
|
|
||||||
Value: 2,
|
|
||||||
Timestamp: model.TimeFromUnix(testTime.Unix()),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
do: doQuery("2", testTime),
|
|
||||||
inErr: fmt.Errorf("some error"),
|
|
||||||
|
|
||||||
reqMethod: "GET",
|
|
||||||
reqPath: "/api/v1/query",
|
|
||||||
reqParam: url.Values{
|
|
||||||
"query": []string{"2"},
|
|
||||||
"time": []string{testTime.Format(time.RFC3339Nano)},
|
|
||||||
},
|
|
||||||
err: fmt.Errorf("some error"),
|
|
||||||
},
|
|
||||||
|
|
||||||
{
|
|
||||||
do: doQueryRange("2", Range{
|
|
||||||
Start: testTime.Add(-time.Minute),
|
|
||||||
End: testTime,
|
|
||||||
Step: time.Minute,
|
|
||||||
}),
|
|
||||||
inErr: fmt.Errorf("some error"),
|
|
||||||
|
|
||||||
reqMethod: "GET",
|
|
||||||
reqPath: "/api/v1/query_range",
|
|
||||||
reqParam: url.Values{
|
|
||||||
"query": []string{"2"},
|
|
||||||
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
|
|
||||||
"end": []string{testTime.Format(time.RFC3339Nano)},
|
|
||||||
"step": []string{time.Minute.String()},
|
|
||||||
},
|
|
||||||
err: fmt.Errorf("some error"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var tests []apiTest
|
|
||||||
tests = append(tests, queryTests...)
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
client.curTest = test
|
|
||||||
|
|
||||||
res, err := test.do()
|
|
||||||
|
|
||||||
if test.err != nil {
|
|
||||||
if err == nil {
|
|
||||||
t.Errorf("expected error %q but got none", test.err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err.Error() != test.err.Error() {
|
|
||||||
t.Errorf("unexpected error: want %s, got %s", test.err, err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error: %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !reflect.DeepEqual(res, test.res) {
|
|
||||||
t.Errorf("unexpected result: want %v, got %v", test.res, res)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,106 +0,0 @@
|
||||||
// Copyright 2015 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// A simple example exposing fictional RPC latencies with different types of
|
|
||||||
// random distributions (uniform, normal, and exponential) as Prometheus
|
|
||||||
// metrics.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"math/rand"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
|
|
||||||
uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.")
|
|
||||||
normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.")
|
|
||||||
normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.")
|
|
||||||
oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.")
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Create a summary to track fictional interservice RPC latencies for three
|
|
||||||
// distinct services with different latency distributions. These services are
|
|
||||||
// differentiated via a "service" label.
|
|
||||||
rpcDurations = prometheus.NewSummaryVec(
|
|
||||||
prometheus.SummaryOpts{
|
|
||||||
Name: "rpc_durations_seconds",
|
|
||||||
Help: "RPC latency distributions.",
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
},
|
|
||||||
[]string{"service"},
|
|
||||||
)
|
|
||||||
// The same as above, but now as a histogram, and only for the normal
|
|
||||||
// distribution. The buckets are targeted to the parameters of the
|
|
||||||
// normal distribution, with 20 buckets centered on the mean, each
|
|
||||||
// half-sigma wide.
|
|
||||||
rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
|
|
||||||
Name: "rpc_durations_histogram_seconds",
|
|
||||||
Help: "RPC latency distributions.",
|
|
||||||
Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Register the summary and the histogram with Prometheus's default registry.
|
|
||||||
prometheus.MustRegister(rpcDurations)
|
|
||||||
prometheus.MustRegister(rpcDurationsHistogram)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
|
|
||||||
oscillationFactor := func() float64 {
|
|
||||||
return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Periodically record some sample latencies for the three services.
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
v := rand.Float64() * *uniformDomain
|
|
||||||
rpcDurations.WithLabelValues("uniform").Observe(v)
|
|
||||||
time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
v := (rand.NormFloat64() * *normDomain) + *normMean
|
|
||||||
rpcDurations.WithLabelValues("normal").Observe(v)
|
|
||||||
rpcDurationsHistogram.Observe(v)
|
|
||||||
time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
v := rand.ExpFloat64() / 1e6
|
|
||||||
rpcDurations.WithLabelValues("exponential").Observe(v)
|
|
||||||
time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Expose the registered metrics via HTTP.
|
|
||||||
http.Handle("/metrics", promhttp.Handler())
|
|
||||||
log.Fatal(http.ListenAndServe(*addr, nil))
|
|
||||||
}
|
|
||||||
|
|
@ -1,31 +0,0 @@
|
||||||
// Copyright 2015 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// A minimal example of how to include Prometheus instrumentation.
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Parse()
|
|
||||||
http.Handle("/metrics", promhttp.Handler())
|
|
||||||
log.Fatal(http.ListenAndServe(*addr, nil))
|
|
||||||
}
|
|
||||||
|
|
@ -1 +1,94 @@
|
||||||
See [the API documentation](https://godoc.org/github.com/prometheus/client_golang/prometheus).
|
# Prometheus Go client library
|
||||||
|
|
||||||
|
[Build Status](https://travis-ci.org/prometheus/client_golang)
|
||||||
|
[Go Report Card](https://goreportcard.com/report/github.com/prometheus/client_golang)
|
||||||
|
[GoDoc](https://godoc.org/github.com/prometheus/client_golang)
|
||||||
|
|
||||||
|
This is the [Go](http://golang.org) client library for
|
||||||
|
[Prometheus](http://prometheus.io). It has two separate parts, one for
|
||||||
|
instrumenting application code, and one for creating clients that talk to the
|
||||||
|
Prometheus HTTP API.
|
||||||
|
|
||||||
|
__This library requires Go1.7 or later.__
|
||||||
|
|
||||||
|
## Important note about releases, versioning, tagging, stability, and your favorite Go dependency management tool
|
||||||
|
|
||||||
|
While our goal is to follow [Semantic Versioning](https://semver.org/), this
|
||||||
|
repository is still pre-1.0.0. To quote the
|
||||||
|
[Semantic Versioning spec](https://semver.org/#spec-item-4): “Anything may
|
||||||
|
change at any time. The public API should not be considered stable.” We know
|
||||||
|
that this is at odds with the widespread use of this library. However, just
|
||||||
|
declaring something 1.0.0 doesn't make it 1.0.0. Instead, we are working
|
||||||
|
towards a 1.0.0 release that actually deserves its major version number.
|
||||||
|
|
||||||
|
Having said that, we aim for always keeping the tip of master in a workable
|
||||||
|
state and for only introducing “mildly” breaking changes up to and including
|
||||||
|
[v0.9.0](https://github.com/prometheus/client_golang/milestone/1). After that,
|
||||||
|
a number of “hard” breaking changes are planned, see the
|
||||||
|
[v0.10.0 milestone](https://github.com/prometheus/client_golang/milestone/2),
|
||||||
|
which should get the library much closer to 1.0.0 state.
|
||||||
|
|
||||||
|
Dependency management in Go projects is still in flux, and there are many tools
|
||||||
|
floating around. While [dep](https://golang.github.io/dep/) might develop into
|
||||||
|
the de-facto standard tool, it is still officially experimental. The roadmap
|
||||||
|
for this library has been laid out with a lot of sometimes painful experience
|
||||||
|
in mind. We really cannot adjust it every other month to the needs of the
|
||||||
|
currently most popular or most promising Go dependency management tool. The
|
||||||
|
recommended course of action with dependency management tools is the following:
|
||||||
|
|
||||||
|
- Do not expect strict post-1.0.0 semver semantics prior to the 1.0.0
|
||||||
|
release. If your dependency management tool expects strict post-1.0.0 semver
|
||||||
|
semantics, you have to wait. Sorry.
|
||||||
|
- If you want absolute certainty, please lock to a specific commit. You can
|
||||||
|
also lock to tags, but please don't ask for more tagging. This would suggest
|
||||||
|
some release or stability testing procedure that simply is not in place. As
|
||||||
|
said above, we are aiming for stability of the tip of master, but if we
|
||||||
|
tagged every single commit, locking to tags would be the same as locking to
|
||||||
|
commits.
|
||||||
|
- If you want to get the newer features and improvements and are willing to
|
||||||
|
take the minor risk of newly introduced bugs and “mild” breakage, just always
|
||||||
|
update to the tip of master (which is essentially the original idea of Go
|
||||||
|
dependency management). We recommend not using features marked as
|
||||||
|
_deprecated_ in this case.
|
||||||
|
- Once [v0.9.0](https://github.com/prometheus/client_golang/milestone/1) is
|
||||||
|
out, you could lock to v0.9.x to get bugfixes (and perhaps minor new
|
||||||
|
features) while avoiding the “hard” breakage that will come with post-0.9
|
||||||
|
features.
|
||||||
|
|
||||||
|
## Instrumenting applications
|
||||||
|
|
||||||
|
[Code Coverage](http://gocover.io/github.com/prometheus/client_golang/prometheus) [GoDoc](https://godoc.org/github.com/prometheus/client_golang/prometheus)
|
||||||
|
|
||||||
|
The
|
||||||
|
[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
|
||||||
|
contains the instrumentation library. See the
|
||||||
|
[best practices section](http://prometheus.io/docs/practices/naming/) of the
|
||||||
|
Prometheus documentation to learn more about instrumenting applications.
|
||||||
|
|
||||||
|
The
|
||||||
|
[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
|
||||||
|
contains simple examples of instrumented code.
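
For orientation, here is a minimal instrumentation sketch in the same spirit as those examples (the metric name, label, and handler below are made up purely for illustration; the calls used — `NewCounterVec`, `MustRegister`, `WithLabelValues`, `promhttp.Handler` — all appear elsewhere in this library):

```go
// Minimal sketch: count handled HTTP requests, partitioned by handler name,
// and expose all registered metrics on /metrics.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// requestsTotal is a hypothetical metric, used only for illustration.
var requestsTotal = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Number of handled requests, partitioned by handler.",
	},
	[]string{"handler"},
)

func init() {
	// Metrics have to be registered before they are exposed.
	prometheus.MustRegister(requestsTotal)
}

func main() {
	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		requestsTotal.WithLabelValues("hello").Inc()
		w.Write([]byte("hello\n"))
	})
	// Expose all registered metrics on /metrics.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```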
|
||||||
|
|
||||||
|
## Client for the Prometheus HTTP API
|
||||||
|
|
||||||
|
[Code Coverage](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [GoDoc](https://godoc.org/github.com/prometheus/client_golang/api)
|
||||||
|
|
||||||
|
The
|
||||||
|
[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
|
||||||
|
contains the client for the
|
||||||
|
[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
|
||||||
|
to write Go applications that query time series data from a Prometheus
|
||||||
|
server. It is still in alpha stage.
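
As a rough sketch of what a query looks like (hedged: the `Config`, `New`, and `NewQueryAPI` names are assumptions about the current shape of this alpha package, which may also expect `golang.org/x/net/context` rather than the standard `context` package; the `Query` signature matches the tests in this repository):

```go
// Sketch only: evaluate an instant query against a Prometheus server.
// The address and the PromQL expression are placeholders.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api/prometheus"
)

func main() {
	// Assumed constructor and config; adjust if the package's API differs.
	client, err := prometheus.New(prometheus.Config{Address: "http://localhost:9090"})
	if err != nil {
		log.Fatal(err)
	}
	queryAPI := prometheus.NewQueryAPI(client)

	// Query(ctx, expr, ts) evaluates a PromQL expression at a point in time
	// and returns a model.Value (scalar, vector, matrix, or string).
	val, err := queryAPI.Query(context.Background(), "up", time.Now())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(val)
}
```

Range queries work the same way via `QueryRange`, which takes a `Range{Start, End, Step}` instead of a single timestamp.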
|
||||||
|
|
||||||
|
## Where is `model`, `extraction`, and `text`?
|
||||||
|
|
||||||
|
The `model` package has been moved to
|
||||||
|
[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).
|
||||||
|
|
||||||
|
The `extraction` and `text` packages are now contained in
|
||||||
|
[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).
|
||||||
|
|
||||||
|
## Contributing and community
|
||||||
|
|
||||||
|
See the [contributing guidelines](CONTRIBUTING.md) and the
|
||||||
|
[Community section](http://prometheus.io/community/) of the homepage.
|
||||||
|
|
|
||||||
|
|
@ -1,185 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func BenchmarkCounterWithLabelValues(b *testing.B) {
|
|
||||||
m := NewCounterVec(
|
|
||||||
CounterOpts{
|
|
||||||
Name: "benchmark_counter",
|
|
||||||
Help: "A counter to benchmark it.",
|
|
||||||
},
|
|
||||||
[]string{"one", "two", "three"},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.WithLabelValues("eins", "zwei", "drei").Inc()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) {
|
|
||||||
m := NewCounterVec(
|
|
||||||
CounterOpts{
|
|
||||||
Name: "benchmark_counter",
|
|
||||||
Help: "A counter to benchmark it.",
|
|
||||||
},
|
|
||||||
[]string{"one", "two", "three"},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
wg := sync.WaitGroup{}
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
for j := 0; j < b.N/10; j++ {
|
|
||||||
m.WithLabelValues("eins", "zwei", "drei").Inc()
|
|
||||||
}
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkCounterWithMappedLabels(b *testing.B) {
|
|
||||||
m := NewCounterVec(
|
|
||||||
CounterOpts{
|
|
||||||
Name: "benchmark_counter",
|
|
||||||
Help: "A counter to benchmark it.",
|
|
||||||
},
|
|
||||||
[]string{"one", "two", "three"},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) {
|
|
||||||
m := NewCounterVec(
|
|
||||||
CounterOpts{
|
|
||||||
Name: "benchmark_counter",
|
|
||||||
Help: "A counter to benchmark it.",
|
|
||||||
},
|
|
||||||
[]string{"one", "two", "three"},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
labels := Labels{"two": "zwei", "one": "eins", "three": "drei"}
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.With(labels).Inc()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkCounterNoLabels(b *testing.B) {
|
|
||||||
m := NewCounter(CounterOpts{
|
|
||||||
Name: "benchmark_counter",
|
|
||||||
Help: "A counter to benchmark it.",
|
|
||||||
})
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.Inc()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkGaugeWithLabelValues(b *testing.B) {
|
|
||||||
m := NewGaugeVec(
|
|
||||||
GaugeOpts{
|
|
||||||
Name: "benchmark_gauge",
|
|
||||||
Help: "A gauge to benchmark it.",
|
|
||||||
},
|
|
||||||
[]string{"one", "two", "three"},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.WithLabelValues("eins", "zwei", "drei").Set(3.1415)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkGaugeNoLabels(b *testing.B) {
|
|
||||||
m := NewGauge(GaugeOpts{
|
|
||||||
Name: "benchmark_gauge",
|
|
||||||
Help: "A gauge to benchmark it.",
|
|
||||||
})
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.Set(3.1415)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSummaryWithLabelValues(b *testing.B) {
|
|
||||||
m := NewSummaryVec(
|
|
||||||
SummaryOpts{
|
|
||||||
Name: "benchmark_summary",
|
|
||||||
Help: "A summary to benchmark it.",
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
},
|
|
||||||
[]string{"one", "two", "three"},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSummaryNoLabels(b *testing.B) {
|
|
||||||
m := NewSummary(SummaryOpts{
|
|
||||||
Name: "benchmark_summary",
|
|
||||||
Help: "A summary to benchmark it.",
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.Observe(3.1415)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramWithLabelValues(b *testing.B) {
|
|
||||||
m := NewHistogramVec(
|
|
||||||
HistogramOpts{
|
|
||||||
Name: "benchmark_histogram",
|
|
||||||
Help: "A histogram to benchmark it.",
|
|
||||||
},
|
|
||||||
[]string{"one", "two", "three"},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramNoLabels(b *testing.B) {
|
|
||||||
m := NewHistogram(HistogramOpts{
|
|
||||||
Name: "benchmark_histogram",
|
|
||||||
Help: "A histogram to benchmark it.",
|
|
||||||
},
|
|
||||||
)
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
m.Observe(3.1415)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,75 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
// Collector is the interface implemented by anything that can be used by
|
|
||||||
// Prometheus to collect metrics. A Collector has to be registered for
|
|
||||||
// collection. See Registerer.Register.
|
|
||||||
//
|
|
||||||
// The stock metrics provided by this package (Gauge, Counter, Summary,
|
|
||||||
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
|
|
||||||
// namely itself). An implementer of Collector may, however, collect multiple
|
|
||||||
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
|
|
||||||
// for collectors already implemented in this library are the metric vectors
|
|
||||||
// (i.e. collection of multiple instances of the same Metric but with different
|
|
||||||
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
|
|
||||||
type Collector interface {
|
|
||||||
// Describe sends the super-set of all possible descriptors of metrics
|
|
||||||
// collected by this Collector to the provided channel and returns once
|
|
||||||
// the last descriptor has been sent. The sent descriptors fulfill the
|
|
||||||
// consistency and uniqueness requirements described in the Desc
|
|
||||||
// documentation. (It is valid if one and the same Collector sends
|
|
||||||
// duplicate descriptors. Those duplicates are simply ignored. However,
|
|
||||||
// two different Collectors must not send duplicate descriptors.) This
|
|
||||||
// method idempotently sends the same descriptors throughout the
|
|
||||||
// lifetime of the Collector. If a Collector encounters an error while
|
|
||||||
// executing this method, it must send an invalid descriptor (created
|
|
||||||
// with NewInvalidDesc) to signal the error to the registry.
|
|
||||||
Describe(chan<- *Desc)
|
|
||||||
// Collect is called by the Prometheus registry when collecting
|
|
||||||
// metrics. The implementation sends each collected metric via the
|
|
||||||
// provided channel and returns once the last metric has been sent. The
|
|
||||||
// descriptor of each sent metric is one of those returned by
|
|
||||||
// Describe. Returned metrics that share the same descriptor must differ
|
|
||||||
// in their variable label values. This method may be called
|
|
||||||
// concurrently and must therefore be implemented in a concurrency safe
|
|
||||||
// way. Blocking occurs at the expense of total performance of rendering
|
|
||||||
// all registered metrics. Ideally, Collector implementations support
|
|
||||||
// concurrent readers.
|
|
||||||
Collect(chan<- Metric)
|
|
||||||
}
|
|
||||||
|
|
||||||
// selfCollector implements Collector for a single Metric so that the Metric
|
|
||||||
// collects itself. Add it as an anonymous field to a struct that implements
|
|
||||||
// Metric, and call init with the Metric itself as an argument.
|
|
||||||
type selfCollector struct {
|
|
||||||
self Metric
|
|
||||||
}
|
|
||||||
|
|
||||||
// init provides the selfCollector with a reference to the metric it is supposed
|
|
||||||
// to collect. It is usually called within the factory function to create a
|
|
||||||
// metric. See example.
|
|
||||||
func (c *selfCollector) init(self Metric) {
|
|
||||||
c.self = self
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describe implements Collector.
|
|
||||||
func (c *selfCollector) Describe(ch chan<- *Desc) {
|
|
||||||
ch <- c.self.Desc()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect implements Collector.
|
|
||||||
func (c *selfCollector) Collect(ch chan<- Metric) {
|
|
||||||
ch <- c.self
|
|
||||||
}
|
|
||||||
|
|
@ -1,164 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Counter is a Metric that represents a single numerical value that only ever
|
|
||||||
// goes up. That implies that it cannot be used to count items whose number can
|
|
||||||
// also go down, e.g. the number of currently running goroutines. Those
|
|
||||||
// "counters" are represented by Gauges.
|
|
||||||
//
|
|
||||||
// A Counter is typically used to count requests served, tasks completed, errors
|
|
||||||
// occurred, etc.
|
|
||||||
//
|
|
||||||
// To create Counter instances, use NewCounter.
|
|
||||||
type Counter interface {
|
|
||||||
Metric
|
|
||||||
Collector
|
|
||||||
|
|
||||||
// Inc increments the counter by 1. Use Add to increment it by arbitrary
|
|
||||||
// non-negative values.
|
|
||||||
Inc()
|
|
||||||
// Add adds the given value to the counter. It panics if the value is <
|
|
||||||
// 0.
|
|
||||||
Add(float64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CounterOpts is an alias for Opts. See there for doc comments.
|
|
||||||
type CounterOpts Opts
|
|
||||||
|
|
||||||
// NewCounter creates a new Counter based on the provided CounterOpts.
|
|
||||||
func NewCounter(opts CounterOpts) Counter {
|
|
||||||
desc := NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
nil,
|
|
||||||
opts.ConstLabels,
|
|
||||||
)
|
|
||||||
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
|
|
||||||
result.init(result) // Init self-collection.
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
type counter struct {
|
|
||||||
value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *counter) Add(v float64) {
|
|
||||||
if v < 0 {
|
|
||||||
panic(errors.New("counter cannot decrease in value"))
|
|
||||||
}
|
|
||||||
c.value.Add(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CounterVec is a Collector that bundles a set of Counters that all share the
|
|
||||||
// same Desc, but have different values for their variable labels. This is used
|
|
||||||
// if you want to count the same thing partitioned by various dimensions
|
|
||||||
// (e.g. number of HTTP requests, partitioned by response code and
|
|
||||||
// method). Create instances with NewCounterVec.
|
|
||||||
//
|
|
||||||
// CounterVec embeds MetricVec. See there for a full list of methods with
|
|
||||||
// detailed documentation.
|
|
||||||
type CounterVec struct {
|
|
||||||
*MetricVec
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
|
||||||
// partitioned by the given label names. At least one label name must be
|
|
||||||
// provided.
|
|
||||||
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
|
||||||
desc := NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
labelNames,
|
|
||||||
opts.ConstLabels,
|
|
||||||
)
|
|
||||||
return &CounterVec{
|
|
||||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
|
||||||
result := &counter{value: value{
|
|
||||||
desc: desc,
|
|
||||||
valType: CounterValue,
|
|
||||||
labelPairs: makeLabelPairs(desc, lvs),
|
|
||||||
}}
|
|
||||||
result.init(result) // Init self-collection.
|
|
||||||
return result
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetricWithLabelValues replaces the method of the same name in
|
|
||||||
// MetricVec. The difference is that this method returns a Counter and not a
|
|
||||||
// Metric so that no type conversion is required.
|
|
||||||
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
|
|
||||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
|
||||||
if metric != nil {
|
|
||||||
return metric.(Counter), err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
|
||||||
// difference is that this method returns a Counter and not a Metric so that no
|
|
||||||
// type conversion is required.
|
|
||||||
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
|
|
||||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
|
||||||
if metric != nil {
|
|
||||||
return metric.(Counter), err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
|
||||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
|
||||||
// error, WithLabelValues allows shortcuts like
|
|
||||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
|
||||||
func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
|
|
||||||
return m.MetricVec.WithLabelValues(lvs...).(Counter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// With works as GetMetricWith, but panics where GetMetricWith would have
|
|
||||||
// returned an error. By not returning an error, With allows shortcuts like
|
|
||||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
|
||||||
func (m *CounterVec) With(labels Labels) Counter {
|
|
||||||
return m.MetricVec.With(labels).(Counter)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CounterFunc is a Counter whose value is determined at collect time by calling a
|
|
||||||
// provided function.
|
|
||||||
//
|
|
||||||
// To create CounterFunc instances, use NewCounterFunc.
|
|
||||||
type CounterFunc interface {
|
|
||||||
Metric
|
|
||||||
Collector
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCounterFunc creates a new CounterFunc based on the provided
|
|
||||||
// CounterOpts. The value reported is determined by calling the given function
|
|
||||||
// from within the Write method. Take into account that metric collection may
|
|
||||||
// happen concurrently. If that results in concurrent calls to Write, like in
|
|
||||||
// the case where a CounterFunc is directly registered with Prometheus, the
|
|
||||||
// provided function must be concurrency-safe. The function should also honor
|
|
||||||
// the contract for a Counter (values only go up, not down), but compliance will
|
|
||||||
// not be checked.
|
|
||||||
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
|
|
||||||
return newValueFunc(NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
nil,
|
|
||||||
opts.ConstLabels,
|
|
||||||
), CounterValue, function)
|
|
||||||
}
|
|
||||||
|
|
@ -1,58 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCounterAdd(t *testing.T) {
|
|
||||||
counter := NewCounter(CounterOpts{
|
|
||||||
Name: "test",
|
|
||||||
Help: "test help",
|
|
||||||
ConstLabels: Labels{"a": "1", "b": "2"},
|
|
||||||
}).(*counter)
|
|
||||||
counter.Inc()
|
|
||||||
if expected, got := 1., math.Float64frombits(counter.valBits); expected != got {
|
|
||||||
t.Errorf("Expected %f, got %f.", expected, got)
|
|
||||||
}
|
|
||||||
counter.Add(42)
|
|
||||||
if expected, got := 43., math.Float64frombits(counter.valBits); expected != got {
|
|
||||||
t.Errorf("Expected %f, got %f.", expected, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
|
|
||||||
t.Errorf("Expected error %q, got %q.", expected, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &dto.Metric{}
|
|
||||||
counter.Write(m)
|
|
||||||
|
|
||||||
if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got {
|
|
||||||
t.Errorf("expected %q, got %q", expected, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func decreaseCounter(c *counter) (err error) {
|
|
||||||
defer func() {
|
|
||||||
if e := recover(); e != nil {
|
|
||||||
err = e.(error)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
c.Add(-1)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,200 +0,0 @@
|
||||||
// Copyright 2016 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"github.com/prometheus/common/model"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
// reservedLabelPrefix is a prefix which is not legal in user-supplied
|
|
||||||
// label names.
|
|
||||||
const reservedLabelPrefix = "__"
|
|
||||||
|
|
||||||
// Labels represents a collection of label name -> value mappings. This type is
|
|
||||||
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
|
|
||||||
// metric vector Collectors, e.g.:
|
|
||||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
|
||||||
//
|
|
||||||
// The other use-case is the specification of constant label pairs in Opts or to
|
|
||||||
// create a Desc.
|
|
||||||
type Labels map[string]string
|
|
||||||
|
|
||||||
// Desc is the descriptor used by every Prometheus Metric. It is essentially
|
|
||||||
// the immutable meta-data of a Metric. The normal Metric implementations
|
|
||||||
// included in this package manage their Desc under the hood. Users only have to
|
|
||||||
// deal with Desc if they use advanced features like the ExpvarCollector or
|
|
||||||
// custom Collectors and Metrics.
|
|
||||||
//
|
|
||||||
// Descriptors registered with the same registry have to fulfill certain
|
|
||||||
// consistency and uniqueness criteria if they share the same fully-qualified
|
|
||||||
// name: They must have the same help string and the same label names (aka label
|
|
||||||
// dimensions) in each, constLabels and variableLabels, but they must differ in
|
|
||||||
// the values of the constLabels.
|
|
||||||
//
|
|
||||||
// Descriptors that share the same fully-qualified names and the same label
|
|
||||||
// values of their constLabels are considered equal.
|
|
||||||
//
|
|
||||||
// Use NewDesc to create new Desc instances.
|
|
||||||
type Desc struct {
|
|
||||||
// fqName has been built from Namespace, Subsystem, and Name.
|
|
||||||
fqName string
|
|
||||||
// help provides some helpful information about this metric.
|
|
||||||
help string
|
|
||||||
// constLabelPairs contains precalculated DTO label pairs based on
|
|
||||||
// the constant labels.
|
|
||||||
constLabelPairs []*dto.LabelPair
|
|
||||||
// VariableLabels contains names of labels for which the metric
|
|
||||||
// maintains variable values.
|
|
||||||
variableLabels []string
|
|
||||||
// id is a hash of the values of the ConstLabels and fqName. This
|
|
||||||
// must be unique among all registered descriptors and can therefore be
|
|
||||||
// used as an identifier of the descriptor.
|
|
||||||
id uint64
|
|
||||||
// dimHash is a hash of the label names (preset and variable) and the
|
|
||||||
// Help string. Each Desc with the same fqName must have the same
|
|
||||||
// dimHash.
|
|
||||||
dimHash uint64
|
|
||||||
// err is an error that occurred during construction. It is reported on
|
|
||||||
// registration time.
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
|
|
||||||
// and will be reported on registration time. variableLabels and constLabels can
|
|
||||||
// be nil if no such labels should be set. fqName and help must not be empty.
|
|
||||||
//
|
|
||||||
// variableLabels only contain the label names. Their label values are variable
|
|
||||||
// and therefore not part of the Desc. (They are managed within the Metric.)
|
|
||||||
//
|
|
||||||
// For constLabels, the label values are constant. Therefore, they are fully
|
|
||||||
// specified in the Desc. See the Opts documentation for the implications of
|
|
||||||
// constant labels.
|
|
||||||
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
|
|
||||||
d := &Desc{
|
|
||||||
fqName: fqName,
|
|
||||||
help: help,
|
|
||||||
variableLabels: variableLabels,
|
|
||||||
}
|
|
||||||
if help == "" {
|
|
||||||
d.err = errors.New("empty help string")
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
if !model.IsValidMetricName(model.LabelValue(fqName)) {
|
|
||||||
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
// labelValues contains the label values of const labels (in order of
|
|
||||||
// their sorted label names) plus the fqName (at position 0).
|
|
||||||
labelValues := make([]string, 1, len(constLabels)+1)
|
|
||||||
labelValues[0] = fqName
|
|
||||||
labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
|
|
||||||
labelNameSet := map[string]struct{}{}
|
|
||||||
// First add only the const label names and sort them...
|
|
||||||
for labelName := range constLabels {
|
|
||||||
if !checkLabelName(labelName) {
|
|
||||||
d.err = fmt.Errorf("%q is not a valid label name", labelName)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
labelNames = append(labelNames, labelName)
|
|
||||||
labelNameSet[labelName] = struct{}{}
|
|
||||||
}
|
|
||||||
sort.Strings(labelNames)
|
|
||||||
// ... so that we can now add const label values in the order of their names.
|
|
||||||
for _, labelName := range labelNames {
|
|
||||||
labelValues = append(labelValues, constLabels[labelName])
|
|
||||||
}
|
|
||||||
// Now add the variable label names, but prefix them with something that
|
|
||||||
// cannot be in a regular label name. That prevents matching the label
|
|
||||||
// dimension with a different mix between preset and variable labels.
|
|
||||||
for _, labelName := range variableLabels {
|
|
||||||
if !checkLabelName(labelName) {
|
|
||||||
d.err = fmt.Errorf("%q is not a valid label name", labelName)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
labelNames = append(labelNames, "$"+labelName)
|
|
||||||
labelNameSet[labelName] = struct{}{}
|
|
||||||
}
|
|
||||||
if len(labelNames) != len(labelNameSet) {
|
|
||||||
d.err = errors.New("duplicate label names")
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
vh := hashNew()
|
|
||||||
for _, val := range labelValues {
|
|
||||||
vh = hashAdd(vh, val)
|
|
||||||
vh = hashAddByte(vh, separatorByte)
|
|
||||||
}
|
|
||||||
d.id = vh
|
|
||||||
// Sort labelNames so that order doesn't matter for the hash.
|
|
||||||
sort.Strings(labelNames)
|
|
||||||
// Now hash together (in this order) the help string and the sorted
|
|
||||||
// label names.
|
|
||||||
lh := hashNew()
|
|
||||||
lh = hashAdd(lh, help)
|
|
||||||
lh = hashAddByte(lh, separatorByte)
|
|
||||||
for _, labelName := range labelNames {
|
|
||||||
lh = hashAdd(lh, labelName)
|
|
||||||
lh = hashAddByte(lh, separatorByte)
|
|
||||||
}
|
|
||||||
d.dimHash = lh
|
|
||||||
|
|
||||||
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
|
|
||||||
for n, v := range constLabels {
|
|
||||||
d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
|
|
||||||
Name: proto.String(n),
|
|
||||||
Value: proto.String(v),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
sort.Sort(LabelPairSorter(d.constLabelPairs))
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
|
|
||||||
// provided error set. If a collector returning such a descriptor is registered,
|
|
||||||
// registration will fail with the provided error. NewInvalidDesc can be used by
|
|
||||||
// a Collector to signal inability to describe itself.
|
|
||||||
func NewInvalidDesc(err error) *Desc {
|
|
||||||
return &Desc{
|
|
||||||
err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Desc) String() string {
|
|
||||||
lpStrings := make([]string, 0, len(d.constLabelPairs))
|
|
||||||
for _, lp := range d.constLabelPairs {
|
|
||||||
lpStrings = append(
|
|
||||||
lpStrings,
|
|
||||||
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
|
|
||||||
d.fqName,
|
|
||||||
d.help,
|
|
||||||
strings.Join(lpStrings, ","),
|
|
||||||
d.variableLabels,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkLabelName(l string) bool {
|
|
||||||
return model.LabelName(l).IsValid() &&
|
|
||||||
!strings.HasPrefix(l, reservedLabelPrefix)
|
|
||||||
}
|
|
||||||
|
|
@ -1,181 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package prometheus provides metrics primitives to instrument code for
|
|
||||||
// monitoring. It also offers a registry for metrics. Sub-packages allow you to
|
|
||||||
// expose the registered metrics via HTTP (package promhttp) or push them to a
|
|
||||||
// Pushgateway (package push).
|
|
||||||
//
|
|
||||||
// All exported functions and methods are safe to be used concurrently unless
|
|
||||||
// specified otherwise.
|
|
||||||
//
|
|
||||||
// A Basic Example
|
|
||||||
//
|
|
||||||
// As a starting point, a very basic usage example:
|
|
||||||
//
|
|
||||||
// package main
|
|
||||||
//
|
|
||||||
// import (
|
|
||||||
// "net/http"
|
|
||||||
//
|
|
||||||
// "github.com/prometheus/client_golang/prometheus"
|
|
||||||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// var (
|
|
||||||
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
// Name: "cpu_temperature_celsius",
|
|
||||||
// Help: "Current temperature of the CPU.",
|
|
||||||
// })
|
|
||||||
// hdFailures = prometheus.NewCounterVec(
|
|
||||||
// prometheus.CounterOpts{
|
|
||||||
// Name: "hd_errors_total",
|
|
||||||
// Help: "Number of hard-disk errors.",
|
|
||||||
// },
|
|
||||||
// []string{"device"},
|
|
||||||
// )
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// func init() {
|
|
||||||
// // Metrics have to be registered to be exposed:
|
|
||||||
// prometheus.MustRegister(cpuTemp)
|
|
||||||
// prometheus.MustRegister(hdFailures)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// func main() {
|
|
||||||
// cpuTemp.Set(65.3)
|
|
||||||
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
|
||||||
//
|
|
||||||
// // The Handler function provides a default handler to expose metrics
|
|
||||||
// // via an HTTP server. "/metrics" is the usual endpoint for that.
|
|
||||||
// http.Handle("/metrics", promhttp.Handler())
|
|
||||||
// log.Fatal(http.ListenAndServe(":8080", nil))
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// This is a complete program that exports two metrics, a Gauge and a Counter,
|
|
||||||
// the latter with a label attached to turn it into a (one-dimensional) vector.
|
|
||||||
//
|
|
||||||
// Metrics
|
|
||||||
//
|
|
||||||
// The number of exported identifiers in this package might appear a bit
|
|
||||||
// overwhelming. However, in addition to the basic plumbing shown in the example
|
|
||||||
// above, you only need to understand the different metric types and their
|
|
||||||
// vector versions for basic usage.
|
|
||||||
//
|
|
||||||
// Above, you have already touched the Counter and the Gauge. There are two more
|
|
||||||
// advanced metric types: the Summary and Histogram. A more thorough description
|
|
||||||
// of those four metric types can be found in the Prometheus docs:
|
|
||||||
// https://prometheus.io/docs/concepts/metric_types/
|
|
||||||
//
|
|
||||||
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
|
|
||||||
// Prometheus server not to assume anything about its type.
|
|
||||||
//
|
|
||||||
// In addition to the fundamental metric types Gauge, Counter, Summary,
|
|
||||||
// Histogram, and Untyped, a very important part of the Prometheus data model is
|
|
||||||
// the partitioning of samples along dimensions called labels, which results in
|
|
||||||
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
|
|
||||||
// HistogramVec, and UntypedVec.
|
|
||||||
//
|
|
||||||
// While only the fundamental metric types implement the Metric interface, both
|
|
||||||
// the metrics and their vector versions implement the Collector interface. A
|
|
||||||
// Collector manages the collection of a number of Metrics, but for convenience,
|
|
||||||
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
|
|
||||||
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
|
|
||||||
// SummaryVec, HistogramVec, and UntypedVec are not.
|
|
||||||
//
|
|
||||||
// To create instances of Metrics and their vector versions, you need a suitable
|
|
||||||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
|
|
||||||
// HistogramOpts, or UntypedOpts.
|
|
||||||
//
|
|
||||||
// Custom Collectors and constant Metrics
|
|
||||||
//
|
|
||||||
// While you could create your own implementations of Metric, most likely you
|
|
||||||
// will only ever implement the Collector interface on your own. At a first
|
|
||||||
// glance, a custom Collector seems handy to bundle Metrics for common
|
|
||||||
// registration (with the prime example of the different metric vectors above,
|
|
||||||
// which bundle all the metrics of the same name but with different labels).
|
|
||||||
//
|
|
||||||
// There is a more involved use case, too: If you already have metrics
|
|
||||||
// available, created outside of the Prometheus context, you don't need the
|
|
||||||
// interface of the various Metric types. You essentially want to mirror the
|
|
||||||
// existing numbers into Prometheus Metrics during collection. An own
|
|
||||||
// implementation of the Collector interface is perfect for that. You can create
|
|
||||||
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
|
|
||||||
// NewConstSummary (and their respective Must… versions). That will happen in
|
|
||||||
// the Collect method. The Describe method has to return separate Desc
|
|
||||||
// instances, representative of the “throw-away” metrics to be created
|
|
||||||
// later. NewDesc comes in handy to create those Desc instances.
|
|
||||||
//
|
|
||||||
// The Collector example illustrates the use case. You can also look at the
|
|
||||||
// source code of the processCollector (mirroring process metrics), the
|
|
||||||
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
|
|
||||||
// metrics) as examples that are used in this package itself.
|
|
||||||
//
|
|
||||||
// If you just need to call a function to get a single float value to collect as
|
|
||||||
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
|
|
||||||
// shortcuts.
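//
// For example (a sketch, not part of the original text; the goroutine count is
// just an illustrative value source):
//
//	prometheus.MustRegister(prometheus.NewGaugeFunc(
//		prometheus.GaugeOpts{
//			Name: "goroutines_current",
//			Help: "Current number of goroutines.",
//		},
//		func() float64 { return float64(runtime.NumGoroutine()) },
//	))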
|
|
||||||
//
|
|
||||||
// Advanced Uses of the Registry
|
|
||||||
//
|
|
||||||
// While MustRegister is the by far most common way of registering a Collector,
|
|
||||||
// sometimes you might want to handle the errors the registration might
|
|
||||||
// cause. As suggested by the name, MustRegister panics if an error occurs. With
|
|
||||||
// the Register function, the error is returned and can be handled.
|
|
||||||
//
|
|
||||||
// An error is returned if the registered Collector is incompatible or
|
|
||||||
// inconsistent with already registered metrics. The registry aims for
|
|
||||||
// consistency of the collected metrics according to the Prometheus data
|
|
||||||
// model. Inconsistencies are ideally detected at registration time, not at
|
|
||||||
// collect time. The former will usually be detected at start-up time of a
|
|
||||||
// program, while the latter will only happen at scrape time, possibly not even
|
|
||||||
// on the first scrape if the inconsistency only becomes relevant later. That is
|
|
||||||
// the main reason why a Collector and a Metric have to describe themselves to
|
|
||||||
// the registry.
|
|
||||||
//
|
|
||||||
// So far, everything we did operated on the so-called default registry, as it
|
|
||||||
// can be found in the global DefaultRegistry variable. With NewRegistry, you
|
|
||||||
// can create a custom registry, or you can even implement the Registerer or
|
|
||||||
// Gatherer interfaces yourself. The methods Register and Unregister work in
|
|
||||||
// the same way on a custom registry as the global functions Register and
|
|
||||||
// Unregister on the default registry.
|
|
||||||
//
|
|
||||||
// There are a number of uses for custom registries: You can use registries
|
|
||||||
// with special properties, see NewPedanticRegistry. You can avoid global state,
|
|
||||||
// as it is imposed by the DefaultRegistry. You can use multiple registries at
|
|
||||||
// the same time to expose different metrics in different ways. You can use
|
|
||||||
// separate registries for testing purposes.
|
|
||||||
//
|
|
||||||
// Also note that the DefaultRegistry comes registered with a Collector for Go
|
|
||||||
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
|
|
||||||
// NewProcessCollector). With a custom registry, you are in control and decide
|
|
||||||
// yourself about the Collectors to register.
|
|
||||||
//
|
|
||||||
// HTTP Exposition
|
|
||||||
//
|
|
||||||
// The Registry implements the Gatherer interface. The caller of the Gather
|
|
||||||
// method can then expose the gathered metrics in some way. Usually, the metrics
|
|
||||||
// are served via HTTP on the /metrics endpoint. That's happening in the example
|
|
||||||
// above. The tools to expose metrics via HTTP are in the promhttp
|
|
||||||
// sub-package. (The top-level functions in the prometheus package are
|
|
||||||
// deprecated.)
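//
// A sketch of exposing a custom registry (HandlerFor and HandlerOpts live in
// the promhttp sub-package; the registered collector is illustrative):
//
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(cpuTemp) // any Collector, e.g. the Gauge from the example above
//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))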
|
|
||||||
//
|
|
||||||
// Pushing to the Pushgateway
|
|
||||||
//
|
|
||||||
// Functions for pushing to the Pushgateway can be found in the push sub-package.
|
|
||||||
//
|
|
||||||
// Other Means of Exposition
|
|
||||||
//
|
|
||||||
// More ways of exposing metrics can easily be added. Sending metrics to
|
|
||||||
// Graphite would be an example that will soon be implemented.
|
|
||||||
package prometheus
|
|
||||||
|
|
@ -1,118 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus_test
|
|
||||||
|
|
||||||
import "github.com/prometheus/client_golang/prometheus"
|
|
||||||
|
|
||||||
// ClusterManager is an example for a system that might have been built without
|
|
||||||
// Prometheus in mind. It models a central manager of jobs running in a
|
|
||||||
// cluster. To turn it into something that collects Prometheus metrics, we
|
|
||||||
// simply add the two methods required for the Collector interface.
|
|
||||||
//
|
|
||||||
// An additional challenge is that multiple instances of the ClusterManager are
|
|
||||||
// run within the same binary, each in charge of a different zone. We need to
|
|
||||||
// make use of ConstLabels to be able to register each ClusterManager instance
|
|
||||||
// with Prometheus.
|
|
||||||
type ClusterManager struct {
|
|
||||||
Zone string
|
|
||||||
OOMCountDesc *prometheus.Desc
|
|
||||||
RAMUsageDesc *prometheus.Desc
|
|
||||||
// ... many more fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
|
|
||||||
// real cluster manager would have to do. Since it may actually be really
|
|
||||||
// expensive, it must only be called once per collection. This implementation,
|
|
||||||
// obviously, only returns some made-up data.
|
|
||||||
func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (
|
|
||||||
oomCountByHost map[string]int, ramUsageByHost map[string]float64,
|
|
||||||
) {
|
|
||||||
// Just example fake data.
|
|
||||||
oomCountByHost = map[string]int{
|
|
||||||
"foo.example.org": 42,
|
|
||||||
"bar.example.org": 2001,
|
|
||||||
}
|
|
||||||
ramUsageByHost = map[string]float64{
|
|
||||||
"foo.example.org": 6.023e23,
|
|
||||||
"bar.example.org": 3.14,
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describe simply sends the two Descs in the struct to the channel.
|
|
||||||
func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
|
|
||||||
ch <- c.OOMCountDesc
|
|
||||||
ch <- c.RAMUsageDesc
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
|
|
||||||
// creates constant metrics for each host on the fly based on the returned data.
|
|
||||||
//
|
|
||||||
// Note that Collect could be called concurrently, so we depend on
|
|
||||||
// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.
|
|
||||||
func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
|
|
||||||
oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()
|
|
||||||
for host, oomCount := range oomCountByHost {
|
|
||||||
ch <- prometheus.MustNewConstMetric(
|
|
||||||
c.OOMCountDesc,
|
|
||||||
prometheus.CounterValue,
|
|
||||||
float64(oomCount),
|
|
||||||
host,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
for host, ramUsage := range ramUsageByHost {
|
|
||||||
ch <- prometheus.MustNewConstMetric(
|
|
||||||
c.RAMUsageDesc,
|
|
||||||
prometheus.GaugeValue,
|
|
||||||
ramUsage,
|
|
||||||
host,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note
|
|
||||||
// that the zone is set as a ConstLabel. (It's different in each instance of the
|
|
||||||
// ClusterManager, but constant over the lifetime of an instance.) Then there is
|
|
||||||
// a variable label "host", since we want to partition the collected metrics by
|
|
||||||
// host. Since all Descs created in this way are consistent across instances,
|
|
||||||
// with a guaranteed distinction by the "zone" label, we can register different
|
|
||||||
// ClusterManager instances with the same registry.
|
|
||||||
func NewClusterManager(zone string) *ClusterManager {
|
|
||||||
return &ClusterManager{
|
|
||||||
Zone: zone,
|
|
||||||
OOMCountDesc: prometheus.NewDesc(
|
|
||||||
"clustermanager_oom_crashes_total",
|
|
||||||
"Number of OOM crashes.",
|
|
||||||
[]string{"host"},
|
|
||||||
prometheus.Labels{"zone": zone},
|
|
||||||
),
|
|
||||||
RAMUsageDesc: prometheus.NewDesc(
|
|
||||||
"clustermanager_ram_usage_bytes",
|
|
||||||
"RAM usage as reported to the cluster manager.",
|
|
||||||
[]string{"host"},
|
|
||||||
prometheus.Labels{"zone": zone},
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleCollector() {
|
|
||||||
workerDB := NewClusterManager("db")
|
|
||||||
workerCA := NewClusterManager("ca")
|
|
||||||
|
|
||||||
// Since we are dealing with custom Collector implementations, it might
|
|
||||||
// be a good idea to try it out with a pedantic registry.
|
|
||||||
reg := prometheus.NewPedanticRegistry()
|
|
||||||
reg.MustRegister(workerDB)
|
|
||||||
reg.MustRegister(workerCA)
|
|
||||||
}
|
|
||||||
|
|
@ -1,71 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// apiRequestDuration tracks the duration separate for each HTTP status
|
|
||||||
// class (1xx, 2xx, ...). This creates a fair amount of time series on
|
|
||||||
// the Prometheus server. Usually, you would track the duration of
|
|
||||||
// serving HTTP requests without partitioning by outcome. Do something
|
|
||||||
// like this only if needed. Also note how only status classes are
|
|
||||||
// tracked, not every single status code. The latter would create an
|
|
||||||
// even larger amount of time series. Request counters partitioned by
|
|
||||||
// status code are usually OK as each counter only creates one time
|
|
||||||
// series. Histograms are way more expensive, so partition with care and
|
|
||||||
// only where you really need separate latency tracking. Partitioning by
|
|
||||||
// status class is only an example. In concrete cases, other partitions
|
|
||||||
// might make more sense.
|
|
||||||
apiRequestDuration = prometheus.NewHistogramVec(
|
|
||||||
prometheus.HistogramOpts{
|
|
||||||
Name: "api_request_duration_seconds",
|
|
||||||
Help: "Histogram for the request duration of the public API, partitioned by status class.",
|
|
||||||
Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
|
|
||||||
},
|
|
||||||
[]string{"status_class"},
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
func handler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
status := http.StatusOK
|
|
||||||
// The ObserverFunc gets called by the deferred ObserveDuration and
|
|
||||||
// decides which Histogram's Observe method is called.
|
|
||||||
timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
|
|
||||||
switch {
|
|
||||||
case status >= 500: // Server error.
|
|
||||||
apiRequestDuration.WithLabelValues("5xx").Observe(v)
|
|
||||||
case status >= 400: // Client error.
|
|
||||||
apiRequestDuration.WithLabelValues("4xx").Observe(v)
|
|
||||||
case status >= 300: // Redirection.
|
|
||||||
apiRequestDuration.WithLabelValues("3xx").Observe(v)
|
|
||||||
case status >= 200: // Success.
|
|
||||||
apiRequestDuration.WithLabelValues("2xx").Observe(v)
|
|
||||||
default: // Informational.
|
|
||||||
apiRequestDuration.WithLabelValues("1xx").Observe(v)
|
|
||||||
}
|
|
||||||
}))
|
|
||||||
defer timer.ObserveDuration()
|
|
||||||
|
|
||||||
// Handle the request. Set status accordingly.
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleTimer_complex() {
|
|
||||||
http.HandleFunc("/api", handler)
|
|
||||||
}
|
|
||||||
|
|
@ -1,38 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus_test
|
|
||||||
|
|
||||||
import "github.com/prometheus/client_golang/prometheus"
|
|
||||||
|
|
||||||
var (
|
|
||||||
// If a function is called rarely (i.e. not more often than scrapes
|
|
||||||
// happen) or ideally only once (like in a batch job), it can make sense
|
|
||||||
// to use a Gauge for timing the function call. For timing a batch job
|
|
||||||
// and pushing the result to a Pushgateway, see also the comprehensive
|
|
||||||
// example in the push package.
|
|
||||||
funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Name: "example_function_duration_seconds",
|
|
||||||
Help: "Duration of the last call of an example function.",
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExampleTimer_gauge() error {
|
|
||||||
// The Set method of the Gauge is used to observe the duration.
|
|
||||||
timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set))
|
|
||||||
defer timer.ObserveDuration()
|
|
||||||
|
|
||||||
// Do something. Return errors as encountered. The use of 'defer' above
|
|
||||||
// makes sure the function is still timed properly.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,40 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/rand"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
|
|
||||||
Name: "example_request_duration_seconds",
|
|
||||||
Help: "Histogram for the runtime of a simple example function.",
|
|
||||||
Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExampleTimer() {
|
|
||||||
// timer times this example function. It uses a Histogram, but a Summary
|
|
||||||
// would also work, as both implement Observer. Check out
|
|
||||||
// https://prometheus.io/docs/practices/histograms/ for differences.
|
|
||||||
timer := prometheus.NewTimer(requestDuration)
|
|
||||||
defer timer.ObserveDuration()
|
|
||||||
|
|
||||||
// Do something here that takes time.
|
|
||||||
time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond)
|
|
||||||
}
|
|
||||||
|
|
@ -1,754 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"net/http"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExampleGauge() {
|
|
||||||
opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Namespace: "our_company",
|
|
||||||
Subsystem: "blob_storage",
|
|
||||||
Name: "ops_queued",
|
|
||||||
Help: "Number of blob storage operations waiting to be processed.",
|
|
||||||
})
|
|
||||||
prometheus.MustRegister(opsQueued)
|
|
||||||
|
|
||||||
// 10 operations queued by the goroutine managing incoming requests.
|
|
||||||
opsQueued.Add(10)
|
|
||||||
// A worker goroutine has picked up a waiting operation.
|
|
||||||
opsQueued.Dec()
|
|
||||||
// And once more...
|
|
||||||
opsQueued.Dec()
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleGaugeVec() {
|
|
||||||
opsQueued := prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Namespace: "our_company",
|
|
||||||
Subsystem: "blob_storage",
|
|
||||||
Name: "ops_queued",
|
|
||||||
Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.",
|
|
||||||
},
|
|
||||||
[]string{
|
|
||||||
// Which user has requested the operation?
|
|
||||||
"user",
|
|
||||||
// Of what type is the operation?
|
|
||||||
"type",
|
|
||||||
},
|
|
||||||
)
|
|
||||||
prometheus.MustRegister(opsQueued)
|
|
||||||
|
|
||||||
// Increase a value using compact (but order-sensitive!) WithLabelValues().
|
|
||||||
opsQueued.WithLabelValues("bob", "put").Add(4)
|
|
||||||
// Increase a value with a map using With(). More verbose, but order
|
|
||||||
// doesn't matter anymore.
|
|
||||||
opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc()
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleGaugeFunc() {
|
|
||||||
if err := prometheus.Register(prometheus.NewGaugeFunc(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Subsystem: "runtime",
|
|
||||||
Name: "goroutines_count",
|
|
||||||
Help: "Number of goroutines that currently exist.",
|
|
||||||
},
|
|
||||||
func() float64 { return float64(runtime.NumGoroutine()) },
|
|
||||||
)); err == nil {
|
|
||||||
fmt.Println("GaugeFunc 'goroutines_count' registered.")
|
|
||||||
}
|
|
||||||
// Note that the count of goroutines is a gauge (and not a counter) as
|
|
||||||
// it can go up and down.
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// GaugeFunc 'goroutines_count' registered.
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleCounter() {
|
|
||||||
pushCounter := prometheus.NewCounter(prometheus.CounterOpts{
|
|
||||||
Name: "repository_pushes", // Note: No help string...
|
|
||||||
})
|
|
||||||
err := prometheus.Register(pushCounter) // ... so this will return an error.
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("Push counter couldn't be registered, no counting will happen:", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try it once more, this time with a help string.
|
|
||||||
pushCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
|
||||||
Name: "repository_pushes",
|
|
||||||
Help: "Number of pushes to external repository.",
|
|
||||||
})
|
|
||||||
err = prometheus.Register(pushCounter)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
pushComplete := make(chan struct{})
|
|
||||||
// TODO: Start a goroutine that performs repository pushes and reports
|
|
||||||
// each completion via the channel.
|
|
||||||
for range pushComplete {
|
|
||||||
pushCounter.Inc()
|
|
||||||
}
|
|
||||||
// Output:
|
|
||||||
// Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleCounterVec() {
|
|
||||||
httpReqs := prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Name: "http_requests_total",
|
|
||||||
Help: "How many HTTP requests processed, partitioned by status code and HTTP method.",
|
|
||||||
},
|
|
||||||
[]string{"code", "method"},
|
|
||||||
)
|
|
||||||
prometheus.MustRegister(httpReqs)
|
|
||||||
|
|
||||||
httpReqs.WithLabelValues("404", "POST").Add(42)
|
|
||||||
|
|
||||||
// If you have to access the same set of labels very frequently, it
|
|
||||||
// might be good to retrieve the metric only once and keep a handle to
|
|
||||||
// it. But beware of deletion of that metric, see below!
|
|
||||||
m := httpReqs.WithLabelValues("200", "GET")
|
|
||||||
for i := 0; i < 1000000; i++ {
|
|
||||||
m.Inc()
|
|
||||||
}
|
|
||||||
// Delete a metric from the vector. If you have previously kept a handle
|
|
||||||
// to that metric (as above), future updates via that handle will go
|
|
||||||
// unseen (even if you re-create a metric with the same label set
|
|
||||||
// later).
|
|
||||||
httpReqs.DeleteLabelValues("200", "GET")
|
|
||||||
// Same thing with the more verbose Labels syntax.
|
|
||||||
httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"})
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleInstrumentHandler() {
|
|
||||||
// Handle the "/doc" endpoint with the standard http.FileServer handler.
|
|
||||||
// By wrapping the handler with InstrumentHandler, request count,
|
|
||||||
// request and response sizes, and request latency are automatically
|
|
||||||
// exported to Prometheus, partitioned by HTTP status code and method
|
|
||||||
// and by the handler name (here "fileserver").
|
|
||||||
http.Handle("/doc", prometheus.InstrumentHandler(
|
|
||||||
"fileserver", http.FileServer(http.Dir("/usr/share/doc")),
|
|
||||||
))
|
|
||||||
// The Prometheus handler still has to be registered to handle the
|
|
||||||
// "/metrics" endpoint. The handler returned by prometheus.Handler() is
|
|
||||||
// already instrumented - with "prometheus" as the handler name. In this
|
|
||||||
// example, we want the handler name to be "metrics", so we instrument
|
|
||||||
// the uninstrumented Prometheus handler ourselves.
|
|
||||||
http.Handle("/metrics", prometheus.InstrumentHandler(
|
|
||||||
"metrics", prometheus.UninstrumentedHandler(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleLabelPairSorter() {
|
|
||||||
labelPairs := []*dto.LabelPair{
|
|
||||||
{Name: proto.String("status"), Value: proto.String("404")},
|
|
||||||
{Name: proto.String("method"), Value: proto.String("get")},
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(prometheus.LabelPairSorter(labelPairs))
|
|
||||||
|
|
||||||
fmt.Println(labelPairs)
|
|
||||||
// Output:
|
|
||||||
// [name:"method" value:"get" name:"status" value:"404" ]
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleRegister() {
|
|
||||||
// Imagine you have a worker pool and want to count the tasks completed.
|
|
||||||
taskCounter := prometheus.NewCounter(prometheus.CounterOpts{
|
|
||||||
Subsystem: "worker_pool",
|
|
||||||
Name: "completed_tasks_total",
|
|
||||||
Help: "Total number of tasks completed.",
|
|
||||||
})
|
|
||||||
// This will register fine.
|
|
||||||
if err := prometheus.Register(taskCounter); err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
} else {
|
|
||||||
fmt.Println("taskCounter registered.")
|
|
||||||
}
|
|
||||||
// Don't forget to tell the HTTP server about the Prometheus handler.
|
|
||||||
// (In a real program, you still need to start the HTTP server...)
|
|
||||||
http.Handle("/metrics", prometheus.Handler())
|
|
||||||
|
|
||||||
// Now you can start workers and give every one of them a pointer to
|
|
||||||
// taskCounter and let it increment it whenever it completes a task.
|
|
||||||
taskCounter.Inc() // This has to happen somewhere in the worker code.
|
|
||||||
|
|
||||||
// But wait, you want to see how individual workers perform. So you need
|
|
||||||
// a vector of counters, with one element for each worker.
|
|
||||||
taskCounterVec := prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Subsystem: "worker_pool",
|
|
||||||
Name: "completed_tasks_total",
|
|
||||||
Help: "Total number of tasks completed.",
|
|
||||||
},
|
|
||||||
[]string{"worker_id"},
|
|
||||||
)
|
|
||||||
|
|
||||||
// Registering will fail because we already have a metric of that name.
|
|
||||||
if err := prometheus.Register(taskCounterVec); err != nil {
|
|
||||||
fmt.Println("taskCounterVec not registered:", err)
|
|
||||||
} else {
|
|
||||||
fmt.Println("taskCounterVec registered.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// To fix, first unregister the old taskCounter.
|
|
||||||
if prometheus.Unregister(taskCounter) {
|
|
||||||
fmt.Println("taskCounter unregistered.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try registering taskCounterVec again.
|
|
||||||
if err := prometheus.Register(taskCounterVec); err != nil {
|
|
||||||
fmt.Println("taskCounterVec not registered:", err)
|
|
||||||
} else {
|
|
||||||
fmt.Println("taskCounterVec registered.")
|
|
||||||
}
|
|
||||||
// Bummer! Still doesn't work.
|
|
||||||
|
|
||||||
// Prometheus will not allow you to ever export metrics with
|
|
||||||
// inconsistent help strings or label names. After unregistering, the
|
|
||||||
// unregistered metrics will cease to show up in the /metrics HTTP
|
|
||||||
// response, but the registry still remembers that those metrics had
|
|
||||||
// been exported before. For this example, we will now choose a
|
|
||||||
// different name. (In a real program, you would obviously not export
|
|
||||||
// the obsolete metric in the first place.)
|
|
||||||
taskCounterVec = prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Subsystem: "worker_pool",
|
|
||||||
Name: "completed_tasks_by_id",
|
|
||||||
Help: "Total number of tasks completed.",
|
|
||||||
},
|
|
||||||
[]string{"worker_id"},
|
|
||||||
)
|
|
||||||
if err := prometheus.Register(taskCounterVec); err != nil {
|
|
||||||
fmt.Println("taskCounterVec not registered:", err)
|
|
||||||
} else {
|
|
||||||
fmt.Println("taskCounterVec registered.")
|
|
||||||
}
|
|
||||||
// Finally it worked!
|
|
||||||
|
|
||||||
// The workers have to tell taskCounterVec their id to increment the
|
|
||||||
// right element in the metric vector.
|
|
||||||
taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42.
|
|
||||||
|
|
||||||
// Each worker could also keep a reference to their own counter element
|
|
||||||
// around. Pick the counter at initialization time of the worker.
|
|
||||||
myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code.
|
|
||||||
myCounter.Inc() // Somewhere in the code of that worker.
|
|
||||||
|
|
||||||
// Note that something like WithLabelValues("42", "spurious arg") would
|
|
||||||
// panic (because you have provided too many label values). If you want
|
|
||||||
// to get an error instead, use GetMetricWithLabelValues(...).
|
|
||||||
notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg")
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("Worker initialization failed:", err)
|
|
||||||
}
|
|
||||||
if notMyCounter == nil {
|
|
||||||
fmt.Println("notMyCounter is nil.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// A different (and somewhat tricky) approach is to use
|
|
||||||
// ConstLabels. ConstLabels are pairs of label names and label values
|
|
||||||
// that never change. You might ask what those labels are good for (and
|
|
||||||
// rightfully so - if they never change, they could as well be part of
|
|
||||||
// the metric name). There are essentially two use-cases: The first is
|
|
||||||
// if labels are constant throughout the lifetime of a binary execution,
|
|
||||||
// but they vary over time or between different instances of a running
|
|
||||||
// binary. The second is what we have here: Each worker creates and
|
|
||||||
// registers its own Counter instance where the only difference is in the
|
|
||||||
// value of the ConstLabels. Those Counters can all be registered
|
|
||||||
// because the different ConstLabel values guarantee that each worker
|
|
||||||
// will increment a different Counter metric.
|
|
||||||
counterOpts := prometheus.CounterOpts{
|
|
||||||
Subsystem: "worker_pool",
|
|
||||||
Name: "completed_tasks",
|
|
||||||
Help: "Total number of tasks completed.",
|
|
||||||
ConstLabels: prometheus.Labels{"worker_id": "42"},
|
|
||||||
}
|
|
||||||
taskCounterForWorker42 := prometheus.NewCounter(counterOpts)
|
|
||||||
if err := prometheus.Register(taskCounterForWorker42); err != nil {
|
|
||||||
fmt.Println("taskCounterVForWorker42 not registered:", err)
|
|
||||||
} else {
|
|
||||||
fmt.Println("taskCounterForWorker42 registered.")
|
|
||||||
}
|
|
||||||
// Obviously, in real code, taskCounterForWorker42 would be a member
|
|
||||||
// variable of a worker struct, and the "42" would be retrieved with a
|
|
||||||
// GetId() method or something. The Counter would be created and
|
|
||||||
// registered in the initialization code of the worker.
|
|
||||||
|
|
||||||
// For the creation of the next Counter, we can recycle
|
|
||||||
// counterOpts. Just change the ConstLabels.
|
|
||||||
counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"}
|
|
||||||
taskCounterForWorker2001 := prometheus.NewCounter(counterOpts)
|
|
||||||
if err := prometheus.Register(taskCounterForWorker2001); err != nil {
|
|
||||||
fmt.Println("taskCounterVForWorker2001 not registered:", err)
|
|
||||||
} else {
|
|
||||||
fmt.Println("taskCounterForWorker2001 registered.")
|
|
||||||
}
|
|
||||||
|
|
||||||
taskCounterForWorker2001.Inc()
|
|
||||||
taskCounterForWorker42.Inc()
|
|
||||||
taskCounterForWorker2001.Inc()
|
|
||||||
|
|
||||||
// Yet another approach would be to turn the workers themselves into
|
|
||||||
// Collectors and register them. See the Collector example for details.
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// taskCounter registered.
|
|
||||||
// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
|
|
||||||
// taskCounter unregistered.
|
|
||||||
// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
|
|
||||||
// taskCounterVec registered.
|
|
||||||
// Worker initialization failed: inconsistent label cardinality
|
|
||||||
// notMyCounter is nil.
|
|
||||||
// taskCounterForWorker42 registered.
|
|
||||||
// taskCounterForWorker2001 registered.
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleSummary() {
|
|
||||||
temps := prometheus.NewSummary(prometheus.SummaryOpts{
|
|
||||||
Name: "pond_temperature_celsius",
|
|
||||||
Help: "The temperature of the frog pond.",
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
})
|
|
||||||
|
|
||||||
// Simulate some observations.
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Just for demonstration, let's check the state of the summary by
|
|
||||||
// (ab)using its Write method (which is usually only used by Prometheus
|
|
||||||
// internally).
|
|
||||||
metric := &dto.Metric{}
|
|
||||||
temps.Write(metric)
|
|
||||||
fmt.Println(proto.MarshalTextString(metric))
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// summary: <
|
|
||||||
// sample_count: 1000
|
|
||||||
// sample_sum: 29969.50000000001
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.5
|
|
||||||
// value: 31.1
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.9
|
|
||||||
// value: 41.3
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.99
|
|
||||||
// value: 41.9
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleSummaryVec() {
|
|
||||||
temps := prometheus.NewSummaryVec(
|
|
||||||
prometheus.SummaryOpts{
|
|
||||||
Name: "pond_temperature_celsius",
|
|
||||||
Help: "The temperature of the frog pond.",
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
},
|
|
||||||
[]string{"species"},
|
|
||||||
)
|
|
||||||
|
|
||||||
// Simulate some observations.
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
|
|
||||||
temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a Summary without any observations.
|
|
||||||
temps.WithLabelValues("leiopelma-hochstetteri")
|
|
||||||
|
|
||||||
// Just for demonstration, let's check the state of the summary vector
|
|
||||||
// by registering it with a custom registry and then let it collect the
|
|
||||||
// metrics.
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
reg.MustRegister(temps)
|
|
||||||
|
|
||||||
metricFamilies, err := reg.Gather()
|
|
||||||
if err != nil || len(metricFamilies) != 1 {
|
|
||||||
panic("unexpected behavior of custom test registry")
|
|
||||||
}
|
|
||||||
fmt.Println(proto.MarshalTextString(metricFamilies[0]))
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// name: "pond_temperature_celsius"
|
|
||||||
// help: "The temperature of the frog pond."
|
|
||||||
// type: SUMMARY
|
|
||||||
// metric: <
|
|
||||||
// label: <
|
|
||||||
// name: "species"
|
|
||||||
// value: "leiopelma-hochstetteri"
|
|
||||||
// >
|
|
||||||
// summary: <
|
|
||||||
// sample_count: 0
|
|
||||||
// sample_sum: 0
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.5
|
|
||||||
// value: nan
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.9
|
|
||||||
// value: nan
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.99
|
|
||||||
// value: nan
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
// metric: <
|
|
||||||
// label: <
|
|
||||||
// name: "species"
|
|
||||||
// value: "lithobates-catesbeianus"
|
|
||||||
// >
|
|
||||||
// summary: <
|
|
||||||
// sample_count: 1000
|
|
||||||
// sample_sum: 31956.100000000017
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.5
|
|
||||||
// value: 32.4
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.9
|
|
||||||
// value: 41.4
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.99
|
|
||||||
// value: 41.9
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
// metric: <
|
|
||||||
// label: <
|
|
||||||
// name: "species"
|
|
||||||
// value: "litoria-caerulea"
|
|
||||||
// >
|
|
||||||
// summary: <
|
|
||||||
// sample_count: 1000
|
|
||||||
// sample_sum: 29969.50000000001
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.5
|
|
||||||
// value: 31.1
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.9
|
|
||||||
// value: 41.3
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.99
|
|
||||||
// value: 41.9
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleNewConstSummary() {
|
|
||||||
desc := prometheus.NewDesc(
|
|
||||||
"http_request_duration_seconds",
|
|
||||||
"A summary of the HTTP request durations.",
|
|
||||||
[]string{"code", "method"},
|
|
||||||
prometheus.Labels{"owner": "example"},
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create a constant summary from values we got from a 3rd party telemetry system.
|
|
||||||
s := prometheus.MustNewConstSummary(
|
|
||||||
desc,
|
|
||||||
4711, 403.34,
|
|
||||||
map[float64]float64{0.5: 42.3, 0.9: 323.3},
|
|
||||||
"200", "get",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Just for demonstration, let's check the state of the summary by
|
|
||||||
// (ab)using its Write method (which is usually only used by Prometheus
|
|
||||||
// internally).
|
|
||||||
metric := &dto.Metric{}
|
|
||||||
s.Write(metric)
|
|
||||||
fmt.Println(proto.MarshalTextString(metric))
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// label: <
|
|
||||||
// name: "code"
|
|
||||||
// value: "200"
|
|
||||||
// >
|
|
||||||
// label: <
|
|
||||||
// name: "method"
|
|
||||||
// value: "get"
|
|
||||||
// >
|
|
||||||
// label: <
|
|
||||||
// name: "owner"
|
|
||||||
// value: "example"
|
|
||||||
// >
|
|
||||||
// summary: <
|
|
||||||
// sample_count: 4711
|
|
||||||
// sample_sum: 403.34
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.5
|
|
||||||
// value: 42.3
|
|
||||||
// >
|
|
||||||
// quantile: <
|
|
||||||
// quantile: 0.9
|
|
||||||
// value: 323.3
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleHistogram() {
|
|
||||||
temps := prometheus.NewHistogram(prometheus.HistogramOpts{
|
|
||||||
Name: "pond_temperature_celsius",
|
|
||||||
Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
|
|
||||||
Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide.
|
|
||||||
})
|
|
||||||
|
|
||||||
// Simulate some observations.
|
|
||||||
for i := 0; i < 1000; i++ {
|
|
||||||
temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Just for demonstration, let's check the state of the histogram by
|
|
||||||
// (ab)using its Write method (which is usually only used by Prometheus
|
|
||||||
// internally).
|
|
||||||
metric := &dto.Metric{}
|
|
||||||
temps.Write(metric)
|
|
||||||
fmt.Println(proto.MarshalTextString(metric))
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// histogram: <
|
|
||||||
// sample_count: 1000
|
|
||||||
// sample_sum: 29969.50000000001
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 192
|
|
||||||
// upper_bound: 20
|
|
||||||
// >
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 366
|
|
||||||
// upper_bound: 25
|
|
||||||
// >
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 501
|
|
||||||
// upper_bound: 30
|
|
||||||
// >
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 638
|
|
||||||
// upper_bound: 35
|
|
||||||
// >
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 816
|
|
||||||
// upper_bound: 40
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleNewConstHistogram() {
|
|
||||||
desc := prometheus.NewDesc(
|
|
||||||
"http_request_duration_seconds",
|
|
||||||
"A histogram of the HTTP request durations.",
|
|
||||||
[]string{"code", "method"},
|
|
||||||
prometheus.Labels{"owner": "example"},
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create a constant histogram from values we got from a 3rd party telemetry system.
|
|
||||||
h := prometheus.MustNewConstHistogram(
|
|
||||||
desc,
|
|
||||||
4711, 403.34,
|
|
||||||
map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
|
|
||||||
"200", "get",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Just for demonstration, let's check the state of the histogram by
|
|
||||||
// (ab)using its Write method (which is usually only used by Prometheus
|
|
||||||
// internally).
|
|
||||||
metric := &dto.Metric{}
|
|
||||||
h.Write(metric)
|
|
||||||
fmt.Println(proto.MarshalTextString(metric))
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// label: <
|
|
||||||
// name: "code"
|
|
||||||
// value: "200"
|
|
||||||
// >
|
|
||||||
// label: <
|
|
||||||
// name: "method"
|
|
||||||
// value: "get"
|
|
||||||
// >
|
|
||||||
// label: <
|
|
||||||
// name: "owner"
|
|
||||||
// value: "example"
|
|
||||||
// >
|
|
||||||
// histogram: <
|
|
||||||
// sample_count: 4711
|
|
||||||
// sample_sum: 403.34
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 121
|
|
||||||
// upper_bound: 25
|
|
||||||
// >
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 2403
|
|
||||||
// upper_bound: 50
|
|
||||||
// >
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 3221
|
|
||||||
// upper_bound: 100
|
|
||||||
// >
|
|
||||||
// bucket: <
|
|
||||||
// cumulative_count: 4233
|
|
||||||
// upper_bound: 200
|
|
||||||
// >
|
|
||||||
// >
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleAlreadyRegisteredError() {
|
|
||||||
reqCounter := prometheus.NewCounter(prometheus.CounterOpts{
|
|
||||||
Name: "requests_total",
|
|
||||||
Help: "The total number of requests served.",
|
|
||||||
})
|
|
||||||
if err := prometheus.Register(reqCounter); err != nil {
|
|
||||||
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
|
|
||||||
// A counter for that metric has been registered before.
|
|
||||||
// Use the old counter from now on.
|
|
||||||
reqCounter = are.ExistingCollector.(prometheus.Counter)
|
|
||||||
} else {
|
|
||||||
// Something else went wrong!
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
reqCounter.Inc()
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleGatherers() {
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
temp := prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Name: "temperature_kelvin",
|
|
||||||
Help: "Temperature in Kelvin.",
|
|
||||||
},
|
|
||||||
[]string{"location"},
|
|
||||||
)
|
|
||||||
reg.MustRegister(temp)
|
|
||||||
temp.WithLabelValues("outside").Set(273.14)
|
|
||||||
temp.WithLabelValues("inside").Set(298.44)
|
|
||||||
|
|
||||||
var parser expfmt.TextParser
|
|
||||||
|
|
||||||
text := `
|
|
||||||
# TYPE humidity_percent gauge
|
|
||||||
# HELP humidity_percent Humidity in %.
|
|
||||||
humidity_percent{location="outside"} 45.4
|
|
||||||
humidity_percent{location="inside"} 33.2
|
|
||||||
# TYPE temperature_kelvin gauge
|
|
||||||
# HELP temperature_kelvin Temperature in Kelvin.
|
|
||||||
temperature_kelvin{location="somewhere else"} 4.5
|
|
||||||
`
|
|
||||||
|
|
||||||
parseText := func() ([]*dto.MetricFamily, error) {
|
|
||||||
parsed, err := parser.TextToMetricFamilies(strings.NewReader(text))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var result []*dto.MetricFamily
|
|
||||||
for _, mf := range parsed {
|
|
||||||
result = append(result, mf)
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
gatherers := prometheus.Gatherers{
|
|
||||||
reg,
|
|
||||||
prometheus.GathererFunc(parseText),
|
|
||||||
}
|
|
||||||
|
|
||||||
gathering, err := gatherers.Gather()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
out := &bytes.Buffer{}
|
|
||||||
for _, mf := range gathering {
|
|
||||||
if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Print(out.String())
|
|
||||||
fmt.Println("----------")
|
|
||||||
|
|
||||||
// Note how the temperature_kelvin metric family has been merged from
|
|
||||||
// different sources. Now try
|
|
||||||
text = `
|
|
||||||
# TYPE humidity_percent gauge
|
|
||||||
# HELP humidity_percent Humidity in %.
|
|
||||||
humidity_percent{location="outside"} 45.4
|
|
||||||
humidity_percent{location="inside"} 33.2
|
|
||||||
# TYPE temperature_kelvin gauge
|
|
||||||
# HELP temperature_kelvin Temperature in Kelvin.
|
|
||||||
# Duplicate metric:
|
|
||||||
temperature_kelvin{location="outside"} 265.3
|
|
||||||
# Wrong labels:
|
|
||||||
temperature_kelvin 4.5
|
|
||||||
`
|
|
||||||
|
|
||||||
gathering, err = gatherers.Gather()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
}
|
|
||||||
// Note that still as many metrics as possible are returned:
|
|
||||||
out.Reset()
|
|
||||||
for _, mf := range gathering {
|
|
||||||
if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Print(out.String())
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// # HELP humidity_percent Humidity in %.
|
|
||||||
// # TYPE humidity_percent gauge
|
|
||||||
// humidity_percent{location="inside"} 33.2
|
|
||||||
// humidity_percent{location="outside"} 45.4
|
|
||||||
// # HELP temperature_kelvin Temperature in Kelvin.
|
|
||||||
// # TYPE temperature_kelvin gauge
|
|
||||||
// temperature_kelvin{location="inside"} 298.44
|
|
||||||
// temperature_kelvin{location="outside"} 273.14
|
|
||||||
// temperature_kelvin{location="somewhere else"} 4.5
|
|
||||||
// ----------
|
|
||||||
// 2 error(s) occurred:
|
|
||||||
// * collected metric temperature_kelvin label:<name:"location" value:"outside" > gauge:<value:265.3 > was collected before with the same name and label values
|
|
||||||
// * collected metric temperature_kelvin gauge:<value:4.5 > has label dimensions inconsistent with previously collected metrics in the same metric family
|
|
||||||
// # HELP humidity_percent Humidity in %.
|
|
||||||
// # TYPE humidity_percent gauge
|
|
||||||
// humidity_percent{location="inside"} 33.2
|
|
||||||
// humidity_percent{location="outside"} 45.4
|
|
||||||
// # HELP temperature_kelvin Temperature in Kelvin.
|
|
||||||
// # TYPE temperature_kelvin gauge
|
|
||||||
// temperature_kelvin{location="inside"} 298.44
|
|
||||||
// temperature_kelvin{location="outside"} 273.14
|
|
||||||
}
|
|
||||||
|
|
@ -1,119 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"expvar"
|
|
||||||
)
|
|
||||||
|
|
||||||
type expvarCollector struct {
|
|
||||||
exports map[string]*Desc
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewExpvarCollector returns a newly allocated expvar Collector that still has
|
|
||||||
// to be registered with a Prometheus registry.
|
|
||||||
//
|
|
||||||
// An expvar Collector collects metrics from the expvar interface. It provides a
|
|
||||||
// quick way to expose numeric values that are already exported via expvar as
|
|
||||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
|
||||||
// fundamentally different, and that the expvar Collector is inherently slower
|
|
||||||
// than native Prometheus metrics. Thus, the expvar Collector is probably great
|
|
||||||
// for experiments and prototyping, but you should seriously consider a more
|
|
||||||
// direct implementation of Prometheus metrics for monitoring production
|
|
||||||
// systems.
|
|
||||||
//
|
|
||||||
// The exports map has the following meaning:
|
|
||||||
//
|
|
||||||
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
|
|
||||||
// want to export as Prometheus metric, you need an entry in the exports
|
|
||||||
// map. The descriptor mapped to each key describes how to export the expvar
|
|
||||||
// value. It defines the name and the help string of the Prometheus metric
|
|
||||||
// proxying the expvar value. The type will always be Untyped.
|
|
||||||
//
|
|
||||||
// For descriptors without variable labels, the expvar value must be a number or
|
|
||||||
// a bool. The number is then directly exported as the Prometheus sample
|
|
||||||
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
|
|
||||||
// that are not numbers or bools are silently ignored.
|
|
||||||
//
|
|
||||||
// If the descriptor has one variable label, the expvar value must be an expvar
|
|
||||||
// map. The keys in the expvar map become the various values of the one
|
|
||||||
// Prometheus label. The values in the expvar map must be numbers or bools again
|
|
||||||
// as above.
|
|
||||||
//
|
|
||||||
// For descriptors with more than one variable label, the expvar must be a
|
|
||||||
// nested expvar map, i.e. where the values of the topmost map are maps again
|
|
||||||
// etc. until a depth is reached that corresponds to the number of labels. The
|
|
||||||
// leaves of that structure must be numbers or bools as above to serve as the
|
|
||||||
// sample values.
|
|
||||||
//
|
|
||||||
// Anything that does not fit into the scheme above is silently ignored.
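//
// As a small illustrative sketch (the expvar key, metric name, and label are
// placeholders, not part of the original file):
//
//	c := NewExpvarCollector(map[string]*Desc{
//		"queue-lengths": NewDesc(
//			"expvar_queue_lengths",
//			"Lengths of the work queues, partitioned by queue name.",
//			[]string{"queue"}, nil,
//		),
//	})
//	MustRegister(c)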
|
|
||||||
func NewExpvarCollector(exports map[string]*Desc) Collector {
|
|
||||||
return &expvarCollector{
|
|
||||||
exports: exports,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describe implements Collector.
|
|
||||||
func (e *expvarCollector) Describe(ch chan<- *Desc) {
|
|
||||||
for _, desc := range e.exports {
|
|
||||||
ch <- desc
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect implements Collector.
|
|
||||||
func (e *expvarCollector) Collect(ch chan<- Metric) {
|
|
||||||
for name, desc := range e.exports {
|
|
||||||
var m Metric
|
|
||||||
expVar := expvar.Get(name)
|
|
||||||
if expVar == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var v interface{}
|
|
||||||
labels := make([]string, len(desc.variableLabels))
|
|
||||||
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
|
|
||||||
ch <- NewInvalidMetric(desc, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var processValue func(v interface{}, i int)
|
|
||||||
processValue = func(v interface{}, i int) {
|
|
||||||
if i >= len(labels) {
|
|
||||||
copiedLabels := append(make([]string, 0, len(labels)), labels...)
|
|
||||||
switch v := v.(type) {
|
|
||||||
case float64:
|
|
||||||
m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
|
|
||||||
case bool:
|
|
||||||
if v {
|
|
||||||
m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
|
|
||||||
} else {
|
|
||||||
m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ch <- m
|
|
||||||
return
|
|
||||||
}
|
|
||||||
vm, ok := v.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for lv, val := range vm {
|
|
||||||
labels[i] = lv
|
|
||||||
processValue(val, i+1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
processValue(v, 0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,97 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"expvar"
|
|
||||||
"fmt"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExampleNewExpvarCollector() {
|
|
||||||
expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
|
|
||||||
"memstats": prometheus.NewDesc(
|
|
||||||
"expvar_memstats",
|
|
||||||
"All numeric memstats as one metric family. Not a good role-model, actually... ;-)",
|
|
||||||
[]string{"type"}, nil,
|
|
||||||
),
|
|
||||||
"lone-int": prometheus.NewDesc(
|
|
||||||
"expvar_lone_int",
|
|
||||||
"Just an expvar int as an example.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
"http-request-map": prometheus.NewDesc(
|
|
||||||
"expvar_http_request_total",
|
|
||||||
"How many http requests processed, partitioned by status code and http method.",
|
|
||||||
[]string{"code", "method"}, nil,
|
|
||||||
),
|
|
||||||
})
|
|
||||||
prometheus.MustRegister(expvarCollector)
|
|
||||||
|
|
||||||
// The Prometheus part is done here. But to show that this example is
|
|
||||||
// doing anything, we have to manually export something via expvar. In
|
|
||||||
// real-life use-cases, some library would already have exported via
|
|
||||||
// expvar what we want to re-export as Prometheus metrics.
|
|
||||||
expvar.NewInt("lone-int").Set(42)
|
|
||||||
expvarMap := expvar.NewMap("http-request-map")
|
|
||||||
var (
|
|
||||||
expvarMap1, expvarMap2 expvar.Map
|
|
||||||
expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int
|
|
||||||
)
|
|
||||||
expvarMap1.Init()
|
|
||||||
expvarMap2.Init()
|
|
||||||
expvarInt11.Set(3)
|
|
||||||
expvarInt12.Set(13)
|
|
||||||
expvarInt21.Set(11)
|
|
||||||
expvarInt22.Set(212)
|
|
||||||
expvarMap1.Set("POST", &expvarInt11)
|
|
||||||
expvarMap1.Set("GET", &expvarInt12)
|
|
||||||
expvarMap2.Set("POST", &expvarInt21)
|
|
||||||
expvarMap2.Set("GET", &expvarInt22)
|
|
||||||
expvarMap.Set("404", &expvarMap1)
|
|
||||||
expvarMap.Set("200", &expvarMap2)
|
|
||||||
// Results in the following expvar map:
|
|
||||||
// "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}
|
|
||||||
|
|
||||||
// Let's see what the scrape would yield, but exclude the memstats metrics.
|
|
||||||
metricStrings := []string{}
|
|
||||||
metric := dto.Metric{}
|
|
||||||
metricChan := make(chan prometheus.Metric)
|
|
||||||
go func() {
|
|
||||||
expvarCollector.Collect(metricChan)
|
|
||||||
close(metricChan)
|
|
||||||
}()
|
|
||||||
for m := range metricChan {
|
|
||||||
if strings.Index(m.Desc().String(), "expvar_memstats") == -1 {
|
|
||||||
metric.Reset()
|
|
||||||
m.Write(&metric)
|
|
||||||
metricStrings = append(metricStrings, metric.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Strings(metricStrings)
|
|
||||||
for _, s := range metricStrings {
|
|
||||||
fmt.Println(strings.TrimRight(s, " "))
|
|
||||||
}
|
|
||||||
// Output:
|
|
||||||
// label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
|
|
||||||
// label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
|
|
||||||
// label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
|
|
||||||
// label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
|
|
||||||
// untyped:<value:42 >
|
|
||||||
}
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
package prometheus
|
|
||||||
|
|
||||||
// Inline and byte-free variant of hash/fnv's fnv64a.
|
|
||||||
|
|
||||||
const (
|
|
||||||
offset64 = 14695981039346656037
|
|
||||||
prime64 = 1099511628211
|
|
||||||
)
|
|
||||||
|
|
||||||
// hashNew initializes a new fnv64a hash value.
|
|
||||||
func hashNew() uint64 {
|
|
||||||
return offset64
|
|
||||||
}
|
|
||||||
|
|
||||||
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
|
|
||||||
func hashAdd(h uint64, s string) uint64 {
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
h ^= uint64(s[i])
|
|
||||||
h *= prime64
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
|
|
||||||
func hashAddByte(h uint64, b byte) uint64 {
|
|
||||||
h ^= uint64(b)
|
|
||||||
h *= prime64
|
|
||||||
return h
|
|
||||||
}
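
// As a quick sanity check (a sketch, not part of the original file), the
// helpers above should agree with the standard library's hash/fnv for string
// input. An in-package test along these lines (importing "hash/fnv" and
// "testing") would confirm it:
//
//	func TestHashAddMatchesFnv64a(t *testing.T) {
//		h := hashAdd(hashNew(), "some_metric_name")
//		ref := fnv.New64a()
//		ref.Write([]byte("some_metric_name"))
//		if h != ref.Sum64() {
//			t.Errorf("hashAdd: got %d, want %d", h, ref.Sum64())
//		}
//	}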
|
|
||||||
|
|
@ -1,145 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
// Gauge is a Metric that represents a single numerical value that can
|
|
||||||
// arbitrarily go up and down.
|
|
||||||
//
|
|
||||||
// A Gauge is typically used for measured values like temperatures or current
|
|
||||||
// memory usage, but also "counts" that can go up and down, like the number of
|
|
||||||
// running goroutines.
|
|
||||||
//
|
|
||||||
// To create Gauge instances, use NewGauge.
|
|
||||||
type Gauge interface {
|
|
||||||
Metric
|
|
||||||
Collector
|
|
||||||
|
|
||||||
// Set sets the Gauge to an arbitrary value.
|
|
||||||
Set(float64)
|
|
||||||
// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
|
|
||||||
// values.
|
|
||||||
Inc()
|
|
||||||
// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
|
|
||||||
// values.
|
|
||||||
Dec()
|
|
||||||
// Add adds the given value to the Gauge. (The value can be negative,
|
|
||||||
// resulting in a decrease of the Gauge.)
|
|
||||||
Add(float64)
|
|
||||||
// Sub subtracts the given value from the Gauge. (The value can be
|
|
||||||
// negative, resulting in an increase of the Gauge.)
|
|
||||||
Sub(float64)
|
|
||||||
|
|
||||||
// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
|
|
||||||
SetToCurrentTime()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GaugeOpts is an alias for Opts. See there for doc comments.
|
|
||||||
type GaugeOpts Opts
|
|
||||||
|
|
||||||
// NewGauge creates a new Gauge based on the provided GaugeOpts.
|
|
||||||
func NewGauge(opts GaugeOpts) Gauge {
|
|
||||||
return newValue(NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
nil,
|
|
||||||
opts.ConstLabels,
|
|
||||||
), GaugeValue, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
|
|
||||||
// Desc, but have different values for their variable labels. This is used if
|
|
||||||
// you want to count the same thing partitioned by various dimensions
|
|
||||||
// (e.g. number of operations queued, partitioned by user and operation
|
|
||||||
// type). Create instances with NewGaugeVec.
|
|
||||||
type GaugeVec struct {
|
|
||||||
*MetricVec
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
|
||||||
// partitioned by the given label names. At least one label name must be
|
|
||||||
// provided.
|
|
||||||
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
|
||||||
desc := NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
labelNames,
|
|
||||||
opts.ConstLabels,
|
|
||||||
)
|
|
||||||
return &GaugeVec{
|
|
||||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
|
||||||
return newValue(desc, GaugeValue, 0, lvs...)
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetricWithLabelValues replaces the method of the same name in
|
|
||||||
// MetricVec. The difference is that this method returns a Gauge and not a
|
|
||||||
// Metric so that no type conversion is required.
|
|
||||||
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
|
|
||||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
|
||||||
if metric != nil {
|
|
||||||
return metric.(Gauge), err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
|
||||||
// difference is that this method returns a Gauge and not a Metric so that no
|
|
||||||
// type conversion is required.
|
|
||||||
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
|
|
||||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
|
||||||
if metric != nil {
|
|
||||||
return metric.(Gauge), err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
|
||||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
|
||||||
// error, WithLabelValues allows shortcuts like
|
|
||||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
|
||||||
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
|
|
||||||
return m.MetricVec.WithLabelValues(lvs...).(Gauge)
|
|
||||||
}
|
|
||||||
|
|
||||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
|
||||||
// returned an error. By not returning an error, With allows shortcuts like
|
|
||||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
|
||||||
func (m *GaugeVec) With(labels Labels) Gauge {
|
|
||||||
return m.MetricVec.With(labels).(Gauge)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
|
|
||||||
// provided function.
|
|
||||||
//
|
|
||||||
// To create GaugeFunc instances, use NewGaugeFunc.
|
|
||||||
type GaugeFunc interface {
|
|
||||||
Metric
|
|
||||||
Collector
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
|
|
||||||
// value reported is determined by calling the given function from within the
|
|
||||||
// Write method. Take into account that metric collection may happen
|
|
||||||
// concurrently. If that results in concurrent calls to Write, like in the case
|
|
||||||
// where a GaugeFunc is directly registered with Prometheus, the provided
|
|
||||||
// function must be concurrency-safe.
|
|
||||||
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
|
|
||||||
return newValueFunc(NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
nil,
|
|
||||||
opts.ConstLabels,
|
|
||||||
), GaugeValue, function)
|
|
||||||
}
|
|
||||||
|
|
@ -1,202 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"math/rand"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
"testing/quick"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
func listenGaugeStream(vals, result chan float64, done chan struct{}) {
|
|
||||||
var sum float64
|
|
||||||
outer:
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-done:
|
|
||||||
close(vals)
|
|
||||||
for v := range vals {
|
|
||||||
sum += v
|
|
||||||
}
|
|
||||||
break outer
|
|
||||||
case v := <-vals:
|
|
||||||
sum += v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
result <- sum
|
|
||||||
close(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGaugeConcurrency(t *testing.T) {
|
|
||||||
it := func(n uint32) bool {
|
|
||||||
mutations := int(n % 10000)
|
|
||||||
concLevel := int(n%15 + 1)
|
|
||||||
|
|
||||||
var start, end sync.WaitGroup
|
|
||||||
start.Add(1)
|
|
||||||
end.Add(concLevel)
|
|
||||||
|
|
||||||
sStream := make(chan float64, mutations*concLevel)
|
|
||||||
result := make(chan float64)
|
|
||||||
done := make(chan struct{})
|
|
||||||
|
|
||||||
go listenGaugeStream(sStream, result, done)
|
|
||||||
go func() {
|
|
||||||
end.Wait()
|
|
||||||
close(done)
|
|
||||||
}()
|
|
||||||
|
|
||||||
gge := NewGauge(GaugeOpts{
|
|
||||||
Name: "test_gauge",
|
|
||||||
Help: "no help can be found here",
|
|
||||||
})
|
|
||||||
for i := 0; i < concLevel; i++ {
|
|
||||||
vals := make([]float64, mutations)
|
|
||||||
for j := 0; j < mutations; j++ {
|
|
||||||
vals[j] = rand.Float64() - 0.5
|
|
||||||
}
|
|
||||||
|
|
||||||
go func(vals []float64) {
|
|
||||||
start.Wait()
|
|
||||||
for _, v := range vals {
|
|
||||||
sStream <- v
|
|
||||||
gge.Add(v)
|
|
||||||
}
|
|
||||||
end.Done()
|
|
||||||
}(vals)
|
|
||||||
}
|
|
||||||
start.Done()
|
|
||||||
|
|
||||||
if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 {
|
|
||||||
t.Fatalf("expected approx. %f, got %f", expected, got)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(it, nil); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGaugeVecConcurrency(t *testing.T) {
|
|
||||||
it := func(n uint32) bool {
|
|
||||||
mutations := int(n % 10000)
|
|
||||||
concLevel := int(n%15 + 1)
|
|
||||||
vecLength := int(n%5 + 1)
|
|
||||||
|
|
||||||
var start, end sync.WaitGroup
|
|
||||||
start.Add(1)
|
|
||||||
end.Add(concLevel)
|
|
||||||
|
|
||||||
sStreams := make([]chan float64, vecLength)
|
|
||||||
results := make([]chan float64, vecLength)
|
|
||||||
done := make(chan struct{})
|
|
||||||
|
|
||||||
for i := 0; i < vecLength; i++ {
|
|
||||||
sStreams[i] = make(chan float64, mutations*concLevel)
|
|
||||||
results[i] = make(chan float64)
|
|
||||||
go listenGaugeStream(sStreams[i], results[i], done)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
end.Wait()
|
|
||||||
close(done)
|
|
||||||
}()
|
|
||||||
|
|
||||||
gge := NewGaugeVec(
|
|
||||||
GaugeOpts{
|
|
||||||
Name: "test_gauge",
|
|
||||||
Help: "no help can be found here",
|
|
||||||
},
|
|
||||||
[]string{"label"},
|
|
||||||
)
|
|
||||||
for i := 0; i < concLevel; i++ {
|
|
||||||
vals := make([]float64, mutations)
|
|
||||||
pick := make([]int, mutations)
|
|
||||||
for j := 0; j < mutations; j++ {
|
|
||||||
vals[j] = rand.Float64() - 0.5
|
|
||||||
pick[j] = rand.Intn(vecLength)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func(vals []float64) {
|
|
||||||
start.Wait()
|
|
||||||
for i, v := range vals {
|
|
||||||
sStreams[pick[i]] <- v
|
|
||||||
gge.WithLabelValues(string('A' + pick[i])).Add(v)
|
|
||||||
}
|
|
||||||
end.Done()
|
|
||||||
}(vals)
|
|
||||||
}
|
|
||||||
start.Done()
|
|
||||||
|
|
||||||
for i := range sStreams {
|
|
||||||
if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 {
|
|
||||||
t.Fatalf("expected approx. %f, got %f", expected, got)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(it, nil); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGaugeFunc(t *testing.T) {
|
|
||||||
gf := NewGaugeFunc(
|
|
||||||
GaugeOpts{
|
|
||||||
Name: "test_name",
|
|
||||||
Help: "test help",
|
|
||||||
ConstLabels: Labels{"a": "1", "b": "2"},
|
|
||||||
},
|
|
||||||
func() float64 { return 3.1415 },
|
|
||||||
)
|
|
||||||
|
|
||||||
if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got {
|
|
||||||
t.Errorf("expected %q, got %q", expected, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
m := &dto.Metric{}
|
|
||||||
gf.Write(m)
|
|
||||||
|
|
||||||
if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > gauge:<value:3.1415 > `, m.String(); expected != got {
|
|
||||||
t.Errorf("expected %q, got %q", expected, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGaugeSetCurrentTime(t *testing.T) {
|
|
||||||
g := NewGauge(GaugeOpts{
|
|
||||||
Name: "test_name",
|
|
||||||
Help: "test help",
|
|
||||||
})
|
|
||||||
g.SetToCurrentTime()
|
|
||||||
unixTime := float64(time.Now().Unix())
|
|
||||||
|
|
||||||
m := &dto.Metric{}
|
|
||||||
g.Write(m)
|
|
||||||
|
|
||||||
delta := unixTime - m.GetGauge().GetValue()
|
|
||||||
// This is just a smoke test to make sure SetToCurrentTime is not
|
|
||||||
// totally off. Tests with current time involved are hard...
|
|
||||||
if math.Abs(delta) > 5 {
|
|
||||||
t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,263 +0,0 @@
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
"runtime/debug"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type goCollector struct {
|
|
||||||
goroutines Gauge
|
|
||||||
gcDesc *Desc
|
|
||||||
|
|
||||||
// metrics to describe and collect
|
|
||||||
metrics memStatsMetrics
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGoCollector returns a collector which exports metrics about the current
|
|
||||||
// go process.
|
|
||||||
func NewGoCollector() Collector {
|
|
||||||
return &goCollector{
|
|
||||||
goroutines: NewGauge(GaugeOpts{
|
|
||||||
Namespace: "go",
|
|
||||||
Name: "goroutines",
|
|
||||||
Help: "Number of goroutines that currently exist.",
|
|
||||||
}),
|
|
||||||
gcDesc: NewDesc(
|
|
||||||
"go_gc_duration_seconds",
|
|
||||||
"A summary of the GC invocation durations.",
|
|
||||||
nil, nil),
|
|
||||||
metrics: memStatsMetrics{
|
|
||||||
{
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("alloc_bytes"),
|
|
||||||
"Number of bytes allocated and still in use.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("alloc_bytes_total"),
|
|
||||||
"Total number of bytes allocated, even if freed.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
|
|
||||||
valType: CounterValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("sys_bytes"),
|
|
||||||
"Number of bytes obtained from system.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("lookups_total"),
|
|
||||||
"Total number of pointer lookups.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
|
|
||||||
valType: CounterValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("mallocs_total"),
|
|
||||||
"Total number of mallocs.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
|
|
||||||
valType: CounterValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("frees_total"),
|
|
||||||
"Total number of frees.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
|
|
||||||
valType: CounterValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("heap_alloc_bytes"),
|
|
||||||
"Number of heap bytes allocated and still in use.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("heap_sys_bytes"),
|
|
||||||
"Number of heap bytes obtained from system.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("heap_idle_bytes"),
|
|
||||||
"Number of heap bytes waiting to be used.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("heap_inuse_bytes"),
|
|
||||||
"Number of heap bytes that are in use.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("heap_released_bytes"),
|
|
||||||
"Number of heap bytes released to OS.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("heap_objects"),
|
|
||||||
"Number of allocated objects.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("stack_inuse_bytes"),
|
|
||||||
"Number of bytes in use by the stack allocator.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("stack_sys_bytes"),
|
|
||||||
"Number of bytes obtained from system for stack allocator.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("mspan_inuse_bytes"),
|
|
||||||
"Number of bytes in use by mspan structures.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("mspan_sys_bytes"),
|
|
||||||
"Number of bytes used for mspan structures obtained from system.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("mcache_inuse_bytes"),
|
|
||||||
"Number of bytes in use by mcache structures.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("mcache_sys_bytes"),
|
|
||||||
"Number of bytes used for mcache structures obtained from system.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("buck_hash_sys_bytes"),
|
|
||||||
"Number of bytes used by the profiling bucket hash table.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("gc_sys_bytes"),
|
|
||||||
"Number of bytes used for garbage collection system metadata.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("other_sys_bytes"),
|
|
||||||
"Number of bytes used for other system allocations.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("next_gc_bytes"),
|
|
||||||
"Number of heap bytes when next garbage collection will take place.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
|
|
||||||
valType: GaugeValue,
|
|
||||||
}, {
|
|
||||||
desc: NewDesc(
|
|
||||||
memstatNamespace("last_gc_time_seconds"),
|
|
||||||
"Number of seconds since 1970 of last garbage collection.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
|
|
||||||
valType: GaugeValue,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func memstatNamespace(s string) string {
|
|
||||||
return fmt.Sprintf("go_memstats_%s", s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describe returns all descriptions of the collector.
|
|
||||||
func (c *goCollector) Describe(ch chan<- *Desc) {
|
|
||||||
ch <- c.goroutines.Desc()
|
|
||||||
ch <- c.gcDesc
|
|
||||||
|
|
||||||
for _, i := range c.metrics {
|
|
||||||
ch <- i.desc
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect returns the current state of all metrics of the collector.
|
|
||||||
func (c *goCollector) Collect(ch chan<- Metric) {
|
|
||||||
c.goroutines.Set(float64(runtime.NumGoroutine()))
|
|
||||||
ch <- c.goroutines
|
|
||||||
|
|
||||||
var stats debug.GCStats
|
|
||||||
stats.PauseQuantiles = make([]time.Duration, 5)
|
|
||||||
debug.ReadGCStats(&stats)
|
|
||||||
|
|
||||||
quantiles := make(map[float64]float64)
|
|
||||||
for idx, pq := range stats.PauseQuantiles[1:] {
|
|
||||||
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
|
|
||||||
}
|
|
||||||
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
|
|
||||||
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
|
|
||||||
|
|
||||||
ms := &runtime.MemStats{}
|
|
||||||
runtime.ReadMemStats(ms)
|
|
||||||
for _, i := range c.metrics {
|
|
||||||
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// memStatsMetrics provide description, value, and value type for memstat metrics.
|
|
||||||
type memStatsMetrics []struct {
|
|
||||||
desc *Desc
|
|
||||||
eval func(*runtime.MemStats) float64
|
|
||||||
valType ValueType
|
|
||||||
}
|
|
||||||
|
|
@ -1,123 +0,0 @@
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGoCollector(t *testing.T) {
|
|
||||||
var (
|
|
||||||
c = NewGoCollector()
|
|
||||||
ch = make(chan Metric)
|
|
||||||
waitc = make(chan struct{})
|
|
||||||
closec = make(chan struct{})
|
|
||||||
old = -1
|
|
||||||
)
|
|
||||||
defer close(closec)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
c.Collect(ch)
|
|
||||||
go func(c <-chan struct{}) {
|
|
||||||
<-c
|
|
||||||
}(closec)
|
|
||||||
<-waitc
|
|
||||||
c.Collect(ch)
|
|
||||||
}()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case metric := <-ch:
|
|
||||||
switch m := metric.(type) {
|
|
||||||
// Attention, this also catches Counter...
|
|
||||||
case Gauge:
|
|
||||||
pb := &dto.Metric{}
|
|
||||||
m.Write(pb)
|
|
||||||
if pb.GetGauge() == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if old == -1 {
|
|
||||||
old = int(pb.GetGauge().GetValue())
|
|
||||||
close(waitc)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
|
|
||||||
// TODO: This is flaky in highly concurrent situations.
|
|
||||||
t.Errorf("want 1 new goroutine, got %d", diff)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GoCollector performs two sends per call.
|
|
||||||
// On line 27 we need to receive the second send
|
|
||||||
// to shut down cleanly.
|
|
||||||
<-ch
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case <-time.After(1 * time.Second):
|
|
||||||
t.Fatalf("expected collect timed out")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGCCollector(t *testing.T) {
|
|
||||||
var (
|
|
||||||
c = NewGoCollector()
|
|
||||||
ch = make(chan Metric)
|
|
||||||
waitc = make(chan struct{})
|
|
||||||
closec = make(chan struct{})
|
|
||||||
oldGC uint64
|
|
||||||
oldPause float64
|
|
||||||
)
|
|
||||||
defer close(closec)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
c.Collect(ch)
|
|
||||||
// force GC
|
|
||||||
runtime.GC()
|
|
||||||
<-waitc
|
|
||||||
c.Collect(ch)
|
|
||||||
}()
|
|
||||||
|
|
||||||
first := true
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case metric := <-ch:
|
|
||||||
switch m := metric.(type) {
|
|
||||||
case *constSummary, *value:
|
|
||||||
pb := &dto.Metric{}
|
|
||||||
m.Write(pb)
|
|
||||||
if pb.GetSummary() == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(pb.GetSummary().Quantile) != 5 {
|
|
||||||
t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile))
|
|
||||||
}
|
|
||||||
for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
|
|
||||||
if *pb.GetSummary().Quantile[idx].Quantile != want {
|
|
||||||
t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if first {
|
|
||||||
first = false
|
|
||||||
oldGC = *pb.GetSummary().SampleCount
|
|
||||||
oldPause = *pb.GetSummary().SampleSum
|
|
||||||
close(waitc)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
|
|
||||||
t.Errorf("want 1 new garbage collection run, got %d", diff)
|
|
||||||
}
|
|
||||||
if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
|
|
||||||
t.Errorf("want moar pause, got %f", diff)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case <-time.After(1 * time.Second):
|
|
||||||
t.Fatalf("expected collect timed out")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,280 +0,0 @@
|
||||||
// Copyright 2016 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package graphite provides a bridge to push Prometheus metrics to a Graphite
|
|
||||||
// server.
|
|
||||||
package graphite
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"sort"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
"github.com/prometheus/common/model"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultInterval = 15 * time.Second
|
|
||||||
millisecondsPerSecond = 1000
|
|
||||||
)
|
|
||||||
|
|
||||||
// HandlerErrorHandling defines how a Handler serving metrics will handle
|
|
||||||
// errors.
|
|
||||||
type HandlerErrorHandling int
|
|
||||||
|
|
||||||
// These constants cause handlers serving metrics to behave as described if
|
|
||||||
// errors are encountered.
|
|
||||||
const (
|
|
||||||
// Ignore errors and try to push as many metrics to Graphite as possible.
|
|
||||||
ContinueOnError HandlerErrorHandling = iota
|
|
||||||
|
|
||||||
// Abort the push to Graphite upon the first error encountered.
|
|
||||||
AbortOnError
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config defines the Graphite bridge config.
|
|
||||||
type Config struct {
|
|
||||||
// The url to push data to. Required.
|
|
||||||
URL string
|
|
||||||
|
|
||||||
// The prefix for the pushed Graphite metrics. Defaults to empty string.
|
|
||||||
Prefix string
|
|
||||||
|
|
||||||
// The interval to use for pushing data to Graphite. Defaults to 15 seconds.
|
|
||||||
Interval time.Duration
|
|
||||||
|
|
||||||
// The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
|
|
||||||
Timeout time.Duration
|
|
||||||
|
|
||||||
// The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
|
|
||||||
Gatherer prometheus.Gatherer
|
|
||||||
|
|
||||||
// The logger that messages are written to. Defaults to no logging.
|
|
||||||
Logger Logger
|
|
||||||
|
|
||||||
// ErrorHandling defines how errors are handled. Note that errors are
|
|
||||||
// logged regardless of the configured ErrorHandling provided Logger
|
|
||||||
// is not nil.
|
|
||||||
ErrorHandling HandlerErrorHandling
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bridge pushes metrics to the configured Graphite server.
|
|
||||||
type Bridge struct {
|
|
||||||
url string
|
|
||||||
prefix string
|
|
||||||
interval time.Duration
|
|
||||||
timeout time.Duration
|
|
||||||
|
|
||||||
errorHandling HandlerErrorHandling
|
|
||||||
logger Logger
|
|
||||||
|
|
||||||
g prometheus.Gatherer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Logger is the minimal interface Bridge needs for logging. Note that
|
|
||||||
// log.Logger from the standard library implements this interface, and it is
|
|
||||||
// easy to implement by custom loggers, if they don't do so already anyway.
|
|
||||||
type Logger interface {
|
|
||||||
Println(v ...interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBridge returns a pointer to a new Bridge struct.
|
|
||||||
func NewBridge(c *Config) (*Bridge, error) {
|
|
||||||
b := &Bridge{}
|
|
||||||
|
|
||||||
if c.URL == "" {
|
|
||||||
return nil, errors.New("missing URL")
|
|
||||||
}
|
|
||||||
b.url = c.URL
|
|
||||||
|
|
||||||
if c.Gatherer == nil {
|
|
||||||
b.g = prometheus.DefaultGatherer
|
|
||||||
} else {
|
|
||||||
b.g = c.Gatherer
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Logger != nil {
|
|
||||||
b.logger = c.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Prefix != "" {
|
|
||||||
b.prefix = c.Prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
var z time.Duration
|
|
||||||
if c.Interval == z {
|
|
||||||
b.interval = defaultInterval
|
|
||||||
} else {
|
|
||||||
b.interval = c.Interval
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Timeout == z {
|
|
||||||
b.timeout = defaultInterval
|
|
||||||
} else {
|
|
||||||
b.timeout = c.Timeout
|
|
||||||
}
|
|
||||||
|
|
||||||
b.errorHandling = c.ErrorHandling
|
|
||||||
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run starts the event loop that pushes Prometheus metrics to Graphite at the
|
|
||||||
// configured interval.
|
|
||||||
func (b *Bridge) Run(ctx context.Context) {
|
|
||||||
ticker := time.NewTicker(b.interval)
|
|
||||||
defer ticker.Stop()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
if err := b.Push(); err != nil && b.logger != nil {
|
|
||||||
b.logger.Println("error pushing to Graphite:", err)
|
|
||||||
}
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Push pushes Prometheus metrics to the configured Graphite server.
|
|
||||||
func (b *Bridge) Push() error {
|
|
||||||
mfs, err := b.g.Gather()
|
|
||||||
if err != nil || len(mfs) == 0 {
|
|
||||||
switch b.errorHandling {
|
|
||||||
case AbortOnError:
|
|
||||||
return err
|
|
||||||
case ContinueOnError:
|
|
||||||
if b.logger != nil {
|
|
||||||
b.logger.Println("continue on error:", err)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic("unrecognized error handling value")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
conn, err := net.DialTimeout("tcp", b.url, b.timeout)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
|
|
||||||
return writeMetrics(conn, mfs, b.prefix, model.Now())
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
|
|
||||||
vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
|
|
||||||
Timestamp: now,
|
|
||||||
}, mfs...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := bufio.NewWriter(w)
|
|
||||||
for _, s := range vec {
|
|
||||||
if err := writeSanitized(buf, prefix); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := buf.WriteByte('.'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := writeMetric(buf, s.Metric); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := buf.Flush(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeMetric(buf *bufio.Writer, m model.Metric) error {
|
|
||||||
metricName, hasName := m[model.MetricNameLabel]
|
|
||||||
numLabels := len(m) - 1
|
|
||||||
if !hasName {
|
|
||||||
numLabels = len(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
labelStrings := make([]string, 0, numLabels)
|
|
||||||
for label, value := range m {
|
|
||||||
if label != model.MetricNameLabel {
|
|
||||||
labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
switch numLabels {
|
|
||||||
case 0:
|
|
||||||
if hasName {
|
|
||||||
return writeSanitized(buf, string(metricName))
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
sort.Strings(labelStrings)
|
|
||||||
if err = writeSanitized(buf, string(metricName)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, s := range labelStrings {
|
|
||||||
if err = buf.WriteByte('.'); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = writeSanitized(buf, s); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeSanitized(buf *bufio.Writer, s string) error {
|
|
||||||
prevUnderscore := false
|
|
||||||
|
|
||||||
for _, c := range s {
|
|
||||||
c = replaceInvalidRune(c)
|
|
||||||
if c == '_' {
|
|
||||||
if prevUnderscore {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
prevUnderscore = true
|
|
||||||
} else {
|
|
||||||
prevUnderscore = false
|
|
||||||
}
|
|
||||||
if _, err := buf.WriteRune(c); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func replaceInvalidRune(c rune) rune {
|
|
||||||
if c == ' ' {
|
|
||||||
return '.'
|
|
||||||
}
|
|
||||||
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) {
|
|
||||||
return '_'
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
@ -1,309 +0,0 @@
|
||||||
package graphite
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/model"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSanitize(t *testing.T) {
|
|
||||||
testCases := []struct {
|
|
||||||
in, out string
|
|
||||||
}{
|
|
||||||
{in: "hello", out: "hello"},
|
|
||||||
{in: "hE/l1o", out: "hE_l1o"},
|
|
||||||
{in: "he,*ll(.o", out: "he_ll_o"},
|
|
||||||
{in: "hello_there%^&", out: "hello_there_"},
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
w := bufio.NewWriter(&buf)
|
|
||||||
|
|
||||||
for i, tc := range testCases {
|
|
||||||
if err := writeSanitized(w, tc.in); err != nil {
|
|
||||||
t.Fatalf("write failed: %v", err)
|
|
||||||
}
|
|
||||||
if err := w.Flush(); err != nil {
|
|
||||||
t.Fatalf("flush failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if want, got := tc.out, buf.String(); want != got {
|
|
||||||
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.Reset()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriteSummary(t *testing.T) {
|
|
||||||
sumVec := prometheus.NewSummaryVec(
|
|
||||||
prometheus.SummaryOpts{
|
|
||||||
Name: "name",
|
|
||||||
Help: "docstring",
|
|
||||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
},
|
|
||||||
[]string{"labelname"},
|
|
||||||
)
|
|
||||||
|
|
||||||
sumVec.WithLabelValues("val1").Observe(float64(10))
|
|
||||||
sumVec.WithLabelValues("val1").Observe(float64(20))
|
|
||||||
sumVec.WithLabelValues("val1").Observe(float64(30))
|
|
||||||
sumVec.WithLabelValues("val2").Observe(float64(20))
|
|
||||||
sumVec.WithLabelValues("val2").Observe(float64(30))
|
|
||||||
sumVec.WithLabelValues("val2").Observe(float64(40))
|
|
||||||
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
reg.MustRegister(sumVec)
|
|
||||||
|
|
||||||
mfs, err := reg.Gather()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
now := model.Time(1477043083)
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err = writeMetrics(&buf, mfs, "prefix", now)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
|
|
||||||
prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
|
|
||||||
prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
|
|
||||||
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
|
|
||||||
prefix.name_count.constname.constvalue.labelname.val1 3 1477043
|
|
||||||
prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
|
|
||||||
prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
|
|
||||||
prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
|
|
||||||
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
|
|
||||||
prefix.name_count.constname.constvalue.labelname.val2 3 1477043
|
|
||||||
`
|
|
||||||
|
|
||||||
if got := buf.String(); want != got {
|
|
||||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriteHistogram(t *testing.T) {
|
|
||||||
histVec := prometheus.NewHistogramVec(
|
|
||||||
prometheus.HistogramOpts{
|
|
||||||
Name: "name",
|
|
||||||
Help: "docstring",
|
|
||||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
|
||||||
Buckets: []float64{0.01, 0.02, 0.05, 0.1},
|
|
||||||
},
|
|
||||||
[]string{"labelname"},
|
|
||||||
)
|
|
||||||
|
|
||||||
histVec.WithLabelValues("val1").Observe(float64(10))
|
|
||||||
histVec.WithLabelValues("val1").Observe(float64(20))
|
|
||||||
histVec.WithLabelValues("val1").Observe(float64(30))
|
|
||||||
histVec.WithLabelValues("val2").Observe(float64(20))
|
|
||||||
histVec.WithLabelValues("val2").Observe(float64(30))
|
|
||||||
histVec.WithLabelValues("val2").Observe(float64(40))
|
|
||||||
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
reg.MustRegister(histVec)
|
|
||||||
|
|
||||||
mfs, err := reg.Gather()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
now := model.Time(1477043083)
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err = writeMetrics(&buf, mfs, "prefix", now)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
|
|
||||||
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
|
|
||||||
prefix.name_count.constname.constvalue.labelname.val1 3 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
|
|
||||||
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
|
|
||||||
prefix.name_count.constname.constvalue.labelname.val2 3 1477043
|
|
||||||
prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
|
|
||||||
`
|
|
||||||
if got := buf.String(); want != got {
|
|
||||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToReader(t *testing.T) {
|
|
||||||
cntVec := prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Name: "name",
|
|
||||||
Help: "docstring",
|
|
||||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
|
||||||
},
|
|
||||||
[]string{"labelname"},
|
|
||||||
)
|
|
||||||
cntVec.WithLabelValues("val1").Inc()
|
|
||||||
cntVec.WithLabelValues("val2").Inc()
|
|
||||||
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
reg.MustRegister(cntVec)
|
|
||||||
|
|
||||||
want := `prefix.name.constname.constvalue.labelname.val1 1 1477043
|
|
||||||
prefix.name.constname.constvalue.labelname.val2 1 1477043
|
|
||||||
`
|
|
||||||
mfs, err := reg.Gather()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
now := model.Time(1477043083)
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err = writeMetrics(&buf, mfs, "prefix", now)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if got := buf.String(); want != got {
|
|
||||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPush(t *testing.T) {
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
cntVec := prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Name: "name",
|
|
||||||
Help: "docstring",
|
|
||||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
|
||||||
},
|
|
||||||
[]string{"labelname"},
|
|
||||||
)
|
|
||||||
cntVec.WithLabelValues("val1").Inc()
|
|
||||||
cntVec.WithLabelValues("val2").Inc()
|
|
||||||
reg.MustRegister(cntVec)
|
|
||||||
|
|
||||||
host := "localhost"
|
|
||||||
port := ":56789"
|
|
||||||
b, err := NewBridge(&Config{
|
|
||||||
URL: host + port,
|
|
||||||
Gatherer: reg,
|
|
||||||
Prefix: "prefix",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error creating bridge: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
nmg, err := newMockGraphite(port)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error creating mock graphite: %v", err)
|
|
||||||
}
|
|
||||||
defer nmg.Close()
|
|
||||||
|
|
||||||
err = b.Push()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error pushing: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
wants := []string{
|
|
||||||
"prefix.name.constname.constvalue.labelname.val1 1",
|
|
||||||
"prefix.name.constname.constvalue.labelname.val2 1",
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case got := <-nmg.readc:
|
|
||||||
for _, want := range wants {
|
|
||||||
matched, err := regexp.MatchString(want, got)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error pushing: %v", err)
|
|
||||||
}
|
|
||||||
if !matched {
|
|
||||||
t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
case err := <-nmg.errc:
|
|
||||||
t.Fatalf("error reading push: %v", err)
|
|
||||||
case <-time.After(50 * time.Millisecond):
|
|
||||||
t.Fatalf("no result from graphite server")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newMockGraphite(port string) (*mockGraphite, error) {
|
|
||||||
readc := make(chan string)
|
|
||||||
errc := make(chan error)
|
|
||||||
ln, err := net.Listen("tcp", port)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
conn, err := ln.Accept()
|
|
||||||
if err != nil {
|
|
||||||
errc <- err
|
|
||||||
}
|
|
||||||
var b bytes.Buffer
|
|
||||||
io.Copy(&b, conn)
|
|
||||||
readc <- b.String()
|
|
||||||
}()
|
|
||||||
|
|
||||||
return &mockGraphite{
|
|
||||||
readc: readc,
|
|
||||||
errc: errc,
|
|
||||||
Listener: ln,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type mockGraphite struct {
|
|
||||||
readc chan string
|
|
||||||
errc chan error
|
|
||||||
|
|
||||||
net.Listener
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleBridge() {
|
|
||||||
b, err := NewBridge(&Config{
|
|
||||||
URL: "graphite.example.org:3099",
|
|
||||||
Gatherer: prometheus.DefaultGatherer,
|
|
||||||
Prefix: "prefix",
|
|
||||||
Interval: 15 * time.Second,
|
|
||||||
Timeout: 10 * time.Second,
|
|
||||||
ErrorHandling: AbortOnError,
|
|
||||||
Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
// Start something in a goroutine that uses metrics.
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Push initial metrics to Graphite. Fail fast if the push fails.
|
|
||||||
if err := b.Push(); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a Context to control stopping the Run() loop that pushes
|
|
||||||
// metrics to Graphite.
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
// Start pushing metrics to Graphite in the Run() loop.
|
|
||||||
b.Run(ctx)
|
|
||||||
}
|
|
||||||
|
|
@ -1,444 +0,0 @@
|
||||||
// Copyright 2015 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Histogram counts individual observations from an event or sample stream in
|
|
||||||
// configurable buckets. Similar to a summary, it also provides a sum of
|
|
||||||
// observations and an observation count.
|
|
||||||
//
|
|
||||||
// On the Prometheus server, quantiles can be calculated from a Histogram using
|
|
||||||
// the histogram_quantile function in the query language.
|
|
||||||
//
|
|
||||||
// Note that Histograms, in contrast to Summaries, can be aggregated with the
|
|
||||||
// Prometheus query language (see the documentation for detailed
|
|
||||||
// procedures). However, Histograms require the user to pre-define suitable
|
|
||||||
// buckets, and they are in general less accurate. The Observe method of a
|
|
||||||
// Histogram has a very low performance overhead in comparison with the Observe
|
|
||||||
// method of a Summary.
|
|
||||||
//
|
|
||||||
// To create Histogram instances, use NewHistogram.
|
|
||||||
type Histogram interface {
|
|
||||||
Metric
|
|
||||||
Collector
|
|
||||||
|
|
||||||
// Observe adds a single observation to the histogram.
|
|
||||||
Observe(float64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// bucketLabel is used for the label that defines the upper bound of a
|
|
||||||
// bucket of a histogram ("le" -> "less or equal").
|
|
||||||
const bucketLabel = "le"
|
|
||||||
|
|
||||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
|
||||||
// tailored to broadly measure the response time (in seconds) of a network
|
|
||||||
// service. Most likely, however, you will be required to define buckets
|
|
||||||
// customized to your use case.
|
|
||||||
var (
|
|
||||||
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
|
||||||
|
|
||||||
errBucketLabelNotAllowed = fmt.Errorf(
|
|
||||||
"%q is not allowed as label name in histograms", bucketLabel,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
|
|
||||||
// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
|
|
||||||
// and not included in the returned slice. The returned slice is meant to be
|
|
||||||
// used for the Buckets field of HistogramOpts.
|
|
||||||
//
|
|
||||||
// The function panics if 'count' is zero or negative.
|
|
||||||
func LinearBuckets(start, width float64, count int) []float64 {
|
|
||||||
if count < 1 {
|
|
||||||
panic("LinearBuckets needs a positive count")
|
|
||||||
}
|
|
||||||
buckets := make([]float64, count)
|
|
||||||
for i := range buckets {
|
|
||||||
buckets[i] = start
|
|
||||||
start += width
|
|
||||||
}
|
|
||||||
return buckets
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
|
|
||||||
// upper bound of 'start' and each following bucket's upper bound is 'factor'
|
|
||||||
// times the previous bucket's upper bound. The final +Inf bucket is not counted
|
|
||||||
// and not included in the returned slice. The returned slice is meant to be
|
|
||||||
// used for the Buckets field of HistogramOpts.
|
|
||||||
//
|
|
||||||
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
|
|
||||||
// or if 'factor' is less than or equal 1.
|
|
||||||
func ExponentialBuckets(start, factor float64, count int) []float64 {
|
|
||||||
if count < 1 {
|
|
||||||
panic("ExponentialBuckets needs a positive count")
|
|
||||||
}
|
|
||||||
if start <= 0 {
|
|
||||||
panic("ExponentialBuckets needs a positive start value")
|
|
||||||
}
|
|
||||||
if factor <= 1 {
|
|
||||||
panic("ExponentialBuckets needs a factor greater than 1")
|
|
||||||
}
|
|
||||||
buckets := make([]float64, count)
|
|
||||||
for i := range buckets {
|
|
||||||
buckets[i] = start
|
|
||||||
start *= factor
|
|
||||||
}
|
|
||||||
return buckets
|
|
||||||
}
|
|
||||||
|
|
||||||
// HistogramOpts bundles the options for creating a Histogram metric. It is
|
|
||||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
|
||||||
// optional and can safely be left at their zero value.
|
|
||||||
type HistogramOpts struct {
|
|
||||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
|
||||||
// name of the Histogram (created by joining these components with
|
|
||||||
// "_"). Only Name is mandatory, the others merely help structuring the
|
|
||||||
// name. Note that the fully-qualified name of the Histogram must be a
|
|
||||||
// valid Prometheus metric name.
|
|
||||||
Namespace string
|
|
||||||
Subsystem string
|
|
||||||
Name string
|
|
||||||
|
|
||||||
// Help provides information about this Histogram. Mandatory!
|
|
||||||
//
|
|
||||||
// Metrics with the same fully-qualified name must have the same Help
|
|
||||||
// string.
|
|
||||||
Help string
|
|
||||||
|
|
||||||
// ConstLabels are used to attach fixed labels to this
|
|
||||||
// Histogram. Histograms with the same fully-qualified name must have the
|
|
||||||
// same label names in their ConstLabels.
|
|
||||||
//
|
|
||||||
// Note that in most cases, labels have a value that varies during the
|
|
||||||
// lifetime of a process. Those labels are usually managed with a
|
|
||||||
// HistogramVec. ConstLabels serve only special purposes. One is for the
|
|
||||||
// special case where the value of a label does not change during the
|
|
||||||
// lifetime of a process, e.g. if the revision of the running binary is
|
|
||||||
// put into a label. Another, more advanced purpose is if more than one
|
|
||||||
// Collector needs to collect Histograms with the same fully-qualified
|
|
||||||
// name. In that case, those Summaries must differ in the values of
|
|
||||||
// their ConstLabels. See the Collector examples.
|
|
||||||
//
|
|
||||||
// If the value of a label never changes (not even between binaries),
|
|
||||||
// that label most likely should not be a label at all (but part of the
|
|
||||||
// metric name).
|
|
||||||
ConstLabels Labels
|
|
||||||
|
|
||||||
// Buckets defines the buckets into which observations are counted. Each
|
|
||||||
// element in the slice is the upper inclusive bound of a bucket. The
|
|
||||||
// values must be sorted in strictly increasing order. There is no need
|
|
||||||
// to add a highest bucket with +Inf bound, it will be added
|
|
||||||
// implicitly. The default value is DefBuckets.
|
|
||||||
Buckets []float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
|
|
||||||
// panics if the buckets in HistogramOpts are not in strictly increasing order.
|
|
||||||
func NewHistogram(opts HistogramOpts) Histogram {
|
|
||||||
return newHistogram(
|
|
||||||
NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
nil,
|
|
||||||
opts.ConstLabels,
|
|
||||||
),
|
|
||||||
opts,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
|
|
||||||
if len(desc.variableLabels) != len(labelValues) {
|
|
||||||
panic(errInconsistentCardinality)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, n := range desc.variableLabels {
|
|
||||||
if n == bucketLabel {
|
|
||||||
panic(errBucketLabelNotAllowed)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, lp := range desc.constLabelPairs {
|
|
||||||
if lp.GetName() == bucketLabel {
|
|
||||||
panic(errBucketLabelNotAllowed)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(opts.Buckets) == 0 {
|
|
||||||
opts.Buckets = DefBuckets
|
|
||||||
}
|
|
||||||
|
|
||||||
h := &histogram{
|
|
||||||
desc: desc,
|
|
||||||
upperBounds: opts.Buckets,
|
|
||||||
labelPairs: makeLabelPairs(desc, labelValues),
|
|
||||||
}
|
|
||||||
for i, upperBound := range h.upperBounds {
|
|
||||||
if i < len(h.upperBounds)-1 {
|
|
||||||
if upperBound >= h.upperBounds[i+1] {
|
|
||||||
panic(fmt.Errorf(
|
|
||||||
"histogram buckets must be in increasing order: %f >= %f",
|
|
||||||
upperBound, h.upperBounds[i+1],
|
|
||||||
))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if math.IsInf(upperBound, +1) {
|
|
||||||
// The +Inf bucket is implicit. Remove it here.
|
|
||||||
h.upperBounds = h.upperBounds[:i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Finally we know the final length of h.upperBounds and can make counts.
|
|
||||||
h.counts = make([]uint64, len(h.upperBounds))
|
|
||||||
|
|
||||||
h.init(h) // Init self-collection.
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
type histogram struct {
|
|
||||||
// sumBits contains the bits of the float64 representing the sum of all
|
|
||||||
// observations. sumBits and count have to go first in the struct to
|
|
||||||
// guarantee alignment for atomic operations.
|
|
||||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
|
||||||
sumBits uint64
|
|
||||||
count uint64
|
|
||||||
|
|
||||||
selfCollector
|
|
||||||
// Note that there is no mutex required.
|
|
||||||
|
|
||||||
desc *Desc
|
|
||||||
|
|
||||||
upperBounds []float64
|
|
||||||
counts []uint64
|
|
||||||
|
|
||||||
labelPairs []*dto.LabelPair
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *histogram) Desc() *Desc {
|
|
||||||
return h.desc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *histogram) Observe(v float64) {
|
|
||||||
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
|
|
||||||
// slightly faster than the binary search. If we really care, we could
|
|
||||||
// switch from one search strategy to the other depending on the number
|
|
||||||
// of buckets.
|
|
||||||
//
|
|
||||||
// Microbenchmarks (BenchmarkHistogramNoLabels):
|
|
||||||
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
|
|
||||||
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
|
|
||||||
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
|
|
||||||
i := sort.SearchFloat64s(h.upperBounds, v)
|
|
||||||
if i < len(h.counts) {
|
|
||||||
atomic.AddUint64(&h.counts[i], 1)
|
|
||||||
}
|
|
||||||
atomic.AddUint64(&h.count, 1)
|
|
||||||
for {
|
|
||||||
oldBits := atomic.LoadUint64(&h.sumBits)
|
|
||||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
|
||||||
if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *histogram) Write(out *dto.Metric) error {
|
|
||||||
his := &dto.Histogram{}
|
|
||||||
buckets := make([]*dto.Bucket, len(h.upperBounds))
|
|
||||||
|
|
||||||
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
|
|
||||||
his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
|
|
||||||
var count uint64
|
|
||||||
for i, upperBound := range h.upperBounds {
|
|
||||||
count += atomic.LoadUint64(&h.counts[i])
|
|
||||||
buckets[i] = &dto.Bucket{
|
|
||||||
CumulativeCount: proto.Uint64(count),
|
|
||||||
UpperBound: proto.Float64(upperBound),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
his.Bucket = buckets
|
|
||||||
out.Histogram = his
|
|
||||||
out.Label = h.labelPairs
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HistogramVec is a Collector that bundles a set of Histograms that all share the
|
|
||||||
// same Desc, but have different values for their variable labels. This is used
|
|
||||||
// if you want to count the same thing partitioned by various dimensions
|
|
||||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
|
||||||
// instances with NewHistogramVec.
|
|
||||||
type HistogramVec struct {
|
|
||||||
*MetricVec
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
|
||||||
// partitioned by the given label names. At least one label name must be
|
|
||||||
// provided.
|
|
||||||
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
|
||||||
desc := NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
labelNames,
|
|
||||||
opts.ConstLabels,
|
|
||||||
)
|
|
||||||
return &HistogramVec{
|
|
||||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
|
||||||
return newHistogram(desc, opts, lvs...)
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetricWithLabelValues replaces the method of the same name in
// MetricVec. The difference is that this method returns a Histogram and not a
// Metric so that no type conversion is required.
func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
	if metric != nil {
		return metric.(Histogram), err
	}
	return nil, err
}

// GetMetricWith replaces the method of the same name in MetricVec. The
// difference is that this method returns a Histogram and not a Metric so that no
// type conversion is required.
func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
	metric, err := m.MetricVec.GetMetricWith(labels)
	if metric != nil {
		return metric.(Histogram), err
	}
	return nil, err
}

// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
//	myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
	return m.MetricVec.WithLabelValues(lvs...).(Histogram)
}

// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
//	myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *HistogramVec) With(labels Labels) Histogram {
	return m.MetricVec.With(labels).(Histogram)
}

type constHistogram struct {
	desc       *Desc
	count      uint64
	sum        float64
	buckets    map[float64]uint64
	labelPairs []*dto.LabelPair
}

func (h *constHistogram) Desc() *Desc {
	return h.desc
}

func (h *constHistogram) Write(out *dto.Metric) error {
	his := &dto.Histogram{}
	buckets := make([]*dto.Bucket, 0, len(h.buckets))

	his.SampleCount = proto.Uint64(h.count)
	his.SampleSum = proto.Float64(h.sum)

	for upperBound, count := range h.buckets {
		buckets = append(buckets, &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(upperBound),
		})
	}

	if len(buckets) > 0 {
		sort.Sort(buckSort(buckets))
	}
	his.Bucket = buckets

	out.Histogram = his
	out.Label = h.labelPairs

	return nil
}

// NewConstHistogram returns a metric representing a Prometheus histogram with
// fixed values for the count, sum, and bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// Collectors, it is useful as a throw-away metric that is generated on the fly
// and sent to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
func NewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) (Metric, error) {
	if len(desc.variableLabels) != len(labelValues) {
		return nil, errInconsistentCardinality
	}
	return &constHistogram{
		desc:       desc,
		count:      count,
		sum:        sum,
		buckets:    buckets,
		labelPairs: makeLabelPairs(desc, labelValues),
	}, nil
}

// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) Metric {
	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}

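// A hedged sketch of the "throw-away metric in Collect" pattern described in
// the NewConstHistogram comment above; myCollector, its desc field and the
// numbers are illustrative assumptions, not part of this package.
//
//	func (c *myCollector) Collect(ch chan<- Metric) {
//		ch <- MustNewConstHistogram(
//			c.desc,
//			4711,   // total observation count
//			403.4,  // sum of all observations
//			map[float64]uint64{25: 121, 50: 2403, 100: 3221, 250: 4233},
//		)
//	}
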
type buckSort []*dto.Bucket

func (s buckSort) Len() int {
	return len(s)
}

func (s buckSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s buckSort) Less(i, j int) bool {
	return s[i].GetUpperBound() < s[j].GetUpperBound()
}

@ -1,348 +0,0 @@
|
||||||
// Copyright 2015 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"math/rand"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
"testing/quick"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
func benchmarkHistogramObserve(w int, b *testing.B) {
|
|
||||||
b.StopTimer()
|
|
||||||
|
|
||||||
wg := new(sync.WaitGroup)
|
|
||||||
wg.Add(w)
|
|
||||||
|
|
||||||
g := new(sync.WaitGroup)
|
|
||||||
g.Add(1)
|
|
||||||
|
|
||||||
s := NewHistogram(HistogramOpts{})
|
|
||||||
|
|
||||||
for i := 0; i < w; i++ {
|
|
||||||
go func() {
|
|
||||||
g.Wait()
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
s.Observe(float64(i))
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
b.StartTimer()
|
|
||||||
g.Done()
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramObserve1(b *testing.B) {
|
|
||||||
benchmarkHistogramObserve(1, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramObserve2(b *testing.B) {
|
|
||||||
benchmarkHistogramObserve(2, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramObserve4(b *testing.B) {
|
|
||||||
benchmarkHistogramObserve(4, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramObserve8(b *testing.B) {
|
|
||||||
benchmarkHistogramObserve(8, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchmarkHistogramWrite(w int, b *testing.B) {
|
|
||||||
b.StopTimer()
|
|
||||||
|
|
||||||
wg := new(sync.WaitGroup)
|
|
||||||
wg.Add(w)
|
|
||||||
|
|
||||||
g := new(sync.WaitGroup)
|
|
||||||
g.Add(1)
|
|
||||||
|
|
||||||
s := NewHistogram(HistogramOpts{})
|
|
||||||
|
|
||||||
for i := 0; i < 1000000; i++ {
|
|
||||||
s.Observe(float64(i))
|
|
||||||
}
|
|
||||||
|
|
||||||
for j := 0; j < w; j++ {
|
|
||||||
outs := make([]dto.Metric, b.N)
|
|
||||||
|
|
||||||
go func(o []dto.Metric) {
|
|
||||||
g.Wait()
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
s.Write(&o[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Done()
|
|
||||||
}(outs)
|
|
||||||
}
|
|
||||||
|
|
||||||
b.StartTimer()
|
|
||||||
g.Done()
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramWrite1(b *testing.B) {
|
|
||||||
benchmarkHistogramWrite(1, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramWrite2(b *testing.B) {
|
|
||||||
benchmarkHistogramWrite(2, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramWrite4(b *testing.B) {
|
|
||||||
benchmarkHistogramWrite(4, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHistogramWrite8(b *testing.B) {
|
|
||||||
benchmarkHistogramWrite(8, b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHistogramNonMonotonicBuckets(t *testing.T) {
|
|
||||||
testCases := map[string][]float64{
|
|
||||||
"not strictly monotonic": {1, 2, 2, 3},
|
|
||||||
"not monotonic at all": {1, 2, 4, 3, 5},
|
|
||||||
"have +Inf in the middle": {1, 2, math.Inf(+1), 3},
|
|
||||||
}
|
|
||||||
for name, buckets := range testCases {
|
|
||||||
func() {
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r == nil {
|
|
||||||
t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
_ = NewHistogram(HistogramOpts{
|
|
||||||
Name: "test_histogram",
|
|
||||||
Help: "helpless",
|
|
||||||
Buckets: buckets,
|
|
||||||
})
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Intentionally adding +Inf here to test if that case is handled correctly.
|
|
||||||
// Also, getCumulativeCounts depends on it.
|
|
||||||
var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}
|
|
||||||
|
|
||||||
func TestHistogramConcurrency(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Skipping test in short mode.")
|
|
||||||
}
|
|
||||||
|
|
||||||
rand.Seed(42)
|
|
||||||
|
|
||||||
it := func(n uint32) bool {
|
|
||||||
mutations := int(n%1e4 + 1e4)
|
|
||||||
concLevel := int(n%5 + 1)
|
|
||||||
total := mutations * concLevel
|
|
||||||
|
|
||||||
var start, end sync.WaitGroup
|
|
||||||
start.Add(1)
|
|
||||||
end.Add(concLevel)
|
|
||||||
|
|
||||||
sum := NewHistogram(HistogramOpts{
|
|
||||||
Name: "test_histogram",
|
|
||||||
Help: "helpless",
|
|
||||||
Buckets: testBuckets,
|
|
||||||
})
|
|
||||||
|
|
||||||
allVars := make([]float64, total)
|
|
||||||
var sampleSum float64
|
|
||||||
for i := 0; i < concLevel; i++ {
|
|
||||||
vals := make([]float64, mutations)
|
|
||||||
for j := 0; j < mutations; j++ {
|
|
||||||
v := rand.NormFloat64()
|
|
||||||
vals[j] = v
|
|
||||||
allVars[i*mutations+j] = v
|
|
||||||
sampleSum += v
|
|
||||||
}
|
|
||||||
|
|
||||||
go func(vals []float64) {
|
|
||||||
start.Wait()
|
|
||||||
for _, v := range vals {
|
|
||||||
sum.Observe(v)
|
|
||||||
}
|
|
||||||
end.Done()
|
|
||||||
}(vals)
|
|
||||||
}
|
|
||||||
sort.Float64s(allVars)
|
|
||||||
start.Done()
|
|
||||||
end.Wait()
|
|
||||||
|
|
||||||
m := &dto.Metric{}
|
|
||||||
sum.Write(m)
|
|
||||||
if got, want := int(*m.Histogram.SampleCount), total; got != want {
|
|
||||||
t.Errorf("got sample count %d, want %d", got, want)
|
|
||||||
}
|
|
||||||
if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
|
|
||||||
t.Errorf("got sample sum %f, want %f", got, want)
|
|
||||||
}
|
|
||||||
|
|
||||||
wantCounts := getCumulativeCounts(allVars)
|
|
||||||
|
|
||||||
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
|
|
||||||
t.Errorf("got %d buckets in protobuf, want %d", got, want)
|
|
||||||
}
|
|
||||||
for i, wantBound := range testBuckets {
|
|
||||||
if i == len(testBuckets)-1 {
|
|
||||||
break // No +Inf bucket in protobuf.
|
|
||||||
}
|
|
||||||
if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound {
|
|
||||||
t.Errorf("got bound %f, want %f", gotBound, wantBound)
|
|
||||||
}
|
|
||||||
if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount {
|
|
||||||
t.Errorf("got count %d, want %d", gotCount, wantCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(it, nil); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHistogramVecConcurrency(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Skipping test in short mode.")
|
|
||||||
}
|
|
||||||
|
|
||||||
rand.Seed(42)
|
|
||||||
|
|
||||||
objectives := make([]float64, 0, len(DefObjectives))
|
|
||||||
for qu := range DefObjectives {
|
|
||||||
|
|
||||||
objectives = append(objectives, qu)
|
|
||||||
}
|
|
||||||
sort.Float64s(objectives)
|
|
||||||
|
|
||||||
it := func(n uint32) bool {
|
|
||||||
mutations := int(n%1e4 + 1e4)
|
|
||||||
concLevel := int(n%7 + 1)
|
|
||||||
vecLength := int(n%3 + 1)
|
|
||||||
|
|
||||||
var start, end sync.WaitGroup
|
|
||||||
start.Add(1)
|
|
||||||
end.Add(concLevel)
|
|
||||||
|
|
||||||
his := NewHistogramVec(
|
|
||||||
HistogramOpts{
|
|
||||||
Name: "test_histogram",
|
|
||||||
Help: "helpless",
|
|
||||||
Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)},
|
|
||||||
},
|
|
||||||
[]string{"label"},
|
|
||||||
)
|
|
||||||
|
|
||||||
allVars := make([][]float64, vecLength)
|
|
||||||
sampleSums := make([]float64, vecLength)
|
|
||||||
for i := 0; i < concLevel; i++ {
|
|
||||||
vals := make([]float64, mutations)
|
|
||||||
picks := make([]int, mutations)
|
|
||||||
for j := 0; j < mutations; j++ {
|
|
||||||
v := rand.NormFloat64()
|
|
||||||
vals[j] = v
|
|
||||||
pick := rand.Intn(vecLength)
|
|
||||||
picks[j] = pick
|
|
||||||
allVars[pick] = append(allVars[pick], v)
|
|
||||||
sampleSums[pick] += v
|
|
||||||
}
|
|
||||||
|
|
||||||
go func(vals []float64) {
|
|
||||||
start.Wait()
|
|
||||||
for i, v := range vals {
|
|
||||||
his.WithLabelValues(string('A' + picks[i])).Observe(v)
|
|
||||||
}
|
|
||||||
end.Done()
|
|
||||||
}(vals)
|
|
||||||
}
|
|
||||||
for _, vars := range allVars {
|
|
||||||
sort.Float64s(vars)
|
|
||||||
}
|
|
||||||
start.Done()
|
|
||||||
end.Wait()
|
|
||||||
|
|
||||||
for i := 0; i < vecLength; i++ {
|
|
||||||
m := &dto.Metric{}
|
|
||||||
s := his.WithLabelValues(string('A' + i))
|
|
||||||
s.Write(m)
|
|
||||||
|
|
||||||
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
|
|
||||||
t.Errorf("got %d buckets in protobuf, want %d", got, want)
|
|
||||||
}
|
|
||||||
if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want {
|
|
||||||
t.Errorf("got sample count %d, want %d", got, want)
|
|
||||||
}
|
|
||||||
if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
|
|
||||||
t.Errorf("got sample sum %f, want %f", got, want)
|
|
||||||
}
|
|
||||||
|
|
||||||
wantCounts := getCumulativeCounts(allVars[i])
|
|
||||||
|
|
||||||
for j, wantBound := range testBuckets {
|
|
||||||
if j == len(testBuckets)-1 {
|
|
||||||
break // No +Inf bucket in protobuf.
|
|
||||||
}
|
|
||||||
if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound {
|
|
||||||
t.Errorf("got bound %f, want %f", gotBound, wantBound)
|
|
||||||
}
|
|
||||||
if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount {
|
|
||||||
t.Errorf("got count %d, want %d", gotCount, wantCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := quick.Check(it, nil); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getCumulativeCounts(vars []float64) []uint64 {
|
|
||||||
counts := make([]uint64, len(testBuckets))
|
|
||||||
for _, v := range vars {
|
|
||||||
for i := len(testBuckets) - 1; i >= 0; i-- {
|
|
||||||
if v > testBuckets[i] {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
counts[i]++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return counts
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuckets(t *testing.T) {
	got := LinearBuckets(-15, 5, 6)
	want := []float64{-15, -10, -5, 0, 5, 10}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("linear buckets: got %v, want %v", got, want)
	}

	got = ExponentialBuckets(100, 1.2, 3)
	want = []float64{100, 120, 144}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("exponential buckets: got %v, want %v", got, want)
	}
}

@ -1,526 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
|
||||||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
|
||||||
// related should live. The functions here are just for avoiding
|
|
||||||
// breakage. Everything is deprecated.
|
|
||||||
|
|
||||||
const (
|
|
||||||
contentTypeHeader = "Content-Type"
|
|
||||||
contentLengthHeader = "Content-Length"
|
|
||||||
contentEncodingHeader = "Content-Encoding"
|
|
||||||
acceptEncodingHeader = "Accept-Encoding"
|
|
||||||
)
|
|
||||||
|
|
||||||
var bufPool sync.Pool
|
|
||||||
|
|
||||||
func getBuf() *bytes.Buffer {
|
|
||||||
buf := bufPool.Get()
|
|
||||||
if buf == nil {
|
|
||||||
return &bytes.Buffer{}
|
|
||||||
}
|
|
||||||
return buf.(*bytes.Buffer)
|
|
||||||
}
|
|
||||||
|
|
||||||
func giveBuf(buf *bytes.Buffer) {
|
|
||||||
buf.Reset()
|
|
||||||
bufPool.Put(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
|
||||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
|
||||||
// name).
|
|
||||||
//
|
|
||||||
// Deprecated: Please note the issues described in the doc comment of
|
|
||||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead
|
|
||||||
// (which is not instrumented).
|
|
||||||
func Handler() http.Handler {
|
|
||||||
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
|
||||||
}
|
|
||||||
|
|
||||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
|
||||||
//
|
|
||||||
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
|
||||||
func UninstrumentedHandler() http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
|
||||||
mfs, err := DefaultGatherer.Gather()
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
contentType := expfmt.Negotiate(req.Header)
|
|
||||||
buf := getBuf()
|
|
||||||
defer giveBuf(buf)
|
|
||||||
writer, encoding := decorateWriter(req, buf)
|
|
||||||
enc := expfmt.NewEncoder(writer, contentType)
|
|
||||||
var lastErr error
|
|
||||||
for _, mf := range mfs {
|
|
||||||
if err := enc.Encode(mf); err != nil {
|
|
||||||
lastErr = err
|
|
||||||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if closer, ok := writer.(io.Closer); ok {
|
|
||||||
closer.Close()
|
|
||||||
}
|
|
||||||
		if lastErr != nil && buf.Len() == 0 {
			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
header := w.Header()
|
|
||||||
header.Set(contentTypeHeader, string(contentType))
|
|
||||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
|
||||||
if encoding != "" {
|
|
||||||
header.Set(contentEncodingHeader, encoding)
|
|
||||||
}
|
|
||||||
w.Write(buf.Bytes())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
|
||||||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
|
||||||
// (which is empty if no compression is enabled).
|
|
||||||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
|
|
||||||
header := request.Header.Get(acceptEncodingHeader)
|
|
||||||
parts := strings.Split(header, ",")
|
|
||||||
for _, part := range parts {
|
|
||||||
part := strings.TrimSpace(part)
|
|
||||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
|
||||||
return gzip.NewWriter(writer), "gzip"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return writer, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
var instLabels = []string{"method", "code"}
|
|
||||||
|
|
||||||
type nower interface {
|
|
||||||
Now() time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type nowFunc func() time.Time
|
|
||||||
|
|
||||||
func (n nowFunc) Now() time.Time {
|
|
||||||
return n()
|
|
||||||
}
|
|
||||||
|
|
||||||
var now nower = nowFunc(func() time.Time {
|
|
||||||
return time.Now()
|
|
||||||
})
|
|
||||||
|
|
||||||
func nowSeries(t ...time.Time) nower {
|
|
||||||
return nowFunc(func() time.Time {
|
|
||||||
defer func() {
|
|
||||||
t = t[1:]
|
|
||||||
}()
|
|
||||||
|
|
||||||
return t[0]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
|
|
||||||
// registers four metric collectors (if not already done) and reports HTTP
|
|
||||||
// metrics to the (newly or already) registered collectors: http_requests_total
|
|
||||||
// (CounterVec), http_request_duration_microseconds (Summary),
|
|
||||||
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
|
|
||||||
// has a constant label named "handler" with the provided handlerName as
|
|
||||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
|
||||||
// (label name "method") and HTTP status code (label name "code").
|
|
||||||
//
|
|
||||||
// Deprecated: InstrumentHandler has several issues:
|
|
||||||
//
|
|
||||||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
|
||||||
// aggregation across multiple instances is required.
|
|
||||||
//
|
|
||||||
// - It uses microseconds as unit, which is deprecated and should be replaced by
|
|
||||||
// seconds.
|
|
||||||
//
|
|
||||||
// - The size of the request is calculated in a separate goroutine. Since this
|
|
||||||
// calculator requires access to the request header, it creates a race with
|
|
||||||
// any writes to the header performed during request handling.
|
|
||||||
// httputil.ReverseProxy is a prominent example for a handler
|
|
||||||
// performing such writes.
|
|
||||||
//
|
|
||||||
// - It has additional issues with HTTP/2, cf.
|
|
||||||
// https://github.com/prometheus/client_golang/issues/272.
|
|
||||||
//
|
|
||||||
// Upcoming versions of this package will provide ways of instrumenting HTTP
|
|
||||||
// handlers that are more flexible and have fewer issues. Please prefer direct
|
|
||||||
// instrumentation in the meantime.
|
|
||||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
|
||||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
|
||||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
|
||||||
// issues).
|
|
||||||
//
|
|
||||||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
|
||||||
// InstrumentHandler is.
|
|
||||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
|
||||||
return InstrumentHandlerFuncWithOpts(
|
|
||||||
SummaryOpts{
|
|
||||||
Subsystem: "http",
|
|
||||||
ConstLabels: Labels{"handler": handlerName},
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
},
|
|
||||||
handlerFunc,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
|
||||||
// issues) but provides more flexibility (at the cost of a more complex call
|
|
||||||
// syntax). As InstrumentHandler, this function registers four metric
|
|
||||||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
|
||||||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
|
||||||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
|
||||||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
|
||||||
// help string. The names of the variable labels of the http_requests_total
|
|
||||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
|
||||||
//
|
|
||||||
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
|
|
||||||
// behavior of InstrumentHandler:
|
|
||||||
//
|
|
||||||
// prometheus.InstrumentHandlerWithOpts(
|
|
||||||
// prometheus.SummaryOpts{
|
|
||||||
// Subsystem: "http",
|
|
||||||
// ConstLabels: prometheus.Labels{"handler": handlerName},
|
|
||||||
// },
|
|
||||||
// handler,
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
|
|
||||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
|
||||||
// and all its fields are set to the equally named fields in the provided
|
|
||||||
// SummaryOpts.
|
|
||||||
//
|
|
||||||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
|
||||||
// InstrumentHandler is.
|
|
||||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
|
||||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
|
||||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
|
||||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
|
||||||
// SummaryOpts are used.
|
|
||||||
//
|
|
||||||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
|
||||||
// as InstrumentHandler is.
|
|
||||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
|
||||||
reqCnt := NewCounterVec(
|
|
||||||
CounterOpts{
|
|
||||||
Namespace: opts.Namespace,
|
|
||||||
Subsystem: opts.Subsystem,
|
|
||||||
Name: "requests_total",
|
|
||||||
Help: "Total number of HTTP requests made.",
|
|
||||||
ConstLabels: opts.ConstLabels,
|
|
||||||
},
|
|
||||||
instLabels,
|
|
||||||
)
|
|
||||||
if err := Register(reqCnt); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
reqCnt = are.ExistingCollector.(*CounterVec)
|
|
||||||
} else {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "request_duration_microseconds"
|
|
||||||
opts.Help = "The HTTP request latencies in microseconds."
|
|
||||||
reqDur := NewSummary(opts)
|
|
||||||
if err := Register(reqDur); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
reqDur = are.ExistingCollector.(Summary)
|
|
||||||
} else {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "request_size_bytes"
|
|
||||||
opts.Help = "The HTTP request sizes in bytes."
|
|
||||||
reqSz := NewSummary(opts)
|
|
||||||
if err := Register(reqSz); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
reqSz = are.ExistingCollector.(Summary)
|
|
||||||
} else {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "response_size_bytes"
|
|
||||||
opts.Help = "The HTTP response sizes in bytes."
|
|
||||||
resSz := NewSummary(opts)
|
|
||||||
if err := Register(resSz); err != nil {
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
resSz = are.ExistingCollector.(Summary)
|
|
||||||
} else {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
delegate := &responseWriterDelegator{ResponseWriter: w}
|
|
||||||
out := computeApproximateRequestSize(r)
|
|
||||||
|
|
||||||
_, cn := w.(http.CloseNotifier)
|
|
||||||
_, fl := w.(http.Flusher)
|
|
||||||
_, hj := w.(http.Hijacker)
|
|
||||||
_, rf := w.(io.ReaderFrom)
|
|
||||||
var rw http.ResponseWriter
|
|
||||||
if cn && fl && hj && rf {
|
|
||||||
rw = &fancyResponseWriterDelegator{delegate}
|
|
||||||
} else {
|
|
||||||
rw = delegate
|
|
||||||
}
|
|
||||||
handlerFunc(rw, r)
|
|
||||||
|
|
||||||
elapsed := float64(time.Since(now)) / float64(time.Microsecond)
|
|
||||||
|
|
||||||
method := sanitizeMethod(r.Method)
|
|
||||||
code := sanitizeCode(delegate.status)
|
|
||||||
reqCnt.WithLabelValues(method, code).Inc()
|
|
||||||
reqDur.Observe(elapsed)
|
|
||||||
resSz.Observe(float64(delegate.written))
|
|
||||||
reqSz.Observe(float64(<-out))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func computeApproximateRequestSize(r *http.Request) <-chan int {
|
|
||||||
	// Get the URL length in the current goroutine to avoid a race condition:
	// a HandlerFunc running in parallel may modify the URL.
|
|
||||||
s := 0
|
|
||||||
if r.URL != nil {
|
|
||||||
s += len(r.URL.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
out := make(chan int, 1)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
s += len(r.Method)
|
|
||||||
s += len(r.Proto)
|
|
||||||
for name, values := range r.Header {
|
|
||||||
s += len(name)
|
|
||||||
for _, value := range values {
|
|
||||||
s += len(value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s += len(r.Host)
|
|
||||||
|
|
||||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
|
||||||
|
|
||||||
if r.ContentLength != -1 {
|
|
||||||
s += int(r.ContentLength)
|
|
||||||
}
|
|
||||||
out <- s
|
|
||||||
close(out)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
type responseWriterDelegator struct {
|
|
||||||
http.ResponseWriter
|
|
||||||
|
|
||||||
handler, method string
|
|
||||||
status int
|
|
||||||
written int64
|
|
||||||
wroteHeader bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
|
||||||
r.status = code
|
|
||||||
r.wroteHeader = true
|
|
||||||
r.ResponseWriter.WriteHeader(code)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
|
||||||
if !r.wroteHeader {
|
|
||||||
r.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
n, err := r.ResponseWriter.Write(b)
|
|
||||||
r.written += int64(n)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
type fancyResponseWriterDelegator struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
|
|
||||||
return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fancyResponseWriterDelegator) Flush() {
|
|
||||||
f.ResponseWriter.(http.Flusher).Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
|
||||||
return f.ResponseWriter.(http.Hijacker).Hijack()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
|
|
||||||
if !f.wroteHeader {
|
|
||||||
f.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
|
|
||||||
f.written += n
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func sanitizeMethod(m string) string {
|
|
||||||
switch m {
|
|
||||||
case "GET", "get":
|
|
||||||
return "get"
|
|
||||||
case "PUT", "put":
|
|
||||||
return "put"
|
|
||||||
case "HEAD", "head":
|
|
||||||
return "head"
|
|
||||||
case "POST", "post":
|
|
||||||
return "post"
|
|
||||||
case "DELETE", "delete":
|
|
||||||
return "delete"
|
|
||||||
case "CONNECT", "connect":
|
|
||||||
return "connect"
|
|
||||||
case "OPTIONS", "options":
|
|
||||||
return "options"
|
|
||||||
case "NOTIFY", "notify":
|
|
||||||
return "notify"
|
|
||||||
default:
|
|
||||||
return strings.ToLower(m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sanitizeCode(s int) string {
|
|
||||||
switch s {
|
|
||||||
case 100:
|
|
||||||
return "100"
|
|
||||||
case 101:
|
|
||||||
return "101"
|
|
||||||
|
|
||||||
case 200:
|
|
||||||
return "200"
|
|
||||||
case 201:
|
|
||||||
return "201"
|
|
||||||
case 202:
|
|
||||||
return "202"
|
|
||||||
case 203:
|
|
||||||
return "203"
|
|
||||||
case 204:
|
|
||||||
return "204"
|
|
||||||
case 205:
|
|
||||||
return "205"
|
|
||||||
case 206:
|
|
||||||
return "206"
|
|
||||||
|
|
||||||
case 300:
|
|
||||||
return "300"
|
|
||||||
case 301:
|
|
||||||
return "301"
|
|
||||||
case 302:
|
|
||||||
return "302"
|
|
||||||
case 304:
|
|
||||||
return "304"
|
|
||||||
case 305:
|
|
||||||
return "305"
|
|
||||||
case 307:
|
|
||||||
return "307"
|
|
||||||
|
|
||||||
case 400:
|
|
||||||
return "400"
|
|
||||||
case 401:
|
|
||||||
return "401"
|
|
||||||
case 402:
|
|
||||||
return "402"
|
|
||||||
case 403:
|
|
||||||
return "403"
|
|
||||||
case 404:
|
|
||||||
return "404"
|
|
||||||
case 405:
|
|
||||||
return "405"
|
|
||||||
case 406:
|
|
||||||
return "406"
|
|
||||||
case 407:
|
|
||||||
return "407"
|
|
||||||
case 408:
|
|
||||||
return "408"
|
|
||||||
case 409:
|
|
||||||
return "409"
|
|
||||||
case 410:
|
|
||||||
return "410"
|
|
||||||
case 411:
|
|
||||||
return "411"
|
|
||||||
case 412:
|
|
||||||
return "412"
|
|
||||||
case 413:
|
|
||||||
return "413"
|
|
||||||
case 414:
|
|
||||||
return "414"
|
|
||||||
case 415:
|
|
||||||
return "415"
|
|
||||||
case 416:
|
|
||||||
return "416"
|
|
||||||
case 417:
|
|
||||||
return "417"
|
|
||||||
case 418:
|
|
||||||
return "418"
|
|
||||||
|
|
||||||
case 500:
|
|
||||||
return "500"
|
|
||||||
case 501:
|
|
||||||
return "501"
|
|
||||||
case 502:
|
|
||||||
return "502"
|
|
||||||
case 503:
|
|
||||||
return "503"
|
|
||||||
case 504:
|
|
||||||
return "504"
|
|
||||||
case 505:
|
|
||||||
return "505"
|
|
||||||
|
|
||||||
case 428:
|
|
||||||
return "428"
|
|
||||||
case 429:
|
|
||||||
return "429"
|
|
||||||
case 431:
|
|
||||||
return "431"
|
|
||||||
case 511:
|
|
||||||
return "511"
|
|
||||||
|
|
||||||
default:
|
|
||||||
return strconv.Itoa(s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,154 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
type respBody string
|
|
||||||
|
|
||||||
func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusTeapot)
|
|
||||||
w.Write([]byte(b))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInstrumentHandler(t *testing.T) {
|
|
||||||
defer func(n nower) {
|
|
||||||
now = n.(nower)
|
|
||||||
}(now)
|
|
||||||
|
|
||||||
instant := time.Now()
|
|
||||||
end := instant.Add(30 * time.Second)
|
|
||||||
now = nowSeries(instant, end)
|
|
||||||
respBody := respBody("Howdy there!")
|
|
||||||
|
|
||||||
hndlr := InstrumentHandler("test-handler", respBody)
|
|
||||||
|
|
||||||
opts := SummaryOpts{
|
|
||||||
Subsystem: "http",
|
|
||||||
ConstLabels: Labels{"handler": "test-handler"},
|
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
|
||||||
}
|
|
||||||
|
|
||||||
reqCnt := NewCounterVec(
|
|
||||||
CounterOpts{
|
|
||||||
Namespace: opts.Namespace,
|
|
||||||
Subsystem: opts.Subsystem,
|
|
||||||
Name: "requests_total",
|
|
||||||
Help: "Total number of HTTP requests made.",
|
|
||||||
ConstLabels: opts.ConstLabels,
|
|
||||||
},
|
|
||||||
instLabels,
|
|
||||||
)
|
|
||||||
err := Register(reqCnt)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected reqCnt to be registered already")
|
|
||||||
}
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
reqCnt = are.ExistingCollector.(*CounterVec)
|
|
||||||
} else {
|
|
||||||
t.Fatal("unexpected registration error:", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "request_duration_microseconds"
|
|
||||||
opts.Help = "The HTTP request latencies in microseconds."
|
|
||||||
reqDur := NewSummary(opts)
|
|
||||||
err = Register(reqDur)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected reqDur to be registered already")
|
|
||||||
}
|
|
||||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
|
||||||
reqDur = are.ExistingCollector.(Summary)
|
|
||||||
} else {
|
|
||||||
t.Fatal("unexpected registration error:", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "request_size_bytes"
|
|
||||||
opts.Help = "The HTTP request sizes in bytes."
|
|
||||||
reqSz := NewSummary(opts)
|
|
||||||
err = Register(reqSz)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected reqSz to be registered already")
|
|
||||||
}
|
|
||||||
if _, ok := err.(AlreadyRegisteredError); !ok {
|
|
||||||
t.Fatal("unexpected registration error:", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
opts.Name = "response_size_bytes"
|
|
||||||
opts.Help = "The HTTP response sizes in bytes."
|
|
||||||
resSz := NewSummary(opts)
|
|
||||||
err = Register(resSz)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected resSz to be registered already")
|
|
||||||
}
|
|
||||||
if _, ok := err.(AlreadyRegisteredError); !ok {
|
|
||||||
t.Fatal("unexpected registration error:", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
reqCnt.Reset()
|
|
||||||
|
|
||||||
resp := httptest.NewRecorder()
|
|
||||||
req := &http.Request{
|
|
||||||
Method: "GET",
|
|
||||||
}
|
|
||||||
|
|
||||||
hndlr.ServeHTTP(resp, req)
|
|
||||||
|
|
||||||
if resp.Code != http.StatusTeapot {
|
|
||||||
t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
|
|
||||||
}
|
|
||||||
if string(resp.Body.Bytes()) != "Howdy there!" {
|
|
||||||
t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes()))
|
|
||||||
}
|
|
||||||
|
|
||||||
out := &dto.Metric{}
|
|
||||||
reqDur.Write(out)
|
|
||||||
if want, got := "test-handler", out.Label[0].GetValue(); want != got {
|
|
||||||
t.Errorf("want label value %q in reqDur, got %q", want, got)
|
|
||||||
}
|
|
||||||
if want, got := uint64(1), out.Summary.GetSampleCount(); want != got {
|
|
||||||
t.Errorf("want sample count %d in reqDur, got %d", want, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
out.Reset()
|
|
||||||
if want, got := 1, len(reqCnt.children); want != got {
|
|
||||||
t.Errorf("want %d children in reqCnt, got %d", want, got)
|
|
||||||
}
|
|
||||||
cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
cnt.Write(out)
|
|
||||||
if want, got := "418", out.Label[0].GetValue(); want != got {
|
|
||||||
t.Errorf("want label value %q in reqCnt, got %q", want, got)
|
|
||||||
}
|
|
||||||
if want, got := "test-handler", out.Label[1].GetValue(); want != got {
|
|
||||||
t.Errorf("want label value %q in reqCnt, got %q", want, got)
|
|
||||||
}
|
|
||||||
if want, got := "get", out.Label[2].GetValue(); want != got {
|
|
||||||
t.Errorf("want label value %q in reqCnt, got %q", want, got)
|
|
||||||
}
|
|
||||||
if out.Counter == nil {
|
|
||||||
t.Fatal("expected non-nil counter in reqCnt")
|
|
||||||
}
|
|
||||||
if want, got := 1., out.Counter.GetValue(); want != got {
|
|
||||||
t.Errorf("want reqCnt of %f, got %f", want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,166 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
const separatorByte byte = 255
|
|
||||||
|
|
||||||
// A Metric models a single sample value with its meta data being exported to
|
|
||||||
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
|
|
||||||
// Histogram, Summary, and Untyped.
|
|
||||||
type Metric interface {
|
|
||||||
// Desc returns the descriptor for the Metric. This method idempotently
|
|
||||||
// returns the same descriptor throughout the lifetime of the
|
|
||||||
// Metric. The returned descriptor is immutable by contract. A Metric
|
|
||||||
// unable to describe itself must return an invalid descriptor (created
|
|
||||||
// with NewInvalidDesc).
|
|
||||||
Desc() *Desc
|
|
||||||
// Write encodes the Metric into a "Metric" Protocol Buffer data
|
|
||||||
// transmission object.
|
|
||||||
//
|
|
||||||
// Metric implementations must observe concurrency safety as reads of
|
|
||||||
// this metric may occur at any time, and any blocking occurs at the
|
|
||||||
// expense of total performance of rendering all registered
|
|
||||||
// metrics. Ideally, Metric implementations should support concurrent
|
|
||||||
// readers.
|
|
||||||
//
|
|
||||||
// While populating dto.Metric, it is the responsibility of the
|
|
||||||
// implementation to ensure validity of the Metric protobuf (like valid
|
|
||||||
// UTF-8 strings or syntactically valid metric and label names). It is
|
|
||||||
// recommended to sort labels lexicographically. (Implementers may find
|
|
||||||
// LabelPairSorter useful for that.) Callers of Write should still make
|
|
||||||
// sure of sorting if they depend on it.
|
|
||||||
Write(*dto.Metric) error
|
|
||||||
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
|
||||||
// dto.Metric protobuf to save allocations has disappeared. The
|
|
||||||
// signature of this method should be changed to "Write() (*dto.Metric,
|
|
||||||
// error)".
|
|
||||||
}
|
|
||||||
|
|
||||||
// Opts bundles the options for creating most Metric types. Each metric
|
|
||||||
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises).
|
|
||||||
//
|
|
||||||
// It is mandatory to set Name and Help to a non-empty string. All other fields
|
|
||||||
// are optional and can safely be left at their zero value.
|
|
||||||
type Opts struct {
|
|
||||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
|
||||||
// name of the Metric (created by joining these components with
|
|
||||||
// "_"). Only Name is mandatory, the others merely help structuring the
|
|
||||||
// name. Note that the fully-qualified name of the metric must be a
|
|
||||||
// valid Prometheus metric name.
|
|
||||||
Namespace string
|
|
||||||
Subsystem string
|
|
||||||
Name string
|
|
||||||
|
|
||||||
// Help provides information about this metric. Mandatory!
|
|
||||||
//
|
|
||||||
// Metrics with the same fully-qualified name must have the same Help
|
|
||||||
// string.
|
|
||||||
Help string
|
|
||||||
|
|
||||||
// ConstLabels are used to attach fixed labels to this metric. Metrics
|
|
||||||
// with the same fully-qualified name must have the same label names in
|
|
||||||
// their ConstLabels.
|
|
||||||
//
|
|
||||||
// Note that in most cases, labels have a value that varies during the
|
|
||||||
// lifetime of a process. Those labels are usually managed with a metric
|
|
||||||
// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
|
|
||||||
// serve only special purposes. One is for the special case where the
|
|
||||||
// value of a label does not change during the lifetime of a process,
|
|
||||||
// e.g. if the revision of the running binary is put into a
|
|
||||||
// label. Another, more advanced purpose is if more than one Collector
|
|
||||||
// needs to collect Metrics with the same fully-qualified name. In that
|
|
||||||
// case, those Metrics must differ in the values of their
|
|
||||||
// ConstLabels. See the Collector examples.
|
|
||||||
//
|
|
||||||
// If the value of a label never changes (not even between binaries),
|
|
||||||
// that label most likely should not be a label at all (but part of the
|
|
||||||
// metric name).
|
|
||||||
ConstLabels Labels
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildFQName joins the given three name components by "_". Empty name
|
|
||||||
// components are ignored. If the name parameter itself is empty, an empty
|
|
||||||
// string is returned, no matter what. Metric implementations included in this
|
|
||||||
// library use this function internally to generate the fully-qualified metric
|
|
||||||
// name from the name component in their Opts. Users of the library will only
|
|
||||||
// need this function if they implement their own Metric or instantiate a Desc
|
|
||||||
// (with NewDesc) directly.
|
|
||||||
func BuildFQName(namespace, subsystem, name string) string {
	if name == "" {
		return ""
	}
	switch {
	case namespace != "" && subsystem != "":
		return strings.Join([]string{namespace, subsystem, name}, "_")
	case namespace != "":
		return strings.Join([]string{namespace, name}, "_")
	case subsystem != "":
		return strings.Join([]string{subsystem, name}, "_")
	}
	return name
}

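// For illustration (mirroring the cases exercised by TestBuildFQName):
//
//	BuildFQName("http", "server", "requests_total") // "http_server_requests_total"
//	BuildFQName("", "server", "requests_total")     // "server_requests_total"
//	BuildFQName("http", "server", "")               // ""
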
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
|
|
||||||
// dto.LabelPair pointers. This is useful for implementing the Write method of
|
|
||||||
// custom metrics.
|
|
||||||
type LabelPairSorter []*dto.LabelPair
|
|
||||||
|
|
||||||
func (s LabelPairSorter) Len() int {
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s LabelPairSorter) Swap(i, j int) {
|
|
||||||
s[i], s[j] = s[j], s[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s LabelPairSorter) Less(i, j int) bool {
|
|
||||||
return s[i].GetName() < s[j].GetName()
|
|
||||||
}
|
|
||||||
|
|
||||||
type hashSorter []uint64
|
|
||||||
|
|
||||||
func (s hashSorter) Len() int {
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s hashSorter) Swap(i, j int) {
|
|
||||||
s[i], s[j] = s[j], s[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s hashSorter) Less(i, j int) bool {
|
|
||||||
return s[i] < s[j]
|
|
||||||
}
|
|
||||||
|
|
||||||
type invalidMetric struct {
|
|
||||||
desc *Desc
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewInvalidMetric returns a metric whose Write method always returns the
|
|
||||||
// provided error. It is useful if a Collector finds itself unable to collect
|
|
||||||
// a metric and wishes to report an error to the registry.
|
|
||||||
func NewInvalidMetric(desc *Desc, err error) Metric {
|
|
||||||
return &invalidMetric{desc, err}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *invalidMetric) Desc() *Desc { return m.desc }
|
|
||||||
|
|
||||||
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
|
|
||||||
|
|
@ -1,35 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
func TestBuildFQName(t *testing.T) {
|
|
||||||
scenarios := []struct{ namespace, subsystem, name, result string }{
|
|
||||||
{"a", "b", "c", "a_b_c"},
|
|
||||||
{"", "b", "c", "b_c"},
|
|
||||||
{"a", "", "c", "a_c"},
|
|
||||||
{"", "", "c", "c"},
|
|
||||||
{"a", "b", "", ""},
|
|
||||||
{"a", "", "", ""},
|
|
||||||
{"", "b", "", ""},
|
|
||||||
{" ", "", "", ""},
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, s := range scenarios {
|
|
||||||
if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got {
|
|
||||||
t.Errorf("%d. want %s, got %s", i, want, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,140 +0,0 @@
|
||||||
// Copyright 2015 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import "github.com/prometheus/procfs"
|
|
||||||
|
|
||||||
type processCollector struct {
|
|
||||||
pid int
|
|
||||||
collectFn func(chan<- Metric)
|
|
||||||
pidFn func() (int, error)
|
|
||||||
cpuTotal *Desc
|
|
||||||
openFDs, maxFDs *Desc
|
|
||||||
vsize, rss *Desc
|
|
||||||
startTime *Desc
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewProcessCollector returns a collector which exports the current state of
// process metrics including cpu, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace.
func NewProcessCollector(pid int, namespace string) Collector {
	return NewProcessCollectorPIDFn(
		func() (int, error) { return pid, nil },
		namespace,
	)
}

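// A short registration sketch (not part of the original file); it mirrors the
// call made in this repository's process collector test. The standard
// library's os package and MustRegister (the default-registry helper) are
// assumed from elsewhere.
//
//	MustRegister(NewProcessCollector(os.Getpid(), ""))
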
// NewProcessCollectorPIDFn returns a collector which exports the current state
|
|
||||||
// of process metrics including cpu, memory and file descriptor usage as well
|
|
||||||
// as the process start time under the given namespace. The given pidFn is
|
|
||||||
// called on each collect and is used to determine the process to export
|
|
||||||
// metrics for.
|
|
||||||
func NewProcessCollectorPIDFn(
|
|
||||||
pidFn func() (int, error),
|
|
||||||
namespace string,
|
|
||||||
) Collector {
|
|
||||||
ns := ""
|
|
||||||
if len(namespace) > 0 {
|
|
||||||
ns = namespace + "_"
|
|
||||||
}
|
|
||||||
|
|
||||||
c := processCollector{
|
|
||||||
pidFn: pidFn,
|
|
||||||
collectFn: func(chan<- Metric) {},
|
|
||||||
|
|
||||||
cpuTotal: NewDesc(
|
|
||||||
ns+"process_cpu_seconds_total",
|
|
||||||
"Total user and system CPU time spent in seconds.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
openFDs: NewDesc(
|
|
||||||
ns+"process_open_fds",
|
|
||||||
"Number of open file descriptors.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
maxFDs: NewDesc(
|
|
||||||
ns+"process_max_fds",
|
|
||||||
"Maximum number of open file descriptors.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
vsize: NewDesc(
|
|
||||||
ns+"process_virtual_memory_bytes",
|
|
||||||
"Virtual memory size in bytes.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
rss: NewDesc(
|
|
||||||
ns+"process_resident_memory_bytes",
|
|
||||||
"Resident memory size in bytes.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
startTime: NewDesc(
|
|
||||||
ns+"process_start_time_seconds",
|
|
||||||
"Start time of the process since unix epoch in seconds.",
|
|
||||||
nil, nil,
|
|
||||||
),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set up process metric collection if supported by the runtime.
|
|
||||||
if _, err := procfs.NewStat(); err == nil {
|
|
||||||
c.collectFn = c.processCollect
|
|
||||||
}
|
|
||||||
|
|
||||||
return &c
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describe returns all descriptions of the collector.
|
|
||||||
func (c *processCollector) Describe(ch chan<- *Desc) {
|
|
||||||
ch <- c.cpuTotal
|
|
||||||
ch <- c.openFDs
|
|
||||||
ch <- c.maxFDs
|
|
||||||
ch <- c.vsize
|
|
||||||
ch <- c.rss
|
|
||||||
ch <- c.startTime
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect returns the current state of all metrics of the collector.
|
|
||||||
func (c *processCollector) Collect(ch chan<- Metric) {
|
|
||||||
c.collectFn(ch)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
|
|
||||||
// client allows users to configure the error behavior.
|
|
||||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
|
||||||
pid, err := c.pidFn()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err := procfs.NewProc(pid)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if stat, err := p.NewStat(); err == nil {
|
|
||||||
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
|
|
||||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
|
|
||||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
|
|
||||||
if startTime, err := stat.StartTime(); err == nil {
|
|
||||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fds, err := p.FileDescriptorsLen(); err == nil {
|
|
||||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
|
|
||||||
}
|
|
||||||
|
|
||||||
if limits, err := p.NewLimits(); err == nil {
|
|
||||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,58 +0,0 @@
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
"github.com/prometheus/procfs"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestProcessCollector(t *testing.T) {
|
|
||||||
if _, err := procfs.Self(); err != nil {
|
|
||||||
t.Skipf("skipping TestProcessCollector, procfs not available: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
registry := NewRegistry()
|
|
||||||
if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := registry.Register(NewProcessCollectorPIDFn(
|
|
||||||
func() (int, error) { return os.Getpid(), nil }, "foobar"),
|
|
||||||
); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
mfs, err := registry.Gather()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
for _, mf := range mfs {
|
|
||||||
if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, re := range []*regexp.Regexp{
|
|
||||||
regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"),
|
|
||||||
regexp.MustCompile("\nprocess_max_fds [1-9]"),
|
|
||||||
regexp.MustCompile("\nprocess_open_fds [1-9]"),
|
|
||||||
regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"),
|
|
||||||
regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"),
|
|
||||||
regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"),
|
|
||||||
regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"),
|
|
||||||
regexp.MustCompile("\nfoobar_process_max_fds [1-9]"),
|
|
||||||
regexp.MustCompile("\nfoobar_process_open_fds [1-9]"),
|
|
||||||
regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"),
|
|
||||||
regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"),
|
|
||||||
regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"),
|
|
||||||
} {
|
|
||||||
if !re.Match(buf.Bytes()) {
|
|
||||||
t.Errorf("want body to match %s\n%s", re, buf.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,201 +0,0 @@
|
||||||
// Copyright 2016 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Copyright (c) 2013, The Prometheus Authors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
// Package promhttp contains functions to create http.Handler instances to
// expose Prometheus metrics via HTTP. In later versions of this package, it
// will also contain tooling to instrument instances of http.Handler and
// http.RoundTripper.
//
// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
// you can create a handler for a custom registry or anything that implements
// the Gatherer interface. It also allows you to create handlers that act
// differently on errors, or that log errors.
package promhttp

import (
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
contentTypeHeader = "Content-Type"
|
|
||||||
contentLengthHeader = "Content-Length"
|
|
||||||
contentEncodingHeader = "Content-Encoding"
|
|
||||||
acceptEncodingHeader = "Accept-Encoding"
|
|
||||||
)
|
|
||||||
|
|
||||||
var bufPool sync.Pool
|
|
||||||
|
|
||||||
func getBuf() *bytes.Buffer {
|
|
||||||
buf := bufPool.Get()
|
|
||||||
if buf == nil {
|
|
||||||
return &bytes.Buffer{}
|
|
||||||
}
|
|
||||||
return buf.(*bytes.Buffer)
|
|
||||||
}
|
|
||||||
|
|
||||||
func giveBuf(buf *bytes.Buffer) {
|
|
||||||
buf.Reset()
|
|
||||||
bufPool.Put(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
|
|
||||||
// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
|
|
||||||
// error, no error logging, and compression if requested by the client.
|
|
||||||
//
|
|
||||||
// If you want to create a Handler for the DefaultGatherer with different
|
|
||||||
// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
|
|
||||||
// your desired HandlerOpts.
|
|
||||||
func Handler() http.Handler {
|
|
||||||
return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
|
|
||||||
// of the Handler is defined by the provided HandlerOpts.
|
|
||||||
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
|
||||||
mfs, err := reg.Gather()
|
|
||||||
if err != nil {
|
|
||||||
if opts.ErrorLog != nil {
|
|
||||||
opts.ErrorLog.Println("error gathering metrics:", err)
|
|
||||||
}
|
|
||||||
switch opts.ErrorHandling {
|
|
||||||
case PanicOnError:
|
|
||||||
panic(err)
|
|
||||||
case ContinueOnError:
|
|
||||||
if len(mfs) == 0 {
|
|
||||||
http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case HTTPErrorOnError:
|
|
||||||
http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
contentType := expfmt.Negotiate(req.Header)
|
|
||||||
buf := getBuf()
|
|
||||||
defer giveBuf(buf)
|
|
||||||
writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
|
|
||||||
enc := expfmt.NewEncoder(writer, contentType)
|
|
||||||
var lastErr error
|
|
||||||
for _, mf := range mfs {
|
|
||||||
if err := enc.Encode(mf); err != nil {
|
|
||||||
lastErr = err
|
|
||||||
if opts.ErrorLog != nil {
|
|
||||||
opts.ErrorLog.Println("error encoding metric family:", err)
|
|
||||||
}
|
|
||||||
switch opts.ErrorHandling {
|
|
||||||
case PanicOnError:
|
|
||||||
panic(err)
|
|
||||||
case ContinueOnError:
|
|
||||||
// Handled later.
|
|
||||||
case HTTPErrorOnError:
|
|
||||||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if closer, ok := writer.(io.Closer); ok {
|
|
||||||
closer.Close()
|
|
||||||
}
|
|
||||||
if lastErr != nil && buf.Len() == 0 {
|
|
||||||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
header := w.Header()
|
|
||||||
header.Set(contentTypeHeader, string(contentType))
|
|
||||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
|
||||||
if encoding != "" {
|
|
||||||
header.Set(contentEncodingHeader, encoding)
|
|
||||||
}
|
|
||||||
w.Write(buf.Bytes())
|
|
||||||
// TODO(beorn7): Consider streaming serving of metrics.
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandlerErrorHandling defines how a Handler serving metrics will handle
|
|
||||||
// errors.
|
|
||||||
type HandlerErrorHandling int
|
|
||||||
|
|
||||||
// These constants cause handlers serving metrics to behave as described if
|
|
||||||
// errors are encountered.
|
|
||||||
const (
|
|
||||||
// Serve an HTTP status code 500 upon the first error
|
|
||||||
// encountered. Report the error message in the body.
|
|
||||||
HTTPErrorOnError HandlerErrorHandling = iota
|
|
||||||
// Ignore errors and try to serve as many metrics as possible. However,
|
|
||||||
// if no metrics can be served, serve an HTTP status code 500 and the
|
|
||||||
// last error message in the body. Only use this in deliberate "best
|
|
||||||
// effort" metrics collection scenarios. It is recommended to at least
|
|
||||||
// log errors (by providing an ErrorLog in HandlerOpts) to not mask
|
|
||||||
// errors completely.
|
|
||||||
ContinueOnError
|
|
||||||
// Panic upon the first error encountered (useful for "crash only" apps).
|
|
||||||
PanicOnError
|
|
||||||
)
|
|
||||||
|
|
||||||
// Logger is the minimal interface HandlerOpts needs for logging. Note that
|
|
||||||
// log.Logger from the standard library implements this interface, and it is
|
|
||||||
// easy to implement by custom loggers, if they don't do so already anyway.
|
|
||||||
type Logger interface {
|
|
||||||
Println(v ...interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
|
|
||||||
// zero value of HandlerOpts is a reasonable default.
|
|
||||||
type HandlerOpts struct {
|
|
||||||
// ErrorLog specifies an optional logger for errors collecting and
|
|
||||||
// serving metrics. If nil, errors are not logged at all.
|
|
||||||
ErrorLog Logger
|
|
||||||
// ErrorHandling defines how errors are handled. Note that errors are
|
|
||||||
// logged regardless of the configured ErrorHandling provided ErrorLog
|
|
||||||
// is not nil.
|
|
||||||
ErrorHandling HandlerErrorHandling
|
|
||||||
// If DisableCompression is true, the handler will never compress the
|
|
||||||
// response, even if requested by the client.
|
|
||||||
DisableCompression bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
    if compressionDisabled {
        return writer, ""
    }
    header := request.Header.Get(acceptEncodingHeader)
    parts := strings.Split(header, ",")
    for _, part := range parts {
        part := strings.TrimSpace(part)
        if part == "gzip" || strings.HasPrefix(part, "gzip;") {
            return gzip.NewWriter(writer), "gzip"
        }
    }
    return writer, ""
}

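// A minimal, illustrative sketch of serving a custom registry with HandlerFor
// and the options defined above; the registry, counter name, and listen
// address are assumptions made for the sketch, not part of this package.
package main

import (
    "log"
    "net/http"
    "os"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    requestsTotal := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "myapp_requests_total",
        Help: "Total number of handled requests.",
    })
    reg.MustRegister(requestsTotal)

    // Expose only the custom registry; log gathering/encoding errors but keep
    // serving whatever metrics could still be collected.
    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
        ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
        ErrorHandling: promhttp.ContinueOnError,
    }))
    log.Fatal(http.ListenAndServe(":8080", nil))
}
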
@ -1,137 +0,0 @@
|
||||||
// Copyright 2016 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Copyright (c) 2013, The Prometheus Authors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
package promhttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
type errorCollector struct{}
|
|
||||||
|
|
||||||
func (e errorCollector) Describe(ch chan<- *prometheus.Desc) {
|
|
||||||
ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e errorCollector) Collect(ch chan<- prometheus.Metric) {
|
|
||||||
ch <- prometheus.NewInvalidMetric(
|
|
||||||
prometheus.NewDesc("invalid_metric", "not helpful", nil, nil),
|
|
||||||
errors.New("collect error"),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHandlerErrorHandling(t *testing.T) {
|
|
||||||
|
|
||||||
// Create a registry that collects a MetricFamily with two elements,
|
|
||||||
// another with one, and reports an error.
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
|
|
||||||
cnt := prometheus.NewCounter(prometheus.CounterOpts{
|
|
||||||
Name: "the_count",
|
|
||||||
Help: "Ah-ah-ah! Thunder and lightning!",
|
|
||||||
})
|
|
||||||
reg.MustRegister(cnt)
|
|
||||||
|
|
||||||
cntVec := prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Name: "name",
|
|
||||||
Help: "docstring",
|
|
||||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
|
||||||
},
|
|
||||||
[]string{"labelname"},
|
|
||||||
)
|
|
||||||
cntVec.WithLabelValues("val1").Inc()
|
|
||||||
cntVec.WithLabelValues("val2").Inc()
|
|
||||||
reg.MustRegister(cntVec)
|
|
||||||
|
|
||||||
reg.MustRegister(errorCollector{})
|
|
||||||
|
|
||||||
logBuf := &bytes.Buffer{}
|
|
||||||
logger := log.New(logBuf, "", 0)
|
|
||||||
|
|
||||||
writer := httptest.NewRecorder()
|
|
||||||
request, _ := http.NewRequest("GET", "/", nil)
|
|
||||||
request.Header.Add("Accept", "test/plain")
|
|
||||||
|
|
||||||
errorHandler := HandlerFor(reg, HandlerOpts{
|
|
||||||
ErrorLog: logger,
|
|
||||||
ErrorHandling: HTTPErrorOnError,
|
|
||||||
})
|
|
||||||
continueHandler := HandlerFor(reg, HandlerOpts{
|
|
||||||
ErrorLog: logger,
|
|
||||||
ErrorHandling: ContinueOnError,
|
|
||||||
})
|
|
||||||
panicHandler := HandlerFor(reg, HandlerOpts{
|
|
||||||
ErrorLog: logger,
|
|
||||||
ErrorHandling: PanicOnError,
|
|
||||||
})
|
|
||||||
wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
|
|
||||||
`
|
|
||||||
wantErrorBody := `An error has occurred during metrics gathering:
|
|
||||||
|
|
||||||
error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
|
|
||||||
`
|
|
||||||
wantOKBody := `# HELP name docstring
|
|
||||||
# TYPE name counter
|
|
||||||
name{constname="constvalue",labelname="val1"} 1
|
|
||||||
name{constname="constvalue",labelname="val2"} 1
|
|
||||||
# HELP the_count Ah-ah-ah! Thunder and lightning!
|
|
||||||
# TYPE the_count counter
|
|
||||||
the_count 0
|
|
||||||
`
|
|
||||||
|
|
||||||
errorHandler.ServeHTTP(writer, request)
|
|
||||||
if got, want := writer.Code, http.StatusInternalServerError; got != want {
|
|
||||||
t.Errorf("got HTTP status code %d, want %d", got, want)
|
|
||||||
}
|
|
||||||
if got := logBuf.String(); got != wantMsg {
|
|
||||||
t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg)
|
|
||||||
}
|
|
||||||
if got := writer.Body.String(); got != wantErrorBody {
|
|
||||||
t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody)
|
|
||||||
}
|
|
||||||
logBuf.Reset()
|
|
||||||
writer.Body.Reset()
|
|
||||||
writer.Code = http.StatusOK
|
|
||||||
|
|
||||||
continueHandler.ServeHTTP(writer, request)
|
|
||||||
if got, want := writer.Code, http.StatusOK; got != want {
|
|
||||||
t.Errorf("got HTTP status code %d, want %d", got, want)
|
|
||||||
}
|
|
||||||
if got := logBuf.String(); got != wantMsg {
|
|
||||||
t.Errorf("got log message %q, want %q", got, wantMsg)
|
|
||||||
}
|
|
||||||
if got := writer.Body.String(); got != wantOKBody {
|
|
||||||
t.Errorf("got body %q, want %q", got, wantOKBody)
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err := recover(); err == nil {
|
|
||||||
t.Error("expected panic from panicHandler")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
panicHandler.ServeHTTP(writer, request)
|
|
||||||
}
|
|
||||||
|
|
@ -1,83 +0,0 @@
|
||||||
// Copyright 2016 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Copyright (c) 2013, The Prometheus Authors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
package push_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/push"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
completionTime = prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Name: "db_backup_last_completion_timestamp_seconds",
|
|
||||||
Help: "The timestamp of the last completion of a DB backup, successful or not.",
|
|
||||||
})
|
|
||||||
successTime = prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Name: "db_backup_last_success_timestamp_seconds",
|
|
||||||
Help: "The timestamp of the last successful completion of a DB backup.",
|
|
||||||
})
|
|
||||||
duration = prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Name: "db_backup_duration_seconds",
|
|
||||||
Help: "The duration of the last DB backup in seconds.",
|
|
||||||
})
|
|
||||||
records = prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Name: "db_backup_records_processed",
|
|
||||||
Help: "The number of records processed in the last DB backup.",
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
func performBackup() (int, error) {
|
|
||||||
// Perform the backup and return the number of backed up records and any
|
|
||||||
// applicable error.
|
|
||||||
// ...
|
|
||||||
return 42, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleAddFromGatherer() {
|
|
||||||
registry := prometheus.NewRegistry()
|
|
||||||
registry.MustRegister(completionTime, duration, records)
|
|
||||||
// Note that successTime is not registered at this time.
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
n, err := performBackup()
|
|
||||||
records.Set(float64(n))
|
|
||||||
duration.Set(time.Since(start).Seconds())
|
|
||||||
completionTime.SetToCurrentTime()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("DB backup failed:", err)
|
|
||||||
} else {
|
|
||||||
// Only now register successTime.
|
|
||||||
registry.MustRegister(successTime)
|
|
||||||
successTime.SetToCurrentTime()
|
|
||||||
}
|
|
||||||
// AddFromGatherer is used here rather than FromGatherer to not delete a
|
|
||||||
// previously pushed success timestamp in case of a failure of this
|
|
||||||
// backup.
|
|
||||||
if err := push.AddFromGatherer(
|
|
||||||
"db_backup", nil,
|
|
||||||
"http://pushgateway:9091",
|
|
||||||
registry,
|
|
||||||
); err != nil {
|
|
||||||
fmt.Println("Could not push to Pushgateway:", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,36 +0,0 @@
|
||||||
// Copyright 2016 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package push_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/push"
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExampleCollectors() {
|
|
||||||
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Name: "db_backup_last_completion_timestamp_seconds",
|
|
||||||
Help: "The timestamp of the last successful completion of a DB backup.",
|
|
||||||
})
|
|
||||||
completionTime.SetToCurrentTime()
|
|
||||||
if err := push.Collectors(
|
|
||||||
"db_backup", push.HostnameGroupingKey(),
|
|
||||||
"http://pushgateway:9091",
|
|
||||||
completionTime,
|
|
||||||
); err != nil {
|
|
||||||
fmt.Println("Could not push completion time to Pushgateway:", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,172 +0,0 @@
|
||||||
// Copyright 2015 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Copyright (c) 2013, The Prometheus Authors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
// Package push provides functions to push metrics to a Pushgateway. The metrics
|
|
||||||
// to push are either collected from a provided registry, or from explicitly
|
|
||||||
// listed collectors.
|
|
||||||
//
|
|
||||||
// See the documentation of the Pushgateway to understand the meaning of the
|
|
||||||
// grouping parameters and the differences between push.Registry and
|
|
||||||
// push.Collectors on the one hand and push.AddRegistry and push.AddCollectors
|
|
||||||
// on the other hand: https://github.com/prometheus/pushgateway
|
|
||||||
package push
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
"github.com/prometheus/common/model"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
const contentTypeHeader = "Content-Type"
|
|
||||||
|
|
||||||
// FromGatherer triggers a metric collection by the provided Gatherer (which is
|
|
||||||
// usually implemented by a prometheus.Registry) and pushes all gathered metrics
|
|
||||||
// to the Pushgateway specified by url, using the provided job name and the
|
|
||||||
// (optional) further grouping labels (the grouping map may be nil). See the
|
|
||||||
// Pushgateway documentation for detailed implications of the job and other
|
|
||||||
// grouping labels. Neither the job name nor any grouping label value may
|
|
||||||
// contain a "/". The metrics pushed must not contain a job label of their own
|
|
||||||
// nor any of the grouping labels.
|
|
||||||
//
|
|
||||||
// You can use just host:port or ip:port as url, in which case 'http://' is
|
|
||||||
// added automatically. You can also include the schema in the URL. However, do
|
|
||||||
// not include the '/metrics/jobs/...' part.
|
|
||||||
//
|
|
||||||
// Note that all previously pushed metrics with the same job and other grouping
|
|
||||||
// labels will be replaced with the metrics pushed by this call. (It uses HTTP
|
|
||||||
// method 'PUT' to push to the Pushgateway.)
|
|
||||||
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
|
|
||||||
return push(job, grouping, url, g, "PUT")
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddFromGatherer works like FromGatherer, but only previously pushed metrics
|
|
||||||
// with the same name (and the same job and other grouping labels) will be
|
|
||||||
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
|
|
||||||
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
|
|
||||||
return push(job, grouping, url, g, "POST")
|
|
||||||
}
|
|
||||||
|
|
||||||
func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
|
|
||||||
if !strings.Contains(pushURL, "://") {
|
|
||||||
pushURL = "http://" + pushURL
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(pushURL, "/") {
|
|
||||||
pushURL = pushURL[:len(pushURL)-1]
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(job, "/") {
|
|
||||||
return fmt.Errorf("job contains '/': %s", job)
|
|
||||||
}
|
|
||||||
urlComponents := []string{url.QueryEscape(job)}
|
|
||||||
for ln, lv := range grouping {
|
|
||||||
if !model.LabelName(ln).IsValid() {
|
|
||||||
return fmt.Errorf("grouping label has invalid name: %s", ln)
|
|
||||||
}
|
|
||||||
if strings.Contains(lv, "/") {
|
|
||||||
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
|
|
||||||
}
|
|
||||||
urlComponents = append(urlComponents, ln, lv)
|
|
||||||
}
|
|
||||||
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))
|
|
||||||
|
|
||||||
mfs, err := g.Gather()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
|
|
||||||
// Check for pre-existing grouping labels:
|
|
||||||
for _, mf := range mfs {
|
|
||||||
for _, m := range mf.GetMetric() {
|
|
||||||
for _, l := range m.GetLabel() {
|
|
||||||
if l.GetName() == "job" {
|
|
||||||
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
|
|
||||||
}
|
|
||||||
if _, ok := grouping[l.GetName()]; ok {
|
|
||||||
return fmt.Errorf(
|
|
||||||
"pushed metric %s (%s) already contains grouping label %s",
|
|
||||||
mf.GetName(), m, l.GetName(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
enc.Encode(mf)
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest(method, pushURL, buf)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
|
|
||||||
resp, err := http.DefaultClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode != 202 {
|
|
||||||
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
|
|
||||||
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
|
|
||||||
// it collects from the provided collectors directly. It is a convenient way to
|
|
||||||
// push only a few metrics.
|
|
||||||
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
|
|
||||||
return pushCollectors(job, grouping, url, "PUT", collectors...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
|
|
||||||
// Instead, it collects from the provided collectors directly. It is a
|
|
||||||
// convenient way to push only a few metrics.
|
|
||||||
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
|
|
||||||
return pushCollectors(job, grouping, url, "POST", collectors...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
|
|
||||||
r := prometheus.NewRegistry()
|
|
||||||
for _, collector := range collectors {
|
|
||||||
if err := r.Register(collector); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return push(job, grouping, url, r, method)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
func HostnameGroupingKey() map[string]string {
    hostname, err := os.Hostname()
    if err != nil {
        return map[string]string{"instance": "unknown"}
    }
    return map[string]string{"instance": hostname}
}

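// A minimal sketch in the style of the push examples earlier in this diff,
// assuming a Pushgateway reachable at pushgateway:9091; the job name and the
// gauge are made up for illustration.
package push_test

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/push"
)

func ExampleAddCollectors() {
    jobDuration := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "batch_job_duration_seconds",
        Help: "Duration of the last batch job run in seconds.",
    })
    jobDuration.Set(42.7)

    // AddCollectors pushes with POST, so metrics previously pushed under the
    // same job and grouping labels but with other names are left untouched.
    if err := push.AddCollectors(
        "batch_job", push.HostnameGroupingKey(),
        "http://pushgateway:9091",
        jobDuration,
    ); err != nil {
        fmt.Println("Could not push to Pushgateway:", err)
    }
}
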
@ -1,176 +0,0 @@
|
||||||
// Copyright 2016 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Copyright (c) 2013, The Prometheus Authors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
package push
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestPush(t *testing.T) {
|
|
||||||
|
|
||||||
var (
|
|
||||||
lastMethod string
|
|
||||||
lastBody []byte
|
|
||||||
lastPath string
|
|
||||||
)
|
|
||||||
|
|
||||||
host, err := os.Hostname()
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fake a Pushgateway that always responds with 202.
|
|
||||||
pgwOK := httptest.NewServer(
|
|
||||||
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
lastMethod = r.Method
|
|
||||||
var err error
|
|
||||||
lastBody, err = ioutil.ReadAll(r.Body)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
lastPath = r.URL.EscapedPath()
|
|
||||||
w.Header().Set("Content-Type", `text/plain; charset=utf-8`)
|
|
||||||
w.WriteHeader(http.StatusAccepted)
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
defer pgwOK.Close()
|
|
||||||
|
|
||||||
// Fake a Pushgateway that always responds with 500.
|
|
||||||
pgwErr := httptest.NewServer(
|
|
||||||
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
http.Error(w, "fake error", http.StatusInternalServerError)
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
defer pgwErr.Close()
|
|
||||||
|
|
||||||
metric1 := prometheus.NewCounter(prometheus.CounterOpts{
|
|
||||||
Name: "testname1",
|
|
||||||
Help: "testhelp1",
|
|
||||||
})
|
|
||||||
metric2 := prometheus.NewGauge(prometheus.GaugeOpts{
|
|
||||||
Name: "testname2",
|
|
||||||
Help: "testhelp2",
|
|
||||||
ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"},
|
|
||||||
})
|
|
||||||
|
|
||||||
reg := prometheus.NewRegistry()
|
|
||||||
reg.MustRegister(metric1)
|
|
||||||
reg.MustRegister(metric2)
|
|
||||||
|
|
||||||
mfs, err := reg.Gather()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
|
|
||||||
|
|
||||||
for _, mf := range mfs {
|
|
||||||
if err := enc.Encode(mf); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
wantBody := buf.Bytes()
|
|
||||||
|
|
||||||
// PushCollectors, all good.
|
|
||||||
if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if lastMethod != "PUT" {
|
|
||||||
t.Error("want method PUT for PushCollectors, got", lastMethod)
|
|
||||||
}
|
|
||||||
if bytes.Compare(lastBody, wantBody) != 0 {
|
|
||||||
t.Errorf("got body %v, want %v", lastBody, wantBody)
|
|
||||||
}
|
|
||||||
if lastPath != "/metrics/job/testjob/instance/"+host {
|
|
||||||
t.Error("unexpected path:", lastPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushAddCollectors, with nil grouping, all good.
|
|
||||||
if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if lastMethod != "POST" {
|
|
||||||
t.Error("want method POST for PushAddCollectors, got", lastMethod)
|
|
||||||
}
|
|
||||||
if bytes.Compare(lastBody, wantBody) != 0 {
|
|
||||||
t.Errorf("got body %v, want %v", lastBody, wantBody)
|
|
||||||
}
|
|
||||||
if lastPath != "/metrics/job/testjob" {
|
|
||||||
t.Error("unexpected path:", lastPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushCollectors with a broken PGW.
|
|
||||||
if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil {
|
|
||||||
t.Error("push to broken Pushgateway succeeded")
|
|
||||||
} else {
|
|
||||||
if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want {
|
|
||||||
t.Errorf("got error %q, want %q", got, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushCollectors with invalid grouping or job.
|
|
||||||
if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
|
|
||||||
t.Error("push with grouping contained in metrics succeeded")
|
|
||||||
}
|
|
||||||
if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil {
|
|
||||||
t.Error("push with invalid job value succeeded")
|
|
||||||
}
|
|
||||||
if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
|
|
||||||
t.Error("push with invalid grouping succeeded")
|
|
||||||
}
|
|
||||||
if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
|
|
||||||
t.Error("push with invalid grouping succeeded")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Push registry, all good.
|
|
||||||
if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if lastMethod != "PUT" {
|
|
||||||
t.Error("want method PUT for Push, got", lastMethod)
|
|
||||||
}
|
|
||||||
if bytes.Compare(lastBody, wantBody) != 0 {
|
|
||||||
t.Errorf("got body %v, want %v", lastBody, wantBody)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushAdd registry, all good.
|
|
||||||
if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if lastMethod != "POST" {
|
|
||||||
t.Error("want method POSTT for PushAdd, got", lastMethod)
|
|
||||||
}
|
|
||||||
if bytes.Compare(lastBody, wantBody) != 0 {
|
|
||||||
t.Errorf("got body %v, want %v", lastBody, wantBody)
|
|
||||||
}
|
|
||||||
if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" {
|
|
||||||
t.Error("unexpected path:", lastPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,755 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Capacity for the channel to collect metrics and descriptors.
|
|
||||||
capMetricChan = 1000
|
|
||||||
capDescChan = 10
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultRegisterer and DefaultGatherer are the implementations of the
|
|
||||||
// Registerer and Gatherer interface a number of convenience functions in this
|
|
||||||
// package act on. Initially, both variables point to the same Registry, which
|
|
||||||
// has a process collector (see NewProcessCollector) and a Go collector (see
|
|
||||||
// NewGoCollector) already registered. This approach to keep default instances
|
|
||||||
// as global state mirrors the approach of other packages in the Go standard
|
|
||||||
// library. Note that there are caveats. Change the variables with caution and
|
|
||||||
// only if you understand the consequences. Users who want to avoid global state
|
|
||||||
// altogether should not use the convenience function and act on custom
|
|
||||||
// instances instead.
|
|
||||||
var (
|
|
||||||
defaultRegistry = NewRegistry()
|
|
||||||
DefaultRegisterer Registerer = defaultRegistry
|
|
||||||
DefaultGatherer Gatherer = defaultRegistry
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
MustRegister(NewProcessCollector(os.Getpid(), ""))
|
|
||||||
MustRegister(NewGoCollector())
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRegistry creates a new vanilla Registry without any Collectors
|
|
||||||
// pre-registered.
|
|
||||||
func NewRegistry() *Registry {
|
|
||||||
return &Registry{
|
|
||||||
collectorsByID: map[uint64]Collector{},
|
|
||||||
descIDs: map[uint64]struct{}{},
|
|
||||||
dimHashesByName: map[string]uint64{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPedanticRegistry returns a registry that checks during collection if each
|
|
||||||
// collected Metric is consistent with its reported Desc, and if the Desc has
|
|
||||||
// actually been registered with the registry.
|
|
||||||
//
|
|
||||||
// Usually, a Registry will be happy as long as the union of all collected
|
|
||||||
// Metrics is consistent and valid even if some metrics are not consistent with
|
|
||||||
// their own Desc or a Desc provided by their registered Collector. Well-behaved
|
|
||||||
// Collectors and Metrics will only provide consistent Descs. This Registry is
|
|
||||||
// useful to test the implementation of Collectors and Metrics.
|
|
||||||
func NewPedanticRegistry() *Registry {
|
|
||||||
r := NewRegistry()
|
|
||||||
r.pedanticChecksEnabled = true
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// Registerer is the interface for the part of a registry in charge of
|
|
||||||
// registering and unregistering. Users of custom registries should use
|
|
||||||
// Registerer as type for registration purposes (rather than the Registry type
|
|
||||||
// directly). In that way, they are free to use custom Registerer implementation
|
|
||||||
// (e.g. for testing purposes).
|
|
||||||
type Registerer interface {
|
|
||||||
// Register registers a new Collector to be included in metrics
|
|
||||||
// collection. It returns an error if the descriptors provided by the
|
|
||||||
// Collector are invalid or if they — in combination with descriptors of
|
|
||||||
// already registered Collectors — do not fulfill the consistency and
|
|
||||||
// uniqueness criteria described in the documentation of metric.Desc.
|
|
||||||
//
|
|
||||||
// If the provided Collector is equal to a Collector already registered
|
|
||||||
// (which includes the case of re-registering the same Collector), the
|
|
||||||
// returned error is an instance of AlreadyRegisteredError, which
|
|
||||||
// contains the previously registered Collector.
|
|
||||||
//
|
|
||||||
// It is in general not safe to register the same Collector multiple
|
|
||||||
// times concurrently.
|
|
||||||
Register(Collector) error
|
|
||||||
// MustRegister works like Register but registers any number of
|
|
||||||
// Collectors and panics upon the first registration that causes an
|
|
||||||
// error.
|
|
||||||
MustRegister(...Collector)
|
|
||||||
// Unregister unregisters the Collector that equals the Collector passed
|
|
||||||
// in as an argument. (Two Collectors are considered equal if their
|
|
||||||
// Describe method yields the same set of descriptors.) The function
|
|
||||||
// returns whether a Collector was unregistered.
|
|
||||||
//
|
|
||||||
// Note that even after unregistering, it will not be possible to
|
|
||||||
// register a new Collector that is inconsistent with the unregistered
|
|
||||||
// Collector, e.g. a Collector collecting metrics with the same name but
|
|
||||||
// a different help string. The rationale here is that the same registry
|
|
||||||
// instance must only collect consistent metrics throughout its
|
|
||||||
// lifetime.
|
|
||||||
Unregister(Collector) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gatherer is the interface for the part of a registry in charge of gathering
|
|
||||||
// the collected metrics into a number of MetricFamilies. The Gatherer interface
|
|
||||||
// comes with the same general implication as described for the Registerer
|
|
||||||
// interface.
|
|
||||||
type Gatherer interface {
|
|
||||||
// Gather calls the Collect method of the registered Collectors and then
|
|
||||||
// gathers the collected metrics into a lexicographically sorted slice
|
|
||||||
// of MetricFamily protobufs. Even if an error occurs, Gather attempts
|
|
||||||
// to gather as many metrics as possible. Hence, if a non-nil error is
|
|
||||||
// returned, the returned MetricFamily slice could be nil (in case of a
|
|
||||||
// fatal error that prevented any meaningful metric collection) or
|
|
||||||
// contain a number of MetricFamily protobufs, some of which might be
|
|
||||||
// incomplete, and some might be missing altogether. The returned error
|
|
||||||
// (which might be a MultiError) explains the details. In scenarios
|
|
||||||
// where complete collection is critical, the returned MetricFamily
|
|
||||||
// protobufs should be disregarded if the returned error is non-nil.
|
|
||||||
Gather() ([]*dto.MetricFamily, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register registers the provided Collector with the DefaultRegisterer.
|
|
||||||
//
|
|
||||||
// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
|
|
||||||
// details.
|
|
||||||
func Register(c Collector) error {
|
|
||||||
return DefaultRegisterer.Register(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustRegister registers the provided Collectors with the DefaultRegisterer and
|
|
||||||
// panics if any error occurs.
|
|
||||||
//
|
|
||||||
// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
|
|
||||||
// there for more details.
|
|
||||||
func MustRegister(cs ...Collector) {
|
|
||||||
DefaultRegisterer.MustRegister(cs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unregister removes the registration of the provided Collector from the
|
|
||||||
// DefaultRegisterer.
|
|
||||||
//
|
|
||||||
// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
|
|
||||||
// more details.
|
|
||||||
func Unregister(c Collector) bool {
|
|
||||||
return DefaultRegisterer.Unregister(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GathererFunc turns a function into a Gatherer.
|
|
||||||
type GathererFunc func() ([]*dto.MetricFamily, error)
|
|
||||||
|
|
||||||
// Gather implements Gatherer.
|
|
||||||
func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
|
|
||||||
return gf()
|
|
||||||
}
|
|
||||||
|
|
||||||
// AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails
// in that case, but you can detect from the kind of error what has
// happened. The error contains fields for the existing Collector and the
// (rejected) new Collector that equals the existing one. This can be used to
// find out if an equal Collector has been registered before and switch over to
// using the old one, as demonstrated in the example.
type AlreadyRegisteredError struct {
    ExistingCollector, NewCollector Collector
}

func (err AlreadyRegisteredError) Error() string {
    return "duplicate metrics collector registration attempted"
}

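// A minimal sketch of the "switch over to the old one" pattern described
// above, assuming package-internal use; the counter name and the helper
// function are made up for illustration.
func registerOrReuse(reg Registerer) Counter {
    c := NewCounter(CounterOpts{
        Name: "worker_restarts_total",
        Help: "Total number of worker restarts.",
    })
    if err := reg.Register(c); err != nil {
        if are, ok := err.(AlreadyRegisteredError); ok {
            // An equal Counter was registered before; keep using that one.
            return are.ExistingCollector.(Counter)
        }
        // Any other registration error indicates a programming error.
        panic(err)
    }
    return c
}
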
|
|
||||||
// MultiError is a slice of errors implementing the error interface. It is used
|
|
||||||
// by a Gatherer to report multiple errors during MetricFamily gathering.
|
|
||||||
type MultiError []error
|
|
||||||
|
|
||||||
func (errs MultiError) Error() string {
|
|
||||||
if len(errs) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
|
|
||||||
for _, err := range errs {
|
|
||||||
fmt.Fprintf(buf, "\n* %s", err)
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
|
|
||||||
// contained error as error if len(errs) is 1. In all other cases, it returns
|
|
||||||
// the MultiError directly. This is helpful for returning a MultiError in a way
|
|
||||||
// that only uses the MultiError if needed.
|
|
||||||
func (errs MultiError) MaybeUnwrap() error {
|
|
||||||
switch len(errs) {
|
|
||||||
case 0:
|
|
||||||
return nil
|
|
||||||
case 1:
|
|
||||||
return errs[0]
|
|
||||||
default:
|
|
||||||
return errs
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Registry registers Prometheus collectors, collects their metrics, and gathers
|
|
||||||
// them into MetricFamilies for exposition. It implements both Registerer and
|
|
||||||
// Gatherer. The zero value is not usable. Create instances with NewRegistry or
|
|
||||||
// NewPedanticRegistry.
|
|
||||||
type Registry struct {
|
|
||||||
mtx sync.RWMutex
|
|
||||||
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
|
||||||
descIDs map[uint64]struct{}
|
|
||||||
dimHashesByName map[string]uint64
|
|
||||||
pedanticChecksEnabled bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register implements Registerer.
|
|
||||||
func (r *Registry) Register(c Collector) error {
|
|
||||||
var (
|
|
||||||
descChan = make(chan *Desc, capDescChan)
|
|
||||||
newDescIDs = map[uint64]struct{}{}
|
|
||||||
newDimHashesByName = map[string]uint64{}
|
|
||||||
collectorID uint64 // Just a sum of all desc IDs.
|
|
||||||
duplicateDescErr error
|
|
||||||
)
|
|
||||||
go func() {
|
|
||||||
c.Describe(descChan)
|
|
||||||
close(descChan)
|
|
||||||
}()
|
|
||||||
r.mtx.Lock()
|
|
||||||
defer r.mtx.Unlock()
|
|
||||||
// Conduct various tests...
|
|
||||||
for desc := range descChan {
|
|
||||||
|
|
||||||
// Is the descriptor valid at all?
|
|
||||||
if desc.err != nil {
|
|
||||||
return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Is the descID unique?
|
|
||||||
// (In other words: Is the fqName + constLabel combination unique?)
|
|
||||||
if _, exists := r.descIDs[desc.id]; exists {
|
|
||||||
duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
|
|
||||||
}
|
|
||||||
// If it is not a duplicate desc in this collector, add it to
|
|
||||||
// the collectorID. (We allow duplicate descs within the same
|
|
||||||
// collector, but their existence must be a no-op.)
|
|
||||||
if _, exists := newDescIDs[desc.id]; !exists {
|
|
||||||
newDescIDs[desc.id] = struct{}{}
|
|
||||||
collectorID += desc.id
|
|
||||||
}
|
|
||||||
|
|
||||||
// Are all the label names and the help string consistent with
|
|
||||||
// previous descriptors of the same name?
|
|
||||||
// First check existing descriptors...
|
|
||||||
if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
|
|
||||||
if dimHash != desc.dimHash {
|
|
||||||
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// ...then check the new descriptors already seen.
|
|
||||||
if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
|
|
||||||
if dimHash != desc.dimHash {
|
|
||||||
return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
newDimHashesByName[desc.fqName] = desc.dimHash
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Did anything happen at all?
|
|
||||||
if len(newDescIDs) == 0 {
|
|
||||||
return errors.New("collector has no descriptors")
|
|
||||||
}
|
|
||||||
if existing, exists := r.collectorsByID[collectorID]; exists {
|
|
||||||
return AlreadyRegisteredError{
|
|
||||||
ExistingCollector: existing,
|
|
||||||
NewCollector: c,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If the collectorID is new, but at least one of the descs existed
|
|
||||||
// before, we are in trouble.
|
|
||||||
if duplicateDescErr != nil {
|
|
||||||
return duplicateDescErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only after all tests have passed, actually register.
|
|
||||||
r.collectorsByID[collectorID] = c
|
|
||||||
for hash := range newDescIDs {
|
|
||||||
r.descIDs[hash] = struct{}{}
|
|
||||||
}
|
|
||||||
for name, dimHash := range newDimHashesByName {
|
|
||||||
r.dimHashesByName[name] = dimHash
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unregister implements Registerer.
|
|
||||||
func (r *Registry) Unregister(c Collector) bool {
|
|
||||||
var (
|
|
||||||
descChan = make(chan *Desc, capDescChan)
|
|
||||||
descIDs = map[uint64]struct{}{}
|
|
||||||
collectorID uint64 // Just a sum of the desc IDs.
|
|
||||||
)
|
|
||||||
go func() {
|
|
||||||
c.Describe(descChan)
|
|
||||||
close(descChan)
|
|
||||||
}()
|
|
||||||
for desc := range descChan {
|
|
||||||
if _, exists := descIDs[desc.id]; !exists {
|
|
||||||
collectorID += desc.id
|
|
||||||
descIDs[desc.id] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
r.mtx.RLock()
|
|
||||||
if _, exists := r.collectorsByID[collectorID]; !exists {
|
|
||||||
r.mtx.RUnlock()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
r.mtx.RUnlock()
|
|
||||||
|
|
||||||
r.mtx.Lock()
|
|
||||||
defer r.mtx.Unlock()
|
|
||||||
|
|
||||||
delete(r.collectorsByID, collectorID)
|
|
||||||
for id := range descIDs {
|
|
||||||
delete(r.descIDs, id)
|
|
||||||
}
|
|
||||||
// dimHashesByName is left untouched as those must be consistent
|
|
||||||
// throughout the lifetime of a program.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustRegister implements Registerer.
|
|
||||||
func (r *Registry) MustRegister(cs ...Collector) {
|
|
||||||
for _, c := range cs {
|
|
||||||
if err := r.Register(c); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gather implements Gatherer.
|
|
||||||
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
|
||||||
var (
|
|
||||||
metricChan = make(chan Metric, capMetricChan)
|
|
||||||
metricHashes = map[uint64]struct{}{}
|
|
||||||
dimHashes = map[string]uint64{}
|
|
||||||
wg sync.WaitGroup
|
|
||||||
errs MultiError // The collected errors to return in the end.
|
|
||||||
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
|
|
||||||
)
|
|
||||||
|
|
||||||
r.mtx.RLock()
|
|
||||||
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
|
|
||||||
|
|
||||||
// Scatter.
|
|
||||||
// (Collectors could be complex and slow, so we call them all at once.)
|
|
||||||
wg.Add(len(r.collectorsByID))
|
|
||||||
go func() {
|
|
||||||
wg.Wait()
|
|
||||||
close(metricChan)
|
|
||||||
}()
|
|
||||||
for _, collector := range r.collectorsByID {
|
|
||||||
go func(collector Collector) {
|
|
||||||
defer wg.Done()
|
|
||||||
collector.Collect(metricChan)
|
|
||||||
}(collector)
|
|
||||||
}
|
|
||||||
|
|
||||||
// In case pedantic checks are enabled, we have to copy the map before
|
|
||||||
// giving up the RLock.
|
|
||||||
if r.pedanticChecksEnabled {
|
|
||||||
registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
|
|
||||||
for id := range r.descIDs {
|
|
||||||
registeredDescIDs[id] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
r.mtx.RUnlock()
|
|
||||||
|
|
||||||
// Drain metricChan in case of premature return.
|
|
||||||
defer func() {
|
|
||||||
for range metricChan {
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Gather.
|
|
||||||
for metric := range metricChan {
|
|
||||||
// This could be done concurrently, too, but it required locking
|
|
||||||
// of metricFamiliesByName (and of metricHashes if checks are
|
|
||||||
// enabled). Most likely not worth it.
|
|
||||||
desc := metric.Desc()
|
|
||||||
dtoMetric := &dto.Metric{}
|
|
||||||
if err := metric.Write(dtoMetric); err != nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"error collecting metric %v: %s", desc, err,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
metricFamily, ok := metricFamiliesByName[desc.fqName]
|
|
||||||
if ok {
|
|
||||||
if metricFamily.GetHelp() != desc.help {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s has help %q but should have %q",
|
|
||||||
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// TODO(beorn7): Simplify switch once Desc has type.
|
|
||||||
switch metricFamily.GetType() {
|
|
||||||
case dto.MetricType_COUNTER:
|
|
||||||
if dtoMetric.Counter == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be a Counter",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case dto.MetricType_GAUGE:
|
|
||||||
if dtoMetric.Gauge == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be a Gauge",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case dto.MetricType_SUMMARY:
|
|
||||||
if dtoMetric.Summary == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be a Summary",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case dto.MetricType_UNTYPED:
|
|
||||||
if dtoMetric.Untyped == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be Untyped",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case dto.MetricType_HISTOGRAM:
|
|
||||||
if dtoMetric.Histogram == nil {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s should be a Histogram",
|
|
||||||
desc.fqName, dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
panic("encountered MetricFamily with invalid type")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
metricFamily = &dto.MetricFamily{}
|
|
||||||
metricFamily.Name = proto.String(desc.fqName)
|
|
||||||
metricFamily.Help = proto.String(desc.help)
|
|
||||||
// TODO(beorn7): Simplify switch once Desc has type.
|
|
||||||
switch {
|
|
||||||
case dtoMetric.Gauge != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_GAUGE.Enum()
|
|
||||||
case dtoMetric.Counter != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_COUNTER.Enum()
|
|
||||||
case dtoMetric.Summary != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
|
|
||||||
case dtoMetric.Untyped != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
|
|
||||||
case dtoMetric.Histogram != nil:
|
|
||||||
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
|
|
||||||
default:
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"empty metric collected: %s", dtoMetric,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
metricFamiliesByName[desc.fqName] = metricFamily
|
|
||||||
}
|
|
||||||
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
|
|
||||||
errs = append(errs, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if r.pedanticChecksEnabled {
|
|
||||||
// Is the desc registered at all?
|
|
||||||
if _, exist := registeredDescIDs[desc.id]; !exist {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"collected metric %s %s with unregistered descriptor %s",
|
|
||||||
metricFamily.GetName(), dtoMetric, desc,
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
|
|
||||||
errs = append(errs, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
|
|
||||||
}
|
|
||||||
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
|
||||||
}
// Gatherers is a slice of Gatherer instances that implements the Gatherer
// interface itself. Its Gather method calls Gather on all Gatherers in the
// slice in order and returns the merged results. Errors returned from the
// Gather calls are all returned in a flattened MultiError. Duplicate and
// inconsistent Metrics are skipped (first occurrence in slice order wins) and
// reported in the returned error.
//
// Gatherers can be used to merge the Gather results from multiple
// Registries. It also provides a way to directly inject existing MetricFamily
// protobufs into the gathering by creating a custom Gatherer with a Gather
// method that simply returns the existing MetricFamily protobufs. Note that no
// registration is involved (in contrast to Collector registration), so
// obviously registration-time checks cannot happen. Any inconsistencies between
// the gathered MetricFamilies are reported as errors by the Gather method, and
// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
// (e.g. syntactically invalid metric or label names) will go undetected.
type Gatherers []Gatherer
|
|
||||||
|
|
||||||
// Gather implements Gatherer.
|
|
||||||
func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
|
||||||
var (
|
|
||||||
metricFamiliesByName = map[string]*dto.MetricFamily{}
|
|
||||||
metricHashes = map[uint64]struct{}{}
|
|
||||||
dimHashes = map[string]uint64{}
|
|
||||||
errs MultiError // The collected errors to return in the end.
|
|
||||||
)
|
|
||||||
|
|
||||||
for i, g := range gs {
|
|
||||||
mfs, err := g.Gather()
|
|
||||||
if err != nil {
|
|
||||||
if multiErr, ok := err.(MultiError); ok {
|
|
||||||
for _, err := range multiErr {
|
|
||||||
errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, mf := range mfs {
|
|
||||||
existingMF, exists := metricFamiliesByName[mf.GetName()]
|
|
||||||
if exists {
|
|
||||||
if existingMF.GetHelp() != mf.GetHelp() {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"gathered metric family %s has help %q but should have %q",
|
|
||||||
mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if existingMF.GetType() != mf.GetType() {
|
|
||||||
errs = append(errs, fmt.Errorf(
|
|
||||||
"gathered metric family %s has type %s but should have %s",
|
|
||||||
mf.GetName(), mf.GetType(), existingMF.GetType(),
|
|
||||||
))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
existingMF = &dto.MetricFamily{}
|
|
||||||
existingMF.Name = mf.Name
|
|
||||||
existingMF.Help = mf.Help
|
|
||||||
existingMF.Type = mf.Type
|
|
||||||
metricFamiliesByName[mf.GetName()] = existingMF
|
|
||||||
}
|
|
||||||
for _, m := range mf.Metric {
|
|
||||||
if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
|
|
||||||
errs = append(errs, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
existingMF.Metric = append(existingMF.Metric, m)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
|
||||||
}
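// A minimal sketch of merging a Registry with pre-existing MetricFamily
// protobufs through a GathererFunc, as described above; the variable names
// are illustrative assumptions.
//
//	reg := NewRegistry()
//	var extra []*dto.MetricFamily // built elsewhere, e.g. decoded from a push
//	g := Gatherers{
//		reg,
//		GathererFunc(func() ([]*dto.MetricFamily, error) { return extra, nil }),
//	}
//	mfs, err := g.Gather() // merged and sorted by name; err may be a MultiError
//	_, _ = mfs, err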
|
|
||||||
|
|
||||||
// metricSorter is a sortable slice of *dto.Metric.
|
|
||||||
type metricSorter []*dto.Metric
|
|
||||||
|
|
||||||
func (s metricSorter) Len() int {
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s metricSorter) Swap(i, j int) {
|
|
||||||
s[i], s[j] = s[j], s[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s metricSorter) Less(i, j int) bool {
|
|
||||||
if len(s[i].Label) != len(s[j].Label) {
|
|
||||||
// This should not happen. The metrics are
// inconsistent. However, we have to deal with it, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
|
|
||||||
return len(s[i].Label) < len(s[j].Label)
|
|
||||||
}
|
|
||||||
for n, lp := range s[i].Label {
|
|
||||||
vi := lp.GetValue()
|
|
||||||
vj := s[j].Label[n].GetValue()
|
|
||||||
if vi != vj {
|
|
||||||
return vi < vj
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We should never arrive here. Multiple metrics with the same
|
|
||||||
// label set in the same scrape will lead to undefined ingestion
|
|
||||||
// behavior. However, as above, we have to provide stable sorting
|
|
||||||
// here, even for inconsistent metrics. So sort equal metrics
|
|
||||||
// by their timestamp, with missing timestamps (implying "now")
|
|
||||||
// coming last.
|
|
||||||
if s[i].TimestampMs == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if s[j].TimestampMs == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
|
|
||||||
}
|
|
||||||
|
|
||||||
// normalizeMetricFamilies returns a MetricFamily slice with empty
|
|
||||||
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
|
||||||
// the slice, with the contained Metrics sorted within each MetricFamily.
|
|
||||||
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
|
|
||||||
for _, mf := range metricFamiliesByName {
|
|
||||||
sort.Sort(metricSorter(mf.Metric))
|
|
||||||
}
|
|
||||||
names := make([]string, 0, len(metricFamiliesByName))
|
|
||||||
for name, mf := range metricFamiliesByName {
|
|
||||||
if len(mf.Metric) > 0 {
|
|
||||||
names = append(names, name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Strings(names)
|
|
||||||
result := make([]*dto.MetricFamily, 0, len(names))
|
|
||||||
for _, name := range names {
|
|
||||||
result = append(result, metricFamiliesByName[name])
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
// doesn't yet contain a hash for the provided MetricFamily, it is
// added. Otherwise, an error is returned if the existing dimHash is not equal
// to the calculated dimHash.
|
|
||||||
func checkMetricConsistency(
|
|
||||||
metricFamily *dto.MetricFamily,
|
|
||||||
dtoMetric *dto.Metric,
|
|
||||||
metricHashes map[uint64]struct{},
|
|
||||||
dimHashes map[string]uint64,
|
|
||||||
) error {
|
|
||||||
// Type consistency with metric family.
|
|
||||||
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
|
|
||||||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
|
|
||||||
metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
|
|
||||||
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
|
|
||||||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
|
|
||||||
return fmt.Errorf(
|
|
||||||
"collected metric %s %s is not a %s",
|
|
||||||
metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
|
|
||||||
h := hashNew()
|
|
||||||
h = hashAdd(h, metricFamily.GetName())
|
|
||||||
h = hashAddByte(h, separatorByte)
|
|
||||||
dh := hashNew()
|
|
||||||
// Make sure label pairs are sorted. We depend on it for the consistency
|
|
||||||
// check.
|
|
||||||
sort.Sort(LabelPairSorter(dtoMetric.Label))
|
|
||||||
for _, lp := range dtoMetric.Label {
|
|
||||||
h = hashAdd(h, lp.GetValue())
|
|
||||||
h = hashAddByte(h, separatorByte)
|
|
||||||
dh = hashAdd(dh, lp.GetName())
|
|
||||||
dh = hashAddByte(dh, separatorByte)
|
|
||||||
}
|
|
||||||
if _, exists := metricHashes[h]; exists {
|
|
||||||
return fmt.Errorf(
|
|
||||||
"collected metric %s %s was collected before with the same name and label values",
|
|
||||||
metricFamily.GetName(), dtoMetric,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
|
|
||||||
if dimHash != dh {
|
|
||||||
return fmt.Errorf(
|
|
||||||
"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
|
|
||||||
metricFamily.GetName(), dtoMetric,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dimHashes[metricFamily.GetName()] = dh
|
|
||||||
}
|
|
||||||
metricHashes[h] = struct{}{}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkDescConsistency(
|
|
||||||
metricFamily *dto.MetricFamily,
|
|
||||||
dtoMetric *dto.Metric,
|
|
||||||
desc *Desc,
|
|
||||||
) error {
|
|
||||||
// Desc help consistency with metric family help.
|
|
||||||
if metricFamily.GetHelp() != desc.help {
|
|
||||||
return fmt.Errorf(
|
|
||||||
"collected metric %s %s has help %q but should have %q",
|
|
||||||
metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Is the desc consistent with the content of the metric?
|
|
||||||
lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
|
|
||||||
lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
|
|
||||||
for _, l := range desc.variableLabels {
|
|
||||||
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
|
|
||||||
Name: proto.String(l),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if len(lpsFromDesc) != len(dtoMetric.Label) {
|
|
||||||
return fmt.Errorf(
|
|
||||||
"labels in collected metric %s %s are inconsistent with descriptor %s",
|
|
||||||
metricFamily.GetName(), dtoMetric, desc,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
sort.Sort(LabelPairSorter(lpsFromDesc))
|
|
||||||
for i, lpFromDesc := range lpsFromDesc {
|
|
||||||
lpFromMetric := dtoMetric.Label[i]
|
|
||||||
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
|
|
||||||
lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
|
|
||||||
return fmt.Errorf(
|
|
||||||
"labels in collected metric %s %s are inconsistent with descriptor %s",
|
|
||||||
metricFamily.GetName(), dtoMetric, desc,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
@@ -1,546 +0,0 @@
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Copyright (c) 2013, The Prometheus Authors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by a BSD-style license that can be found
|
|
||||||
// in the LICENSE file.
|
|
||||||
|
|
||||||
package prometheus_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"github.com/prometheus/common/expfmt"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
func testHandler(t testing.TB) {
|
|
||||||
|
|
||||||
metricVec := prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Name: "name",
|
|
||||||
Help: "docstring",
|
|
||||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
|
||||||
},
|
|
||||||
[]string{"labelname"},
|
|
||||||
)
|
|
||||||
|
|
||||||
metricVec.WithLabelValues("val1").Inc()
|
|
||||||
metricVec.WithLabelValues("val2").Inc()
|
|
||||||
|
|
||||||
externalMetricFamily := &dto.MetricFamily{
|
|
||||||
Name: proto.String("externalname"),
|
|
||||||
Help: proto.String("externaldocstring"),
|
|
||||||
Type: dto.MetricType_COUNTER.Enum(),
|
|
||||||
Metric: []*dto.Metric{
|
|
||||||
{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
{
|
|
||||||
Name: proto.String("externalconstname"),
|
|
||||||
Value: proto.String("externalconstvalue"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: proto.String("externallabelname"),
|
|
||||||
Value: proto.String("externalval1"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Counter: &dto.Counter{
|
|
||||||
Value: proto.Float64(1),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
externalBuf := &bytes.Buffer{}
|
|
||||||
enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim)
|
|
||||||
if err := enc.Encode(externalMetricFamily); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
externalMetricFamilyAsBytes := externalBuf.Bytes()
|
|
||||||
externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring
|
|
||||||
# TYPE externalname counter
|
|
||||||
externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1
|
|
||||||
`)
|
|
||||||
externalMetricFamilyAsProtoText := []byte(`name: "externalname"
|
|
||||||
help: "externaldocstring"
|
|
||||||
type: COUNTER
|
|
||||||
metric: <
|
|
||||||
label: <
|
|
||||||
name: "externalconstname"
|
|
||||||
value: "externalconstvalue"
|
|
||||||
>
|
|
||||||
label: <
|
|
||||||
name: "externallabelname"
|
|
||||||
value: "externalval1"
|
|
||||||
>
|
|
||||||
counter: <
|
|
||||||
value: 1
|
|
||||||
>
|
|
||||||
>
|
|
||||||
|
|
||||||
`)
|
|
||||||
externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externalconstname" value:"externalconstvalue" > label:<name:"externallabelname" value:"externalval1" > counter:<value:1 > >
|
|
||||||
`)
|
|
||||||
|
|
||||||
expectedMetricFamily := &dto.MetricFamily{
|
|
||||||
Name: proto.String("name"),
|
|
||||||
Help: proto.String("docstring"),
|
|
||||||
Type: dto.MetricType_COUNTER.Enum(),
|
|
||||||
Metric: []*dto.Metric{
|
|
||||||
{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
{
|
|
||||||
Name: proto.String("constname"),
|
|
||||||
Value: proto.String("constvalue"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: proto.String("labelname"),
|
|
||||||
Value: proto.String("val1"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Counter: &dto.Counter{
|
|
||||||
Value: proto.Float64(1),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
{
|
|
||||||
Name: proto.String("constname"),
|
|
||||||
Value: proto.String("constvalue"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: proto.String("labelname"),
|
|
||||||
Value: proto.String("val2"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Counter: &dto.Counter{
|
|
||||||
Value: proto.Float64(1),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
|
|
||||||
if err := enc.Encode(expectedMetricFamily); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
expectedMetricFamilyAsBytes := buf.Bytes()
|
|
||||||
expectedMetricFamilyAsText := []byte(`# HELP name docstring
|
|
||||||
# TYPE name counter
|
|
||||||
name{constname="constvalue",labelname="val1"} 1
|
|
||||||
name{constname="constvalue",labelname="val2"} 1
|
|
||||||
`)
|
|
||||||
expectedMetricFamilyAsProtoText := []byte(`name: "name"
|
|
||||||
help: "docstring"
|
|
||||||
type: COUNTER
|
|
||||||
metric: <
|
|
||||||
label: <
|
|
||||||
name: "constname"
|
|
||||||
value: "constvalue"
|
|
||||||
>
|
|
||||||
label: <
|
|
||||||
name: "labelname"
|
|
||||||
value: "val1"
|
|
||||||
>
|
|
||||||
counter: <
|
|
||||||
value: 1
|
|
||||||
>
|
|
||||||
>
|
|
||||||
metric: <
|
|
||||||
label: <
|
|
||||||
name: "constname"
|
|
||||||
value: "constvalue"
|
|
||||||
>
|
|
||||||
label: <
|
|
||||||
name: "labelname"
|
|
||||||
value: "val2"
|
|
||||||
>
|
|
||||||
counter: <
|
|
||||||
value: 1
|
|
||||||
>
|
|
||||||
>
|
|
||||||
|
|
||||||
`)
|
|
||||||
expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
|
|
||||||
`)
|
|
||||||
|
|
||||||
externalMetricFamilyWithSameName := &dto.MetricFamily{
|
|
||||||
Name: proto.String("name"),
|
|
||||||
Help: proto.String("docstring"),
|
|
||||||
Type: dto.MetricType_COUNTER.Enum(),
|
|
||||||
Metric: []*dto.Metric{
|
|
||||||
{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
{
|
|
||||||
Name: proto.String("constname"),
|
|
||||||
Value: proto.String("constvalue"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: proto.String("labelname"),
|
|
||||||
Value: proto.String("different_val"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Counter: &dto.Counter{
|
|
||||||
Value: proto.Float64(42),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > >
|
|
||||||
`)
|
|
||||||
|
|
||||||
type output struct {
|
|
||||||
headers map[string]string
|
|
||||||
body []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
var scenarios = []struct {
|
|
||||||
headers map[string]string
|
|
||||||
out output
|
|
||||||
collector prometheus.Collector
|
|
||||||
externalMF []*dto.MetricFamily
|
|
||||||
}{
|
|
||||||
{ // 0
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "foo/bar;q=0.2, dings/bums;q=0.8",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `text/plain; version=0.0.4`,
|
|
||||||
},
|
|
||||||
body: []byte{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{ // 1
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "foo/bar;q=0.2, application/quark;q=0.8",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `text/plain; version=0.0.4`,
|
|
||||||
},
|
|
||||||
body: []byte{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{ // 2
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `text/plain; version=0.0.4`,
|
|
||||||
},
|
|
||||||
body: []byte{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{ // 3
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
|
||||||
},
|
|
||||||
body: []byte{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{ // 4
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/json",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `text/plain; version=0.0.4`,
|
|
||||||
},
|
|
||||||
body: expectedMetricFamilyAsText,
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
},
|
|
||||||
{ // 5
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
|
||||||
},
|
|
||||||
body: expectedMetricFamilyAsBytes,
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
},
|
|
||||||
{ // 6
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/json",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `text/plain; version=0.0.4`,
|
|
||||||
},
|
|
||||||
body: externalMetricFamilyAsText,
|
|
||||||
},
|
|
||||||
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
|
||||||
},
|
|
||||||
{ // 7
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
|
||||||
},
|
|
||||||
body: externalMetricFamilyAsBytes,
|
|
||||||
},
|
|
||||||
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
|
||||||
},
|
|
||||||
{ // 8
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
|
||||||
},
|
|
||||||
body: bytes.Join(
|
|
||||||
[][]byte{
|
|
||||||
externalMetricFamilyAsBytes,
|
|
||||||
expectedMetricFamilyAsBytes,
|
|
||||||
},
|
|
||||||
[]byte{},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
|
||||||
},
|
|
||||||
{ // 9
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "text/plain",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `text/plain; version=0.0.4`,
|
|
||||||
},
|
|
||||||
body: []byte{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{ // 10
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `text/plain; version=0.0.4`,
|
|
||||||
},
|
|
||||||
body: expectedMetricFamilyAsText,
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
},
|
|
||||||
{ // 11
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `text/plain; version=0.0.4`,
|
|
||||||
},
|
|
||||||
body: bytes.Join(
|
|
||||||
[][]byte{
|
|
||||||
externalMetricFamilyAsText,
|
|
||||||
expectedMetricFamilyAsText,
|
|
||||||
},
|
|
||||||
[]byte{},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
|
||||||
},
|
|
||||||
{ // 12
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
|
|
||||||
},
|
|
||||||
body: bytes.Join(
|
|
||||||
[][]byte{
|
|
||||||
externalMetricFamilyAsBytes,
|
|
||||||
expectedMetricFamilyAsBytes,
|
|
||||||
},
|
|
||||||
[]byte{},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
|
||||||
},
|
|
||||||
{ // 13
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`,
|
|
||||||
},
|
|
||||||
body: bytes.Join(
|
|
||||||
[][]byte{
|
|
||||||
externalMetricFamilyAsProtoText,
|
|
||||||
expectedMetricFamilyAsProtoText,
|
|
||||||
},
|
|
||||||
[]byte{},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
|
||||||
},
|
|
||||||
{ // 14
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
|
|
||||||
},
|
|
||||||
body: bytes.Join(
|
|
||||||
[][]byte{
|
|
||||||
externalMetricFamilyAsProtoCompactText,
|
|
||||||
expectedMetricFamilyAsProtoCompactText,
|
|
||||||
},
|
|
||||||
[]byte{},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
externalMF: []*dto.MetricFamily{externalMetricFamily},
|
|
||||||
},
|
|
||||||
{ // 15
|
|
||||||
headers: map[string]string{
|
|
||||||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
|
|
||||||
},
|
|
||||||
out: output{
|
|
||||||
headers: map[string]string{
|
|
||||||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
|
|
||||||
},
|
|
||||||
body: bytes.Join(
|
|
||||||
[][]byte{
|
|
||||||
externalMetricFamilyAsProtoCompactText,
|
|
||||||
expectedMetricFamilyMergedWithExternalAsProtoCompactText,
|
|
||||||
},
|
|
||||||
[]byte{},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
collector: metricVec,
|
|
||||||
externalMF: []*dto.MetricFamily{
|
|
||||||
externalMetricFamily,
|
|
||||||
externalMetricFamilyWithSameName,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for i, scenario := range scenarios {
|
|
||||||
registry := prometheus.NewPedanticRegistry()
|
|
||||||
gatherer := prometheus.Gatherer(registry)
|
|
||||||
if scenario.externalMF != nil {
|
|
||||||
gatherer = prometheus.Gatherers{
|
|
||||||
registry,
|
|
||||||
prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
|
|
||||||
return scenario.externalMF, nil
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if scenario.collector != nil {
|
|
||||||
registry.Register(scenario.collector)
|
|
||||||
}
|
|
||||||
writer := httptest.NewRecorder()
|
|
||||||
handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
|
|
||||||
request, _ := http.NewRequest("GET", "/", nil)
|
|
||||||
for key, value := range scenario.headers {
|
|
||||||
request.Header.Add(key, value)
|
|
||||||
}
|
|
||||||
handler(writer, request)
|
|
||||||
|
|
||||||
for key, value := range scenario.out.headers {
|
|
||||||
if writer.HeaderMap.Get(key) != value {
|
|
||||||
t.Errorf(
|
|
||||||
"%d. expected %q for header %q, got %q",
|
|
||||||
i, value, key, writer.Header().Get(key),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) {
|
|
||||||
t.Errorf(
|
|
||||||
"%d. expected body:\n%s\ngot body:\n%s\n",
|
|
||||||
i, scenario.out.body, writer.Body.Bytes(),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHandler(t *testing.T) {
|
|
||||||
testHandler(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkHandler(b *testing.B) {
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
testHandler(b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRegisterWithOrGet(t *testing.T) {
|
|
||||||
// Replace the default registerer just to be sure. This is bad, but this
|
|
||||||
// whole test will go away once RegisterOrGet is removed.
|
|
||||||
oldRegisterer := prometheus.DefaultRegisterer
|
|
||||||
defer func() {
|
|
||||||
prometheus.DefaultRegisterer = oldRegisterer
|
|
||||||
}()
|
|
||||||
prometheus.DefaultRegisterer = prometheus.NewRegistry()
|
|
||||||
original := prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Name: "test",
|
|
||||||
Help: "help",
|
|
||||||
},
|
|
||||||
[]string{"foo", "bar"},
|
|
||||||
)
|
|
||||||
equalButNotSame := prometheus.NewCounterVec(
|
|
||||||
prometheus.CounterOpts{
|
|
||||||
Name: "test",
|
|
||||||
Help: "help",
|
|
||||||
},
|
|
||||||
[]string{"foo", "bar"},
|
|
||||||
)
|
|
||||||
var err error
|
|
||||||
if err = prometheus.Register(original); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err = prometheus.Register(equalButNotSame); err == nil {
t.Fatal("expected error when registering an equal collector")
}
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
if are.ExistingCollector != original {
t.Error("expected original collector but got something else")
}
if are.ExistingCollector == equalButNotSame {
t.Error("expected original collector but got new one")
}
|
|
||||||
} else {
|
|
||||||
t.Error("unexpected error:", err)
|
|
||||||
}
|
|
||||||
}
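// A hedged sketch of the usual way calling code reacts to
// AlreadyRegisteredError, reusing the collector that won the registration;
// the metric name and labels are illustrative assumptions.
//
//	c := prometheus.NewCounterVec(
//		prometheus.CounterOpts{Name: "test", Help: "help"},
//		[]string{"foo", "bar"},
//	)
//	if err := prometheus.Register(c); err != nil {
//		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
//			c = are.ExistingCollector.(*prometheus.CounterVec) // reuse existing
//		} else {
//			panic(err)
//		}
//	}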
@@ -1,543 +0,0 @@
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/beorn7/perks/quantile"
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
// quantileLabel is used for the label that defines the quantile in a
|
|
||||||
// summary.
|
|
||||||
const quantileLabel = "quantile"
|
|
||||||
|
|
||||||
// A Summary captures individual observations from an event or sample stream and
|
|
||||||
// summarizes them in a manner similar to traditional summary statistics: 1. sum
|
|
||||||
// of observations, 2. observation count, 3. rank estimations.
|
|
||||||
//
|
|
||||||
// A typical use-case is the observation of request latencies. By default, a
|
|
||||||
// Summary provides the median, the 90th and the 99th percentile of the latency
|
|
||||||
// as rank estimations.
|
|
||||||
//
|
|
||||||
// Note that the rank estimations cannot be aggregated in a meaningful way with
|
|
||||||
// the Prometheus query language (i.e. you cannot average or add them). If you
|
|
||||||
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
|
|
||||||
// queries served across all instances of a service), consider the Histogram
|
|
||||||
// metric type. See the Prometheus documentation for more details.
|
|
||||||
//
|
|
||||||
// To create Summary instances, use NewSummary.
|
|
||||||
type Summary interface {
|
|
||||||
Metric
|
|
||||||
Collector
|
|
||||||
|
|
||||||
// Observe adds a single observation to the summary.
|
|
||||||
Observe(float64)
|
|
||||||
}
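// A minimal usage sketch with explicit Objectives (see the deprecation note
// on DefObjectives below); the metric name and objective values are
// illustrative assumptions.
//
//	requestDuration := NewSummary(SummaryOpts{
//		Name:       "http_request_duration_seconds",
//		Help:       "HTTP request latencies in seconds.",
//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
//	})
//	start := time.Now()
//	// ... handle a request ...
//	requestDuration.Observe(time.Since(start).Seconds())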
|
|
||||||
|
|
||||||
// DefObjectives are the default Summary quantile values.
|
|
||||||
//
|
|
||||||
// Deprecated: DefObjectives will not be used as the default objectives in
|
|
||||||
// v0.10 of the library. The default Summary will have no quantiles then.
|
|
||||||
var (
|
|
||||||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
|
||||||
|
|
||||||
errQuantileLabelNotAllowed = fmt.Errorf(
|
|
||||||
"%q is not allowed as label name in summaries", quantileLabel,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Default values for SummaryOpts.
|
|
||||||
const (
|
|
||||||
// DefMaxAge is the default duration for which observations stay
|
|
||||||
// relevant.
|
|
||||||
DefMaxAge time.Duration = 10 * time.Minute
|
|
||||||
// DefAgeBuckets is the default number of buckets used to calculate the
|
|
||||||
// age of observations.
|
|
||||||
DefAgeBuckets = 5
|
|
||||||
// DefBufCap is the standard buffer size for collecting Summary observations.
|
|
||||||
DefBufCap = 500
|
|
||||||
)
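// How these defaults interact, as a small illustrative calculation: each age
// bucket covers MaxAge/AgeBuckets, so with the defaults the head stream is
// rotated every 2 minutes.
//
//	streamDuration := DefMaxAge / time.Duration(DefAgeBuckets) // 10m / 5 = 2m
//	_ = streamDuration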
|
|
||||||
|
|
||||||
// SummaryOpts bundles the options for creating a Summary metric. It is
|
|
||||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
|
||||||
// optional and can safely be left at their zero value.
|
|
||||||
type SummaryOpts struct {
|
|
||||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
|
||||||
// name of the Summary (created by joining these components with
|
|
||||||
// "_"). Only Name is mandatory, the others merely help structuring the
|
|
||||||
// name. Note that the fully-qualified name of the Summary must be a
|
|
||||||
// valid Prometheus metric name.
|
|
||||||
Namespace string
|
|
||||||
Subsystem string
|
|
||||||
Name string
|
|
||||||
|
|
||||||
// Help provides information about this Summary. Mandatory!
|
|
||||||
//
|
|
||||||
// Metrics with the same fully-qualified name must have the same Help
|
|
||||||
// string.
|
|
||||||
Help string
|
|
||||||
|
|
||||||
// ConstLabels are used to attach fixed labels to this
|
|
||||||
// Summary. Summaries with the same fully-qualified name must have the
|
|
||||||
// same label names in their ConstLabels.
|
|
||||||
//
|
|
||||||
// Note that in most cases, labels have a value that varies during the
|
|
||||||
// lifetime of a process. Those labels are usually managed with a
|
|
||||||
// SummaryVec. ConstLabels serve only special purposes. One is for the
|
|
||||||
// special case where the value of a label does not change during the
|
|
||||||
// lifetime of a process, e.g. if the revision of the running binary is
|
|
||||||
// put into a label. Another, more advanced purpose is if more than one
|
|
||||||
// Collector needs to collect Summaries with the same fully-qualified
|
|
||||||
// name. In that case, those Summaries must differ in the values of
|
|
||||||
// their ConstLabels. See the Collector examples.
|
|
||||||
//
|
|
||||||
// If the value of a label never changes (not even between binaries),
|
|
||||||
// that label most likely should not be a label at all (but part of the
|
|
||||||
// metric name).
|
|
||||||
ConstLabels Labels
|
|
||||||
|
|
||||||
// Objectives defines the quantile rank estimates with their respective
|
|
||||||
// absolute error. If Objectives[q] = e, then the value reported for q
|
|
||||||
// will be the φ-quantile value for some φ between q-e and q+e. The
|
|
||||||
// default value is DefObjectives. It is used if Objectives is left at
|
|
||||||
// its zero value (i.e. nil). To create a Summary without Objectives,
|
|
||||||
// set it to an empty map (i.e. map[float64]float64{}).
|
|
||||||
//
|
|
||||||
// Deprecated: Note that the current value of DefObjectives is
|
|
||||||
// deprecated. It will be replaced by an empty map in v0.10 of the
|
|
||||||
// library. Please explicitly set Objectives to the desired value.
|
|
||||||
Objectives map[float64]float64
|
|
||||||
|
|
||||||
// MaxAge defines the duration for which an observation stays relevant
|
|
||||||
// for the summary. Must be positive. The default value is DefMaxAge.
|
|
||||||
MaxAge time.Duration
|
|
||||||
|
|
||||||
// AgeBuckets is the number of buckets used to exclude observations that
|
|
||||||
// are older than MaxAge from the summary. A higher number has a
|
|
||||||
// resource penalty, so only increase it if the higher resolution is
|
|
||||||
// really required. For very high observation rates, you might want to
|
|
||||||
// reduce the number of age buckets. With only one age bucket, you will
|
|
||||||
// effectively see a complete reset of the summary each time MaxAge has
|
|
||||||
// passed. The default value is DefAgeBuckets.
|
|
||||||
AgeBuckets uint32
|
|
||||||
|
|
||||||
// BufCap defines the default sample stream buffer size. The default
// value of DefBufCap should suffice for most uses. If there is a need
// to increase the value, a multiple of 500 is recommended (because that
// is the internal buffer size of the underlying package
// "github.com/beorn7/perks/quantile").
|
|
||||||
BufCap uint32
|
|
||||||
}
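// A sketch of how Namespace, Subsystem, and Name are joined into the
// fully-qualified metric name (via BuildFQName, as done by NewSummary); the
// values are illustrative assumptions.
//
//	opts := SummaryOpts{
//		Namespace: "myapp",
//		Subsystem: "api",
//		Name:      "request_duration_seconds",
//		Help:      "API request latencies in seconds.",
//	}
//	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
//	// fqName == "myapp_api_request_duration_seconds"
//	_ = fqName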
|
|
||||||
|
|
||||||
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
|
|
||||||
// perk/quantile is actually not working as advertised - and it might be
|
|
||||||
// unfixable, as the underlying algorithm is apparently not capable of merging
|
|
||||||
// summaries in the first place. To avoid using Merge, we are currently adding
|
|
||||||
// observations to _each_ age bucket, i.e. the effort to add a sample is
|
|
||||||
// essentially multiplied by the number of age buckets. When rotating age
|
|
||||||
// buckets, we empty the previous head stream. On scrape time, we simply take
|
|
||||||
// the quantiles from the head stream (no merging required). Result: More effort
|
|
||||||
// on observation time, less effort on scrape time, which is exactly the
|
|
||||||
// opposite of what we try to accomplish, but at least the results are correct.
|
|
||||||
//
|
|
||||||
// The quite elegant previous contraption to merge the age buckets efficiently
|
|
||||||
// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
|
|
||||||
// can't be used anymore.
|
|
||||||
|
|
||||||
// NewSummary creates a new Summary based on the provided SummaryOpts.
|
|
||||||
func NewSummary(opts SummaryOpts) Summary {
|
|
||||||
return newSummary(
|
|
||||||
NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
nil,
|
|
||||||
opts.ConstLabels,
|
|
||||||
),
|
|
||||||
opts,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
|
||||||
if len(desc.variableLabels) != len(labelValues) {
|
|
||||||
panic(errInconsistentCardinality)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, n := range desc.variableLabels {
|
|
||||||
if n == quantileLabel {
|
|
||||||
panic(errQuantileLabelNotAllowed)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, lp := range desc.constLabelPairs {
|
|
||||||
if lp.GetName() == quantileLabel {
|
|
||||||
panic(errQuantileLabelNotAllowed)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.Objectives == nil {
|
|
||||||
opts.Objectives = DefObjectives
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.MaxAge < 0 {
|
|
||||||
panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
|
|
||||||
}
|
|
||||||
if opts.MaxAge == 0 {
|
|
||||||
opts.MaxAge = DefMaxAge
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.AgeBuckets == 0 {
|
|
||||||
opts.AgeBuckets = DefAgeBuckets
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.BufCap == 0 {
|
|
||||||
opts.BufCap = DefBufCap
|
|
||||||
}
|
|
||||||
|
|
||||||
s := &summary{
|
|
||||||
desc: desc,
|
|
||||||
|
|
||||||
objectives: opts.Objectives,
|
|
||||||
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
|
|
||||||
|
|
||||||
labelPairs: makeLabelPairs(desc, labelValues),
|
|
||||||
|
|
||||||
hotBuf: make([]float64, 0, opts.BufCap),
|
|
||||||
coldBuf: make([]float64, 0, opts.BufCap),
|
|
||||||
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
|
|
||||||
}
|
|
||||||
s.headStreamExpTime = time.Now().Add(s.streamDuration)
|
|
||||||
s.hotBufExpTime = s.headStreamExpTime
|
|
||||||
|
|
||||||
for i := uint32(0); i < opts.AgeBuckets; i++ {
|
|
||||||
s.streams = append(s.streams, s.newStream())
|
|
||||||
}
|
|
||||||
s.headStream = s.streams[0]
|
|
||||||
|
|
||||||
for qu := range s.objectives {
|
|
||||||
s.sortedObjectives = append(s.sortedObjectives, qu)
|
|
||||||
}
|
|
||||||
sort.Float64s(s.sortedObjectives)
|
|
||||||
|
|
||||||
s.init(s) // Init self-collection.
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
type summary struct {
|
|
||||||
selfCollector
|
|
||||||
|
|
||||||
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
|
|
||||||
mtx sync.Mutex // Protects every other moving part.
|
|
||||||
// Lock bufMtx before mtx if both are needed.
|
|
||||||
|
|
||||||
desc *Desc
|
|
||||||
|
|
||||||
objectives map[float64]float64
|
|
||||||
sortedObjectives []float64
|
|
||||||
|
|
||||||
labelPairs []*dto.LabelPair
|
|
||||||
|
|
||||||
sum float64
|
|
||||||
cnt uint64
|
|
||||||
|
|
||||||
hotBuf, coldBuf []float64
|
|
||||||
|
|
||||||
streams []*quantile.Stream
|
|
||||||
streamDuration time.Duration
|
|
||||||
headStream *quantile.Stream
|
|
||||||
headStreamIdx int
|
|
||||||
headStreamExpTime, hotBufExpTime time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *summary) Desc() *Desc {
|
|
||||||
return s.desc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *summary) Observe(v float64) {
|
|
||||||
s.bufMtx.Lock()
|
|
||||||
defer s.bufMtx.Unlock()
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
if now.After(s.hotBufExpTime) {
|
|
||||||
s.asyncFlush(now)
|
|
||||||
}
|
|
||||||
s.hotBuf = append(s.hotBuf, v)
|
|
||||||
if len(s.hotBuf) == cap(s.hotBuf) {
|
|
||||||
s.asyncFlush(now)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *summary) Write(out *dto.Metric) error {
|
|
||||||
sum := &dto.Summary{}
|
|
||||||
qs := make([]*dto.Quantile, 0, len(s.objectives))
|
|
||||||
|
|
||||||
s.bufMtx.Lock()
|
|
||||||
s.mtx.Lock()
|
|
||||||
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
|
|
||||||
s.swapBufs(time.Now())
|
|
||||||
s.bufMtx.Unlock()
|
|
||||||
|
|
||||||
s.flushColdBuf()
|
|
||||||
sum.SampleCount = proto.Uint64(s.cnt)
|
|
||||||
sum.SampleSum = proto.Float64(s.sum)
|
|
||||||
|
|
||||||
for _, rank := range s.sortedObjectives {
|
|
||||||
var q float64
|
|
||||||
if s.headStream.Count() == 0 {
|
|
||||||
q = math.NaN()
|
|
||||||
} else {
|
|
||||||
q = s.headStream.Query(rank)
|
|
||||||
}
|
|
||||||
qs = append(qs, &dto.Quantile{
|
|
||||||
Quantile: proto.Float64(rank),
|
|
||||||
Value: proto.Float64(q),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
s.mtx.Unlock()
|
|
||||||
|
|
||||||
if len(qs) > 0 {
|
|
||||||
sort.Sort(quantSort(qs))
|
|
||||||
}
|
|
||||||
sum.Quantile = qs
|
|
||||||
|
|
||||||
out.Summary = sum
|
|
||||||
out.Label = s.labelPairs
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *summary) newStream() *quantile.Stream {
|
|
||||||
return quantile.NewTargeted(s.objectives)
|
|
||||||
}
|
|
||||||
|
|
||||||
// asyncFlush needs bufMtx locked.
|
|
||||||
func (s *summary) asyncFlush(now time.Time) {
|
|
||||||
s.mtx.Lock()
|
|
||||||
s.swapBufs(now)
|
|
||||||
|
|
||||||
// Unblock the original goroutine that was responsible for the mutation
|
|
||||||
// that triggered the compaction. But hold onto the global non-buffer
|
|
||||||
// state mutex until the operation finishes.
|
|
||||||
go func() {
|
|
||||||
s.flushColdBuf()
|
|
||||||
s.mtx.Unlock()
|
|
||||||
}()
|
|
||||||
}
// maybeRotateStreams needs mtx AND bufMtx locked.
|
|
||||||
func (s *summary) maybeRotateStreams() {
|
|
||||||
for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
|
|
||||||
s.headStream.Reset()
|
|
||||||
s.headStreamIdx++
|
|
||||||
if s.headStreamIdx >= len(s.streams) {
|
|
||||||
s.headStreamIdx = 0
|
|
||||||
}
|
|
||||||
s.headStream = s.streams[s.headStreamIdx]
|
|
||||||
s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// flushColdBuf needs mtx locked.
|
|
||||||
func (s *summary) flushColdBuf() {
|
|
||||||
for _, v := range s.coldBuf {
|
|
||||||
for _, stream := range s.streams {
|
|
||||||
stream.Insert(v)
|
|
||||||
}
|
|
||||||
s.cnt++
|
|
||||||
s.sum += v
|
|
||||||
}
|
|
||||||
s.coldBuf = s.coldBuf[0:0]
|
|
||||||
s.maybeRotateStreams()
|
|
||||||
}
|
|
||||||
|
|
||||||
// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
|
|
||||||
func (s *summary) swapBufs(now time.Time) {
|
|
||||||
if len(s.coldBuf) != 0 {
|
|
||||||
panic("coldBuf is not empty")
|
|
||||||
}
|
|
||||||
s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
|
|
||||||
// hotBuf is now empty and gets new expiration set.
|
|
||||||
for now.After(s.hotBufExpTime) {
|
|
||||||
s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type quantSort []*dto.Quantile
|
|
||||||
|
|
||||||
func (s quantSort) Len() int {
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s quantSort) Swap(i, j int) {
|
|
||||||
s[i], s[j] = s[j], s[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s quantSort) Less(i, j int) bool {
|
|
||||||
return s[i].GetQuantile() < s[j].GetQuantile()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SummaryVec is a Collector that bundles a set of Summaries that all share the
|
|
||||||
// same Desc, but have different values for their variable labels. This is used
|
|
||||||
// if you want to count the same thing partitioned by various dimensions
|
|
||||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
|
||||||
// instances with NewSummaryVec.
|
|
||||||
type SummaryVec struct {
|
|
||||||
*MetricVec
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
|
||||||
// partitioned by the given label names. At least one label name must be
|
|
||||||
// provided.
|
|
||||||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
|
||||||
desc := NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
|
||||||
opts.Help,
|
|
||||||
labelNames,
|
|
||||||
opts.ConstLabels,
|
|
||||||
)
|
|
||||||
return &SummaryVec{
|
|
||||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
|
||||||
return newSummary(desc, opts, lvs...)
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
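// A minimal usage sketch of a SummaryVec partitioned by two labels; the metric
// name, label names, and observed values are illustrative assumptions.
//
//	latencies := NewSummaryVec(
//		SummaryOpts{
//			Name:       "rpc_duration_seconds",
//			Help:       "RPC latencies in seconds.",
//			Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
//		},
//		[]string{"method", "code"},
//	)
//	latencies.WithLabelValues("Get", "200").Observe(0.021)
//	latencies.With(Labels{"method": "Put", "code": "500"}).Observe(0.153)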
|
|
||||||
|
|
||||||
// GetMetricWithLabelValues replaces the method of the same name in
|
|
||||||
// MetricVec. The difference is that this method returns a Summary and not a
|
|
||||||
// Metric so that no type conversion is required.
|
|
||||||
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
|
|
||||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
|
||||||
if metric != nil {
|
|
||||||
return metric.(Summary), err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
|
||||||
// difference is that this method returns a Summary and not a Metric so that no
|
|
||||||
// type conversion is required.
|
|
||||||
func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
|
|
||||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
|
||||||
if metric != nil {
|
|
||||||
return metric.(Summary), err
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
|
||||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
|
||||||
// error, WithLabelValues allows shortcuts like
|
|
||||||
// myVec.WithLabelValues("404", "GET").Observe(42.21)
|
|
||||||
func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
|
|
||||||
return m.MetricVec.WithLabelValues(lvs...).(Summary)
|
|
||||||
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
|
||||||
func (m *SummaryVec) With(labels Labels) Summary {
|
|
||||||
return m.MetricVec.With(labels).(Summary)
|
|
||||||
}
|
|
||||||
|
|
||||||
type constSummary struct {
|
|
||||||
desc *Desc
|
|
||||||
count uint64
|
|
||||||
sum float64
|
|
||||||
quantiles map[float64]float64
|
|
||||||
labelPairs []*dto.LabelPair
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *constSummary) Desc() *Desc {
|
|
||||||
return s.desc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *constSummary) Write(out *dto.Metric) error {
|
|
||||||
sum := &dto.Summary{}
|
|
||||||
qs := make([]*dto.Quantile, 0, len(s.quantiles))
|
|
||||||
|
|
||||||
sum.SampleCount = proto.Uint64(s.count)
|
|
||||||
sum.SampleSum = proto.Float64(s.sum)
|
|
||||||
|
|
||||||
for rank, q := range s.quantiles {
|
|
||||||
qs = append(qs, &dto.Quantile{
|
|
||||||
Quantile: proto.Float64(rank),
|
|
||||||
Value: proto.Float64(q),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(qs) > 0 {
|
|
||||||
sort.Sort(quantSort(qs))
|
|
||||||
}
|
|
||||||
sum.Quantile = qs
|
|
||||||
|
|
||||||
out.Summary = sum
|
|
||||||
out.Label = s.labelPairs
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConstSummary returns a metric representing a Prometheus summary with fixed
|
|
||||||
// values for the count, sum, and quantiles. As those parameters cannot be
|
|
||||||
// changed, the returned value does not implement the Summary interface (but
|
|
||||||
// only the Metric interface). Users of this package will not have much use for
|
|
||||||
// it in regular operations. However, when implementing custom Collectors, it is
// useful as a throw-away metric that is generated on the fly to send to
// Prometheus in the Collect method.
|
|
||||||
//
|
|
||||||
// quantiles maps ranks to quantile values. For example, a median latency of
|
|
||||||
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
|
|
||||||
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
|
|
||||||
//
|
|
||||||
// NewConstSummary returns an error if the length of labelValues is not
|
|
||||||
// consistent with the variable labels in Desc.
|
|
||||||
func NewConstSummary(
|
|
||||||
desc *Desc,
|
|
||||||
count uint64,
|
|
||||||
sum float64,
|
|
||||||
quantiles map[float64]float64,
|
|
||||||
labelValues ...string,
|
|
||||||
) (Metric, error) {
|
|
||||||
if len(desc.variableLabels) != len(labelValues) {
|
|
||||||
return nil, errInconsistentCardinality
|
|
||||||
}
|
|
||||||
return &constSummary{
|
|
||||||
desc: desc,
|
|
||||||
count: count,
|
|
||||||
sum: sum,
|
|
||||||
quantiles: quantiles,
|
|
||||||
labelPairs: makeLabelPairs(desc, labelValues),
|
|
||||||
}, nil
|
|
||||||
}
// MustNewConstSummary is a version of NewConstSummary that panics where
// NewConstSummary would have returned an error.
|
|
||||||
func MustNewConstSummary(
|
|
||||||
desc *Desc,
|
|
||||||
count uint64,
|
|
||||||
sum float64,
|
|
||||||
quantiles map[float64]float64,
|
|
||||||
labelValues ...string,
|
|
||||||
) Metric {
|
|
||||||
m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
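// A hedged sketch of emitting a throw-away summary from a custom Collector's
// Collect method, as suggested in the NewConstSummary comment; myCollector and
// its fields are hypothetical.
//
//	func (c *myCollector) Collect(ch chan<- Metric) {
//		count, sum := c.snapshot() // hypothetical source of the values
//		ch <- MustNewConstSummary(
//			c.desc, // a *Desc without variable labels in this sketch
//			count,  // uint64 observation count
//			sum,    // float64 sum of observations
//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
//		)
//	}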
@@ -1,388 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"math"
	"math/rand"
	"sort"
	"sync"
	"testing"
	"testing/quick"
	"time"

	dto "github.com/prometheus/client_model/go"
)

func TestSummaryWithDefaultObjectives(t *testing.T) {
	reg := NewRegistry()
	summaryWithDefaultObjectives := NewSummary(SummaryOpts{
		Name: "default_objectives",
		Help: "Test help.",
	})
	if err := reg.Register(summaryWithDefaultObjectives); err != nil {
		t.Error(err)
	}

	m := &dto.Metric{}
	if err := summaryWithDefaultObjectives.Write(m); err != nil {
		t.Error(err)
	}
	if len(m.GetSummary().Quantile) != len(DefObjectives) {
		t.Error("expected default objectives in summary")
	}
}

func TestSummaryWithoutObjectives(t *testing.T) {
	reg := NewRegistry()
	summaryWithEmptyObjectives := NewSummary(SummaryOpts{
		Name:       "empty_objectives",
		Help:       "Test help.",
		Objectives: map[float64]float64{},
	})
	if err := reg.Register(summaryWithEmptyObjectives); err != nil {
		t.Error(err)
	}

	m := &dto.Metric{}
	if err := summaryWithEmptyObjectives.Write(m); err != nil {
		t.Error(err)
	}
	if len(m.GetSummary().Quantile) != 0 {
		t.Error("expected no objectives in summary")
	}
}

func benchmarkSummaryObserve(w int, b *testing.B) {
	b.StopTimer()

	wg := new(sync.WaitGroup)
	wg.Add(w)

	g := new(sync.WaitGroup)
	g.Add(1)

	s := NewSummary(SummaryOpts{})

	for i := 0; i < w; i++ {
		go func() {
			g.Wait()

			for i := 0; i < b.N; i++ {
				s.Observe(float64(i))
			}

			wg.Done()
		}()
	}

	b.StartTimer()
	g.Done()
	wg.Wait()
}

func BenchmarkSummaryObserve1(b *testing.B) {
	benchmarkSummaryObserve(1, b)
}

func BenchmarkSummaryObserve2(b *testing.B) {
	benchmarkSummaryObserve(2, b)
}

func BenchmarkSummaryObserve4(b *testing.B) {
	benchmarkSummaryObserve(4, b)
}

func BenchmarkSummaryObserve8(b *testing.B) {
	benchmarkSummaryObserve(8, b)
}

func benchmarkSummaryWrite(w int, b *testing.B) {
	b.StopTimer()

	wg := new(sync.WaitGroup)
	wg.Add(w)

	g := new(sync.WaitGroup)
	g.Add(1)

	s := NewSummary(SummaryOpts{})

	for i := 0; i < 1000000; i++ {
		s.Observe(float64(i))
	}

	for j := 0; j < w; j++ {
		outs := make([]dto.Metric, b.N)

		go func(o []dto.Metric) {
			g.Wait()

			for i := 0; i < b.N; i++ {
				s.Write(&o[i])
			}

			wg.Done()
		}(outs)
	}

	b.StartTimer()
	g.Done()
	wg.Wait()
}

func BenchmarkSummaryWrite1(b *testing.B) {
	benchmarkSummaryWrite(1, b)
}

func BenchmarkSummaryWrite2(b *testing.B) {
	benchmarkSummaryWrite(2, b)
}

func BenchmarkSummaryWrite4(b *testing.B) {
	benchmarkSummaryWrite(4, b)
}

func BenchmarkSummaryWrite8(b *testing.B) {
	benchmarkSummaryWrite(8, b)
}

func TestSummaryConcurrency(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}

	rand.Seed(42)

	it := func(n uint32) bool {
		mutations := int(n%1e4 + 1e4)
		concLevel := int(n%5 + 1)
		total := mutations * concLevel

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sum := NewSummary(SummaryOpts{
			Name:       "test_summary",
			Help:       "helpless",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		})

		allVars := make([]float64, total)
		var sampleSum float64
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			for j := 0; j < mutations; j++ {
				v := rand.NormFloat64()
				vals[j] = v
				allVars[i*mutations+j] = v
				sampleSum += v
			}

			go func(vals []float64) {
				start.Wait()
				for _, v := range vals {
					sum.Observe(v)
				}
				end.Done()
			}(vals)
		}
		sort.Float64s(allVars)
		start.Done()
		end.Wait()

		m := &dto.Metric{}
		sum.Write(m)
		if got, want := int(*m.Summary.SampleCount), total; got != want {
			t.Errorf("got sample count %d, want %d", got, want)
		}
		if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
			t.Errorf("got sample sum %f, want %f", got, want)
		}

		objectives := make([]float64, 0, len(DefObjectives))
		for qu := range DefObjectives {
			objectives = append(objectives, qu)
		}
		sort.Float64s(objectives)

		for i, wantQ := range objectives {
			ε := DefObjectives[wantQ]
			gotQ := *m.Summary.Quantile[i].Quantile
			gotV := *m.Summary.Quantile[i].Value
			min, max := getBounds(allVars, wantQ, ε)
			if gotQ != wantQ {
				t.Errorf("got quantile %f, want %f", gotQ, wantQ)
			}
			if gotV < min || gotV > max {
				t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max)
			}
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Error(err)
	}
}

func TestSummaryVecConcurrency(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}

	rand.Seed(42)

	objectives := make([]float64, 0, len(DefObjectives))
	for qu := range DefObjectives {
		objectives = append(objectives, qu)
	}
	sort.Float64s(objectives)

	it := func(n uint32) bool {
		mutations := int(n%1e4 + 1e4)
		concLevel := int(n%7 + 1)
		vecLength := int(n%3 + 1)

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sum := NewSummaryVec(
			SummaryOpts{
				Name:       "test_summary",
				Help:       "helpless",
				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
			},
			[]string{"label"},
		)

		allVars := make([][]float64, vecLength)
		sampleSums := make([]float64, vecLength)
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			picks := make([]int, mutations)
			for j := 0; j < mutations; j++ {
				v := rand.NormFloat64()
				vals[j] = v
				pick := rand.Intn(vecLength)
				picks[j] = pick
				allVars[pick] = append(allVars[pick], v)
				sampleSums[pick] += v
			}

			go func(vals []float64) {
				start.Wait()
				for i, v := range vals {
					sum.WithLabelValues(string('A' + picks[i])).Observe(v)
				}
				end.Done()
			}(vals)
		}
		for _, vars := range allVars {
			sort.Float64s(vars)
		}
		start.Done()
		end.Wait()

		for i := 0; i < vecLength; i++ {
			m := &dto.Metric{}
			s := sum.WithLabelValues(string('A' + i))
			s.Write(m)
			if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
				t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
			}
			if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
				t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want)
			}
			for j, wantQ := range objectives {
				ε := DefObjectives[wantQ]
				gotQ := *m.Summary.Quantile[j].Quantile
				gotV := *m.Summary.Quantile[j].Value
				min, max := getBounds(allVars[i], wantQ, ε)
				if gotQ != wantQ {
					t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ)
				}
				if gotV < min || gotV > max {
					t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max)
				}
			}
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Error(err)
	}
}

func TestSummaryDecay(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
		// More because it depends on timing than because it is particularly long...
	}

	sum := NewSummary(SummaryOpts{
		Name:       "test_summary",
		Help:       "helpless",
		MaxAge:     100 * time.Millisecond,
		Objectives: map[float64]float64{0.1: 0.001},
		AgeBuckets: 10,
	})

	m := &dto.Metric{}
	i := 0
	tick := time.NewTicker(time.Millisecond)
	for range tick.C {
		i++
		sum.Observe(float64(i))
		if i%10 == 0 {
			sum.Write(m)
			if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
				t.Errorf("%d. got %f, want %f", i, got, want)
			}
			m.Reset()
		}
		if i >= 1000 {
			break
		}
	}
	tick.Stop()
	// Wait for MaxAge without observations and make sure quantiles are NaN.
	time.Sleep(100 * time.Millisecond)
	sum.Write(m)
	if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
		t.Errorf("got %f, want NaN after expiration", got)
	}
}

func getBounds(vars []float64, q, ε float64) (min, max float64) {
	// TODO(beorn7): This currently tolerates an error of up to 2*ε. The
	// error must be at most ε, but for some reason, it's sometimes slightly
	// higher. That's a bug.
	n := float64(len(vars))
	lower := int((q - 2*ε) * n)
	upper := int(math.Ceil((q + 2*ε) * n))
	min = vars[0]
	if lower > 1 {
		min = vars[lower-1]
	}
	max = vars[len(vars)-1]
	if upper < len(vars) {
		max = vars[upper-1]
	}
	return
}
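
The tests above pin down the Summary objectives behaviour: a nil Objectives field falls back to DefObjectives, while an empty map disables quantile estimation entirely. For reference, a minimal sketch of driving the same API from application code, assuming the usual github.com/prometheus/client_golang/prometheus import path (the metric name is illustrative, not taken from this repository):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Explicit objectives: target quantile -> allowed absolute error.
	lat := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "request_latency_seconds",
		Help:       "Request latency.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	for i := 0; i < 1000; i++ {
		lat.Observe(float64(i) / 1000)
	}
	// Read back the protobuf representation, as the tests above do.
	m := &dto.Metric{}
	if err := lat.Write(m); err != nil {
		panic(err)
	}
	for _, q := range m.GetSummary().GetQuantile() {
		fmt.Printf("q%.2f = %f\n", q.GetQuantile(), q.GetValue())
	}
}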

@@ -1,74 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import "time"

// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
	Observe(float64)
}

// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)

// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
	f(value)
}

// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
	begin    time.Time
	observer Observer
}

// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the
// following way:
//    func TimeMe() {
//        timer := NewTimer(myHistogram)
//        defer timer.ObserveDuration()
//        // Do actual work.
//    }
func NewTimer(o Observer) *Timer {
	return &Timer{
		begin:    time.Now(),
		observer: o,
	}
}

// ObserveDuration records the duration passed since the Timer was created with
// NewTimer. It calls the Observe method of the Observer provided during
// construction with the duration in seconds as an argument. ObserveDuration is
// usually called with a defer statement.
func (t *Timer) ObserveDuration() {
	if t.observer != nil {
		t.observer.Observe(time.Since(t.begin).Seconds())
	}
}
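
Observer, ObserverFunc and Timer combine into the usual timing idiom: wrap an Observer in a Timer and defer ObserveDuration. A minimal sketch, assuming the standard client_golang import path and illustrative metric names; a Summary satisfies Observer directly, while a Gauge is adapted through ObserverFunc(gauge.Set):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	dur = prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "task_duration_seconds",
		Help: "Time taken by doWork.",
	})
	last = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "task_last_duration_seconds",
		Help: "Duration of the most recent doWork call.",
	})
)

func doWork() {
	// Observe into the Summary directly and, via ObserverFunc, into the Gauge.
	sumTimer := prometheus.NewTimer(dur)
	gaugeTimer := prometheus.NewTimer(prometheus.ObserverFunc(last.Set))
	defer sumTimer.ObserveDuration()
	defer gaugeTimer.ObserveDuration()

	time.Sleep(10 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(dur, last)
	doWork()
}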

@@ -1,152 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"testing"

	dto "github.com/prometheus/client_model/go"
)

func TestTimerObserve(t *testing.T) {
	var (
		his   = NewHistogram(HistogramOpts{Name: "test_histogram"})
		sum   = NewSummary(SummaryOpts{Name: "test_summary"})
		gauge = NewGauge(GaugeOpts{Name: "test_gauge"})
	)

	func() {
		hisTimer := NewTimer(his)
		sumTimer := NewTimer(sum)
		gaugeTimer := NewTimer(ObserverFunc(gauge.Set))
		defer hisTimer.ObserveDuration()
		defer sumTimer.ObserveDuration()
		defer gaugeTimer.ObserveDuration()
	}()

	m := &dto.Metric{}
	his.Write(m)
	if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for histogram, got %d", want, got)
	}
	m.Reset()
	sum.Write(m)
	if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got {
		t.Errorf("want %d observations for summary, got %d", want, got)
	}
	m.Reset()
	gauge.Write(m)
	if got := m.GetGauge().GetValue(); got <= 0 {
		t.Errorf("want value > 0 for gauge, got %f", got)
	}
}

func TestTimerEmpty(t *testing.T) {
	emptyTimer := NewTimer(nil)
	emptyTimer.ObserveDuration()
	// Do nothing, just demonstrate it works without panic.
}

func TestTimerConditionalTiming(t *testing.T) {
	var (
		his = NewHistogram(HistogramOpts{
			Name: "test_histogram",
		})
		timeMe = true
		m      = &dto.Metric{}
	)

	timedFunc := func() {
		timer := NewTimer(ObserverFunc(func(v float64) {
			if timeMe {
				his.Observe(v)
			}
		}))
		defer timer.ObserveDuration()
	}

	timedFunc() // This will time.
	his.Write(m)
	if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for histogram, got %d", want, got)
	}

	timeMe = false
	timedFunc() // This will not time again.
	m.Reset()
	his.Write(m)
	if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for histogram, got %d", want, got)
	}
}

func TestTimerByOutcome(t *testing.T) {
	var (
		his = NewHistogramVec(
			HistogramOpts{Name: "test_histogram"},
			[]string{"outcome"},
		)
		outcome = "foo"
		m       = &dto.Metric{}
	)

	timedFunc := func() {
		timer := NewTimer(ObserverFunc(func(v float64) {
			his.WithLabelValues(outcome).Observe(v)
		}))
		defer timer.ObserveDuration()

		if outcome == "foo" {
			outcome = "bar"
			return
		}
		outcome = "foo"
	}

	timedFunc()
	his.WithLabelValues("foo").Write(m)
	if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
	}
	m.Reset()
	his.WithLabelValues("bar").Write(m)
	if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
	}

	timedFunc()
	m.Reset()
	his.WithLabelValues("foo").Write(m)
	if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
	}
	m.Reset()
	his.WithLabelValues("bar").Write(m)
	if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
	}

	timedFunc()
	m.Reset()
	his.WithLabelValues("foo").Write(m)
	if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
	}
	m.Reset()
	his.WithLabelValues("bar").Write(m)
	if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got {
		t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
	}
}
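
TestTimerByOutcome above is the pattern for choosing a label only after the timed call has finished: the ObserverFunc closure reads state that the function sets before returning. A sketch of the same idea at application level, with an assumed import path and illustrative metric, label, and function names:

package main

import (
	"errors"
	"math/rand"

	"github.com/prometheus/client_golang/prometheus"
)

var opDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "op_duration_seconds",
		Help: "Operation duration, partitioned by outcome.",
	},
	[]string{"outcome"},
)

// timedOp decides the "outcome" label only once the result is known, which is
// why the ObserverFunc closure reads err after the operation has finished.
func timedOp() error {
	var err error
	timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
		outcome := "success"
		if err != nil {
			outcome = "failure"
		}
		opDuration.WithLabelValues(outcome).Observe(v)
	}))
	defer timer.ObserveDuration()

	if rand.Intn(2) == 0 { // stand-in for real work that may fail
		err = errors.New("boom")
	}
	return err
}

func main() {
	prometheus.MustRegister(opDuration)
	_ = timedOp()
}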

@@ -1,143 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

// Untyped is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// An Untyped metric works the same as a Gauge. The only difference is that no
// type information is implied.
//
// To create Untyped instances, use NewUntyped.
//
// Deprecated: The Untyped type is deprecated because it doesn't make sense in
// direct instrumentation. If you need to mirror an external metric of unknown
// type (usually while writing exporters), use MustNewConstMetric to create an
// untyped metric instance on the fly.
type Untyped interface {
	Metric
	Collector

	// Set sets the Untyped metric to an arbitrary value.
	Set(float64)
	// Inc increments the Untyped metric by 1.
	Inc()
	// Dec decrements the Untyped metric by 1.
	Dec()
	// Add adds the given value to the Untyped metric. (The value can be
	// negative, resulting in a decrease.)
	Add(float64)
	// Sub subtracts the given value from the Untyped metric. (The value can
	// be negative, resulting in an increase.)
	Sub(float64)
}

// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts

// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
func NewUntyped(opts UntypedOpts) Untyped {
	return newValue(NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	), UntypedValue, 0)
}

// UntypedVec is a Collector that bundles a set of Untyped metrics that all
// share the same Desc, but have different values for their variable
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
	*MetricVec
}

// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
// partitioned by the given label names. At least one label name must be
// provided.
func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		labelNames,
		opts.ConstLabels,
	)
	return &UntypedVec{
		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
			return newValue(desc, UntypedValue, 0, lvs...)
		}),
	}
}

// GetMetricWithLabelValues replaces the method of the same name in
// MetricVec. The difference is that this method returns an Untyped and not a
// Metric so that no type conversion is required.
func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
	if metric != nil {
		return metric.(Untyped), err
	}
	return nil, err
}

// GetMetricWith replaces the method of the same name in MetricVec. The
// difference is that this method returns an Untyped and not a Metric so that no
// type conversion is required.
func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
	metric, err := m.MetricVec.GetMetricWith(labels)
	if metric != nil {
		return metric.(Untyped), err
	}
	return nil, err
}

// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
//     myVec.WithLabelValues("404", "GET").Add(42)
func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
	return m.MetricVec.WithLabelValues(lvs...).(Untyped)
}

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *UntypedVec) With(labels Labels) Untyped {
	return m.MetricVec.With(labels).(Untyped)
}

// UntypedFunc is an Untyped whose value is determined at collect time by
// calling a provided function.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
	Metric
	Collector
}

// NewUntypedFunc creates a new UntypedFunc based on the provided
// UntypedOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where an UntypedFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe.
func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
	return newValueFunc(NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	), UntypedValue, function)
}
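
As the deprecation note above says, direct use of Untyped is discouraged; an exporter that mirrors a value of unknown type is expected to emit a const metric from its Collect method instead. A minimal sketch of that replacement, assuming the standard client_golang import path and an illustrative metric name:

package main

import "github.com/prometheus/client_golang/prometheus"

// externalDesc describes a value mirrored from an external system whose type
// is unknown, the use case the deprecation note points at.
var externalDesc = prometheus.NewDesc(
	"external_queue_depth",
	"Queue depth as reported by an external system.",
	nil, nil,
)

type externalCollector struct{}

func (c externalCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- externalDesc
}

func (c externalCollector) Collect(ch chan<- prometheus.Metric) {
	// In a real exporter this value would be scraped from the external system.
	ch <- prometheus.MustNewConstMetric(externalDesc, prometheus.UntypedValue, 42)
}

func main() {
	prometheus.MustRegister(externalCollector{})
}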

@@ -1,239 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"errors"
	"fmt"
	"math"
	"sort"
	"sync/atomic"
	"time"

	dto "github.com/prometheus/client_model/go"

	"github.com/golang/protobuf/proto"
)

// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int

// Possible values for the ValueType enum.
const (
	_ ValueType = iota
	CounterValue
	GaugeValue
	UntypedValue
)

var errInconsistentCardinality = errors.New("inconsistent label cardinality")

// value is a generic metric for simple values. It implements Metric, Collector,
// Counter, Gauge, and Untyped. Its effective type is determined by
// ValueType. This is a low-level building block used by the library to back the
// implementations of Counter, Gauge, and Untyped.
type value struct {
	// valBits contains the bits of the represented float64 value. It has
	// to go first in the struct to guarantee alignment for atomic
	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	valBits uint64

	selfCollector

	desc       *Desc
	valType    ValueType
	labelPairs []*dto.LabelPair
}

// newValue returns a newly allocated value with the given Desc, ValueType,
// sample value and label values. It panics if the number of label
// values is different from the number of variable labels in Desc.
func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
	if len(labelValues) != len(desc.variableLabels) {
		panic(errInconsistentCardinality)
	}
	result := &value{
		desc:       desc,
		valType:    valueType,
		valBits:    math.Float64bits(val),
		labelPairs: makeLabelPairs(desc, labelValues),
	}
	result.init(result)
	return result
}

func (v *value) Desc() *Desc {
	return v.desc
}

func (v *value) Set(val float64) {
	atomic.StoreUint64(&v.valBits, math.Float64bits(val))
}

func (v *value) SetToCurrentTime() {
	v.Set(float64(time.Now().UnixNano()) / 1e9)
}

func (v *value) Inc() {
	v.Add(1)
}

func (v *value) Dec() {
	v.Add(-1)
}

func (v *value) Add(val float64) {
	for {
		oldBits := atomic.LoadUint64(&v.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
		if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
			return
		}
	}
}

func (v *value) Sub(val float64) {
	v.Add(val * -1)
}

func (v *value) Write(out *dto.Metric) error {
	val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
	return populateMetric(v.valType, val, v.labelPairs, out)
}

// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
	selfCollector

	desc       *Desc
	valType    ValueType
	function   func() float64
	labelPairs []*dto.LabelPair
}

// newValueFunc returns a newly allocated valueFunc with the given Desc and
// ValueType. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a valueFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe.
func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
	result := &valueFunc{
		desc:       desc,
		valType:    valueType,
		function:   function,
		labelPairs: makeLabelPairs(desc, nil),
	}
	result.init(result)
	return result
}

func (v *valueFunc) Desc() *Desc {
	return v.desc
}

func (v *valueFunc) Write(out *dto.Metric) error {
	return populateMetric(v.valType, v.function(), v.labelPairs, out)
}

// NewConstMetric returns a metric with one fixed value that cannot be
// changed. Users of this package will not have much use for it in regular
// operations. However, when implementing custom Collectors, it is useful as a
// throw-away metric that is generated on the fly to send it to Prometheus in
// the Collect method. NewConstMetric returns an error if the length of
// labelValues is not consistent with the variable labels in Desc.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
	if len(desc.variableLabels) != len(labelValues) {
		return nil, errInconsistentCardinality
	}
	return &constMetric{
		desc:       desc,
		valType:    valueType,
		val:        value,
		labelPairs: makeLabelPairs(desc, labelValues),
	}, nil
}

// MustNewConstMetric is a version of NewConstMetric that panics where
// NewConstMetric would have returned an error.
func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
	m, err := NewConstMetric(desc, valueType, value, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}

type constMetric struct {
	desc       *Desc
	valType    ValueType
	val        float64
	labelPairs []*dto.LabelPair
}

func (m *constMetric) Desc() *Desc {
	return m.desc
}

func (m *constMetric) Write(out *dto.Metric) error {
	return populateMetric(m.valType, m.val, m.labelPairs, out)
}

func populateMetric(
	t ValueType,
	v float64,
	labelPairs []*dto.LabelPair,
	m *dto.Metric,
) error {
	m.Label = labelPairs
	switch t {
	case CounterValue:
		m.Counter = &dto.Counter{Value: proto.Float64(v)}
	case GaugeValue:
		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
	case UntypedValue:
		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
	default:
		return fmt.Errorf("encountered unknown type %v", t)
	}
	return nil
}

func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
	if totalLen == 0 {
		// Super fast path.
		return nil
	}
	if len(desc.variableLabels) == 0 {
		// Moderately fast path.
		return desc.constLabelPairs
	}
	labelPairs := make([]*dto.LabelPair, 0, totalLen)
	for i, n := range desc.variableLabels {
		labelPairs = append(labelPairs, &dto.LabelPair{
			Name:  proto.String(n),
			Value: proto.String(labelValues[i]),
		})
	}
	for _, lp := range desc.constLabelPairs {
		labelPairs = append(labelPairs, lp)
	}
	sort.Sort(LabelPairSorter(labelPairs))
	return labelPairs
}
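
value.Add above is a lock-free float64 accumulator: the float is kept as its IEEE-754 bit pattern in a uint64 and updated with a compare-and-swap loop. A standalone sketch of the same technique using only the standard library:

package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// atomicFloat64 mirrors the technique used by value.Add: the float is stored
// as its bit pattern so it can be updated without a mutex.
type atomicFloat64 struct {
	bits uint64
}

func (a *atomicFloat64) Add(delta float64) {
	for {
		oldBits := atomic.LoadUint64(&a.bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + delta)
		// Retry if another goroutine updated the value in the meantime.
		if atomic.CompareAndSwapUint64(&a.bits, oldBits, newBits) {
			return
		}
	}
}

func (a *atomicFloat64) Load() float64 {
	return math.Float64frombits(atomic.LoadUint64(&a.bits))
}

func main() {
	var v atomicFloat64
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v.Add(0.5)
		}()
	}
	wg.Wait()
	fmt.Println(v.Load()) // 50
}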

@@ -1,404 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"fmt"
	"sync"

	"github.com/prometheus/common/model"
)

// MetricVec is a Collector to bundle metrics of the same name that
// differ in their label values. MetricVec is usually not used directly but as a
// building block for implementations of vectors of a given metric
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
// provided in this package.
type MetricVec struct {
	mtx      sync.RWMutex // Protects the children.
	children map[uint64][]metricWithLabelValues
	desc     *Desc

	newMetric   func(labelValues ...string) Metric
	hashAdd     func(h uint64, s string) uint64 // replace hash function for testing collision handling
	hashAddByte func(h uint64, b byte) uint64
}

// newMetricVec returns an initialized MetricVec. The concrete value is
// returned for embedding into another struct.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
	return &MetricVec{
		children:    map[uint64][]metricWithLabelValues{},
		desc:        desc,
		newMetric:   newMetric,
		hashAdd:     hashAdd,
		hashAddByte: hashAddByte,
	}
}

// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
	values []string
	metric Metric
}

// Describe implements Collector. The length of the returned slice
// is always one.
func (m *MetricVec) Describe(ch chan<- *Desc) {
	ch <- m.desc
}

// Collect implements Collector.
func (m *MetricVec) Collect(ch chan<- Metric) {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	for _, metrics := range m.children {
		for _, metric := range metrics {
			ch <- metric.metric
		}
	}
}

// GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created.
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it at its start value (e.g. a Summary or
// Histogram without any observations). See also the SummaryVec example.
//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the
// Metric will still exist, but it will not be exported anymore, even if a
// Metric with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
	h, err := m.hashLabelValues(lvs)
	if err != nil {
		return nil, err
	}

	return m.getOrCreateMetricWithLabelValues(h, lvs), nil
}

// GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of
// creating a Metric without using it and keeping the Metric for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc.
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
	h, err := m.hashLabels(labels)
	if err != nil {
		return nil, err
	}

	return m.getOrCreateMetricWithLabels(h, labels), nil
}

// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
// occurs. The method allows neat syntax like:
//     httpReqs.WithLabelValues("404", "POST").Inc()
func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
	metric, err := m.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return metric
}

// With works as GetMetricWith, but panics if an error occurs. The method allows
// neat syntax like:
//     httpReqs.With(Labels{"status": "404", "method": "POST"}).Inc()
func (m *MetricVec) With(labels Labels) Metric {
	metric, err := m.GetMetricWith(labels)
	if err != nil {
		panic(err)
	}
	return metric
}

// DeleteLabelValues removes the metric where the variable labels are the same
// as those passed in as labels (same order as the VariableLabels in Desc). It
// returns true if a metric was deleted.
//
// It is not an error if the number of label values is not the same as the
// number of VariableLabels in Desc. However, such inconsistent label count can
// never match an actual Metric, so the method will always return false in that
// case.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
// alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	h, err := m.hashLabelValues(lvs)
	if err != nil {
		return false
	}
	return m.deleteByHashWithLabelValues(h, lvs)
}

// Delete deletes the metric where the variable labels are the same as those
// passed in as labels. It returns true if a metric was deleted.
//
// It is not an error if the number and names of the Labels are inconsistent
// with those of the VariableLabels in the Desc of the MetricVec. However, such
// inconsistent Labels can never match an actual Metric, so the method will
// always return false in that case.
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	h, err := m.hashLabels(labels)
	if err != nil {
		return false
	}

	return m.deleteByHashWithLabels(h, labels)
}

// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
	metrics, ok := m.children[h]
	if !ok {
		return false
	}

	i := m.findMetricWithLabelValues(metrics, lvs)
	if i >= len(metrics) {
		return false
	}

	if len(metrics) > 1 {
		m.children[h] = append(metrics[:i], metrics[i+1:]...)
	} else {
		delete(m.children, h)
	}
	return true
}

// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use labels to select a metric and remove
// only that metric.
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
	metrics, ok := m.children[h]
	if !ok {
		return false
	}
	i := m.findMetricWithLabels(metrics, labels)
	if i >= len(metrics) {
		return false
	}

	if len(metrics) > 1 {
		m.children[h] = append(metrics[:i], metrics[i+1:]...)
	} else {
		delete(m.children, h)
	}
	return true
}

// Reset deletes all metrics in this vector.
func (m *MetricVec) Reset() {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	for h := range m.children {
		delete(m.children, h)
	}
}

func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
	if len(vals) != len(m.desc.variableLabels) {
		return 0, errInconsistentCardinality
	}
	h := hashNew()
	for _, val := range vals {
		h = m.hashAdd(h, val)
		h = m.hashAddByte(h, model.SeparatorByte)
	}
	return h, nil
}

func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
	if len(labels) != len(m.desc.variableLabels) {
		return 0, errInconsistentCardinality
	}
	h := hashNew()
	for _, label := range m.desc.variableLabels {
		val, ok := labels[label]
		if !ok {
			return 0, fmt.Errorf("label name %q missing in label map", label)
		}
		h = m.hashAdd(h, val)
		h = m.hashAddByte(h, model.SeparatorByte)
	}
	return h, nil
}

// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
	m.mtx.RLock()
	metric, ok := m.getMetricWithLabelValues(hash, lvs)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	metric, ok = m.getMetricWithLabelValues(hash, lvs)
	if !ok {
		// Copy to avoid allocation in case we don't go down this code path.
		copiedLVs := make([]string, len(lvs))
		copy(copiedLVs, lvs)
		metric = m.newMetric(copiedLVs...)
		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
	}
	return metric
}

// getOrCreateMetricWithLabels retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
	m.mtx.RLock()
	metric, ok := m.getMetricWithLabels(hash, labels)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	metric, ok = m.getMetricWithLabels(hash, labels)
	if !ok {
		lvs := m.extractLabelValues(labels)
		metric = m.newMetric(lvs...)
		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
	}
	return metric
}

// getMetricWithLabelValues gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
	metrics, ok := m.children[h]
	if ok {
		if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
			return metrics[i].metric, true
		}
	}
	return nil, false
}

// getMetricWithLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
	metrics, ok := m.children[h]
	if ok {
		if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
			return metrics[i].metric, true
		}
	}
	return nil, false
}

// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
	for i, metric := range metrics {
		if m.matchLabelValues(metric.values, lvs) {
			return i
		}
	}
	return len(metrics)
}

// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
	for i, metric := range metrics {
		if m.matchLabels(metric.values, labels) {
			return i
		}
	}
	return len(metrics)
}

func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
	if len(values) != len(lvs) {
		return false
	}
	for i, v := range values {
		if v != lvs[i] {
			return false
		}
	}
	return true
}

func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
	if len(labels) != len(values) {
		return false
	}
	for i, k := range m.desc.variableLabels {
		if values[i] != labels[k] {
			return false
		}
	}
	return true
}

func (m *MetricVec) extractLabelValues(labels Labels) []string {
	labelValues := make([]string, len(labels))
	for i, k := range m.desc.variableLabels {
		labelValues[i] = labels[k]
	}
	return labelValues
}
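
MetricVec backs the *Vec types, so the trade-offs described in its comments above (positional WithLabelValues versus the Labels map, plus the error-returning getters) are what callers see through CounterVec and friends. A short usage sketch, assuming the standard client_golang import path and illustrative metric and label names:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)

	// Positional form: fast, but argument order must match the label names.
	httpReqs.WithLabelValues("404", "POST").Inc()

	// Map form: more verbose, immune to ordering mistakes.
	httpReqs.With(prometheus.Labels{"code": "200", "method": "GET"}).Add(3)

	// Error-returning form, useful when label values come from untrusted input;
	// here only one of the two required values is given.
	if _, err := httpReqs.GetMetricWithLabelValues("500"); err != nil {
		fmt.Println("label cardinality mismatch:", err)
	}

	// Dropping a child removes it from future scrapes.
	httpReqs.DeleteLabelValues("404", "POST")
}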

@@ -1,312 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"fmt"
	"testing"

	dto "github.com/prometheus/client_model/go"
)

func TestDelete(t *testing.T) {
	vec := NewUntypedVec(
		UntypedOpts{
			Name: "test",
			Help: "helpless",
		},
		[]string{"l1", "l2"},
	)
	testDelete(t, vec)
}

func TestDeleteWithCollisions(t *testing.T) {
	vec := NewUntypedVec(
		UntypedOpts{
			Name: "test",
			Help: "helpless",
		},
		[]string{"l1", "l2"},
	)
	vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
	vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
	testDelete(t, vec)
}

func testDelete(t *testing.T, vec *UntypedVec) {
	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}

func TestDeleteLabelValues(t *testing.T) {
	vec := NewUntypedVec(
		UntypedOpts{
			Name: "test",
			Help: "helpless",
		},
		[]string{"l1", "l2"},
	)
	testDeleteLabelValues(t, vec)
}

func TestDeleteLabelValuesWithCollisions(t *testing.T) {
	vec := NewUntypedVec(
		UntypedOpts{
			Name: "test",
			Help: "helpless",
		},
		[]string{"l1", "l2"},
	)
	vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
	vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
	testDeleteLabelValues(t, vec)
}

func testDeleteLabelValues(t *testing.T, vec *UntypedVec) {
	if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	vec.With(Labels{"l1": "v1", "l2": "v3"}).(Untyped).Set(42) // Add junk data for collision.
	if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	// Delete out of order.
	if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.DeleteLabelValues("v1"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}

func TestMetricVec(t *testing.T) {
	vec := NewUntypedVec(
		UntypedOpts{
			Name: "test",
			Help: "helpless",
		},
		[]string{"l1", "l2"},
	)
	testMetricVec(t, vec)
}

func TestMetricVecWithCollisions(t *testing.T) {
	vec := NewUntypedVec(
		UntypedOpts{
			Name: "test",
			Help: "helpless",
		},
		[]string{"l1", "l2"},
	)
	vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
	vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
	testMetricVec(t, vec)
}

func testMetricVec(t *testing.T, vec *UntypedVec) {
	vec.Reset() // Actually test Reset now!

	var pair [2]string
	// Keep track of metrics.
	expected := map[[2]string]int{}

	for i := 0; i < 1000; i++ {
		pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples.
		expected[pair]++
		vec.WithLabelValues(pair[0], pair[1]).Inc()

		expected[[2]string{"v1", "v2"}]++
		vec.WithLabelValues("v1", "v2").(Untyped).Inc()
	}

	var total int
	for _, metrics := range vec.children {
		for _, metric := range metrics {
			total++
			copy(pair[:], metric.values)

			var metricOut dto.Metric
			if err := metric.metric.Write(&metricOut); err != nil {
				t.Fatal(err)
			}
			actual := *metricOut.Untyped.Value

			var actualPair [2]string
			for i, label := range metricOut.Label {
				actualPair[i] = *label.Value
			}

			// Test output pair against metric.values to ensure we've selected
			// the right one. We check this to ensure the below check means
			// anything at all.
			if actualPair != pair {
				t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair)
			}

			if actual != float64(expected[pair]) {
				t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair])
			}
		}
	}

	if total != len(expected) {
		t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected))
	}

	vec.Reset()

	if len(vec.children) > 0 {
		t.Fatalf("reset failed")
	}
}

func TestCounterVecEndToEndWithCollision(t *testing.T) {
	vec := NewCounterVec(
		CounterOpts{
			Name: "test",
			Help: "helpless",
		},
		[]string{"labelname"},
	)
	vec.WithLabelValues("77kepQFQ8Kl").Inc()
	vec.WithLabelValues("!0IC=VloaY").Add(2)

	m := &dto.Metric{}
	if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil {
		t.Fatal(err)
	}
	if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want {
		t.Errorf("got label value %q, want %q", got, want)
	}
	if got, want := m.GetCounter().GetValue(), 1.; got != want {
		t.Errorf("got value %f, want %f", got, want)
	}
	m.Reset()
	if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil {
		t.Fatal(err)
	}
	if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want {
		t.Errorf("got label value %q, want %q", got, want)
	}
	if got, want := m.GetCounter().GetValue(), 2.; got != want {
		t.Errorf("got value %f, want %f", got, want)
	}
}

func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) {
	benchmarkMetricVecWithLabelValues(b, map[string][]string{
		"l1": {"onevalue"},
		"l2": {"twovalue"},
	})
}

func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10)
}

func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10)
}

func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100)
}

func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100)
}
func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) {
|
|
||||||
benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000)
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) {
|
|
||||||
labels := map[string][]string{}
|
|
||||||
|
|
||||||
for i := 0; i < nkeys; i++ {
|
|
||||||
var (
|
|
||||||
k = fmt.Sprintf("key-%v", i)
|
|
||||||
vs = make([]string, 0, nvalues)
|
|
||||||
)
|
|
||||||
for j := 0; j < nvalues; j++ {
|
|
||||||
vs = append(vs, fmt.Sprintf("value-%v", j))
|
|
||||||
}
|
|
||||||
labels[k] = vs
|
|
||||||
}
|
|
||||||
|
|
||||||
benchmarkMetricVecWithLabelValues(b, labels)
|
|
||||||
}
|
|
||||||
|
|
||||||
func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) {
|
|
||||||
var keys []string
|
|
||||||
for k := range labels { // Map order dependent, who cares though.
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
values := make([]string, len(labels)) // Value cache for permutations.
|
|
||||||
vec := NewUntypedVec(
|
|
||||||
UntypedOpts{
|
|
||||||
Name: "test",
|
|
||||||
Help: "helpless",
|
|
||||||
},
|
|
||||||
keys,
|
|
||||||
)
|
|
||||||
|
|
||||||
b.ReportAllocs()
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
// Varies input across provide map entries based on key size.
|
|
||||||
for j, k := range keys {
|
|
||||||
candidates := labels[k]
|
|
||||||
values[j] = candidates[i%len(candidates)]
|
|
||||||
}
|
|
||||||
|
|
||||||
vec.WithLabelValues(values...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
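(Editorial aside, not part of the diff: the tests above exercise the vector API of the Prometheus Go client. The following is a minimal sketch of how that API is typically used outside of tests; the metric name and label values are illustrative assumptions, not taken from this commit.)

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A counter vector partitioned by one label, mirroring the vectors built in the tests above.
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total", // illustrative name
			Help: "Requests seen, partitioned by handler.",
		},
		[]string{"handler"},
	)
	prometheus.MustRegister(requests)

	requests.WithLabelValues("index").Inc()
	requests.WithLabelValues("index").Add(2)

	// DeleteLabelValues reports whether a child metric existed for these label values.
	fmt.Println(requests.DeleteLabelValues("index"))   // true
	fmt.Println(requests.DeleteLabelValues("missing")) // false
}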
@@ -1,364 +0,0 @@
// Code generated by protoc-gen-go.
// source: metrics.proto
// DO NOT EDIT!

/*
Package io_prometheus_client is a generated protocol buffer package.

It is generated from these files:
	metrics.proto

It has these top-level messages:
	LabelPair
	Gauge
	Counter
	Quantile
	Summary
	Untyped
	Histogram
	Bucket
	Metric
	MetricFamily
*/
package io_prometheus_client

import proto "github.com/golang/protobuf/proto"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = math.Inf

type MetricType int32

const (
	MetricType_COUNTER   MetricType = 0
	MetricType_GAUGE     MetricType = 1
	MetricType_SUMMARY   MetricType = 2
	MetricType_UNTYPED   MetricType = 3
	MetricType_HISTOGRAM MetricType = 4
)

var MetricType_name = map[int32]string{
	0: "COUNTER",
	1: "GAUGE",
	2: "SUMMARY",
	3: "UNTYPED",
	4: "HISTOGRAM",
}
var MetricType_value = map[string]int32{
	"COUNTER":   0,
	"GAUGE":     1,
	"SUMMARY":   2,
	"UNTYPED":   3,
	"HISTOGRAM": 4,
}

func (x MetricType) Enum() *MetricType {
	p := new(MetricType)
	*p = x
	return p
}
func (x MetricType) String() string {
	return proto.EnumName(MetricType_name, int32(x))
}
func (x *MetricType) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
	if err != nil {
		return err
	}
	*x = MetricType(value)
	return nil
}

type LabelPair struct {
	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Value            *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *LabelPair) Reset()         { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage()    {}

func (m *LabelPair) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

func (m *LabelPair) GetValue() string {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return ""
}

type Gauge struct {
	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Gauge) Reset()         { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage()    {}

func (m *Gauge) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}

type Counter struct {
	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Counter) Reset()         { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage()    {}

func (m *Counter) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}

type Quantile struct {
	Quantile         *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
	Value            *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Quantile) Reset()         { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage()    {}

func (m *Quantile) GetQuantile() float64 {
	if m != nil && m.Quantile != nil {
		return *m.Quantile
	}
	return 0
}

func (m *Quantile) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}

type Summary struct {
	SampleCount      *uint64     `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
	SampleSum        *float64    `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
	Quantile         []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
	XXX_unrecognized []byte      `json:"-"`
}

func (m *Summary) Reset()         { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage()    {}

func (m *Summary) GetSampleCount() uint64 {
	if m != nil && m.SampleCount != nil {
		return *m.SampleCount
	}
	return 0
}

func (m *Summary) GetSampleSum() float64 {
	if m != nil && m.SampleSum != nil {
		return *m.SampleSum
	}
	return 0
}

func (m *Summary) GetQuantile() []*Quantile {
	if m != nil {
		return m.Quantile
	}
	return nil
}

type Untyped struct {
	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Untyped) Reset()         { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage()    {}

func (m *Untyped) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}

type Histogram struct {
	SampleCount      *uint64   `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
	SampleSum        *float64  `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
	Bucket           []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
	XXX_unrecognized []byte    `json:"-"`
}

func (m *Histogram) Reset()         { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage()    {}

func (m *Histogram) GetSampleCount() uint64 {
	if m != nil && m.SampleCount != nil {
		return *m.SampleCount
	}
	return 0
}

func (m *Histogram) GetSampleSum() float64 {
	if m != nil && m.SampleSum != nil {
		return *m.SampleSum
	}
	return 0
}

func (m *Histogram) GetBucket() []*Bucket {
	if m != nil {
		return m.Bucket
	}
	return nil
}

type Bucket struct {
	CumulativeCount  *uint64  `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
	UpperBound       *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Bucket) Reset()         { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage()    {}

func (m *Bucket) GetCumulativeCount() uint64 {
	if m != nil && m.CumulativeCount != nil {
		return *m.CumulativeCount
	}
	return 0
}

func (m *Bucket) GetUpperBound() float64 {
	if m != nil && m.UpperBound != nil {
		return *m.UpperBound
	}
	return 0
}

type Metric struct {
	Label            []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
	Gauge            *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
	Counter          *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
	Summary          *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
	Untyped          *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
	Histogram        *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
	TimestampMs      *int64       `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
	XXX_unrecognized []byte       `json:"-"`
}

func (m *Metric) Reset()         { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage()    {}

func (m *Metric) GetLabel() []*LabelPair {
	if m != nil {
		return m.Label
	}
	return nil
}

func (m *Metric) GetGauge() *Gauge {
	if m != nil {
		return m.Gauge
	}
	return nil
}

func (m *Metric) GetCounter() *Counter {
	if m != nil {
		return m.Counter
	}
	return nil
}

func (m *Metric) GetSummary() *Summary {
	if m != nil {
		return m.Summary
	}
	return nil
}

func (m *Metric) GetUntyped() *Untyped {
	if m != nil {
		return m.Untyped
	}
	return nil
}

func (m *Metric) GetHistogram() *Histogram {
	if m != nil {
		return m.Histogram
	}
	return nil
}

func (m *Metric) GetTimestampMs() int64 {
	if m != nil && m.TimestampMs != nil {
		return *m.TimestampMs
	}
	return 0
}

type MetricFamily struct {
	Name             *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Help             *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
	Type             *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
	Metric           []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
	XXX_unrecognized []byte      `json:"-"`
}

func (m *MetricFamily) Reset()         { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage()    {}

func (m *MetricFamily) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

func (m *MetricFamily) GetHelp() string {
	if m != nil && m.Help != nil {
		return *m.Help
	}
	return ""
}

func (m *MetricFamily) GetType() MetricType {
	if m != nil && m.Type != nil {
		return *m.Type
	}
	return MetricType_COUNTER
}

func (m *MetricFamily) GetMetric() []*Metric {
	if m != nil {
		return m.Metric
	}
	return nil
}

func init() {
	proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
}
@@ -1,167 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expfmt

import (
	"bytes"
	"compress/gzip"
	"io"
	"io/ioutil"
	"testing"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"

	dto "github.com/prometheus/client_model/go"
)

var parser TextParser

// Benchmarks to show how much penalty text format parsing actually inflicts.
//
// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.
//
// BenchmarkParseText      1000   1188535 ns/op   205085 B/op   6135 allocs/op
// BenchmarkParseTextGzip  1000   1376567 ns/op   246224 B/op   6151 allocs/op
// BenchmarkParseProto    10000    172790 ns/op    52258 B/op   1160 allocs/op
// BenchmarkParseProtoGzip 5000    324021 ns/op    94931 B/op   1211 allocs/op
// BenchmarkParseProtoMap 10000    187946 ns/op    58714 B/op   1203 allocs/op
//
// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.
// Without compression, it needs ~7x longer, but with compression (the more relevant scenario),
// the difference becomes less relevant, only ~4x.
//
// The test data contains 248 samples.

// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
// family DTOs.
func BenchmarkParseText(b *testing.B) {
	b.StopTimer()
	data, err := ioutil.ReadFile("testdata/text")
	if err != nil {
		b.Fatal(err)
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape
// into metric family DTOs.
func BenchmarkParseTextGzip(b *testing.B) {
	b.StopTimer()
	data, err := ioutil.ReadFile("testdata/text.gz")
	if err != nil {
		b.Fatal(err)
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		in, err := gzip.NewReader(bytes.NewReader(data))
		if err != nil {
			b.Fatal(err)
		}
		if _, err := parser.TextToMetricFamilies(in); err != nil {
			b.Fatal(err)
		}
	}
}

// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into
// metric family DTOs. Note that this does not build a map of metric families
// (as the text version does), because it is not required for Prometheus
// ingestion either. (However, it is required for the text-format parsing, as
// the metric family might be sprinkled all over the text, while the
// protobuf-format guarantees bundling at one place.)
func BenchmarkParseProto(b *testing.B) {
	b.StopTimer()
	data, err := ioutil.ReadFile("testdata/protobuf")
	if err != nil {
		b.Fatal(err)
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		family := &dto.MetricFamily{}
		in := bytes.NewReader(data)
		for {
			family.Reset()
			if _, err := pbutil.ReadDelimited(in, family); err != nil {
				if err == io.EOF {
					break
				}
				b.Fatal(err)
			}
		}
	}
}

// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped
// protobuf format.
func BenchmarkParseProtoGzip(b *testing.B) {
	b.StopTimer()
	data, err := ioutil.ReadFile("testdata/protobuf.gz")
	if err != nil {
		b.Fatal(err)
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		family := &dto.MetricFamily{}
		in, err := gzip.NewReader(bytes.NewReader(data))
		if err != nil {
			b.Fatal(err)
		}
		for {
			family.Reset()
			if _, err := pbutil.ReadDelimited(in, family); err != nil {
				if err == io.EOF {
					break
				}
				b.Fatal(err)
			}
		}
	}
}

// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed
// metric family DTOs into a map. This is not happening during Prometheus
// ingestion. It is just here to measure the overhead of that map creation and
// separate it from the overhead of the text format parsing.
func BenchmarkParseProtoMap(b *testing.B) {
	b.StopTimer()
	data, err := ioutil.ReadFile("testdata/protobuf")
	if err != nil {
		b.Fatal(err)
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		families := map[string]*dto.MetricFamily{}
		in := bytes.NewReader(data)
		for {
			family := &dto.MetricFamily{}
			if _, err := pbutil.ReadDelimited(in, family); err != nil {
				if err == io.EOF {
					break
				}
				b.Fatal(err)
			}
			families[family.GetName()] = family
		}
	}
}
@@ -1,429 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expfmt

import (
	"fmt"
	"io"
	"math"
	"mime"
	"net/http"

	dto "github.com/prometheus/client_model/go"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	"github.com/prometheus/common/model"
)

// Decoder types decode an input stream into metric families.
type Decoder interface {
	Decode(*dto.MetricFamily) error
}

// DecodeOptions contains options used by the Decoder and in sample extraction.
type DecodeOptions struct {
	// Timestamp is added to each value from the stream that has no explicit timestamp set.
	Timestamp model.Time
}

// ResponseFormat extracts the correct format from a HTTP response header.
// If no matching format can be found FormatUnknown is returned.
func ResponseFormat(h http.Header) Format {
	ct := h.Get(hdrContentType)

	mediatype, params, err := mime.ParseMediaType(ct)
	if err != nil {
		return FmtUnknown
	}

	const textType = "text/plain"

	switch mediatype {
	case ProtoType:
		if p, ok := params["proto"]; ok && p != ProtoProtocol {
			return FmtUnknown
		}
		if e, ok := params["encoding"]; ok && e != "delimited" {
			return FmtUnknown
		}
		return FmtProtoDelim

	case textType:
		if v, ok := params["version"]; ok && v != TextVersion {
			return FmtUnknown
		}
		return FmtText
	}

	return FmtUnknown
}

// NewDecoder returns a new decoder based on the given input format.
// If the input format does not imply otherwise, a text format decoder is returned.
func NewDecoder(r io.Reader, format Format) Decoder {
	switch format {
	case FmtProtoDelim:
		return &protoDecoder{r: r}
	}
	return &textDecoder{r: r}
}

// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
	r io.Reader
}

// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
	_, err := pbutil.ReadDelimited(d.r, v)
	if err != nil {
		return err
	}
	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
		return fmt.Errorf("invalid metric name %q", v.GetName())
	}
	for _, m := range v.GetMetric() {
		if m == nil {
			continue
		}
		for _, l := range m.GetLabel() {
			if l == nil {
				continue
			}
			if !model.LabelValue(l.GetValue()).IsValid() {
				return fmt.Errorf("invalid label value %q", l.GetValue())
			}
			if !model.LabelName(l.GetName()).IsValid() {
				return fmt.Errorf("invalid label name %q", l.GetName())
			}
		}
	}
	return nil
}

// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
	r    io.Reader
	p    TextParser
	fams []*dto.MetricFamily
}

// Decode implements the Decoder interface.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
	if len(d.fams) == 0 {
		// No cached metric families, read everything and parse metrics.
		fams, err := d.p.TextToMetricFamilies(d.r)
		if err != nil {
			return err
		}
		if len(fams) == 0 {
			return io.EOF
		}
		d.fams = make([]*dto.MetricFamily, 0, len(fams))
		for _, f := range fams {
			d.fams = append(d.fams, f)
		}
	}

	*v = *d.fams[0]
	d.fams = d.fams[1:]

	return nil
}

// SampleDecoder wraps a Decoder to extract samples from the metric families
// decoded by the wrapped Decoder.
type SampleDecoder struct {
	Dec  Decoder
	Opts *DecodeOptions

	f dto.MetricFamily
}

// Decode calls the Decode method of the wrapped Decoder and then extracts the
// samples from the decoded MetricFamily into the provided model.Vector.
func (sd *SampleDecoder) Decode(s *model.Vector) error {
	err := sd.Dec.Decode(&sd.f)
	if err != nil {
		return err
	}
	*s, err = extractSamples(&sd.f, sd.Opts)
	return err
}

// ExtractSamples builds a slice of samples from the provided metric
// families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last
// error that has occured.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
	var (
		all     model.Vector
		lastErr error
	)
	for _, f := range fams {
		some, err := extractSamples(f, o)
		if err != nil {
			lastErr = err
			continue
		}
		all = append(all, some...)
	}
	return all, lastErr
}

func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
	switch f.GetType() {
	case dto.MetricType_COUNTER:
		return extractCounter(o, f), nil
	case dto.MetricType_GAUGE:
		return extractGauge(o, f), nil
	case dto.MetricType_SUMMARY:
		return extractSummary(o, f), nil
	case dto.MetricType_UNTYPED:
		return extractUntyped(o, f), nil
	case dto.MetricType_HISTOGRAM:
		return extractHistogram(o, f), nil
	}
	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
}

func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))

	for _, m := range f.Metric {
		if m.Counter == nil {
			continue
		}

		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())

		smpl := &model.Sample{
			Metric: model.Metric(lset),
			Value:  model.SampleValue(m.Counter.GetValue()),
		}

		if m.TimestampMs != nil {
			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		} else {
			smpl.Timestamp = o.Timestamp
		}

		samples = append(samples, smpl)
	}

	return samples
}

func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))

	for _, m := range f.Metric {
		if m.Gauge == nil {
			continue
		}

		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())

		smpl := &model.Sample{
			Metric: model.Metric(lset),
			Value:  model.SampleValue(m.Gauge.GetValue()),
		}

		if m.TimestampMs != nil {
			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		} else {
			smpl.Timestamp = o.Timestamp
		}

		samples = append(samples, smpl)
	}

	return samples
}

func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))

	for _, m := range f.Metric {
		if m.Untyped == nil {
			continue
		}

		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())

		smpl := &model.Sample{
			Metric: model.Metric(lset),
			Value:  model.SampleValue(m.Untyped.GetValue()),
		}

		if m.TimestampMs != nil {
			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		} else {
			smpl.Timestamp = o.Timestamp
		}

		samples = append(samples, smpl)
	}

	return samples
}

func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))

	for _, m := range f.Metric {
		if m.Summary == nil {
			continue
		}

		timestamp := o.Timestamp
		if m.TimestampMs != nil {
			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		}

		for _, q := range m.Summary.Quantile {
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			// BUG(matt): Update other names to "quantile".
			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName())

			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     model.SampleValue(q.GetValue()),
				Timestamp: timestamp,
			})
		}

		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")

		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Summary.GetSampleSum()),
			Timestamp: timestamp,
		})

		lset = make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")

		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Summary.GetSampleCount()),
			Timestamp: timestamp,
		})
	}

	return samples
}

func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))

	for _, m := range f.Metric {
		if m.Histogram == nil {
			continue
		}

		timestamp := o.Timestamp
		if m.TimestampMs != nil {
			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		}

		infSeen := false

		for _, q := range m.Histogram.Bucket {
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")

			if math.IsInf(q.GetUpperBound(), +1) {
				infSeen = true
			}

			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     model.SampleValue(q.GetCumulativeCount()),
				Timestamp: timestamp,
			})
		}

		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")

		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Histogram.GetSampleSum()),
			Timestamp: timestamp,
		})

		lset = make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")

		count := &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Histogram.GetSampleCount()),
			Timestamp: timestamp,
		}
		samples = append(samples, count)

		if !infSeen {
			// Append an infinity bucket sample.
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")

			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     count.Value,
				Timestamp: timestamp,
			})
		}
	}

	return samples
}
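(Editorial aside, not part of the diff: the decoding API deleted above is the public expfmt interface — NewDecoder, SampleDecoder, DecodeOptions. A minimal sketch of how it might be wired together follows; the sample input string is an illustrative assumption.)

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// A tiny text-format exposition; illustrative data only.
	in := "# TYPE requests_total counter\nrequests_total{handler=\"index\"} 42\n"

	// SampleDecoder wraps the format-specific Decoder and stamps samples
	// that carry no explicit timestamp.
	dec := expfmt.SampleDecoder{
		Dec:  expfmt.NewDecoder(strings.NewReader(in), expfmt.FmtText),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	for {
		var samples model.Vector
		err := dec.Decode(&samples)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(samples)
	}
}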
@@ -1,435 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expfmt

import (
	"io"
	"net/http"
	"reflect"
	"sort"
	"strings"
	"testing"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/common/model"
)

func TestTextDecoder(t *testing.T) {
	var (
		ts = model.Now()
		in = `
# Only a quite simple scenario with two metric families.
# More complicated tests of the parser itself can be found in the text package.
# TYPE mf2 counter
mf2 3
mf1{label="value1"} -3.14 123456
mf1{label="value2"} 42
mf2 4
`
		out = model.Vector{
			&model.Sample{
				Metric: model.Metric{
					model.MetricNameLabel: "mf1",
					"label":               "value1",
				},
				Value:     -3.14,
				Timestamp: 123456,
			},
			&model.Sample{
				Metric: model.Metric{
					model.MetricNameLabel: "mf1",
					"label":               "value2",
				},
				Value:     42,
				Timestamp: ts,
			},
			&model.Sample{
				Metric: model.Metric{
					model.MetricNameLabel: "mf2",
				},
				Value:     3,
				Timestamp: ts,
			},
			&model.Sample{
				Metric: model.Metric{
					model.MetricNameLabel: "mf2",
				},
				Value:     4,
				Timestamp: ts,
			},
		}
	)

	dec := &SampleDecoder{
		Dec: &textDecoder{r: strings.NewReader(in)},
		Opts: &DecodeOptions{
			Timestamp: ts,
		},
	}
	var all model.Vector
	for {
		var smpls model.Vector
		err := dec.Decode(&smpls)
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		all = append(all, smpls...)
	}
	sort.Sort(all)
	sort.Sort(out)
	if !reflect.DeepEqual(all, out) {
		t.Fatalf("output does not match")
	}
}

func TestProtoDecoder(t *testing.T) {

	var testTime = model.Now()

	scenarios := []struct {
		in       string
		expected model.Vector
		fail     bool
	}{
		{
			in: "",
		},
		{
			in:   "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_!abel_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
			fail: true,
		},
		{
			in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
			expected: model.Vector{
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"some_label_name":     "some_label_value",
					},
					Value:     -42,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"another_label_name":  "another_label_value",
					},
					Value:     84,
					Timestamp: testTime,
				},
			},
		},
		{
			in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@",
			expected: model.Vector{
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count_count",
						"some_label_name":     "some_label_value",
					},
					Value:     0,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count_sum",
						"some_label_name":     "some_label_value",
					},
					Value:     0,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"some_label_name":     "some_label_value",
						"quantile":            "0.99",
					},
					Value:     -42,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"some_label_name":     "some_label_value",
						"quantile":            "0.999",
					},
					Value:     -84,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count_count",
						"another_label_name":  "another_label_value",
					},
					Value:     0,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count_sum",
						"another_label_name":  "another_label_value",
					},
					Value:     0,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"another_label_name":  "another_label_value",
						"quantile":            "0.5",
					},
					Value:     10,
					Timestamp: testTime,
				},
			},
		},
		{
			in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f",
			expected: model.Vector{
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "100",
					},
					Value:     123,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "120",
					},
					Value:     412,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "144",
					},
					Value:     592,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "172.8",
					},
					Value:     1524,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "+Inf",
					},
					Value:     2693,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_sum",
					},
					Value:     1756047.3,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_count",
					},
					Value:     2693,
					Timestamp: testTime,
				},
			},
		},
		{
			// The metric type is unset in this protobuf, which needs to be handled
			// correctly by the decoder.
			in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?",
			expected: model.Vector{
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
					},
					Value:     1,
					Timestamp: testTime,
				},
			},
		},
	}

	for i, scenario := range scenarios {
		dec := &SampleDecoder{
			Dec: &protoDecoder{r: strings.NewReader(scenario.in)},
			Opts: &DecodeOptions{
				Timestamp: testTime,
			},
		}

		var all model.Vector
		for {
			var smpls model.Vector
			err := dec.Decode(&smpls)
			if err == io.EOF {
				break
			}
			if scenario.fail {
				if err == nil {
					t.Fatal("Expected error but got none")
				}
				break
			}
			if err != nil {
				t.Fatal(err)
			}
			all = append(all, smpls...)
		}
		sort.Sort(all)
		sort.Sort(scenario.expected)
		if !reflect.DeepEqual(all, scenario.expected) {
			t.Fatalf("%d. output does not match, want: %#v, got %#v", i, scenario.expected, all)
		}
	}
}

func testDiscriminatorHTTPHeader(t testing.TB) {
	var scenarios = []struct {
		input  map[string]string
		output Format
		err    error
	}{
		{
			input:  map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`},
			output: FmtProtoDelim,
		},
		{
			input:  map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`},
			output: FmtUnknown,
		},
		{
			input:  map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`},
			output: FmtUnknown,
		},
		{
			input:  map[string]string{"Content-Type": `text/plain; version=0.0.4`},
			output: FmtText,
		},
		{
			input:  map[string]string{"Content-Type": `text/plain`},
			output: FmtText,
		},
		{
			input:  map[string]string{"Content-Type": `text/plain; version=0.0.3`},
			output: FmtUnknown,
		},
	}

	for i, scenario := range scenarios {
		var header http.Header

		if len(scenario.input) > 0 {
			header = http.Header{}
		}

		for key, value := range scenario.input {
			header.Add(key, value)
		}

		actual := ResponseFormat(header)

		if scenario.output != actual {
			t.Errorf("%d. expected %s, got %s", i, scenario.output, actual)
		}
	}
}

func TestDiscriminatorHTTPHeader(t *testing.T) {
	testDiscriminatorHTTPHeader(t)
}

func BenchmarkDiscriminatorHTTPHeader(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testDiscriminatorHTTPHeader(b)
	}
}

func TestExtractSamples(t *testing.T) {
	var (
		goodMetricFamily1 = &dto.MetricFamily{
			Name: proto.String("foo"),
			Help: proto.String("Help for foo."),
			Type: dto.MetricType_COUNTER.Enum(),
			Metric: []*dto.Metric{
				&dto.Metric{
					Counter: &dto.Counter{
						Value: proto.Float64(4711),
					},
				},
			},
		}
		goodMetricFamily2 = &dto.MetricFamily{
			Name: proto.String("bar"),
			Help: proto.String("Help for bar."),
			Type: dto.MetricType_GAUGE.Enum(),
			Metric: []*dto.Metric{
				&dto.Metric{
					Gauge: &dto.Gauge{
						Value: proto.Float64(3.14),
					},
				},
			},
		}
		badMetricFamily = &dto.MetricFamily{
			Name: proto.String("bad"),
			Help: proto.String("Help for bad."),
			Type: dto.MetricType(42).Enum(),
			Metric: []*dto.Metric{
				&dto.Metric{
					Gauge: &dto.Gauge{
						Value: proto.Float64(2.7),
					},
				},
			},
		}

		opts = &DecodeOptions{
			Timestamp: 42,
		}
	)

	got, err := ExtractSamples(opts, goodMetricFamily1, goodMetricFamily2)
	if err != nil {
		t.Error("Unexpected error from ExtractSamples:", err)
	}
	want := model.Vector{
		&model.Sample{Metric: model.Metric{model.MetricNameLabel: "foo"}, Value: 4711, Timestamp: 42},
		&model.Sample{Metric: model.Metric{model.MetricNameLabel: "bar"}, Value: 3.14, Timestamp: 42},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want)
	}

	got, err = ExtractSamples(opts, goodMetricFamily1, badMetricFamily, goodMetricFamily2)
	if err == nil {
		t.Error("Expected error from ExtractSamples")
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want)
	}
}
@@ -1,88 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expfmt

import (
    "fmt"
    "io"
    "net/http"

    "github.com/golang/protobuf/proto"
    "github.com/matttproud/golang_protobuf_extensions/pbutil"
    "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"

    dto "github.com/prometheus/client_model/go"
)

// Encoder types encode metric families into an underlying wire protocol.
type Encoder interface {
    Encode(*dto.MetricFamily) error
}

type encoder func(*dto.MetricFamily) error

func (e encoder) Encode(v *dto.MetricFamily) error {
    return e(v)
}

// Negotiate returns the Content-Type based on the given Accept header.
// If no appropriate accepted type is found, FmtText is returned.
func Negotiate(h http.Header) Format {
    for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
        // Check for protocol buffer
        if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
            switch ac.Params["encoding"] {
            case "delimited":
                return FmtProtoDelim
            case "text":
                return FmtProtoText
            case "compact-text":
                return FmtProtoCompact
            }
        }
        // Check for text format.
        ver := ac.Params["version"]
        if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
            return FmtText
        }
    }
    return FmtText
}

// NewEncoder returns a new encoder based on content type negotiation.
func NewEncoder(w io.Writer, format Format) Encoder {
    switch format {
    case FmtProtoDelim:
        return encoder(func(v *dto.MetricFamily) error {
            _, err := pbutil.WriteDelimited(w, v)
            return err
        })
    case FmtProtoCompact:
        return encoder(func(v *dto.MetricFamily) error {
            _, err := fmt.Fprintln(w, v.String())
            return err
        })
    case FmtProtoText:
        return encoder(func(v *dto.MetricFamily) error {
            _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
            return err
        })
    case FmtText:
        return encoder(func(v *dto.MetricFamily) error {
            _, err := MetricFamilyToText(w, v)
            return err
        })
    }
    panic("expfmt.NewEncoder: unknown format")
}
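Since the encode.go removed above is the piece that picks a wire format from the HTTP Accept header, here is a minimal, hedged sketch of how a /metrics handler might combine Negotiate and NewEncoder. The handler wiring, port, and the gatherFamilies callback are illustrative and not part of this commit.

package main

import (
    "log"
    "net/http"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

// metricsHandler negotiates the exposition format from the request's Accept
// header and streams the given metric families in that format.
// gatherFamilies is a placeholder for whatever produces the data.
func metricsHandler(gatherFamilies func() []*dto.MetricFamily) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        format := expfmt.Negotiate(r.Header) // falls back to FmtText
        w.Header().Set("Content-Type", string(format))
        enc := expfmt.NewEncoder(w, format)
        for _, mf := range gatherFamilies() {
            if err := enc.Encode(mf); err != nil {
                http.Error(w, err.Error(), http.StatusInternalServerError)
                return
            }
        }
    })
}

func main() {
    http.Handle("/metrics", metricsHandler(func() []*dto.MetricFamily { return nil }))
    log.Fatal(http.ListenAndServe(":9090", nil))
}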
@@ -1,38 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt

// Format specifies the HTTP content type of the different wire protocols.
type Format string

// Constants to assemble the Content-Type values for the different wire protocols.
const (
    TextVersion   = "0.0.4"
    ProtoType     = `application/vnd.google.protobuf`
    ProtoProtocol = `io.prometheus.client.MetricFamily`
    ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"

    // The Content-Type values for the different wire protocols.
    FmtUnknown      Format = `<unknown>`
    FmtText         Format = `text/plain; version=` + TextVersion
    FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
    FmtProtoText    Format = ProtoFmt + ` encoding=text`
    FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
)

const (
    hdrContentType = "Content-Type"
    hdrAccept      = "Accept"
)
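Spelled out, the concatenations above yield the following literal Content-Type values (a worked expansion of the constants, not new definitions):

    FmtText         = text/plain; version=0.0.4
    FmtProtoDelim   = application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited
    FmtProtoText    = application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text
    FmtProtoCompact = application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text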
@@ -1,36 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Build only when actually fuzzing
// +build gofuzz

package expfmt

import "bytes"

// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
//     go-fuzz-build github.com/prometheus/common/expfmt
//     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
    parser := TextParser{}
    _, err := parser.TextToMetricFamilies(bytes.NewReader(in))

    if err != nil {
        return 0
    }

    return 1
}
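The comment above documents the external go-fuzz workflow. As an alternative sketch only (not part of this commit), the same target can be written as a native Go fuzz test on Go 1.18+, assuming it lives in the expfmt package alongside TextParser:

package expfmt

import (
    "bytes"
    "testing"
)

// FuzzTextToMetricFamilies mirrors the go-fuzz target above using the
// standard library fuzzing support. Parse errors are expected for
// malformed input and are simply ignored.
func FuzzTextToMetricFamilies(f *testing.F) {
    f.Add([]byte("minimal_metric 1.234\n"))
    f.Fuzz(func(t *testing.T, in []byte) {
        var parser TextParser
        parser.TextToMetricFamilies(bytes.NewReader(in))
    })
}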
@@ -1,2 +0,0 @@


@@ -1,6 +0,0 @@

minimal_metric 1.234
another_metric -3e3 103948
# Even that:
no_labels{} 3
# HELP line for non-existing metric will be ignored.
@@ -1,12 +0,0 @@

# A normal comment.
#
# TYPE name counter
name{labelname="val1",basename="basevalue"} NaN
name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
# HELP name two-line\n doc str\\ing

# HELP name2 doc str"ing 2
# TYPE name2 gauge
name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
name2{ labelname = "val1" , }-Inf
@@ -1,22 +0,0 @@

# TYPE my_summary summary
my_summary{n1="val1",quantile="0.5"} 110
decoy -1 -2
my_summary{n1="val1",quantile="0.9"} 140 1
my_summary_count{n1="val1"} 42
# Latest timestamp wins in case of a summary.
my_summary_sum{n1="val1"} 4711 2
fake_sum{n1="val1"} 2001
# TYPE another_summary summary
another_summary_count{n2="val2",n1="val1"} 20
my_summary_count{n2="val2",n1="val1"} 5 5
another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
my_summary_sum{n1="val2"} 08 15
my_summary{n1="val3", quantile="0.2"} 4711
my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
# some
# funny comments
# HELP
# HELP
# HELP my_summary
# HELP my_summary
@@ -1,10 +0,0 @@

# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693
@@ -1 +0,0 @@
bla 3.14
@@ -1 +0,0 @@
metric{label="\t"} 3.14
@@ -1 +0,0 @@
metric{label="bla"} 3.14 2 3
@@ -1 +0,0 @@
metric{label="bla"} blubb
@@ -1,3 +0,0 @@

# HELP metric one
# HELP metric two
@@ -1,3 +0,0 @@

# TYPE metric counter
# TYPE metric untyped
@@ -1,3 +0,0 @@

metric 4.12
# TYPE metric counter
@@ -1,2 +0,0 @@

# TYPE metric bla
@@ -1,2 +0,0 @@

# TYPE met-ric
@@ -1 +0,0 @@
@invalidmetric{label="bla"} 3.14 2
@@ -1 +0,0 @@
{label="bla"} 3.14 2
@@ -1,3 +0,0 @@

# TYPE metric histogram
metric_bucket{le="bla"} 3.14
@@ -1,3 +0,0 @@

metric{label="new
line"} 3.14
@@ -1 +0,0 @@
metric{@="bla"} 3.14
@@ -1 +0,0 @@
metric{__name__="bla"} 3.14
@@ -1 +0,0 @@
metric{label+="bla"} 3.14
@@ -1 +0,0 @@
metric{label=bla} 3.14
@@ -1,3 +0,0 @@

# TYPE metric summary
metric{quantile="bla"} 3.14
@@ -1 +0,0 @@
metric{label="bla"+} 3.14
@@ -1 +0,0 @@
metric{label="bla"} 3.14 2.72
@@ -1 +0,0 @@
m{} 0
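These fixtures are inputs for the text parser that this commit also removes. A small hedged sketch of parsing the "minimal_metric" fixture above follows; the map-of-families return value is the parser's usual shape and is assumed here.

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/common/expfmt"
)

func main() {
    // The "minimal_metric" fixture from above, fed straight to the text parser.
    input := "minimal_metric 1.234\nanother_metric -3e3 103948\nno_labels{} 3\n"
    var parser expfmt.TextParser
    families, err := parser.TextToMetricFamilies(strings.NewReader(input))
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    for name, mf := range families {
        fmt.Println(name, "->", len(mf.Metric), "sample(s)")
    }
}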
@@ -1,46 +0,0 @@
[
  {
    "baseLabels": {
      "__name__": "rpc_calls_total",
      "job": "batch_job"
    },
    "docstring": "RPC calls.",
    "metric": {
      "type": "counter",
      "value": [
        {
          "labels": {
            "service": "zed"
          },
          "value": 25
        },
        {
          "labels": {
            "service": "bar"
          },
          "value": 24
        }
      ]
    }
  },
  {
    "baseLabels": {
      "__name__": "rpc_latency_microseconds"
    },
    "docstring": "RPC latency.",
    "metric": {
      "type": "histogram",
      "value": [
        {
          "labels": {
            "service": "foo"
          },
          "value": {
            "0.010000": 15,
            "0.990000": 17
          }
        }
      ]
    }
  }
]
@@ -1,46 +0,0 @@
[
  {
    "baseLabels": {
      "__name__": "rpc_calls_total",
      "job": "batch_job"
    },
    "docstring": "RPC calls.",
    "metric": {
      "type": "counter",
      "value": [
        {
          "labels": {
            "servic|e": "zed"
          },
          "value": 25
        },
        {
          "labels": {
            "service": "bar"
          },
          "value": 24
        }
      ]
    }
  },
  {
    "baseLabels": {
      "__name__": "rpc_latency_microseconds"
    },
    "docstring": "RPC latency.",
    "metric": {
      "type": "histogram",
      "value": [
        {
          "labels": {
            "service": "foo"
          },
          "value": {
            "0.010000": 15,
            "0.990000": 17
          }
        }
      ]
    }
  }
]
Binary file not shown.
Binary file not shown.
@ -1,322 +0,0 @@
|
||||||
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
|
|
||||||
# TYPE http_request_duration_microseconds summary
|
|
||||||
http_request_duration_microseconds{handler="/",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/"} 0
|
|
||||||
http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/alerts"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/alerts"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/api/metrics"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/api/metrics"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/api/query"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/api/query"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/api/query_range"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/api/query_range"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/api/targets"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/api/targets"} 0
|
|
||||||
http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/consoles/"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/consoles/"} 0
|
|
||||||
http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/graph"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/graph"} 0
|
|
||||||
http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/heap"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/heap"} 0
|
|
||||||
http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0
|
|
||||||
http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0
|
|
||||||
http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0
|
|
||||||
http_request_duration_microseconds_sum{handler="/static/"} 0
|
|
||||||
http_request_duration_microseconds_count{handler="/static/"} 0
|
|
||||||
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275
|
|
||||||
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632
|
|
||||||
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384
|
|
||||||
http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001
|
|
||||||
http_request_duration_microseconds_count{handler="prometheus"} 119
|
|
||||||
# HELP http_request_size_bytes The HTTP request sizes in bytes.
|
|
||||||
# TYPE http_request_size_bytes summary
|
|
||||||
http_request_size_bytes{handler="/",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/"} 0
|
|
||||||
http_request_size_bytes_count{handler="/"} 0
|
|
||||||
http_request_size_bytes{handler="/alerts",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/alerts",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/alerts",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/alerts"} 0
|
|
||||||
http_request_size_bytes_count{handler="/alerts"} 0
|
|
||||||
http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/api/metrics"} 0
|
|
||||||
http_request_size_bytes_count{handler="/api/metrics"} 0
|
|
||||||
http_request_size_bytes{handler="/api/query",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/api/query",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/api/query",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/api/query"} 0
|
|
||||||
http_request_size_bytes_count{handler="/api/query"} 0
|
|
||||||
http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/api/query_range"} 0
|
|
||||||
http_request_size_bytes_count{handler="/api/query_range"} 0
|
|
||||||
http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/api/targets"} 0
|
|
||||||
http_request_size_bytes_count{handler="/api/targets"} 0
|
|
||||||
http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/consoles/"} 0
|
|
||||||
http_request_size_bytes_count{handler="/consoles/"} 0
|
|
||||||
http_request_size_bytes{handler="/graph",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/graph",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/graph",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/graph"} 0
|
|
||||||
http_request_size_bytes_count{handler="/graph"} 0
|
|
||||||
http_request_size_bytes{handler="/heap",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/heap",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/heap",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/heap"} 0
|
|
||||||
http_request_size_bytes_count{handler="/heap"} 0
|
|
||||||
http_request_size_bytes{handler="/static/",quantile="0.5"} 0
|
|
||||||
http_request_size_bytes{handler="/static/",quantile="0.9"} 0
|
|
||||||
http_request_size_bytes{handler="/static/",quantile="0.99"} 0
|
|
||||||
http_request_size_bytes_sum{handler="/static/"} 0
|
|
||||||
http_request_size_bytes_count{handler="/static/"} 0
|
|
||||||
http_request_size_bytes{handler="prometheus",quantile="0.5"} 291
|
|
||||||
http_request_size_bytes{handler="prometheus",quantile="0.9"} 291
|
|
||||||
http_request_size_bytes{handler="prometheus",quantile="0.99"} 291
|
|
||||||
http_request_size_bytes_sum{handler="prometheus"} 34488
|
|
||||||
http_request_size_bytes_count{handler="prometheus"} 119
|
|
||||||
# HELP http_requests_total Total number of HTTP requests made.
|
|
||||||
# TYPE http_requests_total counter
|
|
||||||
http_requests_total{code="200",handler="prometheus",method="get"} 119
|
|
||||||
# HELP http_response_size_bytes The HTTP response sizes in bytes.
|
|
||||||
# TYPE http_response_size_bytes summary
|
|
||||||
http_response_size_bytes{handler="/",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/"} 0
|
|
||||||
http_response_size_bytes_count{handler="/"} 0
|
|
||||||
http_response_size_bytes{handler="/alerts",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/alerts",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/alerts",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/alerts"} 0
|
|
||||||
http_response_size_bytes_count{handler="/alerts"} 0
|
|
||||||
http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/api/metrics"} 0
|
|
||||||
http_response_size_bytes_count{handler="/api/metrics"} 0
|
|
||||||
http_response_size_bytes{handler="/api/query",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/api/query",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/api/query",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/api/query"} 0
|
|
||||||
http_response_size_bytes_count{handler="/api/query"} 0
|
|
||||||
http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/api/query_range"} 0
|
|
||||||
http_response_size_bytes_count{handler="/api/query_range"} 0
|
|
||||||
http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/api/targets"} 0
|
|
||||||
http_response_size_bytes_count{handler="/api/targets"} 0
|
|
||||||
http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/consoles/"} 0
|
|
||||||
http_response_size_bytes_count{handler="/consoles/"} 0
|
|
||||||
http_response_size_bytes{handler="/graph",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/graph",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/graph",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/graph"} 0
|
|
||||||
http_response_size_bytes_count{handler="/graph"} 0
|
|
||||||
http_response_size_bytes{handler="/heap",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/heap",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/heap",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/heap"} 0
|
|
||||||
http_response_size_bytes_count{handler="/heap"} 0
|
|
||||||
http_response_size_bytes{handler="/static/",quantile="0.5"} 0
|
|
||||||
http_response_size_bytes{handler="/static/",quantile="0.9"} 0
|
|
||||||
http_response_size_bytes{handler="/static/",quantile="0.99"} 0
|
|
||||||
http_response_size_bytes_sum{handler="/static/"} 0
|
|
||||||
http_response_size_bytes_count{handler="/static/"} 0
|
|
||||||
http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049
|
|
||||||
http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058
|
|
||||||
http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064
|
|
||||||
http_response_size_bytes_sum{handler="prometheus"} 247001
|
|
||||||
http_response_size_bytes_count{handler="prometheus"} 119
|
|
||||||
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
|
|
||||||
# TYPE process_cpu_seconds_total counter
|
|
||||||
process_cpu_seconds_total 0.55
|
|
||||||
# HELP go_goroutines Number of goroutines that currently exist.
|
|
||||||
# TYPE go_goroutines gauge
|
|
||||||
go_goroutines 70
|
|
||||||
# HELP process_max_fds Maximum number of open file descriptors.
|
|
||||||
# TYPE process_max_fds gauge
|
|
||||||
process_max_fds 8192
|
|
||||||
# HELP process_open_fds Number of open file descriptors.
|
|
||||||
# TYPE process_open_fds gauge
|
|
||||||
process_open_fds 29
|
|
||||||
# HELP process_resident_memory_bytes Resident memory size in bytes.
|
|
||||||
# TYPE process_resident_memory_bytes gauge
|
|
||||||
process_resident_memory_bytes 5.3870592e+07
|
|
||||||
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
|
|
||||||
# TYPE process_start_time_seconds gauge
|
|
||||||
process_start_time_seconds 1.42236894836e+09
|
|
||||||
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
|
|
||||||
# TYPE process_virtual_memory_bytes gauge
|
|
||||||
process_virtual_memory_bytes 5.41478912e+08
|
|
||||||
# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures.
|
|
||||||
# TYPE prometheus_dns_sd_lookup_failures_total counter
|
|
||||||
prometheus_dns_sd_lookup_failures_total 0
|
|
||||||
# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups.
|
|
||||||
# TYPE prometheus_dns_sd_lookups_total counter
|
|
||||||
prometheus_dns_sd_lookups_total 7
|
|
||||||
# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute.
|
|
||||||
# TYPE prometheus_evaluator_duration_milliseconds summary
|
|
||||||
prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0
|
|
||||||
prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0
|
|
||||||
prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0
|
|
||||||
prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1
|
|
||||||
prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1
|
|
||||||
prometheus_evaluator_duration_milliseconds_sum 12
|
|
||||||
prometheus_evaluator_duration_milliseconds_count 23
|
|
||||||
# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks.
|
|
||||||
# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge
|
|
||||||
prometheus_local_storage_checkpoint_duration_milliseconds 0
|
|
||||||
# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
|
|
||||||
# TYPE prometheus_local_storage_chunk_ops_total counter
|
|
||||||
prometheus_local_storage_chunk_ops_total{type="create"} 598
|
|
||||||
prometheus_local_storage_chunk_ops_total{type="persist"} 174
|
|
||||||
prometheus_local_storage_chunk_ops_total{type="pin"} 920
|
|
||||||
prometheus_local_storage_chunk_ops_total{type="transcode"} 415
|
|
||||||
prometheus_local_storage_chunk_ops_total{type="unpin"} 920
|
|
||||||
# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds.
|
|
||||||
# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary
|
|
||||||
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0
|
|
||||||
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0
|
|
||||||
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0
|
|
||||||
prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0
|
|
||||||
prometheus_local_storage_indexing_batch_latency_milliseconds_count 1
|
|
||||||
# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch).
|
|
||||||
# TYPE prometheus_local_storage_indexing_batch_sizes summary
|
|
||||||
prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2
|
|
||||||
prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2
|
|
||||||
prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2
|
|
||||||
prometheus_local_storage_indexing_batch_sizes_sum 2
|
|
||||||
prometheus_local_storage_indexing_batch_sizes_count 1
|
|
||||||
# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue.
|
|
||||||
# TYPE prometheus_local_storage_indexing_queue_capacity gauge
|
|
||||||
prometheus_local_storage_indexing_queue_capacity 16384
|
|
||||||
# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed.
|
|
||||||
# TYPE prometheus_local_storage_indexing_queue_length gauge
|
|
||||||
prometheus_local_storage_indexing_queue_length 0
|
|
||||||
# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested.
|
|
||||||
# TYPE prometheus_local_storage_ingested_samples_total counter
|
|
||||||
prometheus_local_storage_ingested_samples_total 30473
|
|
||||||
# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.
|
|
||||||
# TYPE prometheus_local_storage_invalid_preload_requests_total counter
|
|
||||||
prometheus_local_storage_invalid_preload_requests_total 0
|
|
||||||
# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory.
|
|
||||||
# TYPE prometheus_local_storage_memory_chunkdescs gauge
|
|
||||||
prometheus_local_storage_memory_chunkdescs 1059
|
|
||||||
# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor).
|
|
||||||
# TYPE prometheus_local_storage_memory_chunks gauge
|
|
||||||
prometheus_local_storage_memory_chunks 1020
|
|
||||||
# HELP prometheus_local_storage_memory_series The current number of series in memory.
|
|
||||||
# TYPE prometheus_local_storage_memory_series gauge
|
|
||||||
prometheus_local_storage_memory_series 424
|
|
||||||
# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk.
|
|
||||||
# TYPE prometheus_local_storage_persist_latency_microseconds summary
|
|
||||||
prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377
|
|
||||||
prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539
|
|
||||||
prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463
|
|
||||||
prometheus_local_storage_persist_latency_microseconds_sum 20424.415
|
|
||||||
prometheus_local_storage_persist_latency_microseconds_count 174
|
|
||||||
# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue.
|
|
||||||
# TYPE prometheus_local_storage_persist_queue_capacity gauge
|
|
||||||
prometheus_local_storage_persist_queue_capacity 1024
|
|
||||||
# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue.
|
|
||||||
# TYPE prometheus_local_storage_persist_queue_length gauge
|
|
||||||
prometheus_local_storage_persist_queue_length 0
|
|
||||||
# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type.
|
|
||||||
# TYPE prometheus_local_storage_series_ops_total counter
|
|
||||||
prometheus_local_storage_series_ops_total{type="create"} 2
|
|
||||||
prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11
|
|
||||||
# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications).
|
|
||||||
# TYPE prometheus_notifications_latency_milliseconds summary
|
|
||||||
prometheus_notifications_latency_milliseconds{quantile="0.5"} 0
|
|
||||||
prometheus_notifications_latency_milliseconds{quantile="0.9"} 0
|
|
||||||
prometheus_notifications_latency_milliseconds{quantile="0.99"} 0
|
|
||||||
prometheus_notifications_latency_milliseconds_sum 0
|
|
||||||
prometheus_notifications_latency_milliseconds_count 0
|
|
||||||
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
|
|
||||||
# TYPE prometheus_notifications_queue_capacity gauge
|
|
||||||
prometheus_notifications_queue_capacity 100
|
|
||||||
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
|
|
||||||
# TYPE prometheus_notifications_queue_length gauge
|
|
||||||
prometheus_notifications_queue_length 0
|
|
||||||
# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute.
|
|
||||||
# TYPE prometheus_rule_evaluation_duration_milliseconds summary
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15
|
|
||||||
prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115
|
|
||||||
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
|
|
||||||
# TYPE prometheus_rule_evaluation_failures_total counter
|
|
||||||
prometheus_rule_evaluation_failures_total 0
|
|
||||||
# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples.
|
|
||||||
# TYPE prometheus_samples_queue_capacity gauge
|
|
||||||
prometheus_samples_queue_capacity 4096
|
|
||||||
# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name).
|
|
||||||
# TYPE prometheus_samples_queue_length gauge
|
|
||||||
prometheus_samples_queue_length 0
|
|
||||||
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
|
|
||||||
# TYPE prometheus_target_interval_length_seconds summary
|
|
||||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14
|
|
||||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14
|
|
||||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15
|
|
||||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15
|
|
||||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15
|
|
||||||
prometheus_target_interval_length_seconds_sum{interval="15s"} 175
|
|
||||||
prometheus_target_interval_length_seconds_count{interval="15s"} 12
|
|
||||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0
|
|
||||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0
|
|
||||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0
|
|
||||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1
|
|
||||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1
|
|
||||||
prometheus_target_interval_length_seconds_sum{interval="1s"} 55
|
|
||||||
prometheus_target_interval_length_seconds_count{interval="1s"} 117
|
|
||||||
Binary file not shown.
|
|
@@ -1,303 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expfmt

import (
    "fmt"
    "io"
    "math"
    "strings"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/model"
)

// MetricFamilyToText converts a MetricFamily proto message into text format and
// writes the resulting lines to 'out'. It returns the number of bytes written
// and any error encountered. The output will have the same order as the input,
// no further sorting is performed. Furthermore, this function assumes the input
// is already sanitized and does not perform any sanity checks. If the input
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
    var written int

    // Fail-fast checks.
    if len(in.Metric) == 0 {
        return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
    }
    name := in.GetName()
    if name == "" {
        return written, fmt.Errorf("MetricFamily has no name: %s", in)
    }

    // Comments, first HELP, then TYPE.
    if in.Help != nil {
        n, err := fmt.Fprintf(
            out, "# HELP %s %s\n",
            name, escapeString(*in.Help, false),
        )
        written += n
        if err != nil {
            return written, err
        }
    }
    metricType := in.GetType()
    n, err := fmt.Fprintf(
        out, "# TYPE %s %s\n",
        name, strings.ToLower(metricType.String()),
    )
    written += n
    if err != nil {
        return written, err
    }

    // Finally the samples, one line for each.
    for _, metric := range in.Metric {
        switch metricType {
        case dto.MetricType_COUNTER:
            if metric.Counter == nil {
                return written, fmt.Errorf(
                    "expected counter in metric %s %s", name, metric,
                )
            }
            n, err = writeSample(
                name, metric, "", "",
                metric.Counter.GetValue(),
                out,
            )
        case dto.MetricType_GAUGE:
            if metric.Gauge == nil {
                return written, fmt.Errorf(
                    "expected gauge in metric %s %s", name, metric,
                )
            }
            n, err = writeSample(
                name, metric, "", "",
                metric.Gauge.GetValue(),
                out,
            )
        case dto.MetricType_UNTYPED:
            if metric.Untyped == nil {
                return written, fmt.Errorf(
                    "expected untyped in metric %s %s", name, metric,
                )
            }
            n, err = writeSample(
                name, metric, "", "",
                metric.Untyped.GetValue(),
                out,
            )
        case dto.MetricType_SUMMARY:
            if metric.Summary == nil {
                return written, fmt.Errorf(
                    "expected summary in metric %s %s", name, metric,
                )
            }
            for _, q := range metric.Summary.Quantile {
                n, err = writeSample(
                    name, metric,
                    model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
                    q.GetValue(),
                    out,
                )
                written += n
                if err != nil {
                    return written, err
                }
            }
            n, err = writeSample(
                name+"_sum", metric, "", "",
                metric.Summary.GetSampleSum(),
                out,
            )
            if err != nil {
                return written, err
            }
            written += n
            n, err = writeSample(
                name+"_count", metric, "", "",
                float64(metric.Summary.GetSampleCount()),
                out,
            )
        case dto.MetricType_HISTOGRAM:
            if metric.Histogram == nil {
                return written, fmt.Errorf(
                    "expected histogram in metric %s %s", name, metric,
                )
            }
            infSeen := false
            for _, q := range metric.Histogram.Bucket {
                n, err = writeSample(
                    name+"_bucket", metric,
                    model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
                    float64(q.GetCumulativeCount()),
                    out,
                )
                written += n
                if err != nil {
                    return written, err
                }
                if math.IsInf(q.GetUpperBound(), +1) {
                    infSeen = true
                }
            }
            if !infSeen {
                n, err = writeSample(
                    name+"_bucket", metric,
                    model.BucketLabel, "+Inf",
                    float64(metric.Histogram.GetSampleCount()),
                    out,
                )
                if err != nil {
                    return written, err
                }
                written += n
            }
            n, err = writeSample(
                name+"_sum", metric, "", "",
                metric.Histogram.GetSampleSum(),
                out,
            )
            if err != nil {
                return written, err
            }
            written += n
            n, err = writeSample(
                name+"_count", metric, "", "",
                float64(metric.Histogram.GetSampleCount()),
                out,
            )
        default:
            return written, fmt.Errorf(
                "unexpected type in metric %s %s", name, metric,
            )
        }
        written += n
        if err != nil {
            return written, err
        }
    }
    return written, nil
}

// writeSample writes a single sample in text format to out, given the metric
// name, the metric proto message itself, optionally an additional label name
// and value (use empty strings if not required), and the value. The function
// returns the number of bytes written and any error encountered.
func writeSample(
    name string,
    metric *dto.Metric,
    additionalLabelName, additionalLabelValue string,
    value float64,
    out io.Writer,
) (int, error) {
    var written int
    n, err := fmt.Fprint(out, name)
    written += n
    if err != nil {
        return written, err
    }
    n, err = labelPairsToText(
        metric.Label,
        additionalLabelName, additionalLabelValue,
        out,
    )
    written += n
    if err != nil {
        return written, err
    }
    n, err = fmt.Fprintf(out, " %v", value)
    written += n
    if err != nil {
        return written, err
    }
    if metric.TimestampMs != nil {
        n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
        written += n
        if err != nil {
            return written, err
        }
    }
    n, err = out.Write([]byte{'\n'})
    written += n
    if err != nil {
        return written, err
    }
    return written, nil
}

// labelPairsToText converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the
// text format and writes it to 'out'. An empty slice in combination with an
// empty string 'additionalLabelName' results in nothing being
// written. Otherwise, the label pairs are written, escaped as required by the
// text format, and enclosed in '{...}'. The function returns the number of
// bytes written and any error encountered.
func labelPairsToText(
    in []*dto.LabelPair,
    additionalLabelName, additionalLabelValue string,
    out io.Writer,
) (int, error) {
    if len(in) == 0 && additionalLabelName == "" {
        return 0, nil
    }
    var written int
    separator := '{'
    for _, lp := range in {
        n, err := fmt.Fprintf(
            out, `%c%s="%s"`,
            separator, lp.GetName(), escapeString(lp.GetValue(), true),
        )
        written += n
        if err != nil {
            return written, err
        }
        separator = ','
    }
    if additionalLabelName != "" {
        n, err := fmt.Fprintf(
            out, `%c%s="%s"`,
            separator, additionalLabelName,
            escapeString(additionalLabelValue, true),
        )
        written += n
        if err != nil {
            return written, err
        }
    }
    n, err := out.Write([]byte{'}'})
    written += n
    if err != nil {
        return written, err
    }
    return written, nil
}

var (
    escape                = strings.NewReplacer("\\", `\\`, "\n", `\n`)
    escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)

// escapeString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
func escapeString(v string, includeDoubleQuote bool) string {
    if includeDoubleQuote {
        return escapeWithDoubleQuote.Replace(v)
    }

    return escape.Replace(v)
}
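To make the removed encoder concrete, here is a minimal hedged sketch that feeds a hand-built counter family through MetricFamilyToText. The family literal is illustrative, mirroring the style of the test file whose deletion follows; the metric and label names are borrowed from the JSON fixture above.

package main

import (
    "os"

    "github.com/golang/protobuf/proto"
    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

func main() {
    // A single counter with one label, built the same way the tests below do.
    mf := &dto.MetricFamily{
        Name: proto.String("rpc_calls_total"),
        Help: proto.String("RPC calls."),
        Type: dto.MetricType_COUNTER.Enum(),
        Metric: []*dto.Metric{
            {
                Label: []*dto.LabelPair{
                    {Name: proto.String("service"), Value: proto.String("zed")},
                },
                Counter: &dto.Counter{Value: proto.Float64(25)},
            },
        },
    }
    // Writes:
    //   # HELP rpc_calls_total RPC calls.
    //   # TYPE rpc_calls_total counter
    //   rpc_calls_total{service="zed"} 25
    expfmt.MetricFamilyToText(os.Stdout, mf)
}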
@ -1,443 +0,0 @@
|
||||||
// Copyright 2014 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package expfmt
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"math"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
func testCreate(t testing.TB) {
|
|
||||||
var scenarios = []struct {
|
|
||||||
in *dto.MetricFamily
|
|
||||||
out string
|
|
||||||
}{
|
|
||||||
// 0: Counter, NaN as value, timestamp given.
|
|
||||||
{
|
|
||||||
in: &dto.MetricFamily{
|
|
||||||
Name: proto.String("name"),
|
|
||||||
Help: proto.String("two-line\n doc str\\ing"),
|
|
||||||
Type: dto.MetricType_COUNTER.Enum(),
|
|
||||||
Metric: []*dto.Metric{
|
|
||||||
&dto.Metric{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("labelname"),
|
|
||||||
Value: proto.String("val1"),
|
|
||||||
},
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("basename"),
|
|
||||||
Value: proto.String("basevalue"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Counter: &dto.Counter{
|
|
||||||
Value: proto.Float64(math.NaN()),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
&dto.Metric{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("labelname"),
|
|
||||||
Value: proto.String("val2"),
|
|
||||||
},
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("basename"),
|
|
||||||
Value: proto.String("basevalue"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Counter: &dto.Counter{
|
|
||||||
Value: proto.Float64(.23),
|
|
||||||
},
|
|
||||||
TimestampMs: proto.Int64(1234567890),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
out: `# HELP name two-line\n doc str\\ing
|
|
||||||
# TYPE name counter
|
|
||||||
name{labelname="val1",basename="basevalue"} NaN
|
|
||||||
name{labelname="val2",basename="basevalue"} 0.23 1234567890
|
|
||||||
`,
|
|
||||||
},
|
|
||||||
// 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values.
|
|
||||||
{
|
|
||||||
in: &dto.MetricFamily{
|
|
||||||
Name: proto.String("gauge_name"),
|
|
||||||
Help: proto.String("gauge\ndoc\nstr\"ing"),
|
|
||||||
Type: dto.MetricType_GAUGE.Enum(),
|
|
||||||
Metric: []*dto.Metric{
|
|
||||||
&dto.Metric{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("name_1"),
|
|
||||||
Value: proto.String("val with\nnew line"),
|
|
||||||
},
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("name_2"),
|
|
||||||
Value: proto.String("val with \\backslash and \"quotes\""),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Gauge: &dto.Gauge{
|
|
||||||
Value: proto.Float64(math.Inf(+1)),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
&dto.Metric{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("name_1"),
|
|
||||||
Value: proto.String("Björn"),
|
|
||||||
},
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("name_2"),
|
|
||||||
Value: proto.String("佖佥"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Gauge: &dto.Gauge{
|
|
||||||
Value: proto.Float64(3.14E42),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
out: `# HELP gauge_name gauge\ndoc\nstr"ing
|
|
||||||
# TYPE gauge_name gauge
|
|
||||||
gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf
|
|
||||||
gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42
|
|
||||||
`,
|
|
||||||
},
|
|
||||||
// 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label.
|
|
||||||
{
|
|
||||||
in: &dto.MetricFamily{
|
|
||||||
Name: proto.String("untyped_name"),
|
|
||||||
Type: dto.MetricType_UNTYPED.Enum(),
|
|
||||||
Metric: []*dto.Metric{
|
|
||||||
&dto.Metric{
|
|
||||||
Untyped: &dto.Untyped{
|
|
||||||
Value: proto.Float64(math.Inf(-1)),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
&dto.Metric{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("name_1"),
|
|
||||||
Value: proto.String("value 1"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Untyped: &dto.Untyped{
|
|
||||||
Value: proto.Float64(-1.23e-45),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
out: `# TYPE untyped_name untyped
|
|
||||||
untyped_name -Inf
|
|
||||||
untyped_name{name_1="value 1"} -1.23e-45
|
|
||||||
`,
|
|
||||||
},
|
|
||||||
// 3: Summary.
|
|
||||||
{
|
|
||||||
in: &dto.MetricFamily{
|
|
||||||
Name: proto.String("summary_name"),
|
|
||||||
Help: proto.String("summary docstring"),
|
|
||||||
Type: dto.MetricType_SUMMARY.Enum(),
|
|
||||||
Metric: []*dto.Metric{
|
|
||||||
&dto.Metric{
|
|
||||||
Summary: &dto.Summary{
|
|
||||||
SampleCount: proto.Uint64(42),
|
|
||||||
SampleSum: proto.Float64(-3.4567),
|
|
||||||
Quantile: []*dto.Quantile{
|
|
||||||
&dto.Quantile{
|
|
||||||
Quantile: proto.Float64(0.5),
|
|
||||||
Value: proto.Float64(-1.23),
|
|
||||||
},
|
|
||||||
&dto.Quantile{
|
|
||||||
Quantile: proto.Float64(0.9),
|
|
||||||
Value: proto.Float64(.2342354),
|
|
||||||
},
|
|
||||||
&dto.Quantile{
|
|
||||||
Quantile: proto.Float64(0.99),
|
|
||||||
Value: proto.Float64(0),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
&dto.Metric{
|
|
||||||
Label: []*dto.LabelPair{
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("name_1"),
|
|
||||||
Value: proto.String("value 1"),
|
|
||||||
},
|
|
||||||
&dto.LabelPair{
|
|
||||||
Name: proto.String("name_2"),
|
|
||||||
Value: proto.String("value 2"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Summary: &dto.Summary{
|
|
||||||
SampleCount: proto.Uint64(4711),
|
|
||||||
SampleSum: proto.Float64(2010.1971),
|
|
||||||
Quantile: []*dto.Quantile{
|
|
||||||
&dto.Quantile{
|
|
||||||
Quantile: proto.Float64(0.5),
|
|
||||||
Value: proto.Float64(1),
|
|
||||||
},
|
|
||||||
&dto.Quantile{
|
|
||||||
Quantile: proto.Float64(0.9),
|
|
||||||
Value: proto.Float64(2),
|
|
||||||
},
|
|
||||||
&dto.Quantile{
|
|
||||||
Quantile: proto.Float64(0.99),
|
|
||||||
Value: proto.Float64(3),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
out: `# HELP summary_name summary docstring
|
|
||||||
# TYPE summary_name summary
|
|
||||||
summary_name{quantile="0.5"} -1.23
|
|
||||||
summary_name{quantile="0.9"} 0.2342354
|
|
||||||
summary_name{quantile="0.99"} 0
|
|
||||||
summary_name_sum -3.4567
|
|
||||||
summary_name_count 42
|
|
||||||
summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1
|
|
||||||
summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2
|
|
||||||
summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3
|
|
||||||
summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971
|
|
||||||
summary_name_count{name_1="value 1",name_2="value 2"} 4711
|
|
||||||
`,
|
|
||||||
},
|
|
||||||
// 4: Histogram
|
|
||||||
{
|
|
||||||
in: &dto.MetricFamily{
|
|
||||||
Name: proto.String("request_duration_microseconds"),
|
|
||||||
Help: proto.String("The response latency."),
|
|
||||||
Type: dto.MetricType_HISTOGRAM.Enum(),
|
|
||||||
Metric: []*dto.Metric{
|
|
||||||
&dto.Metric{
|
|
||||||
Histogram: &dto.Histogram{
|
|
||||||
SampleCount: proto.Uint64(2693),
|
|
||||||
SampleSum: proto.Float64(1756047.3),
|
|
||||||
Bucket: []*dto.Bucket{
|
|
||||||
&dto.Bucket{
|
|
||||||
UpperBound: proto.Float64(100),
|
|
||||||
CumulativeCount: proto.Uint64(123),
|
|
||||||
},
|
|
||||||
&dto.Bucket{
|
|
||||||
UpperBound: proto.Float64(120),
|
|
||||||
CumulativeCount: proto.Uint64(412),
|
|
||||||
},
|
|
||||||
&dto.Bucket{
|
|
||||||
UpperBound: proto.Float64(144),
|
|
||||||
CumulativeCount: proto.Uint64(592),
|
|
||||||
},
|
|
||||||
&dto.Bucket{
|
|
||||||
UpperBound: proto.Float64(172.8),
|
|
||||||
CumulativeCount: proto.Uint64(1524),
|
|
||||||
},
|
|
||||||
&dto.Bucket{
|
|
||||||
UpperBound: proto.Float64(math.Inf(+1)),
|
|
||||||
CumulativeCount: proto.Uint64(2693),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
				},
			},
			out: `# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693
`,
		},
		// 5: Histogram with missing +Inf bucket.
		{
			in: &dto.MetricFamily{
				Name: proto.String("request_duration_microseconds"),
				Help: proto.String("The response latency."),
				Type: dto.MetricType_HISTOGRAM.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Histogram: &dto.Histogram{
							SampleCount: proto.Uint64(2693),
							SampleSum: proto.Float64(1756047.3),
							Bucket: []*dto.Bucket{
								&dto.Bucket{
									UpperBound: proto.Float64(100),
									CumulativeCount: proto.Uint64(123),
								},
								&dto.Bucket{
									UpperBound: proto.Float64(120),
									CumulativeCount: proto.Uint64(412),
								},
								&dto.Bucket{
									UpperBound: proto.Float64(144),
									CumulativeCount: proto.Uint64(592),
								},
								&dto.Bucket{
									UpperBound: proto.Float64(172.8),
									CumulativeCount: proto.Uint64(1524),
								},
							},
						},
					},
				},
			},
			out: `# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693
`,
		},
		// 6: No metric type, should result in default type Counter.
		{
			in: &dto.MetricFamily{
				Name: proto.String("name"),
				Help: proto.String("doc string"),
				Metric: []*dto.Metric{
					&dto.Metric{
						Counter: &dto.Counter{
							Value: proto.Float64(math.Inf(-1)),
						},
					},
				},
			},
			out: `# HELP name doc string
# TYPE name counter
name -Inf
`,
		},
	}

	for i, scenario := range scenarios {
		out := bytes.NewBuffer(make([]byte, 0, len(scenario.out)))
		n, err := MetricFamilyToText(out, scenario.in)
		if err != nil {
			t.Errorf("%d. error: %s", i, err)
			continue
		}
		if expected, got := len(scenario.out), n; expected != got {
			t.Errorf(
				"%d. expected %d bytes written, got %d",
				i, expected, got,
			)
		}
		if expected, got := scenario.out, out.String(); expected != got {
			t.Errorf(
				"%d. expected out=%q, got %q",
				i, expected, got,
			)
		}
	}

}

func TestCreate(t *testing.T) {
	testCreate(t)
}

func BenchmarkCreate(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testCreate(b)
	}
}

func testCreateError(t testing.TB) {
	var scenarios = []struct {
		in *dto.MetricFamily
		err string
	}{
		// 0: No metric.
		{
			in: &dto.MetricFamily{
				Name: proto.String("name"),
				Help: proto.String("doc string"),
				Type: dto.MetricType_COUNTER.Enum(),
				Metric: []*dto.Metric{},
			},
			err: "MetricFamily has no metrics",
		},
		// 1: No metric name.
		{
			in: &dto.MetricFamily{
				Help: proto.String("doc string"),
				Type: dto.MetricType_UNTYPED.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Untyped: &dto.Untyped{
							Value: proto.Float64(math.Inf(-1)),
						},
					},
				},
			},
			err: "MetricFamily has no name",
		},
		// 2: Wrong type.
		{
			in: &dto.MetricFamily{
				Name: proto.String("name"),
				Help: proto.String("doc string"),
				Type: dto.MetricType_COUNTER.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Untyped: &dto.Untyped{
							Value: proto.Float64(math.Inf(-1)),
						},
					},
				},
			},
			err: "expected counter in metric",
		},
	}

	for i, scenario := range scenarios {
		var out bytes.Buffer
		_, err := MetricFamilyToText(&out, scenario.in)
		if err == nil {
			t.Errorf("%d. expected error, got nil", i)
			continue
		}
		if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
			t.Errorf(
				"%d. expected error starting with %q, got %q",
				i, expected, got,
			)
		}
	}

}

func TestCreateError(t *testing.T) {
	testCreateError(t)
}

func BenchmarkCreateError(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testCreateError(b)
	}
}

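For context, a minimal usage sketch of the MetricFamilyToText function exercised by the tests above. This is not part of the diff; the expfmt/dto import paths are assumed to follow the upstream prometheus/common layout rather than this repository's vendor tree.

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Build a one-sample counter family, mirroring the test fixtures above.
	mf := &dto.MetricFamily{
		Name:   proto.String("name"),
		Help:   proto.String("doc string"),
		Type:   dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{Counter: &dto.Counter{Value: proto.Float64(42)}}},
	}
	// MetricFamilyToText writes the "# HELP"/"# TYPE" header and the sample
	// line to the writer and returns the number of bytes written.
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}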
@@ -1,753 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expfmt

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"math"
	"strconv"
	"strings"

	dto "github.com/prometheus/client_model/go"

	"github.com/golang/protobuf/proto"
	"github.com/prometheus/common/model"
)

// A stateFn is a function that represents a state in a state machine. By
// executing it, the state is progressed to the next state. The stateFn returns
// another stateFn, which represents the new state. The end state is represented
// by nil.
type stateFn func() stateFn

// ParseError signals errors while parsing the simple and flat text-based
// exchange format.
type ParseError struct {
	Line int
	Msg string
}

// Error implements the error interface.
func (e ParseError) Error() string {
	return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
}

// TextParser is used to parse the simple and flat text-based exchange format. Its
// zero value is ready to use.
type TextParser struct {
	metricFamiliesByName map[string]*dto.MetricFamily
	buf *bufio.Reader // Where the parsed input is read through.
	err error // Most recent error.
	lineCount int // Tracks the line count for error messages.
	currentByte byte // The most recent byte read.
	currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
	currentMF *dto.MetricFamily
	currentMetric *dto.Metric
	currentLabelPair *dto.LabelPair

	// The remaining member variables are only used for summaries/histograms.
	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
	// Summary specific.
	summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
	currentQuantile float64
	// Histogram specific.
	histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
	currentBucket float64
	// These tell us if the currently processed line ends on '_count' or
	// '_sum' respectively and belong to a summary/histogram, representing the sample
	// count and sum of that summary/histogram.
	currentIsSummaryCount, currentIsSummarySum bool
	currentIsHistogramCount, currentIsHistogramSum bool
}

// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
// format and creates MetricFamily proto messages. It returns the MetricFamily
// proto messages in a map where the metric names are the keys, along with any
// error encountered.
//
// If the input contains duplicate metrics (i.e. lines with the same metric name
// and exactly the same label set), the resulting MetricFamily will contain
// duplicate Metric proto messages. Similar is true for duplicate label
// names. Checks for duplicates have to be performed separately, if required.
// Also note that neither the metrics within each MetricFamily are sorted nor
// the label pairs within each Metric. Sorting is not required for the most
// frequent use of this method, which is sample ingestion in the Prometheus
// server. However, for presentation purposes, you might want to sort the
// metrics, and in some cases, you must sort the labels, e.g. for consumption by
// the metric family injection hook of the Prometheus registry.
//
// Summaries and histograms are rather special beasts. You would probably not
// use them in the simple text format anyway. This method can deal with
// summaries and histograms if they are presented in exactly the way the
// text.Create function creates them.
//
// This method must not be called concurrently. If you want to parse different
// input concurrently, instantiate a separate Parser for each goroutine.
func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
	p.reset(in)
	for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
		// Magic happens here...
	}
	// Get rid of empty metric families.
	for k, mf := range p.metricFamiliesByName {
		if len(mf.GetMetric()) == 0 {
			delete(p.metricFamiliesByName, k)
		}
	}
	// If p.err is io.EOF now, we have run into a premature end of the input
	// stream. Turn this error into something nicer and more
	// meaningful. (io.EOF is often used as a signal for the legitimate end
	// of an input stream.)
	if p.err == io.EOF {
		p.parseError("unexpected end of input stream")
	}
	return p.metricFamiliesByName, p.err
}

func (p *TextParser) reset(in io.Reader) {
	p.metricFamiliesByName = map[string]*dto.MetricFamily{}
	if p.buf == nil {
		p.buf = bufio.NewReader(in)
	} else {
		p.buf.Reset(in)
	}
	p.err = nil
	p.lineCount = 0
	if p.summaries == nil || len(p.summaries) > 0 {
		p.summaries = map[uint64]*dto.Metric{}
	}
	if p.histograms == nil || len(p.histograms) > 0 {
		p.histograms = map[uint64]*dto.Metric{}
	}
	p.currentQuantile = math.NaN()
	p.currentBucket = math.NaN()
}

// startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it).
func (p *TextParser) startOfLine() stateFn {
	p.lineCount++
	if p.skipBlankTab(); p.err != nil {
		// End of input reached. This is the only case where
		// that is not an error but a signal that we are done.
		p.err = nil
		return nil
	}
	switch p.currentByte {
	case '#':
		return p.startComment
	case '\n':
		return p.startOfLine // Empty line, start the next one.
	}
	return p.readingMetricName
}

// startComment represents the state where the next byte read from p.buf is the
// start of a comment (or whitespace leading up to it).
func (p *TextParser) startComment() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	// If we have hit the end of line already, there is nothing left
	// to do. This is not considered a syntax error.
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	keyword := p.currentToken.String()
	if keyword != "HELP" && keyword != "TYPE" {
		// Generic comment, ignore by fast forwarding to end of line.
		for p.currentByte != '\n' {
			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
				return nil // Unexpected end of input.
			}
		}
		return p.startOfLine
	}
	// There is something. Next has to be a metric name.
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.readTokenAsMetricName(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// At the end of the line already.
		// Again, this is not considered a syntax error.
		return p.startOfLine
	}
	if !isBlankOrTab(p.currentByte) {
		p.parseError("invalid metric name in comment")
		return nil
	}
	p.setOrCreateCurrentMF()
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// At the end of the line already.
		// Again, this is not considered a syntax error.
		return p.startOfLine
	}
	switch keyword {
	case "HELP":
		return p.readingHelp
	case "TYPE":
		return p.readingType
	}
	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
}

// readingMetricName represents the state where the last byte read (now in
// p.currentByte) is the first byte of a metric name.
func (p *TextParser) readingMetricName() stateFn {
	if p.readTokenAsMetricName(); p.err != nil {
		return nil
	}
	if p.currentToken.Len() == 0 {
		p.parseError("invalid metric name")
		return nil
	}
	p.setOrCreateCurrentMF()
	// Now is the time to fix the type if it hasn't happened yet.
	if p.currentMF.Type == nil {
		p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
	}
	p.currentMetric = &dto.Metric{}
	// Do not append the newly created currentMetric to
	// currentMF.Metric right now. First wait if this is a summary,
	// and the metric exists already, which we can only know after
	// having read all the labels.
	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	return p.readingLabels
}

// readingLabels represents the state where the last byte read (now in
// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
// first byte of the value (otherwise).
func (p *TextParser) readingLabels() stateFn {
	// Summaries/histograms are special. We have to reset the
	// currentLabels map, currentQuantile and currentBucket before starting to
	// read labels.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		p.currentLabels = map[string]string{}
		p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
		p.currentQuantile = math.NaN()
		p.currentBucket = math.NaN()
	}
	if p.currentByte != '{' {
		return p.readingValue
	}
	return p.startLabelName
}

// startLabelName represents the state where the next byte read from p.buf is
// the start of a label name (or whitespace leading up to it).
func (p *TextParser) startLabelName() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '}' {
		if p.skipBlankTab(); p.err != nil {
			return nil // Unexpected end of input.
		}
		return p.readingValue
	}
	if p.readTokenAsLabelName(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentToken.Len() == 0 {
		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
		return nil
	}
	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
		return nil
	}
	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
	// labels to 'real' labels.
	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
	}
	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte != '=' {
		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
		return nil
	}
	return p.startLabelValue
}

// startLabelValue represents the state where the next byte read from p.buf is
// the start of a (quoted) label value (or whitespace leading up to it).
func (p *TextParser) startLabelValue() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte != '"' {
		p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
		return nil
	}
	if p.readTokenAsLabelValue(); p.err != nil {
		return nil
	}
	p.currentLabelPair.Value = proto.String(p.currentToken.String())
	// Special treatment of summaries:
	// - Quantile labels are special, will result in dto.Quantile later.
	// - Other labels have to be added to currentLabels for signature calculation.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		if p.currentLabelPair.GetName() == model.QuantileLabel {
			if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
				// Create a more helpful error message.
				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
				return nil
			}
		} else {
			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
		}
	}
	// Similar special treatment of histograms.
	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		if p.currentLabelPair.GetName() == model.BucketLabel {
			if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
				// Create a more helpful error message.
				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
				return nil
			}
		} else {
			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
		}
	}
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	switch p.currentByte {
	case ',':
		return p.startLabelName

	case '}':
		if p.skipBlankTab(); p.err != nil {
			return nil // Unexpected end of input.
		}
		return p.readingValue
	default:
		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
		return nil
	}
}

// readingValue represents the state where the last byte read (now in
// p.currentByte) is the first byte of the sample value (i.e. a float).
func (p *TextParser) readingValue() stateFn {
	// When we are here, we have read all the labels, so for the
	// special case of a summary/histogram, we can finally find out
	// if the metric already exists.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		signature := model.LabelsToSignature(p.currentLabels)
		if summary := p.summaries[signature]; summary != nil {
			p.currentMetric = summary
		} else {
			p.summaries[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		signature := model.LabelsToSignature(p.currentLabels)
		if histogram := p.histograms[signature]; histogram != nil {
			p.currentMetric = histogram
		} else {
			p.histograms[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else {
		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
		return nil
	}
	switch p.currentMF.GetType() {
	case dto.MetricType_COUNTER:
		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
	case dto.MetricType_GAUGE:
		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
	case dto.MetricType_UNTYPED:
		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
	case dto.MetricType_SUMMARY:
		// *sigh*
		if p.currentMetric.Summary == nil {
			p.currentMetric.Summary = &dto.Summary{}
		}
		switch {
		case p.currentIsSummaryCount:
			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsSummarySum:
			p.currentMetric.Summary.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentQuantile):
			p.currentMetric.Summary.Quantile = append(
				p.currentMetric.Summary.Quantile,
				&dto.Quantile{
					Quantile: proto.Float64(p.currentQuantile),
					Value: proto.Float64(value),
				},
			)
		}
	case dto.MetricType_HISTOGRAM:
		// *sigh*
		if p.currentMetric.Histogram == nil {
			p.currentMetric.Histogram = &dto.Histogram{}
		}
		switch {
		case p.currentIsHistogramCount:
			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsHistogramSum:
			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentBucket):
			p.currentMetric.Histogram.Bucket = append(
				p.currentMetric.Histogram.Bucket,
				&dto.Bucket{
					UpperBound: proto.Float64(p.currentBucket),
					CumulativeCount: proto.Uint64(uint64(value)),
				},
			)
		}
	default:
		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	return p.startTimestamp
}

// startTimestamp represents the state where the next byte read from p.buf is
// the start of the timestamp (or whitespace leading up to it).
func (p *TextParser) startTimestamp() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
		return nil
	}
	p.currentMetric.TimestampMs = proto.Int64(timestamp)
	if p.readTokenUntilNewline(false); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentToken.Len() > 0 {
		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
		return nil
	}
	return p.startOfLine
}

// readingHelp represents the state where the last byte read (now in
// p.currentByte) is the first byte of the docstring after 'HELP'.
func (p *TextParser) readingHelp() stateFn {
	if p.currentMF.Help != nil {
		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the docstring.
	if p.readTokenUntilNewline(true); p.err != nil {
		return nil // Unexpected end of input.
	}
	p.currentMF.Help = proto.String(p.currentToken.String())
	return p.startOfLine
}

// readingType represents the state where the last byte read (now in
// p.currentByte) is the first byte of the type hint after 'TYPE'.
func (p *TextParser) readingType() stateFn {
	if p.currentMF.Type != nil {
		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the type.
	if p.readTokenUntilNewline(false); p.err != nil {
		return nil // Unexpected end of input.
	}
	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
	if !ok {
		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
		return nil
	}
	p.currentMF.Type = dto.MetricType(metricType).Enum()
	return p.startOfLine
}

// parseError sets p.err to a ParseError at the current line with the given
// message.
func (p *TextParser) parseError(msg string) {
	p.err = ParseError{
		Line: p.lineCount,
		Msg: msg,
	}
}

// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
func (p *TextParser) skipBlankTab() {
	for {
		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
			return
		}
	}
}

// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
// anything if p.currentByte is neither ' ' nor '\t'.
func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
	if isBlankOrTab(p.currentByte) {
		p.skipBlankTab()
	}
}

// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
// first byte considered is the byte already read (now in p.currentByte). The
// first whitespace byte encountered is still copied into p.currentByte, but not
// into p.currentToken.
func (p *TextParser) readTokenUntilWhitespace() {
	p.currentToken.Reset()
	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
		p.currentToken.WriteByte(p.currentByte)
		p.currentByte, p.err = p.buf.ReadByte()
	}
}

// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
// byte considered is the byte already read (now in p.currentByte). The first
// newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
// other escape sequences are invalid and cause an error.
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
	p.currentToken.Reset()
	escaped := false
	for p.err == nil {
		if recognizeEscapeSequence && escaped {
			switch p.currentByte {
			case '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
		} else {
			switch p.currentByte {
			case '\n':
				return
			case '\\':
				escaped = true
			default:
				p.currentToken.WriteByte(p.currentByte)
			}
		}
		p.currentByte, p.err = p.buf.ReadByte()
	}
}

// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
// The first byte considered is the byte already read (now in p.currentByte).
// The first byte not part of a metric name is still copied into p.currentByte,
// but not into p.currentToken.
func (p *TextParser) readTokenAsMetricName() {
	p.currentToken.Reset()
	if !isValidMetricNameStart(p.currentByte) {
		return
	}
	for {
		p.currentToken.WriteByte(p.currentByte)
		p.currentByte, p.err = p.buf.ReadByte()
		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
			return
		}
	}
}

// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
// The first byte considered is the byte already read (now in p.currentByte).
// The first byte not part of a label name is still copied into p.currentByte,
// but not into p.currentToken.
func (p *TextParser) readTokenAsLabelName() {
	p.currentToken.Reset()
	if !isValidLabelNameStart(p.currentByte) {
		return
	}
	for {
		p.currentToken.WriteByte(p.currentByte)
		p.currentByte, p.err = p.buf.ReadByte()
		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
			return
		}
	}
}

// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
// In contrast to the other 'readTokenAs...' functions, which start with the
// last read byte in p.currentByte, this method ignores p.currentByte and starts
// with reading a new byte from p.buf. The first byte not part of a label value
// is still copied into p.currentByte, but not into p.currentToken.
func (p *TextParser) readTokenAsLabelValue() {
	p.currentToken.Reset()
	escaped := false
	for {
		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
			return
		}
		if escaped {
			switch p.currentByte {
			case '"', '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
			continue
		}
		switch p.currentByte {
		case '"':
			return
		case '\n':
			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
			return
		case '\\':
			escaped = true
		default:
			p.currentToken.WriteByte(p.currentByte)
		}
	}
}

func (p *TextParser) setOrCreateCurrentMF() {
	p.currentIsSummaryCount = false
	p.currentIsSummarySum = false
	p.currentIsHistogramCount = false
	p.currentIsHistogramSum = false
	name := p.currentToken.String()
	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
		return
	}
	// Try out if this is a _sum or _count for a summary/histogram.
	summaryName := summaryMetricName(name)
	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_SUMMARY {
			if isCount(name) {
				p.currentIsSummaryCount = true
			}
			if isSum(name) {
				p.currentIsSummarySum = true
			}
			return
		}
	}
	histogramName := histogramMetricName(name)
	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
			if isCount(name) {
				p.currentIsHistogramCount = true
			}
			if isSum(name) {
				p.currentIsHistogramSum = true
			}
			return
		}
	}
	p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
	p.metricFamiliesByName[name] = p.currentMF
}

func isValidLabelNameStart(b byte) bool {
	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
}

func isValidLabelNameContinuation(b byte) bool {
	return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
}

func isValidMetricNameStart(b byte) bool {
	return isValidLabelNameStart(b) || b == ':'
}

func isValidMetricNameContinuation(b byte) bool {
	return isValidLabelNameContinuation(b) || b == ':'
}

func isBlankOrTab(b byte) bool {
	return b == ' ' || b == '\t'
}

func isCount(name string) bool {
	return len(name) > 6 && name[len(name)-6:] == "_count"
}

func isSum(name string) bool {
	return len(name) > 4 && name[len(name)-4:] == "_sum"
}

func isBucket(name string) bool {
	return len(name) > 7 && name[len(name)-7:] == "_bucket"
}

func summaryMetricName(name string) string {
	switch {
	case isCount(name):
		return name[:len(name)-6]
	case isSum(name):
		return name[:len(name)-4]
	default:
		return name
	}
}

func histogramMetricName(name string) string {
	switch {
	case isCount(name):
		return name[:len(name)-6]
	case isSum(name):
		return name[:len(name)-4]
	case isBucket(name):
		return name[:len(name)-7]
	default:
		return name
	}
}
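For reference, a minimal sketch of how the parser removed above is typically driven. This is not part of the diff; the import path assumes the upstream prometheus/common layout, and the sample input is hypothetical.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// A tiny exposition-format payload: HELP, TYPE, and one sample with a
	// label and a millisecond timestamp.
	input := "# HELP requests_total Total requests.\n" +
		"# TYPE requests_total counter\n" +
		"requests_total{code=\"200\"} 1027 1395066363000\n"

	var parser expfmt.TextParser // the zero value is ready to use
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Printf("%s: type=%s, metrics=%d\n", name, mf.GetType(), len(mf.GetMetric()))
	}
}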
Some files were not shown because too many files have changed in this diff.