// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

import (
	"context"
	"errors"
	"fmt"
	"math"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"slices"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/grafana/regexp"
	jsoniter "github.com/json-iterator/go"
	"github.com/munnerz/goautoneg"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/route"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/prometheus/prometheus/rules"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/index"
	"github.com/prometheus/prometheus/util/annotations"
	"github.com/prometheus/prometheus/util/httputil"
	"github.com/prometheus/prometheus/util/stats"
)

type status string

const (
	statusSuccess status = "success"
	statusError   status = "error"

	// Non-standard status code (originally introduced by nginx) for the case when a client closes
	// the connection while the server is still processing the request.
	statusClientClosedConnection = 499
)

type errorType string

const (
	errorNone          errorType = ""
	errorTimeout       errorType = "timeout"
	errorCanceled      errorType = "canceled"
	errorExec          errorType = "execution"
	errorBadData       errorType = "bad_data"
	errorInternal      errorType = "internal"
	errorUnavailable   errorType = "unavailable"
	errorNotFound      errorType = "not_found"
	errorNotAcceptable errorType = "not_acceptable"
)
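
// LocalhostRepresentations lists the address strings that are commonly used
// to refer to the local host.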
var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"}

type apiError struct {
	typ errorType
	err error
}

func (e *apiError) Error() string {
	return fmt.Sprintf("%s: %s", e.typ, e.err)
}

// ScrapePoolsRetriever provides the list of all scrape pools.
type ScrapePoolsRetriever interface {
	ScrapePools() []string
}

// TargetRetriever provides the lists of active and dropped targets, grouped by scrape pool.
type TargetRetriever interface {
	TargetsActive() map[string][]*scrape.Target
	TargetsDropped() map[string][]*scrape.Target
	TargetsDroppedCounts() map[string]int
}

// AlertmanagerRetriever provides a list of all/dropped AlertManager URLs.
type AlertmanagerRetriever interface {
	Alertmanagers() []*url.URL
	DroppedAlertmanagers() []*url.URL
}

// RulesRetriever provides a list of active rules and alerts.
type RulesRetriever interface {
	RuleGroups() []*rules.Group
	AlertingRules() []*rules.AlertingRule
}
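
// StatsRenderer converts internal query statistics into the form returned in
// the API response; the string argument carries the value of the request's
// "stats" parameter.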
type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats
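
// defaultStatsRenderer returns full query statistics whenever the "stats"
// parameter is non-empty, and nothing otherwise.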
func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats {
	if param != "" {
		return stats.NewQueryStats(s)
	}
	return nil
}

// PrometheusVersion contains build information about Prometheus.
type PrometheusVersion struct {
	Version   string `json:"version"`
	Revision  string `json:"revision"`
	Branch    string `json:"branch"`
	BuildUser string `json:"buildUser"`
	BuildDate string `json:"buildDate"`
	GoVersion string `json:"goVersion"`
}

// RuntimeInfo contains runtime information about Prometheus.
type RuntimeInfo struct {
	StartTime           time.Time `json:"startTime"`
	CWD                 string    `json:"CWD"`
	ReloadConfigSuccess bool      `json:"reloadConfigSuccess"`
	LastConfigTime      time.Time `json:"lastConfigTime"`
	CorruptionCount     int64     `json:"corruptionCount"`
	GoroutineCount      int       `json:"goroutineCount"`
	GOMAXPROCS          int       `json:"GOMAXPROCS"`
	GOMEMLIMIT          int64     `json:"GOMEMLIMIT"`
	GOGC                string    `json:"GOGC"`
	GODEBUG             string    `json:"GODEBUG"`
	StorageRetention    string    `json:"storageRetention"`
}

// Response contains a response to an HTTP API request.
type Response struct {
	Status    status      `json:"status"`
	Data      interface{} `json:"data,omitempty"`
	ErrorType errorType   `json:"errorType,omitempty"`
	Error     string      `json:"error,omitempty"`
	Warnings  []string    `json:"warnings,omitempty"`
}
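
// apiFuncResult bundles the data, error, warnings, and optional finalizer
// returned by an API endpoint; the caller is expected to run the finalizer
// once the result has been consumed.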
type apiFuncResult struct {
	data      interface{}
	err       *apiError
	warnings  annotations.Annotations
	finalizer func()
}
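
// apiFunc is the signature of a handler that serves one API endpoint.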
type apiFunc func(r *http.Request) apiFuncResult

// TSDBAdminStats defines the tsdb interfaces used by the v1 API for admin operations as well as statistics.
type TSDBAdminStats interface {
	CleanTombstones() error
	Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error
	Snapshot(dir string, withHead bool) error
	Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
	WALReplayStatus() (tsdb.WALReplayStatus, error)
}
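
// QueryOpts exposes the per-query options: per-step statistics and the
// lookback delta.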
type QueryOpts interface {
	EnablePerStepStats() bool
	LookbackDelta() time.Duration
}

// API can register a set of endpoints in a router and handle
// them using the provided storage and query engine.
type API struct {
	Queryable         storage.SampleAndChunkQueryable
	QueryEngine       promql.QueryEngine
	ExemplarQueryable storage.ExemplarQueryable

	scrapePoolsRetriever  func(context.Context) ScrapePoolsRetriever
	targetRetriever       func(context.Context) TargetRetriever
	alertmanagerRetriever func(context.Context) AlertmanagerRetriever
	rulesRetriever        func(context.Context) RulesRetriever
	now                   func() time.Time
	config                func() config.Config
	flagsMap              map[string]string
	ready                 func(http.HandlerFunc) http.HandlerFunc
	globalURLOptions      GlobalURLOptions

	db            TSDBAdminStats
	dbDir         string
	enableAdmin   bool
	logger        log.Logger
	CORSOrigin    *regexp.Regexp
	buildInfo     *PrometheusVersion
	runtimeInfo   func() (RuntimeInfo, error)
	gatherer      prometheus.Gatherer
	isAgent       bool
	statsRenderer StatsRenderer

	remoteWriteHandler http.Handler
	remoteReadHandler  http.Handler
	otlpWriteHandler   http.Handler

	codecs []Codec
}

// NewAPI returns an initialized API type.
func NewAPI(
	qe promql.QueryEngine,
	q storage.SampleAndChunkQueryable,
	ap storage.Appendable,
	eq storage.ExemplarQueryable,
	spsr func(context.Context) ScrapePoolsRetriever,
	tr func(context.Context) TargetRetriever,
	ar func(context.Context) AlertmanagerRetriever,
	configFunc func() config.Config,
	flagsMap map[string]string,
	globalURLOptions GlobalURLOptions,
	readyFunc func(http.HandlerFunc) http.HandlerFunc,
	db TSDBAdminStats,
	dbDir string,
	enableAdmin bool,
	logger log.Logger,
	rr func(context.Context) RulesRetriever,
	remoteReadSampleLimit int,
	remoteReadConcurrencyLimit int,
	remoteReadMaxBytesInFrame int,
	isAgent bool,
	corsOrigin *regexp.Regexp,
	runtimeInfo func() (RuntimeInfo, error),
	buildInfo *PrometheusVersion,
	gatherer prometheus.Gatherer,
	registerer prometheus.Registerer,
	statsRenderer StatsRenderer,
	rwEnabled bool,
	otlpEnabled bool,
) *API {
	a := &API{
		QueryEngine:       qe,
		Queryable:         q,
		ExemplarQueryable: eq,

		scrapePoolsRetriever:  spsr,
		targetRetriever:       tr,
		alertmanagerRetriever: ar,

		now:              time.Now,
		config:           configFunc,
		flagsMap:         flagsMap,
		ready:            readyFunc,
		globalURLOptions: globalURLOptions,
		db:               db,
		dbDir:            dbDir,
		enableAdmin:      enableAdmin,
		rulesRetriever:   rr,
		logger:           logger,
		CORSOrigin:       corsOrigin,
		runtimeInfo:      runtimeInfo,
		buildInfo:        buildInfo,
		gatherer:         gatherer,
		isAgent:          isAgent,
		statsRenderer:    defaultStatsRenderer,

		remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
	}

	a.InstallCodec(JSONCodec{})

	if statsRenderer != nil {
		a.statsRenderer = statsRenderer
	}

	if ap == nil && (rwEnabled || otlpEnabled) {
		panic("remote write or otlp write enabled, but no appender passed in.")
	}

	if rwEnabled {
		a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap)
	}
	if otlpEnabled {
		a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)
	}

	return a
}

// InstallCodec adds the given codec to this API's available codecs.
// Codecs installed first take precedence over codecs installed later when evaluating wildcards in Accept headers.
// The first installed codec is used as a fallback when the Accept header cannot be satisfied or if there is no Accept header.
func (api *API) InstallCodec(codec Codec) {
	api.codecs = append(api.codecs, codec)
}

// ClearCodecs removes all available codecs from this API, including the default codec installed by NewAPI.
func (api *API) ClearCodecs() {
	api.codecs = nil
}
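
// setUnavailStatusOnTSDBNotReady turns a TSDB-not-ready error into an
// "unavailable" API error so the response carries the right status.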
func setUnavailStatusOnTSDBNotReady(r apiFuncResult) apiFuncResult {
	if r.err != nil && errors.Is(r.err.err, tsdb.ErrNotReady) {
		r.err.typ = errorUnavailable
	}
	return r
}

// Register the API's endpoints in the given router.
func (api *API) Register(r *route.Router) {
	wrap := func(f apiFunc) http.HandlerFunc {
		hf := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			httputil.SetCORS(w, api.CORSOrigin, r)
			result := setUnavailStatusOnTSDBNotReady(f(r))
			if result.finalizer != nil {
				defer result.finalizer()
			}
			if result.err != nil {
				api.respondError(w, result.err, result.data)
				return
			}

			if result.data != nil {
				api.respond(w, r, result.data, result.warnings, r.FormValue("query"))
				return
			}
			w.WriteHeader(http.StatusNoContent)
		})
		return api.ready(httputil.CompressionHandler{
			Handler: hf,
		}.ServeHTTP)
	}

	wrapAgent := func(f apiFunc) http.HandlerFunc {
		return wrap(func(r *http.Request) apiFuncResult {
			if api.isAgent {
				return apiFuncResult{nil, &apiError{errorExec, errors.New("unavailable with Prometheus Agent")}, nil, nil}
			}
			return f(r)
		})
	}

	r.Options("/*path", wrap(api.options))

	r.Get("/query", wrapAgent(api.query))
	r.Post("/query", wrapAgent(api.query))
	r.Get("/query_range", wrapAgent(api.queryRange))
	r.Post("/query_range", wrapAgent(api.queryRange))
	r.Get("/query_exemplars", wrapAgent(api.queryExemplars))
	r.Post("/query_exemplars", wrapAgent(api.queryExemplars))

	r.Get("/format_query", wrapAgent(api.formatQuery))
	r.Post("/format_query", wrapAgent(api.formatQuery))

	r.Get("/labels", wrapAgent(api.labelNames))
	r.Post("/labels", wrapAgent(api.labelNames))
	r.Get("/label/:name/values", wrapAgent(api.labelValues))

	r.Get("/series", wrapAgent(api.series))
	r.Post("/series", wrapAgent(api.series))
	r.Del("/series", wrapAgent(api.dropSeries))

	r.Get("/scrape_pools", wrap(api.scrapePools))
	r.Get("/targets", wrap(api.targets))
	r.Get("/targets/metadata", wrap(api.targetMetadata))
	r.Get("/alertmanagers", wrapAgent(api.alertmanagers))

	r.Get("/metadata", wrap(api.metricMetadata))

	r.Get("/status/config", wrap(api.serveConfig))
	r.Get("/status/runtimeinfo", wrap(api.serveRuntimeInfo))
	r.Get("/status/buildinfo", wrap(api.serveBuildInfo))
	r.Get("/status/flags", wrap(api.serveFlags))
	r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus))
	r.Get("/status/walreplay", api.serveWALReplayStatus)
	r.Post("/read", api.ready(api.remoteRead))
	r.Post("/write", api.ready(api.remoteWrite))
	r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite))

	r.Get("/alerts", wrapAgent(api.alerts))
	r.Get("/rules", wrapAgent(api.rules))

	// Admin APIs
	r.Post("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries))
	r.Post("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones))
	r.Post("/admin/tsdb/snapshot", wrapAgent(api.snapshot))

	r.Put("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries))
	r.Put("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones))
	r.Put("/admin/tsdb/snapshot", wrapAgent(api.snapshot))
}
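
// QueryData carries the result of an evaluated query along with its type and
// optional statistics.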
type QueryData struct {
	ResultType parser.ValueType `json:"resultType"`
	Result     parser.Value     `json:"result"`
	Stats      stats.QueryStats `json:"stats,omitempty"`
}
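
// invalidParamError wraps err into a bad_data apiError that names the
// offending request parameter.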
func invalidParamError(err error, parameter string) apiFuncResult {
	return apiFuncResult{nil, &apiError{
		errorBadData, fmt.Errorf("invalid parameter %q: %w", parameter, err),
	}, nil, nil}
}

func (api *API) options(*http.Request) apiFuncResult {
	return apiFuncResult{nil, nil, nil, nil}
}

func (api *API) query(r *http.Request) (result apiFuncResult) {
	ts, err := parseTimeParam(r, "time", api.now())
	if err != nil {
		return invalidParamError(err, "time")
	}
	ctx := r.Context()
	if to := r.FormValue("timeout"); to != "" {
		var cancel context.CancelFunc
		timeout, err := parseDuration(to)
		if err != nil {
			return invalidParamError(err, "timeout")
		}

		ctx, cancel = context.WithDeadline(ctx, api.now().Add(timeout))
		defer cancel()
	}

	opts, err := extractQueryOpts(r)
	if err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}
	qry, err := api.QueryEngine.NewInstantQuery(ctx, api.Queryable, opts, r.FormValue("query"), ts)
	if err != nil {
		return invalidParamError(err, "query")
	}

	// From now on, we must only return with a finalizer in the result (to
	// be called by the caller) or call qry.Close ourselves (which is
	// required in the case of a panic).
	defer func() {
		if result.finalizer == nil {
			qry.Close()
		}
	}()

	ctx = httputil.ContextFromRequest(ctx, r)

	res := qry.Exec(ctx)
	if res.Err != nil {
		return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close}
	}

	// Optional stats field in response if parameter "stats" is not empty.
	sr := api.statsRenderer
	if sr == nil {
		sr = defaultStatsRenderer
	}
	qs := sr(ctx, qry.Stats(), r.FormValue("stats"))

	return apiFuncResult{&QueryData{
		ResultType: res.Value.Type(),
		Result:     res.Value,
		Stats:      qs,
	}, nil, res.Warnings, qry.Close}
}

func (api *API) formatQuery(r *http.Request) (result apiFuncResult) {
	expr, err := parser.ParseExpr(r.FormValue("query"))
	if err != nil {
		return invalidParamError(err, "query")
	}

	return apiFuncResult{expr.Pretty(0), nil, nil, nil}
}
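
// extractQueryOpts builds the per-query options from the request's
// "lookback_delta" and "stats" form values.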
func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) {
	var duration time.Duration

	if strDuration := r.FormValue("lookback_delta"); strDuration != "" {
		parsedDuration, err := parseDuration(strDuration)
		if err != nil {
			return nil, fmt.Errorf("error parsing lookback delta duration: %w", err)
		}
		duration = parsedDuration
	}

	return promql.NewPrometheusQueryOpts(r.FormValue("stats") == "all", duration), nil
}

func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
	start, err := parseTime(r.FormValue("start"))
	if err != nil {
		return invalidParamError(err, "start")
	}
	end, err := parseTime(r.FormValue("end"))
	if err != nil {
		return invalidParamError(err, "end")
	}
	if end.Before(start) {
		return invalidParamError(errors.New("end timestamp must not be before start time"), "end")
	}

	step, err := parseDuration(r.FormValue("step"))
	if err != nil {
		return invalidParamError(err, "step")
	}

	if step <= 0 {
		return invalidParamError(errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer"), "step")
	}

	// For safety, limit the number of returned points per timeseries.
	// This is sufficient for 60s resolution for a week or 1h resolution for a year.
	if end.Sub(start)/step > 11000 {
		err := errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}

	ctx := r.Context()
	if to := r.FormValue("timeout"); to != "" {
		var cancel context.CancelFunc
		timeout, err := parseDuration(to)
		if err != nil {
			return invalidParamError(err, "timeout")
		}

		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}

	opts, err := extractQueryOpts(r)
	if err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}
	qry, err := api.QueryEngine.NewRangeQuery(ctx, api.Queryable, opts, r.FormValue("query"), start, end, step)
	if err != nil {
		return invalidParamError(err, "query")
	}
	// From now on, we must only return with a finalizer in the result (to
	// be called by the caller) or call qry.Close ourselves (which is
	// required in the case of a panic).
	defer func() {
		if result.finalizer == nil {
			qry.Close()
		}
	}()

	ctx = httputil.ContextFromRequest(ctx, r)

	res := qry.Exec(ctx)
	if res.Err != nil {
		return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close}
	}

	// Optional stats field in response if parameter "stats" is not empty.
	sr := api.statsRenderer
	if sr == nil {
		sr = defaultStatsRenderer
	}
	qs := sr(ctx, qry.Stats(), r.FormValue("stats"))

	return apiFuncResult{&QueryData{
		ResultType: res.Value.Type(),
		Result:     res.Value,
		Stats:      qs,
	}, nil, res.Warnings, qry.Close}
}

func (api *API) queryExemplars(r *http.Request) apiFuncResult {
	start, err := parseTimeParam(r, "start", MinTime)
	if err != nil {
		return invalidParamError(err, "start")
	}
	end, err := parseTimeParam(r, "end", MaxTime)
	if err != nil {
		return invalidParamError(err, "end")
	}
	if end.Before(start) {
		err := errors.New("end timestamp must not be before start timestamp")
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}

	expr, err := parser.ParseExpr(r.FormValue("query"))
	if err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}

	selectors := parser.ExtractSelectors(expr)
	if len(selectors) < 1 {
		return apiFuncResult{nil, nil, nil, nil}
	}

	ctx := r.Context()
	eq, err := api.ExemplarQueryable.ExemplarQuerier(ctx)
	if err != nil {
		return apiFuncResult{nil, returnAPIError(err), nil, nil}
	}

	res, err := eq.Select(timestamp.FromTime(start), timestamp.FromTime(end), selectors...)
	if err != nil {
		return apiFuncResult{nil, returnAPIError(err), nil, nil}
	}

	return apiFuncResult{res, nil, nil, nil}
}
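
// returnAPIError maps known query engine and storage errors to the matching
// API error type, defaulting to an execution error.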
func returnAPIError(err error) *apiError {
	if err == nil {
		return nil
	}

	var eqc promql.ErrQueryCanceled
	var eqt promql.ErrQueryTimeout
	var es promql.ErrStorage
	switch {
	case errors.As(err, &eqc):
		return &apiError{errorCanceled, err}
	case errors.As(err, &eqt):
		return &apiError{errorTimeout, err}
	case errors.As(err, &es):
		return &apiError{errorInternal, err}
	}

	if errors.Is(err, context.Canceled) {
		return &apiError{errorCanceled, err}
	}

	return &apiError{errorExec, err}
}

func (api *API) labelNames(r *http.Request) apiFuncResult {
	limit, err := parseLimitParam(r.FormValue("limit"))
	if err != nil {
		return invalidParamError(err, "limit")
	}

	start, err := parseTimeParam(r, "start", MinTime)
	if err != nil {
		return invalidParamError(err, "start")
	}
	end, err := parseTimeParam(r, "end", MaxTime)
	if err != nil {
		return invalidParamError(err, "end")
	}

	matcherSets, err := parseMatchersParam(r.Form["match[]"])
	if err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}

	q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
	if err != nil {
		return apiFuncResult{nil, returnAPIError(err), nil, nil}
	}
	defer q.Close()

	var (
		names    []string
		warnings annotations.Annotations
	)
	if len(matcherSets) > 1 {
		labelNamesSet := make(map[string]struct{})

		for _, matchers := range matcherSets {
			vals, callWarnings, err := q.LabelNames(r.Context(), matchers...)
			if err != nil {
				return apiFuncResult{nil, returnAPIError(err), warnings, nil}
			}

			warnings.Merge(callWarnings)
			for _, val := range vals {
				labelNamesSet[val] = struct{}{}
			}
		}

		// Convert the map to an array.
		names = make([]string, 0, len(labelNamesSet))
		for key := range labelNamesSet {
			names = append(names, key)
		}
		slices.Sort(names)
	} else {
		var matchers []*labels.Matcher
		if len(matcherSets) == 1 {
			matchers = matcherSets[0]
		}
		names, warnings, err = q.LabelNames(r.Context(), matchers...)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
		}
	}

	if names == nil {
		names = []string{}
	}

	if len(names) >= limit {
		names = names[:limit]
		warnings = warnings.Add(errors.New("results truncated due to limit"))
	}
	return apiFuncResult{names, nil, warnings, nil}
}

func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
	ctx := r.Context()
	name := route.Param(ctx, "name")

	if !model.LabelNameRE.MatchString(name) {
		return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("invalid label name: %q", name)}, nil, nil}
	}

	limit, err := parseLimitParam(r.FormValue("limit"))
	if err != nil {
		return invalidParamError(err, "limit")
	}

	start, err := parseTimeParam(r, "start", MinTime)
	if err != nil {
		return invalidParamError(err, "start")
	}
	end, err := parseTimeParam(r, "end", MaxTime)
	if err != nil {
		return invalidParamError(err, "end")
	}

	matcherSets, err := parseMatchersParam(r.Form["match[]"])
	if err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}

	q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
	if err != nil {
		return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
	}
	// From now on, we must only return with a finalizer in the result (to
	// be called by the caller) or call q.Close ourselves (which is required
	// in the case of a panic).
	defer func() {
		if result.finalizer == nil {
			q.Close()
		}
	}()
	closer := func() {
		q.Close()
	}

	var (
		vals     []string
		warnings annotations.Annotations
	)
	if len(matcherSets) > 1 {
		var callWarnings annotations.Annotations
		labelValuesSet := make(map[string]struct{})
		for _, matchers := range matcherSets {
			vals, callWarnings, err = q.LabelValues(ctx, name, matchers...)
			if err != nil {
				return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
			}
			warnings.Merge(callWarnings)
			for _, val := range vals {
				labelValuesSet[val] = struct{}{}
			}
		}

		vals = make([]string, 0, len(labelValuesSet))
		for val := range labelValuesSet {
			vals = append(vals, val)
		}
	} else {
		var matchers []*labels.Matcher
		if len(matcherSets) == 1 {
			matchers = matcherSets[0]
		}
		vals, warnings, err = q.LabelValues(ctx, name, matchers...)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
		}

		if vals == nil {
			vals = []string{}
		}
	}

	slices.Sort(vals)

	if len(vals) >= limit {
		vals = vals[:limit]
		warnings = warnings.Add(errors.New("results truncated due to limit"))
	}

	return apiFuncResult{vals, nil, warnings, closer}
}

var (
	// MinTime is the default timestamp used for the beginning of optional time ranges.
	// Exposed to let downstream projects reference it.
	MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()

	// MaxTime is the default timestamp used for the end of optional time ranges.
	// Exposed to let downstream projects reference it.
	MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()

	minTimeFormatted = MinTime.Format(time.RFC3339Nano)
	maxTimeFormatted = MaxTime.Format(time.RFC3339Nano)
)

func (api *API) series(r *http.Request) (result apiFuncResult) {
	ctx := r.Context()

	if err := r.ParseForm(); err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing form values: %w", err)}, nil, nil}
	}
	if len(r.Form["match[]"]) == 0 {
		return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil}
	}

	limit, err := parseLimitParam(r.FormValue("limit"))
	if err != nil {
		return invalidParamError(err, "limit")
	}

	start, err := parseTimeParam(r, "start", MinTime)
	if err != nil {
		return invalidParamError(err, "start")
	}
	end, err := parseTimeParam(r, "end", MaxTime)
	if err != nil {
		return invalidParamError(err, "end")
	}

	matcherSets, err := parseMatchersParam(r.Form["match[]"])
	if err != nil {
		return invalidParamError(err, "match[]")
	}

	q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
	if err != nil {
		return apiFuncResult{nil, returnAPIError(err), nil, nil}
	}
	// From now on, we must only return with a finalizer in the result (to
	// be called by the caller) or call q.Close ourselves (which is required
	// in the case of a panic).
	defer func() {
		if result.finalizer == nil {
			q.Close()
		}
	}()
	closer := func() {
		q.Close()
	}

	hints := &storage.SelectHints{
		Start: timestamp.FromTime(start),
		End:   timestamp.FromTime(end),
		Func:  "series", // There is no series function, this token is used for lookups that don't need samples.
	}
	var set storage.SeriesSet

	if len(matcherSets) > 1 {
		var sets []storage.SeriesSet
		for _, mset := range matcherSets {
			// We need to sort these select results to merge (deduplicate) the series sets later.
			s := q.Select(ctx, true, hints, mset...)
			sets = append(sets, s)
		}
		set = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
	} else {
		// At this point at least one match exists.
		set = q.Select(ctx, false, hints, matcherSets[0]...)
	}

	metrics := []labels.Labels{}

	warnings := set.Warnings()

	for set.Next() {
		if err := ctx.Err(); err != nil {
			return apiFuncResult{nil, returnAPIError(err), warnings, closer}
		}
		metrics = append(metrics, set.At().Labels())

		if len(metrics) >= limit {
			warnings.Add(errors.New("results truncated due to limit"))
			return apiFuncResult{metrics, nil, warnings, closer}
		}
	}
	if set.Err() != nil {
		return apiFuncResult{nil, returnAPIError(set.Err()), warnings, closer}
	}

	return apiFuncResult{metrics, nil, warnings, closer}
}

func (api *API) dropSeries(_ *http.Request) apiFuncResult {
	return apiFuncResult{nil, &apiError{errorInternal, errors.New("not implemented")}, nil, nil}
}

// Target has the information for one target.
type Target struct {
	// Labels before any processing.
	DiscoveredLabels labels.Labels `json:"discoveredLabels"`
	// Any labels that are added to this target and its metrics.
	Labels labels.Labels `json:"labels"`

	ScrapePool string `json:"scrapePool"`
	ScrapeURL  string `json:"scrapeUrl"`
	GlobalURL  string `json:"globalUrl"`

	LastError          string              `json:"lastError"`
	LastScrape         time.Time           `json:"lastScrape"`
	LastScrapeDuration float64             `json:"lastScrapeDuration"`
	Health             scrape.TargetHealth `json:"health"`

	ScrapeInterval string `json:"scrapeInterval"`
	ScrapeTimeout  string `json:"scrapeTimeout"`
}
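
// ScrapePoolsDiscovery lists the names of all configured scrape pools.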
type ScrapePoolsDiscovery struct {
	ScrapePools []string `json:"scrapePools"`
}

// DroppedTarget has the information for one target that was dropped during relabelling.
type DroppedTarget struct {
	// Labels before any processing.
	DiscoveredLabels labels.Labels `json:"discoveredLabels"`
}

// TargetDiscovery has all the active and dropped targets.
type TargetDiscovery struct {
	ActiveTargets       []*Target        `json:"activeTargets"`
	DroppedTargets      []*DroppedTarget `json:"droppedTargets"`
	DroppedTargetCounts map[string]int   `json:"droppedTargetCounts"`
}

// GlobalURLOptions contains fields used for deriving the global URL for local targets.
type GlobalURLOptions struct {
	ListenAddress string
	Host          string
	Scheme        string
}

// sanitizeSplitHostPort acts like net.SplitHostPort.
// Additionally, if there is no port in the host passed as input, we return the
// original host, making sure that IPv6 addresses are not surrounded by square
// brackets.
func sanitizeSplitHostPort(input string) (string, string, error) {
	host, port, err := net.SplitHostPort(input)
	if err != nil && strings.HasSuffix(err.Error(), "missing port in address") {
		var errWithPort error
		host, _, errWithPort = net.SplitHostPort(input + ":80")
		if errWithPort == nil {
			err = nil
		}
	}
	return host, port, err
}
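
// getGlobalURL rewrites a local target's URL so that it is reachable from
// outside: if the target points at one of the localhost representations, the
// host (and, for self-scrapes, the scheme) is replaced according to opts.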
func getGlobalURL(u *url.URL, opts GlobalURLOptions) (*url.URL, error) {
	host, port, err := sanitizeSplitHostPort(u.Host)
	if err != nil {
		return u, err
	}

	for _, lhr := range LocalhostRepresentations {
		if host == lhr {
			_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
			if err != nil {
				return u, err
			}

			if port == ownPort {
				// Only in the case where the target is on localhost and its port is
				// the same as the one we're listening on, we know for sure that
				// we're monitoring our own process and that we need to change the
				// scheme, hostname, and port to the externally reachable ones as
				// well. We shouldn't need to touch the path at all, since if a
				// path prefix is defined, the path under which we scrape ourselves
				// should already contain the prefix.
				u.Scheme = opts.Scheme
				u.Host = opts.Host
			} else {
				// Otherwise, we only know that localhost is not reachable
				// externally, so we replace only the hostname by the one in the
				// external URL. It could be the wrong hostname for the service on
				// this port, but it's still the best possible guess.
				host, _, err := sanitizeSplitHostPort(opts.Host)
				if err != nil {
					return u, err
				}
				u.Host = host
				if port != "" {
					u.Host = net.JoinHostPort(u.Host, port)
				}
			}
			break
		}
	}

	return u, nil
}
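
// scrapePools returns the sorted names of all configured scrape pools.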
func (api *API) scrapePools(r *http.Request) apiFuncResult {
	names := api.scrapePoolsRetriever(r.Context()).ScrapePools()
	sort.Strings(names)
	res := &ScrapePoolsDiscovery{ScrapePools: names}
	return apiFuncResult{data: res, err: nil, warnings: nil, finalizer: nil}
}
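
// targets serves the targets discovery endpoint, returning active and/or
// dropped targets, optionally filtered by scrape pool and state.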
func (api *API) targets(r *http.Request) apiFuncResult {
	sortKeys := func(targets map[string][]*scrape.Target) ([]string, int) {
		var n int
		keys := make([]string, 0, len(targets))
		for k := range targets {
			keys = append(keys, k)
			n += len(targets[k])
		}
		slices.Sort(keys)
		return keys, n
	}

	scrapePool := r.URL.Query().Get("scrapePool")
	state := strings.ToLower(r.URL.Query().Get("state"))
	showActive := state == "" || state == "any" || state == "active"
	showDropped := state == "" || state == "any" || state == "dropped"
	res := &TargetDiscovery{}

	if showActive {
		targetsActive := api.targetRetriever(r.Context()).TargetsActive()
		activeKeys, numTargets := sortKeys(targetsActive)
		res.ActiveTargets = make([]*Target, 0, numTargets)
		builder := labels.NewScratchBuilder(0)

		for _, key := range activeKeys {
			if scrapePool != "" && key != scrapePool {
				continue
			}
			for _, target := range targetsActive[key] {
				lastErrStr := ""
				lastErr := target.LastError()
				if lastErr != nil {
					lastErrStr = lastErr.Error()
				}

				globalURL, err := getGlobalURL(target.URL(), api.globalURLOptions)

				res.ActiveTargets = append(res.ActiveTargets, &Target{
					DiscoveredLabels: target.DiscoveredLabels(),
					Labels:           target.Labels(&builder),
					ScrapePool:       key,
					ScrapeURL:        target.URL().String(),
					GlobalURL:        globalURL.String(),
					LastError: func() string {
						switch {
						case err == nil && lastErrStr == "":
							return ""
						case err != nil:
							return fmt.Errorf("%s: %w", lastErrStr, err).Error()
						default:
							return lastErrStr
						}
					}(),
					LastScrape:         target.LastScrape(),
					LastScrapeDuration: target.LastScrapeDuration().Seconds(),
					Health:             target.Health(),
					ScrapeInterval:     target.GetValue(model.ScrapeIntervalLabel),
					ScrapeTimeout:      target.GetValue(model.ScrapeTimeoutLabel),
				})
			}
		}
	} else {
		res.ActiveTargets = []*Target{}
	}
	if showDropped {
		res.DroppedTargetCounts = api.targetRetriever(r.Context()).TargetsDroppedCounts()

		targetsDropped := api.targetRetriever(r.Context()).TargetsDropped()
		droppedKeys, numTargets := sortKeys(targetsDropped)
		res.DroppedTargets = make([]*DroppedTarget, 0, numTargets)
		for _, key := range droppedKeys {
			if scrapePool != "" && key != scrapePool {
				continue
			}
			for _, target := range targetsDropped[key] {
				res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{
					DiscoveredLabels: target.DiscoveredLabels(),
				})
			}
		}
	} else {
		res.DroppedTargets = []*DroppedTarget{}
	}
	return apiFuncResult{res, nil, nil, nil}
}
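
// matchLabels reports whether lset is matched by all given matchers.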
func matchLabels(lset labels.Labels, matchers []*labels.Matcher) bool {
	for _, m := range matchers {
		if !m.Matches(lset.Get(m.Name)) {
			return false
		}
	}
	return true
}
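
// targetMetadata returns metric metadata collected from active targets,
// optionally filtered by a target label selector (match_target) and a metric
// name, and capped at an optional limit.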
func (api *API) targetMetadata(r *http.Request) apiFuncResult {
	limit := -1
	if s := r.FormValue("limit"); s != "" {
		var err error
		if limit, err = strconv.Atoi(s); err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil}
		}
	}

	matchTarget := r.FormValue("match_target")
	var matchers []*labels.Matcher
	var err error
	if matchTarget != "" {
		matchers, err = parser.ParseMetricSelector(matchTarget)
		if err != nil {
			return invalidParamError(err, "match_target")
		}
	}

	builder := labels.NewScratchBuilder(0)
	metric := r.FormValue("metric")
	res := []metricMetadata{}
	for _, tt := range api.targetRetriever(r.Context()).TargetsActive() {
		for _, t := range tt {
			if limit >= 0 && len(res) >= limit {
				break
			}
			targetLabels := t.Labels(&builder)
			// Filter targets that don't satisfy the label matchers.
			if matchTarget != "" && !matchLabels(targetLabels, matchers) {
				continue
			}
			// If no metric is specified, get the full list for the target.
			if metric == "" {
				for _, md := range t.ListMetadata() {
					res = append(res, metricMetadata{
						Target: targetLabels,
						Metric: md.Metric,
						Type:   md.Type,
						Help:   md.Help,
						Unit:   md.Unit,
					})
				}
				continue
			}
			// Get metadata for the specified metric.
			if md, ok := t.GetMetadata(metric); ok {
				res = append(res, metricMetadata{
					Target: targetLabels,
					Type:   md.Type,
					Help:   md.Help,
					Unit:   md.Unit,
				})
			}
		}
	}

	return apiFuncResult{res, nil, nil, nil}
}
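
// metricMetadata is a single metadata entry as returned by the target
// metadata endpoint.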
type metricMetadata struct {
	Target labels.Labels    `json:"target"`
	Metric string           `json:"metric,omitempty"`
	Type   model.MetricType `json:"type"`
	Help   string           `json:"help"`
	Unit   string           `json:"unit"`
}

// AlertmanagerDiscovery has all the active and dropped Alertmanagers.
type AlertmanagerDiscovery struct {
	ActiveAlertmanagers  []*AlertmanagerTarget `json:"activeAlertmanagers"`
	DroppedAlertmanagers []*AlertmanagerTarget `json:"droppedAlertmanagers"`
}

// AlertmanagerTarget has info on one Alertmanager.
type AlertmanagerTarget struct {
	URL string `json:"url"`
}
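
// alertmanagers returns the discovered active and dropped Alertmanager URLs.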
func (api *API) alertmanagers(r *http.Request) apiFuncResult {
	urls := api.alertmanagerRetriever(r.Context()).Alertmanagers()
	droppedURLs := api.alertmanagerRetriever(r.Context()).DroppedAlertmanagers()
	ams := &AlertmanagerDiscovery{ActiveAlertmanagers: make([]*AlertmanagerTarget, len(urls)), DroppedAlertmanagers: make([]*AlertmanagerTarget, len(droppedURLs))}
	for i, url := range urls {
		ams.ActiveAlertmanagers[i] = &AlertmanagerTarget{URL: url.String()}
	}
	for i, url := range droppedURLs {
		ams.DroppedAlertmanagers[i] = &AlertmanagerTarget{URL: url.String()}
	}
	return apiFuncResult{ams, nil, nil, nil}
}

// AlertDiscovery has info for all active alerts.
type AlertDiscovery struct {
	Alerts []*Alert `json:"alerts"`
}

// Alert has info for an alert.
type Alert struct {
	Labels          labels.Labels `json:"labels"`
	Annotations     labels.Labels `json:"annotations"`
	State           string        `json:"state"`
	ActiveAt        *time.Time    `json:"activeAt,omitempty"`
	KeepFiringSince *time.Time    `json:"keepFiringSince,omitempty"`
	Value           string        `json:"value"`
}
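
// alerts returns all alerts that are currently active across the loaded
// alerting rules.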
func (api *API) alerts(r *http.Request) apiFuncResult {
	alertingRules := api.rulesRetriever(r.Context()).AlertingRules()
	alerts := []*Alert{}

	for _, alertingRule := range alertingRules {
		alerts = append(
			alerts,
			rulesAlertsToAPIAlerts(alertingRule.ActiveAlerts())...,
		)
	}

	res := &AlertDiscovery{Alerts: alerts}

	return apiFuncResult{res, nil, nil, nil}
}
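
// rulesAlertsToAPIAlerts converts the internal rules.Alert representation
// into the API's Alert type.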
func rulesAlertsToAPIAlerts(rulesAlerts []*rules.Alert) []*Alert {
	apiAlerts := make([]*Alert, len(rulesAlerts))
	for i, ruleAlert := range rulesAlerts {
		apiAlerts[i] = &Alert{
			Labels:      ruleAlert.Labels,
			Annotations: ruleAlert.Annotations,
			State:       ruleAlert.State.String(),
			ActiveAt:    &ruleAlert.ActiveAt,
			Value:       strconv.FormatFloat(ruleAlert.Value, 'e', -1, 64),
		}
		if !ruleAlert.KeepFiringSince.IsZero() {
			apiAlerts[i].KeepFiringSince = &ruleAlert.KeepFiringSince
		}
	}

	return apiAlerts
}
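
// metricMetadata returns metadata deduplicated across all active targets,
// keyed by metric name and capped by the optional limit and limit_per_metric
// parameters.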
func (api *API) metricMetadata(r *http.Request) apiFuncResult {
	metrics := map[string]map[metadata.Metadata]struct{}{}

	limit := -1
	if s := r.FormValue("limit"); s != "" {
		var err error
		if limit, err = strconv.Atoi(s); err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil}
		}
	}
	limitPerMetric := -1
	if s := r.FormValue("limit_per_metric"); s != "" {
		var err error
		if limitPerMetric, err = strconv.Atoi(s); err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit_per_metric must be a number")}, nil, nil}
		}
	}

	metric := r.FormValue("metric")
	for _, tt := range api.targetRetriever(r.Context()).TargetsActive() {
		for _, t := range tt {
			if metric == "" {
				for _, mm := range t.ListMetadata() {
					m := metadata.Metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit}
					ms, ok := metrics[mm.Metric]

					if limitPerMetric > 0 && len(ms) >= limitPerMetric {
						continue
					}

					if !ok {
						ms = map[metadata.Metadata]struct{}{}
						metrics[mm.Metric] = ms
					}
					ms[m] = struct{}{}
				}
				continue
			}

			if md, ok := t.GetMetadata(metric); ok {
				m := metadata.Metadata{Type: md.Type, Help: md.Help, Unit: md.Unit}
				ms, ok := metrics[md.Metric]

				if limitPerMetric > 0 && len(ms) >= limitPerMetric {
					continue
				}

				if !ok {
					ms = map[metadata.Metadata]struct{}{}
					metrics[md.Metric] = ms
				}
				ms[m] = struct{}{}
			}
		}
	}

	// Put the elements from the pseudo-set into a slice for marshaling.
	res := map[string][]metadata.Metadata{}
	for name, set := range metrics {
		if limit >= 0 && len(res) >= limit {
			break
		}

		s := []metadata.Metadata{}
		for md := range set {
			s = append(s, md)
		}
		res[name] = s
	}

	return apiFuncResult{res, nil, nil, nil}
}

// RuleDiscovery has info for all rules.
type RuleDiscovery struct {
	RuleGroups []*RuleGroup `json:"groups"`
}

// RuleGroup has info for rules which are part of a group.
type RuleGroup struct {
	Name string `json:"name"`
	File string `json:"file"`
	// In order to preserve rule ordering, while exposing type (alerting or recording)
	// specific properties, both alerting and recording rules are exposed in the
	// same array.
	Rules          []Rule    `json:"rules"`
	Interval       float64   `json:"interval"`
	Limit          int       `json:"limit"`
	EvaluationTime float64   `json:"evaluationTime"`
	LastEvaluation time.Time `json:"lastEvaluation"`
}
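
// Rule is either an AlertingRule or a RecordingRule; both are serialized into
// the same rules array to preserve their ordering within a group.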
type Rule interface{}
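
// AlertingRule is the JSON representation of an alerting rule.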
type AlertingRule struct {
	// State can be "pending", "firing", "inactive".
	State          string           `json:"state"`
	Name           string           `json:"name"`
	Query          string           `json:"query"`
	Duration       float64          `json:"duration"`
	KeepFiringFor  float64          `json:"keepFiringFor"`
	Labels         labels.Labels    `json:"labels"`
	Annotations    labels.Labels    `json:"annotations"`
	Alerts         []*Alert         `json:"alerts"`
	Health         rules.RuleHealth `json:"health"`
	LastError      string           `json:"lastError,omitempty"`
	EvaluationTime float64          `json:"evaluationTime"`
	LastEvaluation time.Time        `json:"lastEvaluation"`
	// Type of an alertingRule is always "alerting".
	Type string `json:"type"`
}
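
// RecordingRule is the JSON representation of a recording rule.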
type RecordingRule struct {
	Name           string           `json:"name"`
	Query          string           `json:"query"`
	Labels         labels.Labels    `json:"labels,omitempty"`
	Health         rules.RuleHealth `json:"health"`
	LastError      string           `json:"lastError,omitempty"`
	EvaluationTime float64          `json:"evaluationTime"`
	LastEvaluation time.Time        `json:"lastEvaluation"`
	// Type of a recordingRule is always "recording".
	Type string `json:"type"`
}
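
// rules returns all loaded rule groups, optionally filtered by rule name,
// rule group, file, and rule type, with active alerts optionally excluded.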
func (api *API) rules(r *http.Request) apiFuncResult {
	if err := r.ParseForm(); err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing form values: %w", err)}, nil, nil}
	}

	queryFormToSet := func(values []string) map[string]struct{} {
		set := make(map[string]struct{}, len(values))
		for _, v := range values {
			set[v] = struct{}{}
		}
		return set
	}

	rnSet := queryFormToSet(r.Form["rule_name[]"])
	rgSet := queryFormToSet(r.Form["rule_group[]"])
	fSet := queryFormToSet(r.Form["file[]"])

	ruleGroups := api.rulesRetriever(r.Context()).RuleGroups()
	res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, 0, len(ruleGroups))}
	typ := strings.ToLower(r.URL.Query().Get("type"))

	if typ != "" && typ != "alert" && typ != "record" {
		return invalidParamError(fmt.Errorf("unsupported value %q", typ), "type")
	}

	returnAlerts := typ == "" || typ == "alert"
	returnRecording := typ == "" || typ == "record"

	excludeAlerts, err := parseExcludeAlerts(r)
	if err != nil {
		return invalidParamError(err, "exclude_alerts")
	}

	rgs := make([]*RuleGroup, 0, len(ruleGroups))
	for _, grp := range ruleGroups {
		if len(rgSet) > 0 {
			if _, ok := rgSet[grp.Name()]; !ok {
				continue
			}
		}

		if len(fSet) > 0 {
			if _, ok := fSet[grp.File()]; !ok {
				continue
			}
		}

		apiRuleGroup := &RuleGroup{
			Name:           grp.Name(),
			File:           grp.File(),
			Interval:       grp.Interval().Seconds(),
			Limit:          grp.Limit(),
			Rules:          []Rule{},
			EvaluationTime: grp.GetEvaluationTime().Seconds(),
			LastEvaluation: grp.GetLastEvaluation(),
		}
		for _, rr := range grp.Rules() {
			var enrichedRule Rule

			if len(rnSet) > 0 {
				if _, ok := rnSet[rr.Name()]; !ok {
					continue
				}
			}

			lastError := ""
			if rr.LastError() != nil {
				lastError = rr.LastError().Error()
			}
			switch rule := rr.(type) {
			case *rules.AlertingRule:
				if !returnAlerts {
					break
				}
				var activeAlerts []*Alert
				if !excludeAlerts {
					activeAlerts = rulesAlertsToAPIAlerts(rule.ActiveAlerts())
				}
				enrichedRule = AlertingRule{
					State:          rule.State().String(),
					Name:           rule.Name(),
					Query:          rule.Query().String(),
					Duration:       rule.HoldDuration().Seconds(),
					KeepFiringFor:  rule.KeepFiringFor().Seconds(),
					Labels:         rule.Labels(),
					Annotations:    rule.Annotations(),
					Alerts:         activeAlerts,
					Health:         rule.Health(),
					LastError:      lastError,
					EvaluationTime: rule.GetEvaluationDuration().Seconds(),
					LastEvaluation: rule.GetEvaluationTimestamp(),
					Type:           "alerting",
				}
			case *rules.RecordingRule:
				if !returnRecording {
					break
				}
				enrichedRule = RecordingRule{
					Name:           rule.Name(),
					Query:          rule.Query().String(),
					Labels:         rule.Labels(),
					Health:         rule.Health(),
					LastError:      lastError,
					EvaluationTime: rule.GetEvaluationDuration().Seconds(),
					LastEvaluation: rule.GetEvaluationTimestamp(),
					Type:           "recording",
				}
			default:
				err := fmt.Errorf("failed to assert type of rule '%v'", rule.Name())
				return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
			}

			if enrichedRule != nil {
				apiRuleGroup.Rules = append(apiRuleGroup.Rules, enrichedRule)
			}
		}

		// If the rule group response has no rules, skip it - this means we filtered all the rules of this group.
		if len(apiRuleGroup.Rules) > 0 {
			rgs = append(rgs, apiRuleGroup)
		}
	}
	res.RuleGroups = rgs
	return apiFuncResult{res, nil, nil, nil}
}
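
// parseExcludeAlerts parses the exclude_alerts query parameter; an absent
// value defaults to false.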
func parseExcludeAlerts(r *http.Request) (bool, error) {
	excludeAlertsParam := strings.ToLower(r.URL.Query().Get("exclude_alerts"))

	if excludeAlertsParam == "" {
		return false, nil
	}

	excludeAlerts, err := strconv.ParseBool(excludeAlertsParam)
	if err != nil {
		return false, fmt.Errorf("error converting exclude_alerts: %w", err)
	}
	return excludeAlerts, nil
}

type prometheusConfig struct {
	YAML string `json:"yaml"`
}

func (api *API) serveRuntimeInfo(_ *http.Request) apiFuncResult {
	status, err := api.runtimeInfo()
	if err != nil {
		return apiFuncResult{status, &apiError{errorInternal, err}, nil, nil}
	}
	return apiFuncResult{status, nil, nil, nil}
}

func (api *API) serveBuildInfo(_ *http.Request) apiFuncResult {
	return apiFuncResult{api.buildInfo, nil, nil, nil}
}

func (api *API) serveConfig(_ *http.Request) apiFuncResult {
	cfg := &prometheusConfig{
		YAML: api.config().String(),
	}
	return apiFuncResult{cfg, nil, nil, nil}
}

func (api *API) serveFlags(_ *http.Request) apiFuncResult {
	return apiFuncResult{api.flagsMap, nil, nil, nil}
}

// TSDBStat holds the information about individual cardinality.
type TSDBStat struct {
	Name  string `json:"name"`
	Value uint64 `json:"value"`
}

// HeadStats has information about the TSDB head.
type HeadStats struct {
	NumSeries     uint64 `json:"numSeries"`
	NumLabelPairs int    `json:"numLabelPairs"`
	ChunkCount    int64  `json:"chunkCount"`
	MinTime       int64  `json:"minTime"`
	MaxTime       int64  `json:"maxTime"`
}

// TSDBStatus has information on cardinality statistics from postings.
type TSDBStatus struct {
	HeadStats                   HeadStats  `json:"headStats"`
	SeriesCountByMetricName     []TSDBStat `json:"seriesCountByMetricName"`
	LabelValueCountByLabelName  []TSDBStat `json:"labelValueCountByLabelName"`
	MemoryInBytesByLabelName    []TSDBStat `json:"memoryInBytesByLabelName"`
	SeriesCountByLabelValuePair []TSDBStat `json:"seriesCountByLabelValuePair"`
}

// TSDBStatsFromIndexStats converts an index.Stat slice to a TSDBStat slice.
func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat {
	result := make([]TSDBStat, 0, len(stats))
	for _, item := range stats {
		stat := TSDBStat{Name: item.Name, Value: item.Count}
		result = append(result, stat)
	}
	return result
}
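
// serveTSDBStatus returns cardinality statistics for the TSDB head, limited
// to the top N entries per statistic (10 by default).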
func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult {
	limit := 10
	if s := r.FormValue("limit"); s != "" {
		var err error
		if limit, err = strconv.Atoi(s); err != nil || limit < 1 {
			return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a positive number")}, nil, nil}
		}
	}
	s, err := api.db.Stats(labels.MetricName, limit)
	if err != nil {
		return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
	}
	metrics, err := api.gatherer.Gather()
	if err != nil {
		return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("error gathering runtime status: %w", err)}, nil, nil}
	}
	chunkCount := int64(math.NaN())
	for _, mF := range metrics {
		if *mF.Name == "prometheus_tsdb_head_chunks" {
			m := mF.Metric[0]
			if m.Gauge != nil {
				chunkCount = int64(m.Gauge.GetValue())
				break
			}
		}
	}
	return apiFuncResult{TSDBStatus{
		HeadStats: HeadStats{
			NumSeries:     s.NumSeries,
			ChunkCount:    chunkCount,
			MinTime:       s.MinTime,
			MaxTime:       s.MaxTime,
			NumLabelPairs: s.IndexPostingStats.NumLabelPairs,
		},
		SeriesCountByMetricName:     TSDBStatsFromIndexStats(s.IndexPostingStats.CardinalityMetricsStats),
		LabelValueCountByLabelName:  TSDBStatsFromIndexStats(s.IndexPostingStats.CardinalityLabelStats),
		MemoryInBytesByLabelName:    TSDBStatsFromIndexStats(s.IndexPostingStats.LabelValueStats),
		SeriesCountByLabelValuePair: TSDBStatsFromIndexStats(s.IndexPostingStats.LabelValuePairsStats),
	}, nil, nil, nil}
}

type walReplayStatus struct {
	Min     int `json:"min"`
	Max     int `json:"max"`
	Current int `json:"current"`
}

func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) {
	httputil.SetCORS(w, api.CORSOrigin, r)
	status, err := api.db.WALReplayStatus()
	if err != nil {
		api.respondError(w, &apiError{errorInternal, err}, nil)
		// Return early so we don't write a second response below.
		return
	}
	api.respond(w, r, walReplayStatus{
		Min:     status.Min,
		Max:     status.Max,
		Current: status.Current,
	}, nil, "")
}

func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
	// This is only really for tests - this will never be nil IRL.
	if api.remoteReadHandler != nil {
		api.remoteReadHandler.ServeHTTP(w, r)
	} else {
		http.Error(w, "not found", http.StatusNotFound)
	}
}

func (api *API) remoteWrite(w http.ResponseWriter, r *http.Request) {
	if api.remoteWriteHandler != nil {
		api.remoteWriteHandler.ServeHTTP(w, r)
	} else {
		http.Error(w, "remote write receiver needs to be enabled with --web.enable-remote-write-receiver", http.StatusNotFound)
	}
}

func (api *API) otlpWrite(w http.ResponseWriter, r *http.Request) {
	if api.otlpWriteHandler != nil {
		api.otlpWriteHandler.ServeHTTP(w, r)
	} else {
		http.Error(w, "otlp write receiver needs to be enabled with --enable-feature=otlp-write-receiver", http.StatusNotFound)
	}
}
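
// deleteSeries is an admin endpoint: it deletes all samples of the series
// matching the given selectors within the given time range.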
func (api *API) deleteSeries(r *http.Request) apiFuncResult {
	if !api.enableAdmin {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
	}
	if err := r.ParseForm(); err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("error parsing form values: %w", err)}, nil, nil}
	}
	if len(r.Form["match[]"]) == 0 {
		return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil}
	}

	start, err := parseTimeParam(r, "start", MinTime)
	if err != nil {
		return invalidParamError(err, "start")
	}
	end, err := parseTimeParam(r, "end", MaxTime)
	if err != nil {
		return invalidParamError(err, "end")
	}

	for _, s := range r.Form["match[]"] {
		matchers, err := parser.ParseMetricSelector(s)
		if err != nil {
			return invalidParamError(err, "match[]")
		}
		if err := api.db.Delete(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end), matchers...); err != nil {
			return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
		}
	}

	return apiFuncResult{nil, nil, nil, nil}
}
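
// snapshot is an admin endpoint: it snapshots the current data into a
// timestamped directory under the TSDB's snapshots directory and returns
// that directory's name.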
func (api *API) snapshot(r *http.Request) apiFuncResult {
	if !api.enableAdmin {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
	}
	var (
		skipHead bool
		err      error
	)
	if r.FormValue("skip_head") != "" {
		skipHead, err = strconv.ParseBool(r.FormValue("skip_head"))
		if err != nil {
			return invalidParamError(fmt.Errorf("unable to parse boolean: %w", err), "skip_head")
		}
	}

	var (
		snapdir = filepath.Join(api.dbDir, "snapshots")
		name    = fmt.Sprintf("%s-%016x",
			time.Now().UTC().Format("20060102T150405Z0700"),
			rand.Int63())
		dir = filepath.Join(snapdir, name)
	)
	if err := os.MkdirAll(dir, 0o777); err != nil {
		return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("create snapshot directory: %w", err)}, nil, nil}
	}
	if err := api.db.Snapshot(dir, !skipHead); err != nil {
		return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("create snapshot: %w", err)}, nil, nil}
	}

	return apiFuncResult{struct {
		Name string `json:"name"`
	}{name}, nil, nil, nil}
}

func (api *API) cleanTombstones(*http.Request) apiFuncResult {
	if !api.enableAdmin {
		return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
	}
	if err := api.db.CleanTombstones(); err != nil {
		return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
	}

	return apiFuncResult{nil, nil, nil, nil}
}

// The query string is needed to get the position information for the
// annotations; it can be empty if the position information isn't needed.
func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings annotations.Annotations, query string) {
	statusMessage := statusSuccess

	resp := &Response{
		Status:   statusMessage,
		Data:     data,
		Warnings: warnings.AsStrings(query, 10),
	}

	codec, err := api.negotiateCodec(req, resp)
	if err != nil {
		api.respondError(w, &apiError{errorNotAcceptable, err}, nil)
		return
	}

	b, err := codec.Encode(resp)
	if err != nil {
		level.Error(api.logger).Log("msg", "error marshaling response", "err", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", codec.ContentType().String())
	w.WriteHeader(http.StatusOK)
	if n, err := w.Write(b); err != nil {
		level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
	}
}
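
// negotiateCodec returns the first registered codec that satisfies the
// request's Accept header and can encode the response, falling back to the
// default codec.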
func (api *API) negotiateCodec(req *http.Request, resp *Response) (Codec, error) {
	for _, clause := range goautoneg.ParseAccept(req.Header.Get("Accept")) {
		for _, codec := range api.codecs {
			if codec.ContentType().Satisfies(clause) && codec.CanEncode(resp) {
				return codec, nil
			}
		}
	}

	defaultCodec := api.codecs[0]
	if !defaultCodec.CanEncode(resp) {
		return nil, fmt.Errorf("cannot encode response as %s", defaultCodec.ContentType())
	}

	return defaultCodec, nil
}

func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) {
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	b, err := json.Marshal(&Response{
		Status:    statusError,
		ErrorType: apiErr.typ,
		Error:     apiErr.err.Error(),
		Data:      data,
	})
	if err != nil {
		level.Error(api.logger).Log("msg", "error marshaling json response", "err", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var code int
	switch apiErr.typ {
	case errorBadData:
		code = http.StatusBadRequest
	case errorExec:
		code = http.StatusUnprocessableEntity
	case errorCanceled:
		code = statusClientClosedConnection
	case errorTimeout:
		code = http.StatusServiceUnavailable
	case errorInternal:
		code = http.StatusInternalServerError
	case errorNotFound:
		code = http.StatusNotFound
	case errorNotAcceptable:
		code = http.StatusNotAcceptable
	default:
		code = http.StatusInternalServerError
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	if n, err := w.Write(b); err != nil {
		level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
	}
}
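
// parseTimeParam parses the named form value as a timestamp, returning
// defaultValue when the parameter is absent.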
func parseTimeParam(r *http.Request, paramName string, defaultValue time.Time) (time.Time, error) {
	val := r.FormValue(paramName)
	if val == "" {
		return defaultValue, nil
	}
	result, err := parseTime(val)
	if err != nil {
		return time.Time{}, fmt.Errorf("invalid time value for '%s': %w", paramName, err)
	}
	return result, nil
}
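
// parseTime accepts either a Unix timestamp (with optional fractional
// seconds) or an RFC 3339 string, plus the formatted MinTime/MaxTime
// boundary values.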
func parseTime(s string) (time.Time, error) {
	if t, err := strconv.ParseFloat(s, 64); err == nil {
		s, ns := math.Modf(t)
		ns = math.Round(ns*1000) / 1000
		return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC(), nil
	}
	if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
		return t, nil
	}

	// Stdlib's time parser can only handle 4 digit years. As a workaround until
	// that is fixed we want to at least support our own boundary times.
	// Context: https://github.com/prometheus/client_golang/issues/614
	// Upstream issue: https://github.com/golang/go/issues/20555
	switch s {
	case minTimeFormatted:
		return MinTime, nil
	case maxTimeFormatted:
		return MaxTime, nil
	}
	return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
}
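
// parseDuration accepts either a floating-point number of seconds or a
// Prometheus-style duration string such as "5m".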
func parseDuration(s string) (time.Duration, error) {
	if d, err := strconv.ParseFloat(s, 64); err == nil {
		ts := d * float64(time.Second)
		if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) {
			return 0, fmt.Errorf("cannot parse %q to a valid duration. It overflows int64", s)
		}
		return time.Duration(ts), nil
	}
	if d, err := model.ParseDuration(s); err == nil {
		return time.Duration(d), nil
	}
	return 0, fmt.Errorf("cannot parse %q to a valid duration", s)
}
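
// parseMatchersParam parses each match[] selector into a matcher set and
// rejects selectors whose matchers would all match the empty string (and thus
// every series).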
func parseMatchersParam(matchers []string) ([][]*labels.Matcher, error) {
	matcherSets, err := parser.ParseMetricSelectors(matchers)
	if err != nil {
		return nil, err
	}

OUTER:
	for _, ms := range matcherSets {
		for _, lm := range ms {
			if lm != nil && !lm.Matches("") {
				continue OUTER
			}
		}
		return nil, errors.New("match[] must contain at least one non-empty matcher")
	}
	return matcherSets, nil
}
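
// parseLimitParam parses the limit form value; an empty value means no limit
// (math.MaxInt), and non-positive values are rejected.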
func parseLimitParam(limitStr string) (limit int, err error) {
	limit = math.MaxInt
	if limitStr == "" {
		return limit, nil
	}

	limit, err = strconv.Atoi(limitStr)
	if err != nil {
		return limit, err
	}
	if limit <= 0 {
		return limit, errors.New("limit must be positive")
	}

	return limit, nil
}