prometheus/web/api/v1/api.go

// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"context"
"fmt"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"unsafe"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/regexp"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
"golang.org/x/exp/slices"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/textparse"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/jsonutil"
"github.com/prometheus/prometheus/util/stats"
)
type status string
const (
statusSuccess status = "success"
statusError status = "error"
// Non-standard status code (originally introduced by nginx) for the case when a client closes
// the connection while the server is still processing the request.
statusClientClosedConnection = 499
)
type errorType string
const (
errorNone errorType = ""
errorTimeout errorType = "timeout"
errorCanceled errorType = "canceled"
errorExec errorType = "execution"
errorBadData errorType = "bad_data"
errorInternal errorType = "internal"
errorUnavailable errorType = "unavailable"
errorNotFound errorType = "not_found"
)
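// LocalhostRepresentations lists the common string forms of the local host address.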
var LocalhostRepresentations = []string{"127.0.0.1", "localhost", "::1"}
type apiError struct {
typ errorType
err error
}
func (e *apiError) Error() string {
return fmt.Sprintf("%s: %s", e.typ, e.err)
}
// ScrapePoolsRetriever provides the list of all scrape pools.
type ScrapePoolsRetriever interface {
ScrapePools() []string
}
// TargetRetriever provides the list of active and dropped scrape targets.
type TargetRetriever interface {
TargetsActive() map[string][]*scrape.Target
TargetsDropped() map[string][]*scrape.Target
}
// AlertmanagerRetriever provides the list of active and dropped Alertmanager URLs.
type AlertmanagerRetriever interface {
Alertmanagers() []*url.URL
DroppedAlertmanagers() []*url.URL
}
// RulesRetriever provides a list of active rules and alerts.
type RulesRetriever interface {
RuleGroups() []*rules.Group
AlertingRules() []*rules.AlertingRule
}
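// StatsRenderer converts engine statistics into the stats object attached to a
// query response; the string argument carries the raw value of the "stats"
// request parameter.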
type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats
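// defaultStatsRenderer returns full query statistics when the "stats"
// parameter is non-empty and nil otherwise.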
func defaultStatsRenderer(ctx context.Context, s *stats.Statistics, param string) stats.QueryStats {
if param != "" {
return stats.NewQueryStats(s)
}
return nil
}
// PrometheusVersion contains build information about Prometheus.
type PrometheusVersion struct {
Version string `json:"version"`
Revision string `json:"revision"`
Branch string `json:"branch"`
BuildUser string `json:"buildUser"`
BuildDate string `json:"buildDate"`
GoVersion string `json:"goVersion"`
}
// RuntimeInfo contains runtime information about Prometheus.
type RuntimeInfo struct {
StartTime time.Time `json:"startTime"`
CWD string `json:"CWD"`
ReloadConfigSuccess bool `json:"reloadConfigSuccess"`
LastConfigTime time.Time `json:"lastConfigTime"`
CorruptionCount int64 `json:"corruptionCount"`
GoroutineCount int `json:"goroutineCount"`
GOMAXPROCS int `json:"GOMAXPROCS"`
GOGC string `json:"GOGC"`
GODEBUG string `json:"GODEBUG"`
StorageRetention string `json:"storageRetention"`
}
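// response is the JSON envelope used for every API reply, e.g.
// {"status":"success","data":...} or {"status":"error","errorType":...,"error":...}.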
type response struct {
Status status `json:"status"`
Data interface{} `json:"data,omitempty"`
ErrorType errorType `json:"errorType,omitempty"`
Error string `json:"error,omitempty"`
Warnings []string `json:"warnings,omitempty"`
}
type apiFuncResult struct {
data interface{}
err *apiError
warnings storage.Warnings
finalizer func()
}
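// apiFunc is the signature implemented by every endpoint handler; the wrapper
// installed in Register turns its apiFuncResult into an HTTP response.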
type apiFunc func(r *http.Request) apiFuncResult
// TSDBAdminStats defines the tsdb interfaces used by the v1 API for admin operations as well as statistics.
type TSDBAdminStats interface {
CleanTombstones() error
Delete(mint, maxt int64, ms ...*labels.Matcher) error
Snapshot(dir string, withHead bool) error
Stats(statsByLabelName string) (*tsdb.Stats, error)
WALReplayStatus() (tsdb.WALReplayStatus, error)
}
// QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked.
type QueryEngine interface {
SetQueryLogger(l promql.QueryLogger)
NewInstantQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, ts time.Time) (promql.Query, error)
NewRangeQuery(q storage.Queryable, opts *promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error)
}
// API can register a set of endpoints in a router and handle
// them using the provided storage and query engine.
type API struct {
Queryable storage.SampleAndChunkQueryable
QueryEngine QueryEngine
ExemplarQueryable storage.ExemplarQueryable
scrapePoolsRetriever func(context.Context) ScrapePoolsRetriever
targetRetriever func(context.Context) TargetRetriever
alertmanagerRetriever func(context.Context) AlertmanagerRetriever
rulesRetriever func(context.Context) RulesRetriever
now func() time.Time
config func() config.Config
flagsMap map[string]string
ready func(http.HandlerFunc) http.HandlerFunc
globalURLOptions GlobalURLOptions
db TSDBAdminStats
dbDir string
enableAdmin bool
logger log.Logger
CORSOrigin *regexp.Regexp
buildInfo *PrometheusVersion
runtimeInfo func() (RuntimeInfo, error)
gatherer prometheus.Gatherer
isAgent bool
statsRenderer StatsRenderer
remoteWriteHandler http.Handler
remoteReadHandler http.Handler
}
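// init registers custom jsoniter encoders for the types that dominate query
// responses (series, samples, points, exemplars) so they are marshalled
// without falling back to the default reflection-based encoding.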
func init() {
jsoniter.RegisterTypeEncoderFunc("promql.Series", marshalSeriesJSON, marshalSeriesJSONIsEmpty)
jsoniter.RegisterTypeEncoderFunc("promql.Sample", marshalSampleJSON, marshalSampleJSONIsEmpty)
jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty)
jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, marshalExemplarJSONEmpty)
}
// NewAPI returns an initialized API type.
func NewAPI(
qe QueryEngine,
q storage.SampleAndChunkQueryable,
ap storage.Appendable,
eq storage.ExemplarQueryable,
spsr func(context.Context) ScrapePoolsRetriever,
tr func(context.Context) TargetRetriever,
ar func(context.Context) AlertmanagerRetriever,
configFunc func() config.Config,
flagsMap map[string]string,
globalURLOptions GlobalURLOptions,
readyFunc func(http.HandlerFunc) http.HandlerFunc,
db TSDBAdminStats,
dbDir string,
enableAdmin bool,
logger log.Logger,
rr func(context.Context) RulesRetriever,
remoteReadSampleLimit int,
remoteReadConcurrencyLimit int,
remoteReadMaxBytesInFrame int,
isAgent bool,
CORSOrigin *regexp.Regexp,
runtimeInfo func() (RuntimeInfo, error),
buildInfo *PrometheusVersion,
gatherer prometheus.Gatherer,
registerer prometheus.Registerer,
statsRenderer StatsRenderer,
) *API {
a := &API{
QueryEngine: qe,
Queryable: q,
ExemplarQueryable: eq,
scrapePoolsRetriever: spsr,
targetRetriever: tr,
alertmanagerRetriever: ar,
now: time.Now,
config: configFunc,
flagsMap: flagsMap,
ready: readyFunc,
globalURLOptions: globalURLOptions,
db: db,
dbDir: dbDir,
enableAdmin: enableAdmin,
rulesRetriever: rr,
logger: logger,
CORSOrigin: CORSOrigin,
runtimeInfo: runtimeInfo,
buildInfo: buildInfo,
gatherer: gatherer,
isAgent: isAgent,
statsRenderer: defaultStatsRenderer,
remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
}
if statsRenderer != nil {
a.statsRenderer = statsRenderer
}
if ap != nil {
a.remoteWriteHandler = remote.NewWriteHandler(logger, ap)
}
return a
}
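// setUnavailStatusOnTSDBNotReady rewrites errors caused by tsdb.ErrNotReady to
// the errorUnavailable type so clients can tell that the storage is still
// starting up.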
func setUnavailStatusOnTSDBNotReady(r apiFuncResult) apiFuncResult {
if r.err != nil && errors.Cause(r.err.err) == tsdb.ErrNotReady {
r.err.typ = errorUnavailable
}
return r
}
// Register the API's endpoints in the given router.
func (api *API) Register(r *route.Router) {
wrap := func(f apiFunc) http.HandlerFunc {
hf := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httputil.SetCORS(w, api.CORSOrigin, r)
result := setUnavailStatusOnTSDBNotReady(f(r))
if result.finalizer != nil {
defer result.finalizer()
}
if result.err != nil {
api.respondError(w, result.err, result.data)
return
}
if result.data != nil {
api.respond(w, result.data, result.warnings)
return
}
w.WriteHeader(http.StatusNoContent)
})
return api.ready(httputil.CompressionHandler{
Handler: hf,
}.ServeHTTP)
}
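// wrapAgent rejects endpoints that have no meaning when the server runs as a
// Prometheus Agent, returning an execution error instead of serving data.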
wrapAgent := func(f apiFunc) http.HandlerFunc {
return wrap(func(r *http.Request) apiFuncResult {
if api.isAgent {
return apiFuncResult{nil, &apiError{errorExec, errors.New("unavailable with Prometheus Agent")}, nil, nil}
}
return f(r)
})
}
r.Options("/*path", wrap(api.options))
r.Get("/query", wrapAgent(api.query))
r.Post("/query", wrapAgent(api.query))
r.Get("/query_range", wrapAgent(api.queryRange))
r.Post("/query_range", wrapAgent(api.queryRange))
r.Get("/query_exemplars", wrapAgent(api.queryExemplars))
r.Post("/query_exemplars", wrapAgent(api.queryExemplars))
r.Get("/format_query", wrapAgent(api.formatQuery))
r.Post("/format_query", wrapAgent(api.formatQuery))
r.Get("/labels", wrapAgent(api.labelNames))
r.Post("/labels", wrapAgent(api.labelNames))
r.Get("/label/:name/values", wrapAgent(api.labelValues))
r.Get("/series", wrapAgent(api.series))
r.Post("/series", wrapAgent(api.series))
r.Del("/series", wrapAgent(api.dropSeries))
r.Get("/scrape_pools", wrap(api.scrapePools))
r.Get("/targets", wrap(api.targets))
r.Get("/targets/metadata", wrap(api.targetMetadata))
r.Get("/alertmanagers", wrapAgent(api.alertmanagers))
r.Get("/metadata", wrap(api.metricMetadata))
r.Get("/status/config", wrap(api.serveConfig))
r.Get("/status/runtimeinfo", wrap(api.serveRuntimeInfo))
r.Get("/status/buildinfo", wrap(api.serveBuildInfo))
r.Get("/status/flags", wrap(api.serveFlags))
r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus))
r.Get("/status/walreplay", api.serveWALReplayStatus)
r.Post("/read", api.ready(api.remoteRead))
r.Post("/write", api.ready(api.remoteWrite))
r.Get("/alerts", wrapAgent(api.alerts))
r.Get("/rules", wrapAgent(api.rules))
// Admin APIs
r.Post("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries))
r.Post("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones))
r.Post("/admin/tsdb/snapshot", wrapAgent(api.snapshot))
r.Put("/admin/tsdb/delete_series", wrapAgent(api.deleteSeries))
r.Put("/admin/tsdb/clean_tombstones", wrapAgent(api.cleanTombstones))
r.Put("/admin/tsdb/snapshot", wrapAgent(api.snapshot))
}
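// queryData is the "data" payload returned by the query and query_range
// endpoints.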
type queryData struct {
ResultType parser.ValueType `json:"resultType"`
Result parser.Value `json:"result"`
Stats stats.QueryStats `json:"stats,omitempty"`
}
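// invalidParamError wraps a parse error as a bad_data apiError that names the
// offending request parameter.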
func invalidParamError(err error, parameter string) apiFuncResult {
return apiFuncResult{nil, &apiError{
errorBadData, errors.Wrapf(err, "invalid parameter %q", parameter),
}, nil, nil}
}
func (api *API) options(r *http.Request) apiFuncResult {
return apiFuncResult{nil, nil, nil, nil}
}
func (api *API) query(r *http.Request) (result apiFuncResult) {
ts, err := parseTimeParam(r, "time", api.now())
if err != nil {
return invalidParamError(err, "time")
}
ctx := r.Context()
if to := r.FormValue("timeout"); to != "" {
var cancel context.CancelFunc
timeout, err := parseDuration(to)
if err != nil {
return invalidParamError(err, "timeout")
}
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
opts, err := extractQueryOpts(r)
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, opts, r.FormValue("query"), ts)
if err != nil {
return invalidParamError(err, "query")
}
// From now on, we must only return with a finalizer in the result (to
// be called by the caller) or call qry.Close ourselves (which is
// required in the case of a panic).
defer func() {
if result.finalizer == nil {
qry.Close()
}
}()
ctx = httputil.ContextFromRequest(ctx, r)
res := qry.Exec(ctx)
if res.Err != nil {
return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close}
}
// Optional stats field in response if parameter "stats" is not empty.
sr := api.statsRenderer
if sr == nil {
sr = defaultStatsRenderer
}
qs := sr(ctx, qry.Stats(), r.FormValue("stats"))
return apiFuncResult{&queryData{
ResultType: res.Value.Type(),
Result: res.Value,
Stats: qs,
}, nil, res.Warnings, qry.Close}
}
func (api *API) formatQuery(r *http.Request) (result apiFuncResult) {
expr, err := parser.ParseExpr(r.FormValue("query"))
if err != nil {
return invalidParamError(err, "query")
}
return apiFuncResult{expr.Pretty(0), nil, nil, nil}
}
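// extractQueryOpts parses per-query engine options from the request:
// "stats=all" enables per-step statistics and "lookback_delta" overrides the
// engine's default lookback window.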
func extractQueryOpts(r *http.Request) (*promql.QueryOpts, error) {
opts := &promql.QueryOpts{
EnablePerStepStats: r.FormValue("stats") == "all",
}
if strDuration := r.FormValue("lookback_delta"); strDuration != "" {
duration, err := parseDuration(strDuration)
if err != nil {
return nil, fmt.Errorf("error parsing lookback delta duration: %w", err)
}
opts.LookbackDelta = duration
}
return opts, nil
}
func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
start, err := parseTime(r.FormValue("start"))
if err != nil {
return invalidParamError(err, "start")
}
end, err := parseTime(r.FormValue("end"))
if err != nil {
return invalidParamError(err, "end")
}
if end.Before(start) {
return invalidParamError(errors.New("end timestamp must not be before start time"), "end")
}
step, err := parseDuration(r.FormValue("step"))
if err != nil {
return invalidParamError(err, "step")
}
if step <= 0 {
return invalidParamError(errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer"), "step")
}
// For safety, limit the number of returned points per timeseries.
// This is sufficient for 60s resolution for a week or 1h resolution for a year.
if end.Sub(start)/step > 11000 {
err := errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
ctx := r.Context()
if to := r.FormValue("timeout"); to != "" {
var cancel context.CancelFunc
timeout, err := parseDuration(to)
if err != nil {
return invalidParamError(err, "timeout")
}
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
opts, err := extractQueryOpts(r)
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, opts, r.FormValue("query"), start, end, step)
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
// From now on, we must only return with a finalizer in the result (to
// be called by the caller) or call qry.Close ourselves (which is
// required in the case of a panic).
defer func() {
if result.finalizer == nil {
qry.Close()
}
}()
ctx = httputil.ContextFromRequest(ctx, r)
res := qry.Exec(ctx)
if res.Err != nil {
return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close}
}
// Optional stats field in response if parameter "stats" is not empty.
sr := api.statsRenderer
if sr == nil {
sr = defaultStatsRenderer
}
qs := sr(ctx, qry.Stats(), r.FormValue("stats"))
return apiFuncResult{&queryData{
ResultType: res.Value.Type(),
Result: res.Value,
Stats: qs,
}, nil, res.Warnings, qry.Close}
}
func (api *API) queryExemplars(r *http.Request) apiFuncResult {
start, err := parseTimeParam(r, "start", minTime)
if err != nil {
return invalidParamError(err, "start")
}
end, err := parseTimeParam(r, "end", maxTime)
if err != nil {
return invalidParamError(err, "end")
}
if end.Before(start) {
err := errors.New("end timestamp must not be before start timestamp")
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
expr, err := parser.ParseExpr(r.FormValue("query"))
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
selectors := parser.ExtractSelectors(expr)
if len(selectors) < 1 {
return apiFuncResult{nil, nil, nil, nil}
}
ctx := r.Context()
eq, err := api.ExemplarQueryable.ExemplarQuerier(ctx)
if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil}
}
res, err := eq.Select(timestamp.FromTime(start), timestamp.FromTime(end), selectors...)
if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil}
}
return apiFuncResult{res, nil, nil, nil}
}
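// returnAPIError maps PromQL and context errors onto the API error types so
// the response is reported with the matching error class.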
func returnAPIError(err error) *apiError {
if err == nil {
return nil
}
cause := errors.Unwrap(err)
if cause == nil {
cause = err
}
switch cause.(type) {
case promql.ErrQueryCanceled:
return &apiError{errorCanceled, err}
case promql.ErrQueryTimeout:
return &apiError{errorTimeout, err}
case promql.ErrStorage:
return &apiError{errorInternal, err}
}
if errors.Is(err, context.Canceled) {
return &apiError{errorCanceled, err}
}
return &apiError{errorExec, err}
}
func (api *API) labelNames(r *http.Request) apiFuncResult {
start, err := parseTimeParam(r, "start", minTime)
if err != nil {
return invalidParamError(err, "start")
}
end, err := parseTimeParam(r, "end", maxTime)
if err != nil {
return invalidParamError(err, "end")
}
matcherSets, err := parseMatchersParam(r.Form["match[]"])
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil}
}
defer q.Close()
var (
names []string
warnings storage.Warnings
)
if len(matcherSets) > 0 {
labelNamesSet := make(map[string]struct{})
for _, matchers := range matcherSets {
vals, callWarnings, err := q.LabelNames(matchers...)
if err != nil {
return apiFuncResult{nil, returnAPIError(err), warnings, nil}
}
warnings = append(warnings, callWarnings...)
for _, val := range vals {
labelNamesSet[val] = struct{}{}
}
}
// Convert the set to a slice.
names = make([]string, 0, len(labelNamesSet))
for key := range labelNamesSet {
names = append(names, key)
}
slices.Sort(names)
} else {
names, warnings, err = q.LabelNames()
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
}
}
if names == nil {
names = []string{}
}
return apiFuncResult{names, nil, warnings, nil}
}
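// labelValues handles the label values endpoint (/api/v1/label/<name>/values).
// It validates the label name, parses the optional start, end and match[]
// parameters, and returns the sorted, deduplicated values seen for that label.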
func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
ctx := r.Context()
name := route.Param(ctx, "name")
if !model.LabelNameRE.MatchString(name) {
return apiFuncResult{nil, &apiError{errorBadData, errors.Errorf("invalid label name: %q", name)}, nil, nil}
}
start, err := parseTimeParam(r, "start", minTime)
if err != nil {
return invalidParamError(err, "start")
}
end, err := parseTimeParam(r, "end", maxTime)
if err != nil {
return invalidParamError(err, "end")
}
matcherSets, err := parseMatchersParam(r.Form["match[]"])
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
}
// From now on, we must only return with a finalizer in the result (to
// be called by the caller) or call q.Close ourselves (which is required
// in the case of a panic).
defer func() {
if result.finalizer == nil {
q.Close()
}
}()
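// closer is handed to the caller as the result finalizer so the querier
// stays open until the response has been marshalled.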
closer := func() {
q.Close()
}
var (
vals []string
warnings storage.Warnings
)
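// With match[] selectors present, the values from each matcher set are
// unioned and deduplicated; otherwise a single LabelValues call suffices.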
if len(matcherSets) > 0 {
var callWarnings storage.Warnings
labelValuesSet := make(map[string]struct{})
for _, matchers := range matcherSets {
vals, callWarnings, err = q.LabelValues(name, matchers...)
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
}
warnings = append(warnings, callWarnings...)
for _, val := range vals {
labelValuesSet[val] = struct{}{}
}
}
vals = make([]string, 0, len(labelValuesSet))
for val := range labelValuesSet {
vals = append(vals, val)
}
} else {
vals, warnings, err = q.LabelValues(name)
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
}
if vals == nil {
vals = []string{}
}
}
slices.Sort(vals)
return apiFuncResult{vals, nil, warnings, closer}
}
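// minTime and maxTime are the defaults for the optional start and end
// parameters. They sit near the extremes of the representable time range,
// so omitting both makes a request effectively unbounded in time.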
var (
minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()
minTimeFormatted = minTime.Format(time.RFC3339Nano)
maxTimeFormatted = maxTime.Format(time.RFC3339Nano)
)
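// series handles the series endpoint (/api/v1/series). It requires at least
// one match[] selector, selects the matching series over the requested time
// range and returns their label sets.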
func (api *API) series(r *http.Request) (result apiFuncResult) {
if err := r.ParseForm(); err != nil {
return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil}
}
if len(r.Form["match[]"]) == 0 {
return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil}
}
start, err := parseTimeParam(r, "start", minTime)
if err != nil {
return invalidParamError(err, "start")
}
end, err := parseTimeParam(r, "end", maxTime)
if err != nil {
return invalidParamError(err, "end")
}
matcherSets, err := parseMatchersParam(r.Form["match[]"])
if err != nil {
return invalidParamError(err, "match[]")
}
q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil}
}
// From now on, we must only return with a finalizer in the result (to
// be called by the caller) or call q.Close ourselves (which is required
// in the case of a panic).
defer func() {
if result.finalizer == nil {
q.Close()
}
}()
closer := func() {
q.Close()
}
hints := &storage.SelectHints{
Start: timestamp.FromTime(start),
End: timestamp.FromTime(end),
Func: "series", // There is no series function, this token is used for lookups that don't need samples.
}
var set storage.SeriesSet
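// A single selector can use an unsorted (cheaper) select; with several
// selectors each select must be sorted so the merged set can deduplicate.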
if len(matcherSets) > 1 {
var sets []storage.SeriesSet
for _, mset := range matcherSets {
// We need to sort the results of this select so the series sets can be merged (deduplicated) later.
s := q.Select(true, hints, mset...)
sets = append(sets, s)
}
set = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
} else {
// At this point at least one match exists.
set = q.Select(false, hints, matcherSets[0]...)
}
metrics := []labels.Labels{}
for set.Next() {
metrics = append(metrics, set.At().Labels())
}
warnings := set.Warnings()
if set.Err() != nil {
return apiFuncResult{nil, returnAPIError(set.Err()), warnings, closer}
}
return apiFuncResult{metrics, nil, warnings, closer}
}
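// dropSeries is a stub that always responds with an internal
// "not implemented" error.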
func (api *API) dropSeries(_ *http.Request) apiFuncResult {
return apiFuncResult{nil, &apiError{errorInternal, errors.New("not implemented")}, nil, nil}
}
// Target has the information for one target.
type Target struct {
// Labels before any processing.
DiscoveredLabels map[string]string `json:"discoveredLabels"`
// Any labels that are added to this target and its metrics.
Labels map[string]string `json:"labels"`
ScrapePool string `json:"scrapePool"`
ScrapeURL string `json:"scrapeUrl"`
GlobalURL string `json:"globalUrl"`
LastError string `json:"lastError"`
LastScrape time.Time `json:"lastScrape"`
LastScrapeDuration float64 `json:"lastScrapeDuration"`
Health scrape.TargetHealth `json:"health"`
ScrapeInterval string `json:"scrapeInterval"`
ScrapeTimeout string `json:"scrapeTimeout"`
}
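// ScrapePoolsDiscovery is the response payload listing all scrape pool names.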
type ScrapePoolsDiscovery struct {
ScrapePools []string `json:"scrapePools"`
}
// DroppedTarget has the information for one target that was dropped during relabelling.
type DroppedTarget struct {
// Labels before any processing.
DiscoveredLabels map[string]string `json:"discoveredLabels"`
}
// TargetDiscovery has all the active targets.
type TargetDiscovery struct {
ActiveTargets []*Target `json:"activeTargets"`
DroppedTargets []*DroppedTarget `json:"droppedTargets"`
}
// GlobalURLOptions contains fields used for deriving the global URL for local targets.
type GlobalURLOptions struct {
ListenAddress string
Host string
Scheme string
}
// sanitizeSplitHostPort acts like net.SplitHostPort.
// Additionally, if the input contains no port, it returns the host with an
// empty port instead of an error, stripping square brackets from IPv6
// addresses.
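// A few illustrative cases (relying only on net.SplitHostPort semantics):
//
//	sanitizeSplitHostPort("localhost:9090") // "localhost", "9090", nil
//	sanitizeSplitHostPort("localhost")      // "localhost", "", nil
//	sanitizeSplitHostPort("[::1]")          // "::1", "", nil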
func sanitizeSplitHostPort(input string) (string, string, error) {
host, port, err := net.SplitHostPort(input)
if err != nil && strings.HasSuffix(err.Error(), "missing port in address") {
var errWithPort error
host, _, errWithPort = net.SplitHostPort(input + ":80")
if errWithPort == nil {
err = nil
}
}
return host, port, err
}
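// getGlobalURL rewrites a target URL that points at a localhost representation
// so that it is reachable from outside. A rough sketch of the effect, with
// hypothetical options (ListenAddress ":9090", Host "prom.example.com:443",
// Scheme "https"):
//
//	http://localhost:9090/metrics -> https://prom.example.com:443/metrics (our own process)
//	http://127.0.0.1:9100/metrics -> http://prom.example.com:9100/metrics (other local service)
//	http://node1:9100/metrics     -> unchanged (not a localhost representation)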
func getGlobalURL(u *url.URL, opts GlobalURLOptions) (*url.URL, error) {
host, port, err := sanitizeSplitHostPort(u.Host)
if err != nil {
return u, err
}
for _, lhr := range LocalhostRepresentations {
if host == lhr {
_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
if err != nil {
return u, err
}
if port == ownPort {
// Only in the case where the target is on localhost and its port is
// the same as the one we're listening on, we know for sure that
// we're monitoring our own process and that we need to change the
// scheme, hostname, and port to the externally reachable ones as
// well. We shouldn't need to touch the path at all, since if a
// path prefix is defined, the path under which we scrape ourselves
// should already contain the prefix.
u.Scheme = opts.Scheme
u.Host = opts.Host
} else {
// Otherwise, we only know that localhost is not reachable
// externally, so we replace only the hostname by the one in the
// external URL. It could be the wrong hostname for the service on
// this port, but it's still the best possible guess.
host, _, err := sanitizeSplitHostPort(opts.Host)
if err != nil {
return u, err
}
u.Host = host
if port != "" {
u.Host = net.JoinHostPort(u.Host, port)
}
}
break
}
}
return u, nil
}
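// scrapePools serves the scrape pool names returned by the configured
// ScrapePoolsRetriever. Illustrative data shape with hypothetical pool names
// (api.respond later wraps it in the usual status/data envelope):
//
//	{"scrapePools": ["node", "prometheus"]}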
func (api *API) scrapePools(r *http.Request) apiFuncResult {
names := api.scrapePoolsRetriever(r.Context()).ScrapePools()
sort.Strings(names)
res := &ScrapePoolsDiscovery{ScrapePools: names}
return apiFuncResult{data: res, err: nil, warnings: nil, finalizer: nil}
}
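// targets serves the active and dropped targets, optionally filtered by the
// "state" and "scrapePool" query parameters. Illustrative request with a
// hypothetical pool name (the route is typically /api/v1/targets):
//
//	GET /api/v1/targets?state=active&scrapePool=node
//
// returns only the active targets of the "node" scrape pool; with no
// parameters, active and dropped targets of all pools are returned.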
func (api *API) targets(r *http.Request) apiFuncResult {
sortKeys := func(targets map[string][]*scrape.Target) ([]string, int) {
var n int
keys := make([]string, 0, len(targets))
for k := range targets {
keys = append(keys, k)
n += len(targets[k])
}
slices.Sort(keys)
return keys, n
}
scrapePool := r.URL.Query().Get("scrapePool")
state := strings.ToLower(r.URL.Query().Get("state"))
showActive := state == "" || state == "any" || state == "active"
showDropped := state == "" || state == "any" || state == "dropped"
res := &TargetDiscovery{}
if showActive {
targetsActive := api.targetRetriever(r.Context()).TargetsActive()
activeKeys, numTargets := sortKeys(targetsActive)
res.ActiveTargets = make([]*Target, 0, numTargets)
for _, key := range activeKeys {
if scrapePool != "" && key != scrapePool {
continue
}
for _, target := range targetsActive[key] {
lastErrStr := ""
lastErr := target.LastError()
if lastErr != nil {
lastErrStr = lastErr.Error()
}
globalURL, err := getGlobalURL(target.URL(), api.globalURLOptions)
res.ActiveTargets = append(res.ActiveTargets, &Target{
DiscoveredLabels: target.DiscoveredLabels().Map(),
Labels: target.Labels().Map(),
ScrapePool: key,
ScrapeURL: target.URL().String(),
GlobalURL: globalURL.String(),
LastError: func() string {
if err == nil && lastErrStr == "" {
return ""
} else if err != nil {
return errors.Wrapf(err, lastErrStr).Error()
}
return lastErrStr
}(),
LastScrape: target.LastScrape(),
LastScrapeDuration: target.LastScrapeDuration().Seconds(),
Health: target.Health(),
ScrapeInterval: target.GetValue(model.ScrapeIntervalLabel),
ScrapeTimeout: target.GetValue(model.ScrapeTimeoutLabel),
})
}
}
} else {
res.ActiveTargets = []*Target{}
}
if showDropped {
targetsDropped := api.targetRetriever(r.Context()).TargetsDropped()
droppedKeys, numTargets := sortKeys(targetsDropped)
res.DroppedTargets = make([]*DroppedTarget, 0, numTargets)
for _, key := range droppedKeys {
if scrapePool != "" && key != scrapePool {
continue
}
for _, target := range targetsDropped[key] {
res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{
DiscoveredLabels: target.DiscoveredLabels().Map(),
})
}
}
} else {
res.DroppedTargets = []*DroppedTarget{}
}
return apiFuncResult{res, nil, nil, nil}
}
func matchLabels(lset labels.Labels, matchers []*labels.Matcher) bool {
for _, m := range matchers {
if !m.Matches(lset.Get(m.Name)) {
return false
}
}
return true
}
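// targetMetadata serves metric metadata collected from active targets,
// filtered by the "match_target", "metric" and "limit" form values.
// Illustrative request with a hypothetical job name (the route is typically
// /api/v1/targets/metadata; selectors need URL encoding in practice):
//
//	GET /api/v1/targets/metadata?match_target={job="node"}&metric=node_load1&limit=10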
func (api *API) targetMetadata(r *http.Request) apiFuncResult {
limit := -1
if s := r.FormValue("limit"); s != "" {
var err error
if limit, err = strconv.Atoi(s); err != nil {
return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil}
}
}
matchTarget := r.FormValue("match_target")
var matchers []*labels.Matcher
var err error
if matchTarget != "" {
matchers, err = parser.ParseMetricSelector(matchTarget)
if err != nil {
return invalidParamError(err, "match_target")
}
}
metric := r.FormValue("metric")
res := []metricMetadata{}
for _, tt := range api.targetRetriever(r.Context()).TargetsActive() {
for _, t := range tt {
if limit >= 0 && len(res) >= limit {
break
}
// Filter targets that don't satisfy the label matchers.
if matchTarget != "" && !matchLabels(t.Labels(), matchers) {
continue
}
// If no metric is specified, get the full list for the target.
if metric == "" {
for _, md := range t.MetadataList() {
res = append(res, metricMetadata{
Target: t.Labels(),
Metric: md.Metric,
Type: md.Type,
Help: md.Help,
Unit: md.Unit,
})
}
continue
}
// Get metadata for the specified metric.
if md, ok := t.Metadata(metric); ok {
res = append(res, metricMetadata{
Target: t.Labels(),
Type: md.Type,
Help: md.Help,
Unit: md.Unit,
})
}
}
}
return apiFuncResult{res, nil, nil, nil}
}
type metricMetadata struct {
Target labels.Labels `json:"target"`
Metric string `json:"metric,omitempty"`
Type textparse.MetricType `json:"type"`
Help string `json:"help"`
Unit string `json:"unit"`
}
// AlertmanagerDiscovery has all the active Alertmanagers.
type AlertmanagerDiscovery struct {
ActiveAlertmanagers []*AlertmanagerTarget `json:"activeAlertmanagers"`
DroppedAlertmanagers []*AlertmanagerTarget `json:"droppedAlertmanagers"`
}
// AlertmanagerTarget has info on one AM.
type AlertmanagerTarget struct {
URL string `json:"url"`
}
func (api *API) alertmanagers(r *http.Request) apiFuncResult {
urls := api.alertmanagerRetriever(r.Context()).Alertmanagers()
droppedURLS := api.alertmanagerRetriever(r.Context()).DroppedAlertmanagers()
ams := &AlertmanagerDiscovery{ActiveAlertmanagers: make([]*AlertmanagerTarget, len(urls)), DroppedAlertmanagers: make([]*AlertmanagerTarget, len(droppedURLS))}
for i, url := range urls {
ams.ActiveAlertmanagers[i] = &AlertmanagerTarget{URL: url.String()}
}
for i, url := range droppedURLS {
ams.DroppedAlertmanagers[i] = &AlertmanagerTarget{URL: url.String()}
}
return apiFuncResult{ams, nil, nil, nil}
}
// AlertDiscovery has info for all active alerts.
type AlertDiscovery struct {
Alerts []*Alert `json:"alerts"`
}
// Alert has info for an alert.
type Alert struct {
Labels labels.Labels `json:"labels"`
Annotations labels.Labels `json:"annotations"`
State string `json:"state"`
ActiveAt *time.Time `json:"activeAt,omitempty"`
KeepFiringSince *time.Time `json:"keepFiringSince,omitempty"`
Value string `json:"value"`
}
func (api *API) alerts(r *http.Request) apiFuncResult {
alertingRules := api.rulesRetriever(r.Context()).AlertingRules()
alerts := []*Alert{}
for _, alertingRule := range alertingRules {
alerts = append(
alerts,
rulesAlertsToAPIAlerts(alertingRule.ActiveAlerts())...,
)
}
res := &AlertDiscovery{Alerts: alerts}
return apiFuncResult{res, nil, nil, nil}
}
func rulesAlertsToAPIAlerts(rulesAlerts []*rules.Alert) []*Alert {
apiAlerts := make([]*Alert, len(rulesAlerts))
for i, ruleAlert := range rulesAlerts {
apiAlerts[i] = &Alert{
Labels: ruleAlert.Labels,
Annotations: ruleAlert.Annotations,
State: ruleAlert.State.String(),
ActiveAt: &ruleAlert.ActiveAt,
Value: strconv.FormatFloat(ruleAlert.Value, 'e', -1, 64),
}
if !ruleAlert.KeepFiringSince.IsZero() {
apiAlerts[i].KeepFiringSince = &ruleAlert.KeepFiringSince
}
}
return apiAlerts
}
type metadata struct {
Type textparse.MetricType `json:"type"`
Help string `json:"help"`
Unit string `json:"unit"`
}
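// metricMetadata serves metadata aggregated across all active targets,
// de-duplicated per metric name. Illustrative request (the route is typically
// /api/v1/metadata):
//
//	GET /api/v1/metadata?metric=go_goroutines&limit=5
//
// Without "metric", metadata for every exposed metric name is returned.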
func (api *API) metricMetadata(r *http.Request) apiFuncResult {
metrics := map[string]map[metadata]struct{}{}
limit := -1
if s := r.FormValue("limit"); s != "" {
var err error
if limit, err = strconv.Atoi(s); err != nil {
return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil}
}
}
metric := r.FormValue("metric")
for _, tt := range api.targetRetriever(r.Context()).TargetsActive() {
for _, t := range tt {
if metric == "" {
for _, mm := range t.MetadataList() {
m := metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit}
ms, ok := metrics[mm.Metric]
if !ok {
ms = map[metadata]struct{}{}
metrics[mm.Metric] = ms
}
ms[m] = struct{}{}
}
continue
}
if md, ok := t.Metadata(metric); ok {
m := metadata{Type: md.Type, Help: md.Help, Unit: md.Unit}
ms, ok := metrics[md.Metric]
if !ok {
ms = map[metadata]struct{}{}
metrics[md.Metric] = ms
}
ms[m] = struct{}{}
}
}
}
// Put the elements from the pseudo-set into a slice for marshaling.
res := map[string][]metadata{}
for name, set := range metrics {
if limit >= 0 && len(res) >= limit {
break
}
s := []metadata{}
for metadata := range set {
s = append(s, metadata)
}
res[name] = s
}
return apiFuncResult{res, nil, nil, nil}
}
// RuleDiscovery has info for all rules.
type RuleDiscovery struct {
RuleGroups []*RuleGroup `json:"groups"`
}
// RuleGroup has info for rules which are part of a group.
type RuleGroup struct {
Name string `json:"name"`
File string `json:"file"`
// To preserve rule ordering while exposing type-specific (alerting or
// recording) properties, both alerting and recording rules are exposed in
// the same array.
Rules []Rule `json:"rules"`
Interval float64 `json:"interval"`
Limit int `json:"limit"`
EvaluationTime float64 `json:"evaluationTime"`
LastEvaluation time.Time `json:"lastEvaluation"`
}
type Rule interface{}
type AlertingRule struct {
// State can be "pending", "firing", "inactive".
State string `json:"state"`
Name string `json:"name"`
Query string `json:"query"`
Duration float64 `json:"duration"`
KeepFiringFor float64 `json:"keepFiringFor"`
Labels labels.Labels `json:"labels"`
Annotations labels.Labels `json:"annotations"`
Alerts []*Alert `json:"alerts"`
Health rules.RuleHealth `json:"health"`
LastError string `json:"lastError,omitempty"`
EvaluationTime float64 `json:"evaluationTime"`
LastEvaluation time.Time `json:"lastEvaluation"`
// Type of an alertingRule is always "alerting".
Type string `json:"type"`
}
type RecordingRule struct {
Name string `json:"name"`
Query string `json:"query"`
Labels labels.Labels `json:"labels,omitempty"`
Health rules.RuleHealth `json:"health"`
LastError string `json:"lastError,omitempty"`
EvaluationTime float64 `json:"evaluationTime"`
LastEvaluation time.Time `json:"lastEvaluation"`
// Type of a recordingRule is always "recording".
Type string `json:"type"`
}
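// rules serves the configured rule groups, optionally restricted to alerting
// or recording rules via the "type" query parameter. Illustrative requests
// (the route is typically /api/v1/rules):
//
//	GET /api/v1/rules            // both alerting and recording rules
//	GET /api/v1/rules?type=alert // alerting rules only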
func (api *API) rules(r *http.Request) apiFuncResult {
ruleGroups := api.rulesRetriever(r.Context()).RuleGroups()
res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))}
typ := strings.ToLower(r.URL.Query().Get("type"))
if typ != "" && typ != "alert" && typ != "record" {
return invalidParamError(errors.Errorf("not supported value %q", typ), "type")
}
returnAlerts := typ == "" || typ == "alert"
returnRecording := typ == "" || typ == "record"
for i, grp := range ruleGroups {
apiRuleGroup := &RuleGroup{
Name: grp.Name(),
File: grp.File(),
Interval: grp.Interval().Seconds(),
Limit: grp.Limit(),
Rules: []Rule{},
EvaluationTime: grp.GetEvaluationTime().Seconds(),
LastEvaluation: grp.GetLastEvaluation(),
}
for _, r := range grp.Rules() {
var enrichedRule Rule
lastError := ""
if r.LastError() != nil {
lastError = r.LastError().Error()
}
switch rule := r.(type) {
case *rules.AlertingRule:
if !returnAlerts {
break
}
enrichedRule = AlertingRule{
State: rule.State().String(),
Name: rule.Name(),
Query: rule.Query().String(),
Duration: rule.HoldDuration().Seconds(),
KeepFiringFor: rule.KeepFiringFor().Seconds(),
Labels: rule.Labels(),
Annotations: rule.Annotations(),
Alerts: rulesAlertsToAPIAlerts(rule.ActiveAlerts()),
Health: rule.Health(),
LastError: lastError,
EvaluationTime: rule.GetEvaluationDuration().Seconds(),
LastEvaluation: rule.GetEvaluationTimestamp(),
Type: "alerting",
}
case *rules.RecordingRule:
if !returnRecording {
break
}
enrichedRule = RecordingRule{
Name: rule.Name(),
Query: rule.Query().String(),
Labels: rule.Labels(),
Health: rule.Health(),
LastError: lastError,
EvaluationTime: rule.GetEvaluationDuration().Seconds(),
LastEvaluation: rule.GetEvaluationTimestamp(),
Type: "recording",
}
default:
err := errors.Errorf("failed to assert type of rule '%v'", rule.Name())
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
}
if enrichedRule != nil {
apiRuleGroup.Rules = append(apiRuleGroup.Rules, enrichedRule)
}
}
res.RuleGroups[i] = apiRuleGroup
}
return apiFuncResult{res, nil, nil, nil}
}
type prometheusConfig struct {
YAML string `json:"yaml"`
}
func (api *API) serveRuntimeInfo(_ *http.Request) apiFuncResult {
status, err := api.runtimeInfo()
if err != nil {
return apiFuncResult{status, &apiError{errorInternal, err}, nil, nil}
}
return apiFuncResult{status, nil, nil, nil}
}
func (api *API) serveBuildInfo(_ *http.Request) apiFuncResult {
return apiFuncResult{api.buildInfo, nil, nil, nil}
}
func (api *API) serveConfig(_ *http.Request) apiFuncResult {
cfg := &prometheusConfig{
YAML: api.config().String(),
}
return apiFuncResult{cfg, nil, nil, nil}
}
func (api *API) serveFlags(_ *http.Request) apiFuncResult {
return apiFuncResult{api.flagsMap, nil, nil, nil}
}
// TSDBStat holds the information about individual cardinality.
type TSDBStat struct {
Name string `json:"name"`
Value uint64 `json:"value"`
}
// HeadStats has information about the TSDB head.
type HeadStats struct {
NumSeries uint64 `json:"numSeries"`
NumLabelPairs int `json:"numLabelPairs"`
ChunkCount int64 `json:"chunkCount"`
MinTime int64 `json:"minTime"`
MaxTime int64 `json:"maxTime"`
}
// TSDBStatus has information of cardinality statistics from postings.
type TSDBStatus struct {
HeadStats HeadStats `json:"headStats"`
SeriesCountByMetricName []TSDBStat `json:"seriesCountByMetricName"`
LabelValueCountByLabelName []TSDBStat `json:"labelValueCountByLabelName"`
MemoryInBytesByLabelName []TSDBStat `json:"memoryInBytesByLabelName"`
SeriesCountByLabelValuePair []TSDBStat `json:"seriesCountByLabelValuePair"`
}
// TSDBStatsFromIndexStats converts an index.Stat slice to a TSDBStat slice.
func TSDBStatsFromIndexStats(stats []index.Stat) []TSDBStat {
result := make([]TSDBStat, 0, len(stats))
for _, item := range stats {
item := TSDBStat{Name: item.Name, Value: item.Count}
result = append(result, item)
}
return result
}
func (api *API) serveTSDBStatus(*http.Request) apiFuncResult {
s, err := api.db.Stats(labels.MetricName)
if err != nil {
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
}
metrics, err := api.gatherer.Gather()
if err != nil {
return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("error gathering runtime status: %s", err)}, nil, nil}
}
chunkCount := int64(math.NaN())
for _, mF := range metrics {
if *mF.Name == "prometheus_tsdb_head_chunks" {
m := *mF.Metric[0]
if m.Gauge != nil {
chunkCount = int64(m.Gauge.GetValue())
break
}
}
}
return apiFuncResult{TSDBStatus{
HeadStats: HeadStats{
NumSeries: s.NumSeries,
ChunkCount: chunkCount,
MinTime: s.MinTime,
MaxTime: s.MaxTime,
NumLabelPairs: s.IndexPostingStats.NumLabelPairs,
},
SeriesCountByMetricName: TSDBStatsFromIndexStats(s.IndexPostingStats.CardinalityMetricsStats),
LabelValueCountByLabelName: TSDBStatsFromIndexStats(s.IndexPostingStats.CardinalityLabelStats),
MemoryInBytesByLabelName: TSDBStatsFromIndexStats(s.IndexPostingStats.LabelValueStats),
SeriesCountByLabelValuePair: TSDBStatsFromIndexStats(s.IndexPostingStats.LabelValuePairsStats),
}, nil, nil, nil}
}
type walReplayStatus struct {
Min int `json:"min"`
Max int `json:"max"`
Current int `json:"current"`
}
func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) {
httputil.SetCORS(w, api.CORSOrigin, r)
status, err := api.db.WALReplayStatus()
if err != nil {
api.respondError(w, &apiError{errorInternal, err}, nil)
return
}
api.respond(w, walReplayStatus{
Min: status.Min,
Max: status.Max,
Current: status.Current,
}, nil)
}
func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
// The remote read handler is nil only in tests; in normal operation it is
// always set.
if api.remoteReadHandler != nil {
api.remoteReadHandler.ServeHTTP(w, r)
} else {
http.Error(w, "not found", http.StatusNotFound)
}
}
func (api *API) remoteWrite(w http.ResponseWriter, r *http.Request) {
if api.remoteWriteHandler != nil {
api.remoteWriteHandler.ServeHTTP(w, r)
} else {
http.Error(w, "remote write receiver needs to be enabled with --enable-feature=remote-write-receiver", http.StatusNotFound)
}
}
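// deleteSeries deletes the series matched by one or more "match[]" selectors
// between the optional "start" and "end" times; it is served only when the
// admin API is enabled. Illustrative request with a hypothetical selector
// (the route is typically /api/v1/admin/tsdb/delete_series):
//
//	POST /api/v1/admin/tsdb/delete_series?match[]=up{job="node"}&start=2023-01-01T00:00:00Z
//
// Omitting start and end deletes the matched series over the full time range.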
func (api *API) deleteSeries(r *http.Request) apiFuncResult {
if !api.enableAdmin {
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
}
if err := r.ParseForm(); err != nil {
return apiFuncResult{nil, &apiError{errorBadData, errors.Wrap(err, "error parsing form values")}, nil, nil}
}
if len(r.Form["match[]"]) == 0 {
return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil}
}
start, err := parseTimeParam(r, "start", minTime)
if err != nil {
return invalidParamError(err, "start")
}
end, err := parseTimeParam(r, "end", maxTime)
if err != nil {
return invalidParamError(err, "end")
}
for _, s := range r.Form["match[]"] {
matchers, err := parser.ParseMetricSelector(s)
if err != nil {
return invalidParamError(err, "match[]")
}
if err := api.db.Delete(timestamp.FromTime(start), timestamp.FromTime(end), matchers...); err != nil {
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
}
}
return apiFuncResult{nil, nil, nil, nil}
}
func (api *API) snapshot(r *http.Request) apiFuncResult {
if !api.enableAdmin {
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
}
var (
skipHead bool
err error
)
if r.FormValue("skip_head") != "" {
skipHead, err = strconv.ParseBool(r.FormValue("skip_head"))
if err != nil {
return invalidParamError(errors.Wrapf(err, "unable to parse boolean"), "skip_head")
}
}
var (
snapdir = filepath.Join(api.dbDir, "snapshots")
name = fmt.Sprintf("%s-%016x",
time.Now().UTC().Format("20060102T150405Z0700"),
rand.Int63())
dir = filepath.Join(snapdir, name)
)
if err := os.MkdirAll(dir, 0o777); err != nil {
return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot directory")}, nil, nil}
}
if err := api.db.Snapshot(dir, !skipHead); err != nil {
return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot")}, nil, nil}
}
return apiFuncResult{struct {
Name string `json:"name"`
}{name}, nil, nil, nil}
}
func (api *API) cleanTombstones(r *http.Request) apiFuncResult {
if !api.enableAdmin {
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
}
if err := api.db.CleanTombstones(); err != nil {
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
}
return apiFuncResult{nil, nil, nil, nil}
}
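// respond writes a success response wrapped in the standard envelope, e.g.
// (illustrative, data elided):
//
//	{"status":"success","data":...,"warnings":["..."]}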
func (api *API) respond(w http.ResponseWriter, data interface{}, warnings storage.Warnings) {
statusMessage := statusSuccess
var warningStrings []string
for _, warning := range warnings {
warningStrings = append(warningStrings, warning.Error())
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(&response{
Status: statusMessage,
Data: data,
Warnings: warningStrings,
})
if err != nil {
level.Error(api.logger).Log("msg", "error marshaling json response", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
if n, err := w.Write(b); err != nil {
level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
}
}
func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) {
json := jsoniter.ConfigCompatibleWithStandardLibrary
b, err := json.Marshal(&response{
Status: statusError,
ErrorType: apiErr.typ,
Error: apiErr.err.Error(),
Data: data,
})
if err != nil {
level.Error(api.logger).Log("msg", "error marshaling json response", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var code int
switch apiErr.typ {
case errorBadData:
code = http.StatusBadRequest
case errorExec:
code = http.StatusUnprocessableEntity
case errorCanceled:
code = statusClientClosedConnection
case errorTimeout:
code = http.StatusServiceUnavailable
case errorInternal:
code = http.StatusInternalServerError
case errorNotFound:
code = http.StatusNotFound
default:
code = http.StatusInternalServerError
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
if n, err := w.Write(b); err != nil {
level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
}
}
func parseTimeParam(r *http.Request, paramName string, defaultValue time.Time) (time.Time, error) {
val := r.FormValue(paramName)
if val == "" {
return defaultValue, nil
}
result, err := parseTime(val)
if err != nil {
return time.Time{}, errors.Wrapf(err, "Invalid time value for '%s'", paramName)
}
return result, nil
}
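// parseTime accepts either a Unix timestamp with optional fractional seconds
// or an RFC 3339 string, e.g.:
//
//	parseTime("1435781451.781")
//	parseTime("2015-07-01T20:10:51.781Z")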
func parseTime(s string) (time.Time, error) {
if t, err := strconv.ParseFloat(s, 64); err == nil {
s, ns := math.Modf(t)
ns = math.Round(ns*1000) / 1000
return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC(), nil
}
if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
return t, nil
}
// Stdlib's time parser can only handle 4 digit years. As a workaround until
// that is fixed we want to at least support our own boundary times.
// Context: https://github.com/prometheus/client_golang/issues/614
// Upstream issue: https://github.com/golang/go/issues/20555
switch s {
case minTimeFormatted:
return minTime, nil
case maxTimeFormatted:
return maxTime, nil
}
return time.Time{}, errors.Errorf("cannot parse %q to a valid timestamp", s)
}
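// parseDuration accepts either a float number of seconds or Prometheus
// duration notation, e.g.:
//
//	parseDuration("30")  // 30 seconds
//	parseDuration("1.5") // 1.5 seconds
//	parseDuration("5m")  // five minutes, via model.ParseDuration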
func parseDuration(s string) (time.Duration, error) {
if d, err := strconv.ParseFloat(s, 64); err == nil {
ts := d * float64(time.Second)
if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) {
return 0, errors.Errorf("cannot parse %q to a valid duration. It overflows int64", s)
}
return time.Duration(ts), nil
}
if d, err := model.ParseDuration(s); err == nil {
return time.Duration(d), nil
}
return 0, errors.Errorf("cannot parse %q to a valid duration", s)
}
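// parseMatchersParam parses each "match[]" value into a matcher set and
// rejects selectors that would match every series. Illustrative call with
// hypothetical selectors:
//
//	parseMatchersParam([]string{`up{job="prometheus"}`, `{__name__=~"http_.*"}`})
//
// A selector such as {foo=~".*"} is rejected because every matcher in it
// matches the empty string.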
func parseMatchersParam(matchers []string) ([][]*labels.Matcher, error) {
var matcherSets [][]*labels.Matcher
for _, s := range matchers {
matchers, err := parser.ParseMetricSelector(s)
if err != nil {
return nil, err
}
matcherSets = append(matcherSets, matchers)
}
OUTER:
for _, ms := range matcherSets {
for _, lm := range ms {
if lm != nil && !lm.Matches("") {
continue OUTER
}
}
return nil, errors.New("match[] must contain at least one non-empty matcher")
}
return matcherSets, nil
}
// marshalSeriesJSON writes something like the following:
//
// {
// "metric" : {
// "__name__" : "up",
// "job" : "prometheus",
// "instance" : "localhost:9090"
// },
// "values": [
// [ 1435781451.781, "1" ],
// < more values>
// ],
// "histograms": [
// [ 1435781451.781, { < histogram, see below > } ],
// < more histograms >
// ],
// },
func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
s := *((*promql.Series)(ptr))
stream.WriteObjectStart()
stream.WriteObjectField(`metric`)
m, err := s.Metric.MarshalJSON()
if err != nil {
stream.Error = err
return
}
stream.SetBuffer(append(stream.Buffer(), m...))
// We make two passes through the series here: In the first marshaling
// all value points, in the second marshaling all histogram
// points. That's probably cheaper than just one pass in which we copy
// out histogram Points into a newly allocated slice for separate
// marshaling. (Could be benchmarked, though.)
var foundValue, foundHistogram bool
for _, p := range s.Points {
if p.H == nil {
stream.WriteMore()
if !foundValue {
stream.WriteObjectField(`values`)
stream.WriteArrayStart()
}
foundValue = true
marshalPointJSON(unsafe.Pointer(&p), stream)
} else {
foundHistogram = true
}
}
if foundValue {
stream.WriteArrayEnd()
}
if foundHistogram {
firstHistogram := true
for _, p := range s.Points {
if p.H != nil {
stream.WriteMore()
if firstHistogram {
stream.WriteObjectField(`histograms`)
stream.WriteArrayStart()
}
firstHistogram = false
marshalPointJSON(unsafe.Pointer(&p), stream)
}
}
stream.WriteArrayEnd()
}
stream.WriteObjectEnd()
}
func marshalSeriesJSONIsEmpty(ptr unsafe.Pointer) bool {
return false
}
// marshalSampleJSON writes something like the following for normal value samples:
//
// {
// "metric" : {
// "__name__" : "up",
// "job" : "prometheus",
// "instance" : "localhost:9090"
// },
// "value": [ 1435781451.781, "1" ]
// },
//
// For histogram samples, it writes something like this:
//
// {
// "metric" : {
// "__name__" : "up",
// "job" : "prometheus",
// "instance" : "localhost:9090"
// },
// "histogram": [ 1435781451.781, { < histogram, see below > } ]
// },
func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
s := *((*promql.Sample)(ptr))
stream.WriteObjectStart()
stream.WriteObjectField(`metric`)
m, err := s.Metric.MarshalJSON()
if err != nil {
stream.Error = err
return
}
stream.SetBuffer(append(stream.Buffer(), m...))
stream.WriteMore()
if s.Point.H == nil {
stream.WriteObjectField(`value`)
} else {
stream.WriteObjectField(`histogram`)
}
marshalPointJSON(unsafe.Pointer(&s.Point), stream)
stream.WriteObjectEnd()
}
func marshalSampleJSONIsEmpty(ptr unsafe.Pointer) bool {
return false
}
// marshalPointJSON writes `[ts, "val"]`.
func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
p := *((*promql.Point)(ptr))
stream.WriteArrayStart()
jsonutil.MarshalTimestamp(p.T, stream)
stream.WriteMore()
if p.H == nil {
jsonutil.MarshalValue(p.V, stream)
} else {
marshalHistogram(p.H, stream)
}
stream.WriteArrayEnd()
}
func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
return false
}
// marshalHistogram writes something like:
//
// {
// "count": "42",
// "sum": "34593.34",
// "buckets": [
// [ 3, "-0.25", "0.25", "3"],
// [ 0, "0.25", "0.5", "12"],
// [ 0, "0.5", "1", "21"],
// [ 0, "2", "4", "6"]
// ]
// }
//
// The 1st element in each bucket array determines if the boundaries are
// inclusive (AKA closed) or exclusive (AKA open):
//
// 0: lower exclusive, upper inclusive
// 1: lower inclusive, upper exclusive
// 2: both exclusive
// 3: both inclusive
//
// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is
// the bucket count.
func marshalHistogram(h *histogram.FloatHistogram, stream *jsoniter.Stream) {
stream.WriteObjectStart()
stream.WriteObjectField(`count`)
jsonutil.MarshalValue(h.Count, stream)
stream.WriteMore()
stream.WriteObjectField(`sum`)
jsonutil.MarshalValue(h.Sum, stream)
bucketFound := false
it := h.AllBucketIterator()
for it.Next() {
bucket := it.At()
if bucket.Count == 0 {
continue // No need to expose empty buckets in JSON.
}
stream.WriteMore()
if !bucketFound {
stream.WriteObjectField(`buckets`)
stream.WriteArrayStart()
}
bucketFound = true
boundaries := 2 // Exclusive on both sides AKA open interval.
if bucket.LowerInclusive {
if bucket.UpperInclusive {
boundaries = 3 // Inclusive on both sides AKA closed interval.
} else {
boundaries = 1 // Inclusive only on lower end AKA right open.
}
} else {
if bucket.UpperInclusive {
boundaries = 0 // Inclusive only on upper end AKA left open.
}
}
stream.WriteArrayStart()
stream.WriteInt(boundaries)
stream.WriteMore()
jsonutil.MarshalValue(bucket.Lower, stream)
stream.WriteMore()
jsonutil.MarshalValue(bucket.Upper, stream)
stream.WriteMore()
jsonutil.MarshalValue(bucket.Count, stream)
stream.WriteArrayEnd()
}
if bucketFound {
stream.WriteArrayEnd()
}
stream.WriteObjectEnd()
}
// marshalExemplarJSON writes something like:
//
// {
// labels: <labels>,
// value: "<string>",
// timestamp: <float>
// }
func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
p := *((*exemplar.Exemplar)(ptr))
stream.WriteObjectStart()
// "labels" key.
stream.WriteObjectField(`labels`)
lbls, err := p.Labels.MarshalJSON()
if err != nil {
stream.Error = err
return
}
stream.SetBuffer(append(stream.Buffer(), lbls...))
// "value" key.
stream.WriteMore()
stream.WriteObjectField(`value`)
jsonutil.MarshalValue(p.Value, stream)
// "timestamp" key.
stream.WriteMore()
stream.WriteObjectField(`timestamp`)
jsonutil.MarshalTimestamp(p.Ts, stream)
stream.WriteObjectEnd()
}
func marshalExemplarJSONEmpty(ptr unsafe.Pointer) bool {
return false
}