2016-04-13 07:08:22 -07:00
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2015-06-04 09:07:57 -07:00
package v1
import (
2017-10-24 21:21:42 -07:00
"context"
2023-11-07 19:49:39 -08:00
"errors"
2015-06-04 09:07:57 -07:00
"fmt"
2022-04-27 02:24:36 -07:00
"io"
2015-06-04 09:07:57 -07:00
"net/http"
"net/http/httptest"
"net/url"
2018-11-15 05:22:16 -08:00
"os"
2015-06-04 09:07:57 -07:00
"reflect"
2020-07-31 08:03:02 -07:00
"runtime"
2019-12-09 13:36:38 -08:00
"sort"
2024-05-13 08:36:19 -07:00
"strconv"
2017-11-10 16:53:48 -08:00
"strings"
2015-06-04 09:07:57 -07:00
"testing"
"time"
2022-10-20 02:17:00 -07:00
"github.com/prometheus/prometheus/prompb"
2022-02-10 06:17:05 -08:00
"github.com/prometheus/prometheus/util/stats"
2023-04-16 05:13:31 -07:00
"github.com/prometheus/prometheus/util/testutil"
2022-02-10 06:17:05 -08:00
2021-06-11 09:17:59 -07:00
"github.com/go-kit/log"
2024-04-28 12:02:18 -07:00
jsoniter "github.com/json-iterator/go"
2018-09-07 14:26:04 -07:00
"github.com/prometheus/client_golang/prometheus"
2018-06-16 10:26:37 -07:00
config_util "github.com/prometheus/common/config"
2015-08-20 08:18:46 -07:00
"github.com/prometheus/common/model"
2018-06-16 10:26:37 -07:00
"github.com/prometheus/common/promlog"
2015-09-24 08:07:11 -07:00
"github.com/prometheus/common/route"
2020-10-29 02:43:23 -07:00
"github.com/stretchr/testify/require"
2015-06-04 09:07:57 -07:00
2017-05-11 08:09:24 -07:00
"github.com/prometheus/prometheus/config"
2021-11-08 06:23:17 -08:00
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
2023-11-22 06:39:21 -08:00
"github.com/prometheus/prometheus/model/metadata"
2021-11-08 06:23:17 -08:00
"github.com/prometheus/prometheus/model/timestamp"
2015-06-04 09:07:57 -07:00
"github.com/prometheus/prometheus/promql"
2020-02-03 10:23:07 -08:00
"github.com/prometheus/prometheus/promql/parser"
2024-04-29 02:48:24 -07:00
"github.com/prometheus/prometheus/promql/promqltest"
2018-03-25 09:50:34 -07:00
"github.com/prometheus/prometheus/rules"
2018-02-01 01:55:07 -08:00
"github.com/prometheus/prometheus/scrape"
2018-05-08 01:48:13 -07:00
"github.com/prometheus/prometheus/storage"
2017-10-23 13:28:17 -07:00
"github.com/prometheus/prometheus/storage/remote"
2019-11-18 11:53:33 -08:00
"github.com/prometheus/prometheus/tsdb"
2019-08-08 18:35:39 -07:00
"github.com/prometheus/prometheus/util/teststorage"
2015-06-04 09:07:57 -07:00
)
2024-05-07 09:14:22 -07:00
func testEngine ( t * testing . T ) * promql . Engine {
t . Helper ( )
2024-07-14 04:28:59 -07:00
return promqltest . NewTestEngineWithOpts ( t , promql . EngineOpts {
2024-05-07 09:14:22 -07:00
Logger : nil ,
Reg : nil ,
MaxSamples : 10000 ,
Timeout : 100 * time . Second ,
NoStepSubqueryIntervalFn : func ( int64 ) int64 { return 60 * 1000 } ,
EnableAtModifier : true ,
EnableNegativeOffset : true ,
EnablePerStepStats : true ,
} )
}
2023-08-18 11:48:59 -07:00
2019-12-04 11:33:01 -08:00
// testMetaStore satisfies the scrape.MetricMetadataStore interface.
// It is used to inject specific metadata as part of a test case.
type testMetaStore struct {
Metadata [ ] scrape . MetricMetadata
}
func ( s * testMetaStore ) ListMetadata ( ) [ ] scrape . MetricMetadata {
return s . Metadata
}
func ( s * testMetaStore ) GetMetadata ( metric string ) ( scrape . MetricMetadata , bool ) {
for _ , m := range s . Metadata {
if metric == m . Metric {
return m , true
}
}
return scrape . MetricMetadata { } , false
}
2020-01-29 03:13:18 -08:00
func ( s * testMetaStore ) SizeMetadata ( ) int { return 0 }
func ( s * testMetaStore ) LengthMetadata ( ) int { return 0 }
2019-12-04 11:33:01 -08:00
// testTargetRetriever represents a list of targets to scrape.
// It is used to represent targets as part of test cases.
type testTargetRetriever struct {
	// activeTargets holds targets currently being scraped, keyed by identifier.
	activeTargets map[string][]*scrape.Target
	// droppedTargets holds targets dropped from scraping, keyed by identifier.
	droppedTargets map[string][]*scrape.Target
}

// testTargetParams describes one target to be placed into a
// testTargetRetriever by newTestTargetRetriever.
type testTargetParams struct {
	Identifier       string
	Labels           labels.Labels
	DiscoveredLabels labels.Labels
	Params           url.Values
	// Reports are replayed into the target to seed its scrape state.
	Reports []*testReport
	// Active selects whether the target is filed as active or dropped.
	Active bool
}

// testReport captures the outcome of one simulated scrape.
type testReport struct {
	Start    time.Time
	Duration time.Duration
	Error    error
}
// newTestTargetRetriever builds a testTargetRetriever from the given target
// descriptions, replaying each target's reports and filing it into the
// active or dropped bucket according to its Active flag.
//
// Idiom fix: the maps are now created directly with pre-sized make calls
// instead of a `var` declaration followed by a separate assignment.
func newTestTargetRetriever(targetsInfo []*testTargetParams) *testTargetRetriever {
	activeTargets := make(map[string][]*scrape.Target, len(targetsInfo))
	droppedTargets := make(map[string][]*scrape.Target, len(targetsInfo))

	for _, t := range targetsInfo {
		nt := scrape.NewTarget(t.Labels, t.DiscoveredLabels, t.Params)
		for _, r := range t.Reports {
			nt.Report(r.Start, r.Duration, r.Error)
		}
		if t.Active {
			activeTargets[t.Identifier] = []*scrape.Target{nt}
		} else {
			droppedTargets[t.Identifier] = []*scrape.Target{nt}
		}
	}
	return &testTargetRetriever{
		activeTargets:  activeTargets,
		droppedTargets: droppedTargets,
	}
}
2016-12-02 04:31:43 -08:00
2021-10-22 01:06:44 -07:00
var scrapeStart = time . Now ( ) . Add ( - 11 * time . Second )
2019-11-11 13:42:24 -08:00
2018-09-26 02:20:56 -07:00
// TargetsActive returns the retriever's active targets, keyed by identifier.
func (t testTargetRetriever) TargetsActive() map[string][]*scrape.Target {
	return t.activeTargets
}
2019-12-04 03:08:21 -08:00
2018-09-26 02:20:56 -07:00
// TargetsDropped returns the retriever's dropped targets, keyed by identifier.
func (t testTargetRetriever) TargetsDropped() map[string][]*scrape.Target {
	return t.droppedTargets
}
2023-08-14 07:39:25 -07:00
// TargetsDroppedCounts returns, per identifier, how many targets were dropped.
//
// The result map is pre-sized to the number of dropped-target buckets to
// avoid incremental growth.
func (t testTargetRetriever) TargetsDroppedCounts() map[string]int {
	r := make(map[string]int, len(t.droppedTargets))
	for k, v := range t.droppedTargets {
		r[k] = len(v)
	}
	return r
}
2019-12-10 06:56:16 -08:00
func ( t * testTargetRetriever ) SetMetadataStoreForTargets ( identifier string , metadata scrape . MetricMetadataStore ) error {
2019-12-04 11:33:01 -08:00
targets , ok := t . activeTargets [ identifier ]
if ! ok {
return errors . New ( "targets not found" )
}
for _ , at := range targets {
at . SetMetadataStore ( metadata )
}
return nil
}
2019-12-10 06:56:16 -08:00
// ResetMetadataStore replaces the metadata store of every active target
// with a fresh, empty testMetaStore.
func (t *testTargetRetriever) ResetMetadataStore() {
	for _, targets := range t.activeTargets {
		for _, target := range targets {
			target.SetMetadataStore(&testMetaStore{})
		}
	}
}
2020-04-16 01:30:47 -07:00
// toFactory wraps the retriever in the factory signature the API expects.
func (t *testTargetRetriever) toFactory() func(context.Context) TargetRetriever {
	return func(context.Context) TargetRetriever { return t }
}
2018-02-21 01:00:07 -08:00
// testAlertmanagerRetriever serves a static set of Alertmanager URLs for
// the API tests.
type testAlertmanagerRetriever struct{}

// Alertmanagers returns a single fixed active Alertmanager URL.
func (t testAlertmanagerRetriever) Alertmanagers() []*url.URL {
	active := &url.URL{
		Scheme: "http",
		Host:   "alertmanager.example.com:8080",
		Path:   "/api/v1/alerts",
	}
	return []*url.URL{active}
}

// DroppedAlertmanagers returns a single fixed dropped Alertmanager URL.
func (t testAlertmanagerRetriever) DroppedAlertmanagers() []*url.URL {
	dropped := &url.URL{
		Scheme: "http",
		Host:   "dropped.alertmanager.example.com:8080",
		Path:   "/api/v1/alerts",
	}
	return []*url.URL{dropped}
}
2018-06-27 00:15:17 -07:00
// rulesRetrieverMock is a test double for the rules retriever, holding
// pre-built alerting rules and rule groups for the API tests.
type rulesRetrieverMock struct {
	alertingRules []*rules.AlertingRule
	ruleGroups    []*rules.Group
	// testing is the test to which assertion failures are reported.
	testing *testing.T
}
2023-10-17 19:02:03 -07:00
func ( m * rulesRetrieverMock ) CreateAlertingRules ( ) {
2020-02-03 10:23:07 -08:00
expr1 , err := parser . ParseExpr ( ` absent(test_metric3) != 1 ` )
2021-09-03 02:51:27 -07:00
require . NoError ( m . testing , err )
2020-02-03 10:23:07 -08:00
expr2 , err := parser . ParseExpr ( ` up == 1 ` )
2021-09-03 02:51:27 -07:00
require . NoError ( m . testing , err )
2023-10-17 19:02:03 -07:00
expr3 , err := parser . ParseExpr ( ` vector(1) ` )
2021-09-03 02:51:27 -07:00
require . NoError ( m . testing , err )
2023-10-17 19:02:03 -07:00
2018-03-25 09:50:34 -07:00
rule1 := rules . NewAlertingRule (
"test_metric3" ,
expr1 ,
time . Second ,
2023-01-09 03:21:38 -08:00
0 ,
2018-03-25 09:50:34 -07:00
labels . Labels { } ,
labels . Labels { } ,
2019-04-15 09:52:58 -07:00
labels . Labels { } ,
2021-05-30 20:56:01 -07:00
"" ,
2018-08-02 03:18:24 -07:00
true ,
2018-03-25 09:50:34 -07:00
log . NewNopLogger ( ) ,
)
rule2 := rules . NewAlertingRule (
"test_metric4" ,
expr2 ,
time . Second ,
2023-01-09 03:21:38 -08:00
0 ,
2018-03-25 09:50:34 -07:00
labels . Labels { } ,
labels . Labels { } ,
2019-04-15 09:52:58 -07:00
labels . Labels { } ,
2021-05-30 20:56:01 -07:00
"" ,
2018-08-02 03:18:24 -07:00
true ,
2018-03-25 09:50:34 -07:00
log . NewNopLogger ( ) ,
)
2023-10-17 19:02:03 -07:00
rule3 := rules . NewAlertingRule (
"test_metric5" ,
expr3 ,
time . Second ,
0 ,
labels . FromStrings ( "name" , "tm5" ) ,
labels . Labels { } ,
labels . FromStrings ( "name" , "tm5" ) ,
"" ,
false ,
log . NewNopLogger ( ) ,
)
2024-07-10 05:18:29 -07:00
rule4 := rules . NewAlertingRule (
"test_metric6" ,
expr2 ,
time . Second ,
0 ,
labels . FromStrings ( "testlabel" , "rule" ) ,
labels . Labels { } ,
labels . Labels { } ,
"" ,
true ,
log . NewNopLogger ( ) ,
)
rule5 := rules . NewAlertingRule (
"test_metric7" ,
expr2 ,
time . Second ,
0 ,
labels . FromStrings ( "templatedlabel" , "{{ $externalURL }}" ) ,
labels . Labels { } ,
labels . Labels { } ,
"" ,
true ,
log . NewNopLogger ( ) ,
)
2018-03-25 09:50:34 -07:00
var r [ ] * rules . AlertingRule
r = append ( r , rule1 )
r = append ( r , rule2 )
2023-10-17 19:02:03 -07:00
r = append ( r , rule3 )
2024-07-10 05:18:29 -07:00
r = append ( r , rule4 )
r = append ( r , rule5 )
2023-10-17 19:02:03 -07:00
m . alertingRules = r
2018-03-25 09:50:34 -07:00
}
2023-10-17 19:02:03 -07:00
func ( m * rulesRetrieverMock ) CreateRuleGroups ( ) {
m . CreateAlertingRules ( )
arules := m . AlertingRules ( )
2019-08-08 18:35:39 -07:00
storage := teststorage . New ( m . testing )
2018-03-25 09:50:34 -07:00
defer storage . Close ( )
2018-10-02 04:59:19 -07:00
engineOpts := promql . EngineOpts {
2020-01-28 12:38:49 -08:00
Logger : nil ,
Reg : nil ,
MaxSamples : 10 ,
Timeout : 100 * time . Second ,
2018-10-02 04:59:19 -07:00
}
2024-07-14 04:28:59 -07:00
engine := promqltest . NewTestEngineWithOpts ( m . testing , engineOpts )
2018-03-25 09:50:34 -07:00
opts := & rules . ManagerOptions {
QueryFunc : rules . EngineQueryFunc ( engine , storage ) ,
Appendable : storage ,
Context : context . Background ( ) ,
Logger : log . NewNopLogger ( ) ,
2023-10-17 19:02:03 -07:00
NotifyFunc : func ( ctx context . Context , expr string , alerts ... * rules . Alert ) { } ,
2018-03-25 09:50:34 -07:00
}
var r [ ] rules . Rule
for _ , alertrule := range arules {
r = append ( r , alertrule )
}
2020-02-03 10:23:07 -08:00
recordingExpr , err := parser . ParseExpr ( ` vector(1) ` )
2024-02-01 06:18:01 -08:00
require . NoError ( m . testing , err , "unable to parse alert expression" )
2018-06-27 00:15:17 -07:00
recordingRule := rules . NewRecordingRule ( "recording-rule-1" , recordingExpr , labels . Labels { } )
2024-07-10 05:18:29 -07:00
recordingRule2 := rules . NewRecordingRule ( "recording-rule-2" , recordingExpr , labels . FromStrings ( "testlabel" , "rule" ) )
2018-06-27 00:15:17 -07:00
r = append ( r , recordingRule )
2024-07-10 05:18:29 -07:00
r = append ( r , recordingRule2 )
2018-06-27 00:15:17 -07:00
2020-02-12 07:22:18 -08:00
group := rules . NewGroup ( rules . GroupOptions {
Name : "grp" ,
File : "/path/to/file" ,
Interval : time . Second ,
Rules : r ,
ShouldRestore : false ,
Opts : opts ,
} )
2023-10-17 19:02:03 -07:00
m . ruleGroups = [ ] * rules . Group { group }
}
// AlertingRules returns the rules built by CreateAlertingRules.
func (m *rulesRetrieverMock) AlertingRules() []*rules.AlertingRule {
	return m.alertingRules
}
// RuleGroups returns the groups built by CreateRuleGroups.
func (m *rulesRetrieverMock) RuleGroups() []*rules.Group {
	return m.ruleGroups
}
2023-10-17 19:02:03 -07:00
// toFactory wraps the mock in the factory signature the API expects.
func (m *rulesRetrieverMock) toFactory() func(context.Context) RulesRetriever {
	return func(context.Context) RulesRetriever { return m }
}
2017-05-11 08:09:24 -07:00
// samplePrometheusCfg is a minimal, all-empty Prometheus configuration
// served by the test API's config endpoint.
var samplePrometheusCfg = config.Config{
	GlobalConfig:       config.GlobalConfig{},
	AlertingConfig:     config.AlertingConfig{},
	RuleFiles:          []string{},
	ScrapeConfigs:      []*config.ScrapeConfig{},
	RemoteWriteConfigs: []*config.RemoteWriteConfig{},
	RemoteReadConfigs:  []*config.RemoteReadConfig{},
	OTLPConfig:         config.OTLPConfig{},
}
api: Added v1/status/flags endpoint. (#3864)
Endpoint URL: /api/v1/status/flags
Example Output:
```json
{
"status": "success",
"data": {
"alertmanager.notification-queue-capacity": "10000",
"alertmanager.timeout": "10s",
"completion-bash": "false",
"completion-script-bash": "false",
"completion-script-zsh": "false",
"config.file": "my_cool_prometheus.yaml",
"help": "false",
"help-long": "false",
"help-man": "false",
"log.level": "info",
"query.lookback-delta": "5m",
"query.max-concurrency": "20",
"query.timeout": "2m",
"storage.tsdb.max-block-duration": "36h",
"storage.tsdb.min-block-duration": "2h",
"storage.tsdb.no-lockfile": "false",
"storage.tsdb.path": "data/",
"storage.tsdb.retention": "15d",
"version": "false",
"web.console.libraries": "console_libraries",
"web.console.templates": "consoles",
"web.enable-admin-api": "false",
"web.enable-lifecycle": "false",
"web.external-url": "",
"web.listen-address": "0.0.0.0:9090",
"web.max-connections": "512",
"web.read-timeout": "5m",
"web.route-prefix": "/",
"web.user-assets": ""
}
}
```
Signed-off-by: Bartek Plotka <bwplotka@gmail.com>
2018-02-21 00:49:02 -08:00
// sampleFlagMap is the canned flag set served by the test API's
// status/flags endpoint.
var sampleFlagMap = map[string]string{
	"flag1": "value1",
	"flag2": "value2",
}
2015-06-04 09:07:57 -07:00
func TestEndpoints ( t * testing . T ) {
2024-04-29 02:48:24 -07:00
storage := promqltest . LoadedStorage ( t , `
2015-06-04 09:07:57 -07:00
load 1 m
test_metric1 { foo = "bar" } 0 + 100 x100
test_metric1 { foo = "boo" } 1 + 0x100
test_metric2 { foo = "boo" } 1 + 0x100
2020-08-28 16:21:39 -07:00
test_metric3 { foo = "bar" , dup = "1" } 1 + 0x100
test_metric3 { foo = "boo" , dup = "1" } 1 + 0x100
test_metric4 { foo = "bar" , dup = "1" } 1 + 0x100
test_metric4 { foo = "boo" , dup = "1" } 1 + 0x100
test_metric4 { foo = "boo" } 1 + 0x100
2015-06-04 09:07:57 -07:00
` )
2023-08-18 11:48:59 -07:00
t . Cleanup ( func ( ) { storage . Close ( ) } )
2021-03-16 02:47:45 -07:00
start := time . Unix ( 0 , 0 )
exemplars := [ ] exemplar . QueryResult {
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric3" , "foo" , "boo" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "abc" ) ,
Value : 10 ,
Ts : timestamp . FromTime ( start . Add ( 2 * time . Second ) ) ,
} ,
} ,
} ,
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric4" , "foo" , "bar" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "lul" ) ,
Value : 10 ,
Ts : timestamp . FromTime ( start . Add ( 4 * time . Second ) ) ,
} ,
} ,
} ,
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric3" , "foo" , "boo" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "abc2" ) ,
Value : 10 ,
Ts : timestamp . FromTime ( start . Add ( 4053 * time . Millisecond ) ) ,
} ,
} ,
} ,
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric4" , "foo" , "bar" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "lul2" ) ,
Value : 10 ,
Ts : timestamp . FromTime ( start . Add ( 4153 * time . Millisecond ) ) ,
} ,
} ,
} ,
}
for _ , ed := range exemplars {
2023-08-18 11:48:59 -07:00
_ , err := storage . AppendExemplar ( 0 , ed . SeriesLabels , ed . Exemplars [ 0 ] )
2021-03-16 02:47:45 -07:00
require . NoError ( t , err , "failed to add exemplar: %+v" , ed . Exemplars [ 0 ] )
}
2016-12-30 01:43:44 -08:00
now := time . Now ( )
2016-12-02 04:31:43 -08:00
2024-05-07 09:14:22 -07:00
ng := testEngine ( t )
2018-06-27 00:15:17 -07:00
t . Run ( "local" , func ( t * testing . T ) {
2024-05-07 09:14:22 -07:00
algr := rulesRetrieverMock { testing : t }
2018-03-25 09:50:34 -07:00
2023-10-17 19:02:03 -07:00
algr . CreateAlertingRules ( )
algr . CreateRuleGroups ( )
2018-03-25 09:50:34 -07:00
2023-10-17 19:02:03 -07:00
g := algr . RuleGroups ( )
g [ 0 ] . Eval ( context . Background ( ) , time . Now ( ) )
2018-03-25 09:50:34 -07:00
2019-12-04 11:33:01 -08:00
testTargetRetriever := setupTestTargetRetriever ( t )
2019-12-04 03:08:21 -08:00
2018-06-16 10:26:37 -07:00
api := & API {
2023-08-18 11:48:59 -07:00
Queryable : storage ,
2024-05-07 09:14:22 -07:00
QueryEngine : ng ,
2023-08-18 11:48:59 -07:00
ExemplarQueryable : storage . ExemplarQueryable ( ) ,
2020-04-16 01:30:47 -07:00
targetRetriever : testTargetRetriever . toFactory ( ) ,
2020-05-18 11:02:32 -07:00
alertmanagerRetriever : testAlertmanagerRetriever { } . toFactory ( ) ,
2018-11-19 02:21:14 -08:00
flagsMap : sampleFlagMap ,
2018-10-16 00:41:45 -07:00
now : func ( ) time . Time { return now } ,
config : func ( ) config . Config { return samplePrometheusCfg } ,
ready : func ( f http . HandlerFunc ) http . HandlerFunc { return f } ,
2020-05-18 11:02:32 -07:00
rulesRetriever : algr . toFactory ( ) ,
2018-06-16 10:26:37 -07:00
}
2023-08-18 11:48:59 -07:00
testEndpoints ( t , api , testTargetRetriever , storage , true )
2018-06-16 10:26:37 -07:00
} )
2017-01-13 01:20:11 -08:00
2023-10-04 01:36:55 -07:00
// Run all the API tests against an API that is wired to forward queries via
2018-06-16 10:26:37 -07:00
// the remote read client to a test server, which in turn sends them to the
2023-08-18 11:48:59 -07:00
// data from the test storage.
2018-06-16 10:26:37 -07:00
t . Run ( "remote" , func ( t * testing . T ) {
2023-08-18 11:48:59 -07:00
server := setupRemote ( storage )
2018-06-16 10:26:37 -07:00
defer server . Close ( )
u , err := url . Parse ( server . URL )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-16 10:26:37 -07:00
al := promlog . AllowedLevel { }
2020-10-29 02:43:23 -07:00
require . NoError ( t , al . Set ( "debug" ) )
2019-07-29 10:00:30 -07:00
2018-11-23 05:22:40 -08:00
af := promlog . AllowedFormat { }
2020-10-29 02:43:23 -07:00
require . NoError ( t , af . Set ( "logfmt" ) )
2019-07-29 10:00:30 -07:00
2018-11-23 05:22:40 -08:00
promlogConfig := promlog . Config {
Level : & al ,
Format : & af ,
}
2021-12-08 14:14:50 -08:00
dbDir := t . TempDir ( )
2018-09-07 14:26:04 -07:00
2020-11-19 07:23:03 -08:00
remote := remote . NewStorage ( promlog . New ( & promlogConfig ) , prometheus . DefaultRegisterer , func ( ) ( int64 , error ) {
return 0 , nil
[PRW 2.0] Merging `remote-write-2.0` feature branch to main (PRW 2.0 support + metadata in WAL) (#14395)
* Remote Write 1.1: e2e benchmarks (#13102)
* Remote Write e2e benchmarks
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Prometheus ports automatically assigned
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* make dashboard editable + more modular to different job label values
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* Dashboard improvements
* memory stats
* diffs look at counter increases
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* run script: absolute path for config templates
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* grafana dashboard improvements
* show actual values of metrics
* add memory stats and diff
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* dashboard changes
Signed-off-by: Callum Styan <callumstyan@gmail.com>
---------
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Co-authored-by: Callum Styan <callumstyan@gmail.com>
* replace snappy encoding library
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add new proto types
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add decode function for new write request proto
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add lookup table struct that is used to build the symbol table in new
write request format
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Implement code paths for new proto format
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* update example server to include handler for new format
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Add new test client
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* tests and new -> original proto mapping util
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add new proto support on receiver end
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Fix test
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* no-brainer copypaste but more performance write support
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove some comented code
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix mocks and fixture
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add basic reduce remote write handler benchmark
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* refactor out common code between write methods
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix: queue manager to include float histograms in new requests
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add sender-side tests and fix failing ones
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* refactor queue manager code to remove some duplication
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix build
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Improve sender benchmarks and some allocations
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Use github.com/golang/snappy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* cleanup: remove hardcoded fake url for testing
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Add 1.1 version handling code
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Remove config, update proto
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* gofmt
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix NewWriteClient and change new flags wording
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fields rewording in handler
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remote write handler to checks version header
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix typo in log
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Add minmized remote write proto format
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add functions for translating between new proto formats symbol table and
actual prometheus labels
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add functionality for new minimized remote write request format
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix minor things
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Make LabelSymbols a fixed32
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove unused proto type
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* update tests
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix build for stringlabels tag
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Use two uint32 to encode (offset,leng)
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* manually optimize varint marshaling
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Use unsafe []byte->string cast to reuse buffer
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix writeRequestMinimizedFixture
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove all code from previous interning approach
the 'minimized' version is now the only v1.1 version
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* minimally-tested exemplar support for rw 1.1
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* refactor new version flag to make it easier to pick a specific format
instead of having multiple flags, plus add new formats for testing
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* use exp slices for backwards compat. to go 1.20 plus add copyright
header to test file
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix label ranging
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Add bytes slice (instead of slice of 32bit vars) format for testing
Co-authored-by: Nicolás Pazos <npazosmendez@gmail.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* test additional len and lenbytes formats
Co-authored-by: Nicolás Pazos <npazosmendez@gmail.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove mistaken package lock changes
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove formats we've decided not to use
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove more format types we probably won't use
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* More cleanup
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* use require instead of assert in custom marshal test
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* cleanup; remove some unused functions
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* more cleanup, mostly linting fixes
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove package-lock.json change again
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* more cleanup, address review comments
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix test panic
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix minor lint issue + use labels Range function since it looks like
the tests fail to do `range labels.Labels` on CI
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* new interning format based on []string indeces
Co-authored-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove all new rw formats but the []string one
also adapt tests to the new format
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* cleanup rwSymbolTable
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* add some TODOs for later
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* don't reserve field 3 for new proto and add TODO
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix custom marshaling
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* additional merge fixes
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* lint fixes
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* fix server example
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* revert package-lock.json changes
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* update example prometheus version
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* define separate proto types for remote write 2.0
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* rename new proto types and move to separate pkg
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* update prometheus version for example
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* make proto
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* make Metadata not nullable
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* remove old MinSample proto message
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* change enum names to fit buf build recommend enum naming and lint rules
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* remote: Added test for classic histogram grouping when sending rw; Fixed queue manager test delay. (#13421)
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Remote write v2: metadata support in every write request (#13394)
* Approach bundling metadata along with samples and exemplars
Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com>
* Add first test; rebase with main
Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com>
* Alternative approach: bundle metadata in TimeSeries protobuf
Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com>
* update go mod to match main branch
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* fix after rebase
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* we're not going to modify the 1.X format anymore
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* Modify AppendMetadata based on the fact that we be putting metadata into
timeseries
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* Rename enums for remote write versions to something that makes more
sense + remove the added `sendMetadata` flag.
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* rename flag that enables writing of metadata records to the WAL
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* additional clean up
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* lint
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* fix usage of require.Len
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* some clean up from review comments
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* more review fixes
Signed-off-by: Callum Styan <callumstyan@gmail.com>
---------
Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Co-authored-by: Paschalis Tsilias <paschalist0@gmail.com>
* remote write 2.0: sync with `main` branch (#13510)
* consoles: exclude iowait and steal from CPU Utilisation
'iowait' and 'steal' indicate specific idle/wait states, which shouldn't
be counted into CPU Utilisation. Also see
https://github.com/prometheus-operator/kube-prometheus/pull/796 and
https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667.
Per the iostat man page:
%idle
Show the percentage of time that the CPU or CPUs were idle and the
system did not have an outstanding disk I/O request.
%iowait
Show the percentage of time that the CPU or CPUs were idle during
which the system had an outstanding disk I/O request.
%steal
Show the percentage of time spent in involuntary wait by the
virtual CPU or CPUs while the hypervisor was servicing another
virtual processor.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
* tsdb: shrink txRing with smaller integers
4 billion active transactions ought to be enough for anyone.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: create isolation transaction slice on demand
When Prometheus restarts it creates every series read in from the WAL,
but many of those series will be finished, and never receive any more
samples. By deferring allocation of the txRing slice to when it is first
needed, we save 32 bytes per stale series.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* add cluster variable to Overview dashboard
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* promql: simplify Native Histogram arithmetics
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
* Cut 2.49.0-rc.0 (#13270)
* Cut 2.49.0-rc.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed the duplicate.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Add unit protobuf parser
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Go on adding protobuf parsing for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Get conditional right
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Get VM Scale Set NIC (#13283)
Calling `*armnetwork.InterfacesClient.Get()` doesn't work for Scale Set
VM NIC, because these use a different Resource ID format.
Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()`
instead. This needs both the scale set name and the instance ID, so
add an `InstanceID` field to the `virtualMachine` struct. `InstanceID`
is empty for a VM that isn't a ScaleSetVM.
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
* Cut v2.49.0-rc.1
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Delete debugging lines, amend error message for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Correct order in error message
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Consider storage.ErrTooOldSample as non-retryable
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
* scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Avoid creating string for suffix, consider counters without _total suffix
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0)
---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump actions/setup-node from 3.8.1 to 4.0.1
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.1.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d...b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8)
---
updated-dependencies:
- dependency-name: actions/setup-node
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
* scripts: sort file list in embed directive
Otherwise the resulting string depends on find, which afaict depends on
the underlying filesystem. A stable file list make it easier to detect
UI changes in downstreams that need to track UI assets.
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
* Fix DataTableProps['data'] for resultType string
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* Fix handling of scalar and string in isHeatmapData
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* build(deps): bump github.com/influxdata/influxdb
Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.2 to 1.11.4.
- [Release notes](https://github.com/influxdata/influxdb/releases)
- [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4)
---
updated-dependencies:
- dependency-name: github.com/influxdata/influxdb
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump github.com/prometheus/prometheus
Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.48.0 to 0.48.1.
- [Release notes](https://github.com/prometheus/prometheus/releases)
- [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1)
---
updated-dependencies:
- dependency-name: github.com/prometheus/prometheus
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* Bump client_golang to v1.18.0 (#13373)
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Drop old inmemory samples (#13002)
* Drop old inmemory samples
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Avoid copying timeseries when the feature is disabled
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Run gofmt
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Clarify docs
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Add more logging info
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Remove loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* optimize function and add tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Simplify filter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Update help info from metrics
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use metrics to keep track of drop elements during buildWriteRequest
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var in tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* pass time.Now as parameter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Change buildwriterequest during retries
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Revert "Remove loggers"
This reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab.
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use log level debug for loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Fix linter
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove noisy debug-level logs; add 'reason' label to drop metrics
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove accidentally committed files
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Propagate logger to buildWriteRequest to log dropped data
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix docs comment
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Make drop reason more specific
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove unnecessary pass of logger
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Use snake_case for reason label
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix dropped samples metric
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
---------
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go.
The loop ran indefinitely if the condition isn't met.
Before, each iteration created a new timer channel which was always outpaced by
the other timer channel with smaller duration.
minor detail: There was a memory leak: resources of the ~10 previous timers were
constantly kept. With the fix, we may keep the resources of one timer around for defaultWait
but this isn't worth the changes to make it right.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData
ui: fix handling of scalar and string in isHeatmapData
* tsdb/{index,compact}: allow using custom postings encoding format (#13242)
* tsdb/{index,compact}: allow using custom postings encoding format
We would like to experiment with a different postings encoding format in
Thanos so in this change I am proposing adding another argument to
`NewWriter` which would allow users to change the format if needed.
Also, wire the leveled compactor so that it would be possible to change
the format there too.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb/compact: use a struct for leveled compactor options
As discussed on Slack, let's use a struct for the options in leveled
compactor.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: make changes after Bryan's review
- Make changes less intrusive
- Turn the postings encoder type into a function
- Add NewWriterWithEncoder()
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
---------
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0-rc.2
Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/6edd4406fa81c3da01a34fa6f6343087c207a568...0c52d547c9bc32b1aa3301fd7a9cb496313a4491)
---
updated-dependencies:
- dependency-name: actions/setup-go
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/407ffafae6a767df3e0230c3df91b6443ae8df75...012739e5082ff0c22ca6d6ab32e07c36df03c4a4)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis as a release shepherd (#13383)
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331)
Add `query analyze` command to promtool
This command analyzes the buckets of classic and native histograms,
based on data queried from the Prometheus query API, i.e. it
doesn't require direct access to the TSDB files.
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func
Fixing a very small logical problem I've introduced :(.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction
This functionality is needed in downstream projects because they have a
separate component that does compaction.
Upstreaming
https://github.com/grafana/mimir-prometheus/blob/7c8e9a2a76fc729e9078889782928b2fdfe240e9/tsdb/compact.go#L323-L325.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296)
Conditions are ANDed inside the same matcher but matchers are ORed
Including unit tests for "promtool tsdb dump".
Refactor some matchers scraping utils.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog
Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398)
This added the https://github.com/prometheus/prometheus/pull/13393
"EnableOverlappingCompaction" parameter to the compactor code but not to
the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and
set it to `true` in Prometheus.
Copy/paste the description from
https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022)
Restore more efficient version of NewPossibleNonCounterInfo annotation
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389)
The slices package is added to the standard library in Go 1.21;
we need to import from the exp area to maintain compatibility with Go 1.20.
Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538)
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535)
* Examples: link to `rate` for new users
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411)
These functions are intended for humans, as robots can already sort the results
however they please. Humans like things sorted "naturally":
* https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
A similar thing has been done to Grafana, which is also used by humans:
* https://github.com/grafana/grafana/pull/78024
* https://github.com/grafana/grafana/pull/78494
Signed-off-by: Ivan Babrou <github@ivan.computer>
* TestLabelValuesWithMatchers: Add test case
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag
Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222)
Native Histograms: support native_histogram_min_bucket_factor in scrape_config
---------
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392)
Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script
Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2
Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0.
- [Release notes](https://github.com/hetznercloud/hcloud-go/releases)
- [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0)
---
updated-dependencies:
- dependency-name: github.com/hetznercloud/hcloud-go/v2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342)
* Added diff flag for unit test to improvise readability & debugging
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Removed blank spaces
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed linting error
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Added cli flags to documentation
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Revert unrelated linting fixes
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed review suggestions
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Cleanup
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
---------
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* storage: skip merging when no remote storage configured
Prometheus is hard-coded to use a fanout storage between TSDB and
a remote storage which by default is empty.
This change detects the empty storage and skips merging between
result sets, which would make `Select()` sort results.
Bottom line: we skip a sort unless there really is some remote storage
configured.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Remove csmarchbanks from remote write owners (#13432)
I have not had the time to keep up with remote write and have no plans
to work on it in the near future so I am withdrawing my maintainership
of that part of the codebase. I continue to focus on client_python.
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation check at evaluation time
Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426)
Don't calculate postings beforehand: we may not need them. If all
matchers are for the requested label, we can just filter its values.
Also, if there are no values at all, no need to run any kind of
logic.
Also add more labelValuesWithMatchers benchmarks
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Add automatic memory limit handling
Enable automatic detection of memory limits and configure GOMEMLIMIT to
match.
* Also includes a flag to allow controlling the reserved ratio.
Signed-off-by: SuperQ <superq@gmail.com>
* Update OSSF badge link (#13433)
Provide a more user friendly interface
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375)
SD Managers take over responsibility for SD metrics registration
---------
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340)
Optimize histogram iterators
Histogram iterators allocate new objects in the AtHistogram and
AtFloatHistogram methods, which makes calculating rates over long
ranges expensive.
In #13215 we allowed an existing object to be reused
when converting an integer histogram to a float histogram. This commit follows
the same idea and allows injecting an existing object in the AtHistogram and
AtFloatHistogram methods. When the injected value is nil, iterators allocate
new histograms, otherwise they populate and return the injected object.
The commit also adds a CopyTo method to Histogram and FloatHistogram which
is used in the BufferedIterator to overwrite items in the ring instead of making
new copies.
Note that a specialized HPoint pool is needed for all of this to work
(`matrixSelectorHPool`).
---------
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
* doc: Mark `mad_over_time` as experimental (#13440)
We forgot to do that in
https://github.com/prometheus/prometheus/pull/13059
Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414)
Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple
of typos in other test cases. Also enclosing some implicit sub-tests in a
`t.Run` call to make them explicitly sub-tests.
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* update all go dependencies (#13438)
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454)
Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
Updates `k8s.io/api` from 0.28.4 to 0.29.1
- [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)
Updates `k8s.io/client-go` from 0.28.4 to 0.29.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453)
Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector).
Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0)
---
updated-dependencies:
- dependency-name: go.opentelemetry.io/collector/semconv
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: go-opentelemetry-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392)
---
updated-dependencies:
- dependency-name: actions/upload-artifact
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357)
Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459.
- [Release notes](https://github.com/bufbuild/buf-push-action/releases)
- [Commits](https://github.com/bufbuild/buf-push-action/compare/342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1...a654ff18effe4641ebea4a4ce242c49800728459)
---
updated-dependencies:
- dependency-name: bufbuild/buf-push-action
dependency-type: direct:production
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446)
This function is called very frequently when executing PromQL functions,
and we can do it much more efficiently inside Labels.
In the common case that `__name__` comes first in the labels, we simply
re-point to start at the next label, which is nearly free.
`DropMetricName` is now so cheap I removed the cache - benchmarks show
everything still goes faster.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: simplify internal series delete function (#13261)
Lifting an optimisation from Agent code, `seriesHashmap.del` can use
the unique series reference, doesn't need to check Labels.
Also streamline the logic for deleting from `unique` and `conflicts` maps,
and add some comments to help the next person.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* otlptranslator/update-copy.sh: Fix sed command lines
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462)
Rollback k8s.io Go modules to v0.28.6 to avoid forcing upgrade of Go to
1.21. This allows us to keep compatibility with the currently supported
upstream Go releases.
Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms
I have been the de-facto maintainer for native histograms from the
beginning. So let's put this into MAINTAINERS.md.
In addition, I hereby propose George Krajcsovits AKA Krajo as a
co-maintainer. He has contributed a lot of native histogram code, but
more importantly, he has contributed substantially to reviewing other
contributors' native histogram code, up to a point where I was merely
rubberstamping the PRs he had already reviewed. I'm confident that he
is ready to be granted commit rights as outlined in the
"Maintainers" section of the governance:
https://prometheus.io/governance/#maintainers
According to the same section of the governance, I will announce the
proposed change on the developers mailing list and will give some time
for lazy consensus before merging this PR.
Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460)
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series
Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since
`RLock` is only used in two places, `UpdateMetadata` and `Delete`,
neither of which are hotspots, we should use the cheaper one.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Fix last_over_time for native histograms
The last_over_time retains a histogram sample without making a copy.
This sample is now coming from the buffered iterator used for windowing functions,
and can be reused for reading subsequent samples as the iterator progresses.
I would propose copying the sample in the last_over_time function, similar to
how it is done for rate, sum_over_time and others.
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
* Implementation
NOTE:
Rebased from main after refactor in #13014
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices each request
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating
Updated & added tests
Review feedback nits
Return empty map if not indeterminate
Use highWatermark to track inflight requests counter
Appease the linter
Clarify feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update concurrency controller once the rule evaluation has done
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Remove superfluous nil check in Group.metrics
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* api: Serialize discovered and target labels into JSON directly (#13469)
Converted maps into labels.Labels to avoid a lot of copying of data which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484)
Converted maps into labels.Labels to avoid a lot of copying of data which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* Add ShardedPostings() support to TSDB (#10421)
This PR is a reference implementation of the proposal described in #10420.
In addition to what described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function which doesn't change over time, and that's used by query sharding in order to get a stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, and what's used by Grafana Mimir for query sharding (because built before stringlabels was a thing).
Follow up work
As mentioned in #10420, if this PR is accepted I'm also open to upload another fundamental piece used by Grafana Mimir query sharding to accelerate the query execution: an optional, configurable and fast in-memory cache for the series hashes.
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* storage/remote: document why two benchmarks are skipped
One was silently doing nothing; one was doing something but the work
didn't go up linearly with iteration count.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Pod status changes not discovered by Kube Endpoints SD (#13337)
* fix(discovery/kubernetes/endpoints): react to changes on Pods because some modifications can occur on them without triggering an update on the related Endpoints (The Pod phase changing from Pending to Running e.g.).
---------
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
* Small improvements, add const, remove copypasta (#8106)
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation. (#13448)
* Reusing points slice from previous series when the slice is under utilized
* Adding comments on the bench test
Signed-off-by: Alan Protasio <alanprot@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
---------
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
* remote write 2.0 - follow up improvements (#13478)
* move remote write proto version config from a remote storage config to a
per remote write configuration option
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* rename scrape config for metadata, fix 2.0 header var name/value (was
1.1), and more clean up
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* address review comments, mostly lint fixes
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* another lint fix
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* lint imports
Signed-off-by: Callum Styan <callumstyan@gmail.com>
---------
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Added commentary to RW 2.0 protocol for easier adoption and explicit semantics. (#13502)
* Added commentary to RW 2.0 protocol for easier adoption and explicit semantics.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Apply suggestions from code review
Co-authored-by: Nico Pazos <32206519+npazosmendez@users.noreply.github.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Co-authored-by: Callum Styan <callumstyan@gmail.com>
Co-authored-by: Nico Pazos <32206519+npazosmendez@users.noreply.github.com>
* prw2.0: Added support for "custom" layouts for native histogram proto (#13558)
* prw2.0: Added support for "custom" layouts for native histogram.
Result of the discussions:
* https://github.com/prometheus/prometheus/issues/13475#issuecomment-1931496924
* https://cloud-native.slack.com/archives/C02KR205UMU/p1707301006347199
Signed-off-by: bwplotka <bwplotka@gmail.com>
* prw2.0: Added support for "custom" layouts for native histogram.
Result of the discussions:
* https://github.com/prometheus/prometheus/issues/13475#issuecomment-1931496924
* https://cloud-native.slack.com/archives/C02KR205UMU/p1707301006347199
Signed-off-by: bwplotka <bwplotka@gmail.com>
# Conflicts:
# prompb/write/v2/types.pb.go
* Update prompb/write/v2/types.proto
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
* Addressed comments, fixed test.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
* first draft of content negotiation
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Lint
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Fix race in test
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Fix another test race
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Almost done with lint
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Fix todos around 405 HEAD handling
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Changes based on review comments
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Update storage/remote/client.go
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Latest updates to review comments
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* latest tweaks
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* remote write 2.0 - content negotiation remediation (#13921)
* Consolidate renegotiation error into one, fix tests
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* fix metric name and actually increment counter
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
---------
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Fixes after main sync.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* [PRW 2.0] Moved rw2 proto to the full path (both package name and placement) (#13973)
undefined
* [PRW2.0] Remove benchmark scripts (#13949)
See rationales on https://docs.google.com/document/d/1Bpf7mYjrHUhPHkie0qlnZFxzgqf_L32kM8ZOknSdJrU/edit
Signed-off-by: bwplotka <bwplotka@gmail.com>
* rw20: Update prw commentary after Callum spec review (#14136)
* rw20: Update prw commentary after Callum spec review
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
* Update types.proto
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
---------
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
* [PRW 2.0] Updated spec proto (2.0-rc.1); deterministic v1 interop; to be sympathetic with implementation. (#14330)
* [PRW 2.0] Updated spec proto (2.0-rc.1); deterministic v1 interop; to be sympathetic with implementation.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* update custom marshalling
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed confusing comments.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* [PRW-2.0] (chain1) New Remote Write 2.0 Config options for 2.0-rc.1 spec. (#14335)
NOTE: For simple review this change does not touch remote/ packages, only main and configs.
Spec: https://prometheus.io/docs/specs/remote_write_spec_2_0
Supersedes https://github.com/prometheus/prometheus/pull/13968
Signed-off-by: bwplotka <bwplotka@gmail.com>
* [PRW-2.0] (part 2) Removed automatic negotiation, updates for the latest spec semantics in remote pkg (#14329)
* [PRW-2.0] (part2) Moved to latest basic negotiation & spec semantics.
Spec: https://github.com/prometheus/docs/pull/2462
Supersedes https://github.com/prometheus/prometheus/pull/13968
Signed-off-by: bwplotka <bwplotka@gmail.com>
# Conflicts:
# config/config.go
# docs/configuration/configuration.md
# storage/remote/queue_manager_test.go
# storage/remote/write.go
# web/api/v1/api.go
* Addressed comments.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* storage/remote tests: refactor: extract function newTestQueueManager
To reduce repetition.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* use newTestQueueManager for test
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* [PRW 2.0] (part3) moved type specific conversions to prompb and writev2 codecs.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Added test for rwProtoMsgFlagParser; fixed TODO comment.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Renamed DecodeV2WriteRequestStr to DecodeWriteV2Request (with tests).
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Addressed comments on remote_storage example, updated it for 2.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Fixed `--enable-feature=metadata-wal-records` docs and error when using PRW 2.0 without it.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Addressed Callum comments on custom*.go
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Added TODO to genproto.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Addressed Callum comments in remote pkg.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Added metadata validation to write handler test; fixed ToMetadata.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Addressed rest of Callum comments.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Fixed writev2.FromMetadataType (was wrongly using prompb).
Signed-off-by: bwplotka <bwplotka@gmail.com>
* fix a few import whitespaces
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* add a default case with an error to the example RW receiver
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* more minor import whitespace changes
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* Apply suggestions from code review
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
* Update storage/remote/queue_manager_test.go
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
---------
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com>
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
Co-authored-by: Nicolás Pazos <32206519+npazosmendez@users.noreply.github.com>
Co-authored-by: Callum Styan <callumstyan@gmail.com>
Co-authored-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: alexgreenbank <alex.greenbank@grafana.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: Paschalis Tsilias <paschalist0@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-07-04 14:29:20 -07:00
} , dbDir , 1 * time . Second , nil , false )
2018-06-16 10:26:37 -07:00
err = remote . ApplyConfig ( & config . Config {
RemoteReadConfigs : [ ] * config . RemoteReadConfig {
{
URL : & config_util . URL { URL : u } ,
RemoteTimeout : model . Duration ( 1 * time . Second ) ,
ReadRecent : true ,
} ,
} ,
} )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2018-06-16 10:26:37 -07:00
2024-05-07 09:14:22 -07:00
algr := rulesRetrieverMock { testing : t }
2018-03-25 09:50:34 -07:00
2023-10-17 19:02:03 -07:00
algr . CreateAlertingRules ( )
algr . CreateRuleGroups ( )
2018-03-25 09:50:34 -07:00
2023-10-17 19:02:03 -07:00
g := algr . RuleGroups ( )
g [ 0 ] . Eval ( context . Background ( ) , time . Now ( ) )
2018-03-25 09:50:34 -07:00
2019-12-04 11:33:01 -08:00
testTargetRetriever := setupTestTargetRetriever ( t )
2019-12-04 03:08:21 -08:00
2018-06-16 10:26:37 -07:00
api := & API {
Queryable : remote ,
2024-05-07 09:14:22 -07:00
QueryEngine : ng ,
2023-08-18 11:48:59 -07:00
ExemplarQueryable : storage . ExemplarQueryable ( ) ,
2020-04-16 01:30:47 -07:00
targetRetriever : testTargetRetriever . toFactory ( ) ,
2020-05-18 11:02:32 -07:00
alertmanagerRetriever : testAlertmanagerRetriever { } . toFactory ( ) ,
2018-11-19 02:21:14 -08:00
flagsMap : sampleFlagMap ,
2018-10-16 00:41:45 -07:00
now : func ( ) time . Time { return now } ,
config : func ( ) config . Config { return samplePrometheusCfg } ,
ready : func ( f http . HandlerFunc ) http . HandlerFunc { return f } ,
2020-05-18 11:02:32 -07:00
rulesRetriever : algr . toFactory ( ) ,
2018-06-16 10:26:37 -07:00
}
2023-08-18 11:48:59 -07:00
testEndpoints ( t , api , testTargetRetriever , storage , false )
2018-06-16 10:26:37 -07:00
} )
2018-11-19 02:21:14 -08:00
}
2022-10-20 02:17:00 -07:00
// byLabels sorts a slice of label sets into the deterministic order
// defined by labels.Compare, so expected and actual series lists can be
// compared element-wise in tests.
type byLabels []labels.Labels

func (bl byLabels) Len() int           { return len(bl) }
func (bl byLabels) Swap(i, j int)      { bl[i], bl[j] = bl[j], bl[i] }
func (bl byLabels) Less(i, j int) bool { return labels.Compare(bl[i], bl[j]) < 0 }
func TestGetSeries ( t * testing . T ) {
2018-11-19 02:21:14 -08:00
// TestEndpoints doesn't have enough label names to test api.labelNames
// endpoint properly. Hence we test it separately.
2024-04-29 02:48:24 -07:00
storage := promqltest . LoadedStorage ( t , `
2018-11-19 02:21:14 -08:00
load 1 m
test_metric1 { foo1 = "bar" , baz = "abc" } 0 + 100 x100
test_metric1 { foo2 = "boo" } 1 + 0x100
test_metric2 { foo = "boo" } 1 + 0x100
test_metric2 { foo = "boo" , xyz = "qwerty" } 1 + 0x100
2021-07-20 05:38:08 -07:00
test_metric2 { foo = "baz" , abc = "qwerty" } 1 + 0x100
2018-11-19 02:21:14 -08:00
` )
2023-08-18 11:48:59 -07:00
t . Cleanup ( func ( ) { storage . Close ( ) } )
2022-10-20 02:17:00 -07:00
api := & API {
2023-08-18 11:48:59 -07:00
Queryable : storage ,
2022-10-20 02:17:00 -07:00
}
request := func ( method string , matchers ... string ) ( * http . Request , error ) {
u , err := url . Parse ( "http://example.com" )
require . NoError ( t , err )
q := u . Query ( )
for _ , matcher := range matchers {
q . Add ( "match[]" , matcher )
}
u . RawQuery = q . Encode ( )
r , err := http . NewRequest ( method , u . String ( ) , nil )
if method == http . MethodPost {
r . Header . Set ( "Content-Type" , "application/x-www-form-urlencoded" )
}
return r , err
}
for _ , tc := range [ ] struct {
name string
api * API
matchers [ ] string
expected [ ] labels . Labels
expectedErrorType errorType
} {
{
name : "no matchers" ,
expectedErrorType : errorBadData ,
api : api ,
} ,
{
name : "non empty label matcher" ,
matchers : [ ] string { ` { foo=~".+"} ` } ,
expected : [ ] labels . Labels {
2022-02-27 06:19:21 -08:00
labels . FromStrings ( "__name__" , "test_metric2" , "abc" , "qwerty" , "foo" , "baz" ) ,
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" ) ,
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" , "xyz" , "qwerty" ) ,
2022-10-20 02:17:00 -07:00
} ,
api : api ,
} ,
{
name : "exact label matcher" ,
matchers : [ ] string { ` { foo="boo"} ` } ,
expected : [ ] labels . Labels {
2022-02-27 06:19:21 -08:00
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" ) ,
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" , "xyz" , "qwerty" ) ,
2022-10-20 02:17:00 -07:00
} ,
api : api ,
} ,
{
name : "two matchers" ,
matchers : [ ] string { ` { foo="boo"} ` , ` { foo="baz"} ` } ,
expected : [ ] labels . Labels {
2022-02-27 06:19:21 -08:00
labels . FromStrings ( "__name__" , "test_metric2" , "abc" , "qwerty" , "foo" , "baz" ) ,
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" ) ,
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" , "xyz" , "qwerty" ) ,
2022-10-20 02:17:00 -07:00
} ,
api : api ,
} ,
{
name : "exec error type" ,
matchers : [ ] string { ` { foo="boo"} ` , ` { foo="baz"} ` } ,
expectedErrorType : errorExec ,
api : & API {
Queryable : errorTestQueryable { err : fmt . Errorf ( "generic" ) } ,
} ,
} ,
{
name : "storage error type" ,
matchers : [ ] string { ` { foo="boo"} ` , ` { foo="baz"} ` } ,
expectedErrorType : errorInternal ,
api : & API {
Queryable : errorTestQueryable { err : promql . ErrStorage { Err : fmt . Errorf ( "generic" ) } } ,
} ,
} ,
} {
t . Run ( tc . name , func ( t * testing . T ) {
ctx := context . Background ( )
req , err := request ( http . MethodGet , tc . matchers ... )
require . NoError ( t , err )
res := tc . api . series ( req . WithContext ( ctx ) )
assertAPIError ( t , res . err , tc . expectedErrorType )
if tc . expectedErrorType == errorNone {
r := res . data . ( [ ] labels . Labels )
sort . Sort ( byLabels ( tc . expected ) )
sort . Sort ( byLabels ( r ) )
2023-04-16 05:13:31 -07:00
testutil . RequireEqual ( t , tc . expected , r )
2022-10-20 02:17:00 -07:00
}
} )
}
}
func TestQueryExemplars ( t * testing . T ) {
start := time . Unix ( 0 , 0 )
2024-04-29 02:48:24 -07:00
storage := promqltest . LoadedStorage ( t , `
2022-10-20 02:17:00 -07:00
load 1 m
test_metric1 { foo = "bar" } 0 + 100 x100
test_metric1 { foo = "boo" } 1 + 0x100
test_metric2 { foo = "boo" } 1 + 0x100
test_metric3 { foo = "bar" , dup = "1" } 1 + 0x100
test_metric3 { foo = "boo" , dup = "1" } 1 + 0x100
test_metric4 { foo = "bar" , dup = "1" } 1 + 0x100
test_metric4 { foo = "boo" , dup = "1" } 1 + 0x100
test_metric4 { foo = "boo" } 1 + 0x100
` )
2023-08-18 11:48:59 -07:00
t . Cleanup ( func ( ) { storage . Close ( ) } )
2022-10-20 02:17:00 -07:00
api := & API {
2023-08-18 11:48:59 -07:00
Queryable : storage ,
2024-05-07 09:14:22 -07:00
QueryEngine : testEngine ( t ) ,
2023-08-18 11:48:59 -07:00
ExemplarQueryable : storage . ExemplarQueryable ( ) ,
2022-10-20 02:17:00 -07:00
}
request := func ( method string , qs url . Values ) ( * http . Request , error ) {
u , err := url . Parse ( "http://example.com" )
require . NoError ( t , err )
u . RawQuery = qs . Encode ( )
r , err := http . NewRequest ( method , u . String ( ) , nil )
if method == http . MethodPost {
r . Header . Set ( "Content-Type" , "application/x-www-form-urlencoded" )
}
return r , err
}
for _ , tc := range [ ] struct {
name string
query url . Values
exemplars [ ] exemplar . QueryResult
api * API
expectedErrorType errorType
} {
{
name : "no error" ,
api : api ,
query : url . Values {
"query" : [ ] string { ` test_metric3 { foo="boo"} - test_metric4 { foo="bar"} ` } ,
"start" : [ ] string { "0" } ,
"end" : [ ] string { "4" } ,
} ,
exemplars : [ ] exemplar . QueryResult {
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric3" , "foo" , "boo" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "abc" ) ,
Value : 10 ,
Ts : timestamp . FromTime ( start . Add ( 0 * time . Second ) ) ,
} ,
} ,
} ,
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric4" , "foo" , "bar" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "lul" ) ,
Value : 10 ,
Ts : timestamp . FromTime ( start . Add ( 3 * time . Second ) ) ,
} ,
} ,
} ,
} ,
} ,
{
name : "should return errorExec upon genetic error" ,
expectedErrorType : errorExec ,
api : & API {
ExemplarQueryable : errorTestQueryable { err : fmt . Errorf ( "generic" ) } ,
} ,
query : url . Values {
"query" : [ ] string { ` test_metric3 { foo="boo"} - test_metric4 { foo="bar"} ` } ,
"start" : [ ] string { "0" } ,
"end" : [ ] string { "4" } ,
} ,
} ,
{
name : "should return errorInternal err type is ErrStorage" ,
expectedErrorType : errorInternal ,
api : & API {
ExemplarQueryable : errorTestQueryable { err : promql . ErrStorage { Err : fmt . Errorf ( "generic" ) } } ,
} ,
query : url . Values {
"query" : [ ] string { ` test_metric3 { foo="boo"} - test_metric4 { foo="bar"} ` } ,
"start" : [ ] string { "0" } ,
"end" : [ ] string { "4" } ,
} ,
} ,
} {
t . Run ( tc . name , func ( t * testing . T ) {
2023-08-18 11:48:59 -07:00
es := storage
2022-10-20 02:17:00 -07:00
ctx := context . Background ( )
for _ , te := range tc . exemplars {
for _ , e := range te . Exemplars {
_ , err := es . AppendExemplar ( 0 , te . SeriesLabels , e )
2021-09-03 02:51:27 -07:00
require . NoError ( t , err )
2022-10-20 02:17:00 -07:00
}
}
2018-11-19 02:21:14 -08:00
2022-10-20 02:17:00 -07:00
req , err := request ( http . MethodGet , tc . query )
require . NoError ( t , err )
res := tc . api . queryExemplars ( req . WithContext ( ctx ) )
assertAPIError ( t , res . err , tc . expectedErrorType )
if tc . expectedErrorType == errorNone {
assertAPIResponse ( t , res . data , tc . exemplars )
}
} )
}
}
func TestLabelNames ( t * testing . T ) {
// TestEndpoints doesn't have enough label names to test api.labelNames
// endpoint properly. Hence we test it separately.
2024-04-29 02:48:24 -07:00
storage := promqltest . LoadedStorage ( t , `
2022-10-20 02:17:00 -07:00
load 1 m
test_metric1 { foo1 = "bar" , baz = "abc" } 0 + 100 x100
test_metric1 { foo2 = "boo" } 1 + 0x100
test_metric2 { foo = "boo" } 1 + 0x100
test_metric2 { foo = "boo" , xyz = "qwerty" } 1 + 0x100
test_metric2 { foo = "baz" , abc = "qwerty" } 1 + 0x100
` )
2023-08-18 11:48:59 -07:00
t . Cleanup ( func ( ) { storage . Close ( ) } )
2018-11-19 02:21:14 -08:00
api := & API {
2023-08-18 11:48:59 -07:00
Queryable : storage ,
2018-11-19 02:21:14 -08:00
}
2024-05-15 11:39:54 -07:00
request := func ( method , limit string , matchers ... string ) ( * http . Request , error ) {
2021-07-20 05:38:08 -07:00
u , err := url . Parse ( "http://example.com" )
require . NoError ( t , err )
q := u . Query ( )
for _ , matcher := range matchers {
q . Add ( "match[]" , matcher )
}
2024-05-15 11:39:54 -07:00
if limit != "" {
q . Add ( "limit" , limit )
}
2021-07-20 05:38:08 -07:00
u . RawQuery = q . Encode ( )
r , err := http . NewRequest ( method , u . String ( ) , nil )
if method == http . MethodPost {
2018-11-19 02:21:14 -08:00
r . Header . Set ( "Content-Type" , "application/x-www-form-urlencoded" )
}
2021-07-20 05:38:08 -07:00
return r , err
2018-11-19 02:21:14 -08:00
}
2021-07-20 05:38:08 -07:00
for _ , tc := range [ ] struct {
2022-10-20 02:17:00 -07:00
name string
api * API
matchers [ ] string
2024-05-15 11:39:54 -07:00
limit string
2022-10-20 02:17:00 -07:00
expected [ ] string
expectedErrorType errorType
2021-07-20 05:38:08 -07:00
} {
{
name : "no matchers" ,
expected : [ ] string { "__name__" , "abc" , "baz" , "foo" , "foo1" , "foo2" , "xyz" } ,
2022-10-20 02:17:00 -07:00
api : api ,
2021-07-20 05:38:08 -07:00
} ,
{
name : "non empty label matcher" ,
matchers : [ ] string { ` { foo=~".+"} ` } ,
expected : [ ] string { "__name__" , "abc" , "foo" , "xyz" } ,
2022-10-20 02:17:00 -07:00
api : api ,
2021-07-20 05:38:08 -07:00
} ,
2024-05-15 11:39:54 -07:00
{
name : "non empty label matcher with limit" ,
matchers : [ ] string { ` { foo=~".+"} ` } ,
expected : [ ] string { "__name__" , "abc" } ,
limit : "2" ,
api : api ,
} ,
2021-07-20 05:38:08 -07:00
{
name : "exact label matcher" ,
matchers : [ ] string { ` { foo="boo"} ` } ,
expected : [ ] string { "__name__" , "foo" , "xyz" } ,
2022-10-20 02:17:00 -07:00
api : api ,
2021-07-20 05:38:08 -07:00
} ,
{
name : "two matchers" ,
matchers : [ ] string { ` { foo="boo"} ` , ` { foo="baz"} ` } ,
expected : [ ] string { "__name__" , "abc" , "foo" , "xyz" } ,
2022-10-20 02:17:00 -07:00
api : api ,
} ,
{
name : "exec error type" ,
matchers : [ ] string { ` { foo="boo"} ` , ` { foo="baz"} ` } ,
expectedErrorType : errorExec ,
api : & API {
Queryable : errorTestQueryable { err : fmt . Errorf ( "generic" ) } ,
} ,
} ,
{
name : "storage error type" ,
matchers : [ ] string { ` { foo="boo"} ` , ` { foo="baz"} ` } ,
expectedErrorType : errorInternal ,
api : & API {
Queryable : errorTestQueryable { err : promql . ErrStorage { Err : fmt . Errorf ( "generic" ) } } ,
} ,
2021-07-20 05:38:08 -07:00
} ,
} {
t . Run ( tc . name , func ( t * testing . T ) {
for _ , method := range [ ] string { http . MethodGet , http . MethodPost } {
ctx := context . Background ( )
2024-05-15 11:39:54 -07:00
req , err := request ( method , tc . limit , tc . matchers ... )
2021-07-20 05:38:08 -07:00
require . NoError ( t , err )
2022-10-20 02:17:00 -07:00
res := tc . api . labelNames ( req . WithContext ( ctx ) )
assertAPIError ( t , res . err , tc . expectedErrorType )
if tc . expectedErrorType == errorNone {
assertAPIResponse ( t , res . data , tc . expected )
}
2021-07-20 05:38:08 -07:00
}
} )
2018-11-19 02:21:14 -08:00
}
2018-06-16 10:26:37 -07:00
}
2022-02-10 06:17:05 -08:00
// testStats is a minimal stats.QueryStats implementation used to exercise a
// custom StatsRenderer; it marshals to a single "custom" JSON field.
type testStats struct {
	Custom string `json:"custom"`
}

// Builtin satisfies stats.QueryStats by returning zero-valued built-in stats.
func (testStats) Builtin() stats.BuiltinStats {
	return stats.BuiltinStats{}
}
2022-02-01 18:07:23 -08:00
func TestStats ( t * testing . T ) {
2023-08-18 11:48:59 -07:00
storage := teststorage . New ( t )
t . Cleanup ( func ( ) { storage . Close ( ) } )
2022-02-01 18:07:23 -08:00
api := & API {
2023-08-18 11:48:59 -07:00
Queryable : storage ,
2024-05-07 09:14:22 -07:00
QueryEngine : testEngine ( t ) ,
2022-02-01 18:07:23 -08:00
now : func ( ) time . Time {
return time . Unix ( 123 , 0 )
} ,
}
2022-02-10 06:17:05 -08:00
request := func ( method , param string ) ( * http . Request , error ) {
2022-02-01 18:07:23 -08:00
u , err := url . Parse ( "http://example.com" )
require . NoError ( t , err )
q := u . Query ( )
q . Add ( "stats" , param )
q . Add ( "query" , "up" )
q . Add ( "start" , "0" )
q . Add ( "end" , "100" )
q . Add ( "step" , "10" )
u . RawQuery = q . Encode ( )
r , err := http . NewRequest ( method , u . String ( ) , nil )
if method == http . MethodPost {
r . Header . Set ( "Content-Type" , "application/x-www-form-urlencoded" )
}
return r , err
}
for _ , tc := range [ ] struct {
name string
2022-02-10 06:17:05 -08:00
renderer StatsRenderer
2022-02-01 18:07:23 -08:00
param string
expected func ( * testing . T , interface { } )
} {
{
name : "stats is blank" ,
param : "" ,
expected : func ( t * testing . T , i interface { } ) {
2023-12-07 03:35:01 -08:00
require . IsType ( t , & QueryData { } , i )
2023-02-01 20:29:13 -08:00
qd := i . ( * QueryData )
2022-02-01 18:07:23 -08:00
require . Nil ( t , qd . Stats )
} ,
} ,
{
name : "stats is true" ,
param : "true" ,
expected : func ( t * testing . T , i interface { } ) {
2023-12-07 03:35:01 -08:00
require . IsType ( t , & QueryData { } , i )
2023-02-01 20:29:13 -08:00
qd := i . ( * QueryData )
2022-02-01 18:07:23 -08:00
require . NotNil ( t , qd . Stats )
2022-02-10 06:17:05 -08:00
qs := qd . Stats . Builtin ( )
2022-02-01 18:07:23 -08:00
require . NotNil ( t , qs . Timings )
require . Greater ( t , qs . Timings . EvalTotalTime , float64 ( 0 ) )
require . NotNil ( t , qs . Samples )
require . NotNil ( t , qs . Samples . TotalQueryableSamples )
require . Nil ( t , qs . Samples . TotalQueryableSamplesPerStep )
} ,
} ,
{
name : "stats is all" ,
param : "all" ,
expected : func ( t * testing . T , i interface { } ) {
2023-12-07 03:35:01 -08:00
require . IsType ( t , & QueryData { } , i )
2023-02-01 20:29:13 -08:00
qd := i . ( * QueryData )
2022-02-01 18:07:23 -08:00
require . NotNil ( t , qd . Stats )
2022-02-10 06:17:05 -08:00
qs := qd . Stats . Builtin ( )
2022-02-01 18:07:23 -08:00
require . NotNil ( t , qs . Timings )
require . Greater ( t , qs . Timings . EvalTotalTime , float64 ( 0 ) )
require . NotNil ( t , qs . Samples )
require . NotNil ( t , qs . Samples . TotalQueryableSamples )
require . NotNil ( t , qs . Samples . TotalQueryableSamplesPerStep )
} ,
} ,
2022-02-10 06:17:05 -08:00
{
name : "custom handler with known value" ,
renderer : func ( ctx context . Context , s * stats . Statistics , p string ) stats . QueryStats {
if p == "known" {
return testStats { "Custom Value" }
}
return nil
} ,
param : "known" ,
expected : func ( t * testing . T , i interface { } ) {
2023-12-07 03:35:01 -08:00
require . IsType ( t , & QueryData { } , i )
2023-02-01 20:29:13 -08:00
qd := i . ( * QueryData )
2022-02-10 06:17:05 -08:00
require . NotNil ( t , qd . Stats )
2024-04-28 12:02:18 -07:00
json := jsoniter . ConfigCompatibleWithStandardLibrary
2022-02-10 06:17:05 -08:00
j , err := json . Marshal ( qd . Stats )
require . NoError ( t , err )
2023-12-07 03:35:01 -08:00
require . JSONEq ( t , ` { "custom":"Custom Value"} ` , string ( j ) )
2022-02-10 06:17:05 -08:00
} ,
} ,
2022-02-01 18:07:23 -08:00
} {
t . Run ( tc . name , func ( t * testing . T ) {
2022-02-10 06:17:05 -08:00
before := api . statsRenderer
defer func ( ) { api . statsRenderer = before } ( )
api . statsRenderer = tc . renderer
2022-02-01 18:07:23 -08:00
for _ , method := range [ ] string { http . MethodGet , http . MethodPost } {
ctx := context . Background ( )
req , err := request ( method , tc . param )
require . NoError ( t , err )
res := api . query ( req . WithContext ( ctx ) )
assertAPIError ( t , res . err , "" )
tc . expected ( t , res . data )
res = api . queryRange ( req . WithContext ( ctx ) )
assertAPIError ( t , res . err , "" )
tc . expected ( t , res . data )
}
} )
}
}
2019-12-04 11:33:01 -08:00
func setupTestTargetRetriever ( t * testing . T ) * testTargetRetriever {
t . Helper ( )
targets := [ ] * testTargetParams {
{
Identifier : "test" ,
Labels : labels . FromMap ( map [ string ] string {
2021-08-31 08:37:32 -07:00
model . SchemeLabel : "http" ,
model . AddressLabel : "example.com:8080" ,
model . MetricsPathLabel : "/metrics" ,
model . JobLabel : "test" ,
model . ScrapeIntervalLabel : "15s" ,
model . ScrapeTimeoutLabel : "5s" ,
2019-12-04 11:33:01 -08:00
} ) ,
2022-02-27 06:19:21 -08:00
DiscoveredLabels : labels . EmptyLabels ( ) ,
2019-12-04 11:33:01 -08:00
Params : url . Values { } ,
Reports : [ ] * testReport { { scrapeStart , 70 * time . Millisecond , nil } } ,
Active : true ,
} ,
{
Identifier : "blackbox" ,
Labels : labels . FromMap ( map [ string ] string {
2021-08-31 08:37:32 -07:00
model . SchemeLabel : "http" ,
model . AddressLabel : "localhost:9115" ,
model . MetricsPathLabel : "/probe" ,
model . JobLabel : "blackbox" ,
model . ScrapeIntervalLabel : "20s" ,
model . ScrapeTimeoutLabel : "10s" ,
2019-12-04 11:33:01 -08:00
} ) ,
2022-02-27 06:19:21 -08:00
DiscoveredLabels : labels . EmptyLabels ( ) ,
2019-12-04 11:33:01 -08:00
Params : url . Values { "target" : [ ] string { "example.com" } } ,
Reports : [ ] * testReport { { scrapeStart , 100 * time . Millisecond , errors . New ( "failed" ) } } ,
Active : true ,
} ,
{
Identifier : "blackbox" ,
2022-02-27 06:19:21 -08:00
Labels : labels . EmptyLabels ( ) ,
2019-12-04 11:33:01 -08:00
DiscoveredLabels : labels . FromMap ( map [ string ] string {
2021-08-31 08:37:32 -07:00
model . SchemeLabel : "http" ,
model . AddressLabel : "http://dropped.example.com:9115" ,
model . MetricsPathLabel : "/probe" ,
model . JobLabel : "blackbox" ,
model . ScrapeIntervalLabel : "30s" ,
model . ScrapeTimeoutLabel : "15s" ,
2019-12-04 11:33:01 -08:00
} ) ,
Params : url . Values { } ,
Active : false ,
} ,
}
2019-12-10 06:56:16 -08:00
return newTestTargetRetriever ( targets )
2019-12-04 11:33:01 -08:00
}
2018-06-16 10:26:37 -07:00
func setupRemote ( s storage . Storage ) * httptest . Server {
handler := http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
req , err := remote . DecodeReadRequest ( r )
if err != nil {
http . Error ( w , err . Error ( ) , http . StatusBadRequest )
return
}
resp := prompb . ReadResponse {
Results : make ( [ ] * prompb . QueryResult , len ( req . Queries ) ) ,
}
for i , query := range req . Queries {
2019-08-19 13:16:10 -07:00
matchers , err := remote . FromLabelMatchers ( query . Matchers )
2018-06-16 10:26:37 -07:00
if err != nil {
http . Error ( w , err . Error ( ) , http . StatusBadRequest )
return
}
2020-03-12 02:36:09 -07:00
var hints * storage . SelectHints
2019-08-19 13:16:10 -07:00
if query . Hints != nil {
2020-03-12 02:36:09 -07:00
hints = & storage . SelectHints {
2019-08-19 13:16:10 -07:00
Start : query . Hints . StartMs ,
End : query . Hints . EndMs ,
Step : query . Hints . StepMs ,
Func : query . Hints . Func ,
}
}
2023-09-12 03:37:38 -07:00
querier , err := s . Querier ( query . StartTimestampMs , query . EndTimestampMs )
2018-06-16 10:26:37 -07:00
if err != nil {
http . Error ( w , err . Error ( ) , http . StatusInternalServerError )
return
}
defer querier . Close ( )
2023-09-12 03:37:38 -07:00
set := querier . Select ( r . Context ( ) , false , hints , matchers ... )
2020-06-09 09:57:31 -07:00
resp . Results [ i ] , _ , err = remote . ToQueryResult ( set , 1e6 )
2018-06-16 10:26:37 -07:00
if err != nil {
http . Error ( w , err . Error ( ) , http . StatusInternalServerError )
return
}
}
2024-08-27 23:23:54 -07:00
w . Header ( ) . Set ( "Content-Type" , "application/x-protobuf" )
w . Header ( ) . Set ( "Content-Encoding" , "snappy" )
2018-06-16 10:26:37 -07:00
if err := remote . EncodeReadResponse ( & resp , w ) ; err != nil {
http . Error ( w , err . Error ( ) , http . StatusInternalServerError )
return
}
} )
return httptest . NewServer ( handler )
}
2021-03-16 02:47:45 -07:00
func testEndpoints ( t * testing . T , api * API , tr * testTargetRetriever , es storage . ExemplarStorage , testLabelAPI bool ) {
2016-12-30 01:43:44 -08:00
start := time . Unix ( 0 , 0 )
2019-12-10 06:56:16 -08:00
type targetMetadata struct {
identifier string
metadata [ ] scrape . MetricMetadata
}
2018-06-16 10:26:37 -07:00
type test struct {
2023-06-12 08:17:20 -07:00
endpoint apiFunc
params map [ string ] string
query url . Values
response interface { }
2024-03-06 01:58:40 -08:00
responseLen int // If nonzero, check only the length; `response` is ignored.
2023-06-12 08:17:20 -07:00
responseMetadataTotal int
2024-02-26 01:53:39 -08:00
responseAsJSON string
2024-05-21 10:07:29 -07:00
warningsCount int
2023-06-12 08:17:20 -07:00
errType errorType
sorter func ( interface { } )
metadata [ ] targetMetadata
exemplars [ ] exemplar . QueryResult
2023-10-17 19:02:03 -07:00
zeroFunc func ( interface { } )
}
rulesZeroFunc := func ( i interface { } ) {
if i != nil {
v := i . ( * RuleDiscovery )
for _ , ruleGroup := range v . RuleGroups {
ruleGroup . EvaluationTime = float64 ( 0 )
ruleGroup . LastEvaluation = time . Time { }
for k , rule := range ruleGroup . Rules {
switch r := rule . ( type ) {
case AlertingRule :
r . LastEvaluation = time . Time { }
r . EvaluationTime = float64 ( 0 )
r . LastError = ""
r . Health = "ok"
for _ , alert := range r . Alerts {
alert . ActiveAt = nil
}
ruleGroup . Rules [ k ] = r
case RecordingRule :
r . LastEvaluation = time . Time { }
r . EvaluationTime = float64 ( 0 )
r . LastError = ""
r . Health = "ok"
ruleGroup . Rules [ k ] = r
}
}
}
}
2018-06-16 10:26:37 -07:00
}
2021-10-22 01:06:44 -07:00
tests := [ ] test {
2015-06-04 09:07:57 -07:00
{
endpoint : api . query ,
query : url . Values {
"query" : [ ] string { "2" } ,
2016-12-30 01:43:44 -08:00
"time" : [ ] string { "123.4" } ,
2015-06-04 09:07:57 -07:00
} ,
2023-02-01 20:29:13 -08:00
response : & QueryData {
2020-02-03 10:23:07 -08:00
ResultType : parser . ValueTypeScalar ,
2016-12-30 01:43:44 -08:00
Result : promql . Scalar {
V : 2 ,
T : timestamp . FromTime ( start . Add ( 123 * time . Second + 400 * time . Millisecond ) ) ,
2015-06-04 09:07:57 -07:00
} ,
} ,
} ,
{
endpoint : api . query ,
query : url . Values {
"query" : [ ] string { "0.333" } ,
"time" : [ ] string { "1970-01-01T00:02:03Z" } ,
} ,
2023-02-01 20:29:13 -08:00
response : & QueryData {
2020-02-03 10:23:07 -08:00
ResultType : parser . ValueTypeScalar ,
2016-12-30 01:43:44 -08:00
Result : promql . Scalar {
V : 0.333 ,
T : timestamp . FromTime ( start . Add ( 123 * time . Second ) ) ,
2015-06-04 09:07:57 -07:00
} ,
} ,
} ,
{
endpoint : api . query ,
query : url . Values {
"query" : [ ] string { "0.333" } ,
"time" : [ ] string { "1970-01-01T01:02:03+01:00" } ,
} ,
2023-02-01 20:29:13 -08:00
response : & QueryData {
2020-02-03 10:23:07 -08:00
ResultType : parser . ValueTypeScalar ,
2016-12-30 01:43:44 -08:00
Result : promql . Scalar {
V : 0.333 ,
T : timestamp . FromTime ( start . Add ( 123 * time . Second ) ) ,
2015-06-04 09:07:57 -07:00
} ,
} ,
} ,
2015-11-11 11:46:57 -08:00
{
endpoint : api . query ,
query : url . Values {
"query" : [ ] string { "0.333" } ,
} ,
2023-02-01 20:29:13 -08:00
response : & QueryData {
2020-02-03 10:23:07 -08:00
ResultType : parser . ValueTypeScalar ,
2016-12-30 01:43:44 -08:00
Result : promql . Scalar {
V : 0.333 ,
2018-06-16 10:26:37 -07:00
T : timestamp . FromTime ( api . now ( ) ) ,
2015-11-11 11:46:57 -08:00
} ,
} ,
} ,
2015-06-09 04:44:49 -07:00
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "time()" } ,
"start" : [ ] string { "0" } ,
"end" : [ ] string { "2" } ,
"step" : [ ] string { "1" } ,
} ,
2023-02-01 20:29:13 -08:00
response : & QueryData {
2020-02-03 10:23:07 -08:00
ResultType : parser . ValueTypeMatrix ,
2016-12-30 01:43:44 -08:00
Result : promql . Matrix {
promql . Series {
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the one that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
Floats : [ ] promql . FPoint {
{ F : 0 , T : timestamp . FromTime ( start ) } ,
{ F : 1 , T : timestamp . FromTime ( start . Add ( 1 * time . Second ) ) } ,
{ F : 2 , T : timestamp . FromTime ( start . Add ( 2 * time . Second ) ) } ,
2015-06-09 04:44:49 -07:00
} ,
2022-02-27 06:19:21 -08:00
// No Metric returned - use zero value for comparison.
2015-06-09 04:44:49 -07:00
} ,
} ,
} ,
} ,
2024-04-28 12:33:52 -07:00
// Test empty vector result
{
endpoint : api . query ,
query : url . Values {
"query" : [ ] string { "bottomk(2, notExists)" } ,
} ,
responseAsJSON : ` { "resultType":"vector","result":[]} ` ,
} ,
// Test empty matrix result
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "bottomk(2, notExists)" } ,
"start" : [ ] string { "0" } ,
"end" : [ ] string { "2" } ,
"step" : [ ] string { "1" } ,
} ,
responseAsJSON : ` { "resultType":"matrix","result":[]} ` ,
} ,
2015-06-09 04:44:49 -07:00
// Missing query params in range queries.
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "time()" } ,
"end" : [ ] string { "2" } ,
"step" : [ ] string { "1" } ,
} ,
errType : errorBadData ,
} ,
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "time()" } ,
"start" : [ ] string { "0" } ,
"step" : [ ] string { "1" } ,
} ,
errType : errorBadData ,
} ,
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "time()" } ,
"start" : [ ] string { "0" } ,
"end" : [ ] string { "2" } ,
} ,
errType : errorBadData ,
} ,
// Bad query expression.
{
endpoint : api . query ,
query : url . Values {
"query" : [ ] string { "invalid][query" } ,
"time" : [ ] string { "1970-01-01T01:02:03+01:00" } ,
} ,
errType : errorBadData ,
} ,
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "invalid][query" } ,
"start" : [ ] string { "0" } ,
"end" : [ ] string { "100" } ,
"step" : [ ] string { "1" } ,
} ,
errType : errorBadData ,
} ,
2017-03-16 07:16:20 -07:00
// Invalid step.
2016-08-16 06:10:02 -07:00
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "time()" } ,
"start" : [ ] string { "1" } ,
"end" : [ ] string { "2" } ,
"step" : [ ] string { "0" } ,
} ,
errType : errorBadData ,
} ,
2017-03-16 07:16:20 -07:00
// Start after end.
2016-11-01 06:25:34 -07:00
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "time()" } ,
"start" : [ ] string { "2" } ,
"end" : [ ] string { "1" } ,
"step" : [ ] string { "1" } ,
} ,
errType : errorBadData ,
} ,
2017-03-16 07:16:20 -07:00
// Start overflows int64 internally.
{
endpoint : api . queryRange ,
query : url . Values {
"query" : [ ] string { "time()" } ,
"start" : [ ] string { "148966367200.372" } ,
"end" : [ ] string { "1489667272.372" } ,
"step" : [ ] string { "1" } ,
} ,
errType : errorBadData ,
} ,
2022-07-20 05:55:09 -07:00
{
endpoint : api . formatQuery ,
query : url . Values {
"query" : [ ] string { "foo+bar" } ,
} ,
response : "foo + bar" ,
} ,
{
endpoint : api . formatQuery ,
query : url . Values {
"query" : [ ] string { "invalid_expression/" } ,
} ,
errType : errorBadData ,
} ,
2015-06-09 07:09:31 -07:00
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" ) ,
2015-06-09 07:09:31 -07:00
} ,
} ,
2020-12-15 09:24:57 -08:00
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` { foo=""} ` } ,
} ,
errType : errorBadData ,
} ,
2015-06-09 07:09:31 -07:00
{
endpoint : api . series ,
query : url . Values {
2015-11-05 02:23:43 -08:00
"match[]" : [ ] string { ` test_metric1 { foo=~".+o"} ` } ,
2015-06-09 07:09:31 -07:00
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric1" , "foo" , "boo" ) ,
2015-06-09 07:09:31 -07:00
} ,
} ,
{
endpoint : api . series ,
query : url . Values {
2016-12-30 01:43:44 -08:00
"match[]" : [ ] string { ` test_metric1 { foo=~".+o$"} ` , ` test_metric1 { foo=~".+o"} ` } ,
2015-06-09 07:09:31 -07:00
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric1" , "foo" , "boo" ) ,
2015-06-09 07:09:31 -07:00
} ,
} ,
2020-08-28 16:21:39 -07:00
// Try to overlap the selected series set as much as possible to test the result de-duplication works well.
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` test_metric4 { foo=~".+o$"} ` , ` test_metric4 { dup=~"^1"} ` } ,
} ,
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric4" , "dup" , "1" , "foo" , "bar" ) ,
labels . FromStrings ( "__name__" , "test_metric4" , "dup" , "1" , "foo" , "boo" ) ,
labels . FromStrings ( "__name__" , "test_metric4" , "foo" , "boo" ) ,
} ,
} ,
2015-06-09 07:09:31 -07:00
{
endpoint : api . series ,
query : url . Values {
2015-11-05 02:23:43 -08:00
"match[]" : [ ] string { ` test_metric1 { foo=~".+o"} ` , ` none ` } ,
2015-06-09 07:09:31 -07:00
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric1" , "foo" , "boo" ) ,
2015-06-09 07:09:31 -07:00
} ,
} ,
2016-05-11 14:59:52 -07:00
// Start and end before series starts.
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
"start" : [ ] string { "-2" } ,
"end" : [ ] string { "-1" } ,
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels { } ,
2016-05-11 14:59:52 -07:00
} ,
// Start and end after series ends.
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
"start" : [ ] string { "100000" } ,
"end" : [ ] string { "100001" } ,
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels { } ,
2016-05-11 14:59:52 -07:00
} ,
// Start before series starts, end after series ends.
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
"start" : [ ] string { "-1" } ,
"end" : [ ] string { "100000" } ,
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" ) ,
2016-05-11 14:59:52 -07:00
} ,
} ,
// Start and end within series.
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
"start" : [ ] string { "1" } ,
"end" : [ ] string { "100" } ,
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" ) ,
2016-05-11 14:59:52 -07:00
} ,
} ,
// Start within series, end after.
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
"start" : [ ] string { "1" } ,
"end" : [ ] string { "100000" } ,
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" ) ,
2016-05-11 14:59:52 -07:00
} ,
} ,
// Start before series, end within series.
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
"start" : [ ] string { "-1" } ,
"end" : [ ] string { "1" } ,
} ,
2016-12-30 01:43:44 -08:00
response : [ ] labels . Labels {
labels . FromStrings ( "__name__" , "test_metric2" , "foo" , "boo" ) ,
2016-05-11 14:59:52 -07:00
} ,
} ,
2024-03-06 01:58:40 -08:00
// Series request with limit.
2024-02-29 07:31:13 -08:00
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { "test_metric1" } ,
"limit" : [ ] string { "1" } ,
} ,
2024-05-21 10:07:29 -07:00
responseLen : 1 , // API does not specify which particular value will come back.
warningsCount : 1 ,
} ,
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { "test_metric1" } ,
"limit" : [ ] string { "2" } ,
} ,
responseLen : 2 , // API does not specify which particular value will come back.
warningsCount : 0 , // No warnings if limit isn't exceeded.
2024-02-29 07:31:13 -08:00
} ,
2024-05-15 11:39:54 -07:00
{
endpoint : api . series ,
query : url . Values {
"match[]" : [ ] string { "test_metric1" } ,
"limit" : [ ] string { "0" } ,
} ,
responseLen : 2 , // API does not specify which particular value will come back.
warningsCount : 0 , // No warnings if limit isn't exceeded.
} ,
2024-03-06 01:58:40 -08:00
// Missing match[] query params in series requests.
2015-06-09 07:09:31 -07:00
{
endpoint : api . series ,
errType : errorBadData ,
} ,
{
endpoint : api . dropSeries ,
2017-07-06 05:38:40 -07:00
errType : errorInternal ,
2015-06-09 07:09:31 -07:00
} ,
2017-05-11 08:09:24 -07:00
{
2016-12-02 04:31:43 -08:00
endpoint : api . targets ,
2017-01-13 08:15:04 -08:00
response : & TargetDiscovery {
2018-10-25 01:19:20 -07:00
ActiveTargets : [ ] * Target {
{
2024-01-29 02:19:02 -08:00
DiscoveredLabels : labels . FromStrings ( ) ,
Labels : labels . FromStrings ( "job" , "blackbox" ) ,
2019-11-11 13:42:24 -08:00
ScrapePool : "blackbox" ,
ScrapeURL : "http://localhost:9115/probe?target=example.com" ,
2020-02-17 09:19:15 -08:00
GlobalURL : "http://localhost:9115/probe?target=example.com" ,
2019-11-11 13:42:24 -08:00
Health : "down" ,
2020-02-17 09:19:15 -08:00
LastError : "failed: missing port in address" ,
2019-11-11 13:42:24 -08:00
LastScrape : scrapeStart ,
LastScrapeDuration : 0.1 ,
2021-08-31 08:37:32 -07:00
ScrapeInterval : "20s" ,
ScrapeTimeout : "10s" ,
2018-10-25 01:19:20 -07:00
} ,
{
2024-01-29 02:19:02 -08:00
DiscoveredLabels : labels . FromStrings ( ) ,
Labels : labels . FromStrings ( "job" , "test" ) ,
2019-11-11 13:42:24 -08:00
ScrapePool : "test" ,
ScrapeURL : "http://example.com:8080/metrics" ,
2020-02-17 09:19:15 -08:00
GlobalURL : "http://example.com:8080/metrics" ,
2019-11-11 13:42:24 -08:00
Health : "up" ,
LastError : "" ,
LastScrape : scrapeStart ,
LastScrapeDuration : 0.07 ,
2021-08-31 08:37:32 -07:00
ScrapeInterval : "15s" ,
ScrapeTimeout : "5s" ,
2019-11-11 13:42:24 -08:00
} ,
} ,
DroppedTargets : [ ] * DroppedTarget {
{
2024-01-29 02:20:20 -08:00
DiscoveredLabels : labels . FromStrings (
"__address__" , "http://dropped.example.com:9115" ,
"__metrics_path__" , "/probe" ,
"__scheme__" , "http" ,
"job" , "blackbox" ,
"__scrape_interval__" , "30s" ,
"__scrape_timeout__" , "15s" ,
) ,
2017-01-13 08:15:04 -08:00
} ,
2016-12-02 04:31:43 -08:00
} ,
2023-08-14 07:39:25 -07:00
DroppedTargetCounts : map [ string ] int { "blackbox" : 1 } ,
2019-11-11 13:42:24 -08:00
} ,
} ,
{
endpoint : api . targets ,
query : url . Values {
"state" : [ ] string { "any" } ,
} ,
response : & TargetDiscovery {
ActiveTargets : [ ] * Target {
{
2024-01-29 02:19:02 -08:00
DiscoveredLabels : labels . FromStrings ( ) ,
Labels : labels . FromStrings ( "job" , "blackbox" ) ,
2019-11-11 13:42:24 -08:00
ScrapePool : "blackbox" ,
ScrapeURL : "http://localhost:9115/probe?target=example.com" ,
2020-02-17 09:19:15 -08:00
GlobalURL : "http://localhost:9115/probe?target=example.com" ,
2019-11-11 13:42:24 -08:00
Health : "down" ,
2020-02-17 09:19:15 -08:00
LastError : "failed: missing port in address" ,
2019-11-11 13:42:24 -08:00
LastScrape : scrapeStart ,
LastScrapeDuration : 0.1 ,
2021-08-31 08:37:32 -07:00
ScrapeInterval : "20s" ,
ScrapeTimeout : "10s" ,
2019-11-11 13:42:24 -08:00
} ,
{
2024-01-29 02:19:02 -08:00
DiscoveredLabels : labels . FromStrings ( ) ,
Labels : labels . FromStrings ( "job" , "test" ) ,
2019-11-11 13:42:24 -08:00
ScrapePool : "test" ,
ScrapeURL : "http://example.com:8080/metrics" ,
2020-02-17 09:19:15 -08:00
GlobalURL : "http://example.com:8080/metrics" ,
2019-11-11 13:42:24 -08:00
Health : "up" ,
LastError : "" ,
LastScrape : scrapeStart ,
LastScrapeDuration : 0.07 ,
2021-08-31 08:37:32 -07:00
ScrapeInterval : "15s" ,
ScrapeTimeout : "5s" ,
2019-11-11 13:42:24 -08:00
} ,
} ,
DroppedTargets : [ ] * DroppedTarget {
{
2024-01-29 02:20:20 -08:00
DiscoveredLabels : labels . FromStrings (
"__address__" , "http://dropped.example.com:9115" ,
"__metrics_path__" , "/probe" ,
"__scheme__" , "http" ,
"job" , "blackbox" ,
"__scrape_interval__" , "30s" ,
"__scrape_timeout__" , "15s" ,
) ,
2019-11-11 13:42:24 -08:00
} ,
} ,
2023-08-14 07:39:25 -07:00
DroppedTargetCounts : map [ string ] int { "blackbox" : 1 } ,
2019-11-11 13:42:24 -08:00
} ,
} ,
{
endpoint : api . targets ,
query : url . Values {
"state" : [ ] string { "active" } ,
} ,
response : & TargetDiscovery {
ActiveTargets : [ ] * Target {
{
2024-01-29 02:19:02 -08:00
DiscoveredLabels : labels . FromStrings ( ) ,
Labels : labels . FromStrings ( "job" , "blackbox" ) ,
2019-11-11 13:42:24 -08:00
ScrapePool : "blackbox" ,
ScrapeURL : "http://localhost:9115/probe?target=example.com" ,
2020-02-17 09:19:15 -08:00
GlobalURL : "http://localhost:9115/probe?target=example.com" ,
2019-11-11 13:42:24 -08:00
Health : "down" ,
2020-02-17 09:19:15 -08:00
LastError : "failed: missing port in address" ,
2019-11-11 13:42:24 -08:00
LastScrape : scrapeStart ,
LastScrapeDuration : 0.1 ,
2021-08-31 08:37:32 -07:00
ScrapeInterval : "20s" ,
ScrapeTimeout : "10s" ,
2019-11-11 13:42:24 -08:00
} ,
{
2024-01-29 02:19:02 -08:00
DiscoveredLabels : labels . FromStrings ( ) ,
Labels : labels . FromStrings ( "job" , "test" ) ,
2019-11-11 13:42:24 -08:00
ScrapePool : "test" ,
ScrapeURL : "http://example.com:8080/metrics" ,
2020-02-17 09:19:15 -08:00
GlobalURL : "http://example.com:8080/metrics" ,
2019-11-11 13:42:24 -08:00
Health : "up" ,
LastError : "" ,
LastScrape : scrapeStart ,
LastScrapeDuration : 0.07 ,
2021-08-31 08:37:32 -07:00
ScrapeInterval : "15s" ,
ScrapeTimeout : "5s" ,
2019-11-11 13:42:24 -08:00
} ,
} ,
DroppedTargets : [ ] * DroppedTarget { } ,
} ,
} ,
{
endpoint : api . targets ,
query : url . Values {
"state" : [ ] string { "Dropped" } ,
} ,
response : & TargetDiscovery {
ActiveTargets : [ ] * Target { } ,
2018-10-25 01:19:20 -07:00
DroppedTargets : [ ] * DroppedTarget {
{
2024-01-29 02:20:20 -08:00
DiscoveredLabels : labels . FromStrings (
"__address__" , "http://dropped.example.com:9115" ,
"__metrics_path__" , "/probe" ,
"__scheme__" , "http" ,
"job" , "blackbox" ,
"__scrape_interval__" , "30s" ,
"__scrape_timeout__" , "15s" ,
) ,
2018-02-21 09:26:18 -08:00
} ,
} ,
2023-08-14 07:39:25 -07:00
DroppedTargetCounts : map [ string ] int { "blackbox" : 1 } ,
2016-12-02 04:31:43 -08:00
} ,
2017-05-11 08:09:24 -07:00
} ,
2019-12-04 11:33:01 -08:00
// With a matching metric.
{
endpoint : api . targetMetadata ,
query : url . Values {
"metric" : [ ] string { "go_threads" } ,
} ,
2019-12-10 06:56:16 -08:00
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "Number of OS threads created." ,
Unit : "" ,
} ,
} ,
} ,
} ,
2019-12-04 11:33:01 -08:00
response : [ ] metricMetadata {
{
Target : labels . FromMap ( map [ string ] string {
"job" : "test" ,
} ) ,
Help : "Number of OS threads created." ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-04 11:33:01 -08:00
Unit : "" ,
} ,
} ,
} ,
// With a matching target.
{
endpoint : api . targetMetadata ,
query : url . Values {
"match_target" : [ ] string { "{job=\"blackbox\"}" } ,
} ,
2019-12-10 06:56:16 -08:00
metadata : [ ] targetMetadata {
{
identifier : "blackbox" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "prometheus_tsdb_storage_blocks_bytes" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "The number of bytes that are currently used for local storage by all blocks." ,
Unit : "" ,
} ,
} ,
} ,
} ,
2019-12-04 11:33:01 -08:00
response : [ ] metricMetadata {
{
Target : labels . FromMap ( map [ string ] string {
"job" : "blackbox" ,
} ) ,
Metric : "prometheus_tsdb_storage_blocks_bytes" ,
Help : "The number of bytes that are currently used for local storage by all blocks." ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-04 11:33:01 -08:00
Unit : "" ,
} ,
} ,
} ,
// Without a target or metric.
{
endpoint : api . targetMetadata ,
2019-12-10 06:56:16 -08:00
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "Number of OS threads created." ,
Unit : "" ,
} ,
} ,
} ,
{
identifier : "blackbox" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "prometheus_tsdb_storage_blocks_bytes" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "The number of bytes that are currently used for local storage by all blocks." ,
Unit : "" ,
} ,
} ,
} ,
} ,
2019-12-04 11:33:01 -08:00
response : [ ] metricMetadata {
{
Target : labels . FromMap ( map [ string ] string {
"job" : "test" ,
} ) ,
Metric : "go_threads" ,
Help : "Number of OS threads created." ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-04 11:33:01 -08:00
Unit : "" ,
} ,
{
Target : labels . FromMap ( map [ string ] string {
"job" : "blackbox" ,
} ) ,
Metric : "prometheus_tsdb_storage_blocks_bytes" ,
Help : "The number of bytes that are currently used for local storage by all blocks." ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-04 11:33:01 -08:00
Unit : "" ,
} ,
} ,
2019-12-09 13:36:38 -08:00
sorter : func ( m interface { } ) {
sort . Slice ( m . ( [ ] metricMetadata ) , func ( i , j int ) bool {
s := m . ( [ ] metricMetadata )
return s [ i ] . Metric < s [ j ] . Metric
} )
} ,
2019-12-04 11:33:01 -08:00
} ,
// Without a matching metric.
{
endpoint : api . targetMetadata ,
query : url . Values {
"match_target" : [ ] string { "{job=\"non-existentblackbox\"}" } ,
} ,
2019-12-10 06:56:16 -08:00
response : [ ] metricMetadata { } ,
2019-12-04 11:33:01 -08:00
} ,
2015-06-09 07:09:31 -07:00
{
2017-01-13 01:20:11 -08:00
endpoint : api . alertmanagers ,
response : & AlertmanagerDiscovery {
ActiveAlertmanagers : [ ] * AlertmanagerTarget {
2017-04-05 06:24:22 -07:00
{
2017-01-13 01:20:11 -08:00
URL : "http://alertmanager.example.com:8080/api/v1/alerts" ,
} ,
} ,
2018-02-21 01:00:07 -08:00
DroppedAlertmanagers : [ ] * AlertmanagerTarget {
{
URL : "http://dropped.alertmanager.example.com:8080/api/v1/alerts" ,
} ,
} ,
2017-01-13 01:20:11 -08:00
} ,
2015-06-09 07:09:31 -07:00
} ,
2019-12-10 06:56:16 -08:00
// With metadata available.
{
endpoint : api . metricMetadata ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "prometheus_engine_query_duration_seconds" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeSummary ,
2019-12-10 06:56:16 -08:00
Help : "Query timings" ,
Unit : "" ,
} ,
{
Metric : "go_info" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "Information about the Go environment." ,
Unit : "" ,
} ,
} ,
} ,
} ,
2023-11-22 06:39:21 -08:00
response : map [ string ] [ ] metadata . Metadata {
"prometheus_engine_query_duration_seconds" : { { Type : model . MetricTypeSummary , Help : "Query timings" , Unit : "" } } ,
"go_info" : { { Type : model . MetricTypeGauge , Help : "Information about the Go environment." , Unit : "" } } ,
2019-12-10 06:56:16 -08:00
} ,
2024-02-26 01:53:39 -08:00
responseAsJSON : ` { "prometheus_engine_query_duration_seconds" : [ { "type" : "summary" , "unit" : "" ,
"help" : "Query timings" } ] , "go_info" : [ { "type" : "gauge" , "unit" : "" ,
"help" : "Information about the Go environment." } ] } ` ,
2019-12-10 06:56:16 -08:00
} ,
// With duplicate metadata for a metric that comes from different targets.
{
endpoint : api . metricMetadata ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
} ,
} ,
{
identifier : "blackbox" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
} ,
} ,
} ,
2023-11-22 06:39:21 -08:00
response : map [ string ] [ ] metadata . Metadata {
"go_threads" : { { Type : model . MetricTypeGauge , Help : "Number of OS threads created" } } ,
2019-12-10 06:56:16 -08:00
} ,
2024-02-26 01:53:39 -08:00
responseAsJSON : ` { "go_threads" : [ { "type" : "gauge" , "unit" : "" ,
"help" : "Number of OS threads created" } ] } ` ,
2019-12-10 06:56:16 -08:00
} ,
// With non-duplicate metadata for the same metric from different targets.
{
endpoint : api . metricMetadata ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
} ,
} ,
{
identifier : "blackbox" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "Number of OS threads that were created." ,
Unit : "" ,
} ,
} ,
} ,
} ,
2023-11-22 06:39:21 -08:00
response : map [ string ] [ ] metadata . Metadata {
2020-06-22 07:29:35 -07:00
"go_threads" : {
2023-11-22 06:39:21 -08:00
{ Type : model . MetricTypeGauge , Help : "Number of OS threads created" } ,
{ Type : model . MetricTypeGauge , Help : "Number of OS threads that were created." } ,
2019-12-10 06:56:16 -08:00
} ,
} ,
2024-02-26 01:53:39 -08:00
responseAsJSON : ` { "go_threads" : [ { "type" : "gauge" , "unit" : "" ,
"help" : "Number of OS threads created" } , { "type" : "gauge" , "unit" : "" ,
"help" : "Number of OS threads that were created." } ] } ` ,
2019-12-10 06:56:16 -08:00
sorter : func ( m interface { } ) {
2023-11-22 06:39:21 -08:00
v := m . ( map [ string ] [ ] metadata . Metadata ) [ "go_threads" ]
2019-12-10 06:56:16 -08:00
sort . Slice ( v , func ( i , j int ) bool {
return v [ i ] . Help < v [ j ] . Help
} )
} ,
} ,
2019-12-10 07:15:13 -08:00
// With a limit for the number of metrics returned.
2019-12-10 06:56:16 -08:00
{
endpoint : api . metricMetadata ,
query : url . Values {
"limit" : [ ] string { "2" } ,
} ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 06:56:16 -08:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
{
Metric : "prometheus_engine_query_duration_seconds" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeSummary ,
2024-02-26 01:53:39 -08:00
Help : "Query Timings." ,
2019-12-10 06:56:16 -08:00
Unit : "" ,
} ,
} ,
} ,
{
identifier : "blackbox" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_gc_duration_seconds" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeSummary ,
2019-12-10 06:56:16 -08:00
Help : "A summary of the GC invocation durations." ,
Unit : "" ,
} ,
} ,
} ,
} ,
responseLen : 2 ,
} ,
2023-06-12 08:17:20 -07:00
// With a limit for the number of metadata per metric.
{
endpoint : api . metricMetadata ,
query : url . Values { "limit_per_metric" : [ ] string { "1" } } ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2023-06-12 08:17:20 -07:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2023-06-12 08:17:20 -07:00
Help : "Repeated metadata" ,
Unit : "" ,
} ,
{
Metric : "go_gc_duration_seconds" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeSummary ,
2023-06-12 08:17:20 -07:00
Help : "A summary of the GC invocation durations." ,
Unit : "" ,
} ,
} ,
} ,
} ,
2023-11-22 06:39:21 -08:00
response : map [ string ] [ ] metadata . Metadata {
2023-06-12 08:17:20 -07:00
"go_threads" : {
2023-11-22 06:39:21 -08:00
{ Type : model . MetricTypeGauge , Help : "Number of OS threads created" } ,
2023-06-12 08:17:20 -07:00
} ,
"go_gc_duration_seconds" : {
2023-11-22 06:39:21 -08:00
{ Type : model . MetricTypeSummary , Help : "A summary of the GC invocation durations." } ,
2023-06-12 08:17:20 -07:00
} ,
} ,
2024-02-26 01:53:39 -08:00
responseAsJSON : ` { "go_gc_duration_seconds":[ { "help":"A summary of the GC invocation durations.","type":"summary","unit":""}],"go_threads": [ { "type":"gauge","unit":"","help":"Number of OS threads created"}]} ` ,
2023-06-12 08:17:20 -07:00
} ,
// With a limit for the number of metadata per metric and per metric.
{
endpoint : api . metricMetadata ,
query : url . Values { "limit_per_metric" : [ ] string { "1" } , "limit" : [ ] string { "1" } } ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2023-06-12 08:17:20 -07:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2023-06-12 08:17:20 -07:00
Help : "Repeated metadata" ,
Unit : "" ,
} ,
{
Metric : "go_gc_duration_seconds" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeSummary ,
2023-06-12 08:17:20 -07:00
Help : "A summary of the GC invocation durations." ,
Unit : "" ,
} ,
} ,
} ,
} ,
responseLen : 1 ,
responseMetadataTotal : 1 ,
} ,
// With a limit for the number of metadata per metric and per metric, while having multiple targets.
{
endpoint : api . metricMetadata ,
query : url . Values { "limit_per_metric" : [ ] string { "1" } , "limit" : [ ] string { "1" } } ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2023-06-12 08:17:20 -07:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2023-06-12 08:17:20 -07:00
Help : "Repeated metadata" ,
Unit : "" ,
} ,
{
Metric : "go_gc_duration_seconds" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeSummary ,
2023-06-12 08:17:20 -07:00
Help : "A summary of the GC invocation durations." ,
Unit : "" ,
} ,
} ,
} ,
{
identifier : "secondTarget" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2023-06-12 08:17:20 -07:00
Help : "Number of OS threads created, but from a different target" ,
Unit : "" ,
} ,
{
Metric : "go_gc_duration_seconds" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeSummary ,
2023-06-12 08:17:20 -07:00
Help : "A summary of the GC invocation durations, but from a different target." ,
Unit : "" ,
} ,
} ,
} ,
} ,
responseLen : 1 ,
responseMetadataTotal : 1 ,
} ,
2019-12-10 07:22:10 -08:00
// When requesting a specific metric that is present.
{
endpoint : api . metricMetadata ,
query : url . Values { "metric" : [ ] string { "go_threads" } } ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 07:22:10 -08:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
} ,
} ,
{
identifier : "blackbox" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_gc_duration_seconds" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeSummary ,
2019-12-10 07:22:10 -08:00
Help : "A summary of the GC invocation durations." ,
Unit : "" ,
} ,
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 07:22:10 -08:00
Help : "Number of OS threads that were created." ,
Unit : "" ,
} ,
} ,
} ,
} ,
2023-11-22 06:39:21 -08:00
response : map [ string ] [ ] metadata . Metadata {
2020-06-22 07:29:35 -07:00
"go_threads" : {
2023-11-22 06:39:21 -08:00
{ Type : model . MetricTypeGauge , Help : "Number of OS threads created" } ,
{ Type : model . MetricTypeGauge , Help : "Number of OS threads that were created." } ,
2019-12-10 07:22:10 -08:00
} ,
} ,
2024-02-26 01:53:39 -08:00
responseAsJSON : ` { "go_threads": [ { "type":"gauge","unit":"","help":"Number of OS threads created"}, { "type":"gauge","unit":"","help":"Number of OS threads that were created."}]} ` ,
2019-12-10 07:22:10 -08:00
sorter : func ( m interface { } ) {
2023-11-22 06:39:21 -08:00
v := m . ( map [ string ] [ ] metadata . Metadata ) [ "go_threads" ]
2019-12-10 07:22:10 -08:00
sort . Slice ( v , func ( i , j int ) bool {
return v [ i ] . Help < v [ j ] . Help
} )
} ,
} ,
// With a specific metric that is not present.
{
endpoint : api . metricMetadata ,
query : url . Values { "metric" : [ ] string { "go_gc_duration_seconds" } } ,
metadata : [ ] targetMetadata {
{
identifier : "test" ,
metadata : [ ] scrape . MetricMetadata {
{
Metric : "go_threads" ,
2023-11-22 06:39:21 -08:00
Type : model . MetricTypeGauge ,
2019-12-10 07:22:10 -08:00
Help : "Number of OS threads created" ,
Unit : "" ,
} ,
} ,
} ,
} ,
2023-11-22 06:39:21 -08:00
response : map [ string ] [ ] metadata . Metadata { } ,
2019-12-10 07:22:10 -08:00
} ,
2019-12-10 07:15:13 -08:00
// With no available metadata.
2019-12-10 06:56:16 -08:00
{
endpoint : api . metricMetadata ,
2023-11-22 06:39:21 -08:00
response : map [ string ] [ ] metadata . Metadata { } ,
2019-12-10 06:56:16 -08:00
} ,
2017-05-11 08:09:24 -07:00
{
endpoint : api . serveConfig ,
response : & prometheusConfig {
YAML : samplePrometheusCfg . String ( ) ,
} ,
} ,
api: Added v1/status/flags endpoint. (#3864)
Endpoint URL: /api/v1/status/flags
Example Output:
```json
{
"status": "success",
"data": {
"alertmanager.notification-queue-capacity": "10000",
"alertmanager.timeout": "10s",
"completion-bash": "false",
"completion-script-bash": "false",
"completion-script-zsh": "false",
"config.file": "my_cool_prometheus.yaml",
"help": "false",
"help-long": "false",
"help-man": "false",
"log.level": "info",
"query.lookback-delta": "5m",
"query.max-concurrency": "20",
"query.timeout": "2m",
"storage.tsdb.max-block-duration": "36h",
"storage.tsdb.min-block-duration": "2h",
"storage.tsdb.no-lockfile": "false",
"storage.tsdb.path": "data/",
"storage.tsdb.retention": "15d",
"version": "false",
"web.console.libraries": "console_libraries",
"web.console.templates": "consoles",
"web.enable-admin-api": "false",
"web.enable-lifecycle": "false",
"web.external-url": "",
"web.listen-address": "0.0.0.0:9090",
"web.max-connections": "512",
"web.read-timeout": "5m",
"web.route-prefix": "/",
"web.user-assets": ""
}
}
```
Signed-off-by: Bartek Plotka <bwplotka@gmail.com>
2018-02-21 00:49:02 -08:00
{
endpoint : api . serveFlags ,
response : sampleFlagMap ,
} ,
2018-03-25 09:50:34 -07:00
{
endpoint : api . alerts ,
response : & AlertDiscovery {
2023-10-17 19:02:03 -07:00
Alerts : [ ] * Alert {
{
Labels : labels . FromStrings ( "alertname" , "test_metric5" , "name" , "tm5" ) ,
Annotations : labels . Labels { } ,
State : "pending" ,
Value : "1e+00" ,
} ,
} ,
} ,
zeroFunc : func ( i interface { } ) {
if i != nil {
v := i . ( * AlertDiscovery )
for _ , alert := range v . Alerts {
alert . ActiveAt = nil
}
}
2018-03-25 09:50:34 -07:00
} ,
} ,
{
endpoint : api . rules ,
2018-06-27 00:15:17 -07:00
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
2018-03-25 09:50:34 -07:00
{
2018-06-27 00:15:17 -07:00
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
2022-01-11 19:44:22 -08:00
Limit : 0 ,
2021-11-21 09:00:27 -08:00
Rules : [ ] Rule {
AlertingRule {
2019-12-09 14:42:59 -08:00
State : "inactive" ,
2018-06-27 00:15:17 -07:00
Name : "test_metric3" ,
Query : "absent(test_metric3) != 1" ,
Duration : 1 ,
Labels : labels . Labels { } ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
2023-10-17 19:02:03 -07:00
Health : "ok" ,
2018-06-27 00:15:17 -07:00
Type : "alerting" ,
} ,
2021-11-21 09:00:27 -08:00
AlertingRule {
2019-12-09 14:42:59 -08:00
State : "inactive" ,
2018-06-27 00:15:17 -07:00
Name : "test_metric4" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . Labels { } ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
2023-10-17 19:02:03 -07:00
Health : "ok" ,
Type : "alerting" ,
} ,
AlertingRule {
State : "pending" ,
Name : "test_metric5" ,
Query : "vector(1)" ,
Duration : 1 ,
Labels : labels . FromStrings ( "name" , "tm5" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert {
{
Labels : labels . FromStrings ( "alertname" , "test_metric5" , "name" , "tm5" ) ,
Annotations : labels . Labels { } ,
State : "pending" ,
Value : "1e+00" ,
} ,
} ,
Health : "ok" ,
Type : "alerting" ,
} ,
2024-07-10 05:18:29 -07:00
AlertingRule {
State : "inactive" ,
Name : "test_metric6" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
Health : "ok" ,
Type : "alerting" ,
} ,
AlertingRule {
State : "inactive" ,
Name : "test_metric7" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "templatedlabel" , "{{ $externalURL }}" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
Health : "ok" ,
Type : "alerting" ,
} ,
2023-10-17 19:02:03 -07:00
RecordingRule {
Name : "recording-rule-1" ,
Query : "vector(1)" ,
Labels : labels . Labels { } ,
Health : "ok" ,
Type : "recording" ,
} ,
2024-07-10 05:18:29 -07:00
RecordingRule {
Name : "recording-rule-2" ,
Query : "vector(1)" ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Health : "ok" ,
Type : "recording" ,
} ,
2023-10-17 19:02:03 -07:00
} ,
} ,
} ,
} ,
zeroFunc : rulesZeroFunc ,
} ,
{
endpoint : api . rules ,
query : url . Values {
"exclude_alerts" : [ ] string { "true" } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
Limit : 0 ,
Rules : [ ] Rule {
AlertingRule {
State : "inactive" ,
Name : "test_metric3" ,
Query : "absent(test_metric3) != 1" ,
Duration : 1 ,
Labels : labels . Labels { } ,
Annotations : labels . Labels { } ,
Alerts : nil ,
Health : "ok" ,
Type : "alerting" ,
} ,
AlertingRule {
State : "inactive" ,
Name : "test_metric4" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . Labels { } ,
Annotations : labels . Labels { } ,
Alerts : nil ,
Health : "ok" ,
Type : "alerting" ,
} ,
AlertingRule {
State : "pending" ,
Name : "test_metric5" ,
Query : "vector(1)" ,
Duration : 1 ,
Labels : labels . FromStrings ( "name" , "tm5" ) ,
Annotations : labels . Labels { } ,
Alerts : nil ,
Health : "ok" ,
2018-06-27 00:15:17 -07:00
Type : "alerting" ,
2018-03-25 09:50:34 -07:00
} ,
2024-07-10 05:18:29 -07:00
AlertingRule {
State : "inactive" ,
Name : "test_metric6" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Annotations : labels . Labels { } ,
Alerts : nil ,
Health : "ok" ,
Type : "alerting" ,
} ,
AlertingRule {
State : "inactive" ,
Name : "test_metric7" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "templatedlabel" , "{{ $externalURL }}" ) ,
Annotations : labels . Labels { } ,
Alerts : nil ,
Health : "ok" ,
Type : "alerting" ,
} ,
2021-11-21 09:00:27 -08:00
RecordingRule {
2018-06-27 00:15:17 -07:00
Name : "recording-rule-1" ,
Query : "vector(1)" ,
Labels : labels . Labels { } ,
2023-10-17 19:02:03 -07:00
Health : "ok" ,
2018-06-27 00:15:17 -07:00
Type : "recording" ,
2018-03-25 09:50:34 -07:00
} ,
2024-07-10 05:18:29 -07:00
RecordingRule {
Name : "recording-rule-2" ,
Query : "vector(1)" ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Health : "ok" ,
Type : "recording" ,
} ,
2018-03-25 09:50:34 -07:00
} ,
} ,
} ,
} ,
2023-10-17 19:02:03 -07:00
zeroFunc : rulesZeroFunc ,
2018-03-25 09:50:34 -07:00
} ,
2019-12-09 14:42:59 -08:00
{
endpoint : api . rules ,
query : url . Values {
"type" : [ ] string { "alert" } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
2022-01-11 19:44:22 -08:00
Limit : 0 ,
2021-11-21 09:00:27 -08:00
Rules : [ ] Rule {
AlertingRule {
2019-12-09 14:42:59 -08:00
State : "inactive" ,
Name : "test_metric3" ,
Query : "absent(test_metric3) != 1" ,
Duration : 1 ,
Labels : labels . Labels { } ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
2023-10-17 19:02:03 -07:00
Health : "ok" ,
2019-12-09 14:42:59 -08:00
Type : "alerting" ,
} ,
2021-11-21 09:00:27 -08:00
AlertingRule {
2019-12-09 14:42:59 -08:00
State : "inactive" ,
Name : "test_metric4" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . Labels { } ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
2023-10-17 19:02:03 -07:00
Health : "ok" ,
2019-12-09 14:42:59 -08:00
Type : "alerting" ,
} ,
2023-10-17 19:02:03 -07:00
AlertingRule {
State : "pending" ,
Name : "test_metric5" ,
Query : "vector(1)" ,
Duration : 1 ,
Labels : labels . FromStrings ( "name" , "tm5" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert {
{
Labels : labels . FromStrings ( "alertname" , "test_metric5" , "name" , "tm5" ) ,
Annotations : labels . Labels { } ,
State : "pending" ,
Value : "1e+00" ,
} ,
} ,
Health : "ok" ,
Type : "alerting" ,
} ,
2024-07-10 05:18:29 -07:00
AlertingRule {
State : "inactive" ,
Name : "test_metric6" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
Health : "ok" ,
Type : "alerting" ,
} ,
AlertingRule {
State : "inactive" ,
Name : "test_metric7" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "templatedlabel" , "{{ $externalURL }}" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
Health : "ok" ,
Type : "alerting" ,
} ,
2019-12-09 14:42:59 -08:00
} ,
} ,
} ,
} ,
2023-10-17 19:02:03 -07:00
zeroFunc : rulesZeroFunc ,
2019-12-09 14:42:59 -08:00
} ,
{
endpoint : api . rules ,
query : url . Values {
"type" : [ ] string { "record" } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
2022-01-11 19:44:22 -08:00
Limit : 0 ,
2021-11-21 09:00:27 -08:00
Rules : [ ] Rule {
RecordingRule {
2019-12-09 14:42:59 -08:00
Name : "recording-rule-1" ,
Query : "vector(1)" ,
Labels : labels . Labels { } ,
2023-10-17 19:02:03 -07:00
Health : "ok" ,
2019-12-09 14:42:59 -08:00
Type : "recording" ,
} ,
2024-07-10 05:18:29 -07:00
RecordingRule {
Name : "recording-rule-2" ,
Query : "vector(1)" ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Health : "ok" ,
Type : "recording" ,
} ,
2019-12-09 14:42:59 -08:00
} ,
} ,
} ,
} ,
2023-10-17 19:02:03 -07:00
zeroFunc : rulesZeroFunc ,
2019-12-09 14:42:59 -08:00
} ,
2023-04-18 02:07:32 -07:00
{
endpoint : api . rules ,
2023-04-18 08:26:21 -07:00
query : url . Values { "rule_name[]" : [ ] string { "test_metric4" } } ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
Limit : 0 ,
Rules : [ ] Rule {
AlertingRule {
State : "inactive" ,
Name : "test_metric4" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . Labels { } ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
2023-10-17 19:02:03 -07:00
Health : "ok" ,
2023-04-18 08:26:21 -07:00
Type : "alerting" ,
} ,
} ,
} ,
} ,
} ,
2023-10-17 19:02:03 -07:00
zeroFunc : rulesZeroFunc ,
2023-04-18 08:26:21 -07:00
} ,
{
endpoint : api . rules ,
query : url . Values { "rule_group[]" : [ ] string { "respond-with-nothing" } } ,
2023-04-20 03:20:10 -07:00
response : & RuleDiscovery { RuleGroups : [ ] * RuleGroup { } } ,
2023-04-18 08:26:21 -07:00
} ,
{
endpoint : api . rules ,
query : url . Values { "file[]" : [ ] string { "/path/to/file" } , "rule_name[]" : [ ] string { "test_metric4" } } ,
2023-04-18 02:07:32 -07:00
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
Limit : 0 ,
Rules : [ ] Rule {
AlertingRule {
State : "inactive" ,
Name : "test_metric4" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . Labels { } ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
2023-10-17 19:02:03 -07:00
Health : "ok" ,
2023-04-18 02:07:32 -07:00
Type : "alerting" ,
2024-07-10 05:18:29 -07:00
} ,
} ,
} ,
} ,
} ,
zeroFunc : rulesZeroFunc ,
} ,
{
endpoint : api . rules ,
query : url . Values {
"match[]" : [ ] string { ` { testlabel="rule"} ` } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
Limit : 0 ,
Rules : [ ] Rule {
AlertingRule {
State : "inactive" ,
Name : "test_metric6" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
Health : "ok" ,
Type : "alerting" ,
} ,
RecordingRule {
Name : "recording-rule-2" ,
Query : "vector(1)" ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Health : "ok" ,
Type : "recording" ,
} ,
} ,
} ,
} ,
} ,
zeroFunc : rulesZeroFunc ,
} ,
{
endpoint : api . rules ,
query : url . Values {
"type" : [ ] string { "alert" } ,
"match[]" : [ ] string { ` { templatedlabel=" {{ $externalURL }} "} ` } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
Limit : 0 ,
Rules : [ ] Rule {
AlertingRule {
State : "inactive" ,
Name : "test_metric7" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "templatedlabel" , "{{ $externalURL }}" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
Health : "ok" ,
Type : "alerting" ,
} ,
} ,
} ,
} ,
} ,
zeroFunc : rulesZeroFunc ,
} ,
{
endpoint : api . rules ,
query : url . Values {
"match[]" : [ ] string { ` { testlabel="abc"} ` } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup { } ,
} ,
} ,
// This is testing OR condition, the api response should return rule if it matches one of the label selector
{
endpoint : api . rules ,
query : url . Values {
"match[]" : [ ] string { ` { testlabel="abc"} ` , ` { testlabel="rule"} ` } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
Limit : 0 ,
Rules : [ ] Rule {
AlertingRule {
State : "inactive" ,
Name : "test_metric6" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
Health : "ok" ,
Type : "alerting" ,
} ,
RecordingRule {
Name : "recording-rule-2" ,
Query : "vector(1)" ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Health : "ok" ,
Type : "recording" ,
} ,
} ,
} ,
} ,
} ,
zeroFunc : rulesZeroFunc ,
} ,
{
endpoint : api . rules ,
query : url . Values {
"type" : [ ] string { "record" } ,
"match[]" : [ ] string { ` { testlabel="rule"} ` } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
Limit : 0 ,
Rules : [ ] Rule {
RecordingRule {
Name : "recording-rule-2" ,
Query : "vector(1)" ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Health : "ok" ,
Type : "recording" ,
} ,
} ,
} ,
} ,
} ,
zeroFunc : rulesZeroFunc ,
} ,
{
endpoint : api . rules ,
query : url . Values {
"type" : [ ] string { "alert" } ,
"match[]" : [ ] string { ` { testlabel="rule"} ` } ,
} ,
response : & RuleDiscovery {
RuleGroups : [ ] * RuleGroup {
{
Name : "grp" ,
File : "/path/to/file" ,
Interval : 1 ,
Limit : 0 ,
Rules : [ ] Rule {
AlertingRule {
State : "inactive" ,
Name : "test_metric6" ,
Query : "up == 1" ,
Duration : 1 ,
Labels : labels . FromStrings ( "testlabel" , "rule" ) ,
Annotations : labels . Labels { } ,
Alerts : [ ] * Alert { } ,
Health : "ok" ,
Type : "alerting" ,
2023-04-18 02:07:32 -07:00
} ,
} ,
2019-12-09 14:42:59 -08:00
} ,
} ,
} ,
2023-10-17 19:02:03 -07:00
zeroFunc : rulesZeroFunc ,
2019-12-09 14:42:59 -08:00
} ,
2021-03-16 02:47:45 -07:00
{
endpoint : api . queryExemplars ,
query : url . Values {
"query" : [ ] string { ` test_metric3 { foo="boo"} - test_metric4 { foo="bar"} ` } ,
"start" : [ ] string { "0" } ,
"end" : [ ] string { "4" } ,
} ,
// Note extra integer length of timestamps for exemplars because of millisecond preservation
// of timestamps within Prometheus (see timestamp package).
response : [ ] exemplar . QueryResult {
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric3" , "foo" , "boo" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "abc" ) ,
Value : 10 ,
Ts : timestamp . FromTime ( start . Add ( 2 * time . Second ) ) ,
} ,
} ,
} ,
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric4" , "foo" , "bar" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "lul" ) ,
Value : 10 ,
Ts : timestamp . FromTime ( start . Add ( 4 * time . Second ) ) ,
} ,
} ,
} ,
} ,
} ,
{
endpoint : api . queryExemplars ,
query : url . Values {
"query" : [ ] string { ` { foo="boo"} ` } ,
"start" : [ ] string { "4" } ,
"end" : [ ] string { "4.1" } ,
} ,
response : [ ] exemplar . QueryResult {
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric3" , "foo" , "boo" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "abc2" ) ,
Value : 10 ,
Ts : 4053 ,
} ,
} ,
} ,
} ,
} ,
{
endpoint : api . queryExemplars ,
query : url . Values {
"query" : [ ] string { ` { foo="boo"} ` } ,
} ,
response : [ ] exemplar . QueryResult {
{
SeriesLabels : labels . FromStrings ( "__name__" , "test_metric3" , "foo" , "boo" , "dup" , "1" ) ,
Exemplars : [ ] exemplar . Exemplar {
{
Labels : labels . FromStrings ( "id" , "abc" ) ,
Value : 10 ,
Ts : 2000 ,
} ,
{
Labels : labels . FromStrings ( "id" , "abc2" ) ,
Value : 10 ,
Ts : 4053 ,
} ,
} ,
} ,
} ,
} ,
{
endpoint : api . queryExemplars ,
query : url . Values {
"query" : [ ] string { ` { __name__="test_metric5"} ` } ,
} ,
response : [ ] exemplar . QueryResult { } ,
} ,
2015-06-04 09:07:57 -07:00
}
2018-06-16 10:26:37 -07:00
if testLabelAPI {
tests = append ( tests , [ ] test {
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "__name__" ,
} ,
response : [ ] string {
"test_metric1" ,
"test_metric2" ,
2020-08-28 16:21:39 -07:00
"test_metric3" ,
"test_metric4" ,
2018-06-16 10:26:37 -07:00
} ,
} ,
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
// Bad name parameter.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "not!!!allowed" ,
} ,
errType : errorBadData ,
} ,
2020-05-30 05:50:09 -07:00
// Start and end before LabelValues starts.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"start" : [ ] string { "-2" } ,
"end" : [ ] string { "-1" } ,
} ,
response : [ ] string { } ,
} ,
// Start and end within LabelValues.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"start" : [ ] string { "1" } ,
"end" : [ ] string { "100" } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
// Start before LabelValues, end within LabelValues.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"start" : [ ] string { "-1" } ,
"end" : [ ] string { "3" } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
// Start before LabelValues starts, end after LabelValues ends.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"start" : [ ] string { "1969-12-31T00:00:00Z" } ,
"end" : [ ] string { "1970-02-01T00:02:03Z" } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
// Start with bad data, end within LabelValues.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"start" : [ ] string { "boop" } ,
"end" : [ ] string { "1" } ,
} ,
errType : errorBadData ,
} ,
// Start within LabelValues, end after.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"start" : [ ] string { "1" } ,
"end" : [ ] string { "100000000" } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
// Start and end after LabelValues ends.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"start" : [ ] string { "148966367200.372" } ,
"end" : [ ] string { "148966367200.972" } ,
} ,
response : [ ] string { } ,
} ,
// Only provide Start within LabelValues, don't provide an end time.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"start" : [ ] string { "2" } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
// Only provide end within LabelValues, don't provide a start time.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"end" : [ ] string { "100" } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
2020-12-22 03:02:19 -08:00
// Label values with bad matchers.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"match[]" : [ ] string { ` { foo="" ` , ` test_metric2 ` } ,
} ,
errType : errorBadData ,
} ,
// Label values with empty matchers.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"match[]" : [ ] string { ` { foo=""} ` } ,
} ,
errType : errorBadData ,
} ,
// Label values with matcher.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
} ,
response : [ ] string {
"boo" ,
} ,
} ,
// Label values with matcher.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"match[]" : [ ] string { ` test_metric1 ` } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
// Label values with matcher using label filter.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"match[]" : [ ] string { ` test_metric1 { foo="bar"} ` } ,
} ,
response : [ ] string {
"bar" ,
} ,
} ,
// Label values with matcher and time range.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"match[]" : [ ] string { ` test_metric1 ` } ,
"start" : [ ] string { "1" } ,
"end" : [ ] string { "100000000" } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
2021-02-09 09:38:35 -08:00
// Try to overlap the selected series set as much as possible to test that the value de-duplication works.
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"match[]" : [ ] string { ` test_metric4 { dup=~"^1"} ` , ` test_metric4 { foo=~".+o$"} ` } ,
} ,
response : [ ] string {
"bar" ,
"boo" ,
} ,
} ,
2024-03-06 01:58:40 -08:00
// Label values with limit.
2024-02-29 07:31:13 -08:00
{
endpoint : api . labelValues ,
params : map [ string ] string {
2024-03-06 01:58:40 -08:00
"name" : "__name__" ,
2024-02-29 07:31:13 -08:00
} ,
query : url . Values {
2024-03-06 01:58:40 -08:00
"limit" : [ ] string { "2" } ,
2024-02-29 07:31:13 -08:00
} ,
2024-05-21 10:07:29 -07:00
responseLen : 2 , // API does not specify which particular values will come back.
warningsCount : 1 ,
} ,
{
endpoint : api . labelValues ,
params : map [ string ] string {
"name" : "__name__" ,
} ,
query : url . Values {
"limit" : [ ] string { "4" } ,
} ,
responseLen : 4 , // API does not specify which particular values will come back.
warningsCount : 0 , // No warnings if limit isn't exceeded.
2024-02-29 07:31:13 -08:00
} ,
2018-11-19 02:21:14 -08:00
// Label names.
{
endpoint : api . labelNames ,
2020-08-28 16:21:39 -07:00
response : [ ] string { "__name__" , "dup" , "foo" } ,
2018-11-19 02:21:14 -08:00
} ,
2020-05-30 05:50:09 -07:00
// Start and end before Label names starts.
{
endpoint : api . labelNames ,
query : url . Values {
"start" : [ ] string { "-2" } ,
"end" : [ ] string { "-1" } ,
} ,
response : [ ] string { } ,
} ,
// Start and end within Label names.
{
endpoint : api . labelNames ,
query : url . Values {
"start" : [ ] string { "1" } ,
"end" : [ ] string { "100" } ,
} ,
2020-08-28 16:21:39 -07:00
response : [ ] string { "__name__" , "dup" , "foo" } ,
2020-05-30 05:50:09 -07:00
} ,
// Start before Label names, end within Label names.
{
endpoint : api . labelNames ,
query : url . Values {
"start" : [ ] string { "-1" } ,
"end" : [ ] string { "10" } ,
} ,
2020-08-28 16:21:39 -07:00
response : [ ] string { "__name__" , "dup" , "foo" } ,
2020-05-30 05:50:09 -07:00
} ,
// Start before Label names starts, end after Label names ends.
{
endpoint : api . labelNames ,
query : url . Values {
"start" : [ ] string { "-1" } ,
"end" : [ ] string { "100000" } ,
} ,
2020-08-28 16:21:39 -07:00
response : [ ] string { "__name__" , "dup" , "foo" } ,
2020-05-30 05:50:09 -07:00
} ,
// Start with bad data for Label names, end within Label names.
{
endpoint : api . labelNames ,
query : url . Values {
"start" : [ ] string { "boop" } ,
"end" : [ ] string { "1" } ,
} ,
errType : errorBadData ,
} ,
// Start within Label names, end after.
{
endpoint : api . labelNames ,
query : url . Values {
"start" : [ ] string { "1" } ,
"end" : [ ] string { "1000000006" } ,
} ,
2020-08-28 16:21:39 -07:00
response : [ ] string { "__name__" , "dup" , "foo" } ,
2020-05-30 05:50:09 -07:00
} ,
// Start and end after Label names ends.
{
endpoint : api . labelNames ,
query : url . Values {
"start" : [ ] string { "148966367200.372" } ,
"end" : [ ] string { "148966367200.972" } ,
} ,
response : [ ] string { } ,
} ,
// Only provide Start within Label names, don't provide an end time.
{
endpoint : api . labelNames ,
query : url . Values {
"start" : [ ] string { "4" } ,
} ,
2020-08-28 16:21:39 -07:00
response : [ ] string { "__name__" , "dup" , "foo" } ,
2020-05-30 05:50:09 -07:00
} ,
// Only provide End within Label names, don't provide a start time.
{
endpoint : api . labelNames ,
query : url . Values {
"end" : [ ] string { "20" } ,
} ,
2020-08-28 16:21:39 -07:00
response : [ ] string { "__name__" , "dup" , "foo" } ,
2020-05-30 05:50:09 -07:00
} ,
2020-12-22 03:02:19 -08:00
// Label names with bad matchers.
{
endpoint : api . labelNames ,
query : url . Values {
"match[]" : [ ] string { ` { foo="" ` , ` test_metric2 ` } ,
} ,
errType : errorBadData ,
} ,
// Label values with empty matchers.
{
endpoint : api . labelNames ,
params : map [ string ] string {
"name" : "foo" ,
} ,
query : url . Values {
"match[]" : [ ] string { ` { foo=""} ` } ,
} ,
errType : errorBadData ,
} ,
// Label names with matcher.
{
endpoint : api . labelNames ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
} ,
response : [ ] string { "__name__" , "foo" } ,
} ,
// Label names with matcher.
{
endpoint : api . labelNames ,
query : url . Values {
"match[]" : [ ] string { ` test_metric3 ` } ,
} ,
response : [ ] string { "__name__" , "dup" , "foo" } ,
} ,
// Label names with matcher using label filter.
// There is no matching series.
{
endpoint : api . labelNames ,
query : url . Values {
"match[]" : [ ] string { ` test_metric1 { foo="test"} ` } ,
} ,
response : [ ] string { } ,
} ,
// Label names with matcher and time range.
{
endpoint : api . labelNames ,
query : url . Values {
"match[]" : [ ] string { ` test_metric2 ` } ,
"start" : [ ] string { "1" } ,
"end" : [ ] string { "100000000" } ,
} ,
response : [ ] string { "__name__" , "foo" } ,
} ,
2024-03-06 01:58:40 -08:00
// Label names with limit.
2024-02-29 07:31:13 -08:00
{
endpoint : api . labelNames ,
query : url . Values {
2024-03-06 01:58:40 -08:00
"limit" : [ ] string { "2" } ,
2024-02-29 07:31:13 -08:00
} ,
2024-05-21 10:07:29 -07:00
responseLen : 2 , // API does not specify which particular values will come back.
warningsCount : 1 ,
} ,
{
endpoint : api . labelNames ,
query : url . Values {
"limit" : [ ] string { "3" } ,
} ,
responseLen : 3 , // API does not specify which particular values will come back.
warningsCount : 0 , // No warnings if limit isn't exceeded.
2024-02-29 07:31:13 -08:00
} ,
2018-06-16 10:26:37 -07:00
} ... )
}
2017-11-10 16:53:48 -08:00
methods := func ( f apiFunc ) [ ] string {
fp := reflect . ValueOf ( f ) . Pointer ( )
2019-04-02 10:00:29 -07:00
if fp == reflect . ValueOf ( api . query ) . Pointer ( ) || fp == reflect . ValueOf ( api . queryRange ) . Pointer ( ) || fp == reflect . ValueOf ( api . series ) . Pointer ( ) {
2017-11-10 16:53:48 -08:00
return [ ] string { http . MethodGet , http . MethodPost }
2015-06-08 12:19:52 -07:00
}
2017-11-10 16:53:48 -08:00
return [ ] string { http . MethodGet }
}
2015-06-08 12:19:52 -07:00
2017-11-10 16:53:48 -08:00
request := func ( m string , q url . Values ) ( * http . Request , error ) {
if m == http . MethodPost {
r , err := http . NewRequest ( m , "http://example.com" , strings . NewReader ( q . Encode ( ) ) )
r . Header . Set ( "Content-Type" , "application/x-www-form-urlencoded" )
2020-01-08 05:28:43 -08:00
r . RemoteAddr = "127.0.0.1:20201"
2017-11-10 16:53:48 -08:00
return r , err
2015-06-04 09:07:57 -07:00
}
2020-01-08 05:28:43 -08:00
r , err := http . NewRequest ( m , fmt . Sprintf ( "http://example.com?%s" , q . Encode ( ) ) , nil )
r . RemoteAddr = "127.0.0.1:20201"
return r , err
2017-11-10 16:53:48 -08:00
}
2018-06-16 10:26:37 -07:00
for i , test := range tests {
2020-07-31 08:03:02 -07:00
t . Run ( fmt . Sprintf ( "run %d %s %q" , i , describeAPIFunc ( test . endpoint ) , test . query . Encode ( ) ) , func ( t * testing . T ) {
for _ , method := range methods ( test . endpoint ) {
t . Run ( method , func ( t * testing . T ) {
// Build a context with the correct request params.
ctx := context . Background ( )
for p , v := range test . params {
ctx = route . WithParam ( ctx , p , v )
}
req , err := request ( method , test . query )
2021-09-03 02:51:27 -07:00
require . NoError ( t , err )
2020-07-31 08:03:02 -07:00
tr . ResetMetadataStore ( )
for _ , tm := range test . metadata {
tr . SetMetadataStoreForTargets ( tm . identifier , & testMetaStore { Metadata : tm . metadata } )
}
2021-03-16 02:47:45 -07:00
for _ , te := range test . exemplars {
for _ , e := range te . Exemplars {
_ , err := es . AppendExemplar ( 0 , te . SeriesLabels , e )
2021-09-03 02:51:27 -07:00
require . NoError ( t , err )
2021-03-16 02:47:45 -07:00
}
}
2020-07-31 08:03:02 -07:00
res := test . endpoint ( req . WithContext ( ctx ) )
assertAPIError ( t , res . err , test . errType )
if test . sorter != nil {
test . sorter ( res . data )
}
if test . responseLen != 0 {
assertAPIResponseLength ( t , res . data , test . responseLen )
2023-06-12 08:17:20 -07:00
if test . responseMetadataTotal != 0 {
assertAPIResponseMetadataLen ( t , res . data , test . responseMetadataTotal )
}
2020-07-31 08:03:02 -07:00
} else {
2023-10-17 19:02:03 -07:00
if test . zeroFunc != nil {
test . zeroFunc ( res . data )
}
2024-04-28 12:03:51 -07:00
if test . response != nil {
assertAPIResponse ( t , res . data , test . response )
}
2020-07-31 08:03:02 -07:00
}
2024-02-26 01:53:39 -08:00
if test . responseAsJSON != "" {
2024-04-28 12:02:18 -07:00
json := jsoniter . ConfigCompatibleWithStandardLibrary
2024-02-26 01:53:39 -08:00
s , err := json . Marshal ( res . data )
require . NoError ( t , err )
require . JSONEq ( t , test . responseAsJSON , string ( s ) )
}
2024-05-21 10:07:29 -07:00
require . Len ( t , res . warnings , test . warningsCount )
2020-07-31 08:03:02 -07:00
} )
2019-12-10 06:56:16 -08:00
}
2020-07-31 08:03:02 -07:00
} )
2018-11-15 05:22:16 -08:00
}
}
2018-06-27 00:15:17 -07:00
2020-07-31 08:03:02 -07:00
// describeAPIFunc returns a short human-readable name for an API endpoint
// function, derived from its runtime symbol: the package path is stripped and
// anything after a '-' (the "-fm" suffix Go appends to method values) is cut.
func describeAPIFunc(f apiFunc) string {
	full := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
	short := full[strings.LastIndex(full, ".")+1:]
	if i := strings.Index(short, "-"); i >= 0 {
		short = short[:i]
	}
	return short
}
2018-11-15 05:22:16 -08:00
func assertAPIError ( t * testing . T , got * apiError , exp errorType ) {
t . Helper ( )
2018-06-27 00:15:17 -07:00
2021-09-03 02:51:27 -07:00
if exp == errorNone {
require . Nil ( t , got )
} else {
require . NotNil ( t , got )
require . Equal ( t , exp , got . typ , "(%q)" , got )
2018-11-15 05:22:16 -08:00
}
}
2021-10-22 01:06:44 -07:00
func assertAPIResponse ( t * testing . T , got , exp interface { } ) {
2019-12-09 13:36:38 -08:00
t . Helper ( )
2023-04-16 05:13:31 -07:00
testutil . RequireEqual ( t , exp , got )
2015-06-04 09:07:57 -07:00
}
2019-12-10 06:56:16 -08:00
func assertAPIResponseLength ( t * testing . T , got interface { } , expLen int ) {
t . Helper ( )
gotLen := reflect . ValueOf ( got ) . Len ( )
2021-09-03 02:51:27 -07:00
require . Equal ( t , expLen , gotLen , "Response length does not match" )
2019-12-10 06:56:16 -08:00
}
2023-06-12 08:17:20 -07:00
func assertAPIResponseMetadataLen ( t * testing . T , got interface { } , expLen int ) {
t . Helper ( )
var gotLen int
2023-11-22 06:39:21 -08:00
response := got . ( map [ string ] [ ] metadata . Metadata )
2023-06-12 08:17:20 -07:00
for _ , m := range response {
gotLen += len ( m )
}
2021-09-03 02:51:27 -07:00
require . Equal ( t , expLen , gotLen , "Amount of metadata in the response does not match" )
2023-06-12 08:17:20 -07:00
}
2018-11-15 05:22:16 -08:00
// fakeDB is a minimal stub of the TSDB admin interface used by the admin
// endpoint tests; every method simply reports the configured error.
type fakeDB struct {
	err error // returned verbatim by CleanTombstones, Delete, and Snapshot
}
2023-09-13 06:43:06 -07:00
// CleanTombstones implements the admin DB interface; it returns the stub's error.
func (f *fakeDB) CleanTombstones() error {
	return f.err
}
// Delete implements the admin DB interface; arguments are ignored and the
// stub's error is returned.
func (f *fakeDB) Delete(context.Context, int64, int64, ...*labels.Matcher) error {
	return f.err
}
// Snapshot implements the admin DB interface; arguments are ignored and the
// stub's error is returned.
func (f *fakeDB) Snapshot(string, bool) error {
	return f.err
}
2023-05-22 05:37:07 -07:00
func ( f * fakeDB ) Stats ( statsByLabelName string , limit int ) ( _ * tsdb . Stats , retErr error ) {
2022-04-27 02:24:36 -07:00
dbDir , err := os . MkdirTemp ( "" , "tsdb-api-ready" )
2020-05-06 08:30:00 -07:00
if err != nil {
return nil , err
}
defer func ( ) {
err := os . RemoveAll ( dbDir )
if retErr != nil {
retErr = err
}
} ( )
2021-02-09 06:12:48 -08:00
opts := tsdb . DefaultHeadOptions ( )
opts . ChunkRange = 1000
2022-09-20 10:05:50 -07:00
h , _ := tsdb . NewHead ( nil , nil , nil , nil , opts , nil )
2023-05-22 05:37:07 -07:00
return h . Stats ( statsByLabelName , limit ) , nil
2019-11-12 02:15:20 -08:00
}
2021-10-22 01:06:44 -07:00
2021-06-05 07:29:32 -07:00
// WALReplayStatus implements the admin DB interface; the stub always reports
// a zero-value replay status and never fails.
func (f *fakeDB) WALReplayStatus() (tsdb.WALReplayStatus, error) {
	var status tsdb.WALReplayStatus
	return status, nil
}
2018-11-15 05:22:16 -08:00
func TestAdminEndpoints ( t * testing . T ) {
2023-11-07 19:49:39 -08:00
tsdb , tsdbWithError , tsdbNotReady := & fakeDB { } , & fakeDB { err : errors . New ( "some error" ) } , & fakeDB { err : fmt . Errorf ( "wrap: %w" , tsdb . ErrNotReady ) }
2018-11-15 05:22:16 -08:00
snapshotAPI := func ( api * API ) apiFunc { return api . snapshot }
cleanAPI := func ( api * API ) apiFunc { return api . cleanTombstones }
deleteAPI := func ( api * API ) apiFunc { return api . deleteSeries }
2020-04-29 09:16:14 -07:00
for _ , tc := range [ ] struct {
2018-11-15 05:22:16 -08:00
db * fakeDB
enableAdmin bool
endpoint func ( api * API ) apiFunc
method string
values url . Values
errType errorType
} {
// Tests for the snapshot endpoint.
{
db : tsdb ,
enableAdmin : false ,
endpoint : snapshotAPI ,
errType : errorUnavailable ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : snapshotAPI ,
errType : errorNone ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : snapshotAPI ,
2019-01-16 14:28:08 -08:00
values : map [ string ] [ ] string { "skip_head" : { "true" } } ,
2018-11-15 05:22:16 -08:00
errType : errorNone ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : snapshotAPI ,
2019-01-16 14:28:08 -08:00
values : map [ string ] [ ] string { "skip_head" : { "xxx" } } ,
2018-11-15 05:22:16 -08:00
errType : errorBadData ,
} ,
{
db : tsdbWithError ,
enableAdmin : true ,
endpoint : snapshotAPI ,
errType : errorInternal ,
} ,
{
2020-04-29 09:16:14 -07:00
db : tsdbNotReady ,
2018-11-15 05:22:16 -08:00
enableAdmin : true ,
endpoint : snapshotAPI ,
errType : errorUnavailable ,
} ,
// Tests for the cleanTombstones endpoint.
{
db : tsdb ,
enableAdmin : false ,
endpoint : cleanAPI ,
errType : errorUnavailable ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : cleanAPI ,
errType : errorNone ,
} ,
{
db : tsdbWithError ,
enableAdmin : true ,
endpoint : cleanAPI ,
errType : errorInternal ,
} ,
{
2020-04-29 09:16:14 -07:00
db : tsdbNotReady ,
2018-11-15 05:22:16 -08:00
enableAdmin : true ,
endpoint : cleanAPI ,
errType : errorUnavailable ,
} ,
// Tests for the deleteSeries endpoint.
{
db : tsdb ,
enableAdmin : false ,
endpoint : deleteAPI ,
errType : errorUnavailable ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : deleteAPI ,
errType : errorBadData ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : deleteAPI ,
2019-01-16 14:28:08 -08:00
values : map [ string ] [ ] string { "match[]" : { "123" } } ,
2018-11-15 05:22:16 -08:00
errType : errorBadData ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : deleteAPI ,
2019-01-16 14:28:08 -08:00
values : map [ string ] [ ] string { "match[]" : { "up" } , "start" : { "xxx" } } ,
2018-11-15 05:22:16 -08:00
errType : errorBadData ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : deleteAPI ,
2019-01-16 14:28:08 -08:00
values : map [ string ] [ ] string { "match[]" : { "up" } , "end" : { "xxx" } } ,
2018-11-15 05:22:16 -08:00
errType : errorBadData ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : deleteAPI ,
2019-01-16 14:28:08 -08:00
values : map [ string ] [ ] string { "match[]" : { "up" } } ,
2018-11-15 05:22:16 -08:00
errType : errorNone ,
} ,
{
db : tsdb ,
enableAdmin : true ,
endpoint : deleteAPI ,
2019-01-16 14:28:08 -08:00
values : map [ string ] [ ] string { "match[]" : { "up{job!=\"foo\"}" , "{job=~\"bar.+\"}" , "up{instance!~\"fred.+\"}" } } ,
2018-11-15 05:22:16 -08:00
errType : errorNone ,
} ,
{
db : tsdbWithError ,
enableAdmin : true ,
endpoint : deleteAPI ,
2019-01-16 14:28:08 -08:00
values : map [ string ] [ ] string { "match[]" : { "up" } } ,
2018-11-15 05:22:16 -08:00
errType : errorInternal ,
} ,
{
2020-04-29 09:16:14 -07:00
db : tsdbNotReady ,
2018-11-15 05:22:16 -08:00
enableAdmin : true ,
endpoint : deleteAPI ,
2020-04-29 09:16:14 -07:00
values : map [ string ] [ ] string { "match[]" : { "up" } } ,
2018-11-15 05:22:16 -08:00
errType : errorUnavailable ,
} ,
} {
tc := tc
2020-04-29 09:16:14 -07:00
t . Run ( "" , func ( t * testing . T ) {
2021-12-08 14:14:50 -08:00
dir := t . TempDir ( )
2020-04-29 09:16:14 -07:00
2018-11-15 05:22:16 -08:00
api := & API {
2020-04-29 09:16:14 -07:00
db : tc . db ,
dbDir : dir ,
2018-11-15 05:22:16 -08:00
ready : func ( f http . HandlerFunc ) http . HandlerFunc { return f } ,
enableAdmin : tc . enableAdmin ,
}
endpoint := tc . endpoint ( api )
req , err := http . NewRequest ( tc . method , fmt . Sprintf ( "?%s" , tc . values . Encode ( ) ) , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-04-29 09:16:14 -07:00
res := setUnavailStatusOnTSDBNotReady ( endpoint ( req ) )
2018-11-30 06:27:12 -08:00
assertAPIError ( t , res . err , tc . errType )
2018-11-15 05:22:16 -08:00
} )
}
}
2015-06-04 09:07:57 -07:00
func TestRespondSuccess ( t * testing . T ) {
2023-01-24 19:30:47 -08:00
api := API {
logger : log . NewNopLogger ( ) ,
}
2023-02-26 18:27:09 -08:00
api . ClearCodecs ( )
api . InstallCodec ( JSONCodec { } )
api . InstallCodec ( & testCodec { contentType : MIMEType { "test" , "cannot-encode" } , canEncode : false } )
api . InstallCodec ( & testCodec { contentType : MIMEType { "test" , "can-encode" } , canEncode : true } )
api . InstallCodec ( & testCodec { contentType : MIMEType { "test" , "can-encode-2" } , canEncode : true } )
2023-01-24 19:30:47 -08:00
2015-07-02 01:37:19 -07:00
s := httptest . NewServer ( http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
2023-09-14 09:57:31 -07:00
api . respond ( w , r , "test" , nil , "" )
2015-07-02 01:37:19 -07:00
} ) )
defer s . Close ( )
2015-06-04 09:07:57 -07:00
2023-01-24 19:30:47 -08:00
for _ , tc := range [ ] struct {
name string
acceptHeader string
expectedContentType string
expectedBody string
} {
{
name : "no Accept header" ,
expectedContentType : "application/json" ,
expectedBody : ` { "status":"success","data":"test"} ` ,
} ,
{
name : "Accept header with single content type which is suitable" ,
acceptHeader : "test/can-encode" ,
expectedContentType : "test/can-encode" ,
expectedBody : ` response from test/can-encode codec ` ,
} ,
{
name : "Accept header with single content type which is not available" ,
acceptHeader : "test/not-registered" ,
expectedContentType : "application/json" ,
expectedBody : ` { "status":"success","data":"test"} ` ,
} ,
{
name : "Accept header with single content type which cannot encode the response payload" ,
acceptHeader : "test/cannot-encode" ,
expectedContentType : "application/json" ,
expectedBody : ` { "status":"success","data":"test"} ` ,
} ,
{
name : "Accept header with multiple content types, all of which are suitable" ,
acceptHeader : "test/can-encode, test/can-encode-2" ,
expectedContentType : "test/can-encode" ,
expectedBody : ` response from test/can-encode codec ` ,
} ,
{
name : "Accept header with multiple content types, only one of which is available" ,
acceptHeader : "test/not-registered, test/can-encode" ,
expectedContentType : "test/can-encode" ,
expectedBody : ` response from test/can-encode codec ` ,
} ,
{
name : "Accept header with multiple content types, only one of which can encode the response payload" ,
acceptHeader : "test/cannot-encode, test/can-encode" ,
expectedContentType : "test/can-encode" ,
expectedBody : ` response from test/can-encode codec ` ,
} ,
{
name : "Accept header with multiple content types, none of which are available" ,
acceptHeader : "test/not-registered, test/also-not-registered" ,
expectedContentType : "application/json" ,
expectedBody : ` { "status":"success","data":"test"} ` ,
} ,
} {
t . Run ( tc . name , func ( t * testing . T ) {
req , err := http . NewRequest ( http . MethodGet , s . URL , nil )
require . NoError ( t , err )
2015-06-04 09:07:57 -07:00
2023-01-24 19:30:47 -08:00
if tc . acceptHeader != "" {
req . Header . Set ( "Accept" , tc . acceptHeader )
}
2015-07-02 01:37:19 -07:00
2023-01-24 19:30:47 -08:00
resp , err := http . DefaultClient . Do ( req )
require . NoError ( t , err )
body , err := io . ReadAll ( resp . Body )
defer resp . Body . Close ( )
require . NoError ( t , err )
2015-06-04 09:07:57 -07:00
2023-01-24 19:30:47 -08:00
require . Equal ( t , http . StatusOK , resp . StatusCode )
require . Equal ( t , tc . expectedContentType , resp . Header . Get ( "Content-Type" ) )
require . Equal ( t , tc . expectedBody , string ( body ) )
} )
2015-06-04 09:07:57 -07:00
}
}
2023-02-26 18:27:09 -08:00
func TestRespondSuccess_DefaultCodecCannotEncodeResponse ( t * testing . T ) {
api := API {
logger : log . NewNopLogger ( ) ,
2015-06-04 09:07:57 -07:00
}
2023-02-26 18:27:09 -08:00
api . ClearCodecs ( )
api . InstallCodec ( & testCodec { contentType : MIMEType { "application" , "default-format" } , canEncode : false } )
s := httptest . NewServer ( http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
2023-09-14 09:57:31 -07:00
api . respond ( w , r , "test" , nil , "" )
2023-02-26 18:27:09 -08:00
} ) )
defer s . Close ( )
req , err := http . NewRequest ( http . MethodGet , s . URL , nil )
require . NoError ( t , err )
resp , err := http . DefaultClient . Do ( req )
require . NoError ( t , err )
body , err := io . ReadAll ( resp . Body )
defer resp . Body . Close ( )
require . NoError ( t , err )
require . Equal ( t , http . StatusNotAcceptable , resp . StatusCode )
require . Equal ( t , "application/json" , resp . Header . Get ( "Content-Type" ) )
require . Equal ( t , ` { "status":"error","errorType":"not_acceptable","error":"cannot encode response as application/default-format"} ` , string ( body ) )
2015-06-04 09:07:57 -07:00
}
func TestRespondError ( t * testing . T ) {
2015-07-02 01:37:19 -07:00
s := httptest . NewServer ( http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
2018-07-06 10:44:45 -07:00
api := API { }
api . respondError ( w , & apiError { errorTimeout , errors . New ( "message" ) } , "test" )
2015-07-02 01:37:19 -07:00
} ) )
defer s . Close ( )
2015-06-04 09:07:57 -07:00
2015-07-02 01:37:19 -07:00
resp , err := http . Get ( s . URL )
2024-02-01 06:18:01 -08:00
require . NoError ( t , err , "Error on test request" )
2022-04-27 02:24:36 -07:00
body , err := io . ReadAll ( resp . Body )
2015-07-02 01:37:19 -07:00
defer resp . Body . Close ( )
2024-02-01 06:18:01 -08:00
require . NoError ( t , err , "Error reading response body" )
2021-09-03 02:51:27 -07:00
want , have := http . StatusServiceUnavailable , resp . StatusCode
require . Equal ( t , want , have , "Return code %d expected in error response but got %d" , want , have )
h := resp . Header . Get ( "Content-Type" )
require . Equal ( t , "application/json" , h , "Expected Content-Type %q but got %q" , "application/json" , h )
2024-04-28 11:57:48 -07:00
require . JSONEq ( t , ` { "status": "error", "data": "test", "errorType": "timeout", "error": "message"} ` , string ( body ) )
2015-06-04 09:07:57 -07:00
}
2020-03-06 02:33:01 -08:00
func TestParseTimeParam ( t * testing . T ) {
type resultType struct {
asTime time . Time
asError func ( ) error
}
ts , err := parseTime ( "1582468023986" )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-03-06 02:33:01 -08:00
2021-10-22 01:06:44 -07:00
tests := [ ] struct {
2020-03-06 02:33:01 -08:00
paramName string
paramValue string
defaultValue time . Time
result resultType
} {
{ // When data is valid.
paramName : "start" ,
paramValue : "1582468023986" ,
2023-07-06 08:48:13 -07:00
defaultValue : MinTime ,
2020-03-06 02:33:01 -08:00
result : resultType {
asTime : ts ,
asError : nil ,
} ,
} ,
{ // When data is empty string.
paramName : "end" ,
paramValue : "" ,
2023-07-06 08:48:13 -07:00
defaultValue : MaxTime ,
2020-03-06 02:33:01 -08:00
result : resultType {
2023-07-06 08:48:13 -07:00
asTime : MaxTime ,
2020-03-06 02:33:01 -08:00
asError : nil ,
} ,
} ,
{ // When data is not valid.
paramName : "foo" ,
paramValue : "baz" ,
2023-07-06 08:48:13 -07:00
defaultValue : MaxTime ,
2020-03-06 02:33:01 -08:00
result : resultType {
asTime : time . Time { } ,
asError : func ( ) error {
_ , err := parseTime ( "baz" )
2023-11-07 19:49:39 -08:00
return fmt . Errorf ( "Invalid time value for '%s': %w" , "foo" , err )
2020-03-06 02:33:01 -08:00
} ,
} ,
} ,
}
for _ , test := range tests {
2024-04-08 12:26:23 -07:00
req , err := http . NewRequest ( http . MethodGet , "localhost:42/foo?" + test . paramName + "=" + test . paramValue , nil )
2020-10-29 02:43:23 -07:00
require . NoError ( t , err )
2020-03-06 02:33:01 -08:00
result := test . result
asTime , err := parseTimeParam ( req , test . paramName , test . defaultValue )
if err != nil {
2020-10-29 02:43:23 -07:00
require . EqualError ( t , err , result . asError ( ) . Error ( ) )
2020-03-06 02:33:01 -08:00
} else {
2020-10-29 02:43:23 -07:00
require . True ( t , asTime . Equal ( result . asTime ) , "time as return value: %s not parsed correctly. Expected %s. Actual %s" , test . paramValue , result . asTime , asTime )
2020-03-06 02:33:01 -08:00
}
}
}
2015-06-04 09:07:57 -07:00
func TestParseTime ( t * testing . T ) {
ts , err := time . Parse ( time . RFC3339Nano , "2015-06-03T13:21:58.555Z" )
if err != nil {
panic ( err )
}
2021-10-22 01:06:44 -07:00
tests := [ ] struct {
2015-06-04 09:07:57 -07:00
input string
fail bool
result time . Time
} {
{
input : "" ,
fail : true ,
2021-10-22 01:06:44 -07:00
} ,
{
2015-06-04 09:07:57 -07:00
input : "abc" ,
fail : true ,
2021-10-22 01:06:44 -07:00
} ,
{
2015-06-04 09:07:57 -07:00
input : "30s" ,
fail : true ,
2021-10-22 01:06:44 -07:00
} ,
{
2015-06-04 09:07:57 -07:00
input : "123" ,
result : time . Unix ( 123 , 0 ) ,
2021-10-22 01:06:44 -07:00
} ,
{
2015-06-04 09:07:57 -07:00
input : "123.123" ,
result : time . Unix ( 123 , 123000000 ) ,
2021-10-22 01:06:44 -07:00
} ,
{
2015-06-04 09:07:57 -07:00
input : "2015-06-03T13:21:58.555Z" ,
result : ts ,
2021-10-22 01:06:44 -07:00
} ,
{
2015-06-04 09:07:57 -07:00
input : "2015-06-03T14:21:58.555+01:00" ,
result : ts ,
2021-10-22 01:06:44 -07:00
} ,
{
2018-12-03 04:25:54 -08:00
// Test float rounding.
input : "1543578564.705" ,
result : time . Unix ( 1543578564 , 705 * 1e6 ) ,
2015-06-04 09:07:57 -07:00
} ,
2019-07-08 02:43:59 -07:00
{
2023-07-06 08:48:13 -07:00
input : MinTime . Format ( time . RFC3339Nano ) ,
result : MinTime ,
2019-07-08 02:43:59 -07:00
} ,
{
2023-07-06 08:48:13 -07:00
input : MaxTime . Format ( time . RFC3339Nano ) ,
result : MaxTime ,
2019-07-08 02:43:59 -07:00
} ,
2015-06-04 09:07:57 -07:00
}
for _ , test := range tests {
ts , err := parseTime ( test . input )
2021-09-03 02:51:27 -07:00
if ! test . fail {
2024-02-01 06:18:01 -08:00
require . NoError ( t , err , "Unexpected error for %q" , test . input )
2021-09-03 02:51:27 -07:00
require . NotNil ( t , ts )
require . True ( t , ts . Equal ( test . result ) , "Expected time %v for input %q but got %v" , test . result , test . input , ts )
2015-06-04 09:07:57 -07:00
continue
}
2021-09-03 02:51:27 -07:00
require . Error ( t , err , "Expected error for %q but got none" , test . input )
2015-06-04 09:07:57 -07:00
}
}
func TestParseDuration ( t * testing . T ) {
2021-10-22 01:06:44 -07:00
tests := [ ] struct {
2015-06-04 09:07:57 -07:00
input string
fail bool
result time . Duration
} {
{
input : "" ,
fail : true ,
} , {
input : "abc" ,
fail : true ,
} , {
input : "2015-06-03T13:21:58.555Z" ,
fail : true ,
2017-03-16 07:16:20 -07:00
} , {
// Internal int64 overflow.
input : "-148966367200.372" ,
fail : true ,
} , {
// Internal int64 overflow.
input : "148966367200.372" ,
fail : true ,
2015-06-04 09:07:57 -07:00
} , {
input : "123" ,
result : 123 * time . Second ,
} , {
input : "123.333" ,
result : 123 * time . Second + 333 * time . Millisecond ,
} , {
input : "15s" ,
result : 15 * time . Second ,
} , {
input : "5m" ,
result : 5 * time . Minute ,
} ,
}
for _ , test := range tests {
d , err := parseDuration ( test . input )
2021-09-03 02:51:27 -07:00
if ! test . fail {
2024-02-01 06:18:01 -08:00
require . NoError ( t , err , "Unexpected error for %q" , test . input )
2021-09-03 02:51:27 -07:00
require . Equal ( t , test . result , d , "Expected duration %v for input %q but got %v" , test . result , test . input , d )
2015-06-04 09:07:57 -07:00
continue
}
2021-09-03 02:51:27 -07:00
require . Error ( t , err , "Expected error for %q but got none" , test . input )
2015-06-04 09:07:57 -07:00
}
}
2016-01-25 16:32:46 -08:00
func TestOptionsMethod ( t * testing . T ) {
2017-05-02 16:49:29 -07:00
r := route . New ( )
2017-10-06 08:20:20 -07:00
api := & API { ready : func ( f http . HandlerFunc ) http . HandlerFunc { return f } }
2016-01-25 16:32:46 -08:00
api . Register ( r )
s := httptest . NewServer ( r )
defer s . Close ( )
2024-04-08 12:26:23 -07:00
req , err := http . NewRequest ( http . MethodOptions , s . URL + "/any_path" , nil )
2021-09-03 02:51:27 -07:00
require . NoError ( t , err , "Error creating OPTIONS request" )
2016-01-25 16:32:46 -08:00
client := & http . Client { }
resp , err := client . Do ( req )
2021-09-03 02:51:27 -07:00
require . NoError ( t , err , "Error executing OPTIONS request" )
require . Equal ( t , http . StatusNoContent , resp . StatusCode )
2016-01-25 16:32:46 -08:00
}
2018-02-07 07:40:36 -08:00
2019-11-12 02:15:20 -08:00
func TestTSDBStatus ( t * testing . T ) {
tsdb := & fakeDB { }
tsdbStatusAPI := func ( api * API ) apiFunc { return api . serveTSDBStatus }
for i , tc := range [ ] struct {
db * fakeDB
endpoint func ( api * API ) apiFunc
method string
values url . Values
errType errorType
} {
// Tests for the TSDB Status endpoint.
{
db : tsdb ,
endpoint : tsdbStatusAPI ,
2023-05-22 05:37:07 -07:00
errType : errorNone ,
} ,
{
db : tsdb ,
endpoint : tsdbStatusAPI ,
values : map [ string ] [ ] string { "limit" : { "20" } } ,
errType : errorNone ,
} ,
{
db : tsdb ,
endpoint : tsdbStatusAPI ,
values : map [ string ] [ ] string { "limit" : { "0" } } ,
errType : errorBadData ,
2019-11-12 02:15:20 -08:00
} ,
} {
tc := tc
2024-05-13 08:36:19 -07:00
t . Run ( strconv . Itoa ( i ) , func ( t * testing . T ) {
2020-09-29 13:05:33 -07:00
api := & API { db : tc . db , gatherer : prometheus . DefaultGatherer }
2019-11-12 02:15:20 -08:00
endpoint := tc . endpoint ( api )
req , err := http . NewRequest ( tc . method , fmt . Sprintf ( "?%s" , tc . values . Encode ( ) ) , nil )
2021-09-03 02:51:27 -07:00
require . NoError ( t , err , "Error when creating test request" )
2019-11-12 02:15:20 -08:00
res := endpoint ( req )
assertAPIError ( t , res . err , tc . errType )
} )
}
}
2020-06-22 07:29:35 -07:00
func TestReturnAPIError ( t * testing . T ) {
cases := [ ] struct {
err error
expected errorType
} {
{
err : promql . ErrStorage { Err : errors . New ( "storage error" ) } ,
expected : errorInternal ,
} , {
2022-06-13 08:45:35 -07:00
err : fmt . Errorf ( "wrapped: %w" , promql . ErrStorage { Err : errors . New ( "storage error" ) } ) ,
2020-06-22 07:29:35 -07:00
expected : errorInternal ,
} , {
err : promql . ErrQueryTimeout ( "timeout error" ) ,
expected : errorTimeout ,
} , {
2022-06-13 08:45:35 -07:00
err : fmt . Errorf ( "wrapped: %w" , promql . ErrQueryTimeout ( "timeout error" ) ) ,
2020-06-22 07:29:35 -07:00
expected : errorTimeout ,
} , {
err : promql . ErrQueryCanceled ( "canceled error" ) ,
expected : errorCanceled ,
} , {
2022-06-13 08:45:35 -07:00
err : fmt . Errorf ( "wrapped: %w" , promql . ErrQueryCanceled ( "canceled error" ) ) ,
2020-06-22 07:29:35 -07:00
expected : errorCanceled ,
} , {
err : errors . New ( "exec error" ) ,
expected : errorExec ,
2024-03-19 10:37:43 -07:00
} , {
err : context . Canceled ,
expected : errorCanceled ,
2020-06-22 07:29:35 -07:00
} ,
}
2022-06-13 08:45:35 -07:00
for ix , c := range cases {
2020-06-22 07:29:35 -07:00
actual := returnAPIError ( c . err )
2022-06-13 08:45:35 -07:00
require . Error ( t , actual , ix )
require . Equal ( t , c . expected , actual . typ , ix )
2020-06-22 07:29:35 -07:00
}
}
2018-02-07 07:40:36 -08:00
// testResponseWriter is a package-level sink for benchmark output so the
// compiler cannot optimize the respond calls away.
var testResponseWriter = httptest.ResponseRecorder{}
func BenchmarkRespond ( b * testing . B ) {
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the one that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
points := [ ] promql . FPoint { }
2018-02-07 07:40:36 -08:00
for i := 0 ; i < 10000 ; i ++ {
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the one that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 07:58:40 -07:00
points = append ( points , promql . FPoint { F : float64 ( i * 1000000 ) , T : int64 ( i ) } )
2018-02-07 07:40:36 -08:00
}
2023-07-13 07:36:38 -07:00
matrix := promql . Matrix { }
for i := 0 ; i < 1000 ; i ++ {
matrix = append ( matrix , promql . Series {
Metric : labels . FromStrings ( "__name__" , fmt . Sprintf ( "series%v" , i ) ,
"label" , fmt . Sprintf ( "series%v" , i ) ,
"label2" , fmt . Sprintf ( "series%v" , i ) ) ,
Floats : points [ : 10 ] ,
} )
2018-02-07 07:40:36 -08:00
}
2023-07-13 07:36:38 -07:00
series := [ ] labels . Labels { }
for i := 0 ; i < 1000 ; i ++ {
series = append ( series , labels . FromStrings ( "__name__" , fmt . Sprintf ( "series%v" , i ) ,
"label" , fmt . Sprintf ( "series%v" , i ) ,
"label2" , fmt . Sprintf ( "series%v" , i ) ) )
}
cases := [ ] struct {
name string
response interface { }
} {
{ name : "10000 points no labels" , response : & QueryData {
ResultType : parser . ValueTypeMatrix ,
Result : promql . Matrix {
promql . Series {
Floats : points ,
Metric : labels . EmptyLabels ( ) ,
} ,
} ,
} } ,
{ name : "1000 labels" , response : series } ,
{ name : "1000 series 10 points" , response : & QueryData {
ResultType : parser . ValueTypeMatrix ,
Result : matrix ,
} } ,
}
for _ , c := range cases {
b . Run ( c . name , func ( b * testing . B ) {
b . ReportAllocs ( )
request , err := http . NewRequest ( http . MethodGet , "/does-not-matter" , nil )
require . NoError ( b , err )
b . ResetTimer ( )
api := API { }
api . InstallCodec ( JSONCodec { } )
for n := 0 ; n < b . N ; n ++ {
2023-09-14 09:57:31 -07:00
api . respond ( & testResponseWriter , request , c . response , nil , "" )
2023-07-13 07:36:38 -07:00
}
} )
2018-02-07 07:40:36 -08:00
}
}
2021-02-05 03:45:44 -08:00
func TestGetGlobalURL ( t * testing . T ) {
mustParseURL := func ( t * testing . T , u string ) * url . URL {
parsed , err := url . Parse ( u )
require . NoError ( t , err )
return parsed
}
testcases := [ ] struct {
input * url . URL
opts GlobalURLOptions
expected * url . URL
errorful bool
} {
{
mustParseURL ( t , "http://127.0.0.1:9090" ) ,
GlobalURLOptions {
ListenAddress : "127.0.0.1:9090" ,
Host : "127.0.0.1:9090" ,
Scheme : "http" ,
} ,
mustParseURL ( t , "http://127.0.0.1:9090" ) ,
false ,
} ,
{
mustParseURL ( t , "http://127.0.0.1:9090" ) ,
GlobalURLOptions {
ListenAddress : "127.0.0.1:9090" ,
Host : "prometheus.io" ,
Scheme : "https" ,
} ,
mustParseURL ( t , "https://prometheus.io" ) ,
false ,
} ,
{
2024-09-10 13:32:03 -07:00
mustParseURL ( t , "http://example.com" ) ,
2021-02-05 03:45:44 -08:00
GlobalURLOptions {
ListenAddress : "127.0.0.1:9090" ,
Host : "prometheus.io" ,
Scheme : "https" ,
} ,
2024-09-10 13:32:03 -07:00
mustParseURL ( t , "http://example.com" ) ,
2021-02-05 03:45:44 -08:00
false ,
} ,
{
mustParseURL ( t , "http://localhost:8080" ) ,
GlobalURLOptions {
ListenAddress : "127.0.0.1:9090" ,
Host : "prometheus.io" ,
Scheme : "https" ,
} ,
mustParseURL ( t , "http://prometheus.io:8080" ) ,
false ,
} ,
{
mustParseURL ( t , "http://[::1]:8080" ) ,
GlobalURLOptions {
ListenAddress : "127.0.0.1:9090" ,
Host : "prometheus.io" ,
Scheme : "https" ,
} ,
mustParseURL ( t , "http://prometheus.io:8080" ) ,
false ,
} ,
{
mustParseURL ( t , "http://localhost" ) ,
GlobalURLOptions {
ListenAddress : "127.0.0.1:9090" ,
Host : "prometheus.io" ,
Scheme : "https" ,
} ,
mustParseURL ( t , "http://prometheus.io" ) ,
false ,
} ,
{
mustParseURL ( t , "http://localhost:9091" ) ,
GlobalURLOptions {
ListenAddress : "[::1]:9090" ,
Host : "[::1]" ,
Scheme : "https" ,
} ,
mustParseURL ( t , "http://[::1]:9091" ) ,
false ,
} ,
{
mustParseURL ( t , "http://localhost:9091" ) ,
GlobalURLOptions {
ListenAddress : "[::1]:9090" ,
Host : "[::1]:9090" ,
Scheme : "https" ,
} ,
mustParseURL ( t , "http://[::1]:9091" ) ,
false ,
} ,
}
for i , tc := range testcases {
t . Run ( fmt . Sprintf ( "Test %d" , i ) , func ( t * testing . T ) {
output , err := getGlobalURL ( tc . input , tc . opts )
if tc . errorful {
require . Error ( t , err )
return
}
require . NoError ( t , err )
require . Equal ( t , tc . expected , output )
} )
}
}
2023-03-07 15:28:31 -08:00
2023-01-24 19:30:47 -08:00
type testCodec struct {
2023-02-26 18:27:09 -08:00
contentType MIMEType
2023-01-24 19:30:47 -08:00
canEncode bool
}
2023-02-26 18:27:09 -08:00
func ( t * testCodec ) ContentType ( ) MIMEType {
2023-01-24 19:30:47 -08:00
return t . contentType
}
func ( t * testCodec ) CanEncode ( _ * Response ) bool {
return t . canEncode
}
func ( t * testCodec ) Encode ( _ * Response ) ( [ ] byte , error ) {
return [ ] byte ( fmt . Sprintf ( "response from %v codec" , t . contentType ) ) , nil
}
2023-03-08 17:06:26 -08:00
2023-03-07 15:28:31 -08:00
func TestExtractQueryOpts ( t * testing . T ) {
tests := [ ] struct {
name string
form url . Values
2023-07-03 05:56:06 -07:00
expect promql . QueryOpts
2023-03-07 15:28:31 -08:00
err error
} {
{
name : "with stats all" ,
form : url . Values {
"stats" : [ ] string { "all" } ,
} ,
2023-07-03 05:56:06 -07:00
expect : promql . NewPrometheusQueryOpts ( true , 0 ) ,
2023-03-07 15:28:31 -08:00
err : nil ,
} ,
{
name : "with stats none" ,
form : url . Values {
"stats" : [ ] string { "none" } ,
} ,
2023-07-03 05:56:06 -07:00
expect : promql . NewPrometheusQueryOpts ( false , 0 ) ,
err : nil ,
2023-03-07 15:28:31 -08:00
} ,
{
name : "with lookback delta" ,
form : url . Values {
"stats" : [ ] string { "all" } ,
"lookback_delta" : [ ] string { "30s" } ,
} ,
2023-07-03 05:56:06 -07:00
expect : promql . NewPrometheusQueryOpts ( true , 30 * time . Second ) ,
err : nil ,
2023-03-07 15:28:31 -08:00
} ,
{
name : "with invalid lookback delta" ,
form : url . Values {
"lookback_delta" : [ ] string { "invalid" } ,
} ,
expect : nil ,
err : errors . New ( ` error parsing lookback delta duration: cannot parse "invalid" to a valid duration ` ) ,
} ,
}
for _ , test := range tests {
t . Run ( test . name , func ( t * testing . T ) {
req := & http . Request { Form : test . form }
opts , err := extractQueryOpts ( req )
require . Equal ( t , test . expect , opts )
if test . err == nil {
require . NoError ( t , err )
} else {
2024-10-06 09:35:29 -07:00
require . EqualError ( t , err , test . err . Error ( ) )
2023-03-07 15:28:31 -08:00
}
} )
}
}
2023-10-04 01:36:55 -07:00
// Test query timeout parameter.
func TestQueryTimeout ( t * testing . T ) {
2024-04-29 02:48:24 -07:00
storage := promqltest . LoadedStorage ( t , `
2023-10-04 01:36:55 -07:00
load 1 m
test_metric1 { foo = "bar" } 0 + 100 x100
` )
t . Cleanup ( func ( ) {
_ = storage . Close ( )
} )
now := time . Now ( )
for _ , tc := range [ ] struct {
name string
method string
} {
{
name : "GET method" ,
method : http . MethodGet ,
} ,
{
name : "POST method" ,
method : http . MethodPost ,
} ,
} {
t . Run ( tc . name , func ( t * testing . T ) {
engine := & fakeEngine { }
api := & API {
Queryable : storage ,
QueryEngine : engine ,
ExemplarQueryable : storage . ExemplarQueryable ( ) ,
alertmanagerRetriever : testAlertmanagerRetriever { } . toFactory ( ) ,
flagsMap : sampleFlagMap ,
now : func ( ) time . Time { return now } ,
config : func ( ) config . Config { return samplePrometheusCfg } ,
ready : func ( f http . HandlerFunc ) http . HandlerFunc { return f } ,
}
query := url . Values {
"query" : [ ] string { "2" } ,
"timeout" : [ ] string { "1s" } ,
}
ctx := context . Background ( )
req , err := http . NewRequest ( tc . method , fmt . Sprintf ( "http://example.com?%s" , query . Encode ( ) ) , nil )
require . NoError ( t , err )
req . RemoteAddr = "127.0.0.1:20201"
res := api . query ( req . WithContext ( ctx ) )
assertAPIError ( t , res . err , errorNone )
require . Len ( t , engine . query . execCalls , 1 )
deadline , ok := engine . query . execCalls [ 0 ] . Deadline ( )
require . True ( t , ok )
require . Equal ( t , now . Add ( time . Second ) , deadline )
} )
}
}
// fakeEngine is a fake QueryEngine implementation.
type fakeEngine struct {
	query fakeQuery
}

// NewInstantQuery ignores its arguments and hands back the embedded fakeQuery.
func (e *fakeEngine) NewInstantQuery(_ context.Context, _ storage.Queryable, _ promql.QueryOpts, _ string, _ time.Time) (promql.Query, error) {
	return &e.query, nil
}

// NewRangeQuery ignores its arguments and hands back the embedded fakeQuery.
func (e *fakeEngine) NewRangeQuery(_ context.Context, _ storage.Queryable, _ promql.QueryOpts, _ string, _, _ time.Time, _ time.Duration) (promql.Query, error) {
	return &e.query, nil
}
// fakeQuery is a fake Query implementation that records the context of
// every Exec call so tests can inspect deadlines and cancellation.
type fakeQuery struct {
	query     string
	execCalls []context.Context
}

// Exec records ctx and returns a fixed string-literal result.
func (q *fakeQuery) Exec(ctx context.Context) *promql.Result {
	q.execCalls = append(q.execCalls, ctx)
	return &promql.Result{
		Value: &parser.StringLiteral{Val: "test"},
	}
}

// Close is a no-op.
func (q *fakeQuery) Close() {}

// Statement always returns nil.
func (q *fakeQuery) Statement() parser.Statement { return nil }

// Stats always returns nil.
func (q *fakeQuery) Stats() *stats.Statistics { return nil }

// Cancel is a no-op.
func (q *fakeQuery) Cancel() {}

// String returns the stored query text.
func (q *fakeQuery) String() string { return q.query }