// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scrape

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"sort"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/types/known/timestamppb"
	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	_ "github.com/prometheus/prometheus/discovery/file"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/util/runutil"
	"github.com/prometheus/prometheus/util/testutil"
)

func init() {
	// This can be removed when the default validation scheme in common is updated.
	model.NameValidationScheme = model.UTF8Validation
}
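
// TestPopulateLabels checks that PopulateLabels fills in scrape config
// defaults, applies relabeling, and rejects targets with missing addresses,
// invalid label values, or inconsistent scrape intervals and timeouts.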
func TestPopulateLabels(t *testing.T) {
	cases := []struct {
		in      labels.Labels
		cfg     *config.ScrapeConfig
		res     labels.Labels
		resOrig labels.Labels
		err     string
	}{
		// Regular population of scrape config options.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4:1000",
				"custom":           "value",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.InstanceLabel:       "1.2.3.4:1000",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "value",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				"custom":                  "value",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
		// Pre-define/overwrite scrape config labels.
		// Leave out port and expect it to be defaulted to scheme.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/custom",
				model.JobLabel:            "custom-job",
				model.ScrapeIntervalLabel: "2s",
				model.ScrapeTimeoutLabel:  "2s",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.InstanceLabel:       "1.2.3.4",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/custom",
				model.JobLabel:            "custom-job",
				model.ScrapeIntervalLabel: "2s",
				model.ScrapeTimeoutLabel:  "2s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/custom",
				model.JobLabel:            "custom-job",
				model.ScrapeIntervalLabel: "2s",
				model.ScrapeTimeoutLabel:  "2s",
			}),
		},
		// Provide instance label. HTTPS port default for IPv6.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:  "[::1]",
				model.InstanceLabel: "custom-instance",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "[::1]",
				model.InstanceLabel:       "custom-instance",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "[::1]",
				model.InstanceLabel:       "custom-instance",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
		// Address label missing.
		{
			in: labels.FromStrings("custom", "value"),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "no address",
		},
		// Address label missing, but added in relabelling.
		{
			in: labels.FromStrings("custom", "host:1234"),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
				RelabelConfigs: []*relabel.Config{
					{
						Action:       relabel.Replace,
						Regex:        relabel.MustNewRegexp("(.*)"),
						SourceLabels: model.LabelNames{"custom"},
						Replacement:  "${1}",
						TargetLabel:  string(model.AddressLabel),
					},
				},
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "host:1234",
				model.InstanceLabel:       "host:1234",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "host:1234",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "host:1234",
			}),
		},
		// Address label missing, but added in relabelling.
		{
			in: labels.FromStrings("custom", "host:1234"),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
				RelabelConfigs: []*relabel.Config{
					{
						Action:       relabel.Replace,
						Regex:        relabel.MustNewRegexp("(.*)"),
						SourceLabels: model.LabelNames{"custom"},
						Replacement:  "${1}",
						TargetLabel:  string(model.AddressLabel),
					},
				},
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "host:1234",
				model.InstanceLabel:       "host:1234",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "host:1234",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
				"custom":                  "host:1234",
			}),
		},
		// Invalid UTF-8 in label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4:1000",
				"custom":           "\xbd",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "invalid label value for \"custom\": \"\\xbd\"",
		},
		// Invalid duration in interval label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "2notseconds",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "error parsing scrape interval: unknown unit \"notseconds\" in duration \"2notseconds\"",
		},
		// Invalid duration in timeout label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:       "1.2.3.4:1000",
				model.ScrapeTimeoutLabel: "2notseconds",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "error parsing scrape timeout: unknown unit \"notseconds\" in duration \"2notseconds\"",
		},
		// 0 interval in interval label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "0s",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "scrape interval cannot be 0",
		},
		// 0 duration in timeout label.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:       "1.2.3.4:1000",
				model.ScrapeTimeoutLabel: "0s",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "scrape timeout cannot be 0",
		},
		// Timeout less than interval.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "2s",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res:     labels.EmptyLabels(),
			resOrig: labels.EmptyLabels(),
			err:     "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")",
		},
		// Don't attach default port.
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.InstanceLabel:       "1.2.3.4",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
		// Verify that the default port is not removed (http).
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4:80",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "http",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:80",
				model.InstanceLabel:       "1.2.3.4:80",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:80",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
		// Verify that the default port is not removed (https).
		{
			in: labels.FromMap(map[string]string{
				model.AddressLabel: "1.2.3.4:443",
			}),
			cfg: &config.ScrapeConfig{
				Scheme:         "https",
				MetricsPath:    "/metrics",
				JobName:        "job",
				ScrapeInterval: model.Duration(time.Second),
				ScrapeTimeout:  model.Duration(time.Second),
			},
			res: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:443",
				model.InstanceLabel:       "1.2.3.4:443",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
			resOrig: labels.FromMap(map[string]string{
				model.AddressLabel:        "1.2.3.4:443",
				model.SchemeLabel:         "https",
				model.MetricsPathLabel:    "/metrics",
				model.JobLabel:            "job",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "1s",
			}),
		},
	}
	for _, c := range cases {
		in := c.in.Copy()

		res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg)
		if c.err != "" {
			require.EqualError(t, err, c.err)
		} else {
			require.NoError(t, err)
		}
		require.Equal(t, c.in, in)
		testutil.RequireEqual(t, c.res, res)
		testutil.RequireEqual(t, c.resOrig, orig)
	}
}
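
// loadConfiguration parses the given YAML configuration, failing the test on error.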
func loadConfiguration(t testing.TB, c string) *config.Config {
	t.Helper()

	cfg, err := config.Load(c, promslog.NewNopLogger())
	require.NoError(t, err)
	return cfg
}
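
// noopLoop returns a testLoop whose start and stop functions do nothing.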
func noopLoop() loop {
	return &testLoop{
		startFunc: func(interval, timeout time.Duration, errc chan<- error) {},
		stopFunc:  func() {},
	}
}
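
// TestManagerApplyConfig checks that scrape pools are only reloaded when the
// applied configuration changes, and that an invalid configuration is
// rejected without triggering a reload.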
func TestManagerApplyConfig(t *testing.T) {
	// Valid initial configuration.
	cfgText1 := `
scrape_configs:
- job_name: job1
  static_configs:
  - targets: ["foo:9090"]
`
	// Invalid configuration.
	cfgText2 := `
scrape_configs:
- job_name: job1
  scheme: https
  static_configs:
  - targets: ["foo:9090"]
  tls_config:
    ca_file: /not/existing/ca/file
`
	// Valid configuration.
	cfgText3 := `
scrape_configs:
- job_name: job1
  scheme: https
  static_configs:
  - targets: ["foo:9090"]
`
	var (
		cfg1 = loadConfiguration(t, cfgText1)
		cfg2 = loadConfiguration(t, cfgText2)
		cfg3 = loadConfiguration(t, cfgText3)

		ch = make(chan struct{}, 1)

		testRegistry = prometheus.NewRegistry()
	)

	opts := Options{}
	scrapeManager, err := NewManager(&opts, nil, nil, nil, testRegistry)
	require.NoError(t, err)
	newLoop := func(scrapeLoopOptions) loop {
		ch <- struct{}{}
		return noopLoop()
	}
	sp := &scrapePool{
		appendable: &nopAppendable{},
		activeTargets: map[uint64]*Target{
			1: {},
		},
		loops: map[uint64]loop{
			1: noopLoop(),
		},
		newLoop:     newLoop,
		logger:      nil,
		config:      cfg1.ScrapeConfigs[0],
		client:      http.DefaultClient,
		metrics:     scrapeManager.metrics,
		symbolTable: labels.NewSymbolTable(),
	}
	scrapeManager.scrapePools = map[string]*scrapePool{
		"job1": sp,
	}

	// Apply the initial configuration.
	err = scrapeManager.ApplyConfig(cfg1)
	require.NoError(t, err, "Unable to apply configuration.")
	select {
	case <-ch:
		require.FailNow(t, "Reload happened.")
	default:
	}

	// Apply a configuration for which the reload fails.
	err = scrapeManager.ApplyConfig(cfg2)
	require.Error(t, err, "Expecting error but got none.")
	select {
	case <-ch:
		require.FailNow(t, "Reload happened.")
	default:
	}

	// Apply a configuration for which the reload succeeds.
	err = scrapeManager.ApplyConfig(cfg3)
	require.NoError(t, err, "Unable to apply configuration.")
	select {
	case <-ch:
	default:
		require.FailNow(t, "Reload didn't happen.")
	}

	// Re-applying the same configuration shouldn't trigger a reload.
	err = scrapeManager.ApplyConfig(cfg3)
	require.NoError(t, err, "Unable to apply configuration.")
	select {
	case <-ch:
		require.FailNow(t, "Reload happened.")
	default:
	}
}
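
// TestManagerTargetsUpdates checks that the manager keeps consuming target
// group updates without blocking and triggers a scrape loops reload.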
func TestManagerTargetsUpdates(t *testing.T) {
	opts := Options{}
	testRegistry := prometheus.NewRegistry()
	m, err := NewManager(&opts, nil, nil, nil, testRegistry)
	require.NoError(t, err)

	ts := make(chan map[string][]*targetgroup.Group)
	go m.Run(ts)
	defer m.Stop()

	tgSent := make(map[string][]*targetgroup.Group)
	for x := 0; x < 10; x++ {
		tgSent[strconv.Itoa(x)] = []*targetgroup.Group{
			{
				Source: strconv.Itoa(x),
			},
		}

		select {
		case ts <- tgSent:
		case <-time.After(10 * time.Millisecond):
			require.Fail(t, "Scrape manager's channel remained blocked after the set threshold.")
		}
	}

	m.mtxScrape.Lock()
	tsetActual := m.targetSets
	m.mtxScrape.Unlock()

	// Make sure all updates have been received.
	require.Equal(t, tgSent, tsetActual)

	select {
	case <-m.triggerReload:
	default:
		require.Fail(t, "No scrape loops reload was triggered after targets update.")
	}
}
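
// TestSetOffsetSeed checks that the scrape offset seed is derived from the
// external labels, so servers with different external labels get different seeds.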
func TestSetOffsetSeed(t *testing.T) {
	getConfig := func(prometheus string) *config.Config {
		cfgText := `
global:
  external_labels:
    prometheus: '` + prometheus + `'
`
		cfg := &config.Config{}
		err := yaml.UnmarshalStrict([]byte(cfgText), cfg)
		require.NoError(t, err, "Unable to load YAML config cfgYaml.")

		return cfg
	}

	opts := Options{}
	testRegistry := prometheus.NewRegistry()
	scrapeManager, err := NewManager(&opts, nil, nil, nil, testRegistry)
	require.NoError(t, err)

	// Load the first config.
	cfg1 := getConfig("ha1")
	err = scrapeManager.setOffsetSeed(cfg1.GlobalConfig.ExternalLabels)
	require.NoError(t, err)
	offsetSeed1 := scrapeManager.offsetSeed

	require.NotZero(t, offsetSeed1, "Offset seed has to be a hash of uint64.")

	// Load the second config.
	cfg2 := getConfig("ha2")
	require.NoError(t, scrapeManager.setOffsetSeed(cfg2.GlobalConfig.ExternalLabels))
	offsetSeed2 := scrapeManager.offsetSeed

	require.NotEqual(t, offsetSeed1, offsetSeed2, "Offset seed should not be the same on different set of external labels.")
}
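
// TestManagerScrapePools checks that ScrapePools returns the job names of the
// currently configured scrape pools.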
func TestManagerScrapePools(t *testing.T) {
	cfgText1 := `
scrape_configs:
- job_name: job1
  static_configs:
  - targets: ["foo:9090"]
- job_name: job2
  static_configs:
  - targets: ["foo:9091", "foo:9092"]
`
	cfgText2 := `
scrape_configs:
- job_name: job1
  static_configs:
  - targets: ["foo:9090", "foo:9094"]
- job_name: job3
  static_configs:
  - targets: ["foo:9093"]
`
	var (
		cfg1         = loadConfiguration(t, cfgText1)
		cfg2         = loadConfiguration(t, cfgText2)
		testRegistry = prometheus.NewRegistry()
	)

	reload := func(scrapeManager *Manager, cfg *config.Config) {
		newLoop := func(scrapeLoopOptions) loop {
			return noopLoop()
		}
		scrapeManager.scrapePools = map[string]*scrapePool{}
		for _, sc := range cfg.ScrapeConfigs {
			_, cancel := context.WithCancel(context.Background())
			defer cancel()
			sp := &scrapePool{
				appendable:    &nopAppendable{},
				activeTargets: map[uint64]*Target{},
				loops: map[uint64]loop{
					1: noopLoop(),
				},
				newLoop: newLoop,
				logger:  nil,
				config:  sc,
				client:  http.DefaultClient,
				cancel:  cancel,
			}
			for _, c := range sc.ServiceDiscoveryConfigs {
				staticConfig := c.(discovery.StaticConfig)
				for _, group := range staticConfig {
					for i := range group.Targets {
						sp.activeTargets[uint64(i)] = &Target{}
					}
				}
			}
			scrapeManager.scrapePools[sc.JobName] = sp
		}
	}

	opts := Options{}
	scrapeManager, err := NewManager(&opts, nil, nil, nil, testRegistry)
	require.NoError(t, err)

	reload(scrapeManager, cfg1)
	require.ElementsMatch(t, []string{"job1", "job2"}, scrapeManager.ScrapePools())

	reload(scrapeManager, cfg2)
	require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools())
}
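
// setupTestServer starts an HTTP server that serves toWrite with the given
// Content-Type on the first request and responds with 500 afterwards. The
// server is closed automatically on test cleanup.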
func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server {
	once := sync.Once{}

	server := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fail := true
			once.Do(func() {
				fail = false
				w.Header().Set("Content-Type", typ)
				w.Write(toWrite)
			})

			if fail {
				w.WriteHeader(http.StatusInternalServerError)
			}
		}),
	)

	t.Cleanup(func() { server.Close() })

	return server
}

// TestManagerCTZeroIngestion tests scrape manager for various CT cases.
func TestManagerCTZeroIngestion(t *testing.T) {
	const (
		// The _total suffix is required, otherwise expfmt with OMText will mark the metric as "unknown".
		expectedMetricName        = "expected_metric_total"
		expectedCreatedMetricName = "expected_metric_created"
		expectedSampleValue       = 17.0
	)

	for _, testFormat := range []config.ScrapeProtocol{config.PrometheusProto, config.OpenMetricsText1_0_0} {
		t.Run(fmt.Sprintf("format=%s", testFormat), func(t *testing.T) {
			for _, testWithCT := range []bool{false, true} {
				t.Run(fmt.Sprintf("withCT=%v", testWithCT), func(t *testing.T) {
					for _, testCTZeroIngest := range []bool{false, true} {
						t.Run(fmt.Sprintf("ctZeroIngest=%v", testCTZeroIngest), func(t *testing.T) {
							ctx, cancel := context.WithCancel(context.Background())
							defer cancel()

							sampleTs := time.Now()
							ctTs := time.Time{}
							if testWithCT {
								ctTs = sampleTs.Add(-2 * time.Minute)
							}

							// TODO(bwplotka): Add more types than just counter?
							encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, ctTs)

							app := &collectResultAppender{}
							discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
								EnableCreatedTimestampZeroIngestion: testCTZeroIngest,
								skipOffsetting:                      true,
							}, &collectResultAppendable{app})
							defer scrapeManager.Stop()

							server := setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded)
							serverURL, err := url.Parse(server.URL)
							require.NoError(t, err)

							testConfig := fmt.Sprintf(`
global:
  # Disable regular scrapes.
  scrape_interval: 9999m
  scrape_timeout: 5s

scrape_configs:
- job_name: test
  honor_timestamps: true
  static_configs:
  - targets: ['%s']
`, serverURL.Host)
							applyConfig(t, testConfig, scrapeManager, discoveryManager)

							// Wait for one scrape.
							ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
							defer cancel()
							require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
								app.mtx.Lock()
								defer app.mtx.Unlock()

								// Check if the scrape happened and grab the relevant samples.
								if len(app.resultFloats) > 0 {
									return nil
								}
								return errors.New("expected some float samples, got none")
							}), "after 1 minute")

							// Verify what we got vs expectations around CT injection.
							samples := findSamplesForMetric(app.resultFloats, expectedMetricName)
							if testWithCT && testCTZeroIngest {
								require.Len(t, samples, 2)
								require.Equal(t, 0.0, samples[0].f)
								require.Equal(t, timestamp.FromTime(ctTs), samples[0].t)
								require.Equal(t, expectedSampleValue, samples[1].f)
								require.Equal(t, timestamp.FromTime(sampleTs), samples[1].t)
							} else {
								require.Len(t, samples, 1)
								require.Equal(t, expectedSampleValue, samples[0].f)
								require.Equal(t, timestamp.FromTime(sampleTs), samples[0].t)
							}

							// Verify what we got vs expectations around the additional _created series for OM text.
							// CT zero ingestion also suppresses that _created line.
							createdSeriesSamples := findSamplesForMetric(app.resultFloats, expectedCreatedMetricName)
							if testFormat == config.OpenMetricsText1_0_0 && testWithCT && !testCTZeroIngest {
								// For OM text, when the counter has a CT and the feature flag is disabled, we should see a _created line.
								require.Len(t, createdSeriesSamples, 1)
								// Conversion taken from common/expfmt.writeOpenMetricsFloat.
								// We don't check the CT timestamp as an explicit ts was not implemented in expfmt.Encoder,
								// but it exists in OM: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created
								// We can implement this, but we want to potentially get rid of OM 1.0 CT lines.
								require.Equal(t, float64(timestamppb.New(ctTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f)
							} else {
								require.Empty(t, createdSeriesSamples)
							}
						})
					}
				})
			}
		})
	}
}
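
// prepareTestEncodedCounter encodes a counter sample (with an optional created
// timestamp) in the requested scrape protocol format.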
func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName string, v float64, ts, ct time.Time) (encoded []byte) {
	t.Helper()

	counter := &dto.Counter{Value: proto.Float64(v)}
	if !ct.IsZero() {
		counter.CreatedTimestamp = timestamppb.New(ct)
	}
	ctrType := dto.MetricType_COUNTER
	inputMetric := &dto.MetricFamily{
		Name: proto.String(mName),
		Type: &ctrType,
		Metric: []*dto.Metric{{
			TimestampMs: proto.Int64(timestamp.FromTime(ts)),
			Counter:     counter,
		}},
	}
	switch format {
	case config.PrometheusProto:
		return protoMarshalDelimited(t, inputMetric)
	case config.OpenMetricsText1_0_0:
		buf := &bytes.Buffer{}
		require.NoError(t, expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeOpenMetrics), expfmt.WithCreatedLines(), expfmt.WithUnit()).Encode(inputMetric))
		_, _ = buf.WriteString("# EOF")

		t.Log("produced OM text to expose:", buf.String())
		return buf.Bytes()
	default:
		t.Fatalf("not implemented format: %v", format)
		return nil
	}
}
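
// findSamplesForMetric returns the float samples whose metric name matches metricName.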
func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) {
	for _, f := range floats {
		if f.metric.Get(model.MetricNameLabel) == metricName {
			ret = append(ret, f)
		}
	}
	return ret
}

// generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram,
// but in the form of dto.Histogram.
func generateTestHistogram(i int) *dto.Histogram {
	helper := tsdbutil.GenerateTestHistogram(int64(i))
	h := &dto.Histogram{}
	h.SampleCount = proto.Uint64(helper.Count)
	h.SampleSum = proto.Float64(helper.Sum)
	h.Schema = proto.Int32(helper.Schema)
	h.ZeroThreshold = proto.Float64(helper.ZeroThreshold)
	h.ZeroCount = proto.Uint64(helper.ZeroCount)
	h.PositiveSpan = make([]*dto.BucketSpan, len(helper.PositiveSpans))
	for i, span := range helper.PositiveSpans {
		h.PositiveSpan[i] = &dto.BucketSpan{
			Offset: proto.Int32(span.Offset),
			Length: proto.Uint32(span.Length),
		}
	}
	h.PositiveDelta = helper.PositiveBuckets
	h.NegativeSpan = make([]*dto.BucketSpan, len(helper.NegativeSpans))
	for i, span := range helper.NegativeSpans {
		h.NegativeSpan[i] = &dto.BucketSpan{
			Offset: proto.Int32(span.Offset),
			Length: proto.Uint32(span.Length),
		}
	}
	h.NegativeDelta = helper.NegativeBuckets
	return h
}
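
// TestManagerCTZeroIngestionHistogram tests scrape manager for CT cases with
// native histograms.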
func TestManagerCTZeroIngestionHistogram(t *testing.T) {
	const mName = "expected_histogram"

	for _, tc := range []struct {
		name                  string
		inputHistSample       *dto.Histogram
		enableCTZeroIngestion bool
	}{
		{
			name: "disabled with CT on histogram",
			inputHistSample: func() *dto.Histogram {
				h := generateTestHistogram(0)
				h.CreatedTimestamp = timestamppb.Now()
				return h
			}(),
			enableCTZeroIngestion: false,
		},
		{
			name: "enabled with CT on histogram",
			inputHistSample: func() *dto.Histogram {
				h := generateTestHistogram(0)
				h.CreatedTimestamp = timestamppb.Now()
				return h
			}(),
			enableCTZeroIngestion: true,
		},
		{
			name: "enabled without CT on histogram",
			inputHistSample: func() *dto.Histogram {
				h := generateTestHistogram(0)
				return h
			}(),
			enableCTZeroIngestion: true,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			app := &collectResultAppender{}
			discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
				EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion,
				EnableNativeHistogramsIngestion:     true,
				skipOffsetting:                      true,
			}, &collectResultAppendable{app})
			defer scrapeManager.Stop()

			once := sync.Once{}
			// Start a fake HTTP target that allows one scrape only.
			server := httptest.NewServer(
				http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					fail := true
					once.Do(func() {
						fail = false
						w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)

						ctrType := dto.MetricType_HISTOGRAM
						w.Write(protoMarshalDelimited(t, &dto.MetricFamily{
							Name:   proto.String(mName),
							Type:   &ctrType,
							Metric: []*dto.Metric{{Histogram: tc.inputHistSample}},
						}))
					})

					if fail {
						w.WriteHeader(http.StatusInternalServerError)
					}
				}),
			)
			defer server.Close()

			serverURL, err := url.Parse(server.URL)
			require.NoError(t, err)

			testConfig := fmt.Sprintf(`
global:
  # Disable regular scrapes.
  scrape_interval: 9999m
  scrape_timeout: 5s

scrape_configs:
- job_name: test
  static_configs:
  - targets: ['%s']
`, serverURL.Host)
			applyConfig(t, testConfig, scrapeManager, discoveryManager)

			var got []histogramSample

			// Wait for one scrape.
			ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
			defer cancel()
			require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
				app.mtx.Lock()
				defer app.mtx.Unlock()

				// Check if the scrape happened and grab the relevant histograms; they have to be there - or it's a bug
				// and it's not worth waiting.
				for _, h := range app.resultHistograms {
					if h.metric.Get(model.MetricNameLabel) == mName {
						got = append(got, h)
					}
				}
				if len(app.resultHistograms) > 0 {
					return nil
				}
				return errors.New("expected some histogram samples, got none")
			}), "after 1 minute")

			// Check for zero samples, assuming we only ever injected one histogram sample.
			// Did it contain CT to inject? If yes, was CT zero ingestion enabled?
			if tc.inputHistSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion {
				require.Len(t, got, 2)

				// Zero sample.
				require.Equal(t, histogram.Histogram{}, *got[0].h)
				// Quick soft check to make sure it's the same sample or at least not zero.
				require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].h.Sum)
				return
			}

			// Expect only one, valid sample.
			require.Len(t, got, 1)
			// Quick soft check to make sure it's the same sample or at least not zero.
			require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].h.Sum)
		})
	}
}
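
// TestUnregisterMetrics checks that UnregisterMetrics releases all manager
// metrics from the registry.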
func TestUnregisterMetrics(t *testing.T) {
	reg := prometheus.NewRegistry()
	// Check that all metrics can be unregistered, allowing a second manager to be created.
	for i := 0; i < 2; i++ {
		opts := Options{}
		manager, err := NewManager(&opts, nil, nil, nil, reg)
		require.NotNil(t, manager)
		require.NoError(t, err)
		// Unregister all metrics.
		manager.UnregisterMetrics()
	}
}
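
// applyConfig applies the given YAML configuration to both the scrape manager
// and the discovery manager, failing the test on error.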
func applyConfig(
	t *testing.T,
	config string,
	scrapeManager *Manager,
	discoveryManager *discovery.Manager,
) {
	t.Helper()

	cfg := loadConfiguration(t, config)
	require.NoError(t, scrapeManager.ApplyConfig(cfg))

	c := make(map[string]discovery.Configs)
	scfgs, err := cfg.GetScrapeConfigs()
	require.NoError(t, err)
	for _, v := range scfgs {
		c[v.JobName] = v.ServiceDiscoveryConfigs
	}
	require.NoError(t, discoveryManager.ApplyConfig(c))
}
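
// runManagers starts a discovery manager and a scrape manager wired together
// for tests. When opts or app are nil, default options and a no-op appendable
// are used.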
func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.Appendable) (*discovery.Manager, *Manager) {
	t.Helper()

	if opts == nil {
		opts = &Options{}
	}
	opts.DiscoveryReloadInterval = model.Duration(100 * time.Millisecond)

	if app == nil {
		app = nopAppendable{}
	}

	reg := prometheus.NewRegistry()
	sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
	require.NoError(t, err)
	discoveryManager := discovery.NewManager(
		ctx,
		promslog.NewNopLogger(),
		reg,
		sdMetrics,
		discovery.Updatert(100*time.Millisecond),
	)
	scrapeManager, err := NewManager(
		opts,
		nil,
		nil,
		app,
		prometheus.NewRegistry(),
	)
	require.NoError(t, err)
	go discoveryManager.Run()
	go scrapeManager.Run(discoveryManager.SyncCh())
	return discoveryManager, scrapeManager
}
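
// writeIntoFile writes content into a temporary file matching filePattern and
// returns the file.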
func writeIntoFile(t *testing.T, content, filePattern string) *os.File {
	t.Helper()

	file, err := os.CreateTemp("", filePattern)
	require.NoError(t, err)
	_, err = file.WriteString(content)
	require.NoError(t, err)
	return file
}
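
// requireTargets waits for the job's active targets to match expectedTargets.
// With waitToAppear set, the job may not exist yet; otherwise a missing job
// fails the test immediately.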
func requireTargets(
	t *testing.T,
	scrapeManager *Manager,
	jobName string,
	waitToAppear bool,
	expectedTargets []string,
) {
	t.Helper()

	require.Eventually(t, func() bool {
		targets, ok := scrapeManager.TargetsActive()[jobName]
		if !ok {
			if waitToAppear {
				return false
			}
			t.Fatalf("job %s shouldn't be dropped", jobName)
		}
		if expectedTargets == nil {
			return targets == nil
		}
		if len(targets) != len(expectedTargets) {
			return false
		}
		sTargets := []string{}
		for _, t := range targets {
			sTargets = append(sTargets, t.String())
		}
		sort.Strings(expectedTargets)
		sort.Strings(sTargets)
		for i, t := range sTargets {
			if t != expectedTargets[i] {
				return false
			}
		}
		return true
	}, 1*time.Second, 100*time.Millisecond)
}

// TestTargetDisappearsAfterProviderRemoved makes sure that when a provider is dropped, (only) its targets are dropped.
func TestTargetDisappearsAfterProviderRemoved(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	myJob := "my-job"
	myJobSDTargetURL := "my:9876"
	myJobStaticTargetURL := "my:5432"

	sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, myJobSDTargetURL)
	sDFile := writeIntoFile(t, sdFileContent, "*targets.json")

	baseConfig := `
scrape_configs:
- job_name: %s
  static_configs:
  - targets: ['%s']
  file_sd_configs:
  - files: ['%s']
`

	discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
	defer scrapeManager.Stop()

	applyConfig(
		t,
		fmt.Sprintf(
			baseConfig,
			myJob,
			myJobStaticTargetURL,
			sDFile.Name(),
		),
		scrapeManager,
		discoveryManager,
	)
	// Make sure the job's targets are taken into account.
	requireTargets(
		t,
		scrapeManager,
		myJob,
		true,
		[]string{
			fmt.Sprintf("http://%s/metrics", myJobSDTargetURL),
			fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL),
		},
	)

	// Apply a new config where a provider is removed.
	baseConfig = `
scrape_configs:
- job_name: %s
  static_configs:
  - targets: ['%s']
`
	applyConfig(
		t,
		fmt.Sprintf(
			baseConfig,
			myJob,
			myJobStaticTargetURL,
		),
		scrapeManager,
		discoveryManager,
	)
	// Make sure the corresponding target was dropped.
	requireTargets(
		t,
		scrapeManager,
		myJob,
		false,
		[]string{
			fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL),
		},
	)

	// Apply a new config with no providers.
	baseConfig = `
scrape_configs:
- job_name: %s
`
	applyConfig(
		t,
		fmt.Sprintf(
			baseConfig,
			myJob,
		),
		scrapeManager,
		discoveryManager,
	)
	// Make sure the corresponding target was dropped.
	requireTargets(
		t,
		scrapeManager,
		myJob,
		false,
		nil,
	)
}

// TestOnlyProviderStaleTargetsAreDropped makes sure that when a job has only one provider with multiple targets
// and when the provider can no longer discover some of those targets, only those stale targets are dropped.
func TestOnlyProviderStaleTargetsAreDropped(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	jobName := "my-job"
	jobTarget1URL := "foo:9876"
	jobTarget2URL := "foo:5432"

	sdFile1Content := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTarget1URL)
	sdFile2Content := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTarget2URL)
	sDFile1 := writeIntoFile(t, sdFile1Content, "*targets.json")
	sDFile2 := writeIntoFile(t, sdFile2Content, "*targets.json")

	baseConfig := `
scrape_configs:
- job_name: %s
  file_sd_configs:
  - files: ['%s', '%s']
`
	discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
	defer scrapeManager.Stop()

	applyConfig(
		t,
		fmt.Sprintf(baseConfig, jobName, sDFile1.Name(), sDFile2.Name()),
		scrapeManager,
		discoveryManager,
	)
	// Make sure the job's targets are taken into account.
	requireTargets(
		t,
		scrapeManager,
		jobName,
		true,
		[]string{
			fmt.Sprintf("http://%s/metrics", jobTarget1URL),
			fmt.Sprintf("http://%s/metrics", jobTarget2URL),
		},
	)

	// Apply the same config for the same job but with a non-existing file to make the provider
	// unable to discover some targets.
	applyConfig(
		t,
		fmt.Sprintf(baseConfig, jobName, sDFile1.Name(), "/idontexistdoi.json"),
		scrapeManager,
		discoveryManager,
	)
	// The old target should get dropped.
	requireTargets(
		t,
		scrapeManager,
		jobName,
		false,
		[]string{fmt.Sprintf("http://%s/metrics", jobTarget1URL)},
	)
}

// TestProviderStaleTargetsAreDropped makes sure that when a job has only one provider and when that provider
// should no longer discover targets, the targets of that provider are dropped.
// See: https://github.com/prometheus/prometheus/issues/12858
func TestProviderStaleTargetsAreDropped(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	jobName := "my-job"
	jobTargetURL := "foo:9876"

	sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTargetURL)
	sDFile := writeIntoFile(t, sdFileContent, "*targets.json")

	baseConfig := `
scrape_configs:
- job_name: %s
  file_sd_configs:
  - files: ['%s']
`
	discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
	defer scrapeManager.Stop()

	applyConfig(
		t,
		fmt.Sprintf(baseConfig, jobName, sDFile.Name()),
		scrapeManager,
		discoveryManager,
	)
	// Make sure the job's targets are taken into account.
	requireTargets(
		t,
		scrapeManager,
		jobName,
		true,
		[]string{
			fmt.Sprintf("http://%s/metrics", jobTargetURL),
		},
	)

	// Apply the same config for the same job but with a non-existing file to make the provider
	// unable to discover some targets.
	applyConfig(
		t,
		fmt.Sprintf(baseConfig, jobName, "/idontexistdoi.json"),
		scrapeManager,
		discoveryManager,
	)
	// The old target should get dropped.
	requireTargets(
		t,
		scrapeManager,
		jobName,
		false,
		nil,
	)
}

// TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers, when one of them should no
// longer discover targets, only the stale targets of that provider are dropped.
func TestOnlyStaleTargetsAreDropped(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	myJob := "my-job"
	myJobSDTargetURL := "my:9876"
	myJobStaticTargetURL := "my:5432"
	otherJob := "other-job"
	otherJobTargetURL := "other:1234"

	sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, myJobSDTargetURL)
	sDFile := writeIntoFile(t, sdFileContent, "*targets.json")

	baseConfig := `
scrape_configs:
- job_name: %s
  static_configs:
  - targets: ['%s']
  file_sd_configs:
  - files: ['%s']
- job_name: %s
  static_configs:
  - targets: ['%s']
`
	discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
	defer scrapeManager.Stop()

	// Apply the initial config with an existing file.
	applyConfig(
		t,
		fmt.Sprintf(
			baseConfig,
			myJob,
			myJobStaticTargetURL,
			sDFile.Name(),
			otherJob,
			otherJobTargetURL,
		),
		scrapeManager,
		discoveryManager,
	)
	// Make sure the jobs' targets are taken into account.
	requireTargets(
		t,
		scrapeManager,
		myJob,
		true,
		[]string{
			fmt.Sprintf("http://%s/metrics", myJobSDTargetURL),
			fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL),
		},
	)
	requireTargets(
		t,
		scrapeManager,
		otherJob,
		true,
		[]string{fmt.Sprintf("http://%s/metrics", otherJobTargetURL)},
	)

	// Apply the same config with a non-existing file for myJob.
	applyConfig(
		t,
		fmt.Sprintf(
			baseConfig,
			myJob,
			myJobStaticTargetURL,
			"/idontexistdoi.json",
			otherJob,
			otherJobTargetURL,
		),
		scrapeManager,
		discoveryManager,
	)
	// Only the SD target should get dropped for myJob.
	requireTargets(
		t,
		scrapeManager,
		myJob,
		false,
		[]string{
			fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL),
		},
	)
	// The otherJob should keep its target.
	requireTargets(
		t,
		scrapeManager,
		otherJob,
		false,
		[]string{fmt.Sprintf("http://%s/metrics", otherJobTargetURL)},
	)
}