// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scrape
package scrape

import (
	"bytes"
	"compress/gzip"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"testing"
	"text/template"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/google/go-cmp/cmp"
	"github.com/grafana/regexp"
	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	dto "github.com/prometheus/client_model/go"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/model/textparse"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/util/logging"
	"github.com/prometheus/prometheus/util/pool"
	"github.com/prometheus/prometheus/util/teststorage"
	"github.com/prometheus/prometheus/util/testutil"
)
func TestMain(m *testing.M) {
	testutil.TolerantVerifyLeak(m)
}

func newTestRegistryAndScrapeMetrics(t testing.TB) (*prometheus.Registry, *scrapeMetrics) {
	reg := prometheus.NewRegistry()
	metrics, err := newScrapeMetrics(reg)
	require.NoError(t, err)
	return reg, metrics
}

func newTestScrapeMetrics(t testing.TB) *scrapeMetrics {
	_, metrics := newTestRegistryAndScrapeMetrics(t)
	return metrics
}
func TestNewScrapePool(t *testing.T) {
	var (
		app   = &nopAppendable{}
		cfg   = &config.ScrapeConfig{}
		sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
	)

	a, ok := sp.appendable.(*nopAppendable)
	require.True(t, ok, "Failure to append.")
	require.Equal(t, app, a, "Wrong sample appender.")
	require.Equal(t, cfg, sp.config, "Wrong scrape config.")
	require.NotNil(t, sp.newLoop, "newLoop function not initialized.")
}
func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) {
	// Test with the default OutOfOrderTimeWindow (0).
	t.Run("Out-Of-Order Sample Disabled", func(t *testing.T) {
		s := teststorage.New(t)
		defer s.Close()

		runScrapeLoopTest(t, s, false)
	})

	// Test with a specific OutOfOrderTimeWindow (600000).
	t.Run("Out-Of-Order Sample Enabled", func(t *testing.T) {
		s := teststorage.New(t, 600000)
		defer s.Close()

		runScrapeLoopTest(t, s, true)
	})
}

func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrder bool) {
	// Create an appender for adding samples to the storage.
	app := s.Appender(context.Background())
	capp := &collectResultAppender{next: app}
	sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)

	// Current time for generating timestamps.
	now := time.Now()

	// Calculate timestamps for the samples based on the current time.
	now = now.Truncate(time.Minute) // Round down the now timestamp to the nearest minute.
	timestampInorder1 := now
	timestampOutOfOrder := now.Add(-5 * time.Minute)
	timestampInorder2 := now.Add(5 * time.Minute)

	slApp := sl.appender(context.Background())
	_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", timestampInorder1)
	require.NoError(t, err)

	_, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder)
	require.NoError(t, err)

	_, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 3`), "text/plain", timestampInorder2)
	require.NoError(t, err)

	require.NoError(t, slApp.Commit())

	// Query the samples back from the storage.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
	require.NoError(t, err)
	defer q.Close()

	// Use a matcher to filter the metric name.
	series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a"))

	var results []floatSample
	for series.Next() {
		it := series.At().Iterator(nil)
		for it.Next() == chunkenc.ValFloat {
			t, v := it.At()
			results = append(results, floatSample{
				metric: series.At().Labels(),
				t:      t,
				f:      v,
			})
		}
		require.NoError(t, it.Err())
	}
	require.NoError(t, series.Err())

	// Define the expected results.
	want := []floatSample{
		{
			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
			t:      timestamp.FromTime(timestampInorder1),
			f:      1,
		},
		{
			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
			t:      timestamp.FromTime(timestampInorder2),
			f:      3,
		},
	}

	if expectOutOfOrder {
		require.NotEqual(t, want, results, "Expected results to include out-of-order sample:\n%s", results)
	} else {
		require.Equal(t, want, results, "Appended samples not as expected:\n%s", results)
	}
}
func TestDroppedTargetsList(t *testing.T) {
	var (
		app = &nopAppendable{}
		cfg = &config.ScrapeConfig{
			JobName:        "dropMe",
			ScrapeInterval: model.Duration(1),
			RelabelConfigs: []*relabel.Config{
				{
					Action:       relabel.Drop,
					Regex:        relabel.MustNewRegexp("dropMe"),
					SourceLabels: model.LabelNames{"job"},
				},
			},
		}
		tgs = []*targetgroup.Group{
			{
				Targets: []model.LabelSet{
					{model.AddressLabel: "127.0.0.1:9090"},
					{model.AddressLabel: "127.0.0.1:9091"},
				},
			},
		}
		sp, _                  = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
		expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
		expectedLength         = 2
	)
	sp.Sync(tgs)
	sp.Sync(tgs)
	require.Len(t, sp.droppedTargets, expectedLength)
	require.Equal(t, expectedLength, sp.droppedTargetsCount)
	require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels().String())

	// Check that the count is still correct when we don't retain all dropped targets.
	sp.config.KeepDroppedTargets = 1
	sp.Sync(tgs)
	require.Len(t, sp.droppedTargets, 1)
	require.Equal(t, expectedLength, sp.droppedTargetsCount)
}
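
// For reference, the relabel config used above corresponds to this scrape
// config stanza (a sketch; field defaults such as separator are spelled out):
//
//	relabel_configs:
//	  - source_labels: [job]
//	    regex: dropMe
//	    action: drop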
// TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
// even when new labels don't affect the target `hash`.
func TestDiscoveredLabelsUpdate(t *testing.T) {
	sp := &scrapePool{
		metrics: newTestScrapeMetrics(t),
	}

	// These are used when syncing, so they are needed to avoid a panic.
	sp.config = &config.ScrapeConfig{
		ScrapeInterval: model.Duration(1),
		ScrapeTimeout:  model.Duration(1),
	}
	sp.activeTargets = make(map[uint64]*Target)
	t1 := &Target{
		discoveredLabels: labels.FromStrings("label", "name"),
	}
	sp.activeTargets[t1.hash()] = t1

	t2 := &Target{
		discoveredLabels: labels.FromStrings("labelNew", "nameNew"),
	}
	sp.sync([]*Target{t2})

	require.Equal(t, t2.DiscoveredLabels(), sp.activeTargets[t1.hash()].DiscoveredLabels())
}
2016-02-28 00:51:02 -08:00
type testLoop struct {
2020-07-30 05:20:24 -07:00
startFunc func ( interval , timeout time . Duration , errc chan <- error )
stopFunc func ( )
forcedErr error
forcedErrMtx sync . Mutex
2020-09-30 11:21:32 -07:00
runOnce bool
2021-08-31 08:37:32 -07:00
interval time . Duration
timeout time . Duration
2016-02-28 00:51:02 -08:00
}
2024-09-09 18:41:53 -07:00
func ( l * testLoop ) setScrapeFailureLogger ( * logging . JSONFileLogger ) {
2024-08-26 02:41:56 -07:00
}
2021-08-31 08:37:32 -07:00
func ( l * testLoop ) run ( errc chan <- error ) {
2020-09-30 11:21:32 -07:00
if l . runOnce {
panic ( "loop must be started only once" )
}
l . runOnce = true
2021-08-31 08:37:32 -07:00
l . startFunc ( l . interval , l . timeout , errc )
2016-02-28 00:51:02 -08:00
}
2020-03-20 09:43:26 -07:00
func ( l * testLoop ) disableEndOfRunStalenessMarkers ( ) {
}
2020-07-30 05:20:24 -07:00
func ( l * testLoop ) setForcedError ( err error ) {
l . forcedErrMtx . Lock ( )
defer l . forcedErrMtx . Unlock ( )
l . forcedErr = err
}
func ( l * testLoop ) getForcedError ( ) error {
l . forcedErrMtx . Lock ( )
defer l . forcedErrMtx . Unlock ( )
return l . forcedErr
}
2016-02-28 00:51:02 -08:00
func ( l * testLoop ) stop ( ) {
l . stopFunc ( )
}
2020-01-22 04:13:47 -08:00
func ( l * testLoop ) getCache ( ) * scrapeCache {
return nil
}
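
// Compile-time assertion that testLoop satisfies the unexported loop
// interface used by scrapePool (a sketch; the tests below already rely on
// this by storing *testLoop values in map[uint64]loop).
var _ loop = (*testLoop)(nil)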
func TestScrapePoolStop(t *testing.T) {
	sp := &scrapePool{
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		cancel:        func() {},
		client:        http.DefaultClient,
		metrics:       newTestScrapeMetrics(t),
	}
	var mtx sync.Mutex
	stopped := map[uint64]bool{}
	numTargets := 20

	// Stopping the scrape pool must call stop() on all scrape loops and clean
	// up both the loops and their respective targets. It must wait until each
	// loop's stop function has returned before returning itself.

	for i := 0; i < numTargets; i++ {
		t := &Target{
			labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
		}
		l := &testLoop{}
		d := time.Duration((i+1)*20) * time.Millisecond
		l.stopFunc = func() {
			time.Sleep(d)

			mtx.Lock()
			stopped[t.hash()] = true
			mtx.Unlock()
		}

		sp.activeTargets[t.hash()] = t
		sp.loops[t.hash()] = l
	}

	done := make(chan struct{})
	stopTime := time.Now()

	go func() {
		sp.stop()
		close(done)
	}()

	select {
	case <-time.After(5 * time.Second):
		require.Fail(t, "scrapeLoop.stop() did not return as expected")
	case <-done:
		// This should have taken at least as long as the last target slept.
		require.GreaterOrEqual(t, time.Since(stopTime), time.Duration(numTargets*20)*time.Millisecond, "scrapeLoop.stop() exited before all targets stopped")
	}

	mtx.Lock()
	require.Len(t, stopped, numTargets, "Unexpected number of stopped loops")
	mtx.Unlock()

	require.Empty(t, sp.activeTargets, "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
	require.Empty(t, sp.loops, "Loops were not cleared on stopping: %d left", len(sp.loops))
}
func TestScrapePoolReload(t *testing.T) {
	var mtx sync.Mutex
	numTargets := 20

	stopped := map[uint64]bool{}

	reloadCfg := &config.ScrapeConfig{
		ScrapeInterval: model.Duration(3 * time.Second),
		ScrapeTimeout:  model.Duration(2 * time.Second),
	}
	// On starting to run, new loops created on reload check whether their preceding
	// equivalents have been stopped.
	newLoop := func(opts scrapeLoopOptions) loop {
		l := &testLoop{interval: time.Duration(reloadCfg.ScrapeInterval), timeout: time.Duration(reloadCfg.ScrapeTimeout)}
		l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
			require.Equal(t, 3*time.Second, interval, "Unexpected scrape interval")
			require.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout")

			mtx.Lock()
			targetScraper := opts.scraper.(*targetScraper)
			require.True(t, stopped[targetScraper.hash()], "Scrape loop for %v not stopped yet", targetScraper)
			mtx.Unlock()
		}
		return l
	}

	reg, metrics := newTestRegistryAndScrapeMetrics(t)
	sp := &scrapePool{
		appendable:    &nopAppendable{},
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		newLoop:       newLoop,
		logger:        nil,
		client:        http.DefaultClient,
		metrics:       metrics,
		symbolTable:   labels.NewSymbolTable(),
	}

	// Reloading a scrape pool with a new scrape configuration must stop all scrape
	// loops and start new ones. A new loop must not be started before the preceding
	// one terminated.

	for i := 0; i < numTargets; i++ {
		labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i))
		t := &Target{
			labels:           labels,
			discoveredLabels: labels,
		}
		l := &testLoop{}
		d := time.Duration((i+1)*20) * time.Millisecond
		l.stopFunc = func() {
			time.Sleep(d)

			mtx.Lock()
			stopped[t.hash()] = true
			mtx.Unlock()
		}

		sp.activeTargets[t.hash()] = t
		sp.loops[t.hash()] = l
	}
	done := make(chan struct{})

	beforeTargets := map[uint64]*Target{}
	for h, t := range sp.activeTargets {
		beforeTargets[h] = t
	}

	reloadTime := time.Now()

	go func() {
		sp.reload(reloadCfg)
		close(done)
	}()

	select {
	case <-time.After(5 * time.Second):
		require.FailNow(t, "scrapeLoop.reload() did not return as expected")
	case <-done:
		// This should have taken at least as long as the last target slept.
		require.GreaterOrEqual(t, time.Since(reloadTime), time.Duration(numTargets*20)*time.Millisecond, "scrapeLoop.stop() exited before all targets stopped")
	}

	mtx.Lock()
	require.Len(t, stopped, numTargets, "Unexpected number of stopped loops")
	mtx.Unlock()

	require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly")
	require.Len(t, sp.loops, numTargets, "Unexpected number of stopped loops after reload")

	got, err := gatherLabels(reg, "prometheus_target_reload_length_seconds")
	require.NoError(t, err)
	expectedName, expectedValue := "interval", "3s"
	require.Equal(t, [][]*dto.LabelPair{{{Name: &expectedName, Value: &expectedValue}}}, got)
	require.Equal(t, 1.0, prom_testutil.ToFloat64(sp.metrics.targetScrapePoolReloads))
}
func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
	reloadCfg := &config.ScrapeConfig{
		ScrapeInterval: model.Duration(3 * time.Second),
		ScrapeTimeout:  model.Duration(2 * time.Second),
	}
	newLoop := func(opts scrapeLoopOptions) loop {
		l := &testLoop{interval: opts.interval, timeout: opts.timeout}
		l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
			require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval")
			require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout")
		}
		return l
	}
	reg, metrics := newTestRegistryAndScrapeMetrics(t)
	sp := &scrapePool{
		appendable: &nopAppendable{},
		activeTargets: map[uint64]*Target{
			1: {
				labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"),
			},
		},
		loops: map[uint64]loop{
			1: noopLoop(),
		},
		newLoop:     newLoop,
		logger:      nil,
		client:      http.DefaultClient,
		metrics:     metrics,
		symbolTable: labels.NewSymbolTable(),
	}

	err := sp.reload(reloadCfg)
	if err != nil {
		t.Fatalf("unable to reload configuration: %s", err)
	}
	// Check that the reload metric is labeled with the pool interval, not the overridden interval.
	got, err := gatherLabels(reg, "prometheus_target_reload_length_seconds")
	require.NoError(t, err)
	expectedName, expectedValue := "interval", "3s"
	require.Equal(t, [][]*dto.LabelPair{{{Name: &expectedName, Value: &expectedValue}}}, got)
}

// gatherLabels gathers metrics from the provided Gatherer, filters for the
// family with the given name, and returns all of its label name/value sets.
func gatherLabels(g prometheus.Gatherer, familyName string) ([][]*dto.LabelPair, error) {
	families, err := g.Gather()
	if err != nil {
		return nil, err
	}
	ret := make([][]*dto.LabelPair, 0)
	for _, f := range families {
		if f.GetName() == familyName {
			for _, m := range f.GetMetric() {
				ret = append(ret, m.GetLabel())
			}
			break
		}
	}
	return ret, nil
}
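
// Example usage (a sketch, based on the reload tests above): after a reload
// with a 3s interval,
//
//	pairs, err := gatherLabels(reg, "prometheus_target_reload_length_seconds")
//
// returns one label set per metric in the family, here [[{interval="3s"}]].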
func TestScrapePoolTargetLimit(t *testing.T) {
	var wg sync.WaitGroup
	// On starting to run, new loops created on reload check whether their preceding
	// equivalents have been stopped.
	newLoop := func(opts scrapeLoopOptions) loop {
		wg.Add(1)
		l := &testLoop{
			startFunc: func(interval, timeout time.Duration, errc chan<- error) {
				wg.Done()
			},
			stopFunc: func() {},
		}
		return l
	}
	sp := &scrapePool{
		appendable:    &nopAppendable{},
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		newLoop:       newLoop,
		logger:        promslog.NewNopLogger(),
		client:        http.DefaultClient,
		metrics:       newTestScrapeMetrics(t),
		symbolTable:   labels.NewSymbolTable(),
	}

	tgs := []*targetgroup.Group{}
	for i := 0; i < 50; i++ {
		tgs = append(tgs,
			&targetgroup.Group{
				Targets: []model.LabelSet{
					{model.AddressLabel: model.LabelValue(fmt.Sprintf("127.0.0.1:%d", 9090+i))},
				},
			},
		)
	}

	var limit uint
	reloadWithLimit := func(l uint) {
		limit = l
		require.NoError(t, sp.reload(&config.ScrapeConfig{
			ScrapeInterval: model.Duration(3 * time.Second),
			ScrapeTimeout:  model.Duration(2 * time.Second),
			TargetLimit:    l,
		}))
	}

	var targets int
	loadTargets := func(n int) {
		targets = n
		sp.Sync(tgs[:n])
	}

	validateIsRunning := func() {
		wg.Wait()
		for _, l := range sp.loops {
			require.True(t, l.(*testLoop).runOnce, "loop should be running")
		}
	}

	validateErrorMessage := func(shouldErr bool) {
		for _, l := range sp.loops {
			lerr := l.(*testLoop).getForcedError()
			if shouldErr {
				require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit)
				require.EqualError(t, lerr, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit))
			} else {
				require.NoError(t, lerr)
			}
		}
	}

	reloadWithLimit(0)
	loadTargets(50)
	validateIsRunning()

	// Simulate an initial config with a limit.
	sp.config.TargetLimit = 30
	limit = 30
	loadTargets(50)
	validateIsRunning()
	validateErrorMessage(true)

	reloadWithLimit(50)
	validateIsRunning()
	validateErrorMessage(false)

	reloadWithLimit(40)
	validateIsRunning()
	validateErrorMessage(true)

	loadTargets(30)
	validateIsRunning()
	validateErrorMessage(false)

	loadTargets(40)
	validateIsRunning()
	validateErrorMessage(false)

	loadTargets(41)
	validateIsRunning()
	validateErrorMessage(true)

	reloadWithLimit(0)
	validateIsRunning()
	validateErrorMessage(false)

	reloadWithLimit(51)
	validateIsRunning()
	validateErrorMessage(false)

	tgs = append(tgs,
		&targetgroup.Group{
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue("127.0.0.1:1090")},
			},
		},
		&targetgroup.Group{
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue("127.0.0.1:1090")},
			},
		},
	)

	sp.Sync(tgs)
	validateIsRunning()
	validateErrorMessage(false)
}
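
// For reference, the TargetLimit field exercised above maps to this scrape
// config stanza (a sketch; job_name is a placeholder):
//
//	scrape_configs:
//	  - job_name: example
//	    target_limit: 40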
func TestScrapePoolAppender(t *testing.T) {
	cfg := &config.ScrapeConfig{}
	app := &nopAppendable{}
	sp, _ := newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))

	loop := sp.newLoop(scrapeLoopOptions{
		target: &Target{},
	})
	appl, ok := loop.(*scrapeLoop)
	require.True(t, ok, "Expected scrapeLoop but got %T", loop)

	wrapped := appender(appl.appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax)

	tl, ok := wrapped.(*timeLimitAppender)
	require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)

	_, ok = tl.Appender.(nopAppender)
	require.True(t, ok, "Expected base appender but got %T", tl.Appender)

	sampleLimit := 100
	loop = sp.newLoop(scrapeLoopOptions{
		target:      &Target{},
		sampleLimit: sampleLimit,
	})
	appl, ok = loop.(*scrapeLoop)
	require.True(t, ok, "Expected scrapeLoop but got %T", loop)

	wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax)

	sl, ok := wrapped.(*limitAppender)
	require.True(t, ok, "Expected limitAppender but got %T", wrapped)

	tl, ok = sl.Appender.(*timeLimitAppender)
	require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)

	_, ok = tl.Appender.(nopAppender)
	require.True(t, ok, "Expected base appender but got %T", tl.Appender)

	wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax)

	bl, ok := wrapped.(*bucketLimitAppender)
	require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
	sl, ok = bl.Appender.(*limitAppender)
	require.True(t, ok, "Expected limitAppender but got %T", bl)
	tl, ok = sl.Appender.(*timeLimitAppender)
	require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)
	_, ok = tl.Appender.(nopAppender)
	require.True(t, ok, "Expected base appender but got %T", tl.Appender)

	wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, 0)

	ml, ok := wrapped.(*maxSchemaAppender)
	require.True(t, ok, "Expected maxSchemaAppender but got %T", wrapped)
	bl, ok = ml.Appender.(*bucketLimitAppender)
	require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
	sl, ok = bl.Appender.(*limitAppender)
	require.True(t, ok, "Expected limitAppender but got %T", bl)
	tl, ok = sl.Appender.(*timeLimitAppender)
	require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)
	_, ok = tl.Appender.(nopAppender)
	require.True(t, ok, "Expected base appender but got %T", tl.Appender)
}
func TestScrapePoolRaces(t *testing.T) {
	interval, _ := model.ParseDuration("1s")
	timeout, _ := model.ParseDuration("500ms")
	newConfig := func() *config.ScrapeConfig {
		return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
	}
	sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
	tgts := []*targetgroup.Group{
		{
			Targets: []model.LabelSet{
				{model.AddressLabel: "127.0.0.1:9090"},
				{model.AddressLabel: "127.0.0.2:9090"},
				{model.AddressLabel: "127.0.0.3:9090"},
				{model.AddressLabel: "127.0.0.4:9090"},
				{model.AddressLabel: "127.0.0.5:9090"},
				{model.AddressLabel: "127.0.0.6:9090"},
				{model.AddressLabel: "127.0.0.7:9090"},
				{model.AddressLabel: "127.0.0.8:9090"},
			},
		},
	}

	sp.Sync(tgts)
	active := sp.ActiveTargets()
	dropped := sp.DroppedTargets()
	expectedActive, expectedDropped := len(tgts[0].Targets), 0

	require.Len(t, active, expectedActive, "Invalid number of active targets")
	require.Len(t, dropped, expectedDropped, "Invalid number of dropped targets")

	for i := 0; i < 20; i++ {
		time.Sleep(10 * time.Millisecond)
		sp.reload(newConfig())
	}
	sp.stop()
}
func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
	var wg sync.WaitGroup
	newLoop := func(opts scrapeLoopOptions) loop {
		wg.Add(1)
		l := &testLoop{
			startFunc: func(interval, timeout time.Duration, errc chan<- error) {
				wg.Done()
			},
			stopFunc: func() {},
		}
		return l
	}
	sp := &scrapePool{
		appendable:    &nopAppendable{},
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		newLoop:       newLoop,
		logger:        nil,
		client:        http.DefaultClient,
		metrics:       newTestScrapeMetrics(t),
		symbolTable:   labels.NewSymbolTable(),
	}

	tgs := []*targetgroup.Group{
		{
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue("127.0.0.1:9090")},
			},
		},
		{
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue("127.0.0.1:9090")},
			},
		},
	}

	require.NoError(t, sp.reload(&config.ScrapeConfig{
		ScrapeInterval: model.Duration(3 * time.Second),
		ScrapeTimeout:  model.Duration(2 * time.Second),
	}))
	sp.Sync(tgs)

	require.Len(t, sp.loops, 1)

	wg.Wait()
	for _, l := range sp.loops {
		require.True(t, l.(*testLoop).runOnce, "loop should be running")
	}
}
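
// newBasicScrapeLoop and newBasicScrapeLoopWithFallback construct a scrapeLoop
// with test-friendly defaults via newScrapeLoop's long positional parameter
// list. Tests typically adjust fields on the returned loop afterwards (e.g.
// sl.timeout, sl.sampleMutator, sl.validationScheme) instead of threading
// more parameters through these helpers.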
func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration) *scrapeLoop {
	return newBasicScrapeLoopWithFallback(t, ctx, scraper, app, interval, "")
}

func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration, fallback string) *scrapeLoop {
	return newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		labels.NewSymbolTable(),
		0,
		true,
		false,
		true,
		0, 0, histogram.ExponentialSchemaMax,
		nil,
		interval,
		time.Hour,
		false,
		false,
		false,
		false,
		false,
		false,
		nil,
		false,
		newTestScrapeMetrics(t),
		false,
		model.LegacyValidation,
		fallback,
	)
}
func TestScrapeLoopStopBeforeRun(t *testing.T) {
	scraper := &testScraper{}

	sl := newBasicScrapeLoop(t, context.Background(), scraper, nil, 1)

	// The scrape pool synchronizes on stopping scrape loops. However, new scrape
	// loops are started asynchronously. Thus it's possible that a loop is stopped
	// again before having started properly.
	// Stopping not-yet-started loops must block until the run method was called and exited.
	// The run method must exit immediately.
	stopDone := make(chan struct{})
	go func() {
		sl.stop()
		close(stopDone)
	}()

	select {
	case <-stopDone:
		require.FailNow(t, "Stopping terminated before run exited successfully.")
	case <-time.After(500 * time.Millisecond):
	}

	// Running the scrape loop must exit before calling the scraper even once.
	scraper.scrapeFunc = func(context.Context, io.Writer) error {
		require.FailNow(t, "Scraper was called for terminated scrape loop.")
		return nil
	}

	runDone := make(chan struct{})
	go func() {
		sl.run(nil)
		close(runDone)
	}()

	select {
	case <-runDone:
	case <-time.After(1 * time.Second):
		require.FailNow(t, "Running terminated scrape loop did not exit.")
	}

	select {
	case <-stopDone:
	case <-time.After(1 * time.Second):
		require.FailNow(t, "Stopping did not terminate after running exited.")
	}
}
func nopMutator(l labels.Labels) labels.Labels { return l }
func TestScrapeLoopStop(t *testing.T) {
	var (
		signal   = make(chan struct{}, 1)
		appender = &collectResultAppender{}
		scraper  = &testScraper{}
		app      = func(ctx context.Context) storage.Appender { return appender }
	)
	// Since we're writing samples directly below, we need to provide a protocol fallback.
	sl := newBasicScrapeLoopWithFallback(t, context.Background(), scraper, app, 10*time.Millisecond, "text/plain")

	// Terminate the loop after 2 scrapes.
	numScrapes := 0

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		numScrapes++
		if numScrapes == 2 {
			go sl.stop()
			<-sl.ctx.Done()
		}
		w.Write([]byte("metric_a 42\n"))
		return ctx.Err()
	}

	go func() {
		sl.run(nil)
		signal <- struct{}{}
	}()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		require.FailNow(t, "Scrape wasn't stopped.")
	}

	// We expect one actual sample for each scrape plus 5 report samples.
	// At least 2 scrapes were made, plus the final stale markers.
	require.GreaterOrEqual(t, len(appender.resultFloats), 6*3, "Expected at least 3 scrapes with 6 samples each.")
	require.Zero(t, len(appender.resultFloats)%6, "There is a scrape with missing samples.")
	// All samples in a scrape must have the same timestamp.
	var ts int64
	for i, s := range appender.resultFloats {
		switch {
		case i%6 == 0:
			ts = s.t
		case s.t != ts:
			t.Fatalf("Unexpected multiple timestamps within single scrape")
		}
	}
	// All samples from the last scrape must be stale markers.
	for _, s := range appender.resultFloats[len(appender.resultFloats)-5:] {
		require.True(t, value.IsStaleNaN(s.f), "Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.f))
	}
}
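
// Note: staleness markers are a distinguished NaN bit pattern (value.StaleNaN),
// which is why value.IsStaleNaN above compares bit patterns; a plain
// math.IsNaN check would also match ordinary NaN samples.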
func TestScrapeLoopRun(t *testing.T) {
	var (
		signal        = make(chan struct{}, 1)
		errc          = make(chan error)
		scraper       = &testScraper{}
		app           = func(ctx context.Context) storage.Appender { return &nopAppender{} }
		scrapeMetrics = newTestScrapeMetrics(t)
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		nil,
		0,
		true,
		false,
		true,
		0, 0, histogram.ExponentialSchemaMax,
		nil,
		time.Second,
		time.Hour,
		false,
		false,
		false,
		false,
		false,
		false,
		nil,
		false,
		scrapeMetrics,
		false,
		model.LegacyValidation,
		"",
	)

	// The loop must terminate during the initial offset if the context
	// is canceled.
	scraper.offsetDur = time.Hour

	go func() {
		sl.run(errc)
		signal <- struct{}{}
	}()

	// Wait to make sure we are actually waiting on the offset.
	time.Sleep(1 * time.Second)

	cancel()
	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		require.FailNow(t, "Cancellation during initial offset failed.")
	case err := <-errc:
		require.FailNow(t, "Unexpected error: %s", err)
	}

	// The provided timeout must cause cancellation of the context passed down to the
	// scraper. The scraper has to respect the context.
	scraper.offsetDur = 0

	block := make(chan struct{})
	scraper.scrapeFunc = func(ctx context.Context, _ io.Writer) error {
		select {
		case <-block:
		case <-ctx.Done():
			return ctx.Err()
		}
		return nil
	}

	ctx, cancel = context.WithCancel(context.Background())
	sl = newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
	sl.timeout = 100 * time.Millisecond

	go func() {
		sl.run(errc)
		signal <- struct{}{}
	}()

	select {
	case err := <-errc:
		require.ErrorIs(t, err, context.DeadlineExceeded)
	case <-time.After(3 * time.Second):
		require.FailNow(t, "Expected timeout error but got none.")
	}

	// We already caught the timeout error and are certainly in the loop.
	// Let the scrapes return immediately to cause no further timeout errors
	// and check whether canceling the parent context terminates the loop.
	close(block)
	cancel()

	select {
	case <-signal:
		// Loop terminated as expected.
	case err := <-errc:
		require.FailNow(t, "Unexpected error: %s", err)
	case <-time.After(3 * time.Second):
		require.FailNow(t, "Loop did not terminate on context cancellation")
	}
}
func TestScrapeLoopForcedErr(t *testing.T) {
	var (
		signal  = make(chan struct{}, 1)
		errc    = make(chan error)
		scraper = &testScraper{}
		app     = func(ctx context.Context) storage.Appender { return &nopAppender{} }
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second)

	forcedErr := errors.New("forced err")
	sl.setForcedError(forcedErr)
	scraper.scrapeFunc = func(context.Context, io.Writer) error {
		require.FailNow(t, "Should not be scraped.")
		return nil
	}

	go func() {
		sl.run(errc)
		signal <- struct{}{}
	}()

	select {
	case err := <-errc:
		require.ErrorIs(t, err, forcedErr)
	case <-time.After(3 * time.Second):
		require.FailNow(t, "Expected forced error but got none.")
	}
	cancel()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		require.FailNow(t, "Scrape not stopped.")
	}
}
func TestScrapeLoopMetadata(t *testing.T) {
	var (
		signal        = make(chan struct{})
		scraper       = &testScraper{}
		scrapeMetrics = newTestScrapeMetrics(t)
		cache         = newScrapeCache(scrapeMetrics)
	)
	defer close(signal)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { return nopAppender{} },
		cache,
		labels.NewSymbolTable(),
		0,
		true,
		false,
		true,
		0, 0, histogram.ExponentialSchemaMax,
		nil,
		0,
		0,
		false,
		false,
		false,
		false,
		false,
		false,
		nil,
		false,
		scrapeMetrics,
		false,
		model.LegacyValidation,
		"",
	)
	defer cancel()

	slApp := sl.appender(ctx)
	total, _, _, err := sl.append(slApp, []byte(`# TYPE test_metric counter
# HELP test_metric some help text
# UNIT test_metric metric
test_metric 1
# TYPE test_metric_no_help gauge
# HELP test_metric_no_type other help text
# EOF`), "application/openmetrics-text", time.Now())
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())
	require.Equal(t, 1, total)

	md, ok := cache.GetMetadata("test_metric")
	require.True(t, ok, "expected metadata to be present")
	require.Equal(t, model.MetricTypeCounter, md.Type, "unexpected metric type")
	require.Equal(t, "some help text", md.Help)
	require.Equal(t, "metric", md.Unit)

	md, ok = cache.GetMetadata("test_metric_no_help")
	require.True(t, ok, "expected metadata to be present")
	require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type")
	require.Equal(t, "", md.Help)
	require.Equal(t, "", md.Unit)

	md, ok = cache.GetMetadata("test_metric_no_type")
	require.True(t, ok, "expected metadata to be present")
	require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type")
	require.Equal(t, "other help text", md.Help)
	require.Equal(t, "", md.Unit)
}
func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
	// Need a full storage for correct Add/AddFast semantics.
	s := teststorage.New(t)
	t.Cleanup(func() { s.Close() })

	ctx, cancel := context.WithCancel(context.Background())
	sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
	t.Cleanup(func() { cancel() })

	return ctx, sl
}

func TestScrapeLoopSeriesAdded(t *testing.T) {
	ctx, sl := simpleTestScrapeLoop(t)

	slApp := sl.appender(ctx)
	total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())
	require.Equal(t, 1, total)
	require.Equal(t, 1, added)
	require.Equal(t, 1, seriesAdded)

	slApp = sl.appender(ctx)
	total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
	require.NoError(t, slApp.Commit())
	require.NoError(t, err)
	require.Equal(t, 1, total)
	require.Equal(t, 1, added)
	require.Equal(t, 0, seriesAdded)
}
func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
	model.NameValidationScheme = model.LegacyValidation
	s := teststorage.New(t)
	defer s.Close()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	target := &Target{
		labels: labels.FromStrings("pod_label_invalid_012", "test"),
	}
	relabelConfig := []*relabel.Config{{
		Action:      relabel.LabelMap,
		Regex:       relabel.MustNewRegexp("pod_label_invalid_(.+)"),
		Separator:   ";",
		Replacement: "$1",
	}}
	sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
	sl.sampleMutator = func(l labels.Labels) labels.Labels {
		return mutateSampleLabels(l, target, true, relabelConfig)
	}

	slApp := sl.appender(ctx)
	total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
	require.ErrorContains(t, err, "invalid metric name or label names")
	require.NoError(t, slApp.Rollback())
	require.Equal(t, 1, total)
	require.Equal(t, 0, added)
	require.Equal(t, 0, seriesAdded)
}
func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) {
	// Test that scrapes fail when the default validation scheme is UTF-8 but the
	// scrape config uses legacy validation.
	model.NameValidationScheme = model.UTF8Validation
	defer func() {
		model.NameValidationScheme = model.LegacyValidation
	}()
	s := teststorage.New(t)
	defer s.Close()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
	sl.validationScheme = model.LegacyValidation

	slApp := sl.appender(ctx)
	total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
	require.ErrorContains(t, err, "invalid metric name or label names")
	require.NoError(t, slApp.Rollback())
	require.Equal(t, 1, total)
	require.Equal(t, 0, added)
	require.Equal(t, 0, seriesAdded)

	// When the scrape loop has validation set to UTF-8, the metric is allowed.
	sl.validationScheme = model.UTF8Validation

	slApp = sl.appender(ctx)
	total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
	require.NoError(t, err)
	require.Equal(t, 1, total)
	require.Equal(t, 1, added)
	require.Equal(t, 1, seriesAdded)
}
func makeTestMetrics(n int) []byte {
	// Construct a metrics string to parse.
	sb := bytes.Buffer{}
	for i := 0; i < n; i++ {
		fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
		fmt.Fprintf(&sb, "# HELP metric_a help text\n")
		fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
	}
	fmt.Fprintf(&sb, "# EOF\n")
	return sb.Bytes()
}
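
// For n=1, makeTestMetrics produces the following exposition text (TYPE and
// HELP lines are repeated once per iteration):
//
//	# TYPE metric_a gauge
//	# HELP metric_a help text
//	metric_a{foo="0",bar="0"} 1
//	# EOF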
func BenchmarkScrapeLoopAppend(b *testing.B) {
	ctx, sl := simpleTestScrapeLoop(b)

	slApp := sl.appender(ctx)
	metrics := makeTestMetrics(100)
	ts := time.Time{}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		ts = ts.Add(time.Second)
		_, _, _, _ = sl.append(slApp, metrics, "text/plain", ts)
	}
}

func BenchmarkScrapeLoopAppendOM(b *testing.B) {
	ctx, sl := simpleTestScrapeLoop(b)

	slApp := sl.appender(ctx)
	metrics := makeTestMetrics(100)
	ts := time.Time{}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		ts = ts.Add(time.Second)
		_, _, _, _ = sl.append(slApp, metrics, "application/openmetrics-text", ts)
	}
}
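
// To run only these benchmarks, the standard go test flags apply, e.g.:
//
//	go test -run '^$' -bench 'BenchmarkScrapeLoopAppend' ./scrape/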
2024-10-23 08:34:28 -07:00
func TestSetOptionsHandlingStaleness ( t * testing . T ) {
s := teststorage . New ( t , 600000 )
defer s . Close ( )
signal := make ( chan struct { } , 1 )
ctx , cancel := context . WithCancel ( context . Background ( ) )
defer cancel ( )
// Function to run the scrape loop
runScrapeLoop := func ( ctx context . Context , t * testing . T , cue int , action func ( * scrapeLoop ) ) {
var (
scraper = & testScraper { }
app = func ( ctx context . Context ) storage . Appender {
return s . Appender ( ctx )
}
)
sl := newBasicScrapeLoop ( t , ctx , scraper , app , 10 * time . Millisecond )
numScrapes := 0
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
numScrapes ++
if numScrapes == cue {
action ( sl )
}
w . Write ( [ ] byte ( fmt . Sprintf ( "metric_a{a=\"1\",b=\"1\"} %d\n" , 42 + numScrapes ) ) )
return nil
}
sl . run ( nil )
}
go func ( ) {
runScrapeLoop ( ctx , t , 2 , func ( sl * scrapeLoop ) {
go sl . stop ( )
// Wait a bit then start a new target.
time . Sleep ( 100 * time . Millisecond )
go func ( ) {
runScrapeLoop ( ctx , t , 4 , func ( _ * scrapeLoop ) {
cancel ( )
} )
signal <- struct { } { }
} ( )
} )
} ( )
select {
case <- signal :
case <- time . After ( 10 * time . Second ) :
t . Fatal ( "Scrape wasn't stopped." )
}
ctx1 , cancel := context . WithCancel ( context . Background ( ) )
defer cancel ( )
q , err := s . Querier ( 0 , time . Now ( ) . UnixNano ( ) )
require . NoError ( t , err )
defer q . Close ( )
series := q . Select ( ctx1 , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , "metric_a" ) )
var results [ ] floatSample
for series . Next ( ) {
it := series . At ( ) . Iterator ( nil )
for it . Next ( ) == chunkenc . ValFloat {
t , v := it . At ( )
results = append ( results , floatSample {
metric : series . At ( ) . Labels ( ) ,
t : t ,
f : v ,
} )
}
require . NoError ( t , it . Err ( ) )
}
require . NoError ( t , series . Err ( ) )
var c int
for _ , s := range results {
if value . IsStaleNaN ( s . f ) {
c ++
}
}
require . Equal ( t , 0 , c , "invalid count of staleness markers after stopping the engine" )
}
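
// The following two tests exercise staleness handling: after a failed scrape,
// every series from the last successful scrape is expected to receive a stale
// marker, while the per-scrape report series keep being appended for
// successful and failed scrapes alike.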
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape ( t * testing . T ) {
appender := & collectResultAppender { }
var (
signal = make ( chan struct { } , 1 )
scraper = & testScraper { }
app = func ( ctx context . Context ) storage . Appender { return appender }
)
ctx , cancel := context . WithCancel ( context . Background ( ) )
// Since we're writing samples directly below we need to provide a protocol fallback.
sl := newBasicScrapeLoopWithFallback ( t , ctx , scraper , app , 10 * time . Millisecond , "text/plain" )
// Succeed once, several failures, then stop.
numScrapes := 0
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
numScrapes ++
switch numScrapes {
case 1 :
w . Write ( [ ] byte ( "metric_a 42\n" ) )
return nil
case 5 :
cancel ( )
}
return errors . New ( "scrape failed" )
}
go func ( ) {
sl . run ( nil )
signal <- struct { } { }
} ( )
select {
case <- signal :
case <- time . After ( 5 * time . Second ) :
require . FailNow ( t , "Scrape wasn't stopped." )
}
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
require . Len ( t , appender . resultFloats , 27 , "Appended samples not as expected:\n%s" , appender )
require . Equal ( t , 42.0 , appender . resultFloats [ 0 ] . f , "Appended first sample not as expected" )
require . True ( t , value . IsStaleNaN ( appender . resultFloats [ 6 ] . f ) ,
"Appended second sample not as expected. Wanted: stale NaN Got: %x" , math . Float64bits ( appender . resultFloats [ 6 ] . f ) )
}
func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure ( t * testing . T ) {
appender := & collectResultAppender { }
var (
signal = make ( chan struct { } , 1 )
scraper = & testScraper { }
app = func ( ctx context . Context ) storage . Appender { return appender }
numScrapes = 0
)
ctx , cancel := context . WithCancel ( context . Background ( ) )
// Since we're writing samples directly below we need to provide a protocol fallback.
sl := newBasicScrapeLoopWithFallback ( t , ctx , scraper , app , 10 * time . Millisecond , "text/plain" )
// Succeed once, several failures, then stop.
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
numScrapes ++
switch numScrapes {
case 1 :
w . Write ( [ ] byte ( "metric_a 42\n" ) )
return nil
case 2 :
w . Write ( [ ] byte ( "7&-\n" ) )
return nil
case 3 :
cancel ( )
}
return errors . New ( "scrape failed" )
}
go func ( ) {
sl . run ( nil )
signal <- struct { } { }
} ( )
select {
case <- signal :
case <- time . After ( 5 * time . Second ) :
require . FailNow ( t , "Scrape wasn't stopped." )
}
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
require . Len ( t , appender . resultFloats , 17 , "Appended samples not as expected:\n%s" , appender )
require . Equal ( t , 42.0 , appender . resultFloats [ 0 ] . f , "Appended first sample not as expected" )
require . True ( t , value . IsStaleNaN ( appender . resultFloats [ 6 ] . f ) ,
"Appended second sample not as expected. Wanted: stale NaN Got: %x" , math . Float64bits ( appender . resultFloats [ 6 ] . f ) )
}
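
// TestScrapeLoopCache verifies the scrape cache lifecycle: series seen in the
// previous scrape stay cached for the next one, and entries for series that
// have disappeared are evicted after a subsequent scrape.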
func TestScrapeLoopCache ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
appender := & collectResultAppender { }
var (
signal = make ( chan struct { } , 1 )
scraper = & testScraper { }
app = func ( ctx context . Context ) storage . Appender { appender . next = s . Appender ( ctx ) ; return appender }
)
ctx , cancel := context . WithCancel ( context . Background ( ) )
// Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps.
// See https://github.com/prometheus/prometheus/issues/12727.
// Since we're writing samples directly below we need to provide a protocol fallback.
sl := newBasicScrapeLoopWithFallback ( t , ctx , scraper , app , 100 * time . Millisecond , "text/plain" )
numScrapes := 0
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
switch numScrapes {
case 1 , 2 :
_ , ok := sl . cache . series [ "metric_a" ]
require . True ( t , ok , "metric_a missing from cache after scrape %d" , numScrapes )
_ , ok = sl . cache . series [ "metric_b" ]
require . True ( t , ok , "metric_b missing from cache after scrape %d" , numScrapes )
case 3 :
_ , ok := sl . cache . series [ "metric_a" ]
require . True ( t , ok , "metric_a missing from cache after scrape %d" , numScrapes )
_ , ok = sl . cache . series [ "metric_b" ]
require . False ( t , ok , "metric_b present in cache after scrape %d" , numScrapes )
}
numScrapes ++
switch numScrapes {
case 1 :
w . Write ( [ ] byte ( "metric_a 42\nmetric_b 43\n" ) )
return nil
case 3 :
w . Write ( [ ] byte ( "metric_a 44\n" ) )
return nil
case 4 :
cancel ( )
}
return errors . New ( "scrape failed" )
}
go func ( ) {
sl . run ( nil )
signal <- struct { } { }
} ( )
select {
case <- signal :
case <- time . After ( 5 * time . Second ) :
require . FailNow ( t , "Scrape wasn't stopped." )
}
// 3 scraped samples (2 from the first scrape, 1 from the third), 3 stale
// markers (2 after the failed second scrape, 1 for metric_b after the third),
// and 5 report samples for each of the 4 scrapes add up to 26 samples.
require . Len ( t , appender . resultFloats , 26 , "Appended samples not as expected:\n%s" , appender )
}
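
// The scrape cache must not grow without bound when a target exposes fresh
// series names on every scrape; stale entries are expected to be flushed so
// that only a bounded number of series (at most 2000 here) stay cached.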
func TestScrapeLoopCacheMemoryExhaustionProtection ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
sapp := s . Appender ( context . Background ( ) )
appender := & collectResultAppender { next : sapp }
var (
signal = make ( chan struct { } , 1 )
scraper = & testScraper { }
app = func ( ctx context . Context ) storage . Appender { return appender }
)
ctx , cancel := context . WithCancel ( context . Background ( ) )
sl := newBasicScrapeLoop ( t , ctx , scraper , app , 10 * time . Millisecond )
numScrapes := 0
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
numScrapes ++
if numScrapes < 5 {
s := ""
for i := 0 ; i < 500 ; i ++ {
s = fmt . Sprintf ( "%smetric_%d_%d 42\n" , s , i , numScrapes )
}
w . Write ( [ ] byte ( s + "&" ) )
} else {
cancel ( )
}
return nil
}
go func ( ) {
sl . run ( nil )
signal <- struct { } { }
} ( )
select {
case <- signal :
case <- time . After ( 5 * time . Second ) :
require . FailNow ( t , "Scrape wasn't stopped." )
}
require . LessOrEqual ( t , len ( sl . cache . series ) , 2000 , "More than 2000 series cached." )
}
func TestScrapeLoopAppend ( t * testing . T ) {
tests := [ ] struct {
title string
honorLabels bool
scrapeLabels string
discoveryLabels [ ] string
expLset labels . Labels
expValue float64
} {
{
// When "honor_labels" is not set,
// a label name collision is handled by adding a prefix.
title : "Label name collision" ,
honorLabels : false ,
scrapeLabels : ` metric { n="1"} 0 ` ,
discoveryLabels : [ ] string { "n" , "2" } ,
expLset : labels . FromStrings ( "__name__" , "metric" , "exported_n" , "1" , "n" , "2" ) ,
expValue : 0 ,
} , {
// When "honor_labels" is not set,
// exported labels from discovery don't get overwritten.
title : "Label name collision" ,
honorLabels : false ,
scrapeLabels : ` metric 0 ` ,
discoveryLabels : [ ] string { "n" , "2" , "exported_n" , "2" } ,
expLset : labels . FromStrings ( "__name__" , "metric" , "n" , "2" , "exported_n" , "2" ) ,
expValue : 0 ,
} , {
// Labels with no value need to be removed as these should not be ingested.
title : "Delete Empty labels" ,
honorLabels : false ,
scrapeLabels : ` metric { n=""} 0 ` ,
discoveryLabels : nil ,
expLset : labels . FromStrings ( "__name__" , "metric" ) ,
expValue : 0 ,
} , {
// Honor Labels should ignore labels with the same name.
title : "Honor Labels" ,
honorLabels : true ,
scrapeLabels : ` metric { n1="1", n2="2"} 0 ` ,
discoveryLabels : [ ] string { "n1" , "0" } ,
expLset : labels . FromStrings ( "__name__" , "metric" , "n1" , "1" , "n2" , "2" ) ,
expValue : 0 ,
} , {
title : "Stale - NaN" ,
honorLabels : false ,
scrapeLabels : ` metric NaN ` ,
discoveryLabels : nil ,
expLset : labels . FromStrings ( "__name__" , "metric" ) ,
expValue : math . Float64frombits ( value . NormalNaN ) ,
} ,
}
for _ , test := range tests {
app := & collectResultAppender { }
discoveryLabels := & Target {
labels : labels . FromStrings ( test . discoveryLabels ... ) ,
}
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . sampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateSampleLabels ( l , discoveryLabels , test . honorLabels , nil )
}
sl . reportSampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateReportSampleLabels ( l , discoveryLabels )
}
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( test . scrapeLabels ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
expected := [ ] floatSample {
{
metric : test . expLset ,
t : timestamp . FromTime ( now ) ,
f : test . expValue ,
} ,
}
t . Logf ( "Test:%s" , test . title )
requireEqual ( t , expected , app . resultFloats )
}
}
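
// requireEqual compares using custom cmp options: a float comparer
// (equalFloatSamples) so that NaN-valued samples such as stale markers can
// still match, plus access to histogramSample's unexported fields.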
func requireEqual ( t * testing . T , expected , actual interface { } , msgAndArgs ... interface { } ) {
testutil . RequireEqualWithOptions ( t , expected , actual ,
[ ] cmp . Option { cmp . Comparer ( equalFloatSamples ) , cmp . AllowUnexported ( histogramSample { } ) } ,
msgAndArgs ... )
}
func TestScrapeLoopAppendForConflictingPrefixedLabels ( t * testing . T ) {
testcases := map [ string ] struct {
targetLabels [ ] string
exposedLabels string
expected [ ] string
} {
"One target label collides with existing label" : {
targetLabels : [ ] string { "foo" , "2" } ,
exposedLabels : ` metric { foo="1"} 0 ` ,
expected : [ ] string { "__name__" , "metric" , "exported_foo" , "1" , "foo" , "2" } ,
} ,
"One target label collides with existing label, plus target label already with prefix 'exported'" : {
targetLabels : [ ] string { "foo" , "2" , "exported_foo" , "3" } ,
exposedLabels : ` metric { foo="1"} 0 ` ,
expected : [ ] string { "__name__" , "metric" , "exported_exported_foo" , "1" , "exported_foo" , "3" , "foo" , "2" } ,
} ,
"One target label collides with existing label, plus existing label already with prefix 'exported" : {
targetLabels : [ ] string { "foo" , "3" } ,
exposedLabels : ` metric { foo="1", exported_foo="2"} 0 ` ,
expected : [ ] string { "__name__" , "metric" , "exported_exported_foo" , "1" , "exported_foo" , "2" , "foo" , "3" } ,
} ,
"One target label collides with existing label, both already with prefix 'exported'" : {
targetLabels : [ ] string { "exported_foo" , "2" } ,
exposedLabels : ` metric { exported_foo="1"} 0 ` ,
expected : [ ] string { "__name__" , "metric" , "exported_exported_foo" , "1" , "exported_foo" , "2" } ,
} ,
"Two target labels collide with existing labels, both with and without prefix 'exported'" : {
targetLabels : [ ] string { "foo" , "3" , "exported_foo" , "4" } ,
exposedLabels : ` metric { foo="1", exported_foo="2"} 0 ` ,
expected : [ ] string {
"__name__" , "metric" , "exported_exported_foo" , "1" , "exported_exported_exported_foo" ,
"2" , "exported_foo" , "4" , "foo" , "3" ,
} ,
} ,
"Extreme example" : {
targetLabels : [ ] string { "foo" , "0" , "exported_exported_foo" , "1" , "exported_exported_exported_foo" , "2" } ,
exposedLabels : ` metric { foo="3", exported_foo="4", exported_exported_exported_foo="5"} 0 ` ,
expected : [ ] string {
"__name__" , "metric" ,
"exported_exported_exported_exported_exported_foo" , "5" ,
"exported_exported_exported_exported_foo" , "3" ,
"exported_exported_exported_foo" , "2" ,
"exported_exported_foo" , "1" ,
"exported_foo" , "4" ,
"foo" , "0" ,
} ,
} ,
}
for name , tc := range testcases {
t . Run ( name , func ( t * testing . T ) {
app := & collectResultAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . sampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateSampleLabels ( l , & Target { labels : labels . FromStrings ( tc . targetLabels ... ) } , false , nil )
}
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( tc . exposedLabels ) , "text/plain" , time . Date ( 2000 , 1 , 1 , 1 , 0 , 0 , 0 , time . UTC ) )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
requireEqual ( t , [ ] floatSample {
{
metric : labels . FromStrings ( tc . expected ... ) ,
t : timestamp . FromTime ( time . Date ( 2000 , 1 , 1 , 1 , 0 , 0 , 0 , time . UTC ) ) ,
f : 0 ,
} ,
} , app . resultFloats )
} )
}
}
func TestScrapeLoopAppendCacheEntryButErrNotFound ( t * testing . T ) {
// collectResultAppender's AddFast always returns ErrNotFound if we don't give it a next.
app := & collectResultAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
fakeRef := storage . SeriesRef ( 1 )
expValue := float64 ( 1 )
metric := [ ] byte ( ` metric { n="1"} 1 ` )
p , warning := textparse . New ( metric , "text/plain" , "" , false , false , labels . NewSymbolTable ( ) )
require . NotNil ( t , p )
require . NoError ( t , warning )
var lset labels . Labels
p . Next ( )
p . Metric ( & lset )
hash := lset . Hash ( )
// Create a fake entry in the cache
sl . cache . addRef ( metric , fakeRef , lset , hash )
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , metric , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
expected := [ ] floatSample {
{
metric : lset ,
t : timestamp . FromTime ( now ) ,
f : expValue ,
} ,
}
require . Equal ( t , expected , app . resultFloats )
}
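
// TestScrapeLoopAppendSampleLimit exercises sample_limit handling: once the
// limit is exceeded the scrape is rolled back, errSampleLimit is returned, and
// the targetScrapeSampleLimit counter increments once per scrape rather than
// once per excess sample.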
func TestScrapeLoopAppendSampleLimit ( t * testing . T ) {
resApp := & collectResultAppender { }
app := & limitAppender { Appender : resApp , limit : 1 }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . sampleMutator = func ( l labels . Labels ) labels . Labels {
if l . Has ( "deleteme" ) {
return labels . EmptyLabels ( )
}
return l
}
sl . sampleLimit = app . limit
// Get the value of the Counter before performing the append.
beforeMetric := dto . Metric { }
err := sl . metrics . targetScrapeSampleLimit . Write ( & beforeMetric )
require . NoError ( t , err )
beforeMetricValue := beforeMetric . GetCounter ( ) . GetValue ( )
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
total , added , seriesAdded , err := sl . append ( app , [ ] byte ( "metric_a 1\nmetric_b 1\nmetric_c 1\n" ) , "text/plain" , now )
require . ErrorIs ( t , err , errSampleLimit )
require . NoError ( t , slApp . Rollback ( ) )
require . Equal ( t , 3 , total )
require . Equal ( t , 3 , added )
require . Equal ( t , 1 , seriesAdded )
// Check that the Counter has been incremented a single time for the scrape,
// not multiple times for each sample.
metric := dto . Metric { }
err = sl . metrics . targetScrapeSampleLimit . Write ( & metric )
require . NoError ( t , err )
value := metric . GetCounter ( ) . GetValue ( )
change := value - beforeMetricValue
require . Equal ( t , 1.0 , change , "Unexpected change of sample limit metric: %f" , change )
// And verify that we got the samples that fit under the limit.
want := [ ] floatSample {
{
metric : labels . FromStrings ( model . MetricNameLabel , "metric_a" ) ,
t : timestamp . FromTime ( now ) ,
f : 1 ,
} ,
}
requireEqual ( t , want , resApp . rolledbackFloats , "Appended samples not as expected:\n%s" , resApp )
now = time . Now ( )
slApp = sl . appender ( context . Background ( ) )
total , added , seriesAdded , err = sl . append ( slApp , [ ] byte ( "metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n" ) , "text/plain" , now )
require . ErrorIs ( t , err , errSampleLimit )
require . NoError ( t , slApp . Rollback ( ) )
require . Equal ( t , 9 , total )
require . Equal ( t , 6 , added )
require . Equal ( t , 0 , seriesAdded )
}
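
// TestScrapeLoop_HistogramBucketLimit grows a native histogram across scrapes
// until it exceeds the configured bucket limit; only then should sl.append
// return errBucketLimit and bump the bucket-limit counter metric.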
func TestScrapeLoop_HistogramBucketLimit ( t * testing . T ) {
resApp := & collectResultAppender { }
app := & bucketLimitAppender { Appender : resApp , limit : 2 }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . enableNativeHistogramIngestion = true
sl . sampleMutator = func ( l labels . Labels ) labels . Labels {
if l . Has ( "deleteme" ) {
return labels . EmptyLabels ( )
}
return l
}
sl . sampleLimit = app . limit
metric := dto . Metric { }
err := sl . metrics . targetScrapeNativeHistogramBucketLimit . Write ( & metric )
require . NoError ( t , err )
beforeMetricValue := metric . GetCounter ( ) . GetValue ( )
nativeHistogram := prometheus . NewHistogramVec (
prometheus . HistogramOpts {
Namespace : "testing" ,
Name : "example_native_histogram" ,
Help : "This is used for testing" ,
ConstLabels : map [ string ] string { "some" : "value" } ,
NativeHistogramBucketFactor : 1.1 , // 10% increase from bucket to bucket
NativeHistogramMaxBucketNumber : 100 , // intentionally higher than the limit we'll use in the scraper
} ,
[ ] string { "size" } ,
)
registry := prometheus . NewRegistry ( )
require . NoError ( t , registry . Register ( nativeHistogram ) )
nativeHistogram . WithLabelValues ( "S" ) . Observe ( 1.0 )
nativeHistogram . WithLabelValues ( "M" ) . Observe ( 1.0 )
nativeHistogram . WithLabelValues ( "L" ) . Observe ( 1.0 )
nativeHistogram . WithLabelValues ( "M" ) . Observe ( 10.0 )
nativeHistogram . WithLabelValues ( "L" ) . Observe ( 10.0 ) // in different bucket since > 1*1.1
gathered , err := registry . Gather ( )
require . NoError ( t , err )
require . NotEmpty ( t , gathered )
histogramMetricFamily := gathered [ 0 ]
msg , err := MetricFamilyToProtobuf ( histogramMetricFamily )
require . NoError ( t , err )
now := time . Now ( )
total , added , seriesAdded , err := sl . append ( app , msg , "application/vnd.google.protobuf" , now )
require . NoError ( t , err )
require . Equal ( t , 3 , total )
require . Equal ( t , 3 , added )
require . Equal ( t , 3 , seriesAdded )
err = sl . metrics . targetScrapeNativeHistogramBucketLimit . Write ( & metric )
require . NoError ( t , err )
metricValue := metric . GetCounter ( ) . GetValue ( )
require . Equal ( t , beforeMetricValue , metricValue )
beforeMetricValue = metricValue
nativeHistogram . WithLabelValues ( "L" ) . Observe ( 100.0 ) // in different bucket since > 10*1.1
gathered , err = registry . Gather ( )
require . NoError ( t , err )
require . NotEmpty ( t , gathered )
histogramMetricFamily = gathered [ 0 ]
msg , err = MetricFamilyToProtobuf ( histogramMetricFamily )
require . NoError ( t , err )
now = time . Now ( )
total , added , seriesAdded , err = sl . append ( app , msg , "application/vnd.google.protobuf" , now )
require . NoError ( t , err )
require . Equal ( t , 3 , total )
require . Equal ( t , 3 , added )
require . Equal ( t , 3 , seriesAdded )
err = sl . metrics . targetScrapeNativeHistogramBucketLimit . Write ( & metric )
require . NoError ( t , err )
metricValue = metric . GetCounter ( ) . GetValue ( )
require . Equal ( t , beforeMetricValue , metricValue )
beforeMetricValue = metricValue
nativeHistogram . WithLabelValues ( "L" ) . Observe ( 100000.0 ) // in different bucket since > 10*1.1
gathered , err = registry . Gather ( )
require . NoError ( t , err )
require . NotEmpty ( t , gathered )
histogramMetricFamily = gathered [ 0 ]
msg , err = MetricFamilyToProtobuf ( histogramMetricFamily )
require . NoError ( t , err )
now = time . Now ( )
total , added , seriesAdded , err = sl . append ( app , msg , "application/vnd.google.protobuf" , now )
if ! errors . Is ( err , errBucketLimit ) {
t . Fatalf ( "Did not see expected histogram bucket limit error: %s" , err )
}
require . NoError ( t , app . Rollback ( ) )
require . Equal ( t , 3 , total )
require . Equal ( t , 3 , added )
require . Equal ( t , 0 , seriesAdded )
err = sl . metrics . targetScrapeNativeHistogramBucketLimit . Write ( & metric )
require . NoError ( t , err )
metricValue = metric . GetCounter ( ) . GetValue ( )
require . Equal ( t , beforeMetricValue + 1 , metricValue )
}
func TestScrapeLoop_ChangingMetricString ( t * testing . T ) {
// This is a regression test for the scrape loop cache not properly maintaining
// IDs when the string representation of a metric changes across a scrape. Thus
// we use a real storage appender here.
s := teststorage . New ( t )
defer s . Close ( )
capp := & collectResultAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return capp } , 0 )
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( ` metric_a { a="1",b="1"} 1 ` ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
slApp = sl . appender ( context . Background ( ) )
_ , _ , _ , err = sl . append ( slApp , [ ] byte ( ` metric_a { b="1",a="1"} 2 ` ) , "text/plain" , now . Add ( time . Minute ) )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
want := [ ] floatSample {
{
metric : labels . FromStrings ( "__name__" , "metric_a" , "a" , "1" , "b" , "1" ) ,
t : timestamp . FromTime ( now ) ,
f : 1 ,
} ,
{
metric : labels . FromStrings ( "__name__" , "metric_a" , "a" , "1" , "b" , "1" ) ,
t : timestamp . FromTime ( now . Add ( time . Minute ) ) ,
f : 2 ,
} ,
}
require . Equal ( t , want , capp . resultFloats , "Appended samples not as expected:\n%s" , capp )
}
func TestScrapeLoopAppendFailsWithNoContentType ( t * testing . T ) {
app := & collectResultAppender { }
// Explicitly setting the lack of fallback protocol here to make it obvious.
sl := newBasicScrapeLoopWithFallback ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 , "" )
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( "metric_a 1\n" ) , "" , now )
// We expect the appropriate error.
require . ErrorContains ( t , err , "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target" , "Expected \"non-compliant scrape\" error but got: %s" , err )
}
func TestScrapeLoopAppendEmptyWithNoContentType ( t * testing . T ) {
// This test ensures there are no errors when we get a blank scrape or just want to append a stale marker.
app := & collectResultAppender { }
// Explicitly setting the lack of fallback protocol here to make it obvious.
sl := newBasicScrapeLoopWithFallback ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 , "" )
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( "" ) , "" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
}
func TestScrapeLoopAppendStaleness ( t * testing . T ) {
app := & collectResultAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( "metric_a 1\n" ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
slApp = sl . appender ( context . Background ( ) )
_ , _ , _ , err = sl . append ( slApp , [ ] byte ( "" ) , "" , now . Add ( time . Second ) )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
want := [ ] floatSample {
{
metric : labels . FromStrings ( model . MetricNameLabel , "metric_a" ) ,
t : timestamp . FromTime ( now ) ,
f : 1 ,
} ,
{
metric : labels . FromStrings ( model . MetricNameLabel , "metric_a" ) ,
t : timestamp . FromTime ( now . Add ( time . Second ) ) ,
f : math . Float64frombits ( value . StaleNaN ) ,
} ,
}
requireEqual ( t , want , app . resultFloats , "Appended samples not as expected:\n%s" , app )
}
2017-04-28 08:36:36 -07:00
func TestScrapeLoopAppendNoStalenessIfTimestamp ( t * testing . T ) {
app := & collectResultAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( "metric_a 1 1000\n" ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
slApp = sl . appender ( context . Background ( ) )
_ , _ , _ , err = sl . append ( slApp , [ ] byte ( "" ) , "" , now . Add ( time . Second ) )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
want := [ ] floatSample {
{
metric : labels . FromStrings ( model . MetricNameLabel , "metric_a" ) ,
t : 1000 ,
f : 1 ,
} ,
}
require . Equal ( t , want , app . resultFloats , "Appended samples not as expected:\n%s" , app )
}
func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness ( t * testing . T ) {
app := & collectResultAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . trackTimestampsStaleness = true
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( "metric_a 1 1000\n" ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
slApp = sl . appender ( context . Background ( ) )
_ , _ , _ , err = sl . append ( slApp , [ ] byte ( "" ) , "" , now . Add ( time . Second ) )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
want := [ ] floatSample {
{
metric : labels . FromStrings ( model . MetricNameLabel , "metric_a" ) ,
t : 1000 ,
f : 1 ,
} ,
{
metric : labels . FromStrings ( model . MetricNameLabel , "metric_a" ) ,
t : timestamp . FromTime ( now . Add ( time . Second ) ) ,
f : math . Float64frombits ( value . StaleNaN ) ,
} ,
}
requireEqual ( t , want , app . resultFloats , "Appended samples not as expected:\n%s" , app )
}
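
// TestScrapeLoopAppendExemplar covers exemplar ingestion from OpenMetrics text
// and protobuf native histograms, including the ordering rules and the
// dropping of native histogram exemplars that lack a timestamp.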
func TestScrapeLoopAppendExemplar ( t * testing . T ) {
tests := [ ] struct {
title string
alwaysScrapeClassicHist bool
enableNativeHistogramsIngestion bool
scrapeText string
contentType string
discoveryLabels [ ] string
floats [ ] floatSample
histograms [ ] histogramSample
exemplars [ ] exemplar . Exemplar
} {
{
title : "Metric without exemplars" ,
scrapeText : "metric_total{n=\"1\"} 0\n# EOF" ,
contentType : "application/openmetrics-text" ,
discoveryLabels : [ ] string { "n" , "2" } ,
floats : [ ] floatSample { {
metric : labels . FromStrings ( "__name__" , "metric_total" , "exported_n" , "1" , "n" , "2" ) ,
f : 0 ,
} } ,
} ,
{
title : "Metric with exemplars" ,
scrapeText : "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0\n# EOF" ,
contentType : "application/openmetrics-text" ,
discoveryLabels : [ ] string { "n" , "2" } ,
floats : [ ] floatSample { {
metric : labels . FromStrings ( "__name__" , "metric_total" , "exported_n" , "1" , "n" , "2" ) ,
f : 0 ,
} } ,
exemplars : [ ] exemplar . Exemplar {
{ Labels : labels . FromStrings ( "a" , "abc" ) , Value : 1 } ,
} ,
} ,
{
title : "Metric with exemplars and TS" ,
scrapeText : "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF" ,
contentType : "application/openmetrics-text" ,
discoveryLabels : [ ] string { "n" , "2" } ,
floats : [ ] floatSample { {
metric : labels . FromStrings ( "__name__" , "metric_total" , "exported_n" , "1" , "n" , "2" ) ,
f : 0 ,
} } ,
exemplars : [ ] exemplar . Exemplar {
{ Labels : labels . FromStrings ( "a" , "abc" ) , Value : 1 , Ts : 10000000 , HasTs : true } ,
} ,
} ,
{
title : "Two metrics and exemplars" ,
scrapeText : ` metric_total { n = "1" } 1 # { t = "1" } 1.0 10000
metric_total { n = "2" } 2 # { t = "2" } 2.0 20000
# EOF ` ,
contentType : "application/openmetrics-text" ,
floats : [ ] floatSample { {
metric : labels . FromStrings ( "__name__" , "metric_total" , "n" , "1" ) ,
f : 1 ,
} , {
metric : labels . FromStrings ( "__name__" , "metric_total" , "n" , "2" ) ,
f : 2 ,
} } ,
exemplars : [ ] exemplar . Exemplar {
{ Labels : labels . FromStrings ( "t" , "1" ) , Value : 1 , Ts : 10000000 , HasTs : true } ,
{ Labels : labels . FromStrings ( "t" , "2" ) , Value : 2 , Ts : 20000000 , HasTs : true } ,
} ,
} ,
{
title : "Native histogram with three exemplars" ,
enableNativeHistogramsIngestion : true ,
scrapeText : ` name : "test_histogram"
help : "Test histogram with many buckets removed to keep it manageable in size."
type : HISTOGRAM
metric : <
histogram : <
sample_count : 175
sample_sum : 0.0008280461746287094
bucket : <
cumulative_count : 2
upper_bound : - 0.0004899999999999998
>
bucket : <
cumulative_count : 4
upper_bound : - 0.0003899999999999998
exemplar : <
label : <
name : "dummyID"
value : "59727"
>
value : - 0.00039
timestamp : <
seconds : 1625851155
nanos : 146848499
>
>
>
bucket : <
cumulative_count : 16
upper_bound : - 0.0002899999999999998
exemplar : <
label : <
name : "dummyID"
value : "5617"
>
value : - 0.00029
>
>
bucket : <
cumulative_count : 32
upper_bound : - 0.0001899999999999998
exemplar : <
label : <
name : "dummyID"
value : "58215"
>
value : - 0.00019
timestamp : <
seconds : 1625851055
nanos : 146848599
>
>
>
schema : 3
zero_threshold : 2.938735877055719e-39
zero_count : 2
negative_span : <
offset : - 162
length : 1
>
negative_span : <
offset : 23
length : 4
>
negative_delta : 1
negative_delta : 3
negative_delta : - 2
negative_delta : - 1
negative_delta : 1
positive_span : <
offset : - 161
length : 1
>
positive_span : <
offset : 8
length : 3
>
positive_delta : 1
positive_delta : 2
positive_delta : - 1
positive_delta : - 1
>
timestamp_ms : 1234568
>
` ,
contentType : "application/vnd.google.protobuf" ,
histograms : [ ] histogramSample { {
t : 1234568 ,
metric : labels . FromStrings ( "__name__" , "test_histogram" ) ,
h : & histogram . Histogram {
Count : 175 ,
ZeroCount : 2 ,
Sum : 0.0008280461746287094 ,
ZeroThreshold : 2.938735877055719e-39 ,
Schema : 3 ,
PositiveSpans : [ ] histogram . Span {
{ Offset : - 161 , Length : 1 } ,
{ Offset : 8 , Length : 3 } ,
} ,
NegativeSpans : [ ] histogram . Span {
{ Offset : - 162 , Length : 1 } ,
{ Offset : 23 , Length : 4 } ,
} ,
PositiveBuckets : [ ] int64 { 1 , 2 , - 1 , - 1 } ,
NegativeBuckets : [ ] int64 { 1 , 3 , - 2 , - 1 , 1 } ,
} ,
} } ,
exemplars : [ ] exemplar . Exemplar {
// Native histogram exemplars are arranged by timestamp, and those with missing timestamps are dropped.
{ Labels : labels . FromStrings ( "dummyID" , "58215" ) , Value : - 0.00019 , Ts : 1625851055146 , HasTs : true } ,
{ Labels : labels . FromStrings ( "dummyID" , "59727" ) , Value : - 0.00039 , Ts : 1625851155146 , HasTs : true } ,
} ,
} ,
{
title : "Native histogram with three exemplars scraped as classic histogram" ,
enableNativeHistogramsIngestion : true ,
scrapeText : ` name : "test_histogram"
help : "Test histogram with many buckets removed to keep it manageable in size."
type : HISTOGRAM
metric : <
histogram : <
sample_count : 175
sample_sum : 0.0008280461746287094
bucket : <
cumulative_count : 2
upper_bound : - 0.0004899999999999998
>
bucket : <
cumulative_count : 4
upper_bound : - 0.0003899999999999998
exemplar : <
label : <
name : "dummyID"
value : "59727"
>
value : - 0.00039
timestamp : <
seconds : 1625851155
nanos : 146848499
>
>
>
bucket : <
cumulative_count : 16
upper_bound : - 0.0002899999999999998
exemplar : <
label : <
name : "dummyID"
value : "5617"
>
value : - 0.00029
>
>
bucket : <
cumulative_count : 32
upper_bound : - 0.0001899999999999998
exemplar : <
label : <
name : "dummyID"
value : "58215"
>
value : - 0.00019
timestamp : <
seconds : 1625851055
nanos : 146848599
>
>
>
schema : 3
zero_threshold : 2.938735877055719e-39
zero_count : 2
negative_span : <
offset : - 162
length : 1
>
negative_span : <
offset : 23
length : 4
>
negative_delta : 1
negative_delta : 3
negative_delta : - 2
negative_delta : - 1
negative_delta : 1
positive_span : <
offset : - 161
length : 1
>
positive_span : <
offset : 8
length : 3
>
positive_delta : 1
positive_delta : 2
positive_delta : - 1
positive_delta : - 1
>
timestamp_ms : 1234568
>
` ,
alwaysScrapeClassicHist : true ,
contentType : "application/vnd.google.protobuf" ,
floats : [ ] floatSample {
{ metric : labels . FromStrings ( "__name__" , "test_histogram_count" ) , t : 1234568 , f : 175 } ,
{ metric : labels . FromStrings ( "__name__" , "test_histogram_sum" ) , t : 1234568 , f : 0.0008280461746287094 } ,
{ metric : labels . FromStrings ( "__name__" , "test_histogram_bucket" , "le" , "-0.0004899999999999998" ) , t : 1234568 , f : 2 } ,
{ metric : labels . FromStrings ( "__name__" , "test_histogram_bucket" , "le" , "-0.0003899999999999998" ) , t : 1234568 , f : 4 } ,
{ metric : labels . FromStrings ( "__name__" , "test_histogram_bucket" , "le" , "-0.0002899999999999998" ) , t : 1234568 , f : 16 } ,
{ metric : labels . FromStrings ( "__name__" , "test_histogram_bucket" , "le" , "-0.0001899999999999998" ) , t : 1234568 , f : 32 } ,
{ metric : labels . FromStrings ( "__name__" , "test_histogram_bucket" , "le" , "+Inf" ) , t : 1234568 , f : 175 } ,
} ,
histograms : [ ] histogramSample { {
t : 1234568 ,
metric : labels . FromStrings ( "__name__" , "test_histogram" ) ,
h : & histogram . Histogram {
Count : 175 ,
ZeroCount : 2 ,
Sum : 0.0008280461746287094 ,
ZeroThreshold : 2.938735877055719e-39 ,
Schema : 3 ,
PositiveSpans : [ ] histogram . Span {
{ Offset : - 161 , Length : 1 } ,
{ Offset : 8 , Length : 3 } ,
} ,
NegativeSpans : [ ] histogram . Span {
{ Offset : - 162 , Length : 1 } ,
{ Offset : 23 , Length : 4 } ,
} ,
PositiveBuckets : [ ] int64 { 1 , 2 , - 1 , - 1 } ,
NegativeBuckets : [ ] int64 { 1 , 3 , - 2 , - 1 , 1 } ,
} ,
} } ,
exemplars : [ ] exemplar . Exemplar {
// Native histogram exemplars are ordered by timestamp.
// Exemplars with missing timestamps are dropped for native histograms.
{ Labels : labels . FromStrings ( "dummyID" , "58215" ) , Value : - 0.00019 , Ts : 1625851055146 , HasTs : true } ,
{ Labels : labels . FromStrings ( "dummyID" , "59727" ) , Value : - 0.00039 , Ts : 1625851155146 , HasTs : true } ,
// Classic histogram exemplars are kept in order of appearance.
// Exemplars with missing timestamps are supported for classic histograms.
{ Labels : labels . FromStrings ( "dummyID" , "59727" ) , Value : - 0.00039 , Ts : 1625851155146 , HasTs : true } ,
{ Labels : labels . FromStrings ( "dummyID" , "5617" ) , Value : - 0.00029 , Ts : 1234568 , HasTs : false } ,
{ Labels : labels . FromStrings ( "dummyID" , "58215" ) , Value : - 0.00019 , Ts : 1625851055146 , HasTs : true } ,
} ,
} ,
}
for _ , test := range tests {
t . Run ( test . title , func ( t * testing . T ) {
app := & collectResultAppender { }
discoveryLabels := & Target {
labels : labels . FromStrings ( test . discoveryLabels ... ) ,
}
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . enableNativeHistogramIngestion = test . enableNativeHistogramsIngestion
sl . sampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateSampleLabels ( l , discoveryLabels , false , nil )
}
sl . reportSampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateReportSampleLabels ( l , discoveryLabels )
}
sl . alwaysScrapeClassicHist = test . alwaysScrapeClassicHist
now := time . Now ( )
for i := range test . floats {
if test . floats [ i ] . t != 0 {
continue
}
test . floats [ i ] . t = timestamp . FromTime ( now )
}
// We need to set the timestamp for expected exemplars that do not have a timestamp.
for i := range test . exemplars {
if test . exemplars [ i ] . Ts == 0 {
test . exemplars [ i ] . Ts = timestamp . FromTime ( now )
}
}
buf := & bytes . Buffer { }
if test . contentType == "application/vnd.google.protobuf" {
// In case of protobuf, we have to create the binary representation.
pb := & dto . MetricFamily { }
// From text to proto message.
require . NoError ( t , proto . UnmarshalText ( test . scrapeText , pb ) )
// From proto message to binary protobuf.
protoBuf , err := proto . Marshal ( pb )
require . NoError ( t , err )
// Write first length, then binary protobuf.
varintBuf := binary . AppendUvarint ( nil , uint64 ( len ( protoBuf ) ) )
buf . Write ( varintBuf )
buf . Write ( protoBuf )
} else {
buf . WriteString ( test . scrapeText )
}
_ , _ , _ , err := sl . append ( app , buf . Bytes ( ) , test . contentType , now )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
requireEqual ( t , test . floats , app . resultFloats )
requireEqual ( t , test . histograms , app . resultHistograms )
requireEqual ( t , test . exemplars , app . resultExemplars )
} )
}
}
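// The protobuf cases above frame each MetricFamily the same way the
// exposition format does on the wire: a uvarint length prefix followed by
// the binary message. As an illustrative sketch only (decodeFramedMetricFamily
// is a hypothetical helper, not part of this package), the framing can be
// decoded back like this:
//
//	func decodeFramedMetricFamily(buf []byte) (*dto.MetricFamily, []byte, error) {
//		msgLen, n := binary.Uvarint(buf)
//		if n <= 0 || int(msgLen) > len(buf[n:]) {
//			return nil, nil, errors.New("invalid length prefix")
//		}
//		mf := &dto.MetricFamily{}
//		if err := proto.Unmarshal(buf[n:n+int(msgLen)], mf); err != nil {
//			return nil, nil, err
//		}
//		return mf, buf[n+int(msgLen):], nil
//	}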
func TestScrapeLoopAppendExemplarSeries ( t * testing . T ) {
scrapeText := [ ] string { ` metric_total { n = "1" } 1 # { t = "1" } 1.0 10000
# EOF ` , ` metric_total { n = "1" } 2 # { t = "2" } 2.0 20000
# EOF ` }
samples := [ ] floatSample { {
metric : labels . FromStrings ( "__name__" , "metric_total" , "n" , "1" ) ,
f : 1 ,
} , {
metric : labels . FromStrings ( "__name__" , "metric_total" , "n" , "1" ) ,
f : 2 ,
} }
exemplars := [ ] exemplar . Exemplar {
{ Labels : labels . FromStrings ( "t" , "1" ) , Value : 1 , Ts : 10000000 , HasTs : true } ,
{ Labels : labels . FromStrings ( "t" , "2" ) , Value : 2 , Ts : 20000000 , HasTs : true } ,
}
discoveryLabels := & Target {
labels : labels . FromStrings ( ) ,
}
app := & collectResultAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . sampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateSampleLabels ( l , discoveryLabels , false , nil )
}
sl . reportSampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateReportSampleLabels ( l , discoveryLabels )
}
now := time . Now ( )
for i := range samples {
ts := now . Add ( time . Second * time . Duration ( i ) )
samples [ i ] . t = timestamp . FromTime ( ts )
}
// We need to set the timestamp for expected exemplars that do not have a timestamp.
for i := range exemplars {
if exemplars [ i ] . Ts == 0 {
ts := now . Add ( time . Second * time . Duration ( i ) )
exemplars [ i ] . Ts = timestamp . FromTime ( ts )
}
}
for i , st := range scrapeText {
_ , _ , _ , err := sl . append ( app , [ ] byte ( st ) , "application/openmetrics-text" , timestamp . Time ( samples [ i ] . t ) )
require . NoError ( t , err )
require . NoError ( t , app . Commit ( ) )
}
requireEqual ( t , samples , app . resultFloats )
requireEqual ( t , exemplars , app . resultExemplars )
}
func TestScrapeLoopRunReportsTargetDownOnScrapeError ( t * testing . T ) {
var (
scraper = & testScraper { }
appender = & collectResultAppender { }
app = func ( ctx context . Context ) storage . Appender { return appender }
)
ctx , cancel := context . WithCancel ( context . Background ( ) )
sl := newBasicScrapeLoop ( t , ctx , scraper , app , 10 * time . Millisecond )
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
cancel ( )
return errors . New ( "scrape failed" )
}
sl . run ( nil )
require . Equal ( t , 0.0 , appender . resultFloats [ 0 ] . f , "bad 'up' value" )
}
func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8 ( t * testing . T ) {
var (
scraper = & testScraper { }
appender = & collectResultAppender { }
app = func ( ctx context . Context ) storage . Appender { return appender }
)
ctx , cancel := context . WithCancel ( context . Background ( ) )
sl := newBasicScrapeLoop ( t , ctx , scraper , app , 10 * time . Millisecond )
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
cancel ( )
w . Write ( [ ] byte ( "a{l=\"\xff\"} 1\n" ) )
return nil
}
sl . run ( nil )
require . Equal ( t , 0.0 , appender . resultFloats [ 0 ] . f , "bad 'up' value" )
}
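// errorAppender simulates storage errors keyed on the metric name: samples
// named "out_of_order", "amend", and "out_of_bounds" fail with the
// corresponding storage error, while anything else is passed through to the
// embedded collectResultAppender.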
type errorAppender struct {
collectResultAppender
}
func ( app * errorAppender ) Append ( ref storage . SeriesRef , lset labels . Labels , t int64 , v float64 ) ( storage . SeriesRef , error ) {
switch lset . Get ( model . MetricNameLabel ) {
case "out_of_order" :
return 0 , storage . ErrOutOfOrderSample
case "amend" :
return 0 , storage . ErrDuplicateSampleForTimestamp
case "out_of_bounds" :
return 0 , storage . ErrOutOfBounds
default :
return app . collectResultAppender . Append ( ref , lset , t , v )
}
}
func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds ( t * testing . T ) {
app := & errorAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
now := time . Unix ( 1 , 0 )
slApp := sl . appender ( context . Background ( ) )
total , added , seriesAdded , err := sl . append ( slApp , [ ] byte ( "out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n" ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
want := [ ] floatSample {
{
metric : labels . FromStrings ( model . MetricNameLabel , "normal" ) ,
t : timestamp . FromTime ( now ) ,
f : 1 ,
} ,
}
requireEqual ( t , want , app . resultFloats , "Appended samples not as expected:\n%s" , app )
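// All four samples count as scraped and added; only "normal" creates a new
// series, because the other three appends fail with recoverable errors.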
require . Equal ( t , 4 , total )
require . Equal ( t , 4 , added )
require . Equal ( t , 1 , seriesAdded )
}
func TestScrapeLoopOutOfBoundsTimeError ( t * testing . T ) {
app := & collectResultAppender { }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil ,
func ( ctx context . Context ) storage . Appender {
return & timeLimitAppender {
Appender : app ,
maxTime : timestamp . FromTime ( time . Now ( ) . Add ( 10 * time . Minute ) ) ,
}
} ,
0 ,
)
now := time . Now ( ) . Add ( 20 * time . Minute )
slApp := sl . appender ( context . Background ( ) )
total , added , seriesAdded , err := sl . append ( slApp , [ ] byte ( "normal 1\n" ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
require . Equal ( t , 1 , total )
require . Equal ( t , 1 , added )
require . Equal ( t , 0 , seriesAdded )
}
func TestTargetScraperScrapeOK ( t * testing . T ) {
const (
configTimeout = 1500 * time . Millisecond
expectedTimeout = "1.5"
)
var (
protobufParsing bool
allowUTF8 bool
qValuePattern = regexp . MustCompile ( ` q=([0-9]+(\.\d+)?) ` )
)
server := httptest . NewServer (
http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
accept := r . Header . Get ( "Accept" )
if allowUTF8 {
require . Containsf ( t , accept , "escaping=allow-utf-8" , "Expected Accept header to allow utf8, got %q" , accept )
}
if protobufParsing {
require . True ( t , strings . HasPrefix ( accept , "application/vnd.google.protobuf;" ) ,
"Expected Accept header to prefer application/vnd.google.protobuf." )
}
contentTypes := strings . Split ( accept , "," )
for _ , ct := range contentTypes {
match := qValuePattern . FindStringSubmatch ( ct )
require . Len ( t , match , 3 )
qValue , err := strconv . ParseFloat ( match [ 1 ] , 64 )
require . NoError ( t , err , "Error parsing q value" )
require . GreaterOrEqual ( t , qValue , float64 ( 0 ) )
require . LessOrEqual ( t , qValue , float64 ( 1 ) )
require . LessOrEqual ( t , len ( strings . Split ( match [ 1 ] , "." ) [ 1 ] ) , 3 , "q value should have at most 3 decimal places" )
}
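// Each negotiated content type carries an explicit quality value, for example
// "application/vnd.google.protobuf;...;q=0.5" or "text/plain;version=0.0.4;q=0.4"
// (illustrative values; the loop above only checks that q is present, lies
// within [0, 1], and has at most 3 decimal places).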
timeout := r . Header . Get ( "X-Prometheus-Scrape-Timeout-Seconds" )
require . Equal ( t , expectedTimeout , timeout , "Expected scrape timeout header." )
if allowUTF8 {
w . Header ( ) . Set ( "Content-Type" , ` text/plain; version=1.0.0; escaping=allow-utf-8 ` )
} else {
w . Header ( ) . Set ( "Content-Type" , ` text/plain; version=0.0.4 ` )
}
w . Write ( [ ] byte ( "metric_a 1\nmetric_b 2\n" ) )
} ) ,
)
defer server . Close ( )
serverURL , err := url . Parse ( server . URL )
if err != nil {
panic ( err )
}
runTest := func ( acceptHeader string ) {
ts := & targetScraper {
Target : & Target {
labels : labels . FromStrings (
model . SchemeLabel , serverURL . Scheme ,
model . AddressLabel , serverURL . Host ,
) ,
} ,
client : http . DefaultClient ,
timeout : configTimeout ,
acceptHeader : acceptHeader ,
}
var buf bytes . Buffer
resp , err := ts . scrape ( context . Background ( ) )
require . NoError ( t , err )
contentType , err := ts . readResponse ( context . Background ( ) , resp , & buf )
require . NoError ( t , err )
if allowUTF8 {
require . Equal ( t , "text/plain; version=1.0.0; escaping=allow-utf-8" , contentType )
} else {
require . Equal ( t , "text/plain; version=0.0.4" , contentType )
}
require . Equal ( t , "metric_a 1\nmetric_b 2\n" , buf . String ( ) )
}
runTest ( acceptHeader ( config . DefaultScrapeProtocols , model . LegacyValidation ) )
protobufParsing = true
runTest ( acceptHeader ( config . DefaultProtoFirstScrapeProtocols , model . LegacyValidation ) )
protobufParsing = false
allowUTF8 = true
runTest ( acceptHeader ( config . DefaultScrapeProtocols , model . UTF8Validation ) )
protobufParsing = true
runTest ( acceptHeader ( config . DefaultProtoFirstScrapeProtocols , model . UTF8Validation ) )
}
func TestTargetScrapeScrapeCancel ( t * testing . T ) {
block := make ( chan struct { } )
server := httptest . NewServer (
http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
<- block
} ) ,
)
defer server . Close ( )
serverURL , err := url . Parse ( server . URL )
if err != nil {
panic ( err )
}
ts := & targetScraper {
Target : & Target {
labels : labels . FromStrings (
model . SchemeLabel , serverURL . Scheme ,
model . AddressLabel , serverURL . Host ,
) ,
} ,
client : http . DefaultClient ,
acceptHeader : acceptHeader ( config . DefaultGlobalConfig . ScrapeProtocols , model . LegacyValidation ) ,
}
ctx , cancel := context . WithCancel ( context . Background ( ) )
errc := make ( chan error , 1 )
go func ( ) {
time . Sleep ( 1 * time . Second )
cancel ( )
} ( )
go func ( ) {
_ , err := ts . scrape ( ctx )
switch {
case err == nil :
errc <- errors . New ( "Expected error but got nil" )
case ! errors . Is ( ctx . Err ( ) , context . Canceled ) :
errc <- fmt . Errorf ( "Expected context cancellation error but got: %w" , ctx . Err ( ) )
default :
close ( errc )
}
} ( )
select {
case <- time . After ( 5 * time . Second ) :
require . FailNow ( t , "Scrape function did not return in time." )
case err := <- errc :
require . NoError ( t , err )
}
// If this is closed in a defer above the function, the test server
// doesn't terminate and the test doesn't complete.
close ( block )
}
func TestTargetScrapeScrapeNotFound ( t * testing . T ) {
server := httptest . NewServer (
http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
w . WriteHeader ( http . StatusNotFound )
} ) ,
)
defer server . Close ( )
serverURL , err := url . Parse ( server . URL )
if err != nil {
panic ( err )
}
ts := & targetScraper {
Target : & Target {
labels : labels . FromStrings (
model . SchemeLabel , serverURL . Scheme ,
model . AddressLabel , serverURL . Host ,
) ,
} ,
client : http . DefaultClient ,
acceptHeader : acceptHeader ( config . DefaultGlobalConfig . ScrapeProtocols , model . LegacyValidation ) ,
}
resp , err := ts . scrape ( context . Background ( ) )
require . NoError ( t , err )
_ , err = ts . readResponse ( context . Background ( ) , resp , io . Discard )
require . ErrorContains ( t , err , "404" , "Expected \"404 NotFound\" error but got: %s" , err )
}
func TestTargetScraperBodySizeLimit ( t * testing . T ) {
const (
bodySizeLimit = 15
responseBody = "metric_a 1\nmetric_b 2\n"
)
var gzipResponse bool
server := httptest . NewServer (
http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
w . Header ( ) . Set ( "Content-Type" , ` text/plain; version=0.0.4 ` )
if gzipResponse {
w . Header ( ) . Set ( "Content-Encoding" , "gzip" )
gw := gzip . NewWriter ( w )
defer gw . Close ( )
gw . Write ( [ ] byte ( responseBody ) )
return
}
w . Write ( [ ] byte ( responseBody ) )
} ) ,
)
defer server . Close ( )
serverURL , err := url . Parse ( server . URL )
if err != nil {
panic ( err )
}
ts := & targetScraper {
Target : & Target {
labels : labels . FromStrings (
model . SchemeLabel , serverURL . Scheme ,
model . AddressLabel , serverURL . Host ,
) ,
} ,
client : http . DefaultClient ,
bodySizeLimit : bodySizeLimit ,
acceptHeader : acceptHeader ( config . DefaultGlobalConfig . ScrapeProtocols , model . LegacyValidation ) ,
metrics : newTestScrapeMetrics ( t ) ,
}
var buf bytes . Buffer
// Target response uncompressed body, scrape with body size limit.
resp , err := ts . scrape ( context . Background ( ) )
require . NoError ( t , err )
_ , err = ts . readResponse ( context . Background ( ) , resp , & buf )
require . ErrorIs ( t , err , errBodySizeLimit )
require . Equal ( t , bodySizeLimit , buf . Len ( ) )
// Target response gzip compressed body, scrape with body size limit.
gzipResponse = true
buf . Reset ( )
resp , err = ts . scrape ( context . Background ( ) )
require . NoError ( t , err )
_ , err = ts . readResponse ( context . Background ( ) , resp , & buf )
require . ErrorIs ( t , err , errBodySizeLimit )
require . Equal ( t , bodySizeLimit , buf . Len ( ) )
// Target response uncompressed body, scrape without body size limit.
gzipResponse = false
buf . Reset ( )
ts . bodySizeLimit = 0
resp , err = ts . scrape ( context . Background ( ) )
require . NoError ( t , err )
_ , err = ts . readResponse ( context . Background ( ) , resp , & buf )
require . NoError ( t , err )
require . Len ( t , responseBody , buf . Len ( ) )
// Target response gzip compressed body, scrape without body size limit.
gzipResponse = true
buf . Reset ( )
resp , err = ts . scrape ( context . Background ( ) )
require . NoError ( t , err )
_ , err = ts . readResponse ( context . Background ( ) , resp , & buf )
require . NoError ( t , err )
require . Len ( t , responseBody , buf . Len ( ) )
}
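// readBodyWithLimit is an illustrative sketch (not used by the tests above)
// of how a body-size limit can be enforced: copy at most limit bytes through
// an io.LimitReader and treat any remaining input as exceeding the limit.
// The real targetScraper.readResponse additionally handles gzip decompression
// and buffer reuse, so its details differ.
func readBodyWithLimit(r io.Reader, w io.Writer, limit int64) error {
	n, err := io.Copy(w, io.LimitReader(r, limit))
	if err != nil {
		return err
	}
	if n == limit {
		// Probe for one more byte to see whether the body actually exceeds the limit.
		var probe [1]byte
		if m, _ := r.Read(probe[:]); m > 0 {
			return errBodySizeLimit
		}
	}
	return nil
}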
// testScraper implements the scraper interface and allows setting values
// returned by its methods. It also allows setting a custom scrape function.
type testScraper struct {
offsetDur time . Duration
lastStart time . Time
lastDuration time . Duration
lastError error
scrapeErr error
scrapeFunc func ( context . Context , io . Writer ) error
}
func ( ts * testScraper ) offset ( time . Duration , uint64 ) time . Duration {
return ts . offsetDur
}
func ( ts * testScraper ) Report ( start time . Time , duration time . Duration , err error ) {
ts . lastStart = start
ts . lastDuration = duration
ts . lastError = err
}
func ( ts * testScraper ) scrape ( ctx context . Context ) ( * http . Response , error ) {
return nil , ts . scrapeErr
}
func ( ts * testScraper ) readResponse ( ctx context . Context , resp * http . Response , w io . Writer ) ( string , error ) {
if ts . scrapeFunc != nil {
return "" , ts . scrapeFunc ( ctx , w )
}
return "" , ts . scrapeErr
}
func TestScrapeLoop_RespectTimestamps ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
app := s . Appender ( context . Background ( ) )
capp := & collectResultAppender { next : app }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return capp } , 0 )
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( ` metric_a { a="1",b="1"} 1 0 ` ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
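// honor_timestamps is enabled by default, so the sample keeps the exposed
// timestamp 0 instead of the scrape time.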
want := [ ] floatSample {
{
metric : labels . FromStrings ( "__name__" , "metric_a" , "a" , "1" , "b" , "1" ) ,
t : 0 ,
f : 1 ,
} ,
}
require . Equal ( t , want , capp . resultFloats , "Appended samples not as expected:\n%s" , capp )
}
func TestScrapeLoop_DiscardTimestamps ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
app := s . Appender ( context . Background ( ) )
2019-03-15 03:04:15 -07:00
capp := & collectResultAppender { next : app }
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return capp } , 0 )
sl . honorTimestamps = false
now := time . Now ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( ` metric_a { a="1",b="1"} 1 0 ` ) , "text/plain" , now )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
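// With honorTimestamps disabled, the exposed timestamp 0 is discarded and
// replaced by the scrape time.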
want := [ ] floatSample {
{
metric : labels . FromStrings ( "__name__" , "metric_a" , "a" , "1" , "b" , "1" ) ,
t : timestamp . FromTime ( now ) ,
f : 1 ,
} ,
}
require . Equal ( t , want , capp . resultFloats , "Appended samples not as expected:\n%s" , capp )
}
func TestScrapeLoopDiscardDuplicateLabels ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
ctx , cancel := context . WithCancel ( context . Background ( ) )
sl := newBasicScrapeLoop ( t , ctx , & testScraper { } , s . Appender , 0 )
defer cancel ( )
// We add a good and a bad metric to check that both are discarded.
slApp := sl . appender ( ctx )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( "test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n" ) , "text/plain" , time . Time { } )
require . Error ( t , err )
require . NoError ( t , slApp . Rollback ( ) )
// We need to cycle staleness cache maps after a manual rollback. Otherwise they will have old entries in them,
// which would cause ErrDuplicateSampleForTimestamp errors on the next append.
sl . cache . iterDone ( true )
q , err := s . Querier ( time . Time { } . UnixNano ( ) , 0 )
require . NoError ( t , err )
series := q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , ".*" ) )
require . False ( t , series . Next ( ) , "series found in tsdb" )
require . NoError ( t , series . Err ( ) )
// We add a good metric to check that it is recorded.
slApp = sl . appender ( ctx )
_ , _ , _ , err = sl . append ( slApp , [ ] byte ( "test_metric{le=\"500\"} 1\n" ) , "text/plain" , time . Time { } )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
q , err = s . Querier ( time . Time { } . UnixNano ( ) , 0 )
require . NoError ( t , err )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "le" , "500" ) )
require . True ( t , series . Next ( ) , "series not found in tsdb" )
require . NoError ( t , series . Err ( ) )
require . False ( t , series . Next ( ) , "more than one series found in tsdb" )
}
func TestScrapeLoopDiscardUnnamedMetrics ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
app := s . Appender ( context . Background ( ) )
ctx , cancel := context . WithCancel ( context . Background ( ) )
sl := newBasicScrapeLoop ( t , context . Background ( ) , & testScraper { } , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . sampleMutator = func ( l labels . Labels ) labels . Labels {
if l . Has ( "drop" ) {
return labels . FromStrings ( "no" , "name" ) // This label set will trigger an error.
}
return l
}
defer cancel ( )
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( "nok 1\nnok2{drop=\"drop\"} 1\n" ) , "text/plain" , time . Time { } )
require . Error ( t , err )
require . NoError ( t , slApp . Rollback ( ) )
require . Equal ( t , errNameLabelMandatory , err )
q , err := s . Querier ( time . Time { } . UnixNano ( ) , 0 )
require . NoError ( t , err )
series := q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , ".*" ) )
require . False ( t , series . Next ( ) , "series found in tsdb" )
require . NoError ( t , series . Err ( ) )
}
func TestReusableConfig ( t * testing . T ) {
variants := [ ] * config . ScrapeConfig {
{
JobName : "prometheus" ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
} ,
{
JobName : "httpd" ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
} ,
{
JobName : "prometheus" ,
ScrapeTimeout : model . Duration ( 5 * time . Second ) ,
} ,
{
JobName : "prometheus" ,
MetricsPath : "/metrics" ,
} ,
{
JobName : "prometheus" ,
MetricsPath : "/metrics2" ,
} ,
{
JobName : "prometheus" ,
ScrapeTimeout : model . Duration ( 5 * time . Second ) ,
MetricsPath : "/metrics2" ,
} ,
{
JobName : "prometheus" ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
MetricsPath : "/metrics2" ,
} ,
{
JobName : "prometheus" ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
SampleLimit : 1000 ,
MetricsPath : "/metrics2" ,
} ,
}
match := [ ] [ ] int {
{ 0 , 2 } ,
{ 4 , 5 } ,
{ 4 , 6 } ,
{ 4 , 7 } ,
{ 5 , 6 } ,
{ 5 , 7 } ,
{ 6 , 7 } ,
}
noMatch := [ ] [ ] int {
{ 1 , 2 } ,
{ 0 , 4 } ,
{ 3 , 4 } ,
}
for i , m := range match {
require . True ( t , reusableCache ( variants [ m [ 0 ] ] , variants [ m [ 1 ] ] ) , "match test %d" , i )
require . True ( t , reusableCache ( variants [ m [ 1 ] ] , variants [ m [ 0 ] ] ) , "match test %d" , i )
require . True ( t , reusableCache ( variants [ m [ 1 ] ] , variants [ m [ 1 ] ] ) , "match test %d" , i )
require . True ( t , reusableCache ( variants [ m [ 0 ] ] , variants [ m [ 0 ] ] ) , "match test %d" , i )
}
for i , m := range noMatch {
require . False ( t , reusableCache ( variants [ m [ 0 ] ] , variants [ m [ 1 ] ] ) , "not match test %d" , i )
require . False ( t , reusableCache ( variants [ m [ 1 ] ] , variants [ m [ 0 ] ] ) , "not match test %d" , i )
}
}
func TestReuseScrapeCache ( t * testing . T ) {
var (
app = & nopAppendable { }
cfg = & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeTimeout : model . Duration ( 5 * time . Second ) ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
MetricsPath : "/metrics" ,
}
sp , _ = newScrapePool ( cfg , app , 0 , nil , nil , & Options { } , newTestScrapeMetrics ( t ) )
t1 = & Target {
discoveredLabels : labels . FromStrings ( "labelNew" , "nameNew" , "labelNew1" , "nameNew1" , "labelNew2" , "nameNew2" ) ,
}
proxyURL , _ = url . Parse ( "http://localhost:2128" )
)
defer sp . stop ( )
sp . sync ( [ ] * Target { t1 } )
steps := [ ] struct {
keep bool
newConfig * config . ScrapeConfig
} {
{
keep : true ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 5 * time . Second ) ,
MetricsPath : "/metrics" ,
} ,
} ,
{
keep : false ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics2" ,
} ,
} ,
{
keep : true ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
SampleLimit : 400 ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics2" ,
} ,
} ,
{
keep : false ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
HonorTimestamps : true ,
SampleLimit : 400 ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics2" ,
} ,
} ,
{
keep : true ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
HonorTimestamps : true ,
SampleLimit : 400 ,
HTTPClientConfig : config_util . HTTPClientConfig {
ProxyConfig : config_util . ProxyConfig { ProxyURL : config_util . URL { URL : proxyURL } } ,
} ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics2" ,
} ,
} ,
{
keep : false ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
HonorTimestamps : true ,
HonorLabels : true ,
SampleLimit : 400 ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics2" ,
} ,
} ,
{
keep : false ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics" ,
LabelLimit : 1 ,
} ,
} ,
{
keep : false ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics" ,
LabelLimit : 15 ,
} ,
} ,
{
keep : false ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics" ,
LabelLimit : 15 ,
LabelNameLengthLimit : 5 ,
} ,
} ,
{
keep : false ,
newConfig : & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
ScrapeTimeout : model . Duration ( 15 * time . Second ) ,
MetricsPath : "/metrics" ,
LabelLimit : 15 ,
LabelNameLengthLimit : 5 ,
LabelValueLengthLimit : 7 ,
} ,
} ,
}
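// cacheAddr captures the identity of each loop's scrape cache by formatting
// its pointer with %p; comparing these strings before and after a reload
// tells us whether the cache object was reused or replaced.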
cacheAddr := func ( sp * scrapePool ) map [ uint64 ] string {
r := make ( map [ uint64 ] string )
for fp , l := range sp . loops {
r [ fp ] = fmt . Sprintf ( "%p" , l . getCache ( ) )
}
return r
}
for i , s := range steps {
initCacheAddr := cacheAddr ( sp )
sp . reload ( s . newConfig )
for fp , newCacheAddr := range cacheAddr ( sp ) {
if s . keep {
require . Equal ( t , initCacheAddr [ fp ] , newCacheAddr , "step %d: old cache and new cache are not the same" , i )
} else {
require . NotEqual ( t , initCacheAddr [ fp ] , newCacheAddr , "step %d: old cache and new cache are the same" , i )
}
}
initCacheAddr = cacheAddr ( sp )
sp . reload ( s . newConfig )
for fp , newCacheAddr := range cacheAddr ( sp ) {
require . Equal ( t , initCacheAddr [ fp ] , newCacheAddr , "step %d: reloading the exact config invalidates the cache" , i )
}
}
}
func TestScrapeAddFast ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
ctx , cancel := context . WithCancel ( context . Background ( ) )
sl := newBasicScrapeLoop ( t , ctx , & testScraper { } , s . Appender , 0 )
defer cancel ( )
slApp := sl . appender ( ctx )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( "up 1\n" ) , "text/plain" , time . Time { } )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
// Poison the cache. There is just one entry, and one series in the
// storage. Changing the ref will create a 'not found' error.
for _ , v := range sl . getCache ( ) . series {
v . ref ++
}
slApp = sl . appender ( ctx )
_ , _ , _ , err = sl . append ( slApp , [ ] byte ( "up 1\n" ) , "text/plain" , time . Time { } . Add ( time . Second ) )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
}
func TestReuseCacheRace ( t * testing . T ) {
var (
app = & nopAppendable { }
cfg = & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeTimeout : model . Duration ( 5 * time . Second ) ,
ScrapeInterval : model . Duration ( 5 * time . Second ) ,
MetricsPath : "/metrics" ,
}
buffers = pool . New ( 1e3 , 100e6 , 3 , func ( sz int ) interface { } { return make ( [ ] byte , 0 , sz ) } )
sp , _ = newScrapePool ( cfg , app , 0 , nil , buffers , & Options { } , newTestScrapeMetrics ( t ) )
t1 = & Target {
discoveredLabels : labels . FromStrings ( "labelNew" , "nameNew" ) ,
}
)
defer sp . stop ( )
sp . sync ( [ ] * Target { t1 } )
start := time . Now ( )
for i := uint ( 1 ) ; i > 0 ; i ++ {
if time . Since ( start ) > 5 * time . Second {
break
}
sp . reload ( & config . ScrapeConfig {
JobName : "Prometheus" ,
ScrapeTimeout : model . Duration ( 1 * time . Millisecond ) ,
ScrapeInterval : model . Duration ( 1 * time . Millisecond ) ,
MetricsPath : "/metrics" ,
SampleLimit : i ,
} )
}
}
func TestCheckAddError ( t * testing . T ) {
var appErrs appendErrors
sl := scrapeLoop { l : promslog . NewNopLogger ( ) , metrics : newTestScrapeMetrics ( t ) }
sl . checkAddError ( nil , storage . ErrOutOfOrderSample , nil , nil , & appErrs )
require . Equal ( t , 1 , appErrs . numOutOfOrder )
}
func TestScrapeReportSingleAppender ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
var (
signal = make ( chan struct { } , 1 )
scraper = & testScraper { }
)
ctx , cancel := context . WithCancel ( context . Background ( ) )
// Since we're writing samples directly below, we need to provide a protocol fallback.
sl := newBasicScrapeLoopWithFallback ( t , ctx , scraper , s . Appender , 10 * time . Millisecond , "text/plain" )
numScrapes := 0
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
numScrapes ++
if numScrapes % 4 == 0 {
return errors . New ( "scrape failed" )
}
w . Write ( [ ] byte ( "metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n" ) )
return nil
}
go func ( ) {
sl . run ( nil )
signal <- struct { } { }
} ( )
start := time . Now ( )
for time . Since ( start ) < 3 * time . Second {
q , err := s . Querier ( time . Time { } . UnixNano ( ) , time . Now ( ) . UnixNano ( ) )
require . NoError ( t , err )
series := q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , ".+" ) )
c := 0
for series . Next ( ) {
i := series . At ( ) . Iterator ( nil )
for i . Next ( ) != chunkenc . ValNone {
c ++
}
}
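// Each successful scrape appends 4 samples plus the five standard report
// series (up, scrape_duration_seconds, scrape_samples_scraped,
// scrape_samples_post_metric_relabeling, scrape_series_added), 9 in total,
// so the count should always be a multiple of 9.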
require . Equal ( t , 0 , c % 9 , "Appended samples not as expected: %d" , c )
q . Close ( )
}
cancel ( )
select {
case <- signal :
case <- time . After ( 5 * time . Second ) :
require . FailNow ( t , "Scrape wasn't stopped." )
}
}
func TestScrapeReportLimit ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
cfg := & config . ScrapeConfig {
JobName : "test" ,
SampleLimit : 5 ,
Scheme : "http" ,
ScrapeInterval : model . Duration ( 100 * time . Millisecond ) ,
ScrapeTimeout : model . Duration ( 100 * time . Millisecond ) ,
}
ts , scrapedTwice := newScrapableServer ( "metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n" )
defer ts . Close ( )
sp , err := newScrapePool ( cfg , s , 0 , nil , nil , & Options { } , newTestScrapeMetrics ( t ) )
require . NoError ( t , err )
defer sp . stop ( )
testURL , err := url . Parse ( ts . URL )
require . NoError ( t , err )
sp . Sync ( [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet { { model . AddressLabel : model . LabelValue ( testURL . Host ) } } ,
} ,
} )
select {
case <- time . After ( 5 * time . Second ) :
t . Fatalf ( "target was not scraped twice" )
case <- scrapedTwice :
// If the target has been scraped twice, report samples from the first
// scrape have been inserted in the database.
}
ctx , cancel := context . WithCancel ( context . Background ( ) )
defer cancel ( )
q , err := s . Querier ( time . Time { } . UnixNano ( ) , time . Now ( ) . UnixNano ( ) )
require . NoError ( t , err )
defer q . Close ( )
series := q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , "up" ) )
var found bool
for series . Next ( ) {
i := series . At ( ) . Iterator ( nil )
for i . Next ( ) == chunkenc . ValFloat {
_ , v := i . At ( )
require . Equal ( t , 1.0 , v )
found = true
}
}
require . True ( t , found )
}
func TestScrapeUTF8 ( t * testing . T ) {
s := teststorage . New ( t )
defer s . Close ( )
model . NameValidationScheme = model . UTF8Validation
t . Cleanup ( func ( ) { model . NameValidationScheme = model . LegacyValidation } )
cfg := & config . ScrapeConfig {
JobName : "test" ,
Scheme : "http" ,
ScrapeInterval : model . Duration ( 100 * time . Millisecond ) ,
ScrapeTimeout : model . Duration ( 100 * time . Millisecond ) ,
MetricNameValidationScheme : config . UTF8ValidationConfig ,
}
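// In UTF-8 mode, a metric name that is not valid under the legacy scheme is
// exposed as a quoted string inside the braces, as in {"with.dots"} below.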
ts , scrapedTwice := newScrapableServer ( "{\"with.dots\"} 42\n" )
defer ts . Close ( )
sp , err := newScrapePool ( cfg , s , 0 , nil , nil , & Options { } , newTestScrapeMetrics ( t ) )
require . NoError ( t , err )
defer sp . stop ( )
testURL , err := url . Parse ( ts . URL )
require . NoError ( t , err )
sp . Sync ( [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet { { model . AddressLabel : model . LabelValue ( testURL . Host ) } } ,
} ,
} )
select {
case <- time . After ( 5 * time . Second ) :
t . Fatalf ( "target was not scraped twice" )
case <- scrapedTwice :
// If the target has been scraped twice, report samples from the first
// scrape have been inserted in the database.
}
ctx , cancel := context . WithCancel ( context . Background ( ) )
defer cancel ( )
q , err := s . Querier ( time . Time { } . UnixNano ( ) , time . Now ( ) . UnixNano ( ) )
require . NoError ( t , err )
defer q . Close ( )
series := q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , "with.dots" ) )
require . True ( t , series . Next ( ) , "series not found in tsdb" )
}
func TestScrapeLoopLabelLimit ( t * testing . T ) {
tests := [ ] struct {
title string
scrapeLabels string
discoveryLabels [ ] string
labelLimits labelLimits
expectErr bool
} {
{
title : "Valid number of labels" ,
scrapeLabels : ` metric { l1="1", l2="2"} 0 ` ,
discoveryLabels : nil ,
labelLimits : labelLimits { labelLimit : 5 } ,
expectErr : false ,
} , {
title : "Too many labels" ,
scrapeLabels : ` metric { l1="1", l2="2", l3="3", l4="4", l5="5", l6="6"} 0 ` ,
discoveryLabels : nil ,
labelLimits : labelLimits { labelLimit : 5 } ,
expectErr : true ,
} , {
title : "Too many labels including discovery labels" ,
scrapeLabels : ` metric { l1="1", l2="2", l3="3", l4="4"} 0 ` ,
discoveryLabels : [ ] string { "l5" , "5" , "l6" , "6" } ,
labelLimits : labelLimits { labelLimit : 5 } ,
expectErr : true ,
} , {
title : "Valid labels name length" ,
scrapeLabels : ` metric { l1="1", l2="2"} 0 ` ,
discoveryLabels : nil ,
labelLimits : labelLimits { labelNameLengthLimit : 10 } ,
expectErr : false ,
} , {
title : "Label name too long" ,
scrapeLabels : ` metric { label_name_too_long="0"} 0 ` ,
discoveryLabels : nil ,
labelLimits : labelLimits { labelNameLengthLimit : 10 } ,
expectErr : true ,
} , {
title : "Discovery label name too long" ,
scrapeLabels : ` metric { l1="1", l2="2"} 0 ` ,
discoveryLabels : [ ] string { "label_name_too_long" , "0" } ,
labelLimits : labelLimits { labelNameLengthLimit : 10 } ,
expectErr : true ,
} , {
title : "Valid labels value length" ,
scrapeLabels : ` metric { l1="1", l2="2"} 0 ` ,
discoveryLabels : nil ,
labelLimits : labelLimits { labelValueLengthLimit : 10 } ,
expectErr : false ,
} , {
title : "Label value too long" ,
scrapeLabels : ` metric { l1="label_value_too_long"} 0 ` ,
discoveryLabels : nil ,
labelLimits : labelLimits { labelValueLengthLimit : 10 } ,
expectErr : true ,
} , {
title : "Discovery label value too long" ,
scrapeLabels : ` metric { l1="1", l2="2"} 0 ` ,
discoveryLabels : [ ] string { "l1" , "label_value_too_long" } ,
labelLimits : labelLimits { labelValueLengthLimit : 10 } ,
expectErr : true ,
} ,
}
for _ , test := range tests {
app := & collectResultAppender { }
discoveryLabels := & Target {
labels : labels . FromStrings ( test . discoveryLabels ... ) ,
}
sl := newBasicScrapeLoop ( t , context . Background ( ) , nil , func ( ctx context . Context ) storage . Appender { return app } , 0 )
sl . sampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateSampleLabels ( l , discoveryLabels , false , nil )
}
sl . reportSampleMutator = func ( l labels . Labels ) labels . Labels {
return mutateReportSampleLabels ( l , discoveryLabels )
}
sl . labelLimits = & test . labelLimits
slApp := sl . appender ( context . Background ( ) )
_ , _ , _ , err := sl . append ( slApp , [ ] byte ( test . scrapeLabels ) , "text/plain" , time . Now ( ) )
t . Logf ( "Test:%s" , test . title )
if test . expectErr {
require . Error ( t , err )
} else {
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
}
}
}
func TestTargetScrapeIntervalAndTimeoutRelabel ( t * testing . T ) {
interval , _ := model . ParseDuration ( "2s" )
timeout , _ := model . ParseDuration ( "500ms" )
config := & config . ScrapeConfig {
ScrapeInterval : interval ,
ScrapeTimeout : timeout ,
RelabelConfigs : [ ] * relabel . Config {
{
SourceLabels : model . LabelNames { model . ScrapeIntervalLabel } ,
Regex : relabel . MustNewRegexp ( "2s" ) ,
Replacement : "3s" ,
TargetLabel : model . ScrapeIntervalLabel ,
Action : relabel . Replace ,
} ,
{
SourceLabels : model . LabelNames { model . ScrapeTimeoutLabel } ,
Regex : relabel . MustNewRegexp ( "500ms" ) ,
Replacement : "750ms" ,
TargetLabel : model . ScrapeTimeoutLabel ,
Action : relabel . Replace ,
} ,
} ,
}
sp , _ := newScrapePool ( config , & nopAppendable { } , 0 , nil , nil , & Options { } , newTestScrapeMetrics ( t ) )
tgts := [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet { { model . AddressLabel : "127.0.0.1:9090" } } ,
} ,
}
sp . Sync ( tgts )
defer sp . stop ( )
require . Equal ( t , "3s" , sp . ActiveTargets ( ) [ 0 ] . labels . Get ( model . ScrapeIntervalLabel ) )
require . Equal ( t , "750ms" , sp . ActiveTargets ( ) [ 0 ] . labels . Get ( model . ScrapeTimeoutLabel ) )
}
// Testing whether we can remove trailing .0 from histogram 'le' and summary 'quantile' labels.
func TestLeQuantileReLabel ( t * testing . T ) {
simpleStorage := teststorage . New ( t )
defer simpleStorage . Close ( )
config := & config . ScrapeConfig {
JobName : "test" ,
MetricRelabelConfigs : [ ] * relabel . Config {
{
SourceLabels : model . LabelNames { "le" , "__name__" } ,
Regex : relabel . MustNewRegexp ( "(\\d+)\\.0+;.*_bucket" ) ,
Replacement : relabel . DefaultRelabelConfig . Replacement ,
Separator : relabel . DefaultRelabelConfig . Separator ,
TargetLabel : "le" ,
Action : relabel . Replace ,
} ,
{
SourceLabels : model . LabelNames { "quantile" } ,
Regex : relabel . MustNewRegexp ( "(\\d+)\\.0+" ) ,
Replacement : relabel . DefaultRelabelConfig . Replacement ,
Separator : relabel . DefaultRelabelConfig . Separator ,
TargetLabel : "quantile" ,
Action : relabel . Replace ,
} ,
} ,
SampleLimit : 100 ,
Scheme : "http" ,
ScrapeInterval : model . Duration ( 100 * time . Millisecond ) ,
ScrapeTimeout : model . Duration ( 100 * time . Millisecond ) ,
}
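// The two rules above correspond roughly to the following scrape_config
// snippet (illustrative only):
//
//	metric_relabel_configs:
//	  - source_labels: [le, __name__]
//	    regex: '(\d+)\.0+;.*_bucket'
//	    target_label: le
//	  - source_labels: [quantile]
//	    regex: '(\d+)\.0+'
//	    target_label: quantile
//
// With the default replacement "$1", a value like "5.0" is rewritten to "5".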
metricsText := `
# HELP test_histogram This is a histogram with default buckets
# TYPE test_histogram histogram
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "0.005" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "0.01" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "0.025" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "0.05" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "0.1" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "0.25" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "0.5" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "1.0" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "2.5" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "5.0" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "10.0" } 0
test_histogram_bucket { address = "0.0.0.0" , port = "5001" , le = "+Inf" } 0
test_histogram_sum { address = "0.0.0.0" , port = "5001" } 0
test_histogram_count { address = "0.0.0.0" , port = "5001" } 0
# HELP test_summary Number of inflight requests sampled at a regular interval . Quantile buckets keep track of inflight requests over the last 60 s .
# TYPE test_summary summary
test_summary { quantile = "0.5" } 0
test_summary { quantile = "0.9" } 0
test_summary { quantile = "0.95" } 0
test_summary { quantile = "0.99" } 0
test_summary { quantile = "1.0" } 1
test_summary_sum 1
test_summary_count 199
`
// The expected "le" values do not have the trailing ".0".
expectedLeValues := [ ] string { "0.005" , "0.01" , "0.025" , "0.05" , "0.1" , "0.25" , "0.5" , "1" , "2.5" , "5" , "10" , "+Inf" }
// The expected "quantile" values do not have the trailing ".0".
expectedQuantileValues := [ ] string { "0.5" , "0.9" , "0.95" , "0.99" , "1" }
ts , scrapedTwice := newScrapableServer ( metricsText )
defer ts . Close ( )
sp , err := newScrapePool ( config , simpleStorage , 0 , nil , nil , & Options { } , newTestScrapeMetrics ( t ) )
require . NoError ( t , err )
defer sp . stop ( )
testURL , err := url . Parse ( ts . URL )
require . NoError ( t , err )
sp . Sync ( [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet { { model . AddressLabel : model . LabelValue ( testURL . Host ) } } ,
} ,
} )
require . Len ( t , sp . ActiveTargets ( ) , 1 )
select {
case <- time . After ( 5 * time . Second ) :
t . Fatalf ( "target was not scraped" )
case <- scrapedTwice :
}
ctx , cancel := context . WithCancel ( context . Background ( ) )
defer cancel ( )
q , err := simpleStorage . Querier ( time . Time { } . UnixNano ( ) , time . Now ( ) . UnixNano ( ) )
require . NoError ( t , err )
defer q . Close ( )
checkValues := func ( labelName string , expectedValues [ ] string , series storage . SeriesSet ) {
foundLeValues := map [ string ] bool { }
for series . Next ( ) {
s := series . At ( )
v := s . Labels ( ) . Get ( labelName )
require . NotContains ( t , foundLeValues , v , "duplicate label value found" )
foundLeValues [ v ] = true
}
require . Equal ( t , len ( expectedValues ) , len ( foundLeValues ) , "number of label values not as expected" )
for _ , v := range expectedValues {
require . Contains ( t , foundLeValues , v , "label value not found" )
}
}
series := q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , "test_histogram_bucket" ) )
checkValues ( "le" , expectedLeValues , series )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , "test_summary" ) )
checkValues ( "quantile" , expectedQuantileValues , series )
}
// Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets.
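// A native histogram with custom buckets (NHCB) keeps the classic bucket
// boundaries instead of an exponential schema, so converting a scraped
// classic histogram into an NHCB preserves its bucket counts.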
func TestConvertClassicHistogramsToNHCB ( t * testing . T ) {
genTestCounterText := func ( name string , value int , withMetadata bool ) string {
if withMetadata {
return fmt . Sprintf ( `
# HELP % s some help text
# TYPE % s counter
% s { address = "0.0.0.0" , port = "5001" } % d
` , name , name , name , value )
}
return fmt . Sprintf ( `
% s % d
` , name , value )
}
genTestHistText := func ( name string , withMetadata bool ) string {
data := map [ string ] interface { } {
"name" : name ,
}
b := & bytes . Buffer { }
if withMetadata {
template . Must ( template . New ( "" ) . Parse ( `
# HELP { { . name } } This is a histogram with default buckets
# TYPE { { . name } } histogram
` ) ) . Execute ( b , data )
}
template . Must ( template . New ( "" ) . Parse ( `
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "0.005" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "0.01" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "0.025" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "0.05" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "0.1" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "0.25" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "0.5" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "1" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "2.5" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "5" } 0
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "10" } 1
{ { . name } } _bucket { address = "0.0.0.0" , port = "5001" , le = "+Inf" } 1
{ { . name } } _sum { address = "0.0.0.0" , port = "5001" } 10
{ { . name } } _count { address = "0.0.0.0" , port = "5001" } 1
` ) ) . Execute ( b , data )
return b . String ( )
}
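// genTestCounterProto renders a counter MetricFamily in protobuf text format;
// the test server below converts it to the delimited binary wire format.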
genTestCounterProto := func ( name string , value int ) string {
return fmt . Sprintf ( `
name : "%s"
help : "some help text"
type : COUNTER
metric : <
label : <
name : "address"
value : "0.0.0.0"
>
label : <
name : "port"
value : "5001"
>
counter : <
value : % d
>
>
` , name , value )
}
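// genTestHistProto renders a histogram MetricFamily in protobuf text format,
// optionally including classic buckets and/or a native exponential histogram.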
genTestHistProto := func ( name string , hasClassic , hasExponential bool ) string {
var classic string
if hasClassic {
classic = `
bucket : <
cumulative_count : 0
upper_bound : 0.005
>
bucket : <
cumulative_count : 0
upper_bound : 0.01
>
bucket : <
cumulative_count : 0
upper_bound : 0.025
>
bucket : <
cumulative_count : 0
upper_bound : 0.05
>
bucket : <
cumulative_count : 0
upper_bound : 0.1
>
bucket : <
cumulative_count : 0
upper_bound : 0.25
>
bucket : <
cumulative_count : 0
upper_bound : 0.5
>
bucket : <
cumulative_count : 0
upper_bound : 1
>
bucket : <
cumulative_count : 0
upper_bound : 2.5
>
bucket : <
cumulative_count : 0
upper_bound : 5
>
bucket : <
cumulative_count : 1
upper_bound : 10
> `
}
var expo string
if hasExponential {
expo = `
schema : 3
zero_threshold : 2.938735877055719e-39
zero_count : 0
positive_span : <
offset : 2
length : 1
>
positive_delta : 1 `
}
return fmt . Sprintf ( `
name : "%s"
help : "This is a histogram with default buckets"
type : HISTOGRAM
metric : <
label : <
name : "address"
value : "0.0.0.0"
>
label : <
name : "port"
value : "5001"
>
histogram : <
sample_count : 1
sample_sum : 10
% s
% s
>
timestamp_ms : 1234568
>
` , name , classic , expo )
}
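// metricsTexts enumerates the scrape payloads under test: text and protobuf
// exposition, with metric families in different orders, and with classic
// and/or native exponential histograms.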
metricsTexts := map [ string ] struct {
text [ ] string
contentType string
hasClassic bool
hasExponential bool
} {
"text" : {
text : [ ] string {
genTestCounterText ( "test_metric_1" , 1 , true ) ,
genTestCounterText ( "test_metric_1_count" , 1 , true ) ,
genTestCounterText ( "test_metric_1_sum" , 1 , true ) ,
genTestCounterText ( "test_metric_1_bucket" , 1 , true ) ,
genTestHistText ( "test_histogram_1" , true ) ,
genTestCounterText ( "test_metric_2" , 1 , true ) ,
genTestCounterText ( "test_metric_2_count" , 1 , true ) ,
genTestCounterText ( "test_metric_2_sum" , 1 , true ) ,
genTestCounterText ( "test_metric_2_bucket" , 1 , true ) ,
genTestHistText ( "test_histogram_2" , true ) ,
genTestCounterText ( "test_metric_3" , 1 , true ) ,
genTestCounterText ( "test_metric_3_count" , 1 , true ) ,
genTestCounterText ( "test_metric_3_sum" , 1 , true ) ,
genTestCounterText ( "test_metric_3_bucket" , 1 , true ) ,
genTestHistText ( "test_histogram_3" , true ) ,
} ,
hasClassic : true ,
} ,
"text, in different order" : {
text : [ ] string {
genTestCounterText ( "test_metric_1" , 1 , true ) ,
genTestCounterText ( "test_metric_1_count" , 1 , true ) ,
genTestCounterText ( "test_metric_1_sum" , 1 , true ) ,
genTestCounterText ( "test_metric_1_bucket" , 1 , true ) ,
genTestHistText ( "test_histogram_1" , true ) ,
genTestCounterText ( "test_metric_2" , 1 , true ) ,
genTestCounterText ( "test_metric_2_count" , 1 , true ) ,
genTestCounterText ( "test_metric_2_sum" , 1 , true ) ,
genTestCounterText ( "test_metric_2_bucket" , 1 , true ) ,
genTestHistText ( "test_histogram_2" , true ) ,
genTestHistText ( "test_histogram_3" , true ) ,
genTestCounterText ( "test_metric_3" , 1 , true ) ,
genTestCounterText ( "test_metric_3_count" , 1 , true ) ,
genTestCounterText ( "test_metric_3_sum" , 1 , true ) ,
genTestCounterText ( "test_metric_3_bucket" , 1 , true ) ,
} ,
hasClassic : true ,
} ,
"protobuf" : {
text : [ ] string {
genTestCounterProto ( "test_metric_1" , 1 ) ,
genTestCounterProto ( "test_metric_1_count" , 1 ) ,
genTestCounterProto ( "test_metric_1_sum" , 1 ) ,
genTestCounterProto ( "test_metric_1_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_1" , true , false ) ,
genTestCounterProto ( "test_metric_2" , 1 ) ,
genTestCounterProto ( "test_metric_2_count" , 1 ) ,
genTestCounterProto ( "test_metric_2_sum" , 1 ) ,
genTestCounterProto ( "test_metric_2_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_2" , true , false ) ,
genTestCounterProto ( "test_metric_3" , 1 ) ,
genTestCounterProto ( "test_metric_3_count" , 1 ) ,
genTestCounterProto ( "test_metric_3_sum" , 1 ) ,
genTestCounterProto ( "test_metric_3_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_3" , true , false ) ,
} ,
contentType : "application/vnd.google.protobuf" ,
hasClassic : true ,
} ,
"protobuf, in different order" : {
text : [ ] string {
genTestHistProto ( "test_histogram_1" , true , false ) ,
genTestCounterProto ( "test_metric_1" , 1 ) ,
genTestCounterProto ( "test_metric_1_count" , 1 ) ,
genTestCounterProto ( "test_metric_1_sum" , 1 ) ,
genTestCounterProto ( "test_metric_1_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_2" , true , false ) ,
genTestCounterProto ( "test_metric_2" , 1 ) ,
genTestCounterProto ( "test_metric_2_count" , 1 ) ,
genTestCounterProto ( "test_metric_2_sum" , 1 ) ,
genTestCounterProto ( "test_metric_2_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_3" , true , false ) ,
genTestCounterProto ( "test_metric_3" , 1 ) ,
genTestCounterProto ( "test_metric_3_count" , 1 ) ,
genTestCounterProto ( "test_metric_3_sum" , 1 ) ,
genTestCounterProto ( "test_metric_3_bucket" , 1 ) ,
} ,
contentType : "application/vnd.google.protobuf" ,
hasClassic : true ,
} ,
"protobuf, with additional native exponential histogram" : {
text : [ ] string {
genTestCounterProto ( "test_metric_1" , 1 ) ,
genTestCounterProto ( "test_metric_1_count" , 1 ) ,
genTestCounterProto ( "test_metric_1_sum" , 1 ) ,
genTestCounterProto ( "test_metric_1_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_1" , true , true ) ,
genTestCounterProto ( "test_metric_2" , 1 ) ,
genTestCounterProto ( "test_metric_2_count" , 1 ) ,
genTestCounterProto ( "test_metric_2_sum" , 1 ) ,
genTestCounterProto ( "test_metric_2_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_2" , true , true ) ,
genTestCounterProto ( "test_metric_3" , 1 ) ,
genTestCounterProto ( "test_metric_3_count" , 1 ) ,
genTestCounterProto ( "test_metric_3_sum" , 1 ) ,
genTestCounterProto ( "test_metric_3_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_3" , true , true ) ,
} ,
contentType : "application/vnd.google.protobuf" ,
hasClassic : true ,
hasExponential : true ,
} ,
"protobuf, with only native exponential histogram" : {
text : [ ] string {
genTestCounterProto ( "test_metric_1" , 1 ) ,
genTestCounterProto ( "test_metric_1_count" , 1 ) ,
genTestCounterProto ( "test_metric_1_sum" , 1 ) ,
genTestCounterProto ( "test_metric_1_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_1" , false , true ) ,
genTestCounterProto ( "test_metric_2" , 1 ) ,
genTestCounterProto ( "test_metric_2_count" , 1 ) ,
genTestCounterProto ( "test_metric_2_sum" , 1 ) ,
genTestCounterProto ( "test_metric_2_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_2" , false , true ) ,
genTestCounterProto ( "test_metric_3" , 1 ) ,
genTestCounterProto ( "test_metric_3_count" , 1 ) ,
genTestCounterProto ( "test_metric_3_sum" , 1 ) ,
genTestCounterProto ( "test_metric_3_bucket" , 1 ) ,
genTestHistProto ( "test_histogram_3" , false , true ) ,
} ,
contentType : "application/vnd.google.protobuf" ,
hasExponential : true ,
} ,
}
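// checkBucketValues verifies the set of "le" values found on the classic
// bucket series: all default bucket boundaries when expectedCount > 0, none otherwise.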
checkBucketValues := func ( expectedCount int , series storage . SeriesSet ) {
labelName := "le"
var expectedValues [ ] string
if expectedCount > 0 {
expectedValues = [ ] string { "0.005" , "0.01" , "0.025" , "0.05" , "0.1" , "0.25" , "0.5" , "1.0" , "2.5" , "5.0" , "10.0" , "+Inf" }
}
foundLeValues := map [ string ] bool { }
for series . Next ( ) {
s := series . At ( )
v := s . Labels ( ) . Get ( labelName )
require . NotContains ( t , foundLeValues , v , "duplicate label value found" )
foundLeValues [ v ] = true
}
require . Equal ( t , len ( expectedValues ) , len ( foundLeValues ) , "unexpected number of label values, expected %v but found %v" , expectedValues , foundLeValues )
for _ , v := range expectedValues {
require . Contains ( t , foundLeValues , v , "label value not found" )
}
}
// Checks that the expected number of series is present and runs a basic sanity check of the float values.
checkFloatSeries := func ( series storage . SeriesSet , expectedCount int , expectedFloat float64 ) {
count := 0
for series . Next ( ) {
i := series . At ( ) . Iterator ( nil )
loop :
for {
switch i . Next ( ) {
case chunkenc . ValNone :
break loop
case chunkenc . ValFloat :
_ , f := i . At ( )
require . Equal ( t , expectedFloat , f )
case chunkenc . ValHistogram :
panic ( "unexpected value type: histogram" )
case chunkenc . ValFloatHistogram :
panic ( "unexpected value type: float histogram" )
default :
panic ( "unexpected value type" )
}
}
count ++
}
require . Equal ( t , expectedCount , count , "number of float series not as expected" )
}
// Checks that the expected number of series is present and runs a basic sanity check of the histogram values.
checkHistSeries := func ( series storage . SeriesSet , expectedCount int , expectedSchema int32 ) {
count := 0
for series . Next ( ) {
i := series . At ( ) . Iterator ( nil )
loop :
for {
switch i . Next ( ) {
case chunkenc . ValNone :
break loop
case chunkenc . ValFloat :
panic ( "unexpected value type: float" )
case chunkenc . ValHistogram :
_ , h := i . AtHistogram ( nil )
require . Equal ( t , expectedSchema , h . Schema )
require . Equal ( t , uint64 ( 1 ) , h . Count )
require . Equal ( t , 10.0 , h . Sum )
case chunkenc . ValFloatHistogram :
_ , h := i . AtFloatHistogram ( nil )
require . Equal ( t , expectedSchema , h . Schema )
require . Equal ( t , uint64 ( 1 ) , h . Count )
require . Equal ( t , 10.0 , h . Sum )
default :
panic ( "unexpected value type" )
}
}
count ++
}
require . Equal ( t , expectedCount , count , "number of histogram series not as expected" )
}
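// Exercise every payload with all four combinations of scraping classic
// histograms and converting them to NHCB.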
for metricsTextName , metricsText := range metricsTexts {
for name , tc := range map [ string ] struct {
alwaysScrapeClassicHistograms bool
convertClassicHistToNHCB bool
} {
"convert with scrape" : {
alwaysScrapeClassicHistograms : true ,
convertClassicHistToNHCB : true ,
} ,
"convert without scrape" : {
alwaysScrapeClassicHistograms : false ,
convertClassicHistToNHCB : true ,
} ,
"scrape without convert" : {
alwaysScrapeClassicHistograms : true ,
convertClassicHistToNHCB : false ,
} ,
"neither scrape nor convert" : {
alwaysScrapeClassicHistograms : false ,
convertClassicHistToNHCB : false ,
} ,
} {
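// Derive how many classic and native histogram series each combination should
// produce, and whether the native ones should use the custom-buckets schema.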
var expectedClassicHistCount , expectedNativeHistCount int
var expectCustomBuckets bool
if metricsText . hasExponential {
expectedNativeHistCount = 1
expectCustomBuckets = false
expectedClassicHistCount = 0
if metricsText . hasClassic && tc . alwaysScrapeClassicHistograms {
expectedClassicHistCount = 1
}
} else if metricsText . hasClassic {
switch {
case tc . alwaysScrapeClassicHistograms && tc . convertClassicHistToNHCB :
expectedClassicHistCount = 1
expectedNativeHistCount = 1
expectCustomBuckets = true
case ! tc . alwaysScrapeClassicHistograms && tc . convertClassicHistToNHCB :
expectedClassicHistCount = 0
expectedNativeHistCount = 1
expectCustomBuckets = true
case ! tc . convertClassicHistToNHCB :
expectedClassicHistCount = 1
expectedNativeHistCount = 0
}
}
t . Run ( fmt . Sprintf ( "%s with %s" , name , metricsTextName ) , func ( t * testing . T ) {
simpleStorage := teststorage . New ( t )
defer simpleStorage . Close ( )
config := & config . ScrapeConfig {
JobName : "test" ,
SampleLimit : 100 ,
Scheme : "http" ,
ScrapeInterval : model . Duration ( 50 * time . Millisecond ) ,
ScrapeTimeout : model . Duration ( 25 * time . Millisecond ) ,
AlwaysScrapeClassicHistograms : tc . alwaysScrapeClassicHistograms ,
ConvertClassicHistogramsToNHCB : tc . convertClassicHistToNHCB ,
}
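// Serve the payload on each scrape and close the scraped channel once the
// target has been scraped more than twice.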
scrapeCount := 0
scraped := make ( chan bool )
ts := httptest . NewServer ( http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
if metricsText . contentType != "" {
w . Header ( ) . Set ( "Content-Type" , ` application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited ` )
for _ , text := range metricsText . text {
buf := & bytes . Buffer { }
// In case of protobuf, we have to create the binary representation.
pb := & dto . MetricFamily { }
// From text to proto message.
require . NoError ( t , proto . UnmarshalText ( text , pb ) )
// From proto message to binary protobuf.
protoBuf , err := proto . Marshal ( pb )
require . NoError ( t , err )
// Write first length, then binary protobuf.
varintBuf := binary . AppendUvarint ( nil , uint64 ( len ( protoBuf ) ) )
buf . Write ( varintBuf )
buf . Write ( protoBuf )
w . Write ( buf . Bytes ( ) )
}
} else {
for _ , text := range metricsText . text {
fmt . Fprint ( w , text )
}
}
scrapeCount ++
if scrapeCount > 2 {
close ( scraped )
}
} ) )
defer ts . Close ( )
sp , err := newScrapePool ( config , simpleStorage , 0 , nil , nil , & Options { DiscoveryReloadInterval : model . Duration ( 10 * time . Millisecond ) , EnableNativeHistogramsIngestion : true } , newTestScrapeMetrics ( t ) )
require . NoError ( t , err )
defer sp . stop ( )
testURL , err := url . Parse ( ts . URL )
require . NoError ( t , err )
sp . Sync ( [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet { { model . AddressLabel : model . LabelValue ( testURL . Host ) } } ,
} ,
} )
require . Len ( t , sp . ActiveTargets ( ) , 1 )
select {
case <- time . After ( 5 * time . Second ) :
t . Fatalf ( "target was not scraped" )
case <- scraped :
}
ctx , cancel := context . WithCancel ( context . Background ( ) )
defer cancel ( )
q , err := simpleStorage . Querier ( time . Time { } . UnixNano ( ) , time . Now ( ) . UnixNano ( ) )
require . NoError ( t , err )
defer q . Close ( )
var series storage . SeriesSet
for i := 1 ; i <= 3 ; i ++ {
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , fmt . Sprintf ( "test_metric_%d" , i ) ) )
checkFloatSeries ( series , 1 , 1. )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , fmt . Sprintf ( "test_metric_%d_count" , i ) ) )
checkFloatSeries ( series , 1 , 1. )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , fmt . Sprintf ( "test_metric_%d_sum" , i ) ) )
checkFloatSeries ( series , 1 , 1. )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , fmt . Sprintf ( "test_metric_%d_bucket" , i ) ) )
checkFloatSeries ( series , 1 , 1. )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , fmt . Sprintf ( "test_histogram_%d_count" , i ) ) )
checkFloatSeries ( series , expectedClassicHistCount , 1. )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , fmt . Sprintf ( "test_histogram_%d_sum" , i ) ) )
checkFloatSeries ( series , expectedClassicHistCount , 10. )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , fmt . Sprintf ( "test_histogram_%d_bucket" , i ) ) )
checkBucketValues ( expectedClassicHistCount , series )
series = q . Select ( ctx , false , nil , labels . MustNewMatcher ( labels . MatchRegexp , "__name__" , fmt . Sprintf ( "test_histogram_%d" , i ) ) )
var expectedSchema int32
if expectCustomBuckets {
expectedSchema = histogram . CustomBucketsSchema
} else {
expectedSchema = 3
}
checkHistSeries ( series , expectedNativeHistCount , expectedSchema )
}
} )
}
}
}
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics ( t * testing . T ) {
appender := & collectResultAppender { }
var (
signal = make ( chan struct { } , 1 )
scraper = & testScraper { }
app = func ( ctx context . Context ) storage . Appender { return appender }
)
ctx , cancel := context . WithCancel ( context . Background ( ) )
// Since we're writing samples directly below we need to provide a protocol fallback.
sl := newBasicScrapeLoopWithFallback ( t , ctx , scraper , app , 10 * time . Millisecond , "text/plain" )
sl . trackTimestampsStaleness = true
// Succeed once, several failures, then stop.
numScrapes := 0
scraper . scrapeFunc = func ( ctx context . Context , w io . Writer ) error {
numScrapes ++
switch numScrapes {
case 1 :
w . Write ( [ ] byte ( fmt . Sprintf ( "metric_a 42 %d\n" , time . Now ( ) . UnixNano ( ) / int64 ( time . Millisecond ) ) ) )
return nil
case 5 :
cancel ( )
}
return errors . New ( "scrape failed" )
}
go func ( ) {
sl . run ( nil )
signal <- struct { } { }
} ( )
select {
case <- signal :
case <- time . After ( 5 * time . Second ) :
t . Fatalf ( "Scrape wasn't stopped." )
}
// 1 successfully scraped sample, 1 stale marker after the first failure, and 5 report
// samples for each of the 5 scrapes, successful or not.
require . Len ( t , appender . resultFloats , 27 , "Appended samples not as expected:\n%s" , appender )
require . Equal ( t , 42.0 , appender . resultFloats [ 0 ] . f , "Appended first sample not as expected" )
require . True ( t , value . IsStaleNaN ( appender . resultFloats [ 6 ] . f ) ,
"Appended second sample not as expected. Wanted: stale NaN Got: %x" , math . Float64bits ( appender . resultFloats [ 6 ] . f ) )
}
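// TestScrapeLoopCompression verifies that the Accept-Encoding request header
// matches the enable_compression setting and that the target is scraped either way.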
func TestScrapeLoopCompression ( t * testing . T ) {
simpleStorage := teststorage . New ( t )
defer simpleStorage . Close ( )
metricsText := makeTestMetrics ( 10 )
for _ , tc := range [ ] struct {
enableCompression bool
acceptEncoding string
} {
{
enableCompression : true ,
acceptEncoding : "gzip" ,
} ,
{
enableCompression : false ,
acceptEncoding : "identity" ,
} ,
} {
t . Run ( fmt . Sprintf ( "compression=%v,acceptEncoding=%s" , tc . enableCompression , tc . acceptEncoding ) , func ( t * testing . T ) {
scraped := make ( chan bool )
ts := httptest . NewServer ( http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
require . Equal ( t , tc . acceptEncoding , r . Header . Get ( "Accept-Encoding" ) , "invalid value of the Accept-Encoding header" )
fmt . Fprint ( w , metricsText )
close ( scraped )
} ) )
defer ts . Close ( )
config := & config . ScrapeConfig {
JobName : "test" ,
SampleLimit : 100 ,
Scheme : "http" ,
ScrapeInterval : model . Duration ( 100 * time . Millisecond ) ,
ScrapeTimeout : model . Duration ( 100 * time . Millisecond ) ,
EnableCompression : tc . enableCompression ,
}
sp , err := newScrapePool ( config , simpleStorage , 0 , nil , nil , & Options { } , newTestScrapeMetrics ( t ) )
require . NoError ( t , err )
defer sp . stop ( )
testURL , err := url . Parse ( ts . URL )
require . NoError ( t , err )
sp . Sync ( [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet { { model . AddressLabel : model . LabelValue ( testURL . Host ) } } ,
} ,
} )
require . Len ( t , sp . ActiveTargets ( ) , 1 )
select {
case <- time . After ( 5 * time . Second ) :
t . Fatalf ( "target was not scraped" )
case <- scraped :
}
} )
}
}
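// TestPickSchema checks that pickSchema maps a native_histogram_min_bucket_factor
// value to the expected native histogram schema.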
func TestPickSchema ( t * testing . T ) {
tcs := [ ] struct {
factor float64
schema int32
} {
{
factor : 65536 ,
schema : - 4 ,
} ,
{
factor : 256 ,
schema : - 3 ,
} ,
{
factor : 16 ,
schema : - 2 ,
} ,
{
factor : 4 ,
schema : - 1 ,
} ,
{
factor : 2 ,
schema : 0 ,
} ,
{
factor : 1.4 ,
schema : 1 ,
} ,
{
factor : 1.1 ,
schema : 2 ,
} ,
{
factor : 1.09 ,
schema : 3 ,
} ,
{
factor : 1.04 ,
schema : 4 ,
} ,
{
factor : 1.02 ,
schema : 5 ,
} ,
{
factor : 1.01 ,
schema : 6 ,
} ,
{
factor : 1.005 ,
schema : 7 ,
} ,
{
factor : 1.002 ,
schema : 8 ,
} ,
// The default value of native_histogram_min_bucket_factor.
{
factor : 0 ,
schema : 8 ,
} ,
}
for _ , tc := range tcs {
schema := pickSchema ( tc . factor )
require . Equal ( t , tc . schema , schema )
}
}
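// BenchmarkTargetScraperGzip measures a single scrape of gzip-compressed text
// payloads of increasing size.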
func BenchmarkTargetScraperGzip ( b * testing . B ) {
scenarios := [ ] struct {
metricsCount int
body [ ] byte
} {
{ metricsCount : 1 } ,
{ metricsCount : 100 } ,
{ metricsCount : 1000 } ,
{ metricsCount : 10000 } ,
{ metricsCount : 100000 } ,
}
for i := 0 ; i < len ( scenarios ) ; i ++ {
var buf bytes . Buffer
var name string
gw := gzip . NewWriter ( & buf )
for j := 0 ; j < scenarios [ i ] . metricsCount ; j ++ {
name = fmt . Sprintf ( "go_memstats_alloc_bytes_total_%d" , j )
fmt . Fprintf ( gw , "# HELP %s Total number of bytes allocated, even if freed.\n" , name )
fmt . Fprintf ( gw , "# TYPE %s counter\n" , name )
fmt . Fprintf ( gw , "%s %d\n" , name , i * j )
}
gw . Close ( )
scenarios [ i ] . body = buf . Bytes ( )
}
handler := http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
w . Header ( ) . Set ( "Content-Type" , ` text/plain; version=0.0.4 ` )
w . Header ( ) . Set ( "Content-Encoding" , "gzip" )
for _ , scenario := range scenarios {
if strconv . Itoa ( scenario . metricsCount ) == r . URL . Query ( ) [ "count" ] [ 0 ] {
w . Write ( scenario . body )
return
}
}
w . WriteHeader ( http . StatusBadRequest )
} )
server := httptest . NewServer ( handler )
defer server . Close ( )
serverURL , err := url . Parse ( server . URL )
if err != nil {
panic ( err )
}
client , err := config_util . NewClientFromConfig ( config_util . DefaultHTTPClientConfig , "test_job" )
if err != nil {
panic ( err )
}
for _ , scenario := range scenarios {
b . Run ( fmt . Sprintf ( "metrics=%d" , scenario . metricsCount ) , func ( b * testing . B ) {
ts := & targetScraper {
Target : & Target {
labels : labels . FromStrings (
model . SchemeLabel , serverURL . Scheme ,
model . AddressLabel , serverURL . Host ,
) ,
params : url . Values { "count" : [ ] string { strconv . Itoa ( scenario . metricsCount ) } } ,
} ,
client : client ,
timeout : time . Second ,
}
b . ResetTimer ( )
for i := 0 ; i < b . N ; i ++ {
_ , err = ts . scrape ( context . Background ( ) )
require . NoError ( b , err )
}
} )
}
}
// When a scrape contains multiple samples for the same time series, we should increment the
// prometheus_target_scrapes_sample_duplicate_timestamp_total metric.
func TestScrapeLoopSeriesAddedDuplicates ( t * testing . T ) {
ctx , sl := simpleTestScrapeLoop ( t )
slApp := sl . appender ( ctx )
total , added , seriesAdded , err := sl . append ( slApp , [ ] byte ( "test_metric 1\ntest_metric 2\ntest_metric 3\n" ) , "text/plain" , time . Time { } )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
require . Equal ( t , 3 , total )
require . Equal ( t , 3 , added )
require . Equal ( t , 1 , seriesAdded )
require . Equal ( t , 2.0 , prom_testutil . ToFloat64 ( sl . metrics . targetScrapeSampleDuplicate ) )
slApp = sl . appender ( ctx )
total , added , seriesAdded , err = sl . append ( slApp , [ ] byte ( "test_metric 1\ntest_metric 1\ntest_metric 1\n" ) , "text/plain" , time . Time { } )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
require . Equal ( t , 3 , total )
require . Equal ( t , 3 , added )
require . Equal ( t , 0 , seriesAdded )
require . Equal ( t , 4.0 , prom_testutil . ToFloat64 ( sl . metrics . targetScrapeSampleDuplicate ) )

// When different timestamps are supplied, multiple samples are accepted.
slApp = sl . appender ( ctx )
total , added , seriesAdded , err = sl . append ( slApp , [ ] byte ( "test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n" ) , "text/plain" , time . Time { } )
require . NoError ( t , err )
require . NoError ( t , slApp . Commit ( ) )
require . Equal ( t , 3 , total )
require . Equal ( t , 3 , added )
require . Equal ( t , 0 , seriesAdded )
// Metric is not higher than last time.
require . Equal ( t , 4.0 , prom_testutil . ToFloat64 ( sl . metrics . targetScrapeSampleDuplicate ) )
}
// This tests running a full scrape loop and checking that the scrape option
// `native_histogram_min_bucket_factor` is used correctly.
func TestNativeHistogramMaxSchemaSet ( t * testing . T ) {
testcases := map [ string ] struct {
minBucketFactor string
expectedSchema int32
} {
"min factor not specified" : {
minBucketFactor : "" ,
expectedSchema : 3 , // Factor 1.09.
} ,
"min factor 1" : {
minBucketFactor : "native_histogram_min_bucket_factor: 1" ,
expectedSchema : 3 , // Factor 1.09.
} ,
"min factor 2" : {
minBucketFactor : "native_histogram_min_bucket_factor: 2" ,
expectedSchema : 0 , // Factor 2.00.
} ,
}
for name , tc := range testcases {
tc := tc
t . Run ( name , func ( t * testing . T ) {
t . Parallel ( )
testNativeHistogramMaxSchemaSet ( t , tc . minBucketFactor , tc . expectedSchema )
} )
}
}
func testNativeHistogramMaxSchemaSet ( t * testing . T , minBucketFactor string , expectedSchema int32 ) {
// Create a ProtoBuf message to serve as a Prometheus metric.
nativeHistogram := prometheus . NewHistogram (
prometheus . HistogramOpts {
Namespace : "testing" ,
Name : "example_native_histogram" ,
Help : "This is used for testing" ,
NativeHistogramBucketFactor : 1.1 ,
NativeHistogramMaxBucketNumber : 100 ,
} ,
)
registry := prometheus . NewRegistry ( )
require . NoError ( t , registry . Register ( nativeHistogram ) )
nativeHistogram . Observe ( 1.0 )
nativeHistogram . Observe ( 1.0 )
nativeHistogram . Observe ( 1.0 )
nativeHistogram . Observe ( 10.0 ) // in different bucket since > 1*1.1.
nativeHistogram . Observe ( 10.0 )
gathered , err := registry . Gather ( )
require . NoError ( t , err )
require . NotEmpty ( t , gathered )
histogramMetricFamily := gathered [ 0 ]
buffer := protoMarshalDelimited ( t , histogramMetricFamily )
// Create an HTTP server to serve /metrics via ProtoBuf.
metricsServer := httptest . NewServer ( http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
w . Header ( ) . Set ( "Content-Type" , ` application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited ` )
w . Write ( buffer )
} ) )
defer metricsServer . Close ( )
// Create a scrape loop with the HTTP server as the target.
configStr := fmt . Sprintf ( `
global :
metric_name_validation_scheme : legacy
scrape_interval : 50 ms
scrape_timeout : 25 ms
scrape_configs :
- job_name : test
% s
static_configs :
- targets : [ % s ]
` , minBucketFactor , strings . ReplaceAll ( metricsServer . URL , "http://" , "" ) )
s := teststorage . New ( t )
defer s . Close ( )
s . DB . EnableNativeHistograms ( )
reg := prometheus . NewRegistry ( )
mng , err := NewManager ( & Options { DiscoveryReloadInterval : model . Duration ( 10 * time . Millisecond ) , EnableNativeHistogramsIngestion : true } , nil , nil , s , reg )
require . NoError ( t , err )
cfg , err := config . Load ( configStr , promslog . NewNopLogger ( ) )
require . NoError ( t , err )
require . NoError ( t , mng . ApplyConfig ( cfg ) )
tsets := make ( chan map [ string ] [ ] * targetgroup . Group )
go func ( ) {
err = mng . Run ( tsets )
require . NoError ( t , err )
} ( )
defer mng . Stop ( )
// Get the static targets and apply them to the scrape manager.
require . Len ( t , cfg . ScrapeConfigs , 1 )
scrapeCfg := cfg . ScrapeConfigs [ 0 ]
require . Len ( t , scrapeCfg . ServiceDiscoveryConfigs , 1 )
staticDiscovery , ok := scrapeCfg . ServiceDiscoveryConfigs [ 0 ] . ( discovery . StaticConfig )
require . True ( t , ok )
require . Len ( t , staticDiscovery , 1 )
tsets <- map [ string ] [ ] * targetgroup . Group { "test" : staticDiscovery }
// Wait for the scrape loop to scrape the target.
require . Eventually ( t , func ( ) bool {
q , err := s . Querier ( 0 , math . MaxInt64 )
require . NoError ( t , err )
seriesS := q . Select ( context . Background ( ) , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "__name__" , "testing_example_native_histogram" ) )
countSeries := 0
for seriesS . Next ( ) {
countSeries ++
}
return countSeries > 0
} , 5 * time . Second , 100 * time . Millisecond )
// Check that native histogram schema is as expected.
q , err := s . Querier ( 0 , math . MaxInt64 )
require . NoError ( t , err )
seriesS := q . Select ( context . Background ( ) , false , nil , labels . MustNewMatcher ( labels . MatchEqual , "__name__" , "testing_example_native_histogram" ) )
histogramSamples := [ ] * histogram . Histogram { }
for seriesS . Next ( ) {
series := seriesS . At ( )
it := series . Iterator ( nil )
for vt := it . Next ( ) ; vt != chunkenc . ValNone ; vt = it . Next ( ) {
if vt != chunkenc . ValHistogram {
// don't care about other samples
continue
}
_ , h := it . AtHistogram ( nil )
histogramSamples = append ( histogramSamples , h )
}
}
require . NoError ( t , seriesS . Err ( ) )
require . NotEmpty ( t , histogramSamples )
for _ , h := range histogramSamples {
require . Equal ( t , expectedSchema , h . Schema )
}
}
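// TestTargetScrapeConfigWithLabels checks that the scrape timeout, metrics path
// and URL parameters can come from the scrape config, be overridden per target,
// or be rewritten by relabel configs.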
func TestTargetScrapeConfigWithLabels ( t * testing . T ) {
const (
configTimeout = 1500 * time . Millisecond
expectedTimeout = "1.5"
expectedTimeoutLabel = "1s500ms"
secondTimeout = 500 * time . Millisecond
secondTimeoutLabel = "500ms"
expectedParam = "value1"
secondParam = "value2"
expectedPath = "/metric-ok"
secondPath = "/metric-nok"
httpScheme = "http"
paramLabel = "__param_param"
jobName = "test"
)
createTestServer := func ( t * testing . T , done chan struct { } ) * url . URL {
server := httptest . NewServer (
http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
defer close ( done )
require . Equal ( t , expectedTimeout , r . Header . Get ( "X-Prometheus-Scrape-Timeout-Seconds" ) )
require . Equal ( t , expectedParam , r . URL . Query ( ) . Get ( "param" ) )
require . Equal ( t , expectedPath , r . URL . Path )
w . Header ( ) . Set ( "Content-Type" , ` text/plain; version=0.0.4 ` )
w . Write ( [ ] byte ( "metric_a 1\nmetric_b 2\n" ) )
} ) ,
)
t . Cleanup ( server . Close )
serverURL , err := url . Parse ( server . URL )
require . NoError ( t , err )
return serverURL
}
run := func ( t * testing . T , cfg * config . ScrapeConfig , targets [ ] * targetgroup . Group ) chan struct { } {
done := make ( chan struct { } )
srvURL := createTestServer ( t , done )
// Update target addresses to use the dynamically created server URL.
for _ , target := range targets {
for i := range target . Targets {
target . Targets [ i ] [ model . AddressLabel ] = model . LabelValue ( srvURL . Host )
}
}
sp , err := newScrapePool ( cfg , & nopAppendable { } , 0 , nil , nil , & Options { } , newTestScrapeMetrics ( t ) )
require . NoError ( t , err )
t . Cleanup ( sp . stop )
sp . Sync ( targets )
return done
}
cases := [ ] struct {
name string
cfg * config . ScrapeConfig
targets [ ] * targetgroup . Group
} {
{
name : "Everything in scrape config" ,
cfg : & config . ScrapeConfig {
ScrapeInterval : model . Duration ( 2 * time . Second ) ,
ScrapeTimeout : model . Duration ( configTimeout ) ,
Params : url . Values { "param" : [ ] string { expectedParam } } ,
JobName : jobName ,
Scheme : httpScheme ,
MetricsPath : expectedPath ,
} ,
targets : [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet {
{ model . AddressLabel : model . LabelValue ( "" ) } ,
} ,
} ,
} ,
} ,
{
name : "Overridden in target" ,
cfg : & config . ScrapeConfig {
ScrapeInterval : model . Duration ( 2 * time . Second ) ,
ScrapeTimeout : model . Duration ( secondTimeout ) ,
JobName : jobName ,
Scheme : httpScheme ,
MetricsPath : secondPath ,
Params : url . Values { "param" : [ ] string { secondParam } } ,
} ,
targets : [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet {
{
model . AddressLabel : model . LabelValue ( "" ) ,
model . ScrapeTimeoutLabel : expectedTimeoutLabel ,
model . MetricsPathLabel : expectedPath ,
paramLabel : expectedParam ,
} ,
} ,
} ,
} ,
} ,
{
name : "Overridden in relabel_config" ,
cfg : & config . ScrapeConfig {
ScrapeInterval : model . Duration ( 2 * time . Second ) ,
ScrapeTimeout : model . Duration ( secondTimeout ) ,
JobName : jobName ,
Scheme : httpScheme ,
MetricsPath : secondPath ,
Params : url . Values { "param" : [ ] string { secondParam } } ,
RelabelConfigs : [ ] * relabel . Config {
{
Action : relabel . DefaultRelabelConfig . Action ,
Regex : relabel . DefaultRelabelConfig . Regex ,
SourceLabels : relabel . DefaultRelabelConfig . SourceLabels ,
TargetLabel : model . ScrapeTimeoutLabel ,
Replacement : expectedTimeoutLabel ,
} ,
{
Action : relabel . DefaultRelabelConfig . Action ,
Regex : relabel . DefaultRelabelConfig . Regex ,
SourceLabels : relabel . DefaultRelabelConfig . SourceLabels ,
TargetLabel : paramLabel ,
Replacement : expectedParam ,
} ,
{
Action : relabel . DefaultRelabelConfig . Action ,
Regex : relabel . DefaultRelabelConfig . Regex ,
SourceLabels : relabel . DefaultRelabelConfig . SourceLabels ,
TargetLabel : model . MetricsPathLabel ,
Replacement : expectedPath ,
} ,
} ,
} ,
targets : [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet {
{
model . AddressLabel : model . LabelValue ( "" ) ,
model . ScrapeTimeoutLabel : secondTimeoutLabel ,
model . MetricsPathLabel : secondPath ,
paramLabel : secondParam ,
} ,
} ,
} ,
} ,
} ,
}
for _ , c := range cases {
t . Run ( c . name , func ( t * testing . T ) {
select {
case <- run ( t , c . cfg , c . targets ) :
case <- time . After ( 10 * time . Second ) :
t . Fatal ( "timeout after 10 seconds" )
}
} )
}
}
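// newScrapableServer returns a test server that serves scrapeText and closes
// the scrapedTwice channel after it has served two scrapes.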
func newScrapableServer ( scrapeText string ) ( s * httptest . Server , scrapedTwice chan bool ) {
var scrapes int
scrapedTwice = make ( chan bool )
return httptest . NewServer ( http . HandlerFunc ( func ( w http . ResponseWriter , r * http . Request ) {
fmt . Fprint ( w , scrapeText )
scrapes ++
if scrapes == 2 {
close ( scrapedTwice )
}
} ) ) , scrapedTwice
}
// Regression test for the panic fixed in https://github.com/prometheus/prometheus/pull/15523.
func TestScrapePoolScrapeAfterReload ( t * testing . T ) {
h := httptest . NewServer ( http . HandlerFunc (
func ( w http . ResponseWriter , r * http . Request ) {
w . Write ( [ ] byte { 0x42 , 0x42 } )
} ,
) )
t . Cleanup ( h . Close )
cfg := & config . ScrapeConfig {
BodySizeLimit : 1 ,
JobName : "test" ,
Scheme : "http" ,
ScrapeInterval : model . Duration ( 100 * time . Millisecond ) ,
ScrapeTimeout : model . Duration ( 100 * time . Millisecond ) ,
EnableCompression : false ,
ServiceDiscoveryConfigs : discovery . Configs {
& discovery . StaticConfig {
{
Targets : [ ] model . LabelSet { { model . AddressLabel : model . LabelValue ( h . URL ) } } ,
} ,
} ,
} ,
}
p , err := newScrapePool ( cfg , & nopAppendable { } , 0 , nil , nil , & Options { } , newTestScrapeMetrics ( t ) )
require . NoError ( t , err )
t . Cleanup ( p . stop )
p . Sync ( [ ] * targetgroup . Group {
{
Targets : [ ] model . LabelSet { { model . AddressLabel : model . LabelValue ( strings . TrimPrefix ( h . URL , "http://" ) ) } } ,
Source : "test" ,
} ,
} )
require . NoError ( t , p . reload ( cfg ) )
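// Let the reloaded pool run at least one scrape; before the fix referenced
// above this caused a panic.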
<- time . After ( 1 * time . Second )
}